diff --git a/.changelog/unreleased/SDK/1953-phase-out-try-halt.md b/.changelog/unreleased/SDK/1953-phase-out-try-halt.md new file mode 100644 index 0000000000..48280ecb2f --- /dev/null +++ b/.changelog/unreleased/SDK/1953-phase-out-try-halt.md @@ -0,0 +1,2 @@ +- Phase out Halt abstractions + ([\#1953](https://github.com/anoma/namada/pull/1953)) \ No newline at end of file diff --git a/.changelog/unreleased/SDK/1957-bp-client-validation.md b/.changelog/unreleased/SDK/1957-bp-client-validation.md new file mode 100644 index 0000000000..ad829f2c88 --- /dev/null +++ b/.changelog/unreleased/SDK/1957-bp-client-validation.md @@ -0,0 +1,2 @@ +- Validate Bridge pool transfers before submitting them to the network + ([\#1957](https://github.com/anoma/namada/pull/1957)) \ No newline at end of file diff --git a/.changelog/unreleased/SDK/1963-sdk-refactor-rebased.md b/.changelog/unreleased/SDK/1963-sdk-refactor-rebased.md new file mode 100644 index 0000000000..6add26845e --- /dev/null +++ b/.changelog/unreleased/SDK/1963-sdk-refactor-rebased.md @@ -0,0 +1,2 @@ +- Improved the usability of the SDK and moved it to separate crate. + ([\#1963](https://github.com/anoma/namada/pull/1963)) \ No newline at end of file diff --git a/.changelog/unreleased/SDK/2033-sdk-re-exports.md b/.changelog/unreleased/SDK/2033-sdk-re-exports.md new file mode 100644 index 0000000000..049af49e8f --- /dev/null +++ b/.changelog/unreleased/SDK/2033-sdk-re-exports.md @@ -0,0 +1,2 @@ +- Now re-exporting crates that will commonly be used with the SDK. + ([\#2033](https://github.com/anoma/namada/pull/2033)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/1949-pow-solution-fix.md b/.changelog/unreleased/bug-fixes/1949-pow-solution-fix.md new file mode 100644 index 0000000000..6addee1bdd --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1949-pow-solution-fix.md @@ -0,0 +1,2 @@ +- Reintroduced a dummy field in order to achieve compatibility with hardware + wallet. 
([\#1949](https://github.com/anoma/namada/pull/1949)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/1964-fix-protocol-txs.md b/.changelog/unreleased/bug-fixes/1964-fix-protocol-txs.md new file mode 100644 index 0000000000..6c39e70d2e --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1964-fix-protocol-txs.md @@ -0,0 +1,2 @@ +- Fix broadcasting logic for protocol txs when a node operating the network is a + validator ([\#1964](https://github.com/anoma/namada/pull/1964)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/1984-rm-redundant-writes.md b/.changelog/unreleased/bug-fixes/1984-rm-redundant-writes.md new file mode 100644 index 0000000000..f88ad5858f --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1984-rm-redundant-writes.md @@ -0,0 +1,4 @@ +- Avoid redundant storage deletions in lazy collections that would incur + extra gas cause and appear in transaction result as changed keys even if not + changed occurred. This may have caused PoS transactions to run out of gas. + ([\#1984](https://github.com/anoma/namada/pull/1984)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/1989-fix-ibc-client-validation.md b/.changelog/unreleased/bug-fixes/1989-fix-ibc-client-validation.md new file mode 100644 index 0000000000..8adbad706a --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1989-fix-ibc-client-validation.md @@ -0,0 +1,2 @@ +- Update ibc-rs with the fix for ibc-rs/#911 + ([\#1989](https://github.com/anoma/namada/issues/1989)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/1999-fix-pgf-stewards-funding.md b/.changelog/unreleased/bug-fixes/1999-fix-pgf-stewards-funding.md new file mode 100644 index 0000000000..73ae0da0da --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1999-fix-pgf-stewards-funding.md @@ -0,0 +1,2 @@ +- Fixed the pgf stewards reward to be constant regardless of the number of + stewards. 
([\#1999](https://github.com/anoma/namada/pull/1999)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1867-fix-replay-protection.md b/.changelog/unreleased/improvements/1867-fix-replay-protection.md new file mode 100644 index 0000000000..ad22c70c55 --- /dev/null +++ b/.changelog/unreleased/improvements/1867-fix-replay-protection.md @@ -0,0 +1,2 @@ +- Reworked the signature of inner transactions to improve safety and fix replay + protection. ([\#1867](https://github.com/anoma/namada/pull/1867)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1888-ledger-test-vector-generator-fix-0.22.0.md b/.changelog/unreleased/improvements/1888-ledger-test-vector-generator-fix-0.22.0.md new file mode 100644 index 0000000000..0aeb7196d1 --- /dev/null +++ b/.changelog/unreleased/improvements/1888-ledger-test-vector-generator-fix-0.22.0.md @@ -0,0 +1,2 @@ +- Updated the generation of hardware wallet test vectors to cover current + codebase ([\#1888](https://github.com/anoma/namada/pull/1888)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1917-ibc-shielded-actions.md b/.changelog/unreleased/improvements/1917-ibc-shielded-actions.md new file mode 100644 index 0000000000..d9ec6743d0 --- /dev/null +++ b/.changelog/unreleased/improvements/1917-ibc-shielded-actions.md @@ -0,0 +1,2 @@ +- IBC transfer to a payment address + ([\#1917](https://github.com/anoma/namada/issues/1917)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1930-update-borsh.md b/.changelog/unreleased/improvements/1930-update-borsh.md new file mode 100644 index 0000000000..e7fb8b994e --- /dev/null +++ b/.changelog/unreleased/improvements/1930-update-borsh.md @@ -0,0 +1 @@ +- Migrate to upstream borsh ([\#1930](https://github.com/anoma/namada/pull/1930)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1943-refactor-past-epoch-offsets.md 
b/.changelog/unreleased/improvements/1943-refactor-past-epoch-offsets.md new file mode 100644 index 0000000000..958b40760d --- /dev/null +++ b/.changelog/unreleased/improvements/1943-refactor-past-epoch-offsets.md @@ -0,0 +1,3 @@ +- Improve the Epoched data structure's bookkeeping of past + epochs, now parameterizable by PoS and governance params. + ([\#1943](https://github.com/anoma/namada/pull/1943)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1944-tune-storage-past-epochs.md b/.changelog/unreleased/improvements/1944-tune-storage-past-epochs.md new file mode 100644 index 0000000000..9ddda02759 --- /dev/null +++ b/.changelog/unreleased/improvements/1944-tune-storage-past-epochs.md @@ -0,0 +1,2 @@ +- New implementation and parameters for purging old epochs for Epoched validator + data in storage. ([\#1944](https://github.com/anoma/namada/pull/1944)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1946-ibc-balance-query.md b/.changelog/unreleased/improvements/1946-ibc-balance-query.md new file mode 100644 index 0000000000..1f1093caa0 --- /dev/null +++ b/.changelog/unreleased/improvements/1946-ibc-balance-query.md @@ -0,0 +1,2 @@ +- Query also IBC token balances + ([\#1946](https://github.com/anoma/namada/issues/1946)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1954-gas-in-sig-ver.md b/.changelog/unreleased/improvements/1954-gas-in-sig-ver.md new file mode 100644 index 0000000000..27ba173c46 --- /dev/null +++ b/.changelog/unreleased/improvements/1954-gas-in-sig-ver.md @@ -0,0 +1,2 @@ +- Increased resoultion of gas accounting for signature verification. 
+ ([\#1954](https://github.com/anoma/namada/pull/1954)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1955-avoid-testing-feature-in-workspace.md b/.changelog/unreleased/improvements/1955-avoid-testing-feature-in-workspace.md new file mode 100644 index 0000000000..7ce5c574b2 --- /dev/null +++ b/.changelog/unreleased/improvements/1955-avoid-testing-feature-in-workspace.md @@ -0,0 +1,3 @@ +- Refactor benchmarks to avoid enabling `"testing`" and `"dev"`` features by + default in the workspace. + ([\#1955](https://github.com/anoma/namada/pull/1955)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1973-refine-commission-tx.md b/.changelog/unreleased/improvements/1973-refine-commission-tx.md new file mode 100644 index 0000000000..04a00bac66 --- /dev/null +++ b/.changelog/unreleased/improvements/1973-refine-commission-tx.md @@ -0,0 +1,2 @@ +- Add missing checks for the commission rate change tx and code clean-up + ([\#1973](https://github.com/anoma/namada/pull/1973)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1977-replay-protection-storage.md b/.changelog/unreleased/improvements/1977-replay-protection-storage.md new file mode 100644 index 0000000000..0686adca5f --- /dev/null +++ b/.changelog/unreleased/improvements/1977-replay-protection-storage.md @@ -0,0 +1,2 @@ +- Reduced the storage consumption of replay protection. + ([\#1977](https://github.com/anoma/namada/pull/1977)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1979-proposal-result-in-storage.md b/.changelog/unreleased/improvements/1979-proposal-result-in-storage.md new file mode 100644 index 0000000000..1eb15aa889 --- /dev/null +++ b/.changelog/unreleased/improvements/1979-proposal-result-in-storage.md @@ -0,0 +1,2 @@ +- Persist the results of governance proposals in storage to allow recovering old + results. 
([\#1979](https://github.com/anoma/namada/pull/1979)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1985-compounding-rewards.md b/.changelog/unreleased/improvements/1985-compounding-rewards.md new file mode 100644 index 0000000000..1b41c6ca20 --- /dev/null +++ b/.changelog/unreleased/improvements/1985-compounding-rewards.md @@ -0,0 +1,2 @@ +- MASP rewards are now distributed in the manner dictated by the PD-controller + ([\#1985](https://github.com/anoma/namada/pull/1985)) \ No newline at end of file diff --git a/.changelog/unreleased/miscellaneous/1958-k256.md b/.changelog/unreleased/miscellaneous/1958-k256.md new file mode 100644 index 0000000000..82a38bb863 --- /dev/null +++ b/.changelog/unreleased/miscellaneous/1958-k256.md @@ -0,0 +1,2 @@ +- Switched from using `libsecp256k1` to `k256` crate. + ([\#1958](https://github.com/anoma/namada/pull/1958)) \ No newline at end of file diff --git a/.changelog/unreleased/miscellaneous/1958-zeroize-secret-keys.md b/.changelog/unreleased/miscellaneous/1958-zeroize-secret-keys.md new file mode 100644 index 0000000000..f606d9d7ef --- /dev/null +++ b/.changelog/unreleased/miscellaneous/1958-zeroize-secret-keys.md @@ -0,0 +1,2 @@ +- Tag `ed25519` keys with `ZeroizeOnDrop` + ([\#1958](https://github.com/anoma/namada/pull/1958)) \ No newline at end of file diff --git a/.changelog/unreleased/testing/1976-int-test-services.md b/.changelog/unreleased/testing/1976-int-test-services.md new file mode 100644 index 0000000000..aaeecc3db5 --- /dev/null +++ b/.changelog/unreleased/testing/1976-int-test-services.md @@ -0,0 +1,2 @@ +- Mock ledger services in integration tests + ([\#1976](https://github.com/anoma/namada/pull/1976)) \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 82c84ffdd0..0334476964 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -37,18 +37,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if 1.0.0", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.0", -] - [[package]] name = "aes" version = "0.8.3" @@ -95,15 +83,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - [[package]] name = "anstream" version = "0.3.2" @@ -497,16 +476,16 @@ checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" [[package]] name = "bellman" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4dd656ef4fdf7debb6d87d4dd92642fcbcdb78cbf6600c13e25c87e4d1a3807" +checksum = "9afceed28bac7f9f5a508bca8aeeff51cdfa4770c0b967ac55c621e2ddfd6171" dependencies = [ "bitvec 1.0.1", "blake2s_simd", "byteorder", "crossbeam-channel 0.5.8", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "lazy_static", "log", "num_cpus", @@ -557,14 +536,14 @@ dependencies = [ [[package]] name = "bip0039" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0830ae4cc96b0617cc912970c2b17e89456fecbf55e8eed53a956f37ab50c41" +checksum = "bef0f0152ec5cf17f49a5866afaa3439816207fd4f0a224c0211ffaf5e278426" dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.9.0", + "hmac 0.12.1", + "pbkdf2 0.10.1", "rand 0.8.5", - "sha2 0.9.9", + "sha2 0.10.6", "unicode-normalization", "zeroize", ] @@ -679,7 +658,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" 
dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -703,16 +682,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-modes" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb03d1bed155d89dce0f845b7899b18a9a163e148fd004e1c28421a783e2d8e" -dependencies = [ - "block-padding 0.2.1", - "cipher 0.3.0", -] - [[package]] name = "block-padding" version = "0.1.5" @@ -722,20 +691,14 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "bls12_381" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c196a77437e7cc2fb515ce413a6401291578b5afc8ecb29a3c7ab957f05941" +checksum = "d7bc6d6292be3a19e6379786dac800f551e5865a5bb51ebbe3064ab80433f403" dependencies = [ - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "pairing", "rand_core 0.6.4", "subtle 2.4.1", @@ -743,10 +706,11 @@ dependencies = [ [[package]] name = "borsh" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" dependencies = [ - "borsh-derive 0.9.4", + "borsh-derive 0.9.3", "hashbrown 0.11.2", ] @@ -760,13 +724,24 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "borsh" +version = "1.0.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41898277107b0d3f027593697912977397eba6ac39a55bdd2eb02c1d5d5013b5" +dependencies = [ + "borsh-derive 1.0.0-alpha.4", + "cfg_aliases", +] + [[package]] name = 
"borsh-derive" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" dependencies = [ - "borsh-derive-internal 0.9.4", - "borsh-schema-derive-internal 0.9.4", + "borsh-derive-internal 0.9.3", + "borsh-schema-derive-internal 0.9.3", "proc-macro-crate 0.1.5", "proc-macro2", "syn 1.0.109", @@ -785,10 +760,25 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-derive" +version = "1.0.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413cb435569fe499e89235f758304e0e7198016baa351d8f5827ea0f40526ce0" +dependencies = [ + "once_cell", + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 2.0.15", + "syn_derive", +] + [[package]] name = "borsh-derive-internal" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ "proc-macro2", "quote", @@ -806,10 +796,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-ext" +version = "1.0.0-alpha.4" +source = "git+https://github.com/heliaxdev/borsh-ext?tag=v1.0.0-alpha.4#6bebf357002f96574ac37a28f547b6c88e91b799" +dependencies = [ + "borsh 1.0.0-alpha.4", +] + [[package]] name = "borsh-schema-derive-internal" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ "proc-macro2", "quote", @@ -961,6 +960,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher 0.4.4", +] + [[package]] name = "cc" version = "1.0.79" @@ -991,6 +999,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chacha20" version = "0.8.2" @@ -1202,27 +1216,27 @@ dependencies = [ [[package]] name = "color-eyre" -version = "0.5.11" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f1885697ee8a177096d42f158922251a41973117f6d8a234cee94b9509157b7" +checksum = "5a667583cca8c4f8436db8de46ea8233c42a7d9ae424a82d338f2e4675229204" dependencies = [ "backtrace", "color-spantrace", "eyre", "indenter", "once_cell", - "owo-colors 1.3.0", + "owo-colors", "tracing-error", ] [[package]] name = "color-spantrace" -version = "0.1.6" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6eee477a4a8a72f4addd4de416eb56d54bc307b284d6601bafdee1f4ea462d1" +checksum = "1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce" dependencies = [ "once_cell", - "owo-colors 1.3.0", + "owo-colors", "tracing-core 0.1.31", "tracing-error", ] @@ -1409,7 +1423,7 @@ dependencies = [ "clap", "criterion-plot", "is-terminal", - "itertools", + "itertools 0.10.5", 
"num-traits 0.2.15", "once_cell", "oorandom", @@ -1430,7 +1444,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] @@ -1545,16 +1559,6 @@ dependencies = [ "subtle 2.4.1", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.4.1", -] - [[package]] name = "ct-codecs" version = "1.1.1" @@ -1570,16 +1574,6 @@ dependencies = [ "sct 0.6.1", ] -[[package]] -name = "ctor" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "ctr" version = "0.9.2" @@ -1832,6 +1826,7 @@ dependencies = [ "digest 0.10.6", "elliptic-curve", "rfc6979", + "serdect", "signature 2.1.0", "spki", ] @@ -1892,12 +1887,13 @@ dependencies = [ "base16ct", "crypto-bigint", "digest 0.10.6", - "ff 0.13.0", + "ff", "generic-array 0.14.7", - "group 0.13.0", + "group", "pkcs8", "rand_core 0.6.4", "sec1", + "serdect", "subtle 2.4.1", "zeroize", ] @@ -2024,7 +2020,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" dependencies = [ - "aes 0.8.3", + "aes", "ctr", "digest 0.10.6", "hex", @@ -2409,14 +2405,14 @@ dependencies = [ "bincode", "blake2", "blake2b_simd", - "borsh 0.9.4", + "borsh 0.9.3", "digest 0.10.6", "ed25519-dalek", "either", "ferveo-common", "group-threshold-cryptography", "hex", - "itertools", + "itertools 0.10.5", "measure_time", "miracl_core", "num 0.4.0", @@ -2443,23 +2439,13 @@ dependencies = [ "serde_bytes", ] -[[package]] -name = 
"ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "bitvec 1.0.1", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "ff" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ + "bitvec 1.0.1", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2557,12 +2543,12 @@ dependencies = [ [[package]] name = "fpe" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd910db5f9ca4dc3116f8c46367825807aa2b942f72565f16b4be0b208a00a9e" +checksum = "26c4b37de5ae15812a764c958297cfc50f5c010438f60c6ce75d11b802abd404" dependencies = [ - "block-modes", - "cipher 0.3.0", + "cbc", + "cipher 0.4.4", "libm", "num-bigint 0.4.3", "num-integer", @@ -2795,25 +2781,14 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "memuse", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.0", + "ff", + "memuse", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2833,7 +2808,7 @@ dependencies = [ "blake2b_simd", "chacha20 0.8.2", "hex", - "itertools", + "itertools 0.10.5", "miracl_core", "rand 0.8.5", "rand_core 0.6.4", @@ -3002,16 +2977,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" 
-dependencies = [ - "crypto-mac 0.11.1", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.12.1" @@ -3032,17 +2997,6 @@ dependencies = [ "hmac 0.7.1", ] -[[package]] -name = "hmac-drbg" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" -dependencies = [ - "digest 0.9.0", - "generic-array 0.14.7", - "hmac 0.8.1", -] - [[package]] name = "hmac-sha512" version = "0.1.9" @@ -3195,7 +3149,7 @@ dependencies = [ [[package]] name = "ibc" version = "0.41.0" -source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=38a827d3901e590b2935ee5b6b81b4d67c399560#38a827d3901e590b2935ee5b6b81b4d67c399560" +source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=206cb5fa74a7ca38038b937d202ae39fbbd63c19#206cb5fa74a7ca38038b937d202ae39fbbd63c19" dependencies = [ "bytes", "cfg-if 1.0.0", @@ -3338,10 +3292,10 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "index-set" -version = "0.7.1" -source = "git+https://github.com/heliaxdev/index-set?tag=v0.7.1#dc24cdbbe3664514d59f1a4c4031863fc565f1c2" +version = "0.8.0" +source = "git+https://github.com/heliaxdev/index-set?tag=v0.8.0#0c218cc300c1bb7a1acf34f21b6e9d489df5fda8" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "serde 1.0.163", ] @@ -3421,7 +3375,7 @@ checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", - "rustix 0.37.1", + "rustix 0.37.13", "windows-sys 0.48.0", ] @@ -3434,6 +3388,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.6" @@ -3460,14 +3423,14 @@ dependencies = [ [[package]] name = "jubjub" 
-version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a575df5f985fe1cd5b2b05664ff6accfc46559032b954529fd225a2168d27b0f" +checksum = "8499f7a74008aafbecb2a2e608a3e13e4dd3e84df198b604451efe93f2de6e61" dependencies = [ "bitvec 1.0.1", "bls12_381", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -3482,6 +3445,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell", + "serdect", "sha2 0.10.6", "signature 2.1.0", ] @@ -3587,57 +3551,13 @@ dependencies = [ "arrayref", "crunchy", "digest 0.8.1", - "hmac-drbg 0.2.0", + "hmac-drbg", "rand 0.7.3", "sha2 0.8.2", "subtle 2.4.1", "typenum", ] -[[package]] -name = "libsecp256k1" -version = "0.7.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "arrayref", - "base64 0.13.1", - "digest 0.9.0", - "hmac-drbg 0.3.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.5", - "serde 1.0.163", - "sha2 0.9.9", - "typenum", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle 2.4.1", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - [[package]] name = "libssh2-sys" version = "0.2.23" @@ -3748,9 +3668,9 
@@ dependencies = [ [[package]] name = "masp_note_encryption" version = "0.2.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=77e009626f3f52fe83c81ec6ee38fc2547d38da3#77e009626f3f52fe83c81ec6ee38fc2547d38da3" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "chacha20 0.9.1", "chacha20poly1305", "cipher 0.4.4", @@ -3761,19 +3681,19 @@ dependencies = [ [[package]] name = "masp_primitives" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=77e009626f3f52fe83c81ec6ee38fc2547d38da3#77e009626f3f52fe83c81ec6ee38fc2547d38da3" dependencies = [ - "aes 0.7.5", + "aes", "bip0039", "bitvec 1.0.1", "blake2b_simd", "blake2s_simd", "bls12_381", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "byteorder", - "ff 0.12.1", + "ff", "fpe", - "group 0.12.1", + "group", "hex", "incrementalmerkletree", "jubjub", @@ -3784,7 +3704,7 @@ dependencies = [ "num-traits 0.2.15", "rand 0.8.5", "rand_core 0.6.4", - "sha2 0.9.9", + "sha2 0.10.6", "subtle 2.4.1", "zcash_encoding", ] @@ -3792,15 +3712,15 @@ dependencies = [ [[package]] name = "masp_proofs" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=77e009626f3f52fe83c81ec6ee38fc2547d38da3#77e009626f3f52fe83c81ec6ee38fc2547d38da3" dependencies = [ "bellman", "blake2b_simd", "bls12_381", "directories", "getrandom 0.2.9", - "group 0.12.1", - "itertools", + "group", + "itertools 0.11.0", "jubjub", "lazy_static", "masp_primitives", @@ -3962,14 +3882,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", + "windows-sys 0.45.0", ] [[package]] @@ -4016,7 +3936,8 @@ dependencies = [ "async-trait", "base58 0.2.0", "bimap", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "byte-unit", "circular-queue", "clru", @@ -4026,19 +3947,21 @@ dependencies = [ "ethbridge-bridge-contract", "ethers", "eyre", + "fd-lock", "futures", - "itertools", - "libsecp256k1 0.7.0", + "itertools 0.10.5", + "k256", "loupe", "masp_primitives", "masp_proofs", "namada_core", "namada_ethereum_bridge", "namada_proof_of_stake", + "namada_sdk", "namada_test_utils", "num256", "orion", - "owo-colors 3.5.0", + "owo-colors", "parity-wasm", "parse_duration", "paste", @@ -4062,7 +3985,7 @@ dependencies = [ "tokio", "toml 0.5.9", "tracing 0.1.37", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "wasm-instrument", "wasmer", "wasmer-cache", @@ -4088,7 +4011,8 @@ dependencies = [ "bimap", "bit-set", "blake2b-rs", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "byte-unit", "byteorder", "bytes", @@ -4109,13 +4033,14 @@ dependencies = [ "flate2", "futures", "git2", - "itertools", + "itertools 0.10.5", "lazy_static", "libc", "libloading", "masp_primitives", "masp_proofs", "namada", + "namada_sdk", "namada_test_utils", "num-derive", "num-rational 0.4.1", @@ -4157,7 +4082,7 @@ dependencies = [ "tracing 0.1.37", "tracing-appender", "tracing-log", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "warp", "winapi", "zeroize", @@ -4167,22 +4092,15 @@ dependencies = [ name = "namada_benchmarks" version = "0.23.1" dependencies = [ - "async-trait", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "criterion", "ferveo-common", - "masp_primitives", - "masp_proofs", "namada", "namada_apps", - "namada_test_utils", - "prost", "rand 0.8.5", "rand_core 
0.6.4", "sha2 0.9.9", - "tempfile", - "tokio", - "tracing-subscriber 0.3.17", ] [[package]] @@ -4194,7 +4112,8 @@ dependencies = [ "ark-serialize", "assert_matches", "bech32 0.8.1", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "chrono", "data-encoding", "derivative", @@ -4210,8 +4129,8 @@ dependencies = [ "ics23", "impl-num-traits", "index-set", - "itertools", - "libsecp256k1 0.7.0", + "itertools 0.10.5", + "k256", "masp_primitives", "namada_macros", "num-integer", @@ -4238,7 +4157,7 @@ dependencies = [ "toml 0.5.9", "tonic-build", "tracing 0.1.37", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "uint", "zeroize", ] @@ -4247,8 +4166,8 @@ dependencies = [ name = "namada_encoding_spec" version = "0.23.1" dependencies = [ - "borsh 0.9.4", - "itertools", + "borsh 1.0.0-alpha.4", + "itertools 0.10.5", "lazy_static", "madato", "namada", @@ -4259,12 +4178,13 @@ name = "namada_ethereum_bridge" version = "0.23.1" dependencies = [ "assert_matches", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "data-encoding", "ethabi", "ethers", "eyre", - "itertools", + "itertools 0.10.5", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -4291,25 +4211,76 @@ dependencies = [ name = "namada_proof_of_stake" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "assert_matches", + "borsh 1.0.0-alpha.4", "data-encoding", "derivative", - "itertools", + "itertools 0.10.5", "namada_core", "once_cell", + "pretty_assertions", "proptest", "proptest-state-machine", "test-log", "thiserror", "tracing 0.1.37", - "tracing-subscriber 0.3.17", + "tracing-subscriber", + "yansi", +] + +[[package]] +name = "namada_sdk" +version = "0.23.1" +dependencies = [ + "assert_matches", + "async-trait", + "bimap", + "borsh 1.0.0-alpha.4", + "borsh-ext", + "circular-queue", + "data-encoding", + "derivation-path", + "ethbridge-bridge-contract", + "ethers", + "fd-lock", + "futures", + "itertools 0.10.5", + "masp_primitives", + "masp_proofs", + "namada_core", + 
"namada_ethereum_bridge", + "namada_proof_of_stake", + "namada_test_utils", + "num256", + "orion", + "owo-colors", + "parse_duration", + "paste", + "prost", + "rand 0.8.5", + "rand_core 0.6.4", + "ripemd", + "serde 1.0.163", + "serde_json", + "sha2 0.9.9", + "slip10_ed25519", + "tempfile", + "tendermint-rpc", + "thiserror", + "tiny-bip39", + "tiny-hderive", + "tokio", + "toml 0.5.9", + "tracing 0.1.37", + "wasmtimer", + "zeroize", ] [[package]] name = "namada_test_utils" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "namada_core", "strum", ] @@ -4320,7 +4291,8 @@ version = "0.23.1" dependencies = [ "assert_cmd", "async-trait", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "chrono", "clap", "color-eyre", @@ -4333,11 +4305,12 @@ dependencies = [ "file-serve", "fs_extra", "hyper", - "itertools", + "itertools 0.10.5", "lazy_static", "namada", "namada_apps", "namada_core", + "namada_sdk", "namada_test_utils", "namada_tx_prelude", "namada_vp_prelude", @@ -4358,14 +4331,15 @@ dependencies = [ "tokio", "toml 0.5.9", "tracing 0.1.37", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] name = "namada_tx_prelude" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "masp_primitives", "namada_core", "namada_macros", @@ -4379,7 +4353,7 @@ dependencies = [ name = "namada_vm_env" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "masp_primitives", "namada_core", ] @@ -4388,7 +4362,8 @@ dependencies = [ name = "namada_vp_prelude" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -4687,9 +4662,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = 
"dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "oorandom" @@ -4790,27 +4765,12 @@ dependencies = [ "zeroize", ] -[[package]] -name = "output_vt100" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" -dependencies = [ - "winapi", -] - [[package]] name = "overload" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "owo-colors" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2386b4ebe91c2f7f51082d4cefa145d030e33a1842a96b12e4885cc3c01f7a55" - [[package]] name = "owo-colors" version = "3.5.0" @@ -4819,11 +4779,11 @@ checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "pairing" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" dependencies = [ - "group 0.12.1", + "group", ] [[package]] @@ -4903,6 +4863,19 @@ dependencies = [ "subtle 2.4.1", ] +[[package]] +name = "pasta_curves" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" +dependencies = [ + "ff", + "group", + "rand 0.8.5", + "static_assertions", + "subtle 2.4.1", +] + [[package]] name = "paste" version = "1.0.12" @@ -4920,11 +4893,11 @@ dependencies = [ [[package]] name = "pbkdf2" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" +checksum = 
"271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" dependencies = [ - "crypto-mac 0.11.1", + "digest 0.10.6", "password-hash", ] @@ -5116,7 +5089,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", - "itertools", + "itertools 0.10.5", "predicates-core", ] @@ -5138,14 +5111,12 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "0.7.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cab0e7c02cf376875e9335e0ba1da535775beb5450d21e1dffca068818ed98b" +checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" dependencies = [ - "ansi_term", - "ctor", "diff", - "output_vt100", + "yansi", ] [[package]] @@ -5281,7 +5252,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -5302,7 +5273,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", @@ -5478,21 +5449,36 @@ dependencies = [ ] [[package]] -name = "redjubjub" -version = "0.5.0" +name = "reddsa" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6039ff156887caf92df308cbaccdc058c9d3155a913da046add6e48c4cdbd91d" +checksum = "78a5191930e84973293aa5f532b513404460cd2216c1cfb76d08748c15b40b02" dependencies = [ "blake2b_simd", "byteorder", - "digest 0.9.0", + "group", + "hex", "jubjub", + "pasta_curves", "rand_core 0.6.4", "serde 1.0.163", "thiserror", "zeroize", ] +[[package]] +name = "redjubjub" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7a60db2c3bc9c6fd1e8631fee75abc008841d27144be744951d6b9b75f9b569c" +dependencies = [ + "rand_core 0.6.4", + "reddsa", + "serde 1.0.163", + "thiserror", + "zeroize", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -5780,16 +5766,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.1" +version = "0.37.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4790277f605573dd24b6751701e0823582a63c7cafc095e427e6c66e45dd75e" +checksum = "f79bef90eb6d984c72722595b5b1348ab39275a5e5123faca6863bf07d75a4e0" dependencies = [ "bitflags 1.2.1", "errno", "io-lifetimes", "libc", "linux-raw-sys 0.3.7", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -6033,6 +6019,7 @@ dependencies = [ "der", "generic-array 0.14.7", "pkcs8", + "serdect", "subtle 2.4.1", "zeroize", ] @@ -6211,6 +6198,16 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde 1.0.163", +] + [[package]] name = "sha1" version = "0.10.5" @@ -6367,10 +6364,10 @@ dependencies = [ [[package]] name = "sparse-merkle-tree" version = "0.3.1-pre" -source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=e086b235ed6e68929bf73f617dd61cd17b000a56#e086b235ed6e68929bf73f617dd61cd17b000a56" +source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=df7ec062e7c40d5e76b136064e9aaf8bd2490750#df7ec062e7c40d5e76b136064e9aaf8bd2490750" dependencies = [ "blake2b-rs", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "cfg-if 1.0.0", "ics23", "sha2 0.9.9", @@ -6500,6 +6497,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae6eef0000c4a12ecdfd7873ea84a8b5aab5e44db72e38e07b028a25386f29a5" +dependencies = [ + "proc-macro-error", + 
"proc-macro2", + "quote", + "syn 2.0.15", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -6552,7 +6561,7 @@ dependencies = [ "cfg-if 1.0.0", "fastrand", "redox_syscall 0.3.5", - "rustix 0.37.1", + "rustix 0.37.13", "windows-sys 0.45.0", ] @@ -6804,7 +6813,7 @@ checksum = "01b874a4992538d4b2f4fbbac11b9419d685f4b39bdc3fed95b04e07bfd76040" dependencies = [ "base58 0.1.0", "hmac 0.7.1", - "libsecp256k1 0.3.5", + "libsecp256k1", "memzero", "sha2 0.8.2", ] @@ -7160,7 +7169,7 @@ checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" dependencies = [ "crossbeam-channel 0.5.8", "time", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -7204,12 +7213,12 @@ dependencies = [ [[package]] name = "tracing-error" -version = "0.1.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4d7c0b83d4a500748fa5879461652b361edf5c9d51ede2a2ac03875ca185e24" +checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" dependencies = [ "tracing 0.1.37", - "tracing-subscriber 0.2.25", + "tracing-subscriber", ] [[package]] @@ -7252,17 +7261,6 @@ dependencies = [ "tracing-core 0.1.31", ] -[[package]] -name = "tracing-subscriber" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" -dependencies = [ - "sharded-slab", - "thread_local", - "tracing-core 0.1.31", -] - [[package]] name = "tracing-subscriber" version = "0.3.17" @@ -8290,10 +8288,16 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "zcash_encoding" -version = "0.0.0" -source = "git+https://github.com/zcash/librustzcash?rev=43c18d0#43c18d000fcbe45363b2d53585d5102841eff99e" +version = "0.2.0" +source = 
"git+https://github.com/zcash/librustzcash?rev=bd7f9d7#bd7f9d7c3ce5cfd14af169ffe0e1c5c903162f46" dependencies = [ "byteorder", "nonempty", diff --git a/Cargo.toml b/Cargo.toml index 2b5bacdb9a..c1ef55bae8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,7 @@ members = [ "macros", "vp_prelude", "encoding_spec", + "sdk", ] # wasm packages have to be built separately @@ -40,7 +41,7 @@ ark-bls12-381 = {version = "0.3"} ark-serialize = {version = "0.3"} ark-std = "0.3.0" # branch = "bat/arse-merkle-tree" -arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "e086b235ed6e68929bf73f617dd61cd17b000a56", default-features = false, features = ["std", "borsh"]} +arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "df7ec062e7c40d5e76b136064e9aaf8bd2490750", default-features = false, features = ["std", "borsh"]} assert_cmd = "1.0.7" assert_matches = "1.5.0" async-trait = {version = "0.1.51"} @@ -52,12 +53,13 @@ bit-set = "0.5.2" blake2b-rs = "0.2.0" byte-unit = "4.0.13" byteorder = "1.4.2" -borsh = "0.9.0" +borsh = {version = "1.0.0-alpha.4", features = ["schema", "derive"]} +borsh-ext = {tag = "v1.0.0-alpha.4", git = "https://github.com/heliaxdev/borsh-ext"} chrono = {version = "0.4.22", default-features = false, features = ["clock", "std"]} circular-queue = "0.2.6" clap = "4.3.4" clru = {git = "https://github.com/marmeladema/clru-rs.git", rev = "71ca566"} -color-eyre = "0.5.10" +color-eyre = "0.6.2" concat-idents = "1.1.2" config = "0.11.0" data-encoding = "2.3.2" @@ -83,15 +85,15 @@ fs_extra = "1.2.0" futures = "0.3" git2 = "0.13.25" ics23 = "0.9.0" -index-set = {git = "https://github.com/heliaxdev/index-set", tag = "v0.7.1", features = ["serialize-borsh", "serialize-serde"]} +index-set = {git = "https://github.com/heliaxdev/index-set", tag = "v0.8.0", features = ["serialize-borsh", "serialize-serde"]} itertools = "0.10.0" +k256 = { version = 
"0.13.0", default-features = false, features = ["ecdsa", "pkcs8", "precomputed-tables", "serde", "std"]} lazy_static = "1.4.0" libc = "0.2.97" libloading = "0.7.2" -libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} # branch = "murisi/namada-integration" -masp_primitives = { git = "https://github.com/anoma/masp", rev = "50acc5028fbcd52a05970fe7991c7850ab04358e" } -masp_proofs = { git = "https://github.com/anoma/masp", rev = "50acc5028fbcd52a05970fe7991c7850ab04358e", default-features = false, features = ["local-prover"] } +masp_primitives = { git = "https://github.com/anoma/masp", rev = "77e009626f3f52fe83c81ec6ee38fc2547d38da3" } +masp_proofs = { git = "https://github.com/anoma/masp", rev = "77e009626f3f52fe83c81ec6ee38fc2547d38da3", default-features = false, features = ["local-prover"] } num256 = "0.3.5" num_cpus = "1.13.0" num-derive = "0.3.3" @@ -100,7 +102,7 @@ num-traits = "0.2.14" once_cell = "1.8.0" orion = "0.16.0" paste = "1.0.9" -pretty_assertions = "0.7.2" +pretty_assertions = "1.4.0" primitive-types = "0.12.1" proptest = "1.2.0" proptest-state-machine = "0.1.0" @@ -145,14 +147,8 @@ tracing-log = "0.1.2" tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} wasmparser = "0.107.0" winapi = "0.3.9" -zeroize = {version = "1.5.5", features = ["zeroize_derive"]} - -[patch.crates-io] -# TODO temp patch for , and more tba. 
-borsh = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-schema-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} +yansi = "0.5.1" +zeroize = { version = "1.5.5", features = ["zeroize_derive"] } [profile.release] lto = true diff --git a/Makefile b/Makefile index e487847277..dc67c3c431 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,9 @@ NAMADA_E2E_DEBUG ?= true RUST_BACKTRACE ?= 1 NAMADA_MASP_TEST_SEED ?= 0 PROPTEST_CASES ?= 100 +# Disable shrinking in `make test-pos-sm` for CI runs. If the test fail in CI, +# we only want to get the seed. +PROPTEST_MAX_SHRINK_ITERS ?= 0 cargo := $(env) cargo rustup := $(env) rustup @@ -129,7 +132,8 @@ test-coverage: $(cargo) +$(nightly) llvm-cov --output-dir target \ --features namada/testing \ --html \ - -- --skip e2e -Z unstable-options --report-time + -- --skip e2e --skip pos_state_machine_test \ + -Z unstable-options --report-time # NOTE: `TEST_FILTER` is prepended with `e2e::`. 
Since filters in `cargo test` # work with a substring search, TEST_FILTER only works if it contains a string @@ -211,11 +215,13 @@ test-debug: test-benches: $(cargo) +$(nightly) test --package namada_benchmarks --benches -# Run PoS state machine tests +# Run PoS state machine tests with shrinking disabled by default (can be +# overriden with `PROPTEST_MAX_SHRINK_ITERS`) test-pos-sm: cd proof_of_stake && \ - RUST_BACKTRACE=1 \ + RUST_BACKTRACE=1 \ PROPTEST_CASES=$(PROPTEST_CASES) \ + PROPTEST_MAX_SHRINK_ITERS=$(PROPTEST_MAX_SHRINK_ITERS) \ RUSTFLAGS='-C debuginfo=2 -C debug-assertions=true -C overflow-checks=true' \ cargo test pos_state_machine_test --release diff --git a/apps/Cargo.toml b/apps/Cargo.toml index 1d33f55df7..186b470439 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -55,9 +55,9 @@ mainnet = [ "namada/mainnet", ] dev = ["namada/dev"] -std = ["ed25519-consensus/std", "rand/std", "rand_core/std", "namada/std"] +std = ["ed25519-consensus/std", "rand/std", "rand_core/std", "namada/std", "namada_sdk/std"] # for integration tests and test utilies -testing = ["dev"] +testing = ["dev", "namada_test_utils"] abciplus = [ "namada/abciplus", @@ -67,6 +67,8 @@ abciplus = [ [dependencies] namada = {path = "../shared", features = ["ferveo-tpke", "masp-tx-gen", "multicore", "http-client"]} +namada_sdk = {path = "../sdk", default-features = false, features = ["wasm-runtime", "masp-tx-gen"]} +namada_test_utils = {path = "../test_utils", optional = true} ark-serialize.workspace = true ark-std.workspace = true arse-merkle-tree = { workspace = true, features = ["blake2b"] } @@ -77,6 +79,7 @@ bech32.workspace = true bimap.workspace = true blake2b-rs.workspace = true borsh.workspace = true +borsh-ext.workspace = true byte-unit.workspace = true byteorder.workspace = true clap.workspace = true diff --git a/apps/src/bin/namada-client/main.rs b/apps/src/bin/namada-client/main.rs index 9b43ca8f91..770dcf5367 100644 --- a/apps/src/bin/namada-client/main.rs +++ 
b/apps/src/bin/namada-client/main.rs @@ -13,9 +13,10 @@ async fn main() -> Result<()> { let _log_guard = logging::init_from_env_or(LevelFilter::INFO)?; // run the CLI - CliApi::::handle_client_command::( + CliApi::handle_client_command::( None, cli::namada_client_cli()?, + &CliIo, ) .await } diff --git a/apps/src/bin/namada-relayer/main.rs b/apps/src/bin/namada-relayer/main.rs index 05d2620bcb..f9d98a2a4e 100644 --- a/apps/src/bin/namada-relayer/main.rs +++ b/apps/src/bin/namada-relayer/main.rs @@ -14,5 +14,5 @@ async fn main() -> Result<()> { let cmd = cli::namada_relayer_cli()?; // run the CLI - CliApi::::handle_relayer_command::(None, cmd).await + CliApi::handle_relayer_command::(None, cmd, &CliIo).await } diff --git a/apps/src/bin/namada-wallet/main.rs b/apps/src/bin/namada-wallet/main.rs index 5e94831716..30d4a64156 100644 --- a/apps/src/bin/namada-wallet/main.rs +++ b/apps/src/bin/namada-wallet/main.rs @@ -6,5 +6,5 @@ pub fn main() -> Result<()> { color_eyre::install()?; let (cmd, ctx) = cli::namada_wallet_cli()?; // run the CLI - CliApi::::handle_wallet_command(cmd, ctx) + CliApi::handle_wallet_command(cmd, ctx, &CliIo) } diff --git a/benches/lib.rs b/apps/src/lib/bench_utils.rs similarity index 86% rename from benches/lib.rs rename to apps/src/lib/bench_utils.rs index 47645abdf4..db14551a76 100644 --- a/benches/lib.rs +++ b/apps/src/lib/bench_utils.rs @@ -1,18 +1,5 @@ -//! Benchmarks module based on criterion. -//! -//! Measurements are taken on the elapsed wall-time. -//! -//! The benchmarks only focus on sucessfull transactions and vps: in case of -//! failure, the bench function shall panic to avoid timing incomplete execution -//! paths. -//! -//! In addition, this module also contains benchmarks for -//! [`WrapperTx`][`namada::core::types::transaction::wrapper::WrapperTx`] -//! validation and [`host_env`][`namada::vm::host_env`] exposed functions that -//! define the gas constants of [`gas`][`namada::core::ledger::gas`]. -//! -//! 
For more realistic results these benchmarks should be run on all the -//! combination of supported OS/architecture. +//! Library code for benchmarks provides a wrapper of the ledger's shell +//! `BenchShell` and helper functions to generate transactions. use std::fs::{File, OpenOptions}; use std::io::{Read, Write}; @@ -21,6 +8,7 @@ use std::path::PathBuf; use std::sync::Once; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use masp_primitives::transaction::Transaction; use masp_primitives::zip32::ExtendedFullViewingKey; use masp_proofs::prover::LocalTxProver; @@ -63,6 +51,7 @@ use namada::ibc::core::Msg; use namada::ibc::Height as IbcHeight; use namada::ibc_proto::google::protobuf::Any; use namada::ibc_proto::protobuf::Protobuf; +use namada::ledger::dry_run_tx; use namada::ledger::gas::TxGasMeter; use namada::ledger::ibc::storage::{channel_key, connection_key}; use namada::ledger::queries::{ @@ -71,16 +60,11 @@ use namada::ledger::queries::{ use namada::ledger::storage_api::StorageRead; use namada::proof_of_stake; use namada::proto::{Code, Data, Section, Signature, Tx}; -use namada::sdk::args::InputAmount; -use namada::sdk::masp::{ - self, ShieldedContext, ShieldedTransfer, ShieldedUtils, -}; -use namada::sdk::wallet::Wallet; use namada::tendermint::Hash; use namada::tendermint_rpc::{self}; use namada::types::address::InternalAddress; use namada::types::chain::ChainId; -use namada::types::io::DefaultIo; +use namada::types::io::StdIo; use namada::types::masp::{ ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, }; @@ -89,34 +73,43 @@ use namada::types::time::DateTimeUtc; use namada::types::token::DenominatedAmount; use namada::types::transaction::governance::InitProposalData; use namada::types::transaction::pos::Bond; -use namada::types::transaction::GasLimit; use namada::vm::wasm::run; -use namada_apps::cli::args::{Tx as TxArgs, TxTransfer}; -use namada_apps::cli::context::FromContext; -use 
namada_apps::cli::Context; -use namada_apps::config::TendermintMode; -use namada_apps::facade::tendermint_proto::abci::RequestInitChain; -use namada_apps::facade::tendermint_proto::google::protobuf::Timestamp; -use namada_apps::node::ledger::shell::Shell; -use namada_apps::wallet::{defaults, CliWalletUtils}; -use namada_apps::{config, wasm_loader}; +use namada_sdk::masp::{ + self, ShieldedContext, ShieldedTransfer, ShieldedUtils, +}; +use namada_sdk::wallet::Wallet; +use namada_sdk::NamadaImpl; use namada_test_utils::tx_data::TxWriteData; use rand_core::OsRng; use sha2::{Digest, Sha256}; use tempfile::TempDir; +use crate::cli::context::FromContext; +use crate::cli::Context; +use crate::config::TendermintMode; +use crate::facade::tendermint_proto::abci::RequestInitChain; +use crate::facade::tendermint_proto::google::protobuf::Timestamp; +use crate::node::ledger::shell::Shell; +use crate::wallet::{defaults, CliWalletUtils}; +use crate::{config, wasm_loader}; + pub const WASM_DIR: &str = "../wasm"; pub const TX_BOND_WASM: &str = "tx_bond.wasm"; pub const TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; pub const TX_UPDATE_ACCOUNT_WASM: &str = "tx_update_account.wasm"; pub const TX_VOTE_PROPOSAL_WASM: &str = "tx_vote_proposal.wasm"; pub const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; +pub const TX_REDELEGATE_WASM: &str = "tx_redelegate.wasm"; pub const TX_INIT_PROPOSAL_WASM: &str = "tx_init_proposal.wasm"; pub const TX_REVEAL_PK_WASM: &str = "tx_reveal_pk.wasm"; pub const TX_CHANGE_VALIDATOR_COMMISSION_WASM: &str = "tx_change_validator_commission.wasm"; pub const TX_IBC_WASM: &str = "tx_ibc.wasm"; pub const TX_UNJAIL_VALIDATOR_WASM: &str = "tx_unjail_validator.wasm"; +pub const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; +pub const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; +pub const TX_INIT_VALIDATOR_WASM: &str = "tx_init_validator.wasm"; + pub const VP_VALIDATOR_WASM: &str = "vp_validator.wasm"; pub const ALBERT_PAYMENT_ADDRESS: &str = "albert_payment"; @@ 
-267,10 +260,8 @@ impl BenchShell { } pub fn advance_epoch(&mut self) { - let pipeline_len = - proof_of_stake::read_pos_params(&self.inner.wl_storage) - .unwrap() - .pipeline_len; + let params = + proof_of_stake::read_pos_params(&self.inner.wl_storage).unwrap(); self.wl_storage.storage.block.epoch = self.wl_storage.storage.block.epoch.next(); @@ -278,8 +269,9 @@ impl BenchShell { proof_of_stake::copy_validator_sets_and_positions( &mut self.wl_storage, + ¶ms, current_epoch, - current_epoch + pipeline_len, + current_epoch + params.pipeline_len, ) .unwrap(); } @@ -436,7 +428,7 @@ pub fn generate_tx( WASM_DIR, wasm_code_path, ))); - tx.set_data(Data::new(data.try_to_vec().unwrap())); + tx.set_data(Data::new(data.serialize_to_vec())); if let Some(transaction) = shielded { tx.add_section(Section::MaspTx(transaction)); @@ -452,7 +444,7 @@ pub fn generate_tx( if let Some(signer) = signer { tx.add_section(Section::Signature(Signature::new( - tx.sechashes(), + vec![tx.raw_header_hash()], [(0, signer.clone())].into_iter().collect(), None, ))); @@ -491,11 +483,10 @@ pub fn generate_foreign_key_tx(signer: &SecretKey) -> Tx { key: Key::from("bench_foreign_key".to_string().to_db_key()), value: vec![0; 64], } - .try_to_vec() - .unwrap(), + .serialize_to_vec(), )); tx.add_section(Section::Signature(Signature::new( - tx.sechashes(), + vec![tx.raw_header_hash()], [(0, signer.clone())].into_iter().collect(), None, ))); @@ -563,7 +554,7 @@ impl Clone for WrapperTempDir { #[derive(BorshSerialize, BorshDeserialize, Debug, Clone, Default)] pub struct BenchShieldedUtils { - #[borsh_skip] + #[borsh(skip)] context_dir: WrapperTempDir, } @@ -585,22 +576,29 @@ impl ShieldedUtils for BenchShieldedUtils { /// Try to load the last saved shielded context from the given context /// directory. If this fails, then leave the current context unchanged. 
- async fn load(self) -> std::io::Result> { + async fn load( + &self, + ctx: &mut ShieldedContext, + ) -> std::io::Result<()> { // Try to load shielded context from file let mut ctx_file = File::open( self.context_dir.0.path().to_path_buf().join(FILE_NAME), )?; let mut bytes = Vec::new(); ctx_file.read_to_end(&mut bytes)?; - let mut new_ctx = ShieldedContext::deserialize(&mut &bytes[..])?; - // Associate the originating context directory with the - // shielded context under construction - new_ctx.utils = self; - Ok(new_ctx) + // Fill the supplied context with the deserialized object + *ctx = ShieldedContext { + utils: ctx.utils.clone(), + ..ShieldedContext::deserialize(&mut &bytes[..])? + }; + Ok(()) } /// Save this shielded context into its associated context directory - async fn save(&self, ctx: &ShieldedContext) -> std::io::Result<()> { + async fn save( + &self, + ctx: &ShieldedContext, + ) -> std::io::Result<()> { let tmp_path = self.context_dir.0.path().to_path_buf().join(TMP_FILE_NAME); { @@ -662,8 +660,12 @@ impl Client for BenchShell { storage_read_past_height_limit: None, }; - RPC.handle(ctx, &request) - .map_err(|_| std::io::Error::from(std::io::ErrorKind::NotFound)) + if request.path == "/shell/dry_run_tx" { + dry_run_tx(ctx, &request) + } else { + RPC.handle(ctx, &request) + } + .map_err(|_| std::io::Error::from(std::io::ErrorKind::NotFound)) } async fn perform( @@ -681,13 +683,12 @@ impl Default for BenchShieldedCtx { fn default() -> Self { let mut shell = BenchShell::default(); - let mut ctx = - Context::new::(namada_apps::cli::args::Global { - chain_id: None, - base_dir: shell.tempdir.as_ref().canonicalize().unwrap(), - wasm_dir: Some(WASM_DIR.into()), - }) - .unwrap(); + let mut ctx = Context::new::(crate::cli::args::Global { + chain_id: None, + base_dir: shell.tempdir.as_ref().canonicalize().unwrap(), + wasm_dir: Some(WASM_DIR.into()), + }) + .unwrap(); // Generate spending key for Albert and Bertha ctx.wallet.gen_spending_key( @@ -700,7 +701,7 @@ 
impl Default for BenchShieldedCtx { None, true, ); - namada_apps::wallet::save(&ctx.wallet).unwrap(); + crate::wallet::save(&ctx.wallet).unwrap(); // Generate payment addresses for both Albert and Bertha for (alias, viewing_alias) in [ @@ -720,7 +721,7 @@ impl Default for BenchShieldedCtx { .fvk .vk; let (div, _g_d) = - namada::sdk::masp::find_valid_diversifier(&mut OsRng); + namada_sdk::masp::find_valid_diversifier(&mut OsRng); let payment_addr = viewing_key.to_payment_address(div).unwrap(); let _ = ctx .wallet @@ -732,7 +733,7 @@ impl Default for BenchShieldedCtx { .unwrap(); } - namada_apps::wallet::save(&ctx.wallet).unwrap(); + crate::wallet::save(&ctx.wallet).unwrap(); namada::ledger::storage::update_allowed_conversions( &mut shell.wl_storage, ) @@ -753,44 +754,10 @@ impl BenchShieldedCtx { source: TransferSource, target: TransferTarget, ) -> Tx { - let mock_args = TxArgs { - dry_run: false, - dry_run_wrapper: false, - dump_tx: false, - force: false, - broadcast_only: false, - ledger_address: (), - initialized_account_alias: None, - fee_amount: None, - fee_token: address::nam(), - fee_unshield: None, - gas_limit: GasLimit::from(u64::MAX), - expiration: None, - disposable_signing_key: false, - signing_keys: vec![defaults::albert_keypair()], - signatures: vec![], - wallet_alias_force: true, - chain_id: None, - tx_reveal_code_path: TX_REVEAL_PK_WASM.into(), - verification_key: None, - password: None, - wrapper_fee_payer: None, - output_folder: None, + let denominated_amount = DenominatedAmount { + amount, + denom: 0.into(), }; - - let args = TxTransfer { - tx: mock_args, - source: source.clone(), - target: target.clone(), - token: address::nam(), - amount: InputAmount::Validated(DenominatedAmount { - amount, - denom: 0.into(), - }), - native_token: self.shell.wl_storage.storage.native_token.clone(), - tx_code_path: TX_TRANSFER_WASM.into(), - }; - let async_runtime = tokio::runtime::Runtime::new().unwrap(); let spending_key = self .wallet @@ -803,10 +770,22 @@ 
impl BenchShieldedCtx { &[], )) .unwrap(); + let namada = NamadaImpl::native_new( + &self.shell, + &mut self.wallet, + &mut self.shielded, + &StdIo, + self.shell.wl_storage.storage.native_token.clone(), + ); let shielded = async_runtime .block_on( - self.shielded - .gen_shielded_transfer::<_, DefaultIo>(&self.shell, args), + ShieldedContext::::gen_shielded_transfer( + &namada, + &source, + &target, + &address::nam(), + denominated_amount, + ), ) .unwrap() .map( diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 135ff1e3c5..51e3b03a73 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -15,7 +15,7 @@ pub mod wallet; use clap::{ArgGroup, ArgMatches, ColorChoice}; use color_eyre::eyre::Result; -use namada::types::io::DefaultIo; +use namada::types::io::StdIo; use utils::*; pub use utils::{safe_exit, Cmd}; @@ -228,6 +228,7 @@ pub mod cmds { .subcommand(Bond::def().display_order(2)) .subcommand(Unbond::def().display_order(2)) .subcommand(Withdraw::def().display_order(2)) + .subcommand(Redelegate::def().display_order(2)) .subcommand(TxCommissionRateChange::def().display_order(2)) // Ethereum bridge transactions .subcommand(AddToEthBridgePool::def().display_order(3)) @@ -255,6 +256,7 @@ pub mod cmds { .subcommand(QueryValidatorState::def().display_order(5)) // Actions .subcommand(SignTx::def().display_order(6)) + .subcommand(GenIbcShieldedTransafer::def().display_order(6)) // Utils .subcommand(Utils::def().display_order(7)) } @@ -285,6 +287,7 @@ pub mod cmds { let bond = Self::parse_with_ctx(matches, Bond); let unbond = Self::parse_with_ctx(matches, Unbond); let withdraw = Self::parse_with_ctx(matches, Withdraw); + let redelegate = Self::parse_with_ctx(matches, Redelegate); let query_epoch = Self::parse_with_ctx(matches, QueryEpoch); let query_account = Self::parse_with_ctx(matches, QueryAccount); let query_transfers = Self::parse_with_ctx(matches, QueryTransfers); @@ -313,6 +316,8 @@ pub mod cmds { let add_to_eth_bridge_pool = 
Self::parse_with_ctx(matches, AddToEthBridgePool); let sign_tx = Self::parse_with_ctx(matches, SignTx); + let gen_ibc_shielded = + Self::parse_with_ctx(matches, GenIbcShieldedTransafer); let utils = SubCmd::parse(matches).map(Self::WithoutContext); tx_custom .or(tx_transfer) @@ -328,6 +333,7 @@ pub mod cmds { .or(bond) .or(unbond) .or(withdraw) + .or(redelegate) .or(add_to_eth_bridge_pool) .or(tx_update_steward_commission) .or(tx_resign_steward) @@ -350,6 +356,7 @@ pub mod cmds { .or(query_validator_state) .or(query_account) .or(sign_tx) + .or(gen_ibc_shielded) .or(utils) } } @@ -402,6 +409,7 @@ pub mod cmds { Bond(Bond), Unbond(Unbond), Withdraw(Withdraw), + Redelegate(Redelegate), AddToEthBridgePool(AddToEthBridgePool), TxUpdateStewardCommission(TxUpdateStewardCommission), TxResignSteward(TxResignSteward), @@ -424,6 +432,7 @@ pub mod cmds { QueryPgf(QueryPgf), QueryValidatorState(QueryValidatorState), SignTx(SignTx), + GenIbcShieldedTransafer(GenIbcShieldedTransafer), } #[allow(clippy::large_enum_variant)] @@ -1425,6 +1434,27 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct Redelegate(pub args::Redelegate); + + impl SubCmd for Redelegate { + const CMD: &'static str = "redelegate"; + + fn parse(matches: &ArgMatches) -> Option { + matches + .subcommand_matches(Self::CMD) + .map(|matches| Redelegate(args::Redelegate::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about( + "Redelegate bonded tokens from one validator to another.", + ) + .add_args::>() + } + } + #[derive(Clone, Debug)] pub struct QueryEpoch(pub args::Query); @@ -1874,6 +1904,29 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct GenIbcShieldedTransafer( + pub args::GenIbcShieldedTransafer, + ); + + impl SubCmd for GenIbcShieldedTransafer { + const CMD: &'static str = "ibc-gen-shielded"; + + fn parse(matches: &ArgMatches) -> Option { + matches.subcommand_matches(Self::CMD).map(|matches| { + GenIbcShieldedTransafer(args::GenIbcShieldedTransafer::parse( + 
matches, + )) + }) + } + + fn def() -> App { + App::new(Self::CMD) + .about("Generate shielded transfer for IBC.") + .add_args::>() + } + } + #[derive(Clone, Debug)] pub struct EpochSleep(pub args::Query); @@ -2517,7 +2570,6 @@ pub mod args { use std::str::FromStr; use namada::ibc::core::ics24_host::identifier::{ChannelId, PortId}; - pub use namada::sdk::args::*; use namada::types::address::Address; use namada::types::chain::{ChainId, ChainIdPrefix}; use namada::types::dec::Dec; @@ -2530,10 +2582,12 @@ pub mod args { use namada::types::token; use namada::types::token::NATIVE_MAX_DECIMAL_PLACES; use namada::types::transaction::GasLimit; + pub use namada_sdk::args::*; use super::context::*; use super::utils::*; use super::{ArgGroup, ArgMatches}; + use crate::cli::context::FromContext; use crate::config::{self, Action, ActionAtHeight}; use crate::facade::tendermint::Timeout; use crate::facade::tendermint_config::net::Address as TendermintAddress; @@ -2551,6 +2605,7 @@ pub mod args { pub const TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; pub const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; pub const TX_UNJAIL_VALIDATOR_WASM: &str = "tx_unjail_validator.wasm"; + pub const TX_REDELEGATE_WASM: &str = "tx_redelegate.wasm"; pub const TX_UPDATE_VP_WASM: &str = "tx_update_vp.wasm"; pub const TX_UPDATE_STEWARD_COMMISSION: &str = "tx_update_steward_commission.wasm"; @@ -2581,7 +2636,7 @@ pub mod args { arg_default( "pool-gas-amount", DefaultFn(|| token::DenominatedAmount { - amount: token::Amount::default(), + amount: token::Amount::zero(), denom: NATIVE_MAX_DECIMAL_PLACES.into(), }), ); @@ -2614,6 +2669,8 @@ pub mod args { pub const DATA_PATH: Arg = arg("data-path"); pub const DECRYPT: ArgFlag = flag("decrypt"); pub const DISPOSABLE_SIGNING_KEY: ArgFlag = flag("disposable-gas-payer"); + pub const DESTINATION_VALIDATOR: Arg = + arg("destination-validator"); pub const DONT_ARCHIVE: ArgFlag = flag("dont-archive"); pub const DONT_PREFETCH_WASM: ArgFlag = flag("dont-prefetch-wasm"); 
pub const DRY_RUN_TX: ArgFlag = flag("dry-run"); @@ -2659,7 +2716,7 @@ pub mod args { pub const HD_WALLET_DERIVATION_PATH_OPT: ArgOpt = HD_WALLET_DERIVATION_PATH.opt(); pub const HISTORIC: ArgFlag = flag("historic"); - pub const IBC_TRANSFER_MEMO: ArgOpt = arg_opt("memo"); + pub const IBC_TRANSFER_MEMO_PATH: ArgOpt = arg_opt("memo-path"); pub const LEDGER_ADDRESS_ABOUT: &str = "Address of a ledger node as \"{scheme}://{host}:{port}\". If the \ scheme is not supplied, it is assumed to be TCP."; @@ -2713,11 +2770,13 @@ pub mod args { pub const SAFE_MODE: ArgFlag = flag("safe-mode"); pub const SCHEME: ArgDefault = arg_default("scheme", DefaultFn(|| SchemeType::Ed25519)); + pub const SENDER: Arg = arg("sender"); pub const SIGNING_KEYS: ArgMulti = arg_multi("signing-keys"); pub const SIGNATURES: ArgMulti = arg_multi("signatures"); pub const SOURCE: Arg = arg("source"); pub const SOURCE_OPT: ArgOpt = SOURCE.opt(); pub const STEWARD: Arg = arg("steward"); + pub const SOURCE_VALIDATOR: Arg = arg("source-validator"); pub const STORAGE_KEY: Arg = arg("storage-key"); pub const SUSPEND_ACTION: ArgFlag = flag("suspend"); pub const TIMEOUT_HEIGHT: ArgOpt = arg_opt("timeout-height"); @@ -3525,8 +3584,8 @@ pub mod args { target, token, amount, - native_token: (), tx_code_path, + native_token: (), } } @@ -3574,7 +3633,10 @@ pub mod args { let channel_id = CHANNEL_ID.parse(matches); let timeout_height = TIMEOUT_HEIGHT.parse(matches); let timeout_sec_offset = TIMEOUT_SEC_OFFSET.parse(matches); - let memo = IBC_TRANSFER_MEMO.parse(matches); + let memo = IBC_TRANSFER_MEMO_PATH.parse(matches).map(|path| { + std::fs::read_to_string(path) + .expect("Expected a file at given path") + }); let tx_code_path = PathBuf::from(TX_IBC_WASM); Self { tx, @@ -3611,9 +3673,9 @@ pub mod args { ) .arg(TIMEOUT_SEC_OFFSET.def().help("The timeout as seconds.")) .arg( - IBC_TRANSFER_MEMO + IBC_TRANSFER_MEMO_PATH .def() - .help("Memo field of ICS20 transfer."), + .help("The path for the memo field of ICS20 
transfer."), ) } } @@ -3881,8 +3943,8 @@ pub mod args { validator, amount, source, - native_token: (), tx_code_path, + native_token: (), } } @@ -4018,6 +4080,63 @@ pub mod args { } } + impl CliToSdk> for Redelegate { + fn to_sdk(self, ctx: &mut Context) -> Redelegate { + Redelegate:: { + tx: self.tx.to_sdk(ctx), + src_validator: ctx.get(&self.src_validator), + dest_validator: ctx.get(&self.dest_validator), + owner: ctx.get(&self.owner), + amount: self.amount, + tx_code_path: self.tx_code_path.to_path_buf(), + } + } + } + + impl Args for Redelegate { + fn parse(matches: &ArgMatches) -> Self { + let tx = Tx::parse(matches); + let src_validator = SOURCE_VALIDATOR.parse(matches); + let dest_validator = DESTINATION_VALIDATOR.parse(matches); + let owner = OWNER.parse(matches); + let amount = AMOUNT.parse(matches); + let amount = amount + .canonical() + .increase_precision(NATIVE_MAX_DECIMAL_PLACES.into()) + .unwrap_or_else(|e| { + println!("Could not parse bond amount: {:?}", e); + safe_exit(1); + }) + .amount; + let tx_code_path = PathBuf::from(TX_REDELEGATE_WASM); + Self { + tx, + src_validator, + dest_validator, + owner, + amount, + tx_code_path, + } + } + + fn def(app: App) -> App { + app.add_args::>() + .arg( + SOURCE_VALIDATOR + .def() + .help("Source validator address for the redelegation."), + ) + .arg(DESTINATION_VALIDATOR.def().help( + "Destination validator address for the redelegation.", + )) + .arg(OWNER.def().help( + "Delegator (owner) address of the bonds that are being \ + redelegated.", + )) + .arg(AMOUNT.def().help("Amount of tokens to redelegate.")) + } + } + impl CliToSdk> for InitProposal { fn to_sdk(self, ctx: &mut Context) -> InitProposal { InitProposal:: { @@ -4731,6 +4850,66 @@ pub mod args { } } + impl CliToSdk> + for GenIbcShieldedTransafer + { + fn to_sdk( + self, + ctx: &mut Context, + ) -> GenIbcShieldedTransafer { + GenIbcShieldedTransafer:: { + query: self.query.to_sdk(ctx), + output_folder: self.output_folder, + target: 
ctx.get(&self.target), + token: ctx.get(&self.token), + amount: self.amount, + port_id: self.port_id, + channel_id: self.channel_id, + } + } + } + + impl Args for GenIbcShieldedTransafer { + fn parse(matches: &ArgMatches) -> Self { + let query = Query::parse(matches); + let output_folder = OUTPUT_FOLDER_PATH.parse(matches); + let target = TRANSFER_TARGET.parse(matches); + let token = TOKEN.parse(matches); + let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); + let port_id = PORT_ID.parse(matches); + let channel_id = CHANNEL_ID.parse(matches); + Self { + query, + output_folder, + target, + token, + amount, + port_id, + channel_id, + } + } + + fn def(app: App) -> App { + app.add_args::>() + .arg(OUTPUT_FOLDER_PATH.def().help( + "The output folder path where the artifact will be stored.", + )) + .arg(TRANSFER_TARGET.def().help("The target address.")) + .arg(TOKEN.def().help("The transfer token.")) + .arg(AMOUNT.def().help("The amount to transfer in decimal.")) + .arg( + PORT_ID + .def() + .help("The port ID via which the token is received."), + ) + .arg( + CHANNEL_ID.def().help( + "The channel ID via which the token is received.", + ), + ) + } + } + impl CliToSdk> for QueryCommissionRate { fn to_sdk(self, ctx: &mut Context) -> QueryCommissionRate { QueryCommissionRate:: { @@ -5799,7 +5978,7 @@ pub fn namada_relayer_cli() -> Result { cmds::EthBridgePool::WithContext(sub_cmd), ) => { let global_args = args::Global::parse(&matches); - let context = Context::new::(global_args)?; + let context = Context::new::(global_args)?; Ok(NamadaRelayer::EthBridgePoolWithCtx(Box::new(( sub_cmd, context, )))) diff --git a/apps/src/lib/cli/api.rs b/apps/src/lib/cli/api.rs index bb387c5d9a..0748e77549 100644 --- a/apps/src/lib/cli/api.rs +++ b/apps/src/lib/cli/api.rs @@ -1,10 +1,8 @@ -use std::marker::PhantomData; - -use namada::sdk::queries::Client; -use namada::sdk::rpc::wait_until_node_is_synched; use namada::tendermint_rpc::HttpClient; -use 
namada::types::control_flow::Halt; use namada::types::io::Io; +use namada_sdk::error::Error; +use namada_sdk::queries::Client; +use namada_sdk::rpc::wait_until_node_is_synched; use tendermint_config::net::Address as TendermintAddress; use crate::client::utils; @@ -13,7 +11,10 @@ use crate::client::utils; #[async_trait::async_trait(?Send)] pub trait CliClient: Client + Sync { fn from_tendermint_address(address: &mut TendermintAddress) -> Self; - async fn wait_until_node_is_synced(&self) -> Halt<()>; + async fn wait_until_node_is_synced( + &self, + io: &impl Io, + ) -> Result<(), Error>; } #[async_trait::async_trait(?Send)] @@ -22,8 +23,11 @@ impl CliClient for HttpClient { HttpClient::new(utils::take_config_address(address)).unwrap() } - async fn wait_until_node_is_synced(&self) -> Halt<()> { - wait_until_node_is_synched::<_, IO>(self).await + async fn wait_until_node_is_synced( + &self, + io: &impl Io, + ) -> Result<(), Error> { + wait_until_node_is_synched(self, io).await } } @@ -32,4 +36,4 @@ pub struct CliIo; #[async_trait::async_trait(?Send)] impl Io for CliIo {} -pub struct CliApi(PhantomData); +pub struct CliApi; diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index 1a7d9f534a..ec60584642 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -1,9 +1,6 @@ -use color_eyre::eyre::{eyre, Report, Result}; -use namada::ledger::eth_bridge::bridge_pool; -use namada::sdk::tx::dump_tx; -use namada::sdk::{signing, tx as sdk_tx}; -use namada::types::control_flow::ProceedOrElse; +use color_eyre::eyre::Result; use namada::types::io::Io; +use namada_sdk::{Namada, NamadaImpl}; use crate::cli; use crate::cli::api::{CliApi, CliClient}; @@ -11,14 +8,11 @@ use crate::cli::args::CliToSdk; use crate::cli::cmds::*; use crate::client::{rpc, tx, utils}; -fn error() -> Report { - eyre!("Fatal error") -} - -impl CliApi { - pub async fn handle_client_command( +impl CliApi { + pub async fn handle_client_command( client: Option, cmd: 
cli::NamadaClient, + io: &IO, ) -> Result<()> where C: CliClient, @@ -35,20 +29,20 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); + let namada = ctx.to_sdk(&client, io); let dry_run = args.tx.dry_run || args.tx.dry_run_wrapper; - tx::submit_custom::<_, IO>(&client, &mut ctx, args) - .await?; + tx::submit_custom(&namada, args).await?; if !dry_run { - crate::wallet::save(&ctx.wallet) + namada + .wallet() + .await + .save() .unwrap_or_else(|err| eprintln!("{}", err)); } else { - IO::println( + io.println( "Transaction dry run. No addresses have been \ saved.", ) @@ -60,13 +54,10 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_transfer::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_transfer(&namada, args).await?; } Sub::TxIbcTransfer(TxIbcTransfer(mut args)) => { let client = client.unwrap_or_else(|| { @@ -74,13 +65,10 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_ibc_transfer::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_ibc_transfer(&namada, args).await?; } Sub::TxUpdateAccount(TxUpdateAccount(mut args)) => { let client = client.unwrap_or_else(|| { @@ -88,15 +76,10 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_update_account::<_, IO>( - &client, &mut ctx, args, - ) - .await?; + let namada 
= ctx.to_sdk(&client, io); + tx::submit_update_account(&namada, args).await?; } Sub::TxInitAccount(TxInitAccount(mut args)) => { let client = client.unwrap_or_else(|| { @@ -104,22 +87,20 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); + let namada = ctx.to_sdk(&client, io); let dry_run = args.tx.dry_run || args.tx.dry_run_wrapper; - tx::submit_init_account::<_, IO>( - &client, &mut ctx, args, - ) - .await?; + tx::submit_init_account(&namada, args).await?; if !dry_run { - crate::wallet::save(&ctx.wallet) + namada + .wallet() + .await + .save() .unwrap_or_else(|err| eprintln!("{}", err)); } else { - IO::println( + io.println( "Transaction dry run. No addresses have been \ saved.", ) @@ -131,13 +112,21 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_init_validator::<_, IO>(&client, ctx, args) - .await?; + let namada = NamadaImpl::native_new( + &client, + &mut ctx.wallet, + &mut ctx.shielded, + io, + ctx.native_token, + ); + tx::submit_init_validator( + &namada, + &mut ctx.config, + args, + ) + .await?; } Sub::TxInitProposal(TxInitProposal(mut args)) => { let client = client.unwrap_or_else(|| { @@ -145,13 +134,10 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_init_proposal::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_init_proposal(&namada, args).await?; } Sub::TxVoteProposal(TxVoteProposal(mut args)) => { let client = client.unwrap_or_else(|| { @@ -159,13 +145,10 @@ impl CliApi { &mut args.tx.ledger_address, ) 
}); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_vote_proposal::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_vote_proposal(&namada, args).await?; } Sub::TxRevealPk(TxRevealPk(mut args)) => { let client = client.unwrap_or_else(|| { @@ -173,13 +156,10 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_reveal_pk::<_, IO>(&client, &mut ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_reveal_pk(&namada, args).await?; } Sub::Bond(Bond(mut args)) => { let client = client.unwrap_or_else(|| { @@ -187,13 +167,10 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_bond::<_, IO>(&client, &mut ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_bond(&namada, args).await?; } Sub::Unbond(Unbond(mut args)) => { let client = client.unwrap_or_else(|| { @@ -201,13 +178,10 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_unbond::<_, IO>(&client, &mut ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_unbond(&namada, args).await?; } Sub::Withdraw(Withdraw(mut args)) => { let client = client.unwrap_or_else(|| { @@ -215,13 +189,21 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = 
args.to_sdk(&mut ctx); - tx::submit_withdraw::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_withdraw(&namada, args).await?; + } + Sub::Redelegate(Redelegate(mut args)) => { + let client = client.unwrap_or_else(|| { + C::from_tendermint_address( + &mut args.tx.ledger_address, + ) + }); + client.wait_until_node_is_synced(io).await?; + let args = args.to_sdk(&mut ctx); + let namada = ctx.to_sdk(&client, io); + tx::submit_redelegate(&namada, args).await?; } Sub::TxCommissionRateChange(TxCommissionRateChange( mut args, @@ -231,15 +213,11 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_validator_commission_change::<_, IO>( - &client, ctx, args, - ) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_validator_commission_change(&namada, args) + .await?; } // Eth bridge Sub::AddToEthBridgePool(args) => { @@ -249,66 +227,10 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - let tx_args = args.tx.clone(); - - let default_signer = Some(args.sender.clone()); - let signing_data = tx::aux_signing_data::<_, IO>( - &client, - &mut ctx.wallet, - &args.tx, - Some(args.sender.clone()), - default_signer, - ) - .await?; - - let (mut tx, _epoch) = - bridge_pool::build_bridge_pool_tx::<_, _, _, IO>( - &client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - - signing::generate_test_vector::<_, _, IO>( - &client, - &mut ctx.wallet, - &tx, - ) - .await?; - - if args.tx.dump_tx { - dump_tx::(&args.tx, tx); - } else { - tx::submit_reveal_aux::<_, IO>( - &client, - &mut ctx, - tx_args.clone(), - &args.sender, - ) - .await?; - - 
signing::sign_tx( - &mut ctx.wallet, - &tx_args, - &mut tx, - signing_data, - )?; - - sdk_tx::process_tx::<_, _, IO>( - &client, - &mut ctx.wallet, - &tx_args, - tx, - ) - .await?; - } + let namada = ctx.to_sdk(&client, io); + tx::submit_bridge_pool_tx(&namada, args).await?; } Sub::TxUnjailValidator(TxUnjailValidator(mut args)) => { let client = client.unwrap_or_else(|| { @@ -316,15 +238,10 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_unjail_validator::<_, IO>( - &client, ctx, args, - ) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_unjail_validator(&namada, args).await?; } Sub::TxUpdateStewardCommission( TxUpdateStewardCommission(mut args), @@ -334,15 +251,11 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_update_steward_commission::<_, IO>( - &client, ctx, args, - ) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_update_steward_commission(&namada, args) + .await?; } Sub::TxResignSteward(TxResignSteward(mut args)) => { let client = client.unwrap_or_else(|| { @@ -350,24 +263,19 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::submit_resign_steward::<_, IO>(&client, ctx, args) - .await?; + let namada = ctx.to_sdk(&client, io); + tx::submit_resign_steward(&namada, args).await?; } // Ledger queries Sub::QueryEpoch(QueryEpoch(mut args)) => { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut args.ledger_address) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; - 
rpc::query_and_print_epoch::<_, IO>(&client).await; + client.wait_until_node_is_synced(io).await?; + let namada = ctx.to_sdk(&client, io); + rpc::query_and_print_epoch(&namada).await; } Sub::QueryValidatorState(QueryValidatorState(mut args)) => { let client = client.unwrap_or_else(|| { @@ -375,17 +283,11 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_and_print_validator_state::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_and_print_validator_state(&namada, args) + .await; } Sub::QueryTransfers(QueryTransfers(mut args)) => { let client = client.unwrap_or_else(|| { @@ -393,18 +295,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_transfers::<_, _, IO>( - &client, - &mut ctx.wallet, - &mut ctx.shielded, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_transfers(&namada, args).await; } Sub::QueryConversions(QueryConversions(mut args)) => { let client = client.unwrap_or_else(|| { @@ -412,27 +306,18 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_conversions::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_conversions(&namada, args).await; } Sub::QueryBlock(QueryBlock(mut args)) => { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut args.ledger_address) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; - rpc::query_block::<_, 
IO>(&client).await; + client.wait_until_node_is_synced(io).await?; + let namada = ctx.to_sdk(&client, io); + rpc::query_block(&namada).await; } Sub::QueryBalance(QueryBalance(mut args)) => { let client = client.unwrap_or_else(|| { @@ -440,18 +325,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_balance::<_, _, IO>( - &client, - &mut ctx.wallet, - &mut ctx.shielded, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_balance(&namada, args).await; } Sub::QueryBonds(QueryBonds(mut args)) => { let client = client.unwrap_or_else(|| { @@ -459,18 +336,12 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_bonds::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await - .expect("expected successful query of bonds"); + let namada = ctx.to_sdk(&client, io); + rpc::query_bonds(&namada, args) + .await + .expect("expected successful query of bonds"); } Sub::QueryBondedStake(QueryBondedStake(mut args)) => { let client = client.unwrap_or_else(|| { @@ -478,12 +349,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_bonded_stake::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_bonded_stake(&namada, args).await; } Sub::QueryCommissionRate(QueryCommissionRate(mut args)) => { let client = client.unwrap_or_else(|| { @@ -491,17 +360,11 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + 
client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_and_print_commission_rate::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_and_print_commission_rate(&namada, args) + .await; } Sub::QuerySlashes(QuerySlashes(mut args)) => { let client = client.unwrap_or_else(|| { @@ -509,17 +372,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_slashes::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_slashes(&namada, args).await; } Sub::QueryDelegations(QueryDelegations(mut args)) => { let client = client.unwrap_or_else(|| { @@ -527,17 +383,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_delegations::<_, IO>( - &client, - &mut ctx.wallet, - args, - ) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_delegations(&namada, args).await; } Sub::QueryFindValidator(QueryFindValidator(mut args)) => { let client = client.unwrap_or_else(|| { @@ -545,12 +394,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_find_validator::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_find_validator(&namada, args).await; } Sub::QueryResult(QueryResult(mut args)) => { let client = client.unwrap_or_else(|| { @@ -558,12 +405,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - 
.proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_result::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_result(&namada, args).await; } Sub::QueryRawBytes(QueryRawBytes(mut args)) => { let client = client.unwrap_or_else(|| { @@ -571,12 +416,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_raw_bytes::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_raw_bytes(&namada, args).await; } Sub::QueryProposal(QueryProposal(mut args)) => { let client = client.unwrap_or_else(|| { @@ -584,12 +427,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_proposal::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_proposal(&namada, args).await; } Sub::QueryProposalResult(QueryProposalResult(mut args)) => { let client = client.unwrap_or_else(|| { @@ -597,13 +438,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_proposal_result::<_, IO>(&client, args) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_proposal_result(&namada, args).await; } Sub::QueryProtocolParameters(QueryProtocolParameters( mut args, @@ -613,13 +451,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_protocol_parameters::<_, 
IO>(&client, args) - .await; + let namada = ctx.to_sdk(&client, io); + rpc::query_protocol_parameters(&namada, args).await; } Sub::QueryPgf(QueryPgf(mut args)) => { let client = client.unwrap_or_else(|| { @@ -627,12 +462,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_pgf::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_pgf(&namada, args).await; } Sub::QueryAccount(QueryAccount(mut args)) => { let client = client.unwrap_or_else(|| { @@ -640,12 +473,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::query_account::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::query_account(&namada, args).await; } Sub::SignTx(SignTx(mut args)) => { let client = client.unwrap_or_else(|| { @@ -653,12 +484,23 @@ impl CliApi { &mut args.tx.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; + let args = args.to_sdk(&mut ctx); + let namada = ctx.to_sdk(&client, io); + tx::sign_tx(&namada, args).await?; + } + Sub::GenIbcShieldedTransafer(GenIbcShieldedTransafer( + mut args, + )) => { + let client = client.unwrap_or_else(|| { + C::from_tendermint_address( + &mut args.query.ledger_address, + ) + }); + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - tx::sign_tx::<_, IO>(&client, &mut ctx, args).await?; + let namada = ctx.to_sdk(&client, io); + tx::gen_ibc_shielded_transfer(&namada, args).await?; } } } @@ -691,12 +533,10 @@ impl CliApi { let mut ledger_address = args.ledger_address.clone(); let client = C::from_tendermint_address(&mut ledger_address); 
- client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - rpc::epoch_sleep::<_, IO>(&client, args).await; + let namada = ctx.to_sdk(&client, io); + rpc::epoch_sleep(&namada, args).await; } }, } diff --git a/apps/src/lib/cli/context.rs b/apps/src/lib/cli/context.rs index 4aac8b1026..64401984ce 100644 --- a/apps/src/lib/cli/context.rs +++ b/apps/src/lib/cli/context.rs @@ -6,17 +6,20 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use color_eyre::eyre::Result; -use namada::sdk::masp::ShieldedContext; -use namada::sdk::wallet::Wallet; +use namada::ledger::ibc::storage::ibc_token; use namada::types::address::{Address, InternalAddress}; use namada::types::chain::ChainId; use namada::types::ethereum_events::EthAddress; +use namada::types::ibc::is_ibc_denom; use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::*; +use namada_sdk::masp::fs::FsShieldedUtils; +use namada_sdk::masp::ShieldedContext; +use namada_sdk::wallet::Wallet; +use namada_sdk::{Namada, NamadaImpl}; use super::args; -use crate::client::tx::CLIShieldedUtils; #[cfg(any(test, feature = "dev"))] use crate::config::genesis; use crate::config::genesis::genesis_config; @@ -78,7 +81,7 @@ pub struct Context { /// The ledger configuration for a specific chain ID pub config: Config, /// The context fr shielded operations - pub shielded: ShieldedContext, + pub shielded: ShieldedContext, /// Native token's address pub native_token: Address, } @@ -145,11 +148,30 @@ impl Context { wallet, global_config, config, - shielded: CLIShieldedUtils::new::(chain_dir), + shielded: FsShieldedUtils::new(chain_dir), native_token, }) } + /// Make an implementation of Namada from this object and parameters. 
+ pub fn to_sdk<'a, C, IO>( + &'a mut self, + client: &'a C, + io: &'a IO, + ) -> impl Namada + where + C: namada::ledger::queries::Client + Sync, + IO: Io, + { + NamadaImpl::native_new( + client, + &mut self.wallet, + &mut self.shielded, + io, + self.native_token.clone(), + ) + } + /// Parse and/or look-up the value from the context. pub fn get(&self, from_context: &FromContext) -> T where @@ -367,6 +389,20 @@ impl ArgFromContext for Address { }) .unwrap_or(Err(Skip)) }) + // An IBC token + .or_else(|_| { + is_ibc_denom(raw) + .map(|(trace_path, base_denom)| { + let base_token = ctx + .wallet + .find_address(&base_denom) + .map(|addr| addr.to_string()) + .unwrap_or(base_denom); + let ibc_denom = format!("{trace_path}/{base_token}"); + ibc_token(ibc_denom) + }) + .ok_or(Skip) + }) // Or it can be an alias that may be found in the wallet .or_else(|_| ctx.wallet.find_address(raw).cloned().ok_or(Skip)) .map_err(|_| format!("Unknown address {raw}")) diff --git a/apps/src/lib/cli/relayer.rs b/apps/src/lib/cli/relayer.rs index 3322e84e2f..9d241f5cd0 100644 --- a/apps/src/lib/cli/relayer.rs +++ b/apps/src/lib/cli/relayer.rs @@ -1,24 +1,20 @@ use std::sync::Arc; -use color_eyre::eyre::{eyre, Report, Result}; +use color_eyre::eyre::Result; use namada::eth_bridge::ethers::providers::{Http, Provider}; -use namada::ledger::eth_bridge::{bridge_pool, validator_set}; -use namada::types::control_flow::ProceedOrElse; use namada::types::io::Io; +use namada_sdk::eth_bridge::{bridge_pool, validator_set}; use crate::cli; use crate::cli::api::{CliApi, CliClient}; use crate::cli::args::{CliToSdk, CliToSdkCtxless}; use crate::cli::cmds::*; -fn error() -> Report { - eyre!("Fatal error") -} - -impl CliApi { +impl CliApi { pub async fn handle_relayer_command( client: Option, cmd: cli::NamadaRelayer, + io: &impl Io, ) -> Result<()> where C: CliClient, @@ -35,14 +31,10 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - 
.proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk(&mut ctx); - bridge_pool::recommend_batch::<_, IO>(&client, args) - .await - .proceed_or_else(error)?; + let namada = ctx.to_sdk(&client, io); + bridge_pool::recommend_batch(&namada, args).await?; } } } @@ -55,14 +47,9 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk_ctxless(); - bridge_pool::construct_proof::<_, IO>(&client, args) - .await - .proceed_or_else(error)?; + bridge_pool::construct_proof(&client, io, args).await?; } EthBridgePoolWithoutCtx::RelayProof(RelayProof(mut args)) => { let client = client.unwrap_or_else(|| { @@ -70,20 +57,16 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let eth_client = Arc::new( Provider::::try_from(&args.eth_rpc_endpoint) .unwrap(), ); let args = args.to_sdk_ctxless(); - bridge_pool::relay_bridge_pool_proof::<_, _, IO>( - eth_client, &client, args, + bridge_pool::relay_bridge_pool_proof( + eth_client, &client, io, args, ) - .await - .proceed_or_else(error)?; + .await?; } EthBridgePoolWithoutCtx::QueryPool(QueryEthBridgePool( mut query, @@ -91,11 +74,8 @@ impl CliApi { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut query.ledger_address) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; - bridge_pool::query_bridge_pool::<_, IO>(&client).await; + client.wait_until_node_is_synced(io).await?; + bridge_pool::query_bridge_pool(&client, io).await?; } EthBridgePoolWithoutCtx::QuerySigned( QuerySignedBridgePool(mut query), @@ -103,13 +83,8 @@ impl CliApi { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut query.ledger_address) }); - client - .wait_until_node_is_synced::() - 
.await - .proceed_or_else(error)?; - bridge_pool::query_signed_bridge_pool::<_, IO>(&client) - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; + bridge_pool::query_signed_bridge_pool(&client, io).await?; } EthBridgePoolWithoutCtx::QueryRelays(QueryRelayProgress( mut query, @@ -117,11 +92,8 @@ impl CliApi { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut query.ledger_address) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; - bridge_pool::query_relay_progress::<_, IO>(&client).await; + client.wait_until_node_is_synced(io).await?; + bridge_pool::query_relay_progress(&client, io).await?; } }, cli::NamadaRelayer::ValidatorSet(sub) => match sub { @@ -133,15 +105,12 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk_ctxless(); - validator_set::query_bridge_validator_set::<_, IO>( - &client, args, + validator_set::query_bridge_validator_set( + &client, io, args, ) - .await; + .await?; } ValidatorSet::GovernanceValidatorSet( GovernanceValidatorSet(mut args), @@ -151,15 +120,12 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk_ctxless(); - validator_set::query_governnace_validator_set::<_, IO>( - &client, args, + validator_set::query_governnace_validator_set( + &client, io, args, ) - .await; + .await?; } ValidatorSet::ValidatorSetProof(ValidatorSetProof( mut args, @@ -169,15 +135,12 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let args = args.to_sdk_ctxless(); - validator_set::query_validator_set_update_proof::<_, IO>( - &client, args, + 
validator_set::query_validator_set_update_proof( + &client, io, args, ) - .await; + .await?; } ValidatorSet::ValidatorSetUpdateRelay( ValidatorSetUpdateRelay(mut args), @@ -187,20 +150,16 @@ impl CliApi { &mut args.query.ledger_address, ) }); - client - .wait_until_node_is_synced::() - .await - .proceed_or_else(error)?; + client.wait_until_node_is_synced(io).await?; let eth_client = Arc::new( Provider::::try_from(&args.eth_rpc_endpoint) .unwrap(), ); let args = args.to_sdk_ctxless(); - validator_set::relay_validator_set_update::<_, _, IO>( - eth_client, &client, args, + validator_set::relay_validator_set_update( + eth_client, &client, io, args, ) - .await - .proceed_or_else(error)?; + .await?; } }, } diff --git a/apps/src/lib/cli/utils.rs b/apps/src/lib/cli/utils.rs index 26cc38ff7f..7c8bc4100c 100644 --- a/apps/src/lib/cli/utils.rs +++ b/apps/src/lib/cli/utils.rs @@ -8,8 +8,9 @@ use clap::{ArgAction, ArgMatches}; use color_eyre::eyre::Result; use super::args; -use super::context::{Context, FromContext}; +use super::context::Context; use crate::cli::api::CliIo; +use crate::cli::context::FromContext; // We only use static strings pub type App = clap::Command; diff --git a/apps/src/lib/cli/wallet.rs b/apps/src/lib/cli/wallet.rs index d039c7ef10..14b9ab9d9a 100644 --- a/apps/src/lib/cli/wallet.rs +++ b/apps/src/lib/cli/wallet.rs @@ -3,16 +3,20 @@ use std::fs::File; use std::io::{self, Write}; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use color_eyre::eyre::Result; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::sdk::masp::find_valid_diversifier; -use namada::sdk::wallet::{DecryptionError, FindKeyError}; use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::{MaspValue, PaymentAddress}; -use namada::{display, display_line, edisplay_line}; +use namada_sdk::masp::find_valid_diversifier; +use namada_sdk::wallet::{ + DecryptionError, FindKeyError, GenRestoreKeyError, Wallet, WalletIo, + 
WalletStorage, +}; +use namada_sdk::{display, display_line, edisplay_line}; +use rand::RngCore; use rand_core::OsRng; use crate::cli; @@ -21,65 +25,66 @@ use crate::cli::args::CliToSdk; use crate::cli::{args, cmds, Context}; use crate::wallet::{read_and_confirm_encryption_password, CliWalletUtils}; -impl CliApi { +impl CliApi { pub fn handle_wallet_command( cmd: cmds::NamadaWallet, mut ctx: Context, + io: &impl Io, ) -> Result<()> { match cmd { cmds::NamadaWallet::Key(sub) => match sub { cmds::WalletKey::Restore(cmds::KeyRestore(args)) => { - key_and_address_restore::(ctx, args) + key_and_address_restore(&mut ctx.wallet, io, args) } cmds::WalletKey::Gen(cmds::KeyGen(args)) => { - key_and_address_gen::(ctx, args) + key_and_address_gen(&mut ctx.wallet, io, &mut OsRng, args) } cmds::WalletKey::Find(cmds::KeyFind(args)) => { - key_find::(ctx, args) + key_find(&mut ctx.wallet, io, args) } cmds::WalletKey::List(cmds::KeyList(args)) => { - key_list::(ctx, args) + key_list(&mut ctx.wallet, io, args) } cmds::WalletKey::Export(cmds::Export(args)) => { - key_export::(ctx, args) + key_export(&mut ctx.wallet, io, args) } }, cmds::NamadaWallet::Address(sub) => match sub { cmds::WalletAddress::Gen(cmds::AddressGen(args)) => { - key_and_address_gen::(ctx, args) + key_and_address_gen(&mut ctx.wallet, io, &mut OsRng, args) } cmds::WalletAddress::Restore(cmds::AddressRestore(args)) => { - key_and_address_restore::(ctx, args) + key_and_address_restore(&mut ctx.wallet, io, args) } cmds::WalletAddress::Find(cmds::AddressOrAliasFind(args)) => { - address_or_alias_find::(ctx, args) + address_or_alias_find(&mut ctx.wallet, io, args) } cmds::WalletAddress::List(cmds::AddressList) => { - address_list::(ctx) + address_list(&mut ctx.wallet, io) } cmds::WalletAddress::Add(cmds::AddressAdd(args)) => { - address_add::(ctx, args) + address_add(&mut ctx.wallet, io, args) } }, cmds::NamadaWallet::Masp(sub) => match sub { cmds::WalletMasp::GenSpendKey(cmds::MaspGenSpendKey(args)) => { - 
spending_key_gen::(ctx, args) + spending_key_gen(&mut ctx.wallet, io, args) } cmds::WalletMasp::GenPayAddr(cmds::MaspGenPayAddr(args)) => { let args = args.to_sdk(&mut ctx); - payment_address_gen::(ctx, args) + payment_address_gen(&mut ctx.wallet, io, args) } cmds::WalletMasp::AddAddrKey(cmds::MaspAddAddrKey(args)) => { - address_key_add::(ctx, args) + address_key_add(&mut ctx.wallet, io, args) } cmds::WalletMasp::ListPayAddrs(cmds::MaspListPayAddrs) => { - payment_addresses_list::(ctx) + payment_addresses_list(&mut ctx.wallet, io) } cmds::WalletMasp::ListKeys(cmds::MaspListKeys(args)) => { - spending_keys_list::(ctx, args) + spending_keys_list(&mut ctx.wallet, io, args) } cmds::WalletMasp::FindAddrKey(cmds::MaspFindAddrKey(args)) => { - address_key_find::(ctx, args) + address_key_find(&mut ctx.wallet, io, args) } }, } @@ -88,35 +93,35 @@ impl CliApi { } /// Find shielded address or key -fn address_key_find( - ctx: Context, +fn address_key_find( + wallet: &mut Wallet, + io: &impl Io, args::AddrKeyFind { alias, unsafe_show_secret, }: args::AddrKeyFind, ) { - let mut wallet = ctx.wallet; let alias = alias.to_lowercase(); if let Ok(viewing_key) = wallet.find_viewing_key(&alias) { // Check if alias is a viewing key - display_line!(IO, "Viewing key: {}", viewing_key); + display_line!(io, "Viewing key: {}", viewing_key); if unsafe_show_secret { // Check if alias is also a spending key match wallet.find_spending_key(&alias, None) { Ok(spending_key) => { - display_line!(IO, "Spending key: {}", spending_key) + display_line!(io, "Spending key: {}", spending_key) } Err(FindKeyError::KeyNotFound) => {} - Err(err) => edisplay_line!(IO, "{}", err), + Err(err) => edisplay_line!(io, "{}", err), } } } else if let Some(payment_addr) = wallet.find_payment_addr(&alias) { // Failing that, check if alias is a payment address - display_line!(IO, "Payment address: {}", payment_addr); + display_line!(io, "Payment address: {}", payment_addr); } else { // Otherwise alias cannot be referring 
to any shielded value display_line!( - IO, + io, "No shielded address or key with alias {} found. Use the commands \ `masp list-addrs` and `masp list-keys` to see all the known \ addresses and keys.", @@ -126,44 +131,44 @@ fn address_key_find( } /// List spending keys. -fn spending_keys_list( - ctx: Context, +fn spending_keys_list( + wallet: &mut Wallet, + io: &impl Io, args::MaspKeysList { decrypt, unsafe_show_secret, }: args::MaspKeysList, ) { - let wallet = ctx.wallet; let known_view_keys = wallet.get_viewing_keys(); let known_spend_keys = wallet.get_spending_keys(); if known_view_keys.is_empty() { display_line!( - IO, + io, "No known keys. Try `masp add --alias my-addr --value ...` to add \ a new key to the wallet.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, &mut w; "Known keys:").unwrap(); + display_line!(io, &mut w; "Known keys:").unwrap(); for (alias, key) in known_view_keys { - display!(IO, &mut w; " Alias \"{}\"", alias).unwrap(); + display!(io, &mut w; " Alias \"{}\"", alias).unwrap(); let spending_key_opt = known_spend_keys.get(&alias); // If this alias is associated with a spending key, indicate whether // or not the spending key is encrypted // TODO: consider turning if let into match if let Some(spending_key) = spending_key_opt { if spending_key.is_encrypted() { - display_line!(IO, &mut w; " (encrypted):") + display_line!(io, &mut w; " (encrypted):") } else { - display_line!(IO, &mut w; " (not encrypted):") + display_line!(io, &mut w; " (not encrypted):") } .unwrap(); } else { - display_line!(IO, &mut w; ":").unwrap(); + display_line!(io, &mut w; ":").unwrap(); } // Always print the corresponding viewing key - display_line!(IO, &mut w; " Viewing Key: {}", key).unwrap(); + display_line!(io, &mut w; " Viewing Key: {}", key).unwrap(); // A subset of viewing keys will have corresponding spending keys. // Print those too if they are available and requested. 
if unsafe_show_secret { @@ -172,7 +177,7 @@ fn spending_keys_list( // Here the spending key is unencrypted or successfully // decrypted Ok(spending_key) => { - display_line!(IO, + display_line!(io, &mut w; " Spending key: {}", spending_key, ) @@ -186,7 +191,7 @@ fn spending_keys_list( // Here the key is encrypted but incorrect password has // been provided Err(err) => { - display_line!(IO, + display_line!(io, &mut w; " Couldn't decrypt the spending key: {}", err, @@ -201,49 +206,52 @@ fn spending_keys_list( } /// List payment addresses. -fn payment_addresses_list(ctx: Context) { - let wallet = ctx.wallet; +fn payment_addresses_list( + wallet: &mut Wallet, + io: &impl Io, +) { let known_addresses = wallet.get_payment_addrs(); if known_addresses.is_empty() { display_line!( - IO, + io, "No known payment addresses. Try `masp gen-addr --alias my-addr` \ to generate a new payment address.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, &mut w; "Known payment addresses:").unwrap(); + display_line!(io, &mut w; "Known payment addresses:").unwrap(); for (alias, address) in sorted(known_addresses) { - display_line!(IO, &mut w; " \"{}\": {}", alias, address).unwrap(); + display_line!(io, &mut w; " \"{}\": {}", alias, address).unwrap(); } } } /// Generate a spending key. 
-fn spending_key_gen( - ctx: Context, +fn spending_key_gen( + wallet: &mut Wallet, + io: &impl Io, args::MaspSpendKeyGen { alias, alias_force, unsafe_dont_encrypt, }: args::MaspSpendKeyGen, ) { - let mut wallet = ctx.wallet; let alias = alias.to_lowercase(); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); let (alias, _key) = wallet.gen_spending_key(alias, password, alias_force); - crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); + wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); display_line!( - IO, + io, "Successfully added a spending key with alias: \"{}\"", alias ); } /// Generate a shielded payment address from the given key. -fn payment_address_gen( - ctx: Context, +fn payment_address_gen( + wallet: &mut Wallet, + io: &impl Io, args::MaspPayAddrGen { alias, alias_force, @@ -257,7 +265,6 @@ fn payment_address_gen( let payment_addr = viewing_key .to_payment_address(div) .expect("a PaymentAddress"); - let mut wallet = ctx.wallet; let alias = wallet .insert_payment_addr( alias, @@ -265,20 +272,21 @@ fn payment_address_gen( alias_force, ) .unwrap_or_else(|| { - edisplay_line!(IO, "Payment address not added"); + edisplay_line!(io, "Payment address not added"); cli::safe_exit(1); }); - crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); + wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); display_line!( - IO, + io, "Successfully generated a payment address with the following alias: {}", alias, ); } /// Add a viewing key, spending key, or payment address to wallet. 
-fn address_key_add( - mut ctx: Context, +fn address_key_add( + wallet: &mut Wallet, + io: &impl Io, args::MaspAddrKeyAdd { alias, alias_force, @@ -289,11 +297,10 @@ fn address_key_add( let alias = alias.to_lowercase(); let (alias, typ) = match value { MaspValue::FullViewingKey(viewing_key) => { - let alias = ctx - .wallet + let alias = wallet .insert_viewing_key(alias, viewing_key, alias_force) .unwrap_or_else(|| { - edisplay_line!(IO, "Viewing key not added"); + edisplay_line!(io, "Viewing key not added"); cli::safe_exit(1); }); (alias, "viewing key") @@ -301,8 +308,7 @@ fn address_key_add( MaspValue::ExtendedSpendingKey(spending_key) => { let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let alias = ctx - .wallet + let alias = wallet .encrypt_insert_spending_key( alias, spending_key, @@ -310,25 +316,24 @@ fn address_key_add( alias_force, ) .unwrap_or_else(|| { - edisplay_line!(IO, "Spending key not added"); + edisplay_line!(io, "Spending key not added"); cli::safe_exit(1); }); (alias, "spending key") } MaspValue::PaymentAddress(payment_addr) => { - let alias = ctx - .wallet + let alias = wallet .insert_payment_addr(alias, payment_addr, alias_force) .unwrap_or_else(|| { - edisplay_line!(IO, "Payment address not added"); + edisplay_line!(io, "Payment address not added"); cli::safe_exit(1); }); (alias, "payment address") } }; - crate::wallet::save(&ctx.wallet).unwrap_or_else(|err| eprintln!("{}", err)); + wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); display_line!( - IO, + io, "Successfully added a {} with the following alias to wallet: {}", typ, alias, @@ -337,8 +342,9 @@ fn address_key_add( /// Restore a keypair and an implicit address from the mnemonic code in the /// wallet. 
-fn key_and_address_restore( - ctx: Context, +fn key_and_address_restore( + wallet: &mut Wallet, + io: &impl Io, args::KeyAndAddressRestore { scheme, alias, @@ -347,7 +353,6 @@ fn key_and_address_restore( derivation_path, }: args::KeyAndAddressRestore, ) { - let mut wallet = ctx.wallet; let encryption_password = read_and_confirm_encryption_password(unsafe_dont_encrypt); let (alias, _key) = wallet @@ -356,20 +361,22 @@ fn key_and_address_restore( alias, alias_force, derivation_path, + None, encryption_password, ) .unwrap_or_else(|err| { - edisplay_line!(IO, "{}", err); + edisplay_line!(io, "{}", err); cli::safe_exit(1) }) .unwrap_or_else(|| { - display_line!(IO, "No changes are persisted. Exiting."); + display_line!(io, "No changes are persisted. Exiting."); cli::safe_exit(0); }); - crate::wallet::save(&wallet) - .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); + wallet + .save() + .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); display_line!( - IO, + io, "Successfully added a key and an address with alias: \"{}\"", alias ); @@ -377,8 +384,10 @@ fn key_and_address_restore( /// Generate a new keypair and derive implicit address from it and store them in /// the wallet. 
-fn key_and_address_gen( - ctx: Context, +fn key_and_address_gen( + wallet: &mut Wallet>, + io: &impl Io, + rng: &mut R, args::KeyAndAddressGen { scheme, alias, @@ -387,40 +396,42 @@ fn key_and_address_gen( derivation_path, }: args::KeyAndAddressGen, ) { - let mut wallet = ctx.wallet; let encryption_password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let mut rng = OsRng; - let derivation_path_and_mnemonic_rng = - derivation_path.map(|p| (p, &mut rng)); - let (alias, _key) = wallet + let derivation_path_and_mnemonic_rng = derivation_path.map(|p| (p, rng)); + let (alias, _key, _mnemonic) = wallet .gen_key( scheme, alias, alias_force, + None, encryption_password, derivation_path_and_mnemonic_rng, ) - .unwrap_or_else(|err| { - edisplay_line!(IO, "{}", err); - cli::safe_exit(1); - }) - .unwrap_or_else(|| { - display_line!(IO, "No changes are persisted. Exiting."); - cli::safe_exit(0); + .unwrap_or_else(|err| match err { + GenRestoreKeyError::KeyStorageError => { + println!("No changes are persisted. Exiting."); + cli::safe_exit(0); + } + _ => { + eprintln!("{}", err); + cli::safe_exit(1); + } }); - crate::wallet::save(&wallet) - .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); + wallet + .save() + .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); display_line!( - IO, + io, "Successfully added a key and an address with alias: \"{}\"", alias ); } /// Find a keypair in the wallet store. 
-fn key_find( - ctx: Context, +fn key_find( + wallet: &mut Wallet, + io: &impl Io, args::KeyFind { public_key, alias, @@ -428,7 +439,6 @@ fn key_find( unsafe_show_secret, }: args::KeyFind, ) { - let mut wallet = ctx.wallet; let found_keypair = match public_key { Some(pk) => wallet.find_key_by_pk(&pk, None), None => { @@ -436,7 +446,7 @@ fn key_find( match alias { None => { edisplay_line!( - IO, + io, "An alias, public key or public key hash needs to be \ supplied", ); @@ -449,62 +459,62 @@ fn key_find( match found_keypair { Ok(keypair) => { let pkh: PublicKeyHash = (&keypair.ref_to()).into(); - display_line!(IO, "Public key hash: {}", pkh); - display_line!(IO, "Public key: {}", keypair.ref_to()); + display_line!(io, "Public key hash: {}", pkh); + display_line!(io, "Public key: {}", keypair.ref_to()); if unsafe_show_secret { - display_line!(IO, "Secret key: {}", keypair); + display_line!(io, "Secret key: {}", keypair); } } Err(err) => { - edisplay_line!(IO, "{}", err); + edisplay_line!(io, "{}", err); } } } /// List all known keys. -fn key_list( - ctx: Context, +fn key_list( + wallet: &mut Wallet, + io: &impl Io, args::KeyList { decrypt, unsafe_show_secret, }: args::KeyList, ) { - let wallet = ctx.wallet; let known_keys = wallet.get_keys(); if known_keys.is_empty() { display_line!( - IO, + io, "No known keys. 
Try `key gen --alias my-key` to generate a new \ key.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, &mut w; "Known keys:").unwrap(); + display_line!(io, &mut w; "Known keys:").unwrap(); for (alias, (stored_keypair, pkh)) in known_keys { let encrypted = if stored_keypair.is_encrypted() { "encrypted" } else { "not encrypted" }; - display_line!(IO, + display_line!(io, &mut w; " Alias \"{}\" ({}):", alias, encrypted, ) .unwrap(); if let Some(pkh) = pkh { - display_line!(IO, &mut w; " Public key hash: {}", pkh) + display_line!(io, &mut w; " Public key hash: {}", pkh) .unwrap(); } match stored_keypair.get::(decrypt, None) { Ok(keypair) => { - display_line!(IO, + display_line!(io, &mut w; " Public key: {}", keypair.ref_to(), ) .unwrap(); if unsafe_show_secret { - display_line!(IO, + display_line!(io, &mut w; " Secret key: {}", keypair, ) @@ -515,7 +525,7 @@ fn key_list( continue; } Err(err) => { - display_line!(IO, + display_line!(io, &mut w; " Couldn't decrypt the keypair: {}", err, ) @@ -527,45 +537,45 @@ fn key_list( } /// Export a keypair to a file. -fn key_export( - ctx: Context, +fn key_export( + wallet: &mut Wallet, + io: &impl Io, args::KeyExport { alias }: args::KeyExport, ) { - let mut wallet = ctx.wallet; wallet .find_key(alias.to_lowercase(), None) .map(|keypair| { - let file_data = keypair - .try_to_vec() - .expect("Encoding keypair shouldn't fail"); + let file_data = keypair.serialize_to_vec(); let file_name = format!("key_{}", alias.to_lowercase()); let mut file = File::create(&file_name).unwrap(); file.write_all(file_data.as_ref()).unwrap(); - display_line!(IO, "Exported to file {}", file_name); + display_line!(io, "Exported to file {}", file_name); }) .unwrap_or_else(|err| { - edisplay_line!(IO, "{}", err); + edisplay_line!(io, "{}", err); cli::safe_exit(1) }) } /// List all known addresses. 
-fn address_list(ctx: Context) { - let wallet = ctx.wallet; +fn address_list( + wallet: &mut Wallet, + io: &impl Io, +) { let known_addresses = wallet.get_addresses(); if known_addresses.is_empty() { display_line!( - IO, + io, "No known addresses. Try `address gen --alias my-addr` to \ generate a new implicit address.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, &mut w; "Known addresses:").unwrap(); + display_line!(io, &mut w; "Known addresses:").unwrap(); for (alias, address) in sorted(known_addresses) { - display_line!(IO, + display_line!(io, &mut w; " \"{}\": {}", alias, address.to_pretty_string(), ) @@ -575,8 +585,11 @@ fn address_list(ctx: Context) { } /// Find address (alias) by its alias (address). -fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { - let wallet = ctx.wallet; +fn address_or_alias_find( + wallet: &mut Wallet, + io: &impl Io, + args: args::AddressOrAliasFind, +) { if args.address.is_some() && args.alias.is_some() { panic!( "This should not be happening: clap should emit its own error \ @@ -585,10 +598,10 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { } else if args.alias.is_some() { if let Some(address) = wallet.find_address(args.alias.as_ref().unwrap()) { - display_line!(IO, "Found address {}", address.to_pretty_string()); + display_line!(io, "Found address {}", address.to_pretty_string()); } else { display_line!( - IO, + io, "No address with alias {} found. Use the command `address \ list` to see all the known addresses.", args.alias.unwrap().to_lowercase() @@ -596,10 +609,10 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { } } else if args.address.is_some() { if let Some(alias) = wallet.find_alias(args.address.as_ref().unwrap()) { - display_line!(IO, "Found alias {}", alias); + display_line!(io, "Found alias {}", alias); } else { display_line!( - IO, + io, "No alias with address {} found. 
Use the command `address \ list` to see all the known addresses.", args.address.unwrap() @@ -609,8 +622,11 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { } /// Add an address to the wallet. -fn address_add(ctx: Context, args: args::AddressAdd) { - let mut wallet = ctx.wallet; +fn address_add( + wallet: &mut Wallet, + io: &impl Io, + args: args::AddressAdd, +) { if wallet .add_address( args.alias.clone().to_lowercase(), @@ -619,13 +635,14 @@ fn address_add(ctx: Context, args: args::AddressAdd) { ) .is_none() { - edisplay_line!(IO, "Address not added"); + edisplay_line!(io, "Address not added"); cli::safe_exit(1); } - crate::wallet::save(&wallet) - .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); + wallet + .save() + .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); display_line!( - IO, + io, "Successfully added a key and an address with alias: \"{}\"", args.alias.to_lowercase() ); diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index d750ccc759..e6ddbff2e9 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -7,7 +7,8 @@ use std::io; use std::iter::Iterator; use std::str::FromStr; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use itertools::Either; use masp_primitives::asset_type::AssetType; @@ -29,76 +30,67 @@ use namada::core::ledger::governance::utils::{ use namada::core::ledger::pgf::parameters::PgfParameters; use namada::core::ledger::pgf::storage::steward::StewardDetail; use namada::ledger::events::Event; +use namada::ledger::ibc::storage::{ + ibc_denom_key, ibc_denom_key_prefix, is_ibc_denom_key, +}; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; -use namada::ledger::pos::{CommissionPair, PosParams, Slash}; +use namada::ledger::pos::types::{CommissionPair, Slash}; +use namada::ledger::pos::PosParams; use namada::ledger::queries::RPC; use 
namada::ledger::storage::ConversionState; use namada::proof_of_stake::types::{ValidatorState, WeightedValidator}; -use namada::sdk::error; -use namada::sdk::error::{is_pinned_error, Error, PinnedBalanceError}; -use namada::sdk::masp::{ - Conversions, MaspAmount, MaspChange, ShieldedContext, ShieldedUtils, -}; -use namada::sdk::rpc::{ - self, enriched_bonds_and_unbonds, format_denominated_amount, query_epoch, - TxResponse, -}; -use namada::sdk::wallet::{AddressVpType, Wallet}; -use namada::types::address::{masp, Address}; -use namada::types::control_flow::ProceedOrElse; +use namada::types::address::{masp, Address, InternalAddress}; use namada::types::hash::Hash; +use namada::types::ibc::is_ibc_denom; use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; use namada::types::storage::{BlockHeight, BlockResults, Epoch, Key, KeySeg}; use namada::types::token::{Change, MaspDenom}; use namada::types::{storage, token}; -use namada::{display, display_line, edisplay_line, prompt}; +use namada_sdk::error::{is_pinned_error, Error, PinnedBalanceError}; +use namada_sdk::masp::{Conversions, MaspAmount, MaspChange}; +use namada_sdk::rpc::{ + self, enriched_bonds_and_unbonds, query_epoch, TxResponse, +}; +use namada_sdk::wallet::AddressVpType; +use namada_sdk::{display, display_line, edisplay_line, error, prompt, Namada}; use tokio::time::Instant; use crate::cli::{self, args}; use crate::facade::tendermint::merkle::proof::Proof; use crate::facade::tendermint_rpc::error::Error as TError; -use crate::wallet::CliWalletUtils; /// Query the status of a given transaction. /// /// If a response is not delivered until `deadline`, we exit the cli with an /// error. 
-pub async fn query_tx_status< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - status: namada::sdk::rpc::TxEventQuery<'_>, +pub async fn query_tx_status<'a>( + namada: &impl Namada<'a>, + status: namada_sdk::rpc::TxEventQuery<'_>, deadline: Instant, ) -> Event { - rpc::query_tx_status::<_, IO>(client, status, deadline) + rpc::query_tx_status(namada, status, deadline) .await - .proceed() + .unwrap() } /// Query and print the epoch of the last committed block -pub async fn query_and_print_epoch< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, -) -> Epoch { - let epoch = rpc::query_epoch(client).await.unwrap(); - display_line!(IO, "Last committed epoch: {}", epoch); +pub async fn query_and_print_epoch<'a>(context: &impl Namada<'a>) -> Epoch { + let epoch = rpc::query_epoch(context.client()).await.unwrap(); + display_line!(context.io(), "Last committed epoch: {}", epoch); epoch } /// Query the last committed block -pub async fn query_block( - client: &C, -) { - let block = namada::sdk::rpc::query_block(client).await.unwrap(); +pub async fn query_block<'a>(context: &impl Namada<'a>) { + let block = namada_sdk::rpc::query_block(context.client()) + .await + .unwrap(); match block { Some(block) => { display_line!( - IO, + context.io(), "Last committed block ID: {}, height: {}, time: {}", block.hash, block.height, @@ -106,7 +98,7 @@ pub async fn query_block( ); } None => { - display_line!(IO, "No block has been committed yet."); + display_line!(context.io(), "No block has been committed yet."); } } } @@ -122,26 +114,22 @@ pub async fn query_results( } /// Query the specified accepted transfers from the ledger -pub async fn query_transfers< - C: namada::ledger::queries::Client + Sync, - U: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn query_transfers<'a>( + context: &impl Namada<'a>, args: args::QueryTransfers, ) { let query_token = args.token; + let wallet 
= context.wallet().await; let query_owner = args.owner.map_or_else( || Either::Right(wallet.get_addresses().into_values().collect()), Either::Left, ); + let mut shielded = context.shielded_mut().await; let _ = shielded.load().await; // Obtain the effects of all shielded and transparent transactions let transfers = shielded .query_tx_deltas( - client, + context.client(), &query_owner, &query_token, &wallet.get_viewing_keys(), @@ -173,8 +161,9 @@ pub async fn query_transfers< // Realize the rewards that would have been attained upon the // transaction's reception let amt = shielded - .compute_exchanged_amount::<_, IO>( - client, + .compute_exchanged_amount( + context.client(), + context.io(), amt, epoch, Conversions::new(), @@ -182,7 +171,8 @@ pub async fn query_transfers< .await .unwrap() .0; - let dec = shielded.decode_amount(client, amt, epoch).await; + let dec = + shielded.decode_amount(context.client(), amt, epoch).await; shielded_accounts.insert(acc, dec); } // Check if this transfer pertains to the supplied token @@ -205,7 +195,7 @@ pub async fn query_transfers< continue; } display_line!( - IO, + context.io(), "Height: {}, Index: {}, Transparent Transfer:", height, idx @@ -213,222 +203,195 @@ pub async fn query_transfers< // Display the transparent changes first for (account, MaspChange { ref asset, change }) in tfer_delta { if account != masp() { - display!(IO, " {}:", account); - let token_alias = wallet.lookup_alias(asset); + display!(context.io(), " {}:", account); + let token_alias = + lookup_token_alias(context, asset, &account).await; let sign = match change.cmp(&Change::zero()) { Ordering::Greater => "+", Ordering::Less => "-", Ordering::Equal => "", }; display!( - IO, + context.io(), " {}{} {}", sign, - format_denominated_amount::<_, IO>( - client, - asset, - change.into(), - ) - .await, + context.format_amount(asset, change.into()).await, token_alias ); } - display_line!(IO, ""); + display_line!(context.io(), ""); } // Then display the shielded 
changes afterwards // TODO: turn this to a display impl // (account, amt) for (account, masp_change) in shielded_accounts { if fvk_map.contains_key(&account) { - display!(IO, " {}:", fvk_map[&account]); + display!(context.io(), " {}:", fvk_map[&account]); for (token_addr, val) in masp_change { - let token_alias = wallet.lookup_alias(&token_addr); + let token_alias = + lookup_token_alias(context, &token_addr, &masp()).await; let sign = match val.cmp(&Change::zero()) { Ordering::Greater => "+", Ordering::Less => "-", Ordering::Equal => "", }; display!( - IO, + context.io(), " {}{} {}", sign, - format_denominated_amount::<_, IO>( - client, - &token_addr, - val.into(), - ) - .await, + context.format_amount(&token_addr, val.into()).await, token_alias, ); } - display_line!(IO, ""); + display_line!(context.io(), ""); } } } } /// Query the raw bytes of given storage key -pub async fn query_raw_bytes< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_raw_bytes<'a, N: Namada<'a>>( + context: &N, args: args::QueryRawBytes, ) { - let response = unwrap_client_response::( + let response = unwrap_client_response::( RPC.shell() - .storage_value(client, None, None, false, &args.storage_key) + .storage_value( + context.client(), + None, + None, + false, + &args.storage_key, + ) .await, ); if !response.data.is_empty() { - display_line!(IO, "Found data: 0x{}", HEXLOWER.encode(&response.data)); + display_line!( + context.io(), + "Found data: 0x{}", + HEXLOWER.encode(&response.data) + ); } else { - display_line!(IO, "No data found for key {}", args.storage_key); + display_line!( + context.io(), + "No data found for key {}", + args.storage_key + ); } } /// Query token balance(s) -pub async fn query_balance< - C: namada::ledger::queries::Client + Sync, - U: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn query_balance<'a>( + context: &impl Namada<'a>, args: args::QueryBalance, ) { 
// Query the balances of shielded or transparent account types depending on // the CLI arguments match &args.owner { Some(BalanceOwner::FullViewingKey(_viewing_key)) => { - query_shielded_balance::<_, _, IO>(client, wallet, shielded, args) - .await + query_shielded_balance(context, args).await } Some(BalanceOwner::Address(_owner)) => { - query_transparent_balance::<_, IO>(client, wallet, args).await + query_transparent_balance(context, args).await } Some(BalanceOwner::PaymentAddress(_owner)) => { - query_pinned_balance::<_, _, IO>(client, wallet, shielded, args) - .await + query_pinned_balance(context, args).await } None => { // Print pinned balance - query_pinned_balance::<_, _, IO>( - client, - wallet, - shielded, - args.clone(), - ) - .await; + query_pinned_balance(context, args.clone()).await; // Print shielded balance - query_shielded_balance::<_, _, IO>( - client, - wallet, - shielded, - args.clone(), - ) - .await; + query_shielded_balance(context, args.clone()).await; // Then print transparent balance - query_transparent_balance::<_, IO>(client, wallet, args).await; + query_transparent_balance(context, args).await; } }; } /// Query token balance(s) -pub async fn query_transparent_balance< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn query_transparent_balance<'a>( + context: &impl Namada<'a>, args: args::QueryBalance, ) { let prefix = Key::from( Address::Internal(namada::types::address::InternalAddress::Multitoken) .to_db_key(), ); - let tokens = wallet.tokens_with_aliases(); match (args.token, args.owner) { - (Some(token), Some(owner)) => { - let balance_key = - token::balance_key(&token, &owner.address().unwrap()); - let token_alias = wallet.lookup_alias(&token); - match query_storage_value::(client, &balance_key) + (Some(base_token), Some(owner)) => { + let owner = owner.address().unwrap(); + let tokens = + query_tokens(context, Some(&base_token), Some(&owner)).await; + for (token_alias, 
token) in tokens { + let balance_key = token::balance_key(&token, &owner); + match query_storage_value::<_, token::Amount>( + context.client(), + &balance_key, + ) .await - { - Ok(balance) => { - let balance = format_denominated_amount::<_, IO>( - client, &token, balance, - ) - .await; - display_line!(IO, "{}: {}", token_alias, balance); - } - Err(e) => { - display_line!(IO, "Eror in querying: {e}"); - display_line!( - IO, - "No {} balance found for {}", - token_alias, - owner - ) + { + Ok(balance) => { + let balance = + context.format_amount(&token, balance).await; + display_line!( + context.io(), + "{}: {}", + token_alias, + balance + ); + } + Err(e) => { + display_line!(context.io(), "Querying error: {e}"); + display_line!( + context.io(), + "No {} balance found for {}", + token_alias, + owner + ) + } } } } (None, Some(owner)) => { let owner = owner.address().unwrap(); + let tokens = query_tokens(context, None, Some(&owner)).await; for (token_alias, token) in tokens { - let balance = get_token_balance(client, &token, &owner).await; + let balance = + get_token_balance(context.client(), &token, &owner).await; if !balance.is_zero() { - let balance = format_denominated_amount::<_, IO>( - client, &token, balance, - ) - .await; - display_line!(IO, "{}: {}", token_alias, balance); + let balance = context.format_amount(&token, balance).await; + display_line!(context.io(), "{}: {}", token_alias, balance); } } } - (Some(token), None) => { - let prefix = token::balance_prefix(&token); - let balances = - query_storage_prefix::(client, &prefix) - .await; - if let Some(balances) = balances { - print_balances::<_, IO>( - client, - wallet, - balances, - Some(&token), - None, - ) - .await; + (Some(base_token), None) => { + let tokens = query_tokens(context, Some(&base_token), None).await; + for (_, token) in tokens { + let prefix = token::balance_prefix(&token); + let balances = + query_storage_prefix::(context, &prefix) + .await; + if let Some(balances) = balances { + 
print_balances(context, balances, Some(&token), None).await; + } } } (None, None) => { - let balances = - query_storage_prefix::(client, &prefix) - .await; + let balances = query_storage_prefix(context, &prefix).await; if let Some(balances) = balances { - print_balances::<_, IO>(client, wallet, balances, None, None) - .await; + print_balances(context, balances, None, None).await; } } } } /// Query the token pinned balance(s) -pub async fn query_pinned_balance< - C: namada::ledger::queries::Client + Sync, - U: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn query_pinned_balance<'a>( + context: &impl Namada<'a>, args: args::QueryBalance, ) { // Map addresses to token names - let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); + let wallet = context.wallet().await; let owners = if let Some(pa) = args.owner.and_then(|x| x.payment_address()) { vec![pa] @@ -445,7 +408,7 @@ pub async fn query_pinned_balance< .values() .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) .collect(); - let _ = shielded.load().await; + let _ = context.shielded_mut().await.load().await; // Print the token balances by payment address let pinned_error = Err(Error::from(PinnedBalanceError::InvalidViewingKey)); for owner in owners { @@ -453,8 +416,10 @@ pub async fn query_pinned_balance< // Find the viewing key that can recognize payments the current payment // address for vk in &viewing_keys { - balance = shielded - .compute_exchanged_pinned_balance::<_, IO>(client, owner, vk) + balance = context + .shielded_mut() + .await + .compute_exchanged_pinned_balance(context, owner, vk) .await; if !is_pinned_error(&balance) { break; @@ -463,18 +428,21 @@ pub async fn query_pinned_balance< // If a suitable viewing key was not found, then demand it from the user if is_pinned_error(&balance) { let vk_str = - prompt!(IO, "Enter the viewing key for {}: ", owner).await; + prompt!(context.io(), "Enter the viewing key for {}: ", 
owner) + .await; let fvk = match ExtendedViewingKey::from_str(vk_str.trim()) { Ok(fvk) => fvk, _ => { - edisplay_line!(IO, "Invalid viewing key entered"); + edisplay_line!(context.io(), "Invalid viewing key entered"); continue; } }; let vk = ExtendedFullViewingKey::from(fvk).fvk.vk; // Use the given viewing key to decrypt pinned transaction data - balance = shielded - .compute_exchanged_pinned_balance::<_, IO>(client, owner, &vk) + balance = context + .shielded_mut() + .await + .compute_exchanged_pinned_balance(context, owner, &vk) .await } @@ -482,7 +450,7 @@ pub async fn query_pinned_balance< match (balance, args.token.as_ref()) { (Err(Error::Pinned(PinnedBalanceError::InvalidViewingKey)), _) => { display_line!( - IO, + context.io(), "Supplied viewing key cannot decode transactions to given \ payment address." ) @@ -492,47 +460,50 @@ pub async fn query_pinned_balance< _, ) => { display_line!( - IO, + context.io(), "Payment address {} has not yet been consumed.", owner ) } (Err(other), _) => { - display_line!(IO, "Error in Querying Pinned balance {}", other) + display_line!( + context.io(), + "Error in Querying Pinned balance {}", + other + ) } - (Ok((balance, epoch)), Some(token)) => { - let token_alias = wallet.lookup_alias(token); - - let total_balance = balance - .get(&(epoch, token.clone())) - .cloned() - .unwrap_or_default(); - - if total_balance.is_zero() { - display_line!( - IO, - "Payment address {} was consumed during epoch {}. \ - Received no shielded {}", - owner, - epoch, - token_alias - ); - } else { - let formatted = format_denominated_amount::<_, IO>( - client, - token, - total_balance.into(), - ) - .await; - display_line!( - IO, - "Payment address {} was consumed during epoch {}. 
\ - Received {} {}", - owner, - epoch, - formatted, - token_alias, - ); + (Ok((balance, epoch)), Some(base_token)) => { + let tokens = + query_tokens(context, Some(base_token), None).await; + for (token_alias, token) in &tokens { + let total_balance = balance + .get(&(epoch, token.clone())) + .cloned() + .unwrap_or_default(); + + if total_balance.is_zero() { + display_line!( + context.io(), + "Payment address {} was consumed during epoch {}. \ + Received no shielded {}", + owner, + epoch, + token_alias + ); + } else { + let formatted = context + .format_amount(token, total_balance.into()) + .await; + display_line!( + context.io(), + "Payment address {} was consumed during epoch {}. \ + Received {} {}", + owner, + epoch, + formatted, + token_alias, + ); + } } } (Ok((balance, epoch)), None) => { @@ -544,7 +515,7 @@ pub async fn query_pinned_balance< { if !found_any { display_line!( - IO, + context.io(), "Payment address {} was consumed during epoch {}. \ Received:", owner, @@ -552,21 +523,21 @@ pub async fn query_pinned_balance< ); found_any = true; } - let formatted = format_denominated_amount::<_, IO>( - client, - token_addr, - (*value).into(), - ) - .await; - let token_alias = tokens - .get(token_addr) - .map(|a| a.to_string()) - .unwrap_or_else(|| token_addr.to_string()); - display_line!(IO, " {}: {}", token_alias, formatted,); + let formatted = context + .format_amount(token_addr, (*value).into()) + .await; + let token_alias = + lookup_token_alias(context, token_addr, &masp()).await; + display_line!( + context.io(), + " {}: {}", + token_alias, + formatted, + ); } if !found_any { display_line!( - IO, + context.io(), "Payment address {} was consumed during epoch {}. 
\ Received no shielded assets.", owner, @@ -578,15 +549,15 @@ pub async fn query_pinned_balance< } } -async fn print_balances( - client: &C, - wallet: &Wallet, +async fn print_balances<'a>( + context: &impl Namada<'a>, balances: impl Iterator, token: Option<&Address>, target: Option<&Address>, ) { let stdout = io::stdout(); let mut w = stdout.lock(); + let wallet = context.wallet().await; let mut print_num = 0; let mut print_token = None; @@ -599,13 +570,13 @@ async fn print_balances( owner.clone(), format!( ": {}, owned by {}", - format_denominated_amount::<_, IO>(client, tok, balance) - .await, + context.format_amount(tok, balance).await, wallet.lookup_alias(owner) ), ), None => continue, }; + let token_alias = lookup_token_alias(context, &t, &o).await; // Get the token and the balance let (t, s) = match (token, target) { // the given token and the given target are the same as the @@ -628,20 +599,20 @@ async fn print_balances( // the token has been already printed } _ => { - let token_alias = wallet.lookup_alias(&t); - display_line!(IO, &mut w; "Token {}", token_alias).unwrap(); + display_line!(context.io(), &mut w; "Token {}", token_alias) + .unwrap(); print_token = Some(t); } } // Print the balance - display_line!(IO, &mut w; "{}", s).unwrap(); + display_line!(context.io(), &mut w; "{}", s).unwrap(); print_num += 1; } if print_num == 0 { match (token, target) { (Some(_), Some(target)) | (None, Some(target)) => display_line!( - IO, + context.io(), &mut w; "No balances owned by {}", wallet.lookup_alias(target) @@ -649,38 +620,126 @@ async fn print_balances( .unwrap(), (Some(token), None) => { let token_alias = wallet.lookup_alias(token); - display_line!(IO, &mut w; "No balances for token {}", token_alias).unwrap() + display_line!(context.io(), &mut w; "No balances for token {}", token_alias).unwrap() + } + (None, None) => { + display_line!(context.io(), &mut w; "No balances").unwrap() + } + } + } +} + +async fn lookup_token_alias<'a>( + context: &impl Namada<'a>, 
+ token: &Address, + owner: &Address, +) -> String { + if let Address::Internal(InternalAddress::IbcToken(trace_hash)) = token { + let ibc_denom_key = ibc_denom_key(owner.to_string(), trace_hash); + match query_storage_value::<_, String>(context.client(), &ibc_denom_key) + .await + { + Ok(ibc_denom) => get_ibc_denom_alias(context, ibc_denom).await, + Err(_) => token.to_string(), + } + } else { + context.wallet().await.lookup_alias(token) + } +} + +/// Returns pairs of token alias and token address +async fn query_tokens<'a>( + context: &impl Namada<'a>, + base_token: Option<&Address>, + owner: Option<&Address>, +) -> BTreeMap { + let wallet = context.wallet().await; + // Base tokens + let mut tokens = match base_token { + Some(base_token) => { + let mut map = BTreeMap::new(); + map.insert(wallet.lookup_alias(base_token), base_token.clone()); + map + } + None => wallet.tokens_with_aliases(), + }; + + let prefixes = match (base_token, owner) { + (Some(base_token), Some(owner)) => vec![ + ibc_denom_key_prefix(Some(base_token.to_string())), + ibc_denom_key_prefix(Some(owner.to_string())), + ], + (Some(base_token), None) => { + vec![ibc_denom_key_prefix(Some(base_token.to_string()))] + } + (None, Some(_)) => { + // Check all IBC denoms because the owner might not know IBC token + // transfers in the same chain + vec![ibc_denom_key_prefix(None)] + } + (None, None) => vec![ibc_denom_key_prefix(None)], + }; + + for prefix in prefixes { + let ibc_denoms = query_storage_prefix::(context, &prefix).await; + if let Some(ibc_denoms) = ibc_denoms { + for (key, ibc_denom) in ibc_denoms { + if let Some((_, hash)) = is_ibc_denom_key(&key) { + let ibc_denom_alias = + get_ibc_denom_alias(context, ibc_denom).await; + let ibc_token = + Address::Internal(InternalAddress::IbcToken(hash)); + tokens.insert(ibc_denom_alias, ibc_token); + } } - (None, None) => display_line!(IO, &mut w; "No balances").unwrap(), } } + tokens +} + +async fn get_ibc_denom_alias<'a>( + context: &impl Namada<'a>, + 
ibc_denom: impl AsRef, +) -> String { + let wallet = context.wallet().await; + is_ibc_denom(&ibc_denom) + .map(|(trace_path, base_token)| { + let base_token_alias = match Address::decode(&base_token) { + Ok(base_token) => wallet.lookup_alias(&base_token), + Err(_) => base_token, + }; + if trace_path.is_empty() { + base_token_alias + } else { + format!("{}/{}", trace_path, base_token_alias) + } + }) + .unwrap_or(ibc_denom.as_ref().to_string()) } /// Query Proposals -pub async fn query_proposal< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_proposal<'a>( + context: &impl Namada<'a>, args: args::QueryProposal, ) { - let current_epoch = query_and_print_epoch::<_, IO>(client).await; + let current_epoch = query_and_print_epoch(context).await; if let Some(id) = args.proposal_id { - let proposal = query_proposal_by_id(client, id).await.unwrap(); + let proposal = + query_proposal_by_id(context.client(), id).await.unwrap(); if let Some(proposal) = proposal { display_line!( - IO, + context.io(), "{}", proposal.to_string_with_status(current_epoch) ); } else { - edisplay_line!(IO, "No proposal found with id: {}", id); + edisplay_line!(context.io(), "No proposal found with id: {}", id); } } else { let last_proposal_id_key = governance_storage::get_counter_key(); - let last_proposal_id = - query_storage_value::(client, &last_proposal_id_key) + let last_proposal_id: u64 = + query_storage_value(context.client(), &last_proposal_id_key) .await .unwrap(); @@ -690,14 +749,14 @@ pub async fn query_proposal< 0 }; - display_line!(IO, "id: {}", last_proposal_id); + display_line!(context.io(), "id: {}", last_proposal_id); for id in from_id..last_proposal_id { - let proposal = query_proposal_by_id(client, id) + let proposal = query_proposal_by_id(context.client(), id) .await .unwrap() .expect("Proposal should be written to storage."); - display_line!(IO, "{}", proposal); + display_line!(context.io(), "{}", proposal); } } } @@ -707,18 +766,12 @@ 
pub async fn query_proposal_by_id( client: &C, proposal_id: u64, ) -> Result, error::Error> { - namada::sdk::rpc::query_proposal_by_id(client, proposal_id).await + namada_sdk::rpc::query_proposal_by_id(client, proposal_id).await } /// Query token shielded balance(s) -pub async fn query_shielded_balance< - C: namada::ledger::queries::Client + Sync, - U: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn query_shielded_balance<'a>( + context: &impl Namada<'a>, args: args::QueryBalance, ) { // Used to control whether balances for all keys or a specific key are @@ -730,68 +783,86 @@ pub async fn query_shielded_balance< // provided, then convert to a viewing key first. let viewing_keys = match owner { Some(viewing_key) => vec![viewing_key], - None => wallet.get_viewing_keys().values().copied().collect(), + None => context + .wallet() + .await + .get_viewing_keys() + .values() + .copied() + .collect(), }; - let _ = shielded.load().await; - let fvks: Vec<_> = viewing_keys - .iter() - .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) - .collect(); - shielded.fetch(client, &[], &fvks).await.unwrap(); - // Save the update state so that future fetches can be short-circuited - let _ = shielded.save().await; + { + let mut shielded = context.shielded_mut().await; + let _ = shielded.load().await; + let fvks: Vec<_> = viewing_keys + .iter() + .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) + .collect(); + shielded.fetch(context.client(), &[], &fvks).await.unwrap(); + // Save the update state so that future fetches can be short-circuited + let _ = shielded.save().await; + } // The epoch is required to identify timestamped tokens - let epoch = query_and_print_epoch::<_, IO>(client).await; + let epoch = query_and_print_epoch(context).await; // Map addresses to token names - let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); match (args.token, owner.is_some()) { // Here the user wants to know 
the balance for a specific token - (Some(token), true) => { - // Query the multi-asset balance at the given spending key - let viewing_key = - ExtendedFullViewingKey::from(viewing_keys[0]).fvk.vk; - let balance: MaspAmount = if no_conversions { - shielded - .compute_shielded_balance(client, &viewing_key) - .await - .unwrap() - .expect("context should contain viewing key") - } else { - shielded - .compute_exchanged_balance::<_, IO>( - client, - &viewing_key, - epoch, - ) - .await - .unwrap() - .expect("context should contain viewing key") - }; - - let token_alias = wallet.lookup_alias(&token); + (Some(base_token), true) => { + let tokens = + query_tokens(context, Some(&base_token), Some(&masp())).await; + for (token_alias, token) in tokens { + // Query the multi-asset balance at the given spending key + let viewing_key = + ExtendedFullViewingKey::from(viewing_keys[0]).fvk.vk; + let balance: MaspAmount = if no_conversions { + context + .shielded_mut() + .await + .compute_shielded_balance( + context.client(), + &viewing_key, + ) + .await + .unwrap() + .expect("context should contain viewing key") + } else { + context + .shielded_mut() + .await + .compute_exchanged_balance( + context.client(), + context.io(), + &viewing_key, + epoch, + ) + .await + .unwrap() + .expect("context should contain viewing key") + }; - let total_balance = balance - .get(&(epoch, token.clone())) - .cloned() - .unwrap_or_default(); - if total_balance.is_zero() { - display_line!( - IO, - "No shielded {} balance found for given key", - token_alias - ); - } else { - display_line!( - IO, - "{}: {}", - token_alias, - format_denominated_amount::<_, IO>( - client, - &token, - token::Amount::from(total_balance) - ) - .await - ); + let total_balance = balance + .get(&(epoch, token.clone())) + .cloned() + .unwrap_or_default(); + if total_balance.is_zero() { + display_line!( + context.io(), + "No shielded {} balance found for given key", + token_alias + ); + } else { + display_line!( + context.io(), + 
"{}: {}", + token_alias, + context + .format_amount( + &token, + token::Amount::from(total_balance), + ) + .await + ); + } } } // Here the user wants to know the balance of all tokens across users @@ -802,15 +873,23 @@ pub async fn query_shielded_balance< // Query the multi-asset balance at the given spending key let viewing_key = ExtendedFullViewingKey::from(fvk).fvk.vk; let balance = if no_conversions { - shielded - .compute_shielded_balance(client, &viewing_key) + context + .shielded_mut() + .await + .compute_shielded_balance( + context.client(), + &viewing_key, + ) .await .unwrap() .expect("context should contain viewing key") } else { - shielded - .compute_exchanged_balance::<_, IO>( - client, + context + .shielded_mut() + .await + .compute_exchanged_balance( + context.client(), + context.io(), &viewing_key, epoch, ) @@ -836,7 +915,7 @@ pub async fn query_shielded_balance< // hashtable creation any uglier if balances.is_empty() { display_line!( - IO, + context.io(), "No shielded {} balance found for any wallet key", &token_addr ); @@ -848,77 +927,81 @@ pub async fn query_shielded_balance< } for ((fvk, token), token_balance) in balance_map { // Only assets with the current timestamp count - let alias = tokens - .get(&token) - .map(|a| a.to_string()) - .unwrap_or_else(|| token.to_string()); - display_line!(IO, "Shielded Token {}:", alias); - let formatted = format_denominated_amount::<_, IO>( - client, - &token, - token_balance.into(), - ) - .await; - display_line!(IO, " {}, owned by {}", formatted, fvk); + let alias = lookup_token_alias(context, &token, &masp()).await; + display_line!(context.io(), "Shielded Token {}:", alias); + let formatted = + context.format_amount(&token, token_balance.into()).await; + display_line!( + context.io(), + " {}, owned by {}", + formatted, + fvk + ); } } // Here the user wants to know the balance for a specific token across // users - (Some(token), false) => { - // Compute the unique asset identifier from the token address - let 
token = token; - let _asset_type = AssetType::new( - (token.clone(), epoch.0) - .try_to_vec() - .expect("token addresses should serialize") - .as_ref(), - ) - .unwrap(); - let token_alias = wallet.lookup_alias(&token); - display_line!(IO, "Shielded Token {}:", token_alias); - let mut found_any = false; - let token_alias = wallet.lookup_alias(&token); - display_line!(IO, "Shielded Token {}:", token_alias,); - for fvk in viewing_keys { - // Query the multi-asset balance at the given spending key - let viewing_key = ExtendedFullViewingKey::from(fvk).fvk.vk; - let balance = if no_conversions { - shielded - .compute_shielded_balance(client, &viewing_key) - .await - .unwrap() - .expect("context should contain viewing key") - } else { - shielded - .compute_exchanged_balance::<_, IO>( - client, - &viewing_key, - epoch, - ) - .await - .unwrap() - .expect("context should contain viewing key") - }; + (Some(base_token), false) => { + let tokens = query_tokens(context, Some(&base_token), None).await; + for (token_alias, token) in tokens { + // Compute the unique asset identifier from the token address + let token = token; + let _asset_type = AssetType::new( + (token.clone(), epoch.0).serialize_to_vec().as_ref(), + ) + .unwrap(); + let mut found_any = false; + display_line!(context.io(), "Shielded Token {}:", token_alias); + for fvk in &viewing_keys { + // Query the multi-asset balance at the given spending key + let viewing_key = ExtendedFullViewingKey::from(*fvk).fvk.vk; + let balance = if no_conversions { + context + .shielded_mut() + .await + .compute_shielded_balance( + context.client(), + &viewing_key, + ) + .await + .unwrap() + .expect("context should contain viewing key") + } else { + context + .shielded_mut() + .await + .compute_exchanged_balance( + context.client(), + context.io(), + &viewing_key, + epoch, + ) + .await + .unwrap() + .expect("context should contain viewing key") + }; - for ((_, address), val) in balance.iter() { - if !val.is_zero() { - found_any = true; 
+ for ((_, address), val) in balance.iter() { + if !val.is_zero() { + found_any = true; + } + let formatted = + context.format_amount(address, (*val).into()).await; + display_line!( + context.io(), + " {}, owned by {}", + formatted, + fvk + ); } - let formatted = format_denominated_amount::<_, IO>( - client, - address, - (*val).into(), - ) - .await; - display_line!(IO, " {}, owned by {}", formatted, fvk); } - } - if !found_any { - display_line!( - IO, - "No shielded {} balance found for any wallet key", - token_alias, - ); + if !found_any { + display_line!( + context.io(), + "No shielded {} balance found for any wallet key", + token_alias, + ); + } } } // Here the user wants to know all possible token balances for a key @@ -927,20 +1010,22 @@ pub async fn query_shielded_balance< let viewing_key = ExtendedFullViewingKey::from(viewing_keys[0]).fvk.vk; if no_conversions { - let balance = shielded - .compute_shielded_balance(client, &viewing_key) + let balance = context + .shielded_mut() + .await + .compute_shielded_balance(context.client(), &viewing_key) .await .unwrap() .expect("context should contain viewing key"); // Print balances by human-readable token names - print_decoded_balance_with_epoch::<_, IO>( - client, wallet, balance, - ) - .await; + print_decoded_balance_with_epoch(context, balance).await; } else { - let balance = shielded - .compute_exchanged_balance::<_, IO>( - client, + let balance = context + .shielded_mut() + .await + .compute_exchanged_balance( + context.client(), + context.io(), &viewing_key, epoch, ) @@ -948,55 +1033,44 @@ pub async fn query_shielded_balance< .unwrap() .expect("context should contain viewing key"); // Print balances by human-readable token names - print_decoded_balance::<_, IO>(client, wallet, balance, epoch) - .await; + print_decoded_balance(context, balance, epoch).await; } } } } -pub async fn print_decoded_balance< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn 
print_decoded_balance<'a>( + context: &impl Namada<'a>, decoded_balance: MaspAmount, epoch: Epoch, ) { if decoded_balance.is_empty() { - display_line!(IO, "No shielded balance found for given key"); + display_line!(context.io(), "No shielded balance found for given key"); } else { for ((_, token_addr), amount) in decoded_balance .iter() .filter(|((token_epoch, _), _)| *token_epoch == epoch) { display_line!( - IO, + context.io(), "{} : {}", - wallet.lookup_alias(token_addr), - format_denominated_amount::<_, IO>( - client, - token_addr, - (*amount).into() - ) - .await, + lookup_token_alias(context, token_addr, &masp()).await, + context.format_amount(token_addr, (*amount).into()).await, ); } } } -pub async fn print_decoded_balance_with_epoch< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn print_decoded_balance_with_epoch<'a>( + context: &impl Namada<'a>, decoded_balance: MaspAmount, ) { - let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); + let tokens = context + .wallet() + .await + .get_addresses_with_vp_type(AddressVpType::Token); if decoded_balance.is_empty() { - display_line!(IO, "No shielded balance found for given key"); + display_line!(context.io(), "No shielded balance found for given key"); } for ((epoch, token_addr), value) in decoded_balance.iter() { let asset_value = (*value).into(); @@ -1005,12 +1079,11 @@ pub async fn print_decoded_balance_with_epoch< .map(|a| a.to_string()) .unwrap_or_else(|| token_addr.to_string()); display_line!( - IO, + context.io(), "{} | {} : {}", alias, epoch, - format_denominated_amount::<_, IO>(client, token_addr, asset_value) - .await, + context.format_amount(token_addr, asset_value).await, ); } } @@ -1021,50 +1094,65 @@ pub async fn get_token_balance( token: &Address, owner: &Address, ) -> token::Amount { - namada::sdk::rpc::get_token_balance(client, token, owner) + namada_sdk::rpc::get_token_balance(client, token, owner) .await .unwrap() } -pub 
async fn query_proposal_result< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_proposal_result<'a>( + context: &impl Namada<'a>, args: args::QueryProposalResult, ) { if args.proposal_id.is_some() { let proposal_id = args.proposal_id.expect("Proposal id should be defined."); let proposal = if let Some(proposal) = - query_proposal_by_id(client, proposal_id).await.unwrap() + query_proposal_by_id(context.client(), proposal_id) + .await + .unwrap() { proposal } else { - edisplay_line!(IO, "Proposal {} not found.", proposal_id); + edisplay_line!(context.io(), "Proposal {} not found.", proposal_id); return; }; - let is_author_steward = query_pgf_stewards(client) - .await - .iter() - .any(|steward| steward.address.eq(&proposal.author)); - let tally_type = proposal.get_tally_type(is_author_steward); - let total_voting_power = - get_total_staked_tokens(client, proposal.voting_end_epoch).await; - - let votes = compute_proposal_votes( - client, - proposal_id, - proposal.voting_end_epoch, - ) - .await; - + let proposal_result_key = + governance_storage::get_proposal_result_key(proposal_id); let proposal_result = - compute_proposal_result(votes, total_voting_power, tally_type); + // Try to directly query the result in storage first + match query_storage_value(context.client(), &proposal_result_key).await { + Ok(result) => result, + Err(_) => { + // If failure, run the tally + let is_author_steward = query_pgf_stewards(context.client()) + .await + .iter() + .any(|steward| steward.address.eq(&proposal.author)); + let tally_type = proposal.get_tally_type(is_author_steward); + let total_voting_power = get_total_staked_tokens( + context.client(), + proposal.voting_end_epoch, + ) + .await; + + let votes = compute_proposal_votes( + context.client(), + proposal_id, + proposal.voting_end_epoch, + ) + .await; - display_line!(IO, "Proposal Id: {} ", proposal_id); - display_line!(IO, "{:4}{}", "", proposal_result); + compute_proposal_result( + 
votes, + total_voting_power, + tally_type, + ) + } + }; + + display_line!(context.io(), "Proposal Id: {} ", proposal_id); + display_line!(context.io(), "{:4}{}", "", proposal_result); } else { let proposal_folder = args.proposal_folder.expect( "The argument --proposal-folder is required with --offline.", @@ -1085,11 +1173,13 @@ pub async fn query_proposal_result< serde_json::from_reader(proposal_file) .expect("file should be proper JSON"); - let author_account = - rpc::get_account_info(client, &proposal.proposal.author) - .await - .unwrap() - .expect("Account should exist."); + let author_account = rpc::get_account_info( + context.client(), + &proposal.proposal.author, + ) + .await + .unwrap() + .expect("Account should exist."); let proposal = proposal.validate( &author_account.public_keys_map, @@ -1100,12 +1190,15 @@ pub async fn query_proposal_result< if proposal.is_ok() { proposal.unwrap() } else { - edisplay_line!(IO, "The offline proposal is not valid."); + edisplay_line!( + context.io(), + "The offline proposal is not valid." + ); return; } } else { edisplay_line!( - IO, + context.io(), "Couldn't find a file name offline_proposal_*.json." 
); return; @@ -1121,15 +1214,14 @@ pub async fn query_proposal_result< }) .collect::>(); - let proposal_votes = compute_offline_proposal_votes::<_, IO>( - client, - &proposal, - votes.clone(), + let proposal_votes = + compute_offline_proposal_votes(context, &proposal, votes.clone()) + .await; + let total_voting_power = get_total_staked_tokens( + context.client(), + proposal.proposal.tally_epoch, ) .await; - let total_voting_power = - get_total_staked_tokens(client, proposal.proposal.tally_epoch) - .await; let proposal_result = compute_proposal_result( proposal_votes, @@ -1137,51 +1229,54 @@ pub async fn query_proposal_result< TallyType::TwoThird, ); - display_line!(IO, "Proposal offline: {}", proposal.proposal.hash()); - display_line!(IO, "Parsed {} votes.", votes.len()); - display_line!(IO, "{:4}{}", "", proposal_result); + display_line!( + context.io(), + "Proposal offline: {}", + proposal.proposal.hash() + ); + display_line!(context.io(), "Parsed {} votes.", votes.len()); + display_line!(context.io(), "{:4}{}", "", proposal_result); } } -pub async fn query_account< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_account<'a>( + context: &impl Namada<'a>, args: args::QueryAccount, ) { - let account = rpc::get_account_info(client, &args.owner).await.unwrap(); + let account = rpc::get_account_info(context.client(), &args.owner) + .await + .unwrap(); if let Some(account) = account { - display_line!(IO, "Address: {}", account.address); - display_line!(IO, "Threshold: {}", account.threshold); - display_line!(IO, "Public keys:"); + display_line!(context.io(), "Address: {}", account.address); + display_line!(context.io(), "Threshold: {}", account.threshold); + display_line!(context.io(), "Public keys:"); for (public_key, _) in account.public_keys_map.pk_to_idx { - display_line!(IO, "- {}", public_key); + display_line!(context.io(), "- {}", public_key); } } else { - display_line!(IO, "No account exists for {}", args.owner); + 
display_line!(context.io(), "No account exists for {}", args.owner); } } -pub async fn query_pgf( - client: &C, - _args: args::QueryPgf, -) { - let stewards = query_pgf_stewards(client).await; - let fundings = query_pgf_fundings(client).await; +pub async fn query_pgf<'a>(context: &impl Namada<'a>, _args: args::QueryPgf) { + let stewards = query_pgf_stewards(context.client()).await; + let fundings = query_pgf_fundings(context.client()).await; match stewards.is_empty() { true => { - display_line!(IO, "Pgf stewards: no stewards are currectly set.") + display_line!( + context.io(), + "Pgf stewards: no stewards are currectly set." + ) } false => { - display_line!(IO, "Pgf stewards:"); + display_line!(context.io(), "Pgf stewards:"); for steward in stewards { - display_line!(IO, "{:4}- {}", "", steward.address); - display_line!(IO, "{:4} Reward distribution:", ""); + display_line!(context.io(), "{:4}- {}", "", steward.address); + display_line!(context.io(), "{:4} Reward distribution:", ""); for (address, percentage) in steward.reward_distribution { display_line!( - IO, + context.io(), "{:6}- {} to {}", "", percentage, @@ -1194,13 +1289,16 @@ pub async fn query_pgf( match fundings.is_empty() { true => { - display_line!(IO, "Pgf fundings: no fundings are currently set.") + display_line!( + context.io(), + "Pgf fundings: no fundings are currently set." 
+ ) } false => { - display_line!(IO, "Pgf fundings:"); + display_line!(context.io(), "Pgf fundings:"); for funding in fundings { display_line!( - IO, + context.io(), "{:4}- {} for {}", "", funding.detail.target, @@ -1211,180 +1309,198 @@ pub async fn query_pgf( } } -pub async fn query_protocol_parameters< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_protocol_parameters<'a>( + context: &impl Namada<'a>, _args: args::QueryProtocolParameters, ) { - let governance_parameters = query_governance_parameters(client).await; - display_line!(IO, "Governance Parameters\n"); + let governance_parameters = + query_governance_parameters(context.client()).await; + display_line!(context.io(), "Governance Parameters\n"); display_line!( - IO, + context.io(), "{:4}Min. proposal fund: {}", "", governance_parameters.min_proposal_fund.to_string_native() ); display_line!( - IO, + context.io(), "{:4}Max. proposal code size: {}", "", governance_parameters.max_proposal_code_size ); display_line!( - IO, + context.io(), "{:4}Min. proposal voting period: {}", "", governance_parameters.min_proposal_voting_period ); display_line!( - IO, + context.io(), "{:4}Max. proposal period: {}", "", governance_parameters.max_proposal_period ); display_line!( - IO, + context.io(), "{:4}Max. proposal content size: {}", "", governance_parameters.max_proposal_content_size ); display_line!( - IO, + context.io(), "{:4}Min. 
proposal grace epochs: {}", "", governance_parameters.min_proposal_grace_epochs ); - let pgf_parameters = query_pgf_parameters(client).await; - display_line!(IO, "Public Goods Funding Parameters\n"); + let pgf_parameters = query_pgf_parameters(context.client()).await; + display_line!(context.io(), "Public Goods Funding Parameters\n"); display_line!( - IO, + context.io(), "{:4}Pgf inflation rate: {}", "", pgf_parameters.pgf_inflation_rate ); display_line!( - IO, + context.io(), "{:4}Steward inflation rate: {}", "", pgf_parameters.stewards_inflation_rate ); - display_line!(IO, "Protocol parameters"); + display_line!(context.io(), "Protocol parameters"); let key = param_storage::get_epoch_duration_storage_key(); - let epoch_duration = query_storage_value::(client, &key) - .await - .expect("Parameter should be definied."); + let epoch_duration: EpochDuration = + query_storage_value(context.client(), &key) + .await + .expect("Parameter should be definied."); display_line!( - IO, + context.io(), "{:4}Min. epoch duration: {}", "", epoch_duration.min_duration ); display_line!( - IO, + context.io(), "{:4}Min. number of blocks: {}", "", epoch_duration.min_num_of_blocks ); let key = param_storage::get_max_expected_time_per_block_key(); - let max_block_duration = query_storage_value::(client, &key) + let max_block_duration: u64 = query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); - display_line!(IO, "{:4}Max. block duration: {}", "", max_block_duration); + display_line!( + context.io(), + "{:4}Max. 
block duration: {}", + "", + max_block_duration + ); let key = param_storage::get_tx_whitelist_storage_key(); - let vp_whitelist = query_storage_value::>(client, &key) + let vp_whitelist: Vec = query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); - display_line!(IO, "{:4}VP whitelist: {:?}", "", vp_whitelist); + display_line!(context.io(), "{:4}VP whitelist: {:?}", "", vp_whitelist); let key = param_storage::get_tx_whitelist_storage_key(); - let tx_whitelist = query_storage_value::>(client, &key) + let tx_whitelist: Vec = query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); - display_line!(IO, "{:4}Transactions whitelist: {:?}", "", tx_whitelist); + display_line!( + context.io(), + "{:4}Transactions whitelist: {:?}", + "", + tx_whitelist + ); let key = param_storage::get_max_block_gas_key(); - let max_block_gas = query_storage_value::(client, &key) + let max_block_gas: u64 = query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); - display_line!(IO, "{:4}Max block gas: {:?}", "", max_block_gas); + display_line!(context.io(), "{:4}Max block gas: {:?}", "", max_block_gas); let key = param_storage::get_fee_unshielding_gas_limit_key(); - let fee_unshielding_gas_limit = query_storage_value::(client, &key) - .await - .expect("Parameter should be defined."); + let fee_unshielding_gas_limit: u64 = + query_storage_value(context.client(), &key) + .await + .expect("Parameter should be defined."); display_line!( - IO, + context.io(), "{:4}Fee unshielding gas limit: {:?}", "", fee_unshielding_gas_limit ); let key = param_storage::get_fee_unshielding_descriptions_limit_key(); - let fee_unshielding_descriptions_limit = - query_storage_value::(client, &key) + let fee_unshielding_descriptions_limit: u64 = + query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); display_line!( - IO, + context.io(), "{:4}Fee unshielding descriptions 
limit: {:?}", "", fee_unshielding_descriptions_limit ); let key = param_storage::get_gas_cost_key(); - let gas_cost_table = query_storage_value::< - C, - BTreeMap, - >(client, &key) - .await - .expect("Parameter should be defined."); - display_line!(IO, "{:4}Gas cost table:", ""); + let gas_cost_table: BTreeMap = + query_storage_value(context.client(), &key) + .await + .expect("Parameter should be defined."); + display_line!(context.io(), "{:4}Gas cost table:", ""); for (token, gas_cost) in gas_cost_table { - display_line!(IO, "{:8}{}: {:?}", "", token, gas_cost); + display_line!(context.io(), "{:8}{}: {:?}", "", token, gas_cost); } - display_line!(IO, "PoS parameters"); - let pos_params = query_pos_parameters(client).await; + display_line!(context.io(), "PoS parameters"); + let pos_params = query_pos_parameters(context.client()).await; display_line!( - IO, + context.io(), "{:4}Block proposer reward: {}", "", pos_params.block_proposer_reward ); display_line!( - IO, + context.io(), "{:4}Block vote reward: {}", "", pos_params.block_vote_reward ); display_line!( - IO, + context.io(), "{:4}Duplicate vote minimum slash rate: {}", "", pos_params.duplicate_vote_min_slash_rate ); display_line!( - IO, + context.io(), "{:4}Light client attack minimum slash rate: {}", "", pos_params.light_client_attack_min_slash_rate ); display_line!( - IO, + context.io(), "{:4}Max. 
validator slots: {}", "", pos_params.max_validator_slots ); - display_line!(IO, "{:4}Pipeline length: {}", "", pos_params.pipeline_len); - display_line!(IO, "{:4}Unbonding length: {}", "", pos_params.unbonding_len); display_line!( - IO, + context.io(), + "{:4}Pipeline length: {}", + "", + pos_params.pipeline_len + ); + display_line!( + context.io(), + "{:4}Unbonding length: {}", + "", + pos_params.unbonding_len + ); + display_line!( + context.io(), "{:4}Votes per token: {}", "", pos_params.tm_votes_per_token @@ -1443,18 +1559,16 @@ pub async fn query_pgf_parameters( unwrap_client_response::(RPC.vp().pgf().parameters(client).await) } -pub async fn query_and_print_unbonds< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_and_print_unbonds<'a>( + context: &impl Namada<'a>, source: &Address, validator: &Address, ) { - let unbonds = query_unbond_with_slashing(client, source, validator).await; - let current_epoch = query_epoch(client).await.unwrap(); + let unbonds = + query_unbond_with_slashing(context.client(), source, validator).await; + let current_epoch = query_epoch(context.client()).await.unwrap(); - let mut total_withdrawable = token::Amount::default(); + let mut total_withdrawable = token::Amount::zero(); let mut not_yet_withdrawable = HashMap::::new(); for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { if withdraw_epoch <= current_epoch { @@ -1465,19 +1579,19 @@ pub async fn query_and_print_unbonds< *withdrawable_amount += amount; } } - if total_withdrawable != token::Amount::default() { + if !total_withdrawable.is_zero() { display_line!( - IO, + context.io(), "Total withdrawable now: {}.", total_withdrawable.to_string_native() ); } if !not_yet_withdrawable.is_empty() { - display_line!(IO, "Current epoch: {current_epoch}."); + display_line!(context.io(), "Current epoch: {current_epoch}."); } for (withdraw_epoch, amount) in not_yet_withdrawable { display_line!( - IO, + context.io(), "Amount {} 
withdrawable starting from epoch {withdraw_epoch}.", amount.to_string_native(), ); @@ -1501,12 +1615,11 @@ pub async fn query_withdrawable_tokens< } /// Query PoS bond(s) and unbond(s) -pub async fn query_bonds( - client: &C, - _wallet: &mut Wallet, +pub async fn query_bonds<'a>( + context: &impl Namada<'a>, args: args::QueryBonds, ) -> std::io::Result<()> { - let epoch = query_and_print_epoch::<_, IO>(client).await; + let epoch = query_and_print_epoch(context).await; let source = args.owner; let validator = args.validator; @@ -1514,10 +1627,14 @@ pub async fn query_bonds( let stdout = io::stdout(); let mut w = stdout.lock(); - let bonds_and_unbonds = - enriched_bonds_and_unbonds(client, epoch, &source, &validator) - .await - .unwrap(); + let bonds_and_unbonds = enriched_bonds_and_unbonds( + context.client(), + epoch, + &source, + &validator, + ) + .await + .unwrap(); for (bond_id, details) in &bonds_and_unbonds.data { let bond_type = if bond_id.source == bond_id.validator { @@ -1528,26 +1645,26 @@ pub async fn query_bonds( bond_id.source, bond_id.validator ) }; - display_line!(IO, &mut w; "{}:", bond_type)?; + display_line!(context.io(), &mut w; "{}:", bond_type)?; for bond in &details.data.bonds { display_line!( - IO, + context.io(), &mut w; " Remaining active bond from epoch {}: Δ {}", bond.start, bond.amount.to_string_native() )?; } - if details.bonds_total != token::Amount::zero() { + if !details.bonds_total.is_zero() { display_line!( - IO, + context.io(), &mut w; "Active (slashed) bonds total: {}", details.bonds_total_active().to_string_native() )?; } - display_line!(IO, &mut w; "Bonds total: {}", details.bonds_total.to_string_native())?; - display_line!(IO, &mut w; "")?; + display_line!(context.io(), &mut w; "Bonds total: {}", details.bonds_total.to_string_native())?; + display_line!(context.io(), &mut w; "")?; if !details.data.unbonds.is_empty() { let bond_type = if bond_id.source == bond_id.validator { @@ -1555,10 +1672,10 @@ pub async fn query_bonds( } 
else { format!("Unbonded delegations from {}", bond_id.source) }; - display_line!(IO, &mut w; "{}:", bond_type)?; + display_line!(context.io(), &mut w; "{}:", bond_type)?; for unbond in &details.data.unbonds { display_line!( - IO, + context.io(), &mut w; " Withdrawable from epoch {} (active from {}): Δ {}", unbond.withdraw, @@ -1567,30 +1684,30 @@ pub async fn query_bonds( )?; } display_line!( - IO, + context.io(), &mut w; "Unbonded total: {}", details.unbonds_total.to_string_native() )?; } display_line!( - IO, + context.io(), &mut w; "Withdrawable total: {}", details.total_withdrawable.to_string_native() )?; - display_line!(IO, &mut w; "")?; + display_line!(context.io(), &mut w; "")?; } if bonds_and_unbonds.bonds_total != bonds_and_unbonds.bonds_total_slashed { display_line!( - IO, + context.io(), &mut w; "All bonds total active: {}", bonds_and_unbonds.bonds_total_active().to_string_native() )?; } display_line!( - IO, + context.io(), &mut w; "All bonds total: {}", bonds_and_unbonds.bonds_total.to_string_native() @@ -1600,20 +1717,20 @@ pub async fn query_bonds( != bonds_and_unbonds.unbonds_total_slashed { display_line!( - IO, + context.io(), &mut w; "All unbonds total active: {}", bonds_and_unbonds.unbonds_total_active().to_string_native() )?; } display_line!( - IO, + context.io(), &mut w; "All unbonds total: {}", bonds_and_unbonds.unbonds_total.to_string_native() )?; display_line!( - IO, + context.io(), &mut w; "All unbonds total withdrawable: {}", bonds_and_unbonds.total_withdrawable.to_string_native() @@ -1622,51 +1739,55 @@ pub async fn query_bonds( } /// Query PoS bonded stake -pub async fn query_bonded_stake< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_bonded_stake<'a, N: Namada<'a>>( + context: &N, args: args::QueryBondedStake, ) { let epoch = match args.epoch { Some(epoch) => epoch, - None => query_and_print_epoch::<_, IO>(client).await, + None => query_and_print_epoch(context).await, }; match args.validator 
{ Some(validator) => { let validator = validator; // Find bonded stake for the given validator - let stake = get_validator_stake(client, epoch, &validator).await; + let stake = + get_validator_stake(context.client(), epoch, &validator).await; match stake { Some(stake) => { // TODO: show if it's in consensus set, below capacity, or // below threshold set display_line!( - IO, + context.io(), "Bonded stake of validator {validator}: {}", stake.to_string_native() ) } None => { - display_line!(IO, "No bonded stake found for {validator}"); + display_line!( + context.io(), + "No bonded stake found for {validator}" + ); } } } None => { - let consensus = - unwrap_client_response::>( + let consensus: BTreeSet = + unwrap_client_response::( RPC.vp() .pos() - .consensus_validator_set(client, &Some(epoch)) + .consensus_validator_set(context.client(), &Some(epoch)) .await, ); - let below_capacity = - unwrap_client_response::>( + let below_capacity: BTreeSet = + unwrap_client_response::( RPC.vp() .pos() - .below_capacity_validator_set(client, &Some(epoch)) + .below_capacity_validator_set( + context.client(), + &Some(epoch), + ) .await, ); @@ -1674,10 +1795,11 @@ pub async fn query_bonded_stake< let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, &mut w; "Consensus validators:").unwrap(); + display_line!(context.io(), &mut w; "Consensus validators:") + .unwrap(); for val in consensus.into_iter().rev() { display_line!( - IO, + context.io(), &mut w; " {}: {}", val.address.encode(), @@ -1686,11 +1808,11 @@ pub async fn query_bonded_stake< .unwrap(); } if !below_capacity.is_empty() { - display_line!(IO, &mut w; "Below capacity validators:") + display_line!(context.io(), &mut w; "Below capacity validators:") .unwrap(); for val in below_capacity.into_iter().rev() { display_line!( - IO, + context.io(), &mut w; " {}: {}", val.address.encode(), @@ -1702,9 +1824,10 @@ pub async fn query_bonded_stake< } } - let total_staked_tokens = get_total_staked_tokens(client, 
epoch).await; + let total_staked_tokens = + get_total_staked_tokens(context.client(), epoch).await; display_line!( - IO, + context.io(), "Total bonded stake: {}", total_staked_tokens.to_string_native() ); @@ -1744,47 +1867,43 @@ pub async fn query_validator_state< } /// Query a validator's state information -pub async fn query_and_print_validator_state< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - _wallet: &mut Wallet, +pub async fn query_and_print_validator_state<'a>( + context: &impl Namada<'a>, args: args::QueryValidatorState, ) { let validator = args.validator; let state: Option = - query_validator_state(client, &validator, args.epoch).await; + query_validator_state(context.client(), &validator, args.epoch).await; match state { Some(state) => match state { ValidatorState::Consensus => { display_line!( - IO, + context.io(), "Validator {validator} is in the consensus set" ) } ValidatorState::BelowCapacity => { display_line!( - IO, + context.io(), "Validator {validator} is in the below-capacity set" ) } ValidatorState::BelowThreshold => { display_line!( - IO, + context.io(), "Validator {validator} is in the below-threshold set" ) } ValidatorState::Inactive => { - display_line!(IO, "Validator {validator} is inactive") + display_line!(context.io(), "Validator {validator} is inactive") } ValidatorState::Jailed => { - display_line!(IO, "Validator {validator} is jailed") + display_line!(context.io(), "Validator {validator} is jailed") } }, None => display_line!( - IO, + context.io(), "Validator {validator} is either not a validator, or an epoch \ before the current epoch has been queried (and the validator \ state information is no longer stored)" @@ -1793,25 +1912,21 @@ pub async fn query_and_print_validator_state< } /// Query PoS validator's commission rate information -pub async fn query_and_print_commission_rate< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - _wallet: &mut Wallet, +pub async fn 
query_and_print_commission_rate<'a>( + context: &impl Namada<'a>, args: args::QueryCommissionRate, ) { let validator = args.validator; let info: Option = - query_commission_rate(client, &validator, args.epoch).await; + query_commission_rate(context.client(), &validator, args.epoch).await; match info { Some(CommissionPair { commission_rate: rate, max_commission_change_per_epoch: change, }) => { display_line!( - IO, + context.io(), "Validator {} commission rate: {}, max change per epoch: {}", validator.encode(), rate, @@ -1820,7 +1935,7 @@ pub async fn query_and_print_commission_rate< } None => { display_line!( - IO, + context.io(), "Address {} is not a validator (did not find commission rate \ and max change)", validator.encode(), @@ -1830,28 +1945,27 @@ pub async fn query_and_print_commission_rate< } /// Query PoS slashes -pub async fn query_slashes< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - _wallet: &mut Wallet, +pub async fn query_slashes<'a, N: Namada<'a>>( + context: &N, args: args::QuerySlashes, ) { match args.validator { Some(validator) => { let validator = validator; // Find slashes for the given validator - let slashes: Vec = unwrap_client_response::>( - RPC.vp().pos().validator_slashes(client, &validator).await, + let slashes: Vec = unwrap_client_response::( + RPC.vp() + .pos() + .validator_slashes(context.client(), &validator) + .await, ); if !slashes.is_empty() { - display_line!(IO, "Processed slashes:"); + display_line!(context.io(), "Processed slashes:"); let stdout = io::stdout(); let mut w = stdout.lock(); for slash in slashes { display_line!( - IO, + context.io(), &mut w; "Infraction epoch {}, block height {}, type {}, rate \ {}", @@ -1864,7 +1978,7 @@ pub async fn query_slashes< } } else { display_line!( - IO, + context.io(), "No processed slashes found for {}", validator.encode() ) @@ -1874,20 +1988,26 @@ pub async fn query_slashes< let enqueued_slashes: HashMap< Address, BTreeMap>, - > = unwrap_client_response::< 
- C, - HashMap>>, - >(RPC.vp().pos().enqueued_slashes(client).await); + > = unwrap_client_response::( + RPC.vp().pos().enqueued_slashes(context.client()).await, + ); let enqueued_slashes = enqueued_slashes.get(&validator).cloned(); if let Some(enqueued) = enqueued_slashes { - display_line!(IO, "\nEnqueued slashes for future processing"); + display_line!( + context.io(), + "\nEnqueued slashes for future processing" + ); for (epoch, slashes) in enqueued { - display_line!(IO, "To be processed in epoch {}", epoch); + display_line!( + context.io(), + "To be processed in epoch {}", + epoch + ); for slash in slashes { let stdout = io::stdout(); let mut w = stdout.lock(); display_line!( - IO, + context.io(), &mut w; "Infraction epoch {}, block height {}, type {}", slash.epoch, slash.block_height, slash.r#type, @@ -1897,7 +2017,7 @@ pub async fn query_slashes< } } else { display_line!( - IO, + context.io(), "No enqueued slashes found for {}", validator.encode() ) @@ -1905,18 +2025,18 @@ pub async fn query_slashes< } None => { let all_slashes: HashMap> = - unwrap_client_response::>>( - RPC.vp().pos().slashes(client).await, + unwrap_client_response::( + RPC.vp().pos().slashes(context.client()).await, ); if !all_slashes.is_empty() { let stdout = io::stdout(); let mut w = stdout.lock(); - display_line!(IO, "Processed slashes:"); + display_line!(context.io(), "Processed slashes:"); for (validator, slashes) in all_slashes.into_iter() { for slash in slashes { display_line!( - IO, + context.io(), &mut w; "Infraction epoch {}, block height {}, rate {}, \ type {}, validator {}", @@ -1930,7 +2050,7 @@ pub async fn query_slashes< } } } else { - display_line!(IO, "No processed slashes found") + display_line!(context.io(), "No processed slashes found") } // Find enqueued slashes to be processed in the future for the given @@ -1938,16 +2058,18 @@ pub async fn query_slashes< let enqueued_slashes: HashMap< Address, BTreeMap>, - > = unwrap_client_response::< - C, - HashMap>>, - 
>(RPC.vp().pos().enqueued_slashes(client).await); + > = unwrap_client_response::( + RPC.vp().pos().enqueued_slashes(context.client()).await, + ); if !enqueued_slashes.is_empty() { - display_line!(IO, "\nEnqueued slashes for future processing"); + display_line!( + context.io(), + "\nEnqueued slashes for future processing" + ); for (validator, slashes_by_epoch) in enqueued_slashes { for (epoch, slashes) in slashes_by_epoch { display_line!( - IO, + context.io(), "\nTo be processed in epoch {}", epoch ); @@ -1955,7 +2077,7 @@ pub async fn query_slashes< let stdout = io::stdout(); let mut w = stdout.lock(); display_line!( - IO, + context.io(), &mut w; "Infraction epoch {}, block height {}, type \ {}, validator {}", @@ -1970,7 +2092,7 @@ pub async fn query_slashes< } } else { display_line!( - IO, + context.io(), "\nNo enqueued slashes found for future processing" ) } @@ -1978,55 +2100,57 @@ pub async fn query_slashes< } } -pub async fn query_delegations< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - _wallet: &mut Wallet, +pub async fn query_delegations<'a, N: Namada<'a>>( + context: &N, args: args::QueryDelegations, ) { let owner = args.owner; - let delegations = unwrap_client_response::>( - RPC.vp().pos().delegation_validators(client, &owner).await, + let delegations: HashSet
= unwrap_client_response::( + RPC.vp() + .pos() + .delegation_validators(context.client(), &owner) + .await, ); if delegations.is_empty() { - display_line!(IO, "No delegations found"); + display_line!(context.io(), "No delegations found"); } else { - display_line!(IO, "Found delegations to:"); + display_line!(context.io(), "Found delegations to:"); for delegation in delegations { - display_line!(IO, " {delegation}"); + display_line!(context.io(), " {delegation}"); } } } -pub async fn query_find_validator< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_find_validator<'a, N: Namada<'a>>( + context: &N, args: args::QueryFindValidator, ) { let args::QueryFindValidator { query: _, tm_addr } = args; if tm_addr.len() != 40 { edisplay_line!( - IO, + context.io(), "Expected 40 characters in Tendermint address, got {}", tm_addr.len() ); cli::safe_exit(1); } let tm_addr = tm_addr.to_ascii_uppercase(); - let validator = unwrap_client_response::( - RPC.vp().pos().validator_by_tm_addr(client, &tm_addr).await, + let validator = unwrap_client_response::( + RPC.vp() + .pos() + .validator_by_tm_addr(context.client(), &tm_addr) + .await, ); match validator { Some(address) => { - display_line!(IO, "Found validator address \"{address}\".") + display_line!( + context.io(), + "Found validator address \"{address}\"." + ) } None => { display_line!( - IO, + context.io(), "No validator with Tendermint address {tm_addr} found." ) } @@ -2034,18 +2158,17 @@ pub async fn query_find_validator< } /// Dry run a transaction -pub async fn dry_run_tx( - client: &C, +pub async fn dry_run_tx<'a, N: Namada<'a>>( + context: &N, tx_bytes: Vec, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { display_line!( - IO, + context.io(), "Dry-run result: {}", - rpc::dry_run_tx::<_, IO>(client, tx_bytes).await? + rpc::dry_run_tx(context, tx_bytes).await? 
); Ok(()) } @@ -2064,7 +2187,7 @@ pub async fn is_validator( client: &C, address: &Address, ) -> bool { - namada::sdk::rpc::is_validator(client, address) + namada_sdk::rpc::is_validator(client, address) .await .unwrap() } @@ -2074,7 +2197,7 @@ pub async fn is_delegator( client: &C, address: &Address, ) -> bool { - namada::sdk::rpc::is_delegator(client, address) + namada_sdk::rpc::is_delegator(client, address) .await .unwrap() } @@ -2084,7 +2207,7 @@ pub async fn is_delegator_at( address: &Address, epoch: Epoch, ) -> bool { - namada::sdk::rpc::is_delegator_at(client, address, epoch) + namada_sdk::rpc::is_delegator_at(client, address, epoch) .await .unwrap() } @@ -2096,37 +2219,36 @@ pub async fn known_address( client: &C, address: &Address, ) -> bool { - namada::sdk::rpc::known_address(client, address) + namada_sdk::rpc::known_address(client, address) .await .unwrap() } /// Query for all conversions. -pub async fn query_conversions< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn query_conversions<'a>( + context: &impl Namada<'a>, args: args::QueryConversions, ) { // The chosen token type of the conversions let target_token = args.token; // To facilitate human readable token addresses - let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); + let tokens = context + .wallet() + .await + .get_addresses_with_vp_type(AddressVpType::Token); let masp_addr = masp(); let key_prefix: Key = masp_addr.to_db_key().into(); let state_key = key_prefix .push(&(token::CONVERSION_KEY_PREFIX.to_owned())) .unwrap(); - let conv_state = - query_storage_value::(client, &state_key) + let conv_state: ConversionState = + query_storage_value(context.client(), &state_key) .await .expect("Conversions should be defined"); // Track whether any non-sentinel conversions are found let mut conversions_found = false; for ((addr, _), epoch, conv, _) in conv_state.assets.values() { - let amt: 
masp_primitives::transaction::components::I32Sum = + let amt: masp_primitives::transaction::components::I128Sum = conv.clone().into(); // If the user has specified any targets, then meet them // If we have a sentinel conversion, then skip printing @@ -2139,7 +2261,7 @@ pub async fn query_conversions< conversions_found = true; // Print the asset to which the conversion applies display!( - IO, + context.io(), "{}[{}]: ", tokens.get(addr).cloned().unwrap_or_else(|| addr.clone()), epoch, @@ -2152,7 +2274,7 @@ pub async fn query_conversions< let ((addr, _), epoch, _, _) = &conv_state.assets[asset_type]; // Now print out this component of the conversion display!( - IO, + context.io(), "{}{} {}[{}]", prefix, val, @@ -2163,11 +2285,11 @@ pub async fn query_conversions< prefix = " + "; } // Allowed conversions are always implicit equations - display_line!(IO, " = 0"); + display_line!(context.io(), " = 0"); } if !conversions_found { display_line!( - IO, + context.io(), "No conversions found satisfying specified criteria." ); } @@ -2181,21 +2303,18 @@ pub async fn query_conversion( Address, MaspDenom, Epoch, - masp_primitives::transaction::components::I32Sum, + masp_primitives::transaction::components::I128Sum, MerklePath, )> { - namada::sdk::rpc::query_conversion(client, asset_type).await + namada_sdk::rpc::query_conversion(client, asset_type).await } /// Query a wasm code hash -pub async fn query_wasm_code_hash< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_wasm_code_hash<'a>( + context: &impl Namada<'a>, code_path: impl AsRef, ) -> Result { - rpc::query_wasm_code_hash::<_, IO>(client, code_path).await + rpc::query_wasm_code_hash(context, code_path).await } /// Query a storage value and decode it with [`BorshDeserialize`]. 
@@ -2206,7 +2325,7 @@ pub async fn query_storage_value( where T: BorshDeserialize, { - namada::sdk::rpc::query_storage_value(client, key).await + namada_sdk::rpc::query_storage_value(client, key).await } /// Query a storage value and the proof without decoding. @@ -2218,7 +2337,7 @@ pub async fn query_storage_value_bytes< height: Option, prove: bool, ) -> (Option>, Option) { - namada::sdk::rpc::query_storage_value_bytes(client, key, height, prove) + namada_sdk::rpc::query_storage_value_bytes(client, key, height, prove) .await .unwrap() } @@ -2226,20 +2345,14 @@ pub async fn query_storage_value_bytes< /// Query a range of storage values with a matching prefix and decode them with /// [`BorshDeserialize`]. Returns an iterator of the storage keys paired with /// their associated values. -pub async fn query_storage_prefix< - C: namada::ledger::queries::Client + Sync, - T, - IO: Io, ->( - client: &C, +pub async fn query_storage_prefix<'a, 'b, T>( + context: &'b impl Namada<'a>, key: &storage::Key, -) -> Option> +) -> Option> where T: BorshDeserialize, { - rpc::query_storage_prefix::<_, IO, _>(client, key) - .await - .unwrap() + rpc::query_storage_prefix(context, key).await.unwrap() } /// Query to check if the given storage key exists. @@ -2249,7 +2362,7 @@ pub async fn query_has_storage_key< client: &C, key: &storage::Key, ) -> bool { - namada::sdk::rpc::query_has_storage_key(client, key) + namada_sdk::rpc::query_has_storage_key(client, key) .await .unwrap() } @@ -2258,39 +2371,39 @@ pub async fn query_has_storage_key< /// the current status of a transation. 
pub async fn query_tx_events( client: &C, - tx_event_query: namada::sdk::rpc::TxEventQuery<'_>, + tx_event_query: namada_sdk::rpc::TxEventQuery<'_>, ) -> std::result::Result< Option, ::Error, > { - namada::sdk::rpc::query_tx_events(client, tx_event_query).await + namada_sdk::rpc::query_tx_events(client, tx_event_query).await } /// Lookup the full response accompanying the specified transaction event // TODO: maybe remove this in favor of `query_tx_status` pub async fn query_tx_response( client: &C, - tx_query: namada::sdk::rpc::TxEventQuery<'_>, + tx_query: namada_sdk::rpc::TxEventQuery<'_>, ) -> Result { - namada::sdk::rpc::query_tx_response(client, tx_query).await + namada_sdk::rpc::query_tx_response(client, tx_query).await } /// Lookup the results of applying the specified transaction to the /// blockchain. -pub async fn query_result( - client: &C, +pub async fn query_result<'a>( + context: &impl Namada<'a>, args: args::QueryResult, ) { // First try looking up application event pertaining to given hash. let tx_response = query_tx_response( - client, - namada::sdk::rpc::TxEventQuery::Applied(&args.tx_hash), + context.client(), + namada_sdk::rpc::TxEventQuery::Applied(&args.tx_hash), ) .await; match tx_response { Ok(result) => { display_line!( - IO, + context.io(), "Transaction was applied with result: {}", serde_json::to_string_pretty(&result).unwrap() ) @@ -2298,19 +2411,19 @@ pub async fn query_result( Err(err1) => { // If this fails then instead look for an acceptance event. 
let tx_response = query_tx_response( - client, - namada::sdk::rpc::TxEventQuery::Accepted(&args.tx_hash), + context.client(), + namada_sdk::rpc::TxEventQuery::Accepted(&args.tx_hash), ) .await; match tx_response { Ok(result) => display_line!( - IO, + context.io(), "Transaction was accepted with result: {}", serde_json::to_string_pretty(&result).unwrap() ), Err(err2) => { // Print the errors that caused the lookups to fail - edisplay_line!(IO, "{}\n{}", err1, err2); + edisplay_line!(context.io(), "{}\n{}", err1, err2); cli::safe_exit(1) } } @@ -2318,16 +2431,13 @@ pub async fn query_result( } } -pub async fn epoch_sleep( - client: &C, - _args: args::Query, -) { - let start_epoch = query_and_print_epoch::<_, IO>(client).await; +pub async fn epoch_sleep<'a>(context: &impl Namada<'a>, _args: args::Query) { + let start_epoch = query_and_print_epoch(context).await; loop { tokio::time::sleep(core::time::Duration::from_secs(1)).await; - let current_epoch = query_epoch(client).await.unwrap(); + let current_epoch = query_epoch(context.client()).await.unwrap(); if current_epoch > start_epoch { - display_line!(IO, "Reached epoch {}", current_epoch); + display_line!(context.io(), "Reached epoch {}", current_epoch); break; } } @@ -2339,13 +2449,12 @@ pub async fn get_bond_amount_at( validator: &Address, epoch: Epoch, ) -> Option { - let (_total, total_active) = - unwrap_client_response::( - RPC.vp() - .pos() - .bond_with_slashing(client, delegator, validator, &Some(epoch)) - .await, - ); + let total_active = unwrap_client_response::( + RPC.vp() + .pos() + .bond_with_slashing(client, delegator, validator, &Some(epoch)) + .await, + ); Some(total_active) } @@ -2353,7 +2462,7 @@ pub async fn get_all_validators( client: &C, epoch: Epoch, ) -> HashSet
{ - namada::sdk::rpc::get_all_validators(client, epoch) + namada_sdk::rpc::get_all_validators(client, epoch) .await .unwrap() } @@ -2364,7 +2473,7 @@ pub async fn get_total_staked_tokens< client: &C, epoch: Epoch, ) -> token::Amount { - namada::sdk::rpc::get_total_staked_tokens(client, epoch) + namada_sdk::rpc::get_total_staked_tokens(client, epoch) .await .unwrap() } @@ -2392,7 +2501,7 @@ pub async fn get_delegators_delegation< client: &C, address: &Address, ) -> HashSet
{ - namada::sdk::rpc::get_delegators_delegation(client, address) + namada_sdk::rpc::get_delegators_delegation(client, address) .await .unwrap() } @@ -2404,7 +2513,7 @@ pub async fn get_delegators_delegation_at< address: &Address, epoch: Epoch, ) -> HashMap { - namada::sdk::rpc::get_delegators_delegation_at(client, address, epoch) + namada_sdk::rpc::get_delegators_delegation_at(client, address, epoch) .await .unwrap() } @@ -2414,7 +2523,7 @@ pub async fn query_governance_parameters< >( client: &C, ) -> GovernanceParameters { - namada::sdk::rpc::query_governance_parameters(client).await + namada_sdk::rpc::query_governance_parameters(client).await } /// A helper to unwrap client's response. Will shut down process on error. @@ -2427,11 +2536,8 @@ fn unwrap_client_response( }) } -pub async fn compute_offline_proposal_votes< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn compute_offline_proposal_votes<'a>( + context: &impl Namada<'a>, proposal: &OfflineSignedProposal, votes: Vec, ) -> ProposalVotes { @@ -2444,11 +2550,11 @@ pub async fn compute_offline_proposal_votes< HashMap, > = HashMap::default(); for vote in votes { - let is_validator = is_validator(client, &vote.address).await; - let is_delegator = is_delegator(client, &vote.address).await; + let is_validator = is_validator(context.client(), &vote.address).await; + let is_delegator = is_delegator(context.client(), &vote.address).await; if is_validator { let validator_stake = get_validator_stake( - client, + context.client(), proposal.proposal.tally_epoch, &vote.address, ) @@ -2459,7 +2565,7 @@ pub async fn compute_offline_proposal_votes< .insert(vote.address.clone(), validator_stake); } else if is_delegator { let validators = get_delegators_delegation_at( - client, + context.client(), &vote.address.clone(), proposal.proposal.tally_epoch, ) @@ -2478,7 +2584,7 @@ pub async fn compute_offline_proposal_votes< } } else { display_line!( - IO, + context.io(), "Skipping vote, not a 
validator/delegator at epoch {}.", proposal.proposal.tally_epoch ); @@ -2500,7 +2606,7 @@ pub async fn compute_proposal_votes< proposal_id: u64, epoch: Epoch, ) -> ProposalVotes { - let votes = namada::sdk::rpc::query_proposal_votes(client, proposal_id) + let votes = namada_sdk::rpc::query_proposal_votes(client, proposal_id) .await .unwrap(); diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index c8c42b190b..d939d5691e 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -1,74 +1,53 @@ -use std::env; -use std::fmt::Debug; -use std::fs::{File, OpenOptions}; -use std::io::{Read, Write}; -use std::path::PathBuf; - -use borsh::{BorshDeserialize, BorshSerialize}; -use masp_proofs::prover::LocalTxProver; +use std::fs::File; +use std::io::Write; + use namada::core::ledger::governance::cli::offline::{ OfflineProposal, OfflineSignedProposal, OfflineVote, }; use namada::core::ledger::governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, ProposalVote, }; -use namada::ledger::pos; -use namada::proof_of_stake::parameters::PosParams; +use namada::ibc::applications::transfer::Memo; use namada::proto::Tx; -use namada::sdk::rpc::{TxBroadcastData, TxResponse}; -use namada::sdk::wallet::{Wallet, WalletUtils}; -use namada::sdk::{error, masp, signing, tx}; -use namada::tendermint_rpc::HttpClient; use namada::types::address::{Address, ImplicitAddress}; use namada::types::dec::Dec; use namada::types::io::Io; use namada::types::key::{self, *}; use namada::types::transaction::pos::InitValidator; -use namada::{display_line, edisplay_line}; +use namada_sdk::rpc::{TxBroadcastData, TxResponse}; +use namada_sdk::{display_line, edisplay_line, error, signing, tx, Namada}; use super::rpc; -use crate::cli::{args, safe_exit, Context}; +use crate::cli::{args, safe_exit}; use crate::client::rpc::query_wasm_code_hash; use crate::client::tx::tx::ProcessTxResponse; use crate::config::TendermintMode; use 
crate::facade::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::node::ledger::tendermint_node; -use crate::wallet::{ - gen_validator_keys, read_and_confirm_encryption_password, CliWalletUtils, -}; +use crate::wallet::{gen_validator_keys, read_and_confirm_encryption_password}; /// Wrapper around `signing::aux_signing_data` that stores the optional /// disposable address to the wallet -pub async fn aux_signing_data< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn aux_signing_data<'a>( + context: &impl Namada<'a>, args: &args::Tx, owner: Option
, default_signer: Option
, ) -> Result { - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - wallet, - args, - owner, - default_signer, - ) - .await?; + let signing_data = + signing::aux_signing_data(context, args, owner, default_signer).await?; if args.disposable_signing_key { if !(args.dry_run || args.dry_run_wrapper) { // Store the generated signing key to wallet in case of need - crate::wallet::save(wallet).map_err(|_| { + context.wallet().await.save().map_err(|_| { error::Error::Other( "Failed to save disposable address to wallet".to_string(), ) })?; } else { display_line!( - IO, + context.io(), "Transaction dry run. The disposable address will not be \ saved to wallet." ) @@ -79,12 +58,8 @@ pub async fn aux_signing_data< } // Build a transaction to reveal the signer of the given transaction. -pub async fn submit_reveal_aux< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - ctx: &mut Context, +pub async fn submit_reveal_aux<'a>( + context: &impl Namada<'a>, args: args::Tx, address: &Address, ) -> Result<(), error::Error> { @@ -93,181 +68,123 @@ pub async fn submit_reveal_aux< } if let Address::Implicit(ImplicitAddress(pkh)) = address { - let key = ctx - .wallet + let key = context + .wallet_mut() + .await .find_key_by_pkh(pkh, args.clone().password) .map_err(|e| error::Error::Other(e.to_string()))?; let public_key = key.ref_to(); - if tx::is_reveal_pk_needed::(client, address, args.force).await? { - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args, - None, - None, - ) - .await?; - - let (mut tx, _epoch) = tx::build_reveal_pk::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - &args, - address, - &public_key, - &signing_data.fee_payer, - ) - .await?; + if tx::is_reveal_pk_needed(context.client(), address, args.force) + .await? + { + println!( + "Submitting a tx to reveal the public key for address \ + {address}..." 
+ ); + let (mut tx, signing_data, _epoch) = + tx::build_reveal_pk(context, &args, &public_key).await?; - signing::generate_test_vector::<_, _, IO>( - client, - &mut ctx.wallet, - &tx, - ) - .await?; + signing::generate_test_vector(context, &tx).await?; - signing::sign_tx(&mut ctx.wallet, &args, &mut tx, signing_data)?; + context.sign(&mut tx, &args, signing_data).await?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args, tx) - .await?; + context.submit(tx, &args).await?; } } Ok(()) } -pub async fn submit_custom( - client: &C, - ctx: &mut Context, +pub async fn submit_bridge_pool_tx<'a, N: Namada<'a>>( + namada: &N, + args: args::EthereumBridgePool, +) -> Result<(), error::Error> { + let tx_args = args.tx.clone(); + let (mut tx, signing_data, _epoch) = args.clone().build(namada).await?; + + signing::generate_test_vector(namada, &tx).await?; + + if args.tx.dump_tx { + tx::dump_tx(namada.io(), &args.tx, tx); + } else { + submit_reveal_aux(namada, tx_args.clone(), &args.sender).await?; + namada.sign(&mut tx, &tx_args, signing_data).await?; + namada.submit(tx, &tx_args).await?; + } + + Ok(()) +} + +pub async fn submit_custom<'a, N: Namada<'a>>( + namada: &N, args: args::TxCustom, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let default_signer = Some(args.owner.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.owner.clone()), - default_signer, - ) - .await?; - - submit_reveal_aux::<_, IO>(client, ctx, args.tx.clone(), &args.owner) - .await?; + submit_reveal_aux(namada, args.tx.clone(), &args.owner).await?; - let (mut tx, _epoch) = tx::build_custom::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - &signing_data.fee_payer, - ) - .await?; + let (mut tx, signing_data, _epoch) = args.build(namada).await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, 
&tx) - .await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data).await?; + namada.submit(tx, &args.tx).await?; } Ok(()) } -pub async fn submit_update_account( - client: &C, - ctx: &mut Context, +pub async fn submit_update_account<'a, N: Namada<'a>>( + namada: &N, args: args::TxUpdateAccount, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let default_signer = Some(args.addr.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.addr.clone()), - default_signer, - ) - .await?; + let (mut tx, signing_data, _epoch) = args.build(namada).await?; - let (mut tx, _epoch) = tx::build_update_account::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data).await?; + namada.submit(tx, &args.tx).await?; } Ok(()) } -pub async fn submit_init_account( - client: &C, - ctx: &mut Context, +pub async fn submit_init_account<'a, N: Namada<'a>>( + namada: &N, args: args::TxInitAccount, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let 
signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - None, - None, - ) - .await?; - - let (mut tx, _epoch) = tx::build_init_account::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - &signing_data.fee_payer, - ) - .await?; + let (mut tx, signing_data, _epoch) = + tx::build_init_account(namada, &args).await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data).await?; + namada.submit(tx, &args.tx).await?; } Ok(()) } -pub async fn submit_init_validator( - client: &C, - mut ctx: Context, +pub async fn submit_init_validator<'a>( + namada: &impl Namada<'a>, + config: &mut crate::config::Config, args::TxInitValidator { tx: tx_args, scheme, @@ -283,15 +200,12 @@ pub async fn submit_init_validator( unsafe_dont_encrypt, tx_code_path: _, }: args::TxInitValidator, -) -> Result<(), error::Error> -where - C: namada::ledger::queries::Client + Sync, -{ +) -> Result<(), error::Error> { let tx_args = args::Tx { chain_id: tx_args .clone() .chain_id - .or_else(|| Some(ctx.config.ledger.chain_id.clone())), + .or_else(|| Some(config.ledger.chain_id.clone())), ..tx_args.clone() }; let alias = tx_args @@ -316,29 +230,33 @@ where let eth_hot_key_alias = format!("{}-eth-hot-key", alias); let eth_cold_key_alias = format!("{}-eth-cold-key", alias); + let mut wallet = namada.wallet_mut().await; let consensus_key = consensus_key .map(|key| match key { common::SecretKey::Ed25519(_) => key, common::SecretKey::Secp256k1(_) => { - edisplay_line!(IO, "Consensus key can only be ed25519"); + edisplay_line!( + namada.io(), + "Consensus key can only be 
ed25519" + ); safe_exit(1) } }) .unwrap_or_else(|| { - display_line!(IO, "Generating consensus key..."); + display_line!(namada.io(), "Generating consensus key..."); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - ctx.wallet + wallet .gen_key( // Note that TM only allows ed25519 for consensus key SchemeType::Ed25519, Some(consensus_key_alias.clone()), tx_args.wallet_alias_force, + None, password, None, ) .expect("Key generation should not fail.") - .expect("No existing alias expected.") .1 }); @@ -346,25 +264,28 @@ where .map(|key| match key { common::SecretKey::Secp256k1(_) => key.ref_to(), common::SecretKey::Ed25519(_) => { - edisplay_line!(IO, "Eth cold key can only be secp256k1"); + edisplay_line!( + namada.io(), + "Eth cold key can only be secp256k1" + ); safe_exit(1) } }) .unwrap_or_else(|| { - display_line!(IO, "Generating Eth cold key..."); + display_line!(namada.io(), "Generating Eth cold key..."); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - ctx.wallet + wallet .gen_key( // Note that ETH only allows secp256k1 SchemeType::Secp256k1, Some(eth_cold_key_alias.clone()), tx_args.wallet_alias_force, + None, password, None, ) .expect("Key generation should not fail.") - .expect("No existing alias expected.") .1 .ref_to() }); @@ -373,35 +294,40 @@ where .map(|key| match key { common::SecretKey::Secp256k1(_) => key.ref_to(), common::SecretKey::Ed25519(_) => { - edisplay_line!(IO, "Eth hot key can only be secp256k1"); + edisplay_line!( + namada.io(), + "Eth hot key can only be secp256k1" + ); safe_exit(1) } }) .unwrap_or_else(|| { - display_line!(IO, "Generating Eth hot key..."); + display_line!(namada.io(), "Generating Eth hot key..."); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - ctx.wallet + wallet .gen_key( // Note that ETH only allows secp256k1 SchemeType::Secp256k1, Some(eth_hot_key_alias.clone()), tx_args.wallet_alias_force, + None, password, None, ) .expect("Key 
generation should not fail.") - .expect("No existing alias expected.") .1 .ref_to() }); + // To avoid wallet deadlocks in following operations + drop(wallet); if protocol_key.is_none() { - display_line!(IO, "Generating protocol signing key..."); + display_line!(namada.io(), "Generating protocol signing key..."); } // Generate the validator keys let validator_keys = gen_validator_keys( - &mut ctx.wallet, + *namada.wallet_mut().await, Some(eth_hot_pk.clone()), protocol_key, scheme, @@ -414,17 +340,15 @@ where .expect("DKG sessions keys should have been created") .public(); - let validator_vp_code_hash = query_wasm_code_hash::( - client, - validator_vp_code_path.to_str().unwrap(), - ) - .await - .unwrap(); + let validator_vp_code_hash = + query_wasm_code_hash(namada, validator_vp_code_path.to_str().unwrap()) + .await + .unwrap(); // Validate the commission rate data if commission_rate > Dec::one() || commission_rate < Dec::zero() { edisplay_line!( - IO, + namada.io(), "The validator commission rate must not exceed 1.0 or 100%, and \ it must be 0 or positive" ); @@ -436,7 +360,7 @@ where || max_commission_rate_change < Dec::zero() { edisplay_line!( - IO, + namada.io(), "The validator maximum change in commission rate per epoch must \ not exceed 1.0 or 100%" ); @@ -445,7 +369,7 @@ where } } let tx_code_hash = - query_wasm_code_hash::<_, IO>(client, args::TX_INIT_VALIDATOR_WASM) + query_wasm_code_hash(namada, args::TX_INIT_VALIDATOR_WASM) .await .unwrap(); @@ -471,19 +395,10 @@ where tx.add_code_from_hash(tx_code_hash).add_data(data); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - &mut ctx.wallet, - &tx_args, - None, - None, - ) - .await?; + let signing_data = aux_signing_data(namada, &tx_args, None, None).await?; - tx::prepare_tx::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, + tx::prepare_tx( + namada, &tx_args, &mut tx, signing_data.fee_payer.clone(), @@ -491,18 +406,14 @@ where ) .await?; - 
signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector(namada, &tx).await?; if tx_args.dump_tx { - tx::dump_tx::(&tx_args, tx); + tx::dump_tx(namada.io(), &tx_args, tx); } else { - signing::sign_tx(&mut ctx.wallet, &tx_args, &mut tx, signing_data)?; + namada.sign(&mut tx, &tx_args, signing_data).await?; - let result = - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &tx_args, tx) - .await? - .initialized_accounts(); + let result = namada.submit(tx, &tx_args).await?.initialized_accounts(); if !tx_args.dry_run { let (validator_address_alias, validator_address) = match &result[..] @@ -510,29 +421,37 @@ where // There should be 1 account for the validator itself [validator_address] => { if let Some(alias) = - ctx.wallet.find_alias(validator_address) + namada.wallet().await.find_alias(validator_address) { (alias.clone(), validator_address.clone()) } else { edisplay_line!( - IO, + namada.io(), "Expected one account to be created" ); safe_exit(1) } } _ => { - edisplay_line!(IO, "Expected one account to be created"); + edisplay_line!( + namada.io(), + "Expected one account to be created" + ); safe_exit(1) } }; // add validator address and keys to the wallet - ctx.wallet + namada + .wallet_mut() + .await .add_validator_data(validator_address, validator_keys); - crate::wallet::save(&ctx.wallet) - .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); + namada + .wallet_mut() + .await + .save() + .unwrap_or_else(|err| edisplay_line!(namada.io(), "{}", err)); - let tendermint_home = ctx.config.ledger.cometbft_dir(); + let tendermint_home = config.ledger.cometbft_dir(); tendermint_node::write_validator_key( &tendermint_home, &consensus_key, @@ -541,51 +460,51 @@ where // Write Namada config stuff or figure out how to do the above // tendermint_node things two epochs in the future!!! 
- ctx.config.ledger.shell.tendermint_mode = TendermintMode::Validator; - ctx.config + config.ledger.shell.tendermint_mode = TendermintMode::Validator; + config .write( - &ctx.config.ledger.shell.base_dir, - &ctx.config.ledger.chain_id, + &config.ledger.shell.base_dir, + &config.ledger.chain_id, true, ) .unwrap(); - let key = pos::params_key(); - let pos_params = - rpc::query_storage_value::(client, &key) - .await - .expect("Pos parameter should be defined."); + let pos_params = rpc::query_pos_parameters(namada.client()).await; - display_line!(IO, ""); + display_line!(namada.io(), ""); display_line!( - IO, + namada.io(), "The validator's addresses and keys were stored in the wallet:" ); display_line!( - IO, + namada.io(), " Validator address \"{}\"", validator_address_alias ); display_line!( - IO, + namada.io(), " Validator account key \"{}\"", validator_key_alias ); - display_line!(IO, " Consensus key \"{}\"", consensus_key_alias); display_line!( - IO, + namada.io(), + " Consensus key \"{}\"", + consensus_key_alias + ); + display_line!( + namada.io(), "The ledger node has been setup to use this validator's \ address and consensus key." ); display_line!( - IO, + namada.io(), "Your validator will be active in {} epochs. Be sure to \ restart your node for the changes to take effect!", pos_params.pipeline_len ); } else { display_line!( - IO, + namada.io(), "Transaction dry run. No addresses have been saved." 
); } @@ -593,172 +512,30 @@ where Ok(()) } -/// Shielded context file name -const FILE_NAME: &str = "shielded.dat"; -const TMP_FILE_NAME: &str = "shielded.tmp"; - -#[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] -pub struct CLIShieldedUtils { - #[borsh_skip] - context_dir: PathBuf, -} - -impl CLIShieldedUtils { - /// Initialize a shielded transaction context that identifies notes - /// decryptable by any viewing key in the given set - pub fn new(context_dir: PathBuf) -> masp::ShieldedContext { - // Make sure that MASP parameters are downloaded to enable MASP - // transaction building and verification later on - let params_dir = masp::get_params_dir(); - let spend_path = params_dir.join(masp::SPEND_NAME); - let convert_path = params_dir.join(masp::CONVERT_NAME); - let output_path = params_dir.join(masp::OUTPUT_NAME); - if !(spend_path.exists() - && convert_path.exists() - && output_path.exists()) - { - display_line!(IO, "MASP parameters not present, downloading..."); - masp_proofs::download_masp_parameters(None) - .expect("MASP parameters not present or downloadable"); - display_line!( - IO, - "MASP parameter download complete, resuming execution..." 
- ); - } - // Finally initialize a shielded context with the supplied directory - let utils = Self { context_dir }; - masp::ShieldedContext { - utils, - ..Default::default() - } - } -} - -impl Default for CLIShieldedUtils { - fn default() -> Self { - Self { - context_dir: PathBuf::from(FILE_NAME), - } - } -} - -#[cfg_attr(feature = "async-send", async_trait::async_trait)] -#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] -impl masp::ShieldedUtils for CLIShieldedUtils { - fn local_tx_prover(&self) -> LocalTxProver { - if let Ok(params_dir) = env::var(masp::ENV_VAR_MASP_PARAMS_DIR) { - let params_dir = PathBuf::from(params_dir); - let spend_path = params_dir.join(masp::SPEND_NAME); - let convert_path = params_dir.join(masp::CONVERT_NAME); - let output_path = params_dir.join(masp::OUTPUT_NAME); - LocalTxProver::new(&spend_path, &output_path, &convert_path) - } else { - LocalTxProver::with_default_location() - .expect("unable to load MASP Parameters") - } - } - - /// Try to load the last saved shielded context from the given context - /// directory. If this fails, then leave the current context unchanged. - async fn load(self) -> std::io::Result> { - // Try to load shielded context from file - let mut ctx_file = File::open(self.context_dir.join(FILE_NAME))?; - let mut bytes = Vec::new(); - ctx_file.read_to_end(&mut bytes)?; - let mut new_ctx = masp::ShieldedContext::deserialize(&mut &bytes[..])?; - // Associate the originating context directory with the - // shielded context under construction - new_ctx.utils = self; - Ok(new_ctx) - } - - /// Save this shielded context into its associated context directory - async fn save( - &self, - ctx: &masp::ShieldedContext, - ) -> std::io::Result<()> { - // TODO: use mktemp crate? - let tmp_path = self.context_dir.join(TMP_FILE_NAME); - { - // First serialize the shielded context into a temporary file. - // Inability to create this file implies a simultaneuous write is in - // progress. 
In this case, immediately fail. This is unproblematic - // because the data intended to be stored can always be re-fetched - // from the blockchain. - let mut ctx_file = OpenOptions::new() - .write(true) - .create_new(true) - .open(tmp_path.clone())?; - let mut bytes = Vec::new(); - ctx.serialize(&mut bytes) - .expect("cannot serialize shielded context"); - ctx_file.write_all(&bytes[..])?; - } - // Atomically update the old shielded context file with new data. - // Atomicity is required to prevent other client instances from reading - // corrupt data. - std::fs::rename(tmp_path.clone(), self.context_dir.join(FILE_NAME))?; - // Finally, remove our temporary file to allow future saving of shielded - // contexts. - std::fs::remove_file(tmp_path)?; - Ok(()) - } -} - -pub async fn submit_transfer< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - mut ctx: Context, +pub async fn submit_transfer<'a>( + namada: &impl Namada<'a>, args: args::TxTransfer, ) -> Result<(), error::Error> { for _ in 0..2 { - let default_signer = Some(args.source.effective_address()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.source.effective_address()), - default_signer, - ) - .await?; - - submit_reveal_aux::<_, IO>( - client, - &mut ctx, + submit_reveal_aux( + namada, args.tx.clone(), &args.source.effective_address(), ) .await?; - let arg = args.clone(); - let (mut tx, tx_epoch) = tx::build_transfer::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - arg, - signing_data.fee_payer.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + let (mut tx, signing_data, tx_epoch) = + args.clone().build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); break; } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - 
let result = tx::process_tx::<_, _, IO>( - client, - &mut ctx.wallet, - &args.tx, - tx, - ) - .await?; + namada.sign(&mut tx, &args.tx, signing_data).await?; + let result = namada.submit(tx, &args.tx).await?; - let submission_epoch = - rpc::query_and_print_epoch::<_, IO>(client).await; + let submission_epoch = rpc::query_and_print_epoch(namada).await; match result { ProcessTxResponse::Applied(resp) if @@ -770,7 +547,7 @@ pub async fn submit_transfer< tx_epoch.unwrap() != submission_epoch => { // Then we probably straddled an epoch boundary. Let's retry... - edisplay_line!(IO, + edisplay_line!(namada.io(), "MASP transaction rejected and this may be due to the \ epoch changing. Attempting to resubmit transaction.", ); @@ -786,64 +563,38 @@ pub async fn submit_transfer< Ok(()) } -pub async fn submit_ibc_transfer( - client: &C, - mut ctx: Context, +pub async fn submit_ibc_transfer<'a, N: Namada<'a>>( + namada: &N, args: args::TxIbcTransfer, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let default_signer = Some(args.source.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.source.clone()), - default_signer, - ) - .await?; - - submit_reveal_aux::<_, IO>(client, &mut ctx, args.tx.clone(), &args.source) - .await?; - - let (mut tx, _epoch) = tx::build_ibc_transfer::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + submit_reveal_aux(namada, args.tx.clone(), &args.source).await?; + let (mut tx, signing_data, _epoch) = args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, 
signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data).await?; + namada.submit(tx, &args.tx).await?; } Ok(()) } -pub async fn submit_init_proposal( - client: &C, - mut ctx: Context, +pub async fn submit_init_proposal<'a, N: Namada<'a>>( + namada: &N, args: args::InitProposal, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let current_epoch = rpc::query_and_print_epoch::<_, IO>(client).await; - let governance_parameters = rpc::query_governance_parameters(client).await; - - let ((mut tx_builder, _fee_unshield_epoch), signing_data) = if args - .is_offline + let current_epoch = rpc::query_and_print_epoch(namada).await; + let governance_parameters = + rpc::query_governance_parameters(namada.client()).await; + let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline { let proposal = OfflineProposal::try_from(args.proposal_data.as_ref()) .map_err(|e| { @@ -855,9 +606,8 @@ where .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; let default_signer = Some(proposal.author.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, + let signing_data = aux_signing_data( + namada, &args.tx, Some(proposal.author.clone()), default_signer, @@ -876,7 +626,11 @@ where ) })?; - display_line!(IO, "Proposal serialized to: {}", output_file_path); + display_line!( + namada.io(), + "Proposal serialized to: {}", + output_file_path + ); return Ok(()); } else if args.is_pgf_funding { let proposal = @@ -889,36 +643,10 @@ where .validate(&governance_parameters, current_epoch, args.tx.force) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - let default_signer = Some(proposal.proposal.author.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(proposal.proposal.author.clone()), - 
default_signer, - ) - .await?; - - submit_reveal_aux::<_, IO>( - client, - &mut ctx, - args.tx.clone(), - &proposal.proposal.author, - ) - .await?; + submit_reveal_aux(namada, args.tx.clone(), &proposal.proposal.author) + .await?; - ( - tx::build_pgf_funding_proposal::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - proposal, - &signing_data.fee_payer.clone(), - ) - .await?, - signing_data, - ) + tx::build_pgf_funding_proposal(namada, &args, proposal).await? } else if args.is_pgf_stewards { let proposal = PgfStewardProposal::try_from( args.proposal_data.as_ref(), @@ -927,8 +655,8 @@ where error::TxError::FailedGovernaneProposalDeserialize(e.to_string()) })?; let author_balance = rpc::get_token_balance( - client, - &ctx.native_token, + namada.client(), + &namada.native_token(), &proposal.proposal.author, ) .await; @@ -941,44 +669,18 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - let default_signer = Some(proposal.proposal.author.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(proposal.proposal.author.clone()), - default_signer, - ) - .await?; - - submit_reveal_aux::<_, IO>( - client, - &mut ctx, - args.tx.clone(), - &proposal.proposal.author, - ) - .await?; + submit_reveal_aux(namada, args.tx.clone(), &proposal.proposal.author) + .await?; - ( - tx::build_pgf_stewards_proposal::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - proposal, - signing_data.fee_payer.clone(), - ) - .await?, - signing_data, - ) + tx::build_pgf_stewards_proposal(namada, &args, proposal).await? 
} else { let proposal = DefaultProposal::try_from(args.proposal_data.as_ref()) .map_err(|e| { error::TxError::FailedGovernaneProposalDeserialize(e.to_string()) })?; let author_balane = rpc::get_token_balance( - client, - &ctx.native_token, + namada.client(), + &namada.native_token(), &proposal.proposal.author, ) .await; @@ -991,87 +693,41 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; - let default_signer = Some(proposal.proposal.author.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(proposal.proposal.author.clone()), - default_signer, - ) - .await?; - - submit_reveal_aux::<_, IO>( - client, - &mut ctx, - args.tx.clone(), - &proposal.proposal.author, - ) - .await?; + submit_reveal_aux(namada, args.tx.clone(), &proposal.proposal.author) + .await?; - ( - tx::build_default_proposal::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - proposal, - signing_data.fee_payer.clone(), - ) - .await?, - signing_data, - ) + tx::build_default_proposal(namada, &args, proposal).await? 
}; - signing::generate_test_vector::<_, _, IO>( - client, - &mut ctx.wallet, - &tx_builder, - ) - .await?; + signing::generate_test_vector(namada, &tx_builder).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx_builder); + tx::dump_tx(namada.io(), &args.tx, tx_builder); } else { - signing::sign_tx( - &mut ctx.wallet, - &args.tx, - &mut tx_builder, - signing_data, - )?; - tx::process_tx::<_, _, IO>( - client, - &mut ctx.wallet, - &args.tx, - tx_builder, - ) - .await?; + namada.sign(&mut tx_builder, &args.tx, signing_data).await?; + namada.submit(tx_builder, &args.tx).await?; } Ok(()) } -pub async fn submit_vote_proposal( - client: &C, - mut ctx: Context, +pub async fn submit_vote_proposal<'a, N: Namada<'a>>( + namada: &N, args: args::VoteProposal, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let current_epoch = rpc::query_and_print_epoch::<_, IO>(client).await; - - let default_signer = Some(args.voter.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.voter.clone()), - default_signer.clone(), - ) - .await?; + let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline + { + let default_signer = Some(args.voter.clone()); + let signing_data = aux_signing_data( + namada, + &args.tx, + Some(args.voter.clone()), + default_signer.clone(), + ) + .await?; - let (mut tx_builder, _fee_unshield_epoch) = if args.is_offline { let proposal_vote = ProposalVote::try_from(args.vote) .map_err(|_| error::TxError::InvalidProposalVote)?; @@ -1086,7 +742,7 @@ where ) .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; let delegations = rpc::get_delegators_delegation_at( - client, + namada.client(), &args.voter, proposal.proposal.tally_epoch, ) @@ -1110,50 +766,29 @@ where .serialize(args.tx.output_folder) .expect("Should be able to serialize the offline proposal"); - display_line!(IO, "Proposal 
vote serialized to: {}", output_file_path); + display_line!( + namada.io(), + "Proposal vote serialized to: {}", + output_file_path + ); return Ok(()); } else { - tx::build_vote_proposal::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - current_epoch, - signing_data.fee_payer.clone(), - ) - .await? + args.build(namada).await? }; - signing::generate_test_vector::<_, _, IO>( - client, - &mut ctx.wallet, - &tx_builder, - ) - .await?; + signing::generate_test_vector(namada, &tx_builder).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx_builder); + tx::dump_tx(namada.io(), &args.tx, tx_builder); } else { - signing::sign_tx( - &mut ctx.wallet, - &args.tx, - &mut tx_builder, - signing_data, - )?; - tx::process_tx::<_, _, IO>( - client, - &mut ctx.wallet, - &args.tx, - tx_builder, - ) - .await?; + namada.sign(&mut tx_builder, &args.tx, signing_data).await?; + namada.submit(tx_builder, &args.tx).await?; } Ok(()) } -pub async fn sign_tx( - client: &C, - ctx: &mut Context, +pub async fn sign_tx<'a, N: Namada<'a>>( + namada: &N, args::SignTx { tx: tx_args, tx_data, @@ -1161,37 +796,31 @@ pub async fn sign_tx( }: args::SignTx, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let tx = if let Ok(transaction) = Tx::deserialize(tx_data.as_ref()) { transaction } else { - edisplay_line!(IO, "Couldn't decode the transaction."); + edisplay_line!(namada.io(), "Couldn't decode the transaction."); safe_exit(1) }; - let default_signer = Some(owner.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &tx_args, - Some(owner.clone()), - default_signer, - ) - .await?; + let signing_data = + aux_signing_data(namada, &tx_args, Some(owner.clone()), default_signer) + .await?; + let mut wallet = namada.wallet_mut().await; let secret_keys = &signing_data .public_keys .iter() .filter_map(|public_key| { if let Ok(secret_key) = - 
signing::find_key_by_pk(&mut ctx.wallet, &tx_args, public_key) + signing::find_key_by_pk(&mut wallet, &tx_args, public_key) { Some(secret_key) } else { edisplay_line!( - IO, + namada.io(), "Couldn't find the secret key for {}. Skipping signature \ generation.", public_key @@ -1229,7 +858,7 @@ where ) .expect("Signature should be deserializable."); display_line!( - IO, + namada.io(), "Signature for {} serialized at {}", signature.pubkey, output_path.display() @@ -1239,357 +868,215 @@ where Ok(()) } -pub async fn submit_reveal_pk( - client: &C, - ctx: &mut Context, +pub async fn submit_reveal_pk<'a, N: Namada<'a>>( + namada: &N, args: args::RevealPk, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - submit_reveal_aux::<_, IO>( - client, - ctx, - args.tx, - &(&args.public_key).into(), - ) - .await?; + submit_reveal_aux(namada, args.tx, &(&args.public_key).into()).await?; Ok(()) } -pub async fn submit_bond( - client: &C, - ctx: &mut Context, +pub async fn submit_bond<'a, N: Namada<'a>>( + namada: &N, args: args::Bond, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let default_address = args.source.clone().unwrap_or(args.validator.clone()); - let default_signer = Some(default_address.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(default_address.clone()), - default_signer, - ) - .await?; - - submit_reveal_aux::<_, IO>(client, ctx, args.tx.clone(), &default_address) - .await?; + submit_reveal_aux(namada, args.tx.clone(), &default_address).await?; - let (mut tx, _fee_unshield_epoch) = tx::build_bond::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + let (mut 
tx, signing_data, _fee_unshield_epoch) = + args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.submit(tx, &args.tx).await?; } Ok(()) } -pub async fn submit_unbond( - client: &C, - ctx: &mut Context, +pub async fn submit_unbond<'a, N: Namada<'a>>( + namada: &N, args: args::Unbond, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let default_address = args.source.clone().unwrap_or(args.validator.clone()); - let default_signer = Some(default_address.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(default_address), - default_signer, - ) - .await?; - - let (mut tx, _fee_unshield_epoch, latest_withdrawal_pre) = - tx::build_unbond::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + let (mut tx, signing_data, _fee_unshield_epoch, latest_withdrawal_pre) = + args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.submit(tx, &args.tx).await?; - tx::query_unbonds::<_, IO>(client, args.clone(), latest_withdrawal_pre) - .await?; + tx::query_unbonds(namada, args.clone(), 
latest_withdrawal_pre).await?; } Ok(()) } -pub async fn submit_withdraw( - client: &C, - mut ctx: Context, +pub async fn submit_withdraw<'a, N: Namada<'a>>( + namada: &N, args: args::Withdraw, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let default_address = args.source.clone().unwrap_or(args.validator.clone()); - let default_signer = Some(default_address.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(default_address), - default_signer, - ) - .await?; + let (mut tx, signing_data, _fee_unshield_epoch) = + args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; - let (mut tx, _fee_unshield_epoch) = tx::build_withdraw::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + if args.tx.dump_tx { + tx::dump_tx(namada.io(), &args.tx, tx); + } else { + namada.sign(&mut tx, &args.tx, signing_data).await?; + + namada.submit(tx, &args.tx).await?; + } + + Ok(()) +} + +pub async fn submit_redelegate<'a, N: Namada<'a>>( + namada: &N, + args: args::Redelegate, +) -> Result<(), error::Error> +where + ::Error: std::fmt::Display, +{ + let (mut tx, signing_data) = args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.submit(tx, &args.tx).await?; } Ok(()) } -pub async fn submit_validator_commission_change( - client: &C, - mut ctx: Context, +pub async fn submit_validator_commission_change<'a, N: Namada<'a>>( 
+ namada: &N, args: args::CommissionRateChange, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, + ::Error: std::fmt::Display, { - let default_signer = Some(args.validator.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.validator.clone()), - default_signer, - ) - .await?; - - let (mut tx, _fee_unshield_epoch) = - tx::build_validator_commission_change::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + let (mut tx, signing_data, _fee_unshield_epoch) = + args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.submit(tx, &args.tx).await?; } Ok(()) } -pub async fn submit_unjail_validator< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - mut ctx: Context, +pub async fn submit_unjail_validator<'a, N: Namada<'a>>( + namada: &N, args: args::TxUnjailValidator, ) -> Result<(), error::Error> where - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let default_signer = Some(args.validator.clone()); - let signing_data = aux_signing_data::<_, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.validator.clone()), - default_signer, - ) - .await?; - - let (mut tx, _fee_unshield_epoch) = - tx::build_unjail_validator::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + let (mut tx, 
signing_data, _fee_unshield_epoch) = + args.build(namada).await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; + namada.sign(&mut tx, &args.tx, signing_data).await?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.submit(tx, &args.tx).await?; } Ok(()) } -pub async fn submit_update_steward_commission< - C: namada::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, - mut ctx: Context, +pub async fn submit_update_steward_commission<'a, N: Namada<'a>>( + namada: &N, args: args::UpdateStewardCommission, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let default_signer = Some(args.steward.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.steward.clone()), - default_signer, - ) - .await?; - - let (mut tx, _fee_unshield_epoch) = - tx::build_update_steward_commission::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - &signing_data.fee_payer, - ) - .await?; + let (mut tx, signing_data, _fee_unshield_epoch) = + args.build(namada).await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data).await?; + namada.submit(tx, &args.tx).await?; } Ok(()) } -pub async fn submit_resign_steward( - client: &C, - mut ctx: Context, +pub async fn submit_resign_steward<'a, N: Namada<'a>>( 
+ namada: &N, args: args::ResignSteward, ) -> Result<(), error::Error> where - C: namada::ledger::queries::Client + Sync, - C::Error: std::fmt::Display, + ::Error: std::fmt::Display, { - let default_signer = Some(args.steward.clone()); - let signing_data = signing::aux_signing_data::<_, _, IO>( - client, - &mut ctx.wallet, - &args.tx, - Some(args.steward.clone()), - default_signer, - ) - .await?; - - let (mut tx, _fee_unshield_epoch) = - tx::build_resign_steward::<_, _, _, IO>( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - &signing_data.fee_payer, - ) - .await?; + let (mut tx, signing_data, _epoch) = args.build(namada).await?; - signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) - .await?; + signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { - tx::dump_tx::(&args.tx, tx); + tx::dump_tx(namada.io(), &args.tx, tx); } else { - signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) - .await?; + namada.sign(&mut tx, &args.tx, signing_data).await?; + namada.submit(tx, &args.tx).await?; } Ok(()) } /// Save accounts initialized from a tx into the wallet, if any. -pub async fn save_initialized_accounts( - wallet: &mut Wallet, +pub async fn save_initialized_accounts<'a>( + namada: &impl Namada<'a>, args: &args::Tx, initialized_accounts: Vec
, ) { - tx::save_initialized_accounts::(wallet, args, initialized_accounts) - .await + tx::save_initialized_accounts(namada, args, initialized_accounts).await } /// Broadcast a transaction to be included in the blockchain and checks that /// the tx has been successfully included into the mempool of a validator /// /// In the case of errors in any of those stages, an error message is returned -pub async fn broadcast_tx( - rpc_cli: &HttpClient, +pub async fn broadcast_tx<'a>( + namada: &impl Namada<'a>, to_broadcast: &TxBroadcastData, ) -> Result { - tx::broadcast_tx::<_, IO>(rpc_cli, to_broadcast).await + tx::broadcast_tx(namada, to_broadcast).await } /// Broadcast a transaction to be included in the blockchain. @@ -1600,9 +1087,36 @@ pub async fn broadcast_tx( /// 3. The decrypted payload of the tx has been included on the blockchain. /// /// In the case of errors in any of those stages, an error message is returned -pub async fn submit_tx( - client: &HttpClient, +pub async fn submit_tx<'a>( + namada: &impl Namada<'a>, to_broadcast: TxBroadcastData, ) -> Result { - tx::submit_tx::<_, IO>(client, to_broadcast).await + tx::submit_tx(namada, to_broadcast).await +} + +pub async fn gen_ibc_shielded_transfer<'a>( + context: &impl Namada<'a>, + args: args::GenIbcShieldedTransafer, +) -> Result<(), error::Error> { + if let Some(shielded_transfer) = + tx::gen_ibc_shielded_transfer(context, args.clone()).await? 
+ { + let tx_id = shielded_transfer.masp_tx.txid().to_string(); + let filename = format!("ibc_shielded_transfer_{}.memo", tx_id); + let output_path = match &args.output_folder { + Some(path) => path.join(filename), + None => filename.into(), + }; + let mut out = File::create(&output_path) + .expect("Should be able to create the out file."); + out.write_all(Memo::from(shielded_transfer).as_ref().as_bytes()) + .expect("IBC memo should be deserializable."); + println!( + "Output IBC shielded transfer for {tx_id} to {}", + output_path.to_string_lossy() + ); + } else { + eprintln!("No shielded transfer for this IBC transfer.") + } + Ok(()) } diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs index 0caab25d35..16bf625c23 100644 --- a/apps/src/lib/client/utils.rs +++ b/apps/src/lib/client/utils.rs @@ -5,16 +5,16 @@ use std::io::Write; use std::path::{Path, PathBuf}; use std::str::FromStr; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use flate2::read::GzDecoder; use flate2::write::GzEncoder; use flate2::Compression; -use namada::sdk::wallet::Wallet; use namada::types::address; use namada::types::chain::ChainId; use namada::types::dec::Dec; use namada::types::key::*; use namada::vm::validate_untrusted_wasm; +use namada_sdk::wallet::Wallet; use prost::bytes::Bytes; use rand::prelude::ThreadRng; use rand::thread_rng; @@ -384,12 +384,12 @@ pub fn id_from_pk(pk: &common::PublicKey) -> TendermintNodeId { match pk { common::PublicKey::Ed25519(_) => { let _pk: ed25519::PublicKey = pk.try_to_pk().unwrap(); - let digest = Sha256::digest(_pk.try_to_vec().unwrap().as_slice()); + let digest = Sha256::digest(_pk.serialize_to_vec().as_slice()); bytes.copy_from_slice(&digest[..TENDERMINT_NODE_ID_LENGTH]); } common::PublicKey::Secp256k1(_) => { let _pk: secp256k1::PublicKey = pk.try_to_pk().unwrap(); - let digest = Sha256::digest(_pk.try_to_vec().unwrap().as_slice()); + let digest = Sha256::digest(_pk.serialize_to_vec().as_slice()); 
bytes.copy_from_slice(&digest[..TENDERMINT_NODE_ID_LENGTH]); } } @@ -505,10 +505,16 @@ pub fn init_network( println!("Generating validator {} consensus key...", name); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet - .gen_key(SchemeType::Ed25519, Some(alias), true, password, None) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + let (_alias, keypair, _mnemonic) = wallet + .gen_key( + SchemeType::Ed25519, + Some(alias), + true, + None, + password, + None, + ) + .expect("Key generation should not fail."); // Write consensus key for Tendermint tendermint_node::write_validator_key(&tm_home_dir, &keypair); @@ -525,10 +531,16 @@ pub fn init_network( println!("Generating validator {} account key...", name); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet - .gen_key(SchemeType::Ed25519, Some(alias), true, password, None) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + let (_alias, keypair, _mnemonic) = wallet + .gen_key( + SchemeType::Ed25519, + Some(alias), + true, + None, + password, + None, + ) + .expect("Key generation should not fail."); keypair.ref_to() }); @@ -541,10 +553,16 @@ pub fn init_network( println!("Generating validator {} protocol signing key...", name); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet - .gen_key(SchemeType::Ed25519, Some(alias), true, password, None) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + let (_alias, keypair, _mnemonic) = wallet + .gen_key( + SchemeType::Ed25519, + Some(alias), + true, + None, + password, + None, + ) + .expect("Key generation should not fail."); keypair.ref_to() }); @@ -557,16 +575,16 @@ pub fn init_network( println!("Generating validator {} eth hot key...", name); let password = 
read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet + let (_alias, keypair, _mnemonic) = wallet .gen_key( SchemeType::Secp256k1, Some(alias), true, + None, password, None, ) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + .expect("Key generation should not fail."); keypair.ref_to() }); @@ -579,16 +597,16 @@ pub fn init_network( println!("Generating validator {} eth cold key...", name); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet + let (_alias, keypair, _mnemonic) = wallet .gen_key( SchemeType::Secp256k1, Some(alias), true, + None, password, None, ) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + .expect("Key generation should not fail."); keypair.ref_to() }); @@ -675,16 +693,16 @@ pub fn init_network( ); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, keypair) = wallet + let (_alias, keypair, _mnemonic) = wallet .gen_key( SchemeType::Ed25519, Some(name.clone()), true, + None, password, None, ) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + .expect("Key generation should not fail."); let public_key = genesis_config::HexString(keypair.ref_to().to_string()); config.public_key = Some(public_key); @@ -705,7 +723,7 @@ pub fn init_network( // Generate the chain ID first let genesis = genesis_config::load_genesis_config(config_clean.clone()); - let genesis_bytes = genesis.try_to_vec().unwrap(); + let genesis_bytes = genesis.serialize_to_vec(); let chain_id = ChainId::from_genesis(chain_id_prefix, genesis_bytes); let chain_dir = global_args.base_dir.join(chain_id.as_str()); let genesis_path = global_args @@ -938,16 +956,16 @@ fn init_established_account( println!("Generating established account {} key...", name.as_ref()); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let (_alias, 
keypair) = wallet + let (_alias, keypair, _mnemonic) = wallet .gen_key( SchemeType::Ed25519, Some(format!("{}-key", name.as_ref())), true, + None, password, None, // do not use mnemonic code / HD derivation path ) - .expect("Key generation should not fail.") - .expect("No existing alias expected."); + .expect("Key generation should not fail."); let public_key = genesis_config::HexString(keypair.ref_to().to_string()); config.public_key = Some(public_key); @@ -1126,12 +1144,11 @@ pub fn write_tendermint_node_key( // but does not for secp256k1. let (node_keypair, key_str) = match node_sk { common::SecretKey::Ed25519(sk) => ( - [sk.try_to_vec().unwrap(), sk.ref_to().try_to_vec().unwrap()] - .concat(), + [sk.serialize_to_vec(), sk.ref_to().serialize_to_vec()].concat(), "Ed25519", ), common::SecretKey::Secp256k1(sk) => { - (sk.try_to_vec().unwrap(), "Secp256k1") + (sk.serialize_to_vec(), "Secp256k1") } }; diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index b7281fd4ce..ed0ffeb788 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -6,17 +6,16 @@ use borsh::{BorshDeserialize, BorshSerialize}; use derivative::Derivative; use namada::core::ledger::governance::parameters::GovernanceParameters; use namada::core::ledger::pgf::parameters::PgfParameters; -use namada::ledger::eth_bridge::EthereumBridgeConfig; use namada::ledger::parameters::EpochDuration; -use namada::ledger::pos::{Dec, GenesisValidator, PosParams}; +use namada::ledger::pos::{Dec, GenesisValidator, OwnedPosParams}; use namada::types::address::Address; use namada::types::chain::ProposalBytes; use namada::types::key::dkg_session_keys::DkgPublicKey; use namada::types::key::*; use namada::types::time::{DateTimeUtc, DurationSecs}; use namada::types::token::Denomination; -use namada::types::uint::Uint; use namada::types::{storage, token}; +use namada_sdk::eth_bridge::EthereumBridgeConfig; /// Genesis configuration file format pub mod genesis_config { @@ 
-31,7 +30,7 @@ pub mod genesis_config { use namada::core::ledger::governance::parameters::GovernanceParameters; use namada::core::ledger::pgf::parameters::PgfParameters; use namada::ledger::parameters::EpochDuration; - use namada::ledger::pos::{Dec, GenesisValidator, PosParams}; + use namada::ledger::pos::{Dec, GenesisValidator, OwnedPosParams}; use namada::types::address::Address; use namada::types::chain::ProposalBytes; use namada::types::key::dkg_session_keys::DkgPublicKey; @@ -213,6 +212,8 @@ pub mod genesis_config { pub vp: Option, // Initial balances held by accounts defined elsewhere. pub balances: Option>, + // Token parameters + pub parameters: Option, } #[derive(Clone, Debug, Deserialize, Serialize)] @@ -338,6 +339,12 @@ pub mod genesis_config { .unwrap() .to_public_key() .unwrap(), + protocol_key: config + .protocol_public_key + .as_ref() + .unwrap() + .to_public_key() + .unwrap(), eth_cold_key: config .eth_cold_key .as_ref() @@ -372,12 +379,6 @@ pub mod genesis_config { .unwrap() .to_public_key() .unwrap(), - protocol_key: config - .protocol_public_key - .as_ref() - .unwrap() - .to_public_key() - .unwrap(), dkg_public_key: config .dkg_public_key .as_ref() @@ -404,6 +405,9 @@ pub mod genesis_config { implicit_accounts: &HashMap, ) -> TokenAccount { TokenAccount { + last_locked_ratio: Dec::zero(), + last_inflation: token::Amount::zero(), + parameters: config.parameters.as_ref().unwrap().to_owned(), address: Address::decode(config.address.as_ref().unwrap()).unwrap(), denom: config.denom, balances: config @@ -665,7 +669,7 @@ pub mod genesis_config { validator_stake_threshold, } = pos_params; - let pos_params = PosParams { + let pos_params = OwnedPosParams { max_validator_slots, pipeline_len, unbonding_len, @@ -729,7 +733,7 @@ pub mod genesis_config { } #[derive(Debug, BorshSerialize, BorshDeserialize)] -#[borsh_init(init)] +#[borsh(init=init)] pub struct Genesis { pub genesis_time: DateTimeUtc, pub native_token: Address, @@ -738,7 +742,7 @@ pub struct 
Genesis { pub established_accounts: Vec, pub implicit_accounts: Vec, pub parameters: Parameters, - pub pos_params: PosParams, + pub pos_params: OwnedPosParams, pub gov_params: GovernanceParameters, pub pgf_params: PgfParameters, // Ethereum bridge config @@ -774,9 +778,6 @@ pub struct Validator { /// this key on a transaction signature. /// Note that this is distinct from consensus key used in the PoS system. pub account_key: common::PublicKey, - /// Public key associated with validator account used for signing protocol - /// transactions - pub protocol_key: common::PublicKey, /// The public DKG session key used during the DKG protocol pub dkg_public_key: DkgPublicKey, /// These tokens are not staked and hence do not contribute to the @@ -818,6 +819,12 @@ pub struct TokenAccount { /// Accounts' balances of this token #[derivative(PartialOrd = "ignore", Ord = "ignore")] pub balances: HashMap, + /// Token parameters + pub parameters: token::Parameters, + /// Token inflation from the last epoch (read + write for every epoch) + pub last_inflation: token::Amount, + /// Token shielded ratio from the last epoch (read + write for every epoch) + pub last_locked_ratio: Dec, } #[derive( @@ -900,14 +907,15 @@ pub fn genesis( } #[cfg(any(test, feature = "dev"))] pub fn genesis(num_validators: u64) -> Genesis { - use namada::ledger::eth_bridge::{ - Contracts, Erc20WhitelistEntry, UpgradeableContract, - }; use namada::types::address::{ self, apfel, btc, dot, eth, kartoffel, nam, schnitzel, wnam, }; use namada::types::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; use namada::types::ethereum_events::EthAddress; + use namada::types::uint::Uint; + use namada_sdk::eth_bridge::{ + Contracts, Erc20WhitelistEntry, UpgradeableContract, + }; use crate::wallet; @@ -938,6 +946,7 @@ pub fn genesis(num_validators: u64) -> Genesis { address, tokens: token::Amount::native_whole(200_000), consensus_key: consensus_keypair.ref_to(), + protocol_key: protocol_keypair.ref_to(), commission_rate: 
Dec::new(5, 2).expect("This can't fail"), max_commission_rate_change: Dec::new(1, 2) .expect("This can't fail"), @@ -945,7 +954,6 @@ pub fn genesis(num_validators: u64) -> Genesis { eth_hot_key: eth_bridge_keypair.ref_to(), }, account_key: account_keypair.ref_to(), - protocol_key: protocol_keypair.ref_to(), dkg_public_key: dkg_keypair.public(), non_staked_balance: token::Amount::native_whole(100_000), // TODO replace with https://github.com/anoma/namada/issues/25) @@ -971,6 +979,7 @@ pub fn genesis(num_validators: u64) -> Genesis { address, tokens: token::Amount::native_whole(200_000), consensus_key: consensus_keypair.ref_to(), + protocol_key: protocol_keypair.ref_to(), commission_rate: Dec::new(5, 2).expect("This can't fail"), max_commission_rate_change: Dec::new(1, 2) .expect("This can't fail"), @@ -978,7 +987,6 @@ pub fn genesis(num_validators: u64) -> Genesis { eth_hot_key: eth_bridge_keypair.ref_to(), }, account_key: account_keypair.ref_to(), - protocol_key: protocol_keypair.ref_to(), dkg_public_key: dkg_keypair.public(), non_staked_balance: token::Amount::native_whole(100_000), // TODO replace with https://github.com/anoma/namada/issues/25) @@ -1001,8 +1009,8 @@ pub fn genesis(num_validators: u64) -> Genesis { implicit_vp_code_path: vp_implicit_path.into(), implicit_vp_sha256: Default::default(), max_signatures_per_transaction: 15, - epochs_per_year: 525_600, /* seconds in yr (60*60*24*365) div seconds - * per epoch (60 = min_duration) */ + epochs_per_year: 365, /* seconds in yr (60*60*24*365) div seconds + * per epoch (60 = min_duration) */ pos_gain_p: Dec::new(1, 1).expect("This can't fail"), pos_gain_d: Dec::new(1, 1).expect("This can't fail"), staked_ratio: Dec::zero(), @@ -1100,6 +1108,9 @@ pub fn genesis(num_validators: u64) -> Genesis { .into_iter() .map(|(k, v)| (k, token::Amount::from_uint(v, denom).unwrap())) .collect(), + parameters: token::Parameters::default(), + last_inflation: token::Amount::zero(), + last_locked_ratio: Dec::zero(), }) 
.collect(); Genesis { @@ -1109,7 +1120,7 @@ pub fn genesis(num_validators: u64) -> Genesis { implicit_accounts, token_accounts, parameters, - pos_params: PosParams::default(), + pos_params: OwnedPosParams::default(), gov_params: GovernanceParameters::default(), pgf_params: PgfParameters::default(), ethereum_bridge_params: Some(EthereumBridgeConfig { @@ -1136,7 +1147,7 @@ pub fn genesis(num_validators: u64) -> Genesis { #[cfg(test)] pub mod tests { - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada::types::address::testing::gen_established_address; use namada::types::key::*; use rand::prelude::ThreadRng; @@ -1152,7 +1163,7 @@ pub mod tests { let mut rng: ThreadRng = thread_rng(); let keypair: common::SecretKey = ed25519::SigScheme::generate(&mut rng).try_to_sk().unwrap(); - let kp_arr = keypair.try_to_vec().unwrap(); + let kp_arr = keypair.serialize_to_vec(); let (protocol_keypair, _eth_hot_bridge_keypair, dkg_keypair) = wallet::defaults::validator_keys(); @@ -1169,14 +1180,14 @@ pub mod tests { println!("address: {}", address); println!("keypair: {:?}", kp_arr); println!("protocol_keypair: {:?}", protocol_keypair); - println!("dkg_keypair: {:?}", dkg_keypair.try_to_vec().unwrap()); + println!("dkg_keypair: {:?}", dkg_keypair.serialize_to_vec()); println!( "eth_cold_gov_keypair: {:?}", - eth_cold_gov_keypair.try_to_vec().unwrap() + eth_cold_gov_keypair.serialize_to_vec() ); println!( "eth_hot_bridge_keypair: {:?}", - eth_hot_bridge_keypair.try_to_vec().unwrap() + eth_hot_bridge_keypair.serialize_to_vec() ); } } diff --git a/apps/src/lib/mod.rs b/apps/src/lib/mod.rs index 7df31ea2ea..b2991870ef 100644 --- a/apps/src/lib/mod.rs +++ b/apps/src/lib/mod.rs @@ -5,6 +5,8 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] +#[cfg(feature = "testing")] +pub mod bench_utils; pub mod cli; pub mod client; pub mod config; diff --git a/apps/src/lib/node/ledger/ethereum_oracle/mod.rs 
b/apps/src/lib/node/ledger/ethereum_oracle/mod.rs index 6980778c07..371ecf4a92 100644 --- a/apps/src/lib/node/ledger/ethereum_oracle/mod.rs +++ b/apps/src/lib/node/ledger/ethereum_oracle/mod.rs @@ -7,14 +7,15 @@ use std::ops::ControlFlow; use async_trait::async_trait; use ethabi::Address; use ethbridge_events::{event_codecs, EventKind}; +use itertools::Either; use namada::core::hints; use namada::core::types::ethereum_structs; use namada::eth_bridge::ethers; use namada::eth_bridge::ethers::providers::{Http, Middleware, Provider}; use namada::eth_bridge::oracle::config::Config; -use namada::ledger::eth_bridge::{eth_syncing_status_timeout, SyncStatus}; use namada::types::control_flow::time::{Constant, Duration, Instant, Sleep}; use namada::types::ethereum_events::EthereumEvent; +use namada_sdk::eth_bridge::{eth_syncing_status_timeout, SyncStatus}; use num256::Uint256; use thiserror::Error; use tokio::sync::mpsc::error::TryRecvError; @@ -75,13 +76,6 @@ pub trait RpcClient { /// Ethereum event log. type Log: IntoEthAbiLog; - /// Whether we should stop running the Ethereum oracle - /// if a call to [`Self::check_events_in_block`] fails. - /// - /// This is only useful for testing purposes. In general, - /// no implementation should override this constant. - const EXIT_ON_EVENTS_FAILURE: bool = true; - /// Instantiate a new client, pointing to the /// given RPC url. fn new_client(rpc_url: &str) -> Self @@ -108,6 +102,10 @@ pub trait RpcClient { backoff: Duration, deadline: Instant, ) -> Result; + + /// Given its current state, check if this RPC client + /// may recover from the given [`enum@Error`]. 
+ fn may_recover(&self, error: &Error) -> bool; } #[async_trait(?Send)] @@ -172,6 +170,14 @@ impl RpcClient for Provider { }, } } + + #[inline(always)] + fn may_recover(&self, error: &Error) -> bool { + !matches!( + error, + Error::Timeout | Error::Channel(_, _) | Error::CheckEvents(_, _, _) + ) + } } /// A client that can talk to geth and parse @@ -197,7 +203,7 @@ impl Oracle { /// Construct a new [`Oracle`]. Note that it can not do anything until it /// has been sent a configuration via the passed in `control` channel. pub fn new( - url: &str, + client_or_url: Either, sender: BoundedSender, last_processed_block: last_processed_block::Sender, backoff: Duration, @@ -205,7 +211,10 @@ impl Oracle { control: control::Receiver, ) -> Self { Self { - client: C::new_client(url), + client: match client_or_url { + Either::Left(client) => client, + Either::Right(url) => C::new_client(url), + }, sender, backoff, ceiling, @@ -275,7 +284,7 @@ pub fn run_oracle( tracing::info!(?url, "Ethereum event oracle is starting"); let oracle = Oracle::::new( - &url, + Either::Right(&url), sender, last_processed_block, DEFAULT_BACKOFF, @@ -300,6 +309,75 @@ pub fn run_oracle( .with_no_cleanup() } +/// Determine what action to take after attempting to +/// process events contained in an Ethereum block. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum ProcessEventAction { + /// No events could be processed at this time, so we must keep + /// polling for new events. + ContinuePollingEvents, + /// Some error occurred while processing Ethereum events in + /// the current height. We must halt the oracle. + HaltOracle, + /// The current Ethereum block height has been processed. + /// We must advance to the next Ethereum height. + ProceedToNextBlock, +} + +impl ProcessEventAction { + /// Check whether the action commands a new block to be processed. 
+ #[inline] + pub fn process_new_block(&self) -> bool { + matches!(self, Self::ProceedToNextBlock) + } +} + +impl ProcessEventAction { + /// Handles the requested oracle action, translating it to a format + /// understood by the set of [`Sleep`] abstractions. + fn handle(self) -> ControlFlow, ()> { + match self { + ProcessEventAction::ContinuePollingEvents => { + ControlFlow::Continue(()) + } + ProcessEventAction::HaltOracle => ControlFlow::Break(Err(())), + ProcessEventAction::ProceedToNextBlock => { + ControlFlow::Break(Ok(())) + } + } + } +} + +/// Tentatively process a batch of Ethereum events. +pub(crate) async fn try_process_eth_events( + oracle: &Oracle, + config: &Config, + next_block_to_process: ðereum_structs::BlockHeight, +) -> ProcessEventAction { + process_events_in_block(next_block_to_process, oracle, config) + .await + .map_or_else( + |error| { + if oracle.client.may_recover(&error) { + tracing::debug!( + %error, + block = ?next_block_to_process, + "Error while trying to process Ethereum block" + ); + ProcessEventAction::ContinuePollingEvents + } else { + tracing::error!( + reason = %error, + block = ?next_block_to_process, + "The Ethereum oracle has disconnected" + ); + ProcessEventAction::HaltOracle + } + }, + |()| ProcessEventAction::ProceedToNextBlock, + ) +} + /// Given an oracle, watch for new Ethereum events, processing /// them into Namada native types. /// @@ -334,43 +412,8 @@ async fn run_oracle_aux(mut oracle: Oracle) { ); let res = Sleep { strategy: Constant(oracle.backoff) }.run(|| async { tokio::select! 
{ - result = process(&oracle, &config, next_block_to_process.clone()) => { - match result { - Ok(()) => { - ControlFlow::Break(Ok(())) - }, - Err( - reason @ ( - Error::Timeout - | Error::Channel(_, _) - | Error::CheckEvents(_, _, _) - ) - ) => { - // the oracle is unresponsive, we don't want the test to end - if !C::EXIT_ON_EVENTS_FAILURE - && matches!(&reason, Error::CheckEvents(_, _, _)) - { - tracing::debug!("Allowing the Ethereum oracle to keep running"); - return ControlFlow::Continue(()); - } - tracing::error!( - %reason, - block = ?next_block_to_process, - "The Ethereum oracle has disconnected" - ); - ControlFlow::Break(Err(())) - } - Err(error) => { - // this is a recoverable error, hence the debug log, - // to avoid spamming info logs - tracing::debug!( - %error, - block = ?next_block_to_process, - "Error while trying to process Ethereum block" - ); - ControlFlow::Continue(()) - } - } + action = try_process_eth_events(&oracle, &config, &next_block_to_process) => { + action.handle() }, _ = oracle.sender.closed() => { tracing::info!( @@ -400,10 +443,10 @@ async fn run_oracle_aux(mut oracle: Oracle) { /// Checks if the given block has any events relating to the bridge, and if so, /// sends them to the oracle's `sender` channel -async fn process( +async fn process_events_in_block( + block_to_process: ðereum_structs::BlockHeight, oracle: &Oracle, config: &Config, - block_to_process: ethereum_structs::BlockHeight, ) -> Result<(), Error> { let mut queue: Vec = vec![]; let pending = &mut queue; diff --git a/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs b/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs index 9a2454be17..0479445b7f 100644 --- a/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs +++ b/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs @@ -57,7 +57,7 @@ pub mod event_log { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod mock_web3_client { use std::borrow::Cow; use std::fmt::Debug; @@ -102,7 
+102,7 @@ pub mod mock_web3_client { /// reason is for interior mutability. pub struct Web3Client(Arc>); - /// Command sender for [`Web3`] instances. + /// Command sender for [`TestOracle`] instances. pub struct Web3Controller(Arc>); impl Web3Controller { @@ -148,8 +148,6 @@ pub mod mock_web3_client { impl RpcClient for Web3Client { type Log = ethabi::RawLog; - const EXIT_ON_EVENTS_FAILURE: bool = false; - #[cold] fn new_client(_: &str) -> Self where @@ -184,14 +182,15 @@ pub mod mock_web3_client { } if client.last_block_processed.as_ref() < Some(&block_to_check) { - client - .blocks_processed - .send(block_to_check.clone()) - .unwrap(); + _ = client.blocks_processed.send(block_to_check.clone()); client.last_block_processed = Some(block_to_check); } Ok(logs) } else { + tracing::debug!( + "No events to be processed by the Test Ethereum oracle, \ + as it has been artificially set as unresponsive" + ); Err(Error::CheckEvents( ty.into(), addr, @@ -209,6 +208,11 @@ pub mod mock_web3_client { let height = self.0.lock().unwrap().latest_block_height.clone(); Ok(SyncStatus::AtHeight(height)) } + + #[inline(always)] + fn may_recover(&self, _: &Error) -> bool { + true + } } impl Web3Client { diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 53252065f1..4fef1fc7ef 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -3,15 +3,17 @@ use std::collections::HashMap; use data_encoding::HEXUPPER; +use namada::core::ledger::inflation; use namada::core::ledger::pgf::ADDRESS as pgf_address; use namada::ledger::events::EventType; use namada::ledger::gas::{GasMetering, TxGasMeter}; use namada::ledger::parameters::storage as params_storage; use namada::ledger::pos::{namada_proof_of_stake, staking_token_address}; +use namada::ledger::protocol; +use namada::ledger::storage::wl_storage::WriteLogAndStorage; use 
namada::ledger::storage::EPOCH_SWITCH_BLOCKS_DELAY; use namada::ledger::storage_api::token::credit_tokens; use namada::ledger::storage_api::{pgf, StorageRead, StorageWrite}; -use namada::ledger::{inflation, protocol, replay_protection}; use namada::proof_of_stake::{ delegator_rewards_products_handle, find_validator_by_raw_hash, read_last_block_proposer_address, read_pos_params, read_total_stake, @@ -87,6 +89,14 @@ where self.wl_storage.storage.update_epoch_blocks_delay ); + // Finalize the transactions' hashes from the previous block + for hash in self.wl_storage.storage.iter_replay_protection() { + self.wl_storage + .write_log + .finalize_tx_hash(hash) + .expect("Failed tx hashes finalization") + } + if new_epoch { namada::ledger::storage::update_allowed_conversions( &mut self.wl_storage, @@ -100,6 +110,7 @@ where namada_proof_of_stake::read_pos_params(&self.wl_storage)?; namada_proof_of_stake::copy_validator_sets_and_positions( &mut self.wl_storage, + &pos_params, current_epoch, current_epoch + pos_params.pipeline_len, )?; @@ -107,10 +118,6 @@ where &mut self.wl_storage, current_epoch, )?; - namada_proof_of_stake::purge_validator_sets_for_old_epoch( - &mut self.wl_storage, - current_epoch, - )?; } // Invariant: Has to be applied before `record_slashes_from_evidence` @@ -203,24 +210,17 @@ where tx_event["gas_used"] = "0".into(); response.events.push(tx_event); // if the rejected tx was decrypted, remove it - // from the queue of txs to be processed and remove the hash - // from storage + // from the queue of txs to be processed, remove its hash + // from storage and write the hash of the corresponding wrapper if let TxType::Decrypted(_) = &tx_header.tx_type { - let tx_hash = self + let wrapper_tx = self .wl_storage .storage .tx_queue .pop() .expect("Missing wrapper tx in queue") - .tx - .clone() - .update_header(TxType::Raw) - .header_hash(); - let tx_hash_key = - replay_protection::get_replay_protection_key(&tx_hash); - self.wl_storage - .delete(&tx_hash_key) - 
.expect("Error while deleting tx hash from storage"); + .tx; + self.allow_tx_replay(wrapper_tx); } #[cfg(not(any(feature = "abciplus", feature = "abcipp")))] @@ -276,7 +276,7 @@ where continue; } - let (mut tx_event, tx_unsigned_hash, mut tx_gas_meter, wrapper) = + let (mut tx_event, embedding_wrapper, mut tx_gas_meter, wrapper) = match &tx_header.tx_type { TxType::Wrapper(wrapper) => { stats.increment_wrapper_txs(); @@ -286,7 +286,7 @@ where } TxType::Decrypted(inner) => { // We remove the corresponding wrapper tx from the queue - let mut tx_in_queue = self + let tx_in_queue = self .wl_storage .storage .tx_queue @@ -323,12 +323,7 @@ where ( event, - Some( - tx_in_queue - .tx - .update_header(TxType::Raw) - .header_hash(), - ), + Some(tx_in_queue.tx), TxGasMeter::new_from_sub_limit(tx_in_queue.gas), None, ) @@ -511,18 +506,19 @@ where // If transaction type is Decrypted and failed because of // out of gas, remove its hash from storage to allow // rewrapping it - if let Some(hash) = tx_unsigned_hash { + if let Some(wrapper) = embedding_wrapper { if let Error::TxApply(protocol::Error::GasError(_)) = msg { - let tx_hash_key = - replay_protection::get_replay_protection_key( - &hash, - ); - self.wl_storage.delete(&tx_hash_key).expect( - "Error while deleting tx hash key from storage", - ); + self.allow_tx_replay(wrapper); } + } else if let Some(wrapper) = wrapper { + // If transaction type was Wrapper and failed, write its + // hash to storage to prevent + // replay + self.wl_storage + .write_tx_hash(wrapper.header_hash()) + .expect("Error while writing tx hash to storage"); } tx_event["gas_used"] = @@ -617,10 +613,8 @@ where /// with respect to the previous epoch. fn apply_inflation(&mut self, current_epoch: Epoch) -> Result<()> { let last_epoch = current_epoch.prev(); - // Get input values needed for the PD controller for PoS and MASP. + // Get input values needed for the PD controller for PoS. // Run the PD controllers to calculate new rates. 
- // - // MASP is included below just for some completeness. let params = read_pos_params(&self.wl_storage)?; @@ -642,7 +636,7 @@ where .read_storage_key(¶ms_storage::get_pos_inflation_amount_key()) .expect("PoS inflation amount should exist in storage"); // Read from PoS storage - let total_tokens = self + let total_tokens: token::Amount = self .read_storage_key(&token::minted_balance_key( &staking_token_address(&self.wl_storage), )) @@ -652,47 +646,25 @@ where let pos_locked_ratio_target = params.target_staked_ratio; let pos_max_inflation_rate = params.max_inflation_rate; - // TODO: properly fetch these values (arbitrary for now) - let masp_locked_supply: Amount = Amount::default(); - let masp_locked_ratio_target = Dec::new(5, 1).expect("Cannot fail"); - let masp_locked_ratio_last = Dec::new(5, 1).expect("Cannot fail"); - let masp_max_inflation_rate = Dec::new(2, 1).expect("Cannot fail"); - let masp_last_inflation_rate = Dec::new(12, 2).expect("Cannot fail"); - let masp_p_gain = Dec::new(1, 1).expect("Cannot fail"); - let masp_d_gain = Dec::new(1, 1).expect("Cannot fail"); - // Run rewards PD controller let pos_controller = inflation::RewardsController { - locked_tokens: pos_locked_supply, - total_tokens, + locked_tokens: pos_locked_supply.raw_amount(), + total_tokens: total_tokens.raw_amount(), + total_native_tokens: total_tokens.raw_amount(), locked_ratio_target: pos_locked_ratio_target, locked_ratio_last: pos_last_staked_ratio, max_reward_rate: pos_max_inflation_rate, - last_inflation_amount: pos_last_inflation_amount, + last_inflation_amount: pos_last_inflation_amount.raw_amount(), p_gain_nom: pos_p_gain_nom, d_gain_nom: pos_d_gain_nom, epochs_per_year, }; - let _masp_controller = inflation::RewardsController { - locked_tokens: masp_locked_supply, - total_tokens, - locked_ratio_target: masp_locked_ratio_target, - locked_ratio_last: masp_locked_ratio_last, - max_reward_rate: masp_max_inflation_rate, - last_inflation_amount: token::Amount::from( - 
masp_last_inflation_rate, - ), - p_gain_nom: masp_p_gain, - d_gain_nom: masp_d_gain, - epochs_per_year, - }; // Run the rewards controllers let inflation::ValsToUpdate { locked_ratio, inflation, } = pos_controller.run(); - // let new_masp_vals = _masp_controller.run(); // Get the number of blocks in the last epoch let first_block_of_last_epoch = self @@ -712,6 +684,9 @@ where // for the previous epoch // // TODO: think about changing the reward to Decimal + let inflation = token::Amount::from_uint(inflation, 0) + .expect("Should not fail Uint -> Amount conversion"); + let mut reward_tokens_remaining = inflation; let mut new_rewards_products: HashMap = HashMap::new(); @@ -723,14 +698,12 @@ where let reward = fractional_claim * inflation; // Get validator data at the last epoch - let stake = read_validator_stake( + let stake = Dec::from(read_validator_stake( &self.wl_storage, ¶ms, &address, last_epoch, - )? - .map(Dec::from) - .unwrap_or_default(); + )?); let last_rewards_product = validator_rewards_products_handle(&address) .get(&self.wl_storage, &last_epoch)? 
@@ -828,13 +801,16 @@ where pgf_parameters.pgf_inflation_rate / Dec::from(epochs_per_year); let pgf_inflation = Dec::from(total_tokens) * pgf_pd_rate; + let stewards = pgf::get_stewards(&self.wl_storage)?; let pgf_stewards_pd_rate = pgf_parameters.stewards_inflation_rate / Dec::from(epochs_per_year); let pgf_steward_inflation = Dec::from(total_tokens) * pgf_stewards_pd_rate; + let total_pgf_stewards_inflation = + pgf_steward_inflation * Dec::from(stewards.len()); let pgf_inflation_amount = - token::Amount::from(pgf_inflation + pgf_steward_inflation); + token::Amount::from(pgf_inflation + total_pgf_stewards_inflation); credit_tokens( &mut self.wl_storage, @@ -877,18 +853,9 @@ where } // Pgf steward inflation - let stewards = pgf::get_stewards(&self.wl_storage)?; - - let pgf_steward_reward = match stewards.len() { - 0 => Dec::zero(), - _ => pgf_steward_inflation - .trunc_div(&Dec::from(stewards.len())) - .unwrap_or_default(), - }; - for steward in stewards { for (address, percentage) in steward.reward_distribution { - let pgf_steward_reward = pgf_steward_reward + let pgf_steward_reward = pgf_steward_inflation .checked_mul(&percentage) .unwrap_or_default(); let reward_amount = token::Amount::from(pgf_steward_reward); @@ -960,6 +927,18 @@ where } Ok(()) } + + // Allow to replay a specific wasm transaction. Needs as argument the + // corresponding wrapper transaction to avoid replay of that in the process + fn allow_tx_replay(&mut self, wrapper_tx: Tx) { + self.wl_storage + .write_tx_hash(wrapper_tx.header_hash()) + .expect("Error while deleting tx hash from storage"); + + self.wl_storage + .delete_tx_hash(wrapper_tx.raw_header_hash()) + .expect("Error while deleting tx hash from storage"); + } } /// Convert ABCI vote info to PoS vote info. 
Any info which fails the conversion @@ -1045,11 +1024,11 @@ mod test_finalize_block { use namada::core::ledger::governance::storage::vote::{ StorageProposalVote, VoteType, }; + use namada::core::ledger::replay_protection; use namada::eth_bridge::storage::bridge_pool::{ self, get_key_from_hash, get_nonce_key, get_signed_root_key, }; use namada::eth_bridge::storage::min_confirmations_key; - use namada::ledger::eth_bridge::MinimumConfirmations; use namada::ledger::gas::VpGasMeter; use namada::ledger::native_vp::parameters::ParametersVp; use namada::ledger::native_vp::NativeVp; @@ -1057,7 +1036,6 @@ mod test_finalize_block { use namada::ledger::pos::PosQueries; use namada::ledger::storage_api; use namada::ledger::storage_api::StorageWrite; - use namada::proof_of_stake::btree_set::BTreeSetShims; use namada::proof_of_stake::storage::{ is_validator_slashes_key, slashes_prefix, }; @@ -1087,6 +1065,7 @@ mod test_finalize_block { use namada::types::transaction::{Fee, WrapperTx}; use namada::types::uint::Uint; use namada::types::vote_extensions::ethereum_events; + use namada_sdk::eth_bridge::MinimumConfirmations; use namada_test_utils::TestWasms; use test_log::test; @@ -1200,10 +1179,7 @@ mod test_finalize_block { shell .wl_storage .storage - .write( - &balance_key, - Amount::native_whole(1000).try_to_vec().unwrap(), - ) + .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) .unwrap(); // create some wrapper txs @@ -1385,10 +1361,7 @@ mod test_finalize_block { shell .wl_storage .storage - .write( - &balance_key, - Amount::native_whole(1000).try_to_vec().unwrap(), - ) + .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) .unwrap(); // create two decrypted txs @@ -1638,7 +1611,7 @@ mod test_finalize_block { &KeccakHash([1; 32]), 3.into(), ); - let value = BlockHeight(4).try_to_vec().expect("Test failed"); + let value = BlockHeight(4).serialize_to_vec(); shell .wl_storage .storage @@ -1649,10 +1622,7 @@ mod test_finalize_block { shell .wl_storage 
.storage - .write( - &get_nonce_key(), - Uint::from(1).try_to_vec().expect("Test failed"), - ) + .write(&get_nonce_key(), Uint::from(1).serialize_to_vec()) .expect("Test failed"); let (tx, action) = craft_tx(&mut shell); let processed_tx = ProcessedTx { @@ -1828,10 +1798,7 @@ mod test_finalize_block { shell .wl_storage .storage - .write( - &balance_key, - Amount::native_whole(1000).try_to_vec().unwrap(), - ) + .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) .unwrap(); // Add a proposal to be executed on next epoch change. @@ -1912,7 +1879,6 @@ mod test_finalize_block { validator, Epoch::default(), ) - .unwrap() .unwrap(); let votes = vec![VoteInfo { @@ -1982,6 +1948,7 @@ mod test_finalize_block { let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { last_height: 0, num_validators: 4, + ..Default::default() }); let mut validator_set: BTreeSet = @@ -1995,10 +1962,10 @@ mod test_finalize_block { let params = read_pos_params(&shell.wl_storage).unwrap(); - let val1 = validator_set.pop_first_shim().unwrap(); - let val2 = validator_set.pop_first_shim().unwrap(); - let val3 = validator_set.pop_first_shim().unwrap(); - let val4 = validator_set.pop_first_shim().unwrap(); + let val1 = validator_set.pop_first().unwrap(); + let val2 = validator_set.pop_first().unwrap(); + let val3 = validator_set.pop_first().unwrap(); + let val4 = validator_set.pop_first().unwrap(); let get_pkh = |address, epoch| { let ck = validator_consensus_key_handle(&address) @@ -2269,15 +2236,11 @@ mod test_finalize_block { let (wrapper_tx, processed_tx) = mk_wrapper_tx(&shell, &crate::wallet::defaults::albert_keypair()); - let wrapper_hash_key = replay_protection::get_replay_protection_key( - &wrapper_tx.header_hash(), - ); - let mut decrypted_tx = wrapper_tx; - decrypted_tx.update_header(TxType::Raw); - let decrypted_hash_key = replay_protection::get_replay_protection_key( - &decrypted_tx.header_hash(), - ); + let decrypted_hash_key = + 
replay_protection::get_replay_protection_last_key( + &wrapper_tx.raw_header_hash(), + ); // merkle tree root before finalize_block let root_pre = shell.shell.wl_storage.storage.block.tree.root(); @@ -2303,20 +2266,16 @@ mod test_finalize_block { let root_post = shell.shell.wl_storage.storage.block.tree.root(); assert_eq!(root_pre.0, root_post.0); - // Check transactions' hashes in storage - assert!(shell.shell.wl_storage.has_key(&wrapper_hash_key).unwrap()); - assert!(shell.shell.wl_storage.has_key(&decrypted_hash_key).unwrap()); - // Check that non of the hashes is present in the merkle tree + // Check transaction's hash in storage assert!( - !shell + shell .shell .wl_storage - .storage - .block - .tree - .has_key(&wrapper_hash_key) - .unwrap() + .write_log + .has_replay_protection_entry(&wrapper_tx.raw_header_hash()) + .unwrap_or_default() ); + // Check that the hash is present in the merkle tree assert!( !shell .shell @@ -2329,152 +2288,173 @@ mod test_finalize_block { ); } - /// Test that if a decrypted transaction fails because of out-of-gas, its - /// hash is removed from storage to allow rewrapping it + /// Test replay protection hash handling #[test] - fn test_remove_tx_hash() { + fn test_tx_hash_handling() { let (mut shell, _, _, _) = setup(); let keypair = gen_keypair(); + let mut batch = + namada::core::ledger::storage::testing::TestStorage::batch(); - let mut wasm_path = top_level_directory(); - wasm_path.push("wasm_for_tests/tx_no_op.wasm"); - let tx_code = std::fs::read(wasm_path) - .expect("Expected a file at given code path"); - let mut wrapper_tx = + let (wrapper_tx, _) = mk_wrapper_tx(&shell, &keypair); + let (wrapper_tx_2, _) = mk_wrapper_tx(&shell, &keypair); + let mut invalid_wrapper_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::zero(), + amount_per_gas_unit: 0.into(), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), + 0.into(), 
None, )))); - wrapper_tx.header.chain_id = shell.chain_id.clone(); - wrapper_tx.set_code(Code::new(tx_code)); - wrapper_tx.set_data(Data::new( + invalid_wrapper_tx.header.chain_id = shell.chain_id.clone(); + invalid_wrapper_tx + .set_code(Code::new("wasm_code".as_bytes().to_owned())); + invalid_wrapper_tx.set_data(Data::new( "Encrypted transaction data".as_bytes().to_owned(), )); + invalid_wrapper_tx.add_section(Section::Signature(Signature::new( + invalid_wrapper_tx.sechashes(), + [(0, keypair)].into_iter().collect(), + None, + ))); + + let wrapper_hash = wrapper_tx.header_hash(); + let wrapper_2_hash = wrapper_tx_2.header_hash(); + let invalid_wrapper_hash = invalid_wrapper_tx.header_hash(); let mut decrypted_tx = wrapper_tx.clone(); + let mut decrypted_tx_2 = wrapper_tx_2.clone(); decrypted_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); + decrypted_tx_2.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); + let decrypted_hash = wrapper_tx.raw_header_hash(); + let decrypted_2_hash = wrapper_tx_2.raw_header_hash(); + let decrypted_3_hash = invalid_wrapper_tx.raw_header_hash(); + + // Write inner hashes in storage + for hash in [&decrypted_hash, &decrypted_2_hash] { + let hash_subkey = + replay_protection::get_replay_protection_last_subkey(hash); + shell + .wl_storage + .storage + .write_replay_protection_entry(&mut batch, &hash_subkey) + .expect("Test failed"); + } - // Write inner hash in storage - let inner_hash_key = replay_protection::get_replay_protection_key( - &wrapper_tx.clone().update_header(TxType::Raw).header_hash(), - ); - shell - .wl_storage - .storage - .write(&inner_hash_key, vec![]) - .expect("Test failed"); - - let processed_tx = ProcessedTx { + // Invalid wrapper tx that should lead to a commitment of the wrapper + // hash and no commitment of the inner hash + let mut processed_txs = vec![ProcessedTx { + tx: invalid_wrapper_tx.to_bytes(), + result: TxResult { + code: ErrorCodes::Ok.into(), + info: "".into(), + }, + }]; + // Out 
of gas error triggering inner hash removal and wrapper hash + // insert + processed_txs.push(ProcessedTx { tx: decrypted_tx.to_bytes(), result: TxResult { code: ErrorCodes::Ok.into(), info: "".into(), }, - }; + }); + // Wasm error that still leads to inner hash commitment and no wrapper + // hash insert + processed_txs.push(ProcessedTx { + tx: decrypted_tx_2.to_bytes(), + result: TxResult { + code: ErrorCodes::Ok.into(), + info: "".into(), + }, + }); shell.enqueue_tx(wrapper_tx, Gas::default()); + shell.enqueue_tx(wrapper_tx_2, GAS_LIMIT_MULTIPLIER.into()); // merkle tree root before finalize_block let root_pre = shell.shell.wl_storage.storage.block.tree.root(); let event = &shell .finalize_block(FinalizeBlock { - txs: vec![processed_tx], + txs: processed_txs, ..Default::default() }) - .expect("Test failed")[0]; + .expect("Test failed"); // the merkle tree root should not change after finalize_block let root_post = shell.shell.wl_storage.storage.block.tree.root(); assert_eq!(root_pre.0, root_post.0); - // Check inner tx hash has been removed from storage - assert_eq!(event.event_type.to_string(), String::from("applied")); - let code = event.attributes.get("code").expect("Testfailed").as_str(); + // Check first inner tx hash has been removed from storage but + // corresponding wrapper hash is still there Check second inner + // tx is still there and corresponding wrapper hash has been removed + // since useless + assert_eq!(event[0].event_type.to_string(), String::from("accepted")); + let code = event[0] + .attributes + .get("code") + .expect("Test failed") + .as_str(); + assert_eq!(code, String::from(ErrorCodes::InvalidTx).as_str()); + assert_eq!(event[1].event_type.to_string(), String::from("applied")); + let code = event[1] + .attributes + .get("code") + .expect("Test failed") + .as_str(); + assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); + assert_eq!(event[2].event_type.to_string(), String::from("applied")); + let code = event[2] + 
.attributes + .get("code") + .expect("Test failed") + .as_str(); assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); + assert!( + shell + .wl_storage + .write_log + .has_replay_protection_entry(&invalid_wrapper_hash) + .unwrap_or_default() + ); assert!( !shell .wl_storage - .has_key(&inner_hash_key) - .expect("Test failed") - ) - } - - #[test] - /// Test that the hash of the wrapper transaction is committed to storage - /// even if the wrapper tx fails. The inner transaction hash must instead be - /// removed - fn test_commits_hash_if_wrapper_failure() { - let (mut shell, _, _, _) = setup(); - let keypair = gen_keypair(); - - let mut wrapper = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: 0.into(), - token: shell.wl_storage.storage.native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - 0.into(), - None, - )))); - wrapper.header.chain_id = shell.chain_id.clone(); - wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned())); - wrapper.set_data(Data::new( - "Encrypted transaction data".as_bytes().to_owned(), - )); - wrapper.add_section(Section::Signature(Signature::new( - wrapper.sechashes(), - [(0, keypair)].into_iter().collect(), - None, - ))); - - let wrapper_hash_key = replay_protection::get_replay_protection_key( - &wrapper.header_hash(), + .write_log + .has_replay_protection_entry(&decrypted_3_hash) + .unwrap_or_default() ); - let inner_hash_key = replay_protection::get_replay_protection_key( - &wrapper.clone().update_header(TxType::Raw).header_hash(), + assert!( + !shell + .wl_storage + .write_log + .has_replay_protection_entry(&decrypted_hash) + .unwrap_or_default() ); - - let processed_tx = ProcessedTx { - tx: wrapper.to_bytes(), - result: TxResult { - code: ErrorCodes::Ok.into(), - info: "".into(), - }, - }; - - let event = &shell - .finalize_block(FinalizeBlock { - txs: vec![processed_tx], - ..Default::default() - }) - .expect("Test failed")[0]; - - // Check wrapper hash has been 
committed to storage even if it failed. - // Check that, instead, the inner hash has been removed - assert_eq!(event.event_type.to_string(), String::from("accepted")); - let code = event.attributes.get("code").expect("Testfailed").as_str(); - assert_eq!(code, String::from(ErrorCodes::InvalidTx).as_str()); - assert!( shell .wl_storage - .has_key(&wrapper_hash_key) - .expect("Test failed") + .write_log + .has_replay_protection_entry(&wrapper_hash) + .unwrap_or_default() + ); + assert!( + shell + .wl_storage + .storage + .has_replay_protection_entry(&decrypted_2_hash) + .expect("test failed") ); assert!( !shell .wl_storage - .has_key(&inner_hash_key) - .expect("Test failed") - ) + .write_log + .has_replay_protection_entry(&wrapper_2_hash) + .unwrap_or_default() + ); } // Test that if the fee payer doesn't have enough funds for fee payment the @@ -2651,10 +2631,11 @@ mod test_finalize_block { let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { last_height: 0, num_validators, + ..Default::default() }); let mut params = read_pos_params(&shell.wl_storage).unwrap(); - params.unbonding_len = 4; - write_pos_params(&mut shell.wl_storage, params.clone())?; + params.owned.unbonding_len = 4; + write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; let validator_set: Vec = read_consensus_validator_set_addresses_with_stake( @@ -2831,15 +2812,13 @@ mod test_finalize_block { ¶ms, &val1.address, shell.wl_storage.storage.block.epoch, - )? - .unwrap(); + )?; let stake2 = read_validator_stake( &shell.wl_storage, ¶ms, &val2.address, shell.wl_storage.storage.block.epoch, - )? - .unwrap(); + )?; let total_stake = read_total_stake( &shell.wl_storage, ¶ms, @@ -2930,21 +2909,35 @@ mod test_finalize_block { ¶ms, &val1.address, pipeline_epoch, - )? - .unwrap(); + )?; let stake2 = read_validator_stake( &shell.wl_storage, ¶ms, &val2.address, pipeline_epoch, - )? 
- .unwrap(); + )?; let total_stake = read_total_stake(&shell.wl_storage, ¶ms, pipeline_epoch)?; - let expected_slashed = cubic_rate * initial_stake; - assert_eq!(stake1, initial_stake - expected_slashed); - assert_eq!(stake2, initial_stake - expected_slashed); + let expected_slashed = initial_stake.mul_ceil(cubic_rate); + + println!( + "Initial stake = {}\nCubic rate = {}\nExpected slashed = {}\n", + initial_stake.to_string_native(), + cubic_rate, + expected_slashed.to_string_native() + ); + + assert!( + (stake1.change() - (initial_stake - expected_slashed).change()) + .abs() + <= 1.into() + ); + assert!( + (stake2.change() - (initial_stake - expected_slashed).change()) + .abs() + <= 1.into() + ); assert_eq!(total_stake, total_initial_stake - 2u64 * expected_slashed); // Unjail one of the validators @@ -3019,7 +3012,6 @@ mod test_finalize_block { /// 4) Self-unbond 15_000 /// 5) Delegate 8_144 to validator /// 6) Discover misbehavior in epoch 3 - /// 7) Discover misbehavior in epoch 3 /// 7) Discover misbehavior in epoch 4 fn test_multiple_misbehaviors_by_num_vals( num_validators: u64, @@ -3029,11 +3021,12 @@ mod test_finalize_block { let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { last_height: 0, num_validators, + ..Default::default() }); let mut params = read_pos_params(&shell.wl_storage).unwrap(); - params.unbonding_len = 4; - params.max_validator_slots = 4; - write_pos_params(&mut shell.wl_storage, params.clone())?; + params.owned.unbonding_len = 4; + params.owned.max_validator_slots = 4; + write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; // Slash pool balance let nam_address = shell.wl_storage.storage.native_token.clone(); @@ -3046,7 +3039,7 @@ mod test_finalize_block { .read(&slash_balance_key) .expect("must be able to read") .unwrap_or_default(); - debug_assert_eq!(slash_pool_balance_init, token::Amount::default()); + debug_assert_eq!(slash_pool_balance_init, token::Amount::zero()); let consensus_set: Vec = 
read_consensus_validator_set_addresses_with_stake( @@ -3108,6 +3101,7 @@ mod test_finalize_block { &val1.address, self_unbond_1_amount, current_epoch, + false, ) .unwrap(); @@ -3117,8 +3111,7 @@ mod test_finalize_block { &val1.address, current_epoch + params.pipeline_len, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let total_stake = namada_proof_of_stake::read_total_stake( &shell.wl_storage, @@ -3151,6 +3144,7 @@ mod test_finalize_block { &val1.address, del_unbond_1_amount, current_epoch, + false, ) .unwrap(); @@ -3160,8 +3154,7 @@ mod test_finalize_block { &val1.address, current_epoch + params.pipeline_len, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let total_stake = namada_proof_of_stake::read_total_stake( &shell.wl_storage, ¶ms, @@ -3216,6 +3209,7 @@ mod test_finalize_block { &val1.address, self_unbond_2_amount, current_epoch, + false, ) .unwrap(); @@ -3392,8 +3386,7 @@ mod test_finalize_block { &val1.address, Epoch(10), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); assert_eq!( pre_stake_10, initial_stake + del_1_amount @@ -3426,16 +3419,14 @@ mod test_finalize_block { &val1.address, Epoch(3), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let val_stake_4 = namada_proof_of_stake::read_validator_stake( &shell.wl_storage, ¶ms, &val1.address, Epoch(4), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let tot_stake_3 = namada_proof_of_stake::read_total_stake( &shell.wl_storage, @@ -3477,31 +3468,33 @@ mod test_finalize_block { // Check the amount of stake deducted from the futuremost epoch while // processing the slashes - let post_stake_10 = namada_proof_of_stake::read_validator_stake( + let post_stake_10 = read_validator_stake( &shell.wl_storage, ¶ms, &val1.address, Epoch(10), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // The amount unbonded after the infraction that affected the deltas // before processing is `del_unbond_1_amount + self_bond_1_amount - // self_unbond_2_amount` (since this self-bond was enacted then unbonded // 
all after the infraction). Thus, the additional deltas to be // deducted is the (infraction stake - this) * rate let slash_rate_3 = std::cmp::min(Dec::one(), Dec::two() * cubic_rate); - let exp_slashed_during_processing_9 = slash_rate_3 - * (initial_stake + del_1_amount - - self_unbond_1_amount - - del_unbond_1_amount - + self_bond_1_amount - - self_unbond_2_amount); + let exp_slashed_during_processing_9 = (initial_stake + del_1_amount + - self_unbond_1_amount + - del_unbond_1_amount + + self_bond_1_amount + - self_unbond_2_amount) + .mul_ceil(slash_rate_3); assert!( ((pre_stake_10 - post_stake_10).change() - exp_slashed_during_processing_9.change()) .abs() - < Uint::from(1000) + < Uint::from(1000), + "Expected {}, got {} (with less than 1000 err)", + exp_slashed_during_processing_9.to_string_native(), + (pre_stake_10 - post_stake_10).to_string_native(), ); // Check that we can compute the stake at the pipeline epoch @@ -3518,7 +3511,11 @@ mod test_finalize_block { assert!( exp_pipeline_stake.abs_diff(&Dec::from(post_stake_10)) - <= Dec::new(1, NATIVE_MAX_DECIMAL_PLACES).unwrap() + <= Dec::new(2, NATIVE_MAX_DECIMAL_PLACES).unwrap(), + "Expected {}, got {} (with less than 2 err), diff {}", + exp_pipeline_stake, + post_stake_10.to_string_native(), + exp_pipeline_stake.abs_diff(&Dec::from(post_stake_10)), ); // Check the balance of the Slash Pool @@ -3535,15 +3532,6 @@ mod test_finalize_block { // ); // assert_eq!(slash_pool_balance, exp_slashed_3); - let _pre_stake_11 = namada_proof_of_stake::read_validator_stake( - &shell.wl_storage, - ¶ms, - &val1.address, - Epoch(10), - ) - .unwrap() - .unwrap_or_default(); - // Advance to epoch 10, where the infraction committed in epoch 4 will // be processed let votes = get_default_true_votes( @@ -3562,7 +3550,7 @@ mod test_finalize_block { // .unwrap_or_default(); // let exp_slashed_4 = if dec!(2) * cubic_rate >= Decimal::ONE { - // token::Amount::default() + // token::Amount::zero() // } else if dec!(3) * cubic_rate >= 
Decimal::ONE { // decimal_mult_amount( // Decimal::ONE - dec!(2) * cubic_rate, @@ -3587,19 +3575,27 @@ mod test_finalize_block { ¶ms, &val1.address, current_epoch + params.pipeline_len, - )? - .unwrap_or_default(); + )?; - let post_stake_11 = namada_proof_of_stake::read_validator_stake( + let post_stake_10 = read_validator_stake( &shell.wl_storage, ¶ms, &val1.address, Epoch(10), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); + + // Stake at current epoch should be equal to stake at pipeline + assert_eq!( + post_stake_10, + val_stake, + "Stake at pipeline in epoch {} ({}) expected to be equal to stake \ + in epoch 10 ({}).", + current_epoch + params.pipeline_len, + val_stake.to_string_native(), + post_stake_10.to_string_native() + ); - assert_eq!(post_stake_11, val_stake); // dbg!(&val_stake); // dbg!(pre_stake_10 - post_stake_10); @@ -3743,12 +3739,16 @@ mod test_finalize_block { self_details.unbonds[1].amount, self_unbond_2_amount - self_bond_1_amount ); - assert_eq!( - self_details.unbonds[1].slashed_amount, - Some( - std::cmp::min(Dec::one(), Dec::new(3, 0).unwrap() * cubic_rate) - * (self_unbond_2_amount - self_bond_1_amount) - ) + let rate = + std::cmp::min(Dec::one(), Dec::new(3, 0).unwrap() * cubic_rate); + assert!( + // at most off by 1 + (self_details.unbonds[1].slashed_amount.unwrap().change() + - (self_unbond_2_amount - self_bond_1_amount) + .mul_ceil(rate) + .change()) + .abs() + <= Uint::from(1) ); assert_eq!(self_details.unbonds[2].amount, self_bond_1_amount); assert_eq!(self_details.unbonds[2].slashed_amount, None); @@ -3766,10 +3766,12 @@ mod test_finalize_block { .unwrap(); let exp_del_withdraw_slashed_amount = - slash_rate_3 * del_unbond_1_amount; - assert_eq!( - del_withdraw, - del_unbond_1_amount - exp_del_withdraw_slashed_amount + del_unbond_1_amount.mul_ceil(slash_rate_3); + assert!( + (del_withdraw + - (del_unbond_1_amount - exp_del_withdraw_slashed_amount)) + .raw_amount() + <= Uint::one() ); // TODO: finish once implemented @@ -3823,6 
+3825,180 @@ mod test_finalize_block { Ok(()) } + #[test] + fn test_purge_validator_information() -> storage_api::Result<()> { + // Setup the network with pipeline_len = 2, unbonding_len = 4 + let num_validators = 4_u64; + let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { + last_height: 0, + num_validators, + ..Default::default() + }); + let mut params = read_pos_params(&shell.wl_storage).unwrap(); + params.owned.unbonding_len = 4; + // params.owned.max_validator_slots = 3; + // params.owned.validator_stake_threshold = token::Amount::zero(); + write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; + + let max_proposal_period = params.max_proposal_period; + let default_past_epochs = 2; + let consensus_val_set_len = max_proposal_period + default_past_epochs; + + let consensus_val_set = + namada_proof_of_stake::consensus_validator_set_handle(); + // let below_cap_val_set = + // namada_proof_of_stake::below_capacity_validator_set_handle(); + let validator_positions = + namada_proof_of_stake::validator_set_positions_handle(); + let all_validator_addresses = + namada_proof_of_stake::validator_addresses_handle(); + + let consensus_set: Vec = + read_consensus_validator_set_addresses_with_stake( + &shell.wl_storage, + Epoch::default(), + ) + .unwrap() + .into_iter() + .collect(); + let val1 = consensus_set[0].clone(); + let pkh1 = get_pkh_from_address( + &shell.wl_storage, + ¶ms, + val1.address, + Epoch::default(), + ); + + // Finalize block 1 + next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None); + + let votes = get_default_true_votes(&shell.wl_storage, Epoch::default()); + assert!(!votes.is_empty()); + + let check_is_data = |storage: &WlStorage<_, _>, + start: Epoch, + end: Epoch| { + for ep in Epoch::iter_bounds_inclusive(start, end) { + assert!(!consensus_val_set.at(&ep).is_empty(storage).unwrap()); + // assert!(!below_cap_val_set.at(&ep).is_empty(storage). 
+ // unwrap()); + assert!( + !validator_positions.at(&ep).is_empty(storage).unwrap() + ); + assert!( + !all_validator_addresses.at(&ep).is_empty(storage).unwrap() + ); + } + }; + + // Check that there is validator data for epochs 0 - pipeline_len + check_is_data(&shell.wl_storage, Epoch(0), Epoch(params.pipeline_len)); + + // Advance to epoch `default_past_epochs` + let mut current_epoch = Epoch(0); + for _ in 0..default_past_epochs { + let votes = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None); + } + assert_eq!(shell.wl_storage.storage.block.epoch.0, default_past_epochs); + assert_eq!(current_epoch.0, default_past_epochs); + + check_is_data( + &shell.wl_storage, + Epoch(0), + Epoch(params.pipeline_len + default_past_epochs), + ); + + // Advance one more epoch, which should purge the data for epoch 0 in + // everything except the consensus validator set + let votes = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None); + assert_eq!(current_epoch.0, default_past_epochs + 1); + + check_is_data( + &shell.wl_storage, + Epoch(1), + Epoch(params.pipeline_len + default_past_epochs + 1), + ); + assert!( + !consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + assert!( + validator_positions + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + assert!( + all_validator_addresses + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + + // Advance to the epoch `consensus_val_set_len` + 1 + loop { + assert!( + !consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + let votes = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None); + if current_epoch.0 == consensus_val_set_len + 1 { + 
break; + } + } + + assert!( + consensus_val_set + .at(&Epoch(0)) + .is_empty(&shell.wl_storage) + .unwrap() + ); + + // Advance one more epoch + let votes = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None); + for ep in Epoch::default().iter_range(2) { + assert!( + consensus_val_set + .at(&ep) + .is_empty(&shell.wl_storage) + .unwrap() + ); + } + for ep in Epoch::iter_bounds_inclusive( + Epoch(2), + current_epoch + params.pipeline_len, + ) { + assert!( + !consensus_val_set + .at(&ep) + .is_empty(&shell.wl_storage) + .unwrap() + ); + } + + Ok(()) + } + fn get_default_true_votes(storage: &S, epoch: Epoch) -> Vec where S: StorageRead, diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index 3cd4cf2757..8f2d3a9b4c 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -74,6 +74,10 @@ where )?; let proposal_result = compute_proposal_result(votes, total_voting_power, tally_type); + let proposal_result_key = gov_storage::get_proposal_result_key(id); + shell + .wl_storage + .write(&proposal_result_key, proposal_result)?; let transfer_address = match proposal_result.result { TallyResult::Passed => { @@ -233,7 +237,7 @@ where source: delegator.clone(), validator: validator.clone(), }; - let (_, delegator_stake) = + let delegator_stake = bond_amount(storage, &bond_id, epoch).unwrap_or_default(); delegators_vote.insert(delegator.clone(), vote_data.into()); diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index d6b2efe4dd..2554349680 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -2,9 +2,8 @@ use std::collections::HashMap; use std::hash::Hash; -use namada::ledger::eth_bridge::EthBridgeStatus; use namada::ledger::parameters::{self, Parameters}; 
-use namada::ledger::pos::{staking_token_address, PosParams}; +use namada::ledger::pos::{staking_token_address, OwnedPosParams}; use namada::ledger::storage::traits::StorageHasher; use namada::ledger::storage::{DBIter, DB}; use namada::ledger::storage_api::token::{ @@ -17,6 +16,7 @@ use namada::types::hash::Hash as CodeHash; use namada::types::key::*; use namada::types::time::{DateTimeUtc, TimeZone, Utc}; use namada::vm::validate_untrusted_wasm; +use namada_sdk::eth_bridge::EthBridgeStatus; use super::*; use crate::facade::tendermint_proto::google::protobuf; @@ -53,7 +53,7 @@ where genesis::genesis(&self.base_dir, &self.wl_storage.storage.chain_id); #[cfg(not(any(test, feature = "dev")))] { - let genesis_bytes = genesis.try_to_vec().unwrap(); + let genesis_bytes = genesis.serialize_to_vec(); let errors = self.wl_storage.storage.chain_id.validate(genesis_bytes); use itertools::Itertools; @@ -210,7 +210,7 @@ where self.wl_storage .write_bytes( &namada::eth_bridge::storage::active_key(), - EthBridgeStatus::Disabled.try_to_vec().unwrap(), + EthBridgeStatus::Disabled.serialize_to_vec(), ) .unwrap(); } @@ -330,14 +330,41 @@ where address, denom, balances, + parameters, + last_inflation, + last_locked_ratio, } in accounts { + // Init token parameters and last inflation and caching rates + parameters.init_storage(&address, &mut self.wl_storage); + self.wl_storage + .write( + &token::masp_last_inflation_key(&address), + last_inflation, + ) + .unwrap(); + self.wl_storage + .write( + &token::masp_last_locked_ratio_key(&address), + last_locked_ratio, + ) + .unwrap(); // associate a token with its denomination. 
write_denom(&mut self.wl_storage, &address, denom).unwrap(); + + let mut total_balance_for_token = token::Amount::default(); for (owner, amount) in balances { + total_balance_for_token += amount; credit_tokens(&mut self.wl_storage, &address, &owner, amount) .unwrap(); } + // Write the total amount of tokens for the ratio + self.wl_storage + .write( + &token::minted_balance_key(&address), + total_balance_for_token, + ) + .unwrap(); } } @@ -394,10 +421,6 @@ where ) .unwrap(); - self.wl_storage - .write(&protocol_pk_key(addr), &validator.protocol_key) - .expect("Unable to set genesis user protocol public key"); - self.wl_storage .write( &dkg_session_keys::dkg_pk_key(addr), @@ -412,7 +435,7 @@ where &mut self, staking_token: &Address, validators: Vec, - pos_params: &PosParams, + pos_params: &OwnedPosParams, ) -> Result { let mut response = response::InitChain::default(); // PoS system depends on epoch being initialized. Write the total diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index a1c17fe450..ab8ebd5b05 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -26,11 +26,11 @@ use std::path::{Path, PathBuf}; #[allow(unused_imports)] use std::rc::Rc; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use masp_primitives::transaction::Transaction; use namada::core::hints; use namada::core::ledger::eth_bridge; -use namada::ledger::eth_bridge::{EthBridgeQueries, EthereumOracleConfig}; use namada::ledger::events::log::EventLog; use namada::ledger::events::Event; use namada::ledger::gas::{Gas, TxGasMeter}; @@ -42,13 +42,14 @@ use namada::ledger::protocol::{ apply_wasm_tx, get_fee_unshielding_transaction, get_transfer_hash_from_storage, ShellParams, }; +use namada::ledger::storage::wl_storage::WriteLogAndStorage; use namada::ledger::storage::write_log::WriteLog; use namada::ledger::storage::{ DBIter, Sha256Hasher, Storage, 
StorageHasher, TempWlStorage, WlStorage, DB, EPOCH_SWITCH_BLOCKS_DELAY, }; use namada::ledger::storage_api::{self, StorageRead}; -use namada::ledger::{parameters, pos, protocol, replay_protection}; +use namada::ledger::{parameters, pos, protocol}; use namada::proof_of_stake::{self, process_slashes, read_pos_params, slash}; use namada::proto::{self, Section, Tx}; use namada::types::address::Address; @@ -63,9 +64,10 @@ use namada::types::transaction::{ hash_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx, EllipticCurve, PairingEngine, TxType, WrapperTx, }; -use namada::types::{address, hash, token}; +use namada::types::{address, token}; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::{WasmCacheAccess, WasmCacheRwAccess}; +use namada_sdk::eth_bridge::{EthBridgeQueries, EthereumOracleConfig}; use num_derive::{FromPrimitive, ToPrimitive}; use num_traits::{FromPrimitive, ToPrimitive}; use thiserror::Error; @@ -93,10 +95,10 @@ fn key_to_tendermint( ) -> std::result::Result { match pk { common::PublicKey::Ed25519(_) => ed25519::PublicKey::try_from_pk(pk) - .map(|pk| public_key::Sum::Ed25519(pk.try_to_vec().unwrap())), + .map(|pk| public_key::Sum::Ed25519(pk.serialize_to_vec())), common::PublicKey::Secp256k1(_) => { secp256k1::PublicKey::try_from_pk(pk) - .map(|pk| public_key::Sum::Secp256k1(pk.try_to_vec().unwrap())) + .map(|pk| public_key::Sum::Secp256k1(pk.serialize_to_vec())) } } } @@ -146,22 +148,19 @@ impl From for TxResult { #[derive(Debug, Copy, Clone, FromPrimitive, ToPrimitive, PartialEq, Eq)] pub enum ErrorCodes { Ok = 0, - InvalidDecryptedChainId = 1, - ExpiredDecryptedTx = 2, - DecryptedTxGasLimit = 3, - WasmRuntimeError = 4, - InvalidTx = 5, - InvalidSig = 6, - InvalidOrder = 7, - ExtraTxs = 8, - Undecryptable = 9, - AllocationError = 10, - ReplayTx = 11, - InvalidChainId = 12, - ExpiredTx = 13, - TxGasLimit = 14, - FeeError = 15, - InvalidVoteExtension = 16, + WasmRuntimeError = 1, + InvalidTx = 2, + InvalidSig = 3, + InvalidOrder = 4, + 
ExtraTxs = 5, + Undecryptable = 6, + AllocationError = 7, + ReplayTx = 8, + InvalidChainId = 9, + ExpiredTx = 10, + TxGasLimit = 11, + FeeError = 12, + InvalidVoteExtension = 13, } impl ErrorCodes { @@ -172,11 +171,7 @@ impl ErrorCodes { // NOTE: pattern match on all `ErrorCodes` variants, in order // to catch potential bugs when adding new codes match self { - Ok - | InvalidDecryptedChainId - | ExpiredDecryptedTx - | WasmRuntimeError - | DecryptedTxGasLimit => true, + Ok | WasmRuntimeError => true, InvalidTx | InvalidSig | InvalidOrder | ExtraTxs | Undecryptable | AllocationError | ReplayTx | InvalidChainId | ExpiredTx | TxGasLimit | FeeError | InvalidVoteExtension => false, @@ -825,7 +820,15 @@ where ); response.data = root.0.to_vec(); - // validator specific actions + self.bump_last_processed_eth_block(); + self.broadcast_queued_txs(); + + response + } + + /// Updates the Ethereum oracle's last processed block. + #[inline] + fn bump_last_processed_eth_block(&mut self) { if let ShellMode::Validator { eth_oracle: Some(eth_oracle), .. @@ -851,20 +854,17 @@ where blocks" ), } - - // broadcast any queued txs - self.broadcast_queued_txs(); } - - response } /// Empties all the ledger's queues of transactions to be broadcasted /// via CometBFT's P2P network. #[inline] fn broadcast_queued_txs(&mut self) { - self.broadcast_protocol_txs(); - self.broadcast_expired_txs(); + if let ShellMode::Validator { .. } = &self.mode { + self.broadcast_protocol_txs(); + self.broadcast_expired_txs(); + } } /// Broadcast any pending protocol transactions. 
@@ -929,51 +929,39 @@ where pub fn replay_protection_checks( &self, wrapper: &Tx, - tx_bytes: &[u8], temp_wl_storage: &mut TempWlStorage, ) -> Result<()> { - let inner_tx_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); - let inner_hash_key = - replay_protection::get_replay_protection_key(&inner_tx_hash); + let wrapper_hash = wrapper.header_hash(); if temp_wl_storage - .has_key(&inner_hash_key) - .expect("Error while checking inner tx hash key in storage") + .has_replay_protection_entry(&wrapper_hash) + .expect("Error while checking wrapper tx hash key in storage") { return Err(Error::ReplayAttempt(format!( - "Inner transaction hash {} already in storage", - &inner_tx_hash, + "Wrapper transaction hash {} already in storage", + wrapper_hash ))); } - // Write inner hash to tx WAL + // Write wrapper hash to WAL temp_wl_storage - .write_log - .write(&inner_hash_key, vec![]) - .expect("Couldn't write inner transaction hash to write log"); + .write_tx_hash(wrapper_hash) + .map_err(|e| Error::ReplayAttempt(e.to_string()))?; - let tx = - Tx::try_from(tx_bytes).expect("Deserialization shouldn't fail"); - let wrapper_hash = tx.header_hash(); - let wrapper_hash_key = - replay_protection::get_replay_protection_key(&wrapper_hash); + let inner_tx_hash = wrapper.raw_header_hash(); if temp_wl_storage - .has_key(&wrapper_hash_key) - .expect("Error while checking wrapper tx hash key in storage") + .has_replay_protection_entry(&inner_tx_hash) + .expect("Error while checking inner tx hash key in storage") { return Err(Error::ReplayAttempt(format!( - "Wrapper transaction hash {} already in storage", - wrapper_hash + "Inner transaction hash {} already in storage", + &inner_tx_hash, ))); } - // Write wrapper hash to tx WAL + // Write inner hash to WAL temp_wl_storage - .write_log - .write(&wrapper_hash_key, vec![]) - .expect("Couldn't write wrapper tx hash to write log"); - - Ok(()) + .write_tx_hash(inner_tx_hash) + .map_err(|e| Error::ReplayAttempt(e.to_string())) } 
/// If a handle to an Ethereum oracle was provided to the [`Shell`], attempt @@ -1089,22 +1077,19 @@ where } }; - let tx_chain_id = tx.header.chain_id.clone(); - let tx_expiration = tx.header.expiration; - // Tx chain id - if tx_chain_id != self.chain_id { + if tx.header.chain_id != self.chain_id { response.code = ErrorCodes::InvalidChainId.into(); response.log = format!( "{INVALID_MSG}: Tx carries a wrong chain id: expected {}, \ found {}", - self.chain_id, tx_chain_id + self.chain_id, tx.header.chain_id ); return response; } // Tx expiration - if let Some(exp) = tx_expiration { + if let Some(exp) = tx.header.expiration { let last_block_timestamp = self.get_block_timestamp(None); if last_block_timestamp > exp { @@ -1263,17 +1248,12 @@ where } // Replay protection check - let mut inner_tx = tx; - inner_tx.update_header(TxType::Raw); - let inner_tx_hash = &inner_tx.header_hash(); - let inner_hash_key = - replay_protection::get_replay_protection_key(inner_tx_hash); + let inner_tx_hash = tx.raw_header_hash(); if self .wl_storage .storage - .has_key(&inner_hash_key) + .has_replay_protection_entry(&tx.raw_header_hash()) .expect("Error while checking inner tx hash key in storage") - .0 { response.code = ErrorCodes::ReplayTx.into(); response.log = format!( @@ -1286,17 +1266,14 @@ where let tx = Tx::try_from(tx_bytes) .expect("Deserialization shouldn't fail"); - let wrapper_hash = hash::Hash(tx.header_hash().0); - let wrapper_hash_key = - replay_protection::get_replay_protection_key(&wrapper_hash); + let wrapper_hash = &tx.header_hash(); if self .wl_storage .storage - .has_key(&wrapper_hash_key) + .has_replay_protection_entry(wrapper_hash) .expect( "Error while checking wrapper tx hash key in storage", ) - .0 { response.code = ErrorCodes::ReplayTx.into(); response.log = format!( @@ -1612,11 +1589,11 @@ mod test_utils { ref sig, ref recovery_id, )) => { - let mut sig_bytes = sig.serialize(); - let recovery_id_bytes = recovery_id.serialize(); + let mut sig_bytes = 
sig.to_vec(); + let recovery_id_bytes = recovery_id.to_byte(); sig_bytes[0] = sig_bytes[0].wrapping_add(1); let bytes: [u8; 65] = - [sig_bytes.as_slice(), [recovery_id_bytes].as_slice()] + [sig_bytes.as_slice(), &[recovery_id_bytes]] .concat() .try_into() .unwrap(); @@ -1867,6 +1844,8 @@ mod test_utils { /// The number of validators to configure // in `InitChain`. pub num_validators: u64, + /// Whether to enable the Ethereum oracle or not. + pub enable_ethereum_oracle: bool, } impl Default for SetupCfg { @@ -1874,6 +1853,7 @@ mod test_utils { Self { last_height: H::default(), num_validators: 1, + enable_ethereum_oracle: true, } } } @@ -1885,6 +1865,7 @@ mod test_utils { SetupCfg { last_height, num_validators, + enable_ethereum_oracle, }: SetupCfg, ) -> ( TestShell, @@ -1892,8 +1873,14 @@ mod test_utils { Sender, Receiver, ) { - let (mut test, receiver, eth_receiver, control_receiver) = + let (mut test, receiver, eth_sender, control_receiver) = TestShell::new_at_height(last_height); + if !enable_ethereum_oracle { + if let ShellMode::Validator { eth_oracle, .. 
} = &mut test.mode { + // drop the eth oracle event receiver + _ = eth_oracle.take(); + } + } test.init_chain( RequestInitChain { time: Some(Timestamp { @@ -1906,7 +1893,7 @@ mod test_utils { num_validators, ); test.wl_storage.commit_block().expect("Test failed"); - (test, receiver, eth_receiver, control_receiver) + (test, receiver, eth_sender, control_receiver) } /// Same as [`setup_at_height`], but returns a shell at the given block @@ -1973,13 +1960,14 @@ mod test_utils { .wl_storage .write_bytes( &active_key(), - EthBridgeStatus::Disabled.try_to_vec().expect("Test failed"), + EthBridgeStatus::Disabled.serialize_to_vec(), ) .expect("Test failed"); } /// We test that on shell shutdown, the tx queue gets persisted in a DB, and /// on startup it is read successfully + #[cfg(feature = "testing")] #[test] fn test_tx_queue_persistence() { let base_dir = tempdir().unwrap().as_ref().canonicalize().unwrap(); @@ -2017,6 +2005,7 @@ mod test_utils { .storage .begin_block(BlockHash::default(), BlockHeight(1)) .expect("begin_block failed"); + token::testing::init_token_storage(&mut shell.wl_storage, 60); let keypair = gen_keypair(); // enqueue a wrapper tx let mut wrapper = @@ -2126,23 +2115,65 @@ mod test_utils { } } -#[cfg(all(test, not(feature = "abcipp")))] -mod abciplus_mempool_tests { +#[cfg(test)] +mod shell_tests { + use namada::core::ledger::replay_protection; use namada::proto::{ - Data, Section, SignableEthMessage, Signature, Signed, Tx, + Code, Data, Section, SignableEthMessage, Signature, Signed, Tx, }; use namada::types::ethereum_events::EthereumEvent; use namada::types::key::RefTo; - use namada::types::storage::BlockHeight; + use namada::types::storage::{BlockHeight, Epoch}; use namada::types::transaction::protocol::{ ethereum_tx_data_variants, ProtocolTx, ProtocolTxType, }; + use namada::types::transaction::{Fee, WrapperTx}; use namada::types::vote_extensions::{bridge_pool_roots, ethereum_events}; use super::*; use crate::node::ledger::shell::test_utils; use 
crate::wallet; + const GAS_LIMIT_MULTIPLIER: u64 = 100_000; + + /// Check that the shell broadcasts validator set updates, + /// even when the Ethereum oracle is not running (e.g. + /// because the bridge is disabled). + #[tokio::test] + async fn test_broadcast_valset_upd_inspite_oracle_off() { + // this height should result in a validator set + // update being broadcasted + let (mut shell, mut broadcaster_rx, _, _) = + test_utils::setup_with_cfg(test_utils::SetupCfg { + last_height: 1, + enable_ethereum_oracle: false, + ..Default::default() + }); + + // broadcast validator set update + shell.broadcast_protocol_txs(); + + // check data inside tx - it should be a validator set update + // signed at epoch 0 + let signed_valset_upd = loop { + // attempt to receive validator set update + let serialized_tx = tokio::time::timeout( + std::time::Duration::from_secs(1), + async { broadcaster_rx.recv().await.unwrap() }, + ) + .await + .unwrap(); + let tx = Tx::try_from(&serialized_tx[..]).unwrap(); + + match ethereum_tx_data_variants::ValSetUpdateVext::try_from(&tx) { + Ok(signed_valset_upd) => break signed_valset_upd, + Err(_) => continue, + } + }; + + assert_eq!(signed_valset_upd.data.signing_epoch, Epoch(0)); + } + /// Check that broadcasting expired Ethereum events works /// as expected. 
#[test] @@ -2307,7 +2338,7 @@ mod abciplus_mempool_tests { }))); // invalid tx type, it doesn't match the // tx type declared in the header - tx.set_data(Data::new(ext.try_to_vec().expect("Test falied"))); + tx.set_data(Data::new(ext.serialize_to_vec())); tx.add_section(Section::Signature(Signature::new( tx.sechashes(), [(0, protocol_key)].into_iter().collect(), @@ -2319,17 +2350,6 @@ mod abciplus_mempool_tests { let rsp = shell.mempool_validate(&tx, Default::default()); assert_eq!(rsp.code, u32::from(ErrorCodes::InvalidVoteExtension)); } -} - -#[cfg(test)] -mod tests { - use namada::proof_of_stake::Epoch; - use namada::proto::{Code, Data, Section, Signature, Tx}; - use namada::types::transaction::{Fee, WrapperTx}; - - use super::*; - - const GAS_LIMIT_MULTIPLIER: u64 = 100_000; /// Mempool validation must reject unsigned wrappers #[test] @@ -2464,13 +2484,15 @@ mod tests { ))); // Write wrapper hash to storage + let mut batch = + namada::core::ledger::storage::testing::TestStorage::batch(); let wrapper_hash = wrapper.header_hash(); let wrapper_hash_key = - replay_protection::get_replay_protection_key(&wrapper_hash); + replay_protection::get_replay_protection_last_subkey(&wrapper_hash); shell .wl_storage .storage - .write(&wrapper_hash_key, wrapper_hash) + .write_replay_protection_entry(&mut batch, &wrapper_hash_key) .expect("Test failed"); // Try wrapper tx replay attack @@ -2502,15 +2524,16 @@ mod tests { ) ); - let inner_tx_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); + let inner_tx_hash = wrapper.raw_header_hash(); // Write inner hash in storage let inner_hash_key = - replay_protection::get_replay_protection_key(&inner_tx_hash); + replay_protection::get_replay_protection_last_subkey( + &inner_tx_hash, + ); shell .wl_storage .storage - .write(&inner_hash_key, inner_tx_hash) + .write_replay_protection_entry(&mut batch, &inner_hash_key) .expect("Test failed"); // Try inner tx replay attack diff --git 
a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 3687a6d39b..2df4b520c5 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -245,8 +245,11 @@ where let mut tx_gas_meter = TxGasMeter::new(wrapper.gas_limit); tx_gas_meter.add_tx_size_gas(tx_bytes).map_err(|_| ())?; - // Check replay protection - self.replay_protection_checks(&tx, tx_bytes, temp_wl_storage) + // Check replay protection, safe to do here. Even if the tx is a + // replay attempt, we can leave its hashes in the write log since, + // having we already checked the signature, no other tx with the + // same hash can ba deemed valid + self.replay_protection_checks(&tx, temp_wl_storage) .map_err(|_| ())?; // Check fees @@ -493,14 +496,13 @@ mod test_prepare_proposal { #[cfg(feature = "abcipp")] use std::collections::{BTreeSet, HashMap}; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada::core::ledger::storage_api::collections::lazy_map::{ NestedSubKey, SubKey, }; use namada::ledger::gas::Gas; use namada::ledger::pos::PosQueries; use namada::ledger::replay_protection; - use namada::proof_of_stake::btree_set::BTreeSetShims; use namada::proof_of_stake::types::WeightedValidator; use namada::proof_of_stake::{ consensus_validator_set_handle, @@ -584,8 +586,7 @@ mod test_prepare_proposal { bridge_pool_root: Some(bp_root), validator_set_update: None, } - .try_to_vec() - .expect("Test failed"); + .serialize_to_vec(); let vote = ExtendedVoteInfo { vote_extension, @@ -879,6 +880,7 @@ mod test_prepare_proposal { test_utils::setup_with_cfg(test_utils::SetupCfg { last_height: FIRST_HEIGHT, num_validators: 2, + ..Default::default() }); let params = shell.wl_storage.pos_queries().get_pos_params(); @@ -916,8 +918,8 @@ mod test_prepare_proposal { .unwrap() .into_iter() .collect(); - let val1 = consensus_set.pop_first_shim().unwrap(); - let val2 = 
consensus_set.pop_first_shim().unwrap(); + let val1 = consensus_set.pop_first().unwrap(); + let val2 = consensus_set.pop_first().unwrap(); let pkh1 = get_pkh_from_address( &shell.wl_storage, ¶ms, @@ -1027,7 +1029,7 @@ mod test_prepare_proposal { validator_set_update: None, }; let vote = ExtendedVoteInfo { - vote_extension: vote_extension.try_to_vec().unwrap(), + vote_extension: vote_extension.serialize_to_vec(), ..Default::default() }; // this should panic @@ -1082,10 +1084,7 @@ mod test_prepare_proposal { shell .wl_storage .storage - .write( - &balance_key, - Amount::native_whole(1_000).try_to_vec().unwrap(), - ) + .write(&balance_key, Amount::native_whole(1_000).serialize_to_vec()) .unwrap(); let mut req = RequestPrepareProposal { @@ -1150,11 +1149,11 @@ mod test_prepare_proposal { assert_eq!( received .iter() - .map(|x| x.try_to_vec().unwrap()) + .map(|x| x.serialize_to_vec()) .collect::>(), expected_txs .iter() - .map(|x| x.try_to_vec().unwrap()) + .map(|x| x.serialize_to_vec()) .collect::>(), ); } @@ -1188,7 +1187,7 @@ mod test_prepare_proposal { // Write wrapper hash to storage let wrapper_unsigned_hash = wrapper.header_hash(); - let hash_key = replay_protection::get_replay_protection_key( + let hash_key = replay_protection::get_replay_protection_last_key( &wrapper_unsigned_hash, ); shell @@ -1279,12 +1278,12 @@ mod test_prepare_proposal { [(0, keypair)].into_iter().collect(), None, ))); - let inner_unsigned_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); + let inner_unsigned_hash = wrapper.raw_header_hash(); // Write inner hash to storage - let hash_key = - replay_protection::get_replay_protection_key(&inner_unsigned_hash); + let hash_key = replay_protection::get_replay_protection_last_key( + &inner_unsigned_hash, + ); shell .wl_storage .storage @@ -1313,7 +1312,7 @@ mod test_prepare_proposal { let (shell, _recv, _, _) = test_utils::setup(); let keypair = crate::wallet::defaults::daewon_keypair(); - let keypair_2 = 
crate::wallet::defaults::daewon_keypair(); + let keypair_2 = crate::wallet::defaults::albert_keypair(); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index ab544de3f8..4968faa050 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -4,7 +4,6 @@ use data_encoding::HEXUPPER; use namada::core::hints; use namada::core::ledger::storage::WlStorage; -use namada::ledger::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use namada::ledger::pos::PosQueries; use namada::ledger::protocol::get_fee_unshielding_transaction; use namada::ledger::storage::TempWlStorage; @@ -15,6 +14,7 @@ use namada::types::transaction::protocol::{ }; #[cfg(feature = "abcipp")] use namada::types::voting_power::FractionalVotingPower; +use namada_sdk::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use super::block_alloc::{BlockSpace, EncryptedTxsBins}; use super::*; @@ -721,14 +721,7 @@ where metadata.has_decrypted_txs = true; match tx_queue_iter.next() { Some(wrapper) => { - let mut inner_tx = tx.clone(); - inner_tx.update_header(TxType::Raw); - if wrapper - .tx - .clone() - .update_header(TxType::Raw) - .header_hash() - != inner_tx.header_hash() + if wrapper.tx.raw_header_hash() != tx.raw_header_hash() { TxResult { code: ErrorCodes::InvalidOrder.into(), @@ -742,35 +735,6 @@ where wrapper.tx.clone(), privkey, ) { - // Tx chain id - if wrapper.tx.header.chain_id != self.chain_id { - return TxResult { - code: ErrorCodes::InvalidDecryptedChainId - .into(), - info: format!( - "Decrypted tx carries a wrong chain \ - id: expected {}, found {}", - self.chain_id, - wrapper.tx.header.chain_id - ), - }; - } - - // Tx expiration - if let Some(exp) = wrapper.tx.header.expiration { - if block_time > exp { - return TxResult { - code: ErrorCodes::ExpiredDecryptedTx - .into(), - info: format!( - 
"Decrypted tx expired at {:#?}, \ - block time: {:#?}", - exp, block_time - ), - }; - } - } - TxResult { code: ErrorCodes::Ok.into(), info: "Process Proposal accepted this \ @@ -878,11 +842,9 @@ where } } else { // Replay protection checks - if let Err(e) = self.replay_protection_checks( - &tx, - tx_bytes, - temp_wl_storage, - ) { + if let Err(e) = + self.replay_protection_checks(&tx, temp_wl_storage) + { return TxResult { code: ErrorCodes::ReplayTx.into(), info: e.to_string(), @@ -988,6 +950,7 @@ mod test_process_proposal { #[cfg(feature = "abcipp")] use assert_matches::assert_matches; + use namada::ledger::replay_protection; use namada::ledger::storage_api::StorageWrite; use namada::proto::{ Code, Data, Section, SignableEthMessage, Signature, Signed, @@ -1000,7 +963,7 @@ mod test_process_proposal { use namada::types::token; use namada::types::token::Amount; use namada::types::transaction::protocol::EthereumTxData; - use namada::types::transaction::{Fee, WrapperTx}; + use namada::types::transaction::{Fee, Solution, WrapperTx}; #[cfg(feature = "abcipp")] use namada::types::vote_extensions::bridge_pool_roots::MultiSignedVext; #[cfg(feature = "abcipp")] @@ -2005,6 +1968,7 @@ mod test_process_proposal { epoch: Epoch(0), gas_limit: GAS_LIMIT_MULTIPLIER.into(), unshield_section_hash: None, + pow_solution: Solution::None, }; let tx = Tx::from_type(TxType::Wrapper(Box::new(wrapper))); @@ -2131,14 +2095,16 @@ mod test_process_proposal { ))); // Write wrapper hash to storage + let mut batch = + namada::core::ledger::storage::testing::TestStorage::batch(); let wrapper_unsigned_hash = wrapper.header_hash(); - let hash_key = replay_protection::get_replay_protection_key( + let hash_key = replay_protection::get_replay_protection_last_subkey( &wrapper_unsigned_hash, ); shell .wl_storage .storage - .write(&hash_key, vec![]) + .write_replay_protection_entry(&mut batch, &hash_key) .expect("Test failed"); // Run validation @@ -2180,10 +2146,7 @@ mod test_process_proposal { shell 
.wl_storage .storage - .write( - &balance_key, - Amount::native_whole(1000).try_to_vec().unwrap(), - ) + .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) .unwrap(); let mut wrapper = @@ -2218,18 +2181,12 @@ mod test_process_proposal { response[1].result.code, u32::from(ErrorCodes::ReplayTx) ); - // The checks happens on the inner hash first, so the tx is - // rejected because of this hash, not the - // wrapper one assert_eq!( response[1].result.info, format!( - "Transaction replay attempt: Inner transaction hash \ + "Transaction replay attempt: Wrapper transaction hash \ {} already in storage", - wrapper - .clone() - .update_header(TxType::Raw) - .header_hash(), + wrapper.header_hash() ) ); } @@ -2263,16 +2220,17 @@ mod test_process_proposal { [(0, keypair)].into_iter().collect(), None, ))); - let inner_unsigned_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); // Write inner hash to storage - let hash_key = - replay_protection::get_replay_protection_key(&inner_unsigned_hash); + let mut batch = + namada::core::ledger::storage::testing::TestStorage::batch(); + let hash_key = replay_protection::get_replay_protection_last_subkey( + &wrapper.raw_header_hash(), + ); shell .wl_storage .storage - .write(&hash_key, vec![]) + .write_replay_protection_entry(&mut batch, &hash_key) .expect("Test failed"); // Run validation @@ -2291,7 +2249,7 @@ mod test_process_proposal { format!( "Transaction replay attempt: Inner transaction hash \ {} already in storage", - inner_unsigned_hash + wrapper.raw_header_hash() ) ); } @@ -2305,7 +2263,7 @@ mod test_process_proposal { let (shell, _recv, _, _) = test_utils::setup(); let keypair = crate::wallet::defaults::daewon_keypair(); - let keypair_2 = crate::wallet::defaults::daewon_keypair(); + let keypair_2 = crate::wallet::defaults::albert_keypair(); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( @@ -2327,8 +2285,7 @@ mod test_process_proposal { [(0, 
keypair)].into_iter().collect(), None, ))); - let inner_unsigned_hash = - wrapper.clone().update_header(TxType::Raw).header_hash(); + let inner_unsigned_hash = wrapper.raw_header_hash(); new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( Fee { @@ -2432,70 +2389,6 @@ mod test_process_proposal { } } - /// Test that a decrypted transaction with a mismatching chain id gets - /// rejected without rejecting the entire block - #[test] - fn test_decrypted_wrong_chain_id() { - let (mut shell, _recv, _, _) = test_utils::setup(); - let keypair = crate::wallet::defaults::daewon_keypair(); - - let wrong_chain_id = ChainId("Wrong chain id".to_string()); - let mut wrapper = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: token::Amount::zero(), - token: shell.wl_storage.storage.native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - wrapper.header.chain_id = wrong_chain_id.clone(); - wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned())); - wrapper - .set_data(Data::new("new transaction data".as_bytes().to_owned())); - let mut decrypted = wrapper.clone(); - - decrypted.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - decrypted.add_section(Section::Signature(Signature::new( - decrypted.sechashes(), - [(0, keypair)].into_iter().collect(), - None, - ))); - let gas_limit = Gas::from(wrapper.header.wrapper().unwrap().gas_limit) - .checked_sub(Gas::from(wrapper.to_bytes().len() as u64)) - .unwrap(); - let wrapper_in_queue = TxInQueue { - tx: wrapper, - gas: gas_limit, - }; - shell.wl_storage.storage.tx_queue.push(wrapper_in_queue); - - // Run validation - let request = ProcessProposal { - txs: vec![decrypted.to_bytes()], - }; - - match shell.process_proposal(request) { - Ok(response) => { - assert_eq!( - response[0].result.code, - u32::from(ErrorCodes::InvalidDecryptedChainId) - ); - assert_eq!( - response[0].result.info, - format!( - "Decrypted tx carries a 
wrong chain id: expected {}, \ - found {}", - shell.chain_id, wrong_chain_id - ) - ) - } - Err(_) => panic!("Test failed"), - } - } - /// Test that an expired wrapper transaction causes a block rejection #[test] fn test_expired_wrapper() { @@ -2538,62 +2431,6 @@ mod test_process_proposal { } } - /// Test that an expired decrypted transaction is correctly marked as so - /// without rejecting the entire block - #[test] - fn test_expired_decrypted() { - let (mut shell, _recv, _, _) = test_utils::setup(); - let keypair = crate::wallet::defaults::daewon_keypair(); - - let mut wrapper = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: token::Amount::zero(), - token: shell.wl_storage.storage.native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - wrapper.header.chain_id = shell.chain_id.clone(); - wrapper.header.expiration = Some(DateTimeUtc::default()); - wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned())); - wrapper - .set_data(Data::new("new transaction data".as_bytes().to_owned())); - let mut decrypted = wrapper.clone(); - - decrypted.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - decrypted.add_section(Section::Signature(Signature::new( - decrypted.sechashes(), - [(0, keypair)].into_iter().collect(), - None, - ))); - let gas_limit = Gas::from(wrapper.header.wrapper().unwrap().gas_limit) - .checked_sub(Gas::from(wrapper.to_bytes().len() as u64)) - .unwrap(); - let wrapper_in_queue = TxInQueue { - tx: wrapper, - gas: gas_limit, - }; - shell.wl_storage.storage.tx_queue.push(wrapper_in_queue); - - // Run validation - let request = ProcessProposal { - txs: vec![decrypted.to_bytes()], - }; - match shell.process_proposal(request) { - Ok(response) => { - assert_eq!(response.len(), 1); - assert_eq!( - response[0].result.code, - u32::from(ErrorCodes::ExpiredDecryptedTx) - ); - } - Err(_) => panic!("Test failed"), - } - } - /// Check that a tx requiring more gas 
than the block limit causes a block /// rejection #[test] diff --git a/apps/src/lib/node/ledger/shell/queries.rs b/apps/src/lib/node/ledger/shell/queries.rs index a62c3ec4b4..28843fabc6 100644 --- a/apps/src/lib/node/ledger/shell/queries.rs +++ b/apps/src/lib/node/ledger/shell/queries.rs @@ -1,7 +1,8 @@ //! Shell methods for querying state -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use ferveo_common::TendermintValidator; +use namada::ledger::dry_run_tx; use namada::ledger::pos::into_tm_voting_power; use namada::ledger::queries::{RequestCtx, ResponseQuery}; use namada::ledger::storage_api::token; @@ -49,7 +50,11 @@ where }; // Invoke the root RPC handler - returns borsh-encoded data on success - let result = namada::ledger::queries::handle_path(ctx, &request); + let result = if request.path == "/shell/dry_run_tx" { + dry_run_tx(ctx, &request) + } else { + namada::ledger::queries::handle_path(ctx, &request) + }; match result { Ok(ResponseQuery { data, info, proof }) => response::Query { value: data, @@ -84,9 +89,7 @@ where &self, pk: &common::PublicKey, ) -> Option> { - let pk_bytes = pk - .try_to_vec() - .expect("Serializing public key should not fail"); + let pk_bytes = pk.serialize_to_vec(); // get the current epoch let (current_epoch, _) = self.wl_storage.storage.get_current_epoch(); @@ -137,10 +140,10 @@ where #[cfg(not(feature = "abcipp"))] mod test_queries { use namada::core::ledger::storage::EPOCH_SWITCH_BLOCKS_DELAY; - use namada::ledger::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::types::WeightedValidator; use namada::types::storage::Epoch; + use namada_sdk::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use super::*; use crate::facade::tendermint_proto::abci::VoteInfo; diff --git a/apps/src/lib/node/ledger/shell/testing/client.rs b/apps/src/lib/node/ledger/shell/testing/client.rs index 9ebc825f54..790587a549 100644 --- a/apps/src/lib/node/ledger/shell/testing/client.rs 
+++ b/apps/src/lib/node/ledger/shell/testing/client.rs @@ -1,9 +1,7 @@ -use std::ops::ControlFlow; - use clap::Command as App; use eyre::Report; -use namada::types::control_flow::Halt; use namada::types::io::Io; +use namada_sdk::error::Error as SdkError; use tendermint_config::net::Address as TendermintAddress; use super::node::MockNode; @@ -47,9 +45,10 @@ pub fn run( NamadaClient::WithoutContext(sub_cmd, global) } }; - rt.block_on(CliApi::::handle_client_command( + rt.block_on(CliApi::handle_client_command( Some(node), cmd, + &TestingIo, )) } Bin::Wallet => { @@ -60,7 +59,7 @@ pub fn run( let cmd = cmds::NamadaWallet::parse(&matches) .expect("Could not parse wallet command"); - CliApi::::handle_wallet_command(cmd, ctx) + CliApi::handle_wallet_command(cmd, ctx, &TestingIo) } Bin::Relayer => { args.insert(0, "relayer"); @@ -82,9 +81,10 @@ pub fn run( NamadaRelayer::ValidatorSet(sub_cmd) } }; - rt.block_on(CliApi::::handle_relayer_command( + rt.block_on(CliApi::handle_relayer_command( Some(node), cmd, + &TestingIo, )) } } @@ -96,7 +96,10 @@ impl<'a> CliClient for &'a MockNode { unreachable!("MockNode should always be instantiated at test start.") } - async fn wait_until_node_is_synced(&self) -> Halt<()> { - ControlFlow::Continue(()) + async fn wait_until_node_is_synced( + &self, + _io: &impl Io, + ) -> Result<(), SdkError> { + Ok(()) } } diff --git a/apps/src/lib/node/ledger/shell/testing/node.rs b/apps/src/lib/node/ledger/shell/testing/node.rs index 034ac80845..8e0787fcfb 100644 --- a/apps/src/lib/node/ledger/shell/testing/node.rs +++ b/apps/src/lib/node/ledger/shell/testing/node.rs @@ -1,11 +1,17 @@ +use std::future::poll_fn; use std::mem::ManuallyDrop; use std::path::PathBuf; use std::str::FromStr; use std::sync::{Arc, Mutex}; +use std::task::Poll; use color_eyre::eyre::{Report, Result}; use data_encoding::HEXUPPER; +use itertools::Either; use lazy_static::lazy_static; +use namada::core::types::ethereum_structs; +use namada::eth_bridge::oracle::config::Config as 
OracleConfig; +use namada::ledger::dry_run_tx; use namada::ledger::events::log::dumb_queries; use namada::ledger::queries::{ EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, @@ -19,17 +25,19 @@ use namada::proof_of_stake::{ read_consensus_validator_set_addresses_with_stake, validator_consensus_key_handle, }; -use namada::sdk::queries::Client; use namada::tendermint_proto::abci::VoteInfo; use namada::tendermint_rpc::endpoint::abci_info; use namada::tendermint_rpc::SimpleRequest; +use namada::types::control_flow::time::Duration; +use namada::types::ethereum_events::EthereumEvent; use namada::types::hash::Hash; use namada::types::key::tm_consensus_key_raw_hash; use namada::types::storage::{BlockHash, BlockHeight, Epoch, Header}; use namada::types::time::DateTimeUtc; +use namada_sdk::queries::Client; use num_traits::cast::FromPrimitive; use regex::Regex; -use tokio::sync::mpsc::UnboundedReceiver; +use tokio::sync::mpsc; use crate::facade::tendermint_proto::abci::response_process_proposal::ProposalStatus; use crate::facade::tendermint_proto::abci::{ @@ -38,14 +46,193 @@ use crate::facade::tendermint_proto::abci::{ use crate::facade::tendermint_rpc::endpoint::abci_info::AbciInfo; use crate::facade::tendermint_rpc::error::Error as RpcError; use crate::facade::{tendermint, tendermint_rpc}; +use crate::node::ledger::ethereum_oracle::test_tools::mock_web3_client::{ + TestOracle, Web3Client, Web3Controller, +}; +use crate::node::ledger::ethereum_oracle::{ + control, last_processed_block, try_process_eth_events, +}; use crate::node::ledger::shell::testing::utils::TestDir; -use crate::node::ledger::shell::{ErrorCodes, Shell}; +use crate::node::ledger::shell::{ErrorCodes, EthereumOracleChannels, Shell}; use crate::node::ledger::shims::abcipp_shim_types::shim::request::{ FinalizeBlock, ProcessedTx, }; use crate::node::ledger::shims::abcipp_shim_types::shim::response::TxResult; use crate::node::ledger::storage; +/// Mock Ethereum oracle used for testing purposes. 
+struct MockEthOracle { + /// The inner oracle. + oracle: TestOracle, + /// The inner oracle's configuration. + config: OracleConfig, + /// The inner oracle's next block to process. + next_block_to_process: tokio::sync::RwLock, +} + +impl MockEthOracle { + /// Updates the state of the Ethereum oracle. + /// + /// This includes sending any confirmed Ethereum events to + /// the shell and updating the height of the next Ethereum + /// block to process. Upon a successfully processed block, + /// this functions returns `true`. + async fn drive(&self) -> bool { + try_process_eth_events( + &self.oracle, + &self.config, + &*self.next_block_to_process.read().await, + ) + .await + .process_new_block() + } +} + +/// Services mocking the operation of the ledger's various async tasks. +pub struct MockServices { + /// Receives transactions that are supposed to be broadcasted + /// to the network. + tx_receiver: tokio::sync::Mutex>>, + /// Mock Ethereum oracle, that processes blocks from Ethereum + /// in order to find events emitted by a transaction to vote on. + ethereum_oracle: MockEthOracle, +} + +/// Actions to be performed by the mock node, as a result +/// of driving [`MockServices`]. +pub enum MockServiceAction { + /// The ledger should broadcast new transactions. + BroadcastTxs(Vec>), + /// Progress to the next Ethereum block to process. + IncrementEthHeight, +} + +impl MockServices { + /// Drive the internal state machine of the mock node's services. + async fn drive(&self) -> Vec { + let mut actions = vec![]; + + // process new eth events + // NOTE: this may result in a deadlock, if the events + // sent to the shell exceed the capacity of the oracle's + // events channel! 
+ if self.ethereum_oracle.drive().await { + actions.push(MockServiceAction::IncrementEthHeight); + } + + // receive txs from the broadcaster + let txs = { + let mut txs = vec![]; + let mut tx_receiver = self.tx_receiver.lock().await; + + while let Some(tx) = poll_fn(|cx| match tx_receiver.poll_recv(cx) { + Poll::Pending => Poll::Ready(None), + poll => poll, + }) + .await + { + txs.push(tx); + } + + txs + }; + if !txs.is_empty() { + actions.push(MockServiceAction::BroadcastTxs(txs)); + } + + actions + } +} + +/// Controller of various mock node services. +pub struct MockServicesController { + /// Ethereum oracle controller. + pub eth_oracle: Web3Controller, + /// Handler to the Ethereum oracle sender channel. + /// + /// Bypasses the Ethereum oracle service and sends + /// events directly to the [`Shell`]. + pub eth_events: mpsc::Sender, + /// Transaction broadcaster handle. + pub tx_broadcaster: mpsc::UnboundedSender>, +} + +/// Service handlers to be passed to a [`Shell`], when building +/// a mock node. +pub struct MockServiceShellHandlers { + /// Transaction broadcaster handle. + pub tx_broadcaster: mpsc::UnboundedSender>, + /// Ethereum oracle channel handlers. + pub eth_oracle_channels: Option, +} + +/// Mock services data returned by [`mock_services`]. +pub struct MockServicesPackage { + /// Whether to automatically drive mock services or not. + pub auto_drive_services: bool, + /// Mock services stored by the [`MockNode`]. + pub services: MockServices, + /// Handlers to mock services stored by the [`Shell`]. + pub shell_handlers: MockServiceShellHandlers, + /// Handler to the mock services controller. + pub controller: MockServicesController, +} + +/// Mock services config. +pub struct MockServicesCfg { + /// Whether to automatically drive mock services or not. + pub auto_drive_services: bool, + /// Whether to enable the Ethereum oracle or not. + pub enable_eth_oracle: bool, +} + +/// Instantiate mock services for a node. 
+pub fn mock_services(cfg: MockServicesCfg) -> MockServicesPackage { + let (_, eth_client) = Web3Client::setup(); + let (eth_sender, eth_receiver) = mpsc::channel(1000); + let (last_processed_block_sender, last_processed_block_receiver) = + last_processed_block::channel(); + let (control_sender, control_receiver) = control::channel(); + let eth_oracle_controller = eth_client.controller(); + let oracle = TestOracle::new( + Either::Left(eth_client), + eth_sender.clone(), + last_processed_block_sender, + Duration::from_millis(5), + Duration::from_secs(30), + control_receiver, + ); + let eth_oracle_channels = EthereumOracleChannels::new( + eth_receiver, + control_sender, + last_processed_block_receiver, + ); + let (tx_broadcaster, tx_receiver) = mpsc::unbounded_channel(); + let ethereum_oracle = MockEthOracle { + oracle, + config: Default::default(), + next_block_to_process: tokio::sync::RwLock::new(Default::default()), + }; + MockServicesPackage { + auto_drive_services: cfg.auto_drive_services, + services: MockServices { + ethereum_oracle, + tx_receiver: tokio::sync::Mutex::new(tx_receiver), + }, + shell_handlers: MockServiceShellHandlers { + tx_broadcaster: tx_broadcaster.clone(), + eth_oracle_channels: cfg + .enable_eth_oracle + .then_some(eth_oracle_channels), + }, + controller: MockServicesController { + eth_oracle: eth_oracle_controller, + eth_events: eth_sender, + tx_broadcaster, + }, + } +} + /// Status of tx #[derive(Debug, Clone, PartialEq, Eq)] pub enum NodeResults { @@ -61,8 +248,9 @@ pub struct MockNode { pub shell: Arc>>, pub test_dir: ManuallyDrop, pub keep_temp: bool, - pub _broadcast_recv: UnboundedReceiver>, pub results: Arc>>, + pub services: Arc, + pub auto_drive_services: bool, } impl Drop for MockNode { @@ -82,6 +270,34 @@ impl Drop for MockNode { } impl MockNode { + pub async fn handle_service_action(&self, action: MockServiceAction) { + match action { + MockServiceAction::BroadcastTxs(txs) => { + self.submit_txs(txs); + } + 
MockServiceAction::IncrementEthHeight => { + *self + .services + .ethereum_oracle + .next_block_to_process + .write() + .await += 1.into(); + } + } + } + + pub async fn drive_mock_services(&self) { + for action in self.services.drive().await { + self.handle_service_action(action).await; + } + } + + async fn drive_mock_services_bg(&self) { + if self.auto_drive_services { + self.drive_mock_services().await; + } + } + pub fn genesis_dir(&self) -> PathBuf { self.test_dir .path() @@ -179,20 +395,43 @@ impl MockNode { pub fn finalize_and_commit(&self) { let (proposer_address, votes) = self.prepare_request(); - let mut req = FinalizeBlock { - hash: BlockHash([0u8; 32]), - header: Header { - hash: Hash([0; 32]), - time: DateTimeUtc::now(), - next_validators_hash: Hash([0; 32]), - }, - byzantine_validators: vec![], - txs: vec![], - proposer_address, - votes, - }; - req.header.time = DateTimeUtc::now(); let mut locked = self.shell.lock().unwrap(); + + // build finalize block abci request + let req = { + // check if we have protocol txs to be included + // in the finalize block request + let txs = { + let req = RequestPrepareProposal { + proposer_address: proposer_address.clone(), + ..Default::default() + }; + let txs = locked.prepare_proposal(req).txs; + + txs.into_iter() + .map(|tx| ProcessedTx { + tx, + result: TxResult { + code: 0, + info: String::new(), + }, + }) + .collect() + }; + FinalizeBlock { + hash: BlockHash([0u8; 32]), + header: Header { + hash: Hash([0; 32]), + time: DateTimeUtc::now(), + next_validators_hash: Hash([0; 32]), + }, + byzantine_validators: vec![], + txs, + proposer_address, + votes, + } + }; + locked.finalize_block(req).expect("Test failed"); locked.commit(); } @@ -213,19 +452,19 @@ impl MockNode { /// Send a tx through Process Proposal and Finalize Block /// and register the results. - fn submit_tx(&self, tx_bytes: Vec) { - // The block space allocator disallows txs in certain blocks. 
+ fn submit_txs(&self, txs: Vec>) { + // The block space allocator disallows encrypted txs in certain blocks. // Advance to block height that allows txs. self.advance_to_allowed_block(); let (proposer_address, votes) = self.prepare_request(); let req = RequestProcessProposal { - txs: vec![tx_bytes.clone()], + txs: txs.clone(), proposer_address: proposer_address.clone(), ..Default::default() }; let mut locked = self.shell.lock().unwrap(); - let mut result = locked.process_proposal(req); + let result = locked.process_proposal(req); let mut errors: Vec<_> = result .tx_results @@ -252,10 +491,11 @@ impl MockNode { next_validators_hash: Hash([0; 32]), }, byzantine_validators: vec![], - txs: vec![ProcessedTx { - tx: tx_bytes, - result: result.tx_results.remove(0), - }], + txs: txs + .into_iter() + .zip(result.tx_results.into_iter()) + .map(|(tx, result)| ProcessedTx { tx, result }) + .collect(), proposer_address, votes, }; @@ -322,6 +562,7 @@ impl<'a> Client for &'a MockNode { height: Option, prove: bool, ) -> std::result::Result { + self.drive_mock_services_bg().await; let rpc = RPC; let data = data.unwrap_or_default(); let latest_height = { @@ -352,7 +593,12 @@ impl<'a> Client for &'a MockNode { tx_wasm_cache: borrowed.tx_wasm_cache.read_only(), storage_read_past_height_limit: None, }; - rpc.handle(ctx, &request).map_err(Report::new) + if request.path == "/shell/dry_run_tx" { + dry_run_tx(ctx, &request) + } else { + rpc.handle(ctx, &request) + } + .map_err(Report::new) } async fn perform( @@ -367,6 +613,7 @@ impl<'a> Client for &'a MockNode { /// `/abci_info`: get information about the ABCI application. 
async fn abci_info(&self) -> Result { + self.drive_mock_services_bg().await; let locked = self.shell.lock().unwrap(); Ok(AbciInfo { data: "Namada".to_string(), @@ -398,6 +645,7 @@ impl<'a> Client for &'a MockNode { tx: namada::tendermint::abci::Transaction, ) -> Result { + self.drive_mock_services_bg().await; let mut resp = tendermint_rpc::endpoint::broadcast::tx_sync::Response { code: Default::default(), data: Default::default(), @@ -405,9 +653,10 @@ impl<'a> Client for &'a MockNode { hash: tendermint::abci::transaction::Hash::new([0; 32]), }; let tx_bytes: Vec = tx.into(); - self.submit_tx(tx_bytes); + self.submit_txs(vec![tx_bytes]); if !self.success() { - resp.code = tendermint::abci::Code::Err(1337); // TODO: submit_tx should return the correct error code + message + // TODO: submit_txs should return the correct error code + message + resp.code = tendermint::abci::Code::Err(1337); return Ok(resp); } else { self.clear_results(); @@ -417,11 +666,13 @@ impl<'a> Client for &'a MockNode { proposer_address, ..Default::default() }; - let tx_bytes = { + let txs = { let locked = self.shell.lock().unwrap(); - locked.prepare_proposal(req).txs.remove(0) + locked.prepare_proposal(req).txs }; - self.submit_tx(tx_bytes); + if !txs.is_empty() { + self.submit_txs(txs); + } Ok(resp) } @@ -434,6 +685,7 @@ impl<'a> Client for &'a MockNode { _order: namada::tendermint_rpc::Order, ) -> Result { + self.drive_mock_services_bg().await; let matcher = parse_tm_query(query); let borrowed = self.shell.lock().unwrap(); // we store an index into the event log as a block @@ -503,6 +755,7 @@ impl<'a> Client for &'a MockNode { where H: Into + Send, { + self.drive_mock_services_bg().await; let height = height.into(); let encoded_event = EncodedEvent(height.value()); let locked = self.shell.lock().unwrap(); @@ -561,6 +814,7 @@ impl<'a> Client for &'a MockNode { /// Returns empty result (200 OK) on success, no response in case of an /// error. 
async fn health(&self) -> Result<(), RpcError> { + self.drive_mock_services_bg().await; Ok(()) } } diff --git a/apps/src/lib/node/ledger/shell/testing/utils.rs b/apps/src/lib/node/ledger/shell/testing/utils.rs index bfcb7f50ab..451e20c2df 100644 --- a/apps/src/lib/node/ledger/shell/testing/utils.rs +++ b/apps/src/lib/node/ledger/shell/testing/utils.rs @@ -74,13 +74,13 @@ pub struct TestingIo; #[async_trait::async_trait(?Send)] impl Io for TestingIo { - fn print(output: impl AsRef) { + fn print(&self, output: impl AsRef) { let mut testout = TESTOUT.lock().unwrap(); testout.append(output.as_ref().as_bytes().to_vec()); print!("{}", output.as_ref()); } - fn println(output: impl AsRef) { + fn println(&self, output: impl AsRef) { let mut testout = TESTOUT.lock().unwrap(); let mut bytes = output.as_ref().as_bytes().to_vec(); bytes.extend_from_slice("\n".as_bytes()); @@ -89,22 +89,24 @@ impl Io for TestingIo { } fn write( + &self, _: W, output: impl AsRef, ) -> std::io::Result<()> { - Self::print(output); + self.print(output); Ok(()) } fn writeln( + &self, _: W, output: impl AsRef, ) -> std::io::Result<()> { - Self::println(output); + self.println(output); Ok(()) } - fn eprintln(output: impl AsRef) { + fn eprintln(&self, output: impl AsRef) { let mut testout = TESTOUT.lock().unwrap(); let mut bytes = output.as_ref().as_bytes().to_vec(); bytes.extend_from_slice("\n".as_bytes()); @@ -112,11 +114,11 @@ impl Io for TestingIo { eprintln!("{}", output.as_ref()); } - async fn read() -> tokio::io::Result { + async fn read(&self) -> tokio::io::Result { read_aux(&*TESTIN).await } - async fn prompt(question: impl AsRef) -> String { + async fn prompt(&self, question: impl AsRef) -> String { prompt_aux(&*TESTIN, tokio::io::stdout(), question.as_ref()).await } } diff --git a/apps/src/lib/node/ledger/shell/vote_extensions.rs b/apps/src/lib/node/ledger/shell/vote_extensions.rs index 658c35a121..847a791ea0 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions.rs +++ 
b/apps/src/lib/node/ledger/shell/vote_extensions.rs @@ -4,7 +4,6 @@ pub mod bridge_pool_vext; pub mod eth_events; pub mod val_set_update; -use namada::ledger::eth_bridge::{EthBridgeQueries, SendValsetUpd}; #[cfg(feature = "abcipp")] use namada::ledger::pos::PosQueries; use namada::proto::{SignableEthMessage, Signed}; @@ -15,6 +14,7 @@ use namada::types::vote_extensions::VoteExtensionDigest; use namada::types::vote_extensions::{ bridge_pool_roots, ethereum_events, validator_set_update, VoteExtension, }; +use namada_sdk::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use super::*; #[cfg(feature = "abcipp")] @@ -87,7 +87,7 @@ where _req: request::ExtendVote, ) -> response::ExtendVote { response::ExtendVote { - vote_extension: self.craft_extension().try_to_vec().unwrap(), + vote_extension: self.craft_extension().serialize_to_vec(), } } diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs index 002bd18904..f817b4cd9f 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs @@ -270,13 +270,10 @@ where mod test_bp_vote_extensions { #[cfg(feature = "abcipp")] use borsh::BorshDeserialize; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; #[cfg(not(feature = "abcipp"))] use namada::core::ledger::eth_bridge::storage::bridge_pool::get_key_from_hash; - #[cfg(not(feature = "abcipp"))] - use namada::ledger::eth_bridge::EthBridgeQueries; use namada::ledger::pos::PosQueries; - use namada::ledger::storage_api::StorageWrite; use namada::proof_of_stake::types::{ Position as ValidatorPosition, WeightedValidator, }; @@ -297,6 +294,8 @@ mod test_bp_vote_extensions { use namada::types::vote_extensions::bridge_pool_roots; #[cfg(feature = "abcipp")] use namada::types::vote_extensions::VoteExtension; + #[cfg(not(feature = "abcipp"))] + use namada_sdk::eth_bridge::EthBridgeQueries; 
#[cfg(feature = "abcipp")] use tendermint_proto_abcipp::abci::response_verify_vote_extension::VerifyStatus; #[cfg(feature = "abcipp")] @@ -321,24 +320,12 @@ mod test_bp_vote_extensions { ) .expect("Test failed"); - // register Bertha's protocol key - let pk_key = protocol_pk_key(&bertha_address()); - shell - .wl_storage - .write_bytes( - &pk_key, - bertha_keypair() - .ref_to() - .try_to_vec() - .expect("Test failed."), - ) - .expect("Test failed."); - // change pipeline length to 1 let mut params = shell.wl_storage.pos_queries().get_pos_params(); - params.pipeline_len = 1; + params.owned.pipeline_len = 1; let consensus_key = gen_keypair(); + let protocol_key = bertha_keypair(); let hot_key = gen_secp256k1_keypair(); let cold_key = gen_secp256k1_keypair(); @@ -347,6 +334,7 @@ mod test_bp_vote_extensions { params: ¶ms, address: &bertha_address(), consensus_key: &consensus_key.ref_to(), + protocol_key: &protocol_key.ref_to(), eth_hot_key: &hot_key.ref_to(), eth_cold_key: &cold_key.ref_to(), current_epoch: 0.into(), @@ -481,7 +469,7 @@ mod test_bp_vote_extensions { .as_bytes() .to_vec(), height: 0, - vote_extension: vote_extension.try_to_vec().expect("Test failed"), + vote_extension: vote_extension.serialize_to_vec(), }; let res = shell.verify_vote_extension(req); assert_eq!(res.status, i32::from(VerifyStatus::Accept)); @@ -692,8 +680,7 @@ mod test_bp_vote_extensions { let address = shell.mode.get_validator_address().unwrap().clone(); shell.wl_storage.storage.block.height = 4.into(); let key = get_key_from_hash(&KeccakHash([1; 32])); - let height = - shell.wl_storage.storage.block.height.try_to_vec().unwrap(); + let height = shell.wl_storage.storage.block.height.serialize_to_vec(); shell .wl_storage .storage @@ -719,8 +706,7 @@ mod test_bp_vote_extensions { .delete(&key) .expect("Test failed"); let key = get_key_from_hash(&KeccakHash([2; 32])); - let height = - shell.wl_storage.storage.block.height.try_to_vec().unwrap(); + let height = 
shell.wl_storage.storage.block.height.serialize_to_vec(); shell .wl_storage .storage @@ -780,8 +766,7 @@ mod test_bp_vote_extensions { let address = shell.mode.get_validator_address().unwrap().clone(); shell.wl_storage.storage.block.height = 4.into(); let key = get_key_from_hash(&KeccakHash([1; 32])); - let height = - shell.wl_storage.storage.block.height.try_to_vec().unwrap(); + let height = shell.wl_storage.storage.block.height.serialize_to_vec(); shell .wl_storage .storage @@ -807,8 +792,7 @@ mod test_bp_vote_extensions { .delete(&key) .expect("Test failed"); let key = get_key_from_hash(&KeccakHash([2; 32])); - let height = - shell.wl_storage.storage.block.height.try_to_vec().unwrap(); + let height = shell.wl_storage.storage.block.height.serialize_to_vec(); shell .wl_storage .storage diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs index 0dd85bfd70..8b9c140cfc 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs @@ -2,7 +2,6 @@ use std::collections::{BTreeMap, HashMap}; -use namada::ledger::eth_bridge::EthBridgeQueries; use namada::ledger::pos::PosQueries; use namada::ledger::storage::traits::StorageHasher; use namada::ledger::storage::{DBIter, DB}; @@ -15,6 +14,7 @@ use namada::types::vote_extensions::ethereum_events::{ }; #[cfg(feature = "abcipp")] use namada::types::voting_power::FractionalVotingPower; +use namada_sdk::eth_bridge::EthBridgeQueries; use super::*; use crate::node::ledger::shell::{Shell, ShellMode}; @@ -411,7 +411,7 @@ mod test_vote_extensions { #[cfg(feature = "abcipp")] use borsh::BorshDeserialize; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada::core::ledger::storage_api::collections::lazy_map::{ NestedSubKey, SubKey, }; @@ -461,7 +461,7 @@ mod test_vote_extensions { shell .wl_storage .storage - .write(&bridge_pool::get_nonce_key(), 
nonce.try_to_vec().unwrap()) + .write(&bridge_pool::get_nonce_key(), nonce.serialize_to_vec()) .expect("Test failed"); // write nam nonce to the eth events queue @@ -642,7 +642,7 @@ mod test_vote_extensions { .as_bytes() .to_vec(), height: 1, - vote_extension: vote_extension.try_to_vec().expect("Test failed"), + vote_extension: vote_extension.serialize_to_vec(), }; let res = shell.verify_vote_extension(req); assert_eq!(res.status, i32::from(VerifyStatus::Accept)); @@ -720,8 +720,7 @@ mod test_vote_extensions { }, validator_set_update: None, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), }; #[cfg(feature = "abcipp")] assert_eq!( @@ -899,15 +898,14 @@ mod test_vote_extensions { }; let req = request::VerifyVoteExtension { hash: vec![], - validator_address: address.try_to_vec().expect("Test failed"), + validator_address: address.serialize_to_vec(), height: 0, vote_extension: VoteExtension { ethereum_events: Some(signed_vext), bridge_pool_root: Some(bp_root), validator_set_update: None, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), }; assert_eq!( @@ -952,9 +950,9 @@ mod test_vote_extensions { #[cfg(feature = "abcipp")] let req = request::VerifyVoteExtension { hash: vec![], - validator_address: address.try_to_vec().expect("Test failed"), + validator_address: address.serialize_to_vec(), height: 0, - vote_extension: vote_ext.try_to_vec().expect("Test failed"), + vote_extension: vote_ext.serialize_to_vec(), }; #[cfg(feature = "abcipp")] assert_eq!( diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs index 03843b4717..ef1a3f9771 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs @@ -311,7 +311,6 @@ mod test_vote_extensions { use namada::core::ledger::storage_api::collections::lazy_map::{ NestedSubKey, SubKey, }; - use 
namada::ledger::eth_bridge::EthBridgeQueries; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::types::WeightedValidator; use namada::proof_of_stake::{ @@ -337,6 +336,7 @@ mod test_vote_extensions { use namada::types::vote_extensions::validator_set_update; #[cfg(feature = "abcipp")] use namada::types::vote_extensions::VoteExtension; + use namada_sdk::eth_bridge::EthBridgeQueries; #[cfg(feature = "abcipp")] use crate::facade::tendermint_proto::abci::response_verify_vote_extension::VerifyStatus; @@ -419,8 +419,7 @@ mod test_vote_extensions { bridge_pool_root: Some(bp_root), validator_set_update, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ..Default::default() }; @@ -506,8 +505,7 @@ mod test_vote_extensions { bridge_pool_root: Some(bp_root), validator_set_update, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ..Default::default() }; assert_eq!( @@ -715,8 +713,7 @@ mod test_vote_extensions { bridge_pool_root: Some(bp_root), validator_set_update: validator_set_update.clone(), } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ..Default::default() }; assert_eq!( diff --git a/apps/src/lib/node/ledger/storage/mod.rs b/apps/src/lib/node/ledger/storage/mod.rs index 659f561cce..43617879c8 100644 --- a/apps/src/lib/node/ledger/storage/mod.rs +++ b/apps/src/lib/node/ledger/storage/mod.rs @@ -118,6 +118,7 @@ mod tests { assert_eq!(result, None); } + #[cfg(feature = "testing")] #[test] fn test_commit_block() { let db_path = @@ -144,6 +145,7 @@ mod tests { storage.block.pred_epochs.new_epoch(BlockHeight(100)); // make wl_storage to update conversion for a new epoch let mut wl_storage = WlStorage::new(WriteLog::default(), storage); + namada::types::token::testing::init_token_storage(&mut wl_storage, 60); update_allowed_conversions(&mut wl_storage) .expect("update conversions failed"); wl_storage.commit_block().expect("commit failed"); diff --git a/apps/src/lib/node/ledger/storage/rocksdb.rs 
b/apps/src/lib/node/ledger/storage/rocksdb.rs index 61eb2c32e5..0ce9ba1fe1 100644 --- a/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -12,6 +12,7 @@ //! epoch can start //! - `next_epoch_min_start_time`: minimum block time from which the next //! epoch can start +//! - `replay_protection`: hashes of the processed transactions //! - `pred`: predecessor values of the top-level keys of the same name //! - `tx_queue` //! - `next_epoch_min_start_height` @@ -32,6 +33,9 @@ //! - `epoch`: block epoch //! - `address_gen`: established address generator //! - `header`: block's header +//! - `replay_protection`: hashes of processed tx +//! - `all`: the hashes included up to the last block +//! - `last`: the hashes included in the last block use std::fs::File; use std::io::BufWriter; @@ -40,7 +44,8 @@ use std::str::FromStr; use std::sync::Mutex; use ark_serialize::Write; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use namada::core::types::ethereum_structs; use namada::ledger::storage::types::PrefixIterator; @@ -73,6 +78,7 @@ const SUBSPACE_CF: &str = "subspace"; const DIFFS_CF: &str = "diffs"; const STATE_CF: &str = "state"; const BLOCK_CF: &str = "block"; +const REPLAY_PROTECTION_CF: &str = "replay_protection"; /// RocksDB handle #[derive(Debug)] @@ -160,6 +166,21 @@ pub fn open( block_cf_opts.set_block_based_table_factory(&table_opts); cfs.push(ColumnFamilyDescriptor::new(BLOCK_CF, block_cf_opts)); + // for replay protection (read/insert-intensive) + let mut replay_protection_cf_opts = Options::default(); + replay_protection_cf_opts + .set_compression_type(rocksdb::DBCompressionType::Zstd); + replay_protection_cf_opts.set_compression_options(0, 0, 0, 1024 * 1024); + replay_protection_cf_opts.set_level_compaction_dynamic_level_bytes(true); + // Prioritize minimizing read amplification + replay_protection_cf_opts + 
.set_compaction_style(rocksdb::DBCompactionStyle::Level); + replay_protection_cf_opts.set_block_based_table_factory(&table_opts); + cfs.push(ColumnFamilyDescriptor::new( + REPLAY_PROTECTION_CF, + replay_protection_cf_opts, + )); + rocksdb::DB::open_cf_descriptors(&db_opts, path, cfs) .map(RocksDB) .map_err(|e| Error::DBError(e.into_string())) @@ -353,6 +374,21 @@ impl RocksDB { self.dump_it(cf, None, &mut file); } + // replay protection + // Dump of replay protection keys is possible only at the last height or + // the previous one + if height == last_height { + let cf = self + .get_column_family(REPLAY_PROTECTION_CF) + .expect("Replay protection column family should exist"); + self.dump_it(cf, None, &mut file); + } else if height == last_height - 1 { + let cf = self + .get_column_family(REPLAY_PROTECTION_CF) + .expect("Replay protection column family should exist"); + self.dump_it(cf, Some("all".to_string()), &mut file); + } + println!("Done writing to {}", full_path.to_string_lossy()); } @@ -449,6 +485,11 @@ impl RocksDB { tracing::info!("Removing last block results"); batch.delete_cf(block_cf, format!("results/{}", last_block.height)); + // Delete the tx hashes included in the last block + let reprot_cf = self.get_column_family(REPLAY_PROTECTION_CF)?; + tracing::info!("Removing replay protection hashes"); + batch.delete_cf(reprot_cf, "last"); + // Execute next step in parallel let batch = Mutex::new(batch); @@ -903,11 +944,9 @@ impl DB for RocksDB { let key = prefix_key .push(&"header".to_owned()) .map_err(Error::KeyError)?; - batch.0.put_cf( - block_cf, - key.to_string(), - h.try_to_vec().expect("serialization failed"), - ); + batch + .0 + .put_cf(block_cf, key.to_string(), h.serialize_to_vec()); } } // Block hash @@ -1055,6 +1094,30 @@ impl DB for RocksDB { Ok(Some((stored_height, merkle_tree_stores))) } + fn has_replay_protection_entry( + &self, + hash: &namada::types::hash::Hash, + ) -> Result { + let replay_protection_cf = + 
self.get_column_family(REPLAY_PROTECTION_CF)?; + + for prefix in ["last", "all"] { + let key = Key::parse(prefix) + .map_err(Error::KeyError)? + .push(&hash.to_string()) + .map_err(Error::KeyError)?; + if self + .0 + .get_pinned_cf(replay_protection_cf, key.to_string()) + .map_err(|e| Error::DBError(e.into_string()))? + .is_some() + { + return Ok(true); + } + } + Ok(false) + } + fn read_subspace_val(&self, key: &Key) -> Result>> { let subspace_cf = self.get_column_family(SUBSPACE_CF)?; self.0 @@ -1341,6 +1404,34 @@ impl DB for RocksDB { None => Ok(()), } } + + fn write_replay_protection_entry( + &mut self, + batch: &mut Self::WriteBatch, + key: &Key, + ) -> Result<()> { + let replay_protection_cf = + self.get_column_family(REPLAY_PROTECTION_CF)?; + + batch + .0 + .put_cf(replay_protection_cf, key.to_string(), vec![]); + + Ok(()) + } + + fn delete_replay_protection_entry( + &mut self, + batch: &mut Self::WriteBatch, + key: &Key, + ) -> Result<()> { + let replay_protection_cf = + self.get_column_family(REPLAY_PROTECTION_CF)?; + + batch.0.delete_cf(replay_protection_cf, key.to_string()); + + Ok(()) + } } impl<'iter> DBIter<'iter> for RocksDB { @@ -1382,6 +1473,14 @@ impl<'iter> DBIter<'iter> for RocksDB { ) -> PersistentPrefixIterator<'iter> { iter_diffs_prefix(self, height, false) } + + fn iter_replay_protection(&'iter self) -> Self::PrefixIter { + let replay_protection_cf = self + .get_column_family(REPLAY_PROTECTION_CF) + .expect("{REPLAY_PROTECTION_CF} column family should exist"); + + iter_prefix(self, replay_protection_cf, "last".to_string(), None) + } } fn iter_subspace_prefix<'iter>( diff --git a/apps/src/lib/node/ledger/tendermint_node.rs b/apps/src/lib/node/ledger/tendermint_node.rs index 2bd5168ffa..0833f7c3a7 100644 --- a/apps/src/lib/node/ledger/tendermint_node.rs +++ b/apps/src/lib/node/ledger/tendermint_node.rs @@ -3,7 +3,7 @@ use std::path::{Path, PathBuf}; use std::process::Stdio; use std::str::FromStr; -use borsh::BorshSerialize; +use 
borsh_ext::BorshSerializeExt; use namada::types::chain::ChainId; use namada::types::key::*; use namada::types::storage::BlockHeight; @@ -24,7 +24,6 @@ use crate::facade::tendermint::{block, Genesis}; use crate::facade::tendermint_config::{ Error as TendermintError, TendermintConfig, }; - /// Env. var to output Tendermint log to stdout pub const ENV_VAR_TM_STDOUT: &str = "NAMADA_CMT_STDOUT"; @@ -243,19 +242,17 @@ fn validator_key_to_json( let (id_str, pk_arr, kp_arr) = match sk { common::SecretKey::Ed25519(_) => { let sk_ed: ed25519::SecretKey = sk.try_to_sk().unwrap(); - let keypair = [ - sk_ed.try_to_vec().unwrap(), - sk_ed.ref_to().try_to_vec().unwrap(), - ] - .concat(); - ("Ed25519", sk_ed.ref_to().try_to_vec().unwrap(), keypair) + let keypair = + [sk_ed.serialize_to_vec(), sk_ed.ref_to().serialize_to_vec()] + .concat(); + ("Ed25519", sk_ed.ref_to().serialize_to_vec(), keypair) } common::SecretKey::Secp256k1(_) => { let sk_sec: secp256k1::SecretKey = sk.try_to_sk().unwrap(); ( "Secp256k1", - sk_sec.ref_to().try_to_vec().unwrap(), - sk_sec.try_to_vec().unwrap(), + sk_sec.ref_to().serialize_to_vec(), + sk_sec.serialize_to_vec(), ) } }; diff --git a/apps/src/lib/wallet/cli_utils.rs b/apps/src/lib/wallet/cli_utils.rs index 72bb0acaab..9908af12bb 100644 --- a/apps/src/lib/wallet/cli_utils.rs +++ b/apps/src/lib/wallet/cli_utils.rs @@ -1,13 +1,13 @@ use std::fs::File; use std::io::{self, Write}; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::sdk::masp::find_valid_diversifier; -use namada::sdk::wallet::{DecryptionError, FindKeyError}; use namada::types::key::{PublicKeyHash, RefTo}; use namada::types::masp::{MaspValue, PaymentAddress}; +use namada_sdk::masp::find_valid_diversifier; +use namada_sdk::wallet::{DecryptionError, FindKeyError, GenRestoreKeyError}; use rand_core::OsRng; use crate::cli; @@ -271,6 +271,7 @@ pub fn key_and_address_restore( alias, alias_force, 
derivation_path, + None, encryption_password, ) .unwrap_or_else(|err| { @@ -306,21 +307,24 @@ pub fn key_and_address_gen( let mut rng = OsRng; let derivation_path_and_mnemonic_rng = derivation_path.map(|p| (p, &mut rng)); - let (alias, _key) = wallet + let (alias, _key, _mnemonic) = wallet .gen_key( scheme, alias, alias_force, + None, encryption_password, derivation_path_and_mnemonic_rng, ) - .unwrap_or_else(|err| { - eprintln!("{}", err); - cli::safe_exit(1); - }) - .unwrap_or_else(|| { - println!("No changes are persisted. Exiting."); - cli::safe_exit(0); + .unwrap_or_else(|err| match err { + GenRestoreKeyError::KeyStorageError => { + println!("No changes are persisted. Exiting."); + cli::safe_exit(0); + } + _ => { + eprintln!("{}", err); + cli::safe_exit(1); + } }); crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); println!( @@ -426,9 +430,7 @@ pub fn key_export(ctx: Context, args::KeyExport { alias }: args::KeyExport) { wallet .find_key(alias.to_lowercase(), None) .map(|keypair| { - let file_data = keypair - .try_to_vec() - .expect("Encoding keypair shouldn't fail"); + let file_data = keypair.serialize_to_vec(); let file_name = format!("key_{}", alias.to_lowercase()); let mut file = File::create(&file_name).unwrap(); diff --git a/apps/src/lib/wallet/defaults.rs b/apps/src/lib/wallet/defaults.rs index 00b0f49d26..82a9524daa 100644 --- a/apps/src/lib/wallet/defaults.rs +++ b/apps/src/lib/wallet/defaults.rs @@ -8,10 +8,10 @@ pub use dev::{ validator_keys, }; use namada::core::ledger::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; -use namada::ledger::{eth_bridge, governance, pgf, pos}; -use namada::sdk::wallet::alias::Alias; +use namada::ledger::{governance, pgf, pos}; use namada::types::address::Address; use namada::types::key::*; +use namada_sdk::wallet::alias::Alias; use crate::config::genesis::genesis_config::GenesisConfig; @@ -22,7 +22,7 @@ pub fn addresses_from_genesis(genesis: GenesisConfig) -> Vec<(Alias, Address)> { 
("pos".into(), pos::ADDRESS), ("pos_slash_pool".into(), pos::SLASH_POOL_ADDRESS), ("governance".into(), governance::ADDRESS), - ("eth_bridge".into(), eth_bridge::ADDRESS), + ("eth_bridge".into(), namada_sdk::eth_bridge::ADDRESS), ("bridge_pool".into(), BRIDGE_POOL_ADDRESS), ("pgf".into(), pgf::ADDRESS), ]; @@ -78,12 +78,12 @@ mod dev { use borsh::BorshDeserialize; use namada::ledger::{governance, pgf, pos}; - use namada::sdk::wallet::alias::Alias; use namada::types::address::{ apfel, btc, dot, eth, kartoffel, nam, schnitzel, Address, }; use namada::types::key::dkg_session_keys::DkgKeypair; use namada::types::key::*; + use namada_sdk::wallet::alias::Alias; /// Generate a new protocol signing keypair, eth hot key and DKG session /// keypair diff --git a/apps/src/lib/wallet/mod.rs b/apps/src/lib/wallet/mod.rs index f6611ebe18..18818daef5 100644 --- a/apps/src/lib/wallet/mod.rs +++ b/apps/src/lib/wallet/mod.rs @@ -9,14 +9,16 @@ use std::str::FromStr; use std::{env, fs}; use namada::bip39::{Language, Mnemonic}; -pub use namada::sdk::wallet::alias::Alias; -use namada::sdk::wallet::{ - AddressVpType, ConfirmationResponse, FindKeyError, GenRestoreKeyError, - Wallet, WalletUtils, -}; -pub use namada::sdk::wallet::{ValidatorData, ValidatorKeys}; use namada::types::address::Address; use namada::types::key::*; +pub use namada_sdk::wallet::alias::Alias; +use namada_sdk::wallet::fs::FsWalletStorage; +use namada_sdk::wallet::store::Store; +use namada_sdk::wallet::{ + AddressVpType, ConfirmationResponse, FindKeyError, GenRestoreKeyError, + Wallet, WalletIo, +}; +pub use namada_sdk::wallet::{ValidatorData, ValidatorKeys}; use rand_core::OsRng; pub use store::wallet_file; use zeroize::Zeroizing; @@ -24,32 +26,28 @@ use zeroize::Zeroizing; use crate::cli; use crate::config::genesis::genesis_config::GenesisConfig; -#[derive(Debug)] -pub struct CliWalletUtils; +#[derive(Debug, Clone)] +pub struct CliWalletUtils { + store_dir: PathBuf, +} -impl WalletUtils for CliWalletUtils { - type 
Rng = OsRng; - type Storage = PathBuf; +impl CliWalletUtils { + /// Initialize a wallet at the given directory + pub fn new(store_dir: PathBuf) -> Wallet { + Wallet::new(Self { store_dir }, Store::default()) + } +} - fn read_decryption_password() -> Zeroizing { - match env::var("NAMADA_WALLET_PASSWORD_FILE") { - Ok(path) => Zeroizing::new( - fs::read_to_string(path) - .expect("Something went wrong reading the file"), - ), - Err(_) => match env::var("NAMADA_WALLET_PASSWORD") { - Ok(password) => Zeroizing::new(password), - Err(_) => { - let prompt = "Enter your decryption password: "; - rpassword::read_password_from_tty(Some(prompt)) - .map(Zeroizing::new) - .expect("Failed reading password from tty.") - } - }, - } +impl FsWalletStorage for CliWalletUtils { + fn store_dir(&self) -> &PathBuf { + &self.store_dir } +} + +impl WalletIo for CliWalletUtils { + type Rng = OsRng; - fn read_encryption_password() -> Zeroizing { + fn read_password(confirm: bool) -> Zeroizing { let pwd = match env::var("NAMADA_WALLET_PASSWORD_FILE") { Ok(path) => Zeroizing::new( fs::read_to_string(path) @@ -57,7 +55,7 @@ impl WalletUtils for CliWalletUtils { ), Err(_) => match env::var("NAMADA_WALLET_PASSWORD") { Ok(password) => Zeroizing::new(password), - Err(_) => { + Err(_) if confirm => { let prompt = "Enter your encryption password: "; read_and_confirm_passphrase_tty(prompt).unwrap_or_else( |e| { @@ -69,9 +67,15 @@ impl WalletUtils for CliWalletUtils { }, ) } + Err(_) => { + let prompt = "Enter your decryption password: "; + rpassword::read_password_from_tty(Some(prompt)) + .map(Zeroizing::new) + .expect("Failed reading password from tty.") + } }, }; - if pwd.as_str().is_empty() { + if confirm && pwd.as_str().is_empty() { eprintln!("Password cannot be empty"); eprintln!("Action cancelled, no changes persisted."); cli::safe_exit(1) @@ -190,7 +194,7 @@ pub fn read_and_confirm_passphrase_tty( /// for signing protocol txs and for the DKG (which will also be stored) /// A protocol keypair may be 
optionally provided, indicating that /// we should re-use a keypair already in the wallet -pub fn gen_validator_keys( +pub fn gen_validator_keys( wallet: &mut Wallet, eth_bridge_pk: Option, protocol_pk: Option, @@ -221,7 +225,7 @@ fn find_secret_key( ) -> Result, FindKeyError> where F: Fn(&ValidatorData) -> common::SecretKey, - U: WalletUtils, + U: WalletIo, { maybe_pk .map(|pk| { @@ -254,19 +258,19 @@ pub fn add_genesis_addresses( /// Save the wallet store to a file. pub fn save(wallet: &Wallet) -> std::io::Result<()> { - self::store::save(wallet.store(), wallet.store_dir()) + wallet + .save() + .map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err)) } /// Load a wallet from the store file. pub fn load(store_dir: &Path) -> Option> { - let store = self::store::load(store_dir).unwrap_or_else(|err| { + let mut wallet = CliWalletUtils::new(store_dir.to_path_buf()); + wallet.load().unwrap_or_else(|err| { eprintln!("Unable to load the wallet: {}", err); cli::safe_exit(1) }); - Some(Wallet::::new( - store_dir.to_path_buf(), - store, - )) + Some(wallet) } /// Load a wallet from the store file or create a new wallet without any @@ -276,7 +280,9 @@ pub fn load_or_new(store_dir: &Path) -> Wallet { eprintln!("Unable to load the wallet: {}", err); cli::safe_exit(1) }); - Wallet::::new(store_dir.to_path_buf(), store) + let mut wallet = CliWalletUtils::new(store_dir.to_path_buf()); + *wallet.store_mut() = store; + wallet } /// Load a wallet from the store file or create a new one with the default @@ -290,7 +296,9 @@ pub fn load_or_new_from_genesis( eprintln!("Unable to load the wallet: {}", err); cli::safe_exit(1) }); - Wallet::::new(store_dir.to_path_buf(), store) + let mut wallet = CliWalletUtils::new(store_dir.to_path_buf()); + *wallet.store_mut() = store; + wallet } /// Read the password for encryption from the file/env/stdin, with @@ -302,14 +310,14 @@ pub fn read_and_confirm_encryption_password( println!("Warning: The keypair will NOT be encrypted."); None } 
else { - Some(CliWalletUtils::read_encryption_password()) + Some(CliWalletUtils::read_password(true)) } } #[cfg(test)] mod tests { use namada::bip39::MnemonicType; - use namada::sdk::wallet::WalletUtils; + use namada_sdk::wallet::WalletIo; use rand_core; use super::CliWalletUtils; diff --git a/apps/src/lib/wallet/pre_genesis.rs b/apps/src/lib/wallet/pre_genesis.rs index 21a80267f1..da12c2dcce 100644 --- a/apps/src/lib/wallet/pre_genesis.rs +++ b/apps/src/lib/wallet/pre_genesis.rs @@ -3,11 +3,11 @@ use std::path::{Path, PathBuf}; use ark_serialize::{Read, Write}; use fd_lock::RwLock; -use namada::sdk::wallet::pre_genesis::{ +use namada::types::key::SchemeType; +use namada_sdk::wallet::pre_genesis::{ ReadError, ValidatorStore, ValidatorWallet, }; -use namada::sdk::wallet::{gen_key_to_store, WalletUtils}; -use namada::types::key::SchemeType; +use namada_sdk::wallet::{gen_key_to_store, WalletIo}; use zeroize::Zeroizing; use crate::wallet::store::gen_validator_keys; @@ -75,7 +75,7 @@ pub fn load(store_dir: &Path) -> Result { || store.consensus_key.is_encrypted() || store.account_key.is_encrypted() { - Some(CliWalletUtils::read_decryption_password()) + Some(CliWalletUtils::read_password(false)) } else { None }; diff --git a/apps/src/lib/wallet/store.rs b/apps/src/lib/wallet/store.rs index 0f2aa86b7b..62eae8ac0e 100644 --- a/apps/src/lib/wallet/store.rs +++ b/apps/src/lib/wallet/store.rs @@ -1,37 +1,22 @@ -use std::fs; -use std::io::prelude::*; -use std::io::Write; use std::path::{Path, PathBuf}; #[cfg(not(feature = "dev"))] use std::str::FromStr; use ark_std::rand::prelude::*; use ark_std::rand::SeedableRng; -use fd_lock::RwLock; -#[cfg(not(feature = "dev"))] -use namada::sdk::wallet::store::AddressVpType; -#[cfg(feature = "dev")] -use namada::sdk::wallet::StoredKeypair; -use namada::sdk::wallet::{gen_sk_rng, Store, ValidatorKeys}; #[cfg(not(feature = "dev"))] use namada::types::address::Address; use namada::types::key::*; use namada::types::transaction::EllipticCurve; 
-use thiserror::Error; +#[cfg(not(feature = "dev"))] +use namada_sdk::wallet::store::AddressVpType; +#[cfg(feature = "dev")] +use namada_sdk::wallet::StoredKeypair; +use namada_sdk::wallet::{gen_sk_rng, LoadStoreError, Store, ValidatorKeys}; use crate::config::genesis::genesis_config::GenesisConfig; use crate::wallet::CliWalletUtils; -#[derive(Error, Debug)] -pub enum LoadStoreError { - #[error("Failed decoding the wallet store: {0}")] - Decode(toml::de::Error), - #[error("Failed to read the wallet store from {0}: {1}")] - ReadWallet(String, String), - #[error("Failed to write the wallet store: {0}")] - StoreNewWallet(String), -} - /// Wallet file name const FILE_NAME: &str = "wallet.toml"; @@ -40,28 +25,12 @@ pub fn wallet_file(store_dir: impl AsRef) -> PathBuf { store_dir.as_ref().join(FILE_NAME) } -/// Save the wallet store to a file. -pub fn save(store: &Store, store_dir: &Path) -> std::io::Result<()> { - let data = store.encode(); - let wallet_path = wallet_file(store_dir); - // Make sure the dir exists - let wallet_dir = wallet_path.parent().unwrap(); - fs::create_dir_all(wallet_dir)?; - // Write the file - let mut options = fs::OpenOptions::new(); - options.create(true).write(true).truncate(true); - let mut lock = RwLock::new(options.open(wallet_path)?); - let mut guard = lock.write()?; - guard.write_all(&data) -} - /// Load the store file or create a new one without any keys or addresses. 
pub fn load_or_new(store_dir: &Path) -> Result { load(store_dir).or_else(|_| { - let store = Store::default(); - save(&store, store_dir) - .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string()))?; - Ok(store) + let wallet = CliWalletUtils::new(store_dir.to_path_buf()); + wallet.save()?; + Ok(wallet.into()) }) } @@ -80,37 +49,18 @@ pub fn load_or_new_from_genesis( let _ = genesis_cfg; new() }; - save(&store, store_dir) - .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string()))?; - Ok(store) + let mut wallet = CliWalletUtils::new(store_dir.to_path_buf()); + *wallet.store_mut() = store; + wallet.save()?; + Ok(wallet.into()) }) } /// Attempt to load the store file. pub fn load(store_dir: &Path) -> Result { - let wallet_file = wallet_file(store_dir); - let mut options = fs::OpenOptions::new(); - options.read(true).write(false); - let lock = RwLock::new(options.open(&wallet_file).map_err(|err| { - LoadStoreError::ReadWallet( - wallet_file.to_string_lossy().into_owned(), - err.to_string(), - ) - })?); - let guard = lock.read().map_err(|err| { - LoadStoreError::ReadWallet( - wallet_file.to_string_lossy().into_owned(), - err.to_string(), - ) - })?; - let mut store = Vec::::new(); - (&*guard).read_to_end(&mut store).map_err(|err| { - LoadStoreError::ReadWallet( - store_dir.to_str().unwrap().parse().unwrap(), - err.to_string(), - ) - })?; - Store::decode(store).map_err(LoadStoreError::Decode) + let mut wallet = CliWalletUtils::new(store_dir.to_path_buf()); + wallet.load()?; + Ok(wallet.into()) } /// Add addresses from a genesis configuration. 
diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 91a5d45333..0e72e98a4b 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -12,10 +12,6 @@ readme.workspace = true repository.workspace = true version.workspace = true -[lib] -name = "namada_benches" -path = "lib.rs" - [[bench]] name = "whitelisted_txs" harness = false @@ -42,21 +38,14 @@ harness = false path = "host_env.rs" [dependencies] -async-trait.workspace = true -borsh.workspace = true -ferveo-common.workspace = true -masp_primitives.workspace = true -masp_proofs.workspace = true + +[dev-dependencies] namada = { path = "../shared", features = ["testing"] } namada_apps = { path = "../apps", features = ["testing"] } -namada_test_utils = { path = "../test_utils" } -prost.workspace = true -rand.workspace = true +borsh.workspace = true +borsh-ext.workspace = true +criterion = { version = "0.5", features = ["html_reports"] } +ferveo-common.workspace = true rand_core.workspace = true +rand.workspace = true sha2.workspace = true -tokio.workspace = true -tempfile.workspace = true -tracing-subscriber = { workspace = true, features = ["std"]} - -[dev-dependencies] -criterion = { version = "0.5", features = ["html_reports"] } diff --git a/benches/README.md b/benches/README.md index 02b0d52a91..86978eb6f7 100644 --- a/benches/README.md +++ b/benches/README.md @@ -2,6 +2,16 @@ The benchmarks are built with [criterion.rs](https://bheisler.github.io/criterion.rs/book). +Measurements are taken on the elapsed wall-time. + +The benchmarks only focus on sucessfull transactions and vps: in case of failure, the bench function shall panic to avoid timing incomplete execution paths. + +In addition, this crate also contains benchmarks for `WrapperTx` (`namada::core::types::transaction::wrapper::WrapperTx`) validation and `host_env` (`namada::vm::host_env`) exposed functions that define the gas constants of `gas` (`namada::core::ledger::gas`). 
+ +For more realistic results these benchmarks should be run on all the combination of supported OS/architecture. + +## Testing & running + To enable tracing logs, run with e.g. `RUST_LOG=debug`. To ensure that the benches can run successfully without performing measurement, you can run `make test-benches` from the workspace run. diff --git a/benches/host_env.rs b/benches/host_env.rs index 6f385b93bc..f2e2c3ee2a 100644 --- a/benches/host_env.rs +++ b/benches/host_env.rs @@ -1,6 +1,6 @@ use std::collections::HashSet; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use criterion::{criterion_group, criterion_main, Criterion}; use namada::core::types::account::AccountPublicKeysMap; use namada::core::types::address; @@ -19,7 +19,7 @@ fn tx_section_signature_validation(c: &mut Criterion) { key: None, shielded: None, }; - let section = Section::Data(Data::new(transfer_data.try_to_vec().unwrap())); + let section = Section::Data(Data::new(transfer_data.serialize_to_vec())); let section_hash = section.get_hash(); let pkim = AccountPublicKeysMap::from_iter([ @@ -35,7 +35,7 @@ fn tx_section_signature_validation(c: &mut Criterion) { c.bench_function("tx_section_signature_validation", |b| { b.iter(|| { multisig - .verify_signature(&mut HashSet::new(), &pkim, &None) + .verify_signature(&mut HashSet::new(), &pkim, &None, &mut None) .unwrap() }) }); diff --git a/benches/native_vps.rs b/benches/native_vps.rs index 77373080c4..6a5d8e52ae 100644 --- a/benches/native_vps.rs +++ b/benches/native_vps.rs @@ -23,7 +23,6 @@ use namada::ledger::gas::{TxGasMeter, VpGasMeter}; use namada::ledger::governance::GovernanceVp; use namada::ledger::native_vp::ibc::Ibc; use namada::ledger::native_vp::multitoken::MultitokenVp; -use namada::ledger::native_vp::replay_protection::ReplayProtectionVp; use namada::ledger::native_vp::{Ctx, NativeVp}; use namada::ledger::storage_api::StorageRead; use namada::proto::{Code, Section}; @@ -32,54 +31,12 @@ use namada::types::storage::{Epoch, 
TxIndex}; use namada::types::transaction::governance::{ InitProposalData, VoteProposalData, }; -use namada_apps::wallet::defaults; -use namada_benches::{ +use namada_apps::bench_utils::{ generate_foreign_key_tx, generate_ibc_transfer_tx, generate_ibc_tx, generate_tx, BenchShell, TX_IBC_WASM, TX_INIT_PROPOSAL_WASM, TX_TRANSFER_WASM, TX_VOTE_PROPOSAL_WASM, }; - -fn replay_protection(c: &mut Criterion) { - // Write a random key under the replay protection subspace - let tx = generate_foreign_key_tx(&defaults::albert_keypair()); - let mut shell = BenchShell::default(); - - shell.execute_tx(&tx); - let (verifiers, keys_changed) = shell - .wl_storage - .write_log - .verifiers_and_changed_keys(&BTreeSet::default()); - - let replay_protection = ReplayProtectionVp { - ctx: Ctx::new( - &Address::Internal(InternalAddress::ReplayProtection), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, - &tx, - &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), - &keys_changed, - &verifiers, - shell.vp_wasm_cache.clone(), - ), - }; - - c.bench_function("vp_replay_protection", |b| { - b.iter(|| { - // NOTE: thiv VP will always fail when triggered so don't assert - // here - replay_protection - .validate_tx( - &tx, - replay_protection.ctx.keys_changed, - replay_protection.ctx.verifiers, - ) - .unwrap() - }) - }); -} +use namada_apps::wallet::defaults; fn governance(c: &mut Criterion) { let mut group = c.benchmark_group("vp_governance"); @@ -476,7 +433,6 @@ fn vp_multitoken(c: &mut Criterion) { criterion_group!( native_vps, - replay_protection, governance, // slash_fund, ibc, diff --git a/benches/process_wrapper.rs b/benches/process_wrapper.rs index ce466b1058..d6fbe9b483 100644 --- a/benches/process_wrapper.rs +++ b/benches/process_wrapper.rs @@ -7,9 +7,9 @@ use namada::types::key::RefTo; use namada::types::storage::BlockHeight; use namada::types::time::DateTimeUtc; use namada::types::transaction::{Fee, WrapperTx}; +use 
namada_apps::bench_utils::{generate_tx, BenchShell, TX_TRANSFER_WASM}; use namada_apps::node::ledger::shell::process_proposal::ValidationMeta; use namada_apps::wallet::defaults; -use namada_benches::{generate_tx, BenchShell, TX_TRANSFER_WASM}; fn process_tx(c: &mut Criterion) { let mut shell = BenchShell::default(); diff --git a/benches/txs.rs b/benches/txs.rs index a1373c7931..ac24b6ab4f 100644 --- a/benches/txs.rs +++ b/benches/txs.rs @@ -20,24 +20,24 @@ use namada::types::storage::Key; use namada::types::transaction::governance::{ InitProposalData, VoteProposalData, }; -use namada::types::transaction::pos::{Bond, CommissionChange, Withdraw}; +use namada::types::transaction::pos::{ + Bond, CommissionChange, Redelegation, Withdraw, +}; use namada::types::transaction::EllipticCurve; -use namada_apps::wallet::defaults; -use namada_benches::{ +use namada_apps::bench_utils::{ generate_ibc_transfer_tx, generate_tx, BenchShell, BenchShieldedCtx, ALBERT_PAYMENT_ADDRESS, ALBERT_SPENDING_KEY, BERTHA_PAYMENT_ADDRESS, - TX_BOND_WASM, TX_CHANGE_VALIDATOR_COMMISSION_WASM, TX_INIT_PROPOSAL_WASM, + TX_BOND_WASM, TX_CHANGE_VALIDATOR_COMMISSION_WASM, TX_INIT_ACCOUNT_WASM, + TX_INIT_PROPOSAL_WASM, TX_INIT_VALIDATOR_WASM, TX_REDELEGATE_WASM, TX_REVEAL_PK_WASM, TX_UNBOND_WASM, TX_UNJAIL_VALIDATOR_WASM, - TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL_WASM, VP_VALIDATOR_WASM, + TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL_WASM, TX_WITHDRAW_WASM, + VP_VALIDATOR_WASM, }; +use namada_apps::wallet::defaults; use rand::rngs::StdRng; use rand::SeedableRng; use sha2::Digest; -const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; -const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; -const TX_INIT_VALIDATOR_WASM: &str = "tx_init_validator.wasm"; - // TODO: need to benchmark tx_bridge_pool.wasm fn transfer(c: &mut Criterion) { let mut group = c.benchmark_group("transfer"); @@ -286,6 +286,43 @@ fn withdraw(c: &mut Criterion) { group.finish(); } +fn redelegate(c: &mut Criterion) { + let mut group = 
c.benchmark_group("redelegate"); + + let redelegation = |dest_validator| { + generate_tx( + TX_REDELEGATE_WASM, + Redelegation { + src_validator: defaults::validator_address(), + dest_validator, + owner: defaults::albert_address(), + amount: Amount::from(1), + }, + None, + None, + Some(&defaults::albert_keypair()), + ) + }; + + group.bench_function("redelegate", |b| { + b.iter_batched_ref( + || { + let shell = BenchShell::default(); + // Find the other genesis validator + let current_epoch = shell.wl_storage.get_block_epoch().unwrap(); + let validators = namada::proof_of_stake::read_consensus_validator_set_addresses(&shell.inner.wl_storage, current_epoch).unwrap(); + let validator_2 = validators.into_iter().find(|addr| addr != &defaults::validator_address()).expect("There must be another validator to redelegate to"); + // Prepare the redelegation tx + (shell, redelegation(validator_2)) + }, + |(shell, tx)| shell.execute_tx(tx), + criterion::BatchSize::LargeInput, + ) + }); + + group.finish(); +} + fn reveal_pk(c: &mut Criterion) { let mut csprng = rand::rngs::OsRng {}; let new_implicit_account: common::SecretKey = @@ -687,6 +724,7 @@ criterion_group!( bond, unbond, withdraw, + redelegate, reveal_pk, update_vp, init_account, diff --git a/benches/vps.rs b/benches/vps.rs index 6efaf78e4c..77dfdcae87 100644 --- a/benches/vps.rs +++ b/benches/vps.rs @@ -19,14 +19,14 @@ use namada::types::storage::{Key, TxIndex}; use namada::types::transaction::governance::VoteProposalData; use namada::types::transaction::pos::{Bond, CommissionChange}; use namada::vm::wasm::run; -use namada_apps::wallet::defaults; -use namada_benches::{ +use namada_apps::bench_utils::{ generate_foreign_key_tx, generate_tx, BenchShell, BenchShieldedCtx, ALBERT_PAYMENT_ADDRESS, ALBERT_SPENDING_KEY, BERTHA_PAYMENT_ADDRESS, TX_BOND_WASM, TX_CHANGE_VALIDATOR_COMMISSION_WASM, TX_REVEAL_PK_WASM, TX_TRANSFER_WASM, TX_UNBOND_WASM, TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL_WASM, VP_VALIDATOR_WASM, }; +use 
namada_apps::wallet::defaults; use sha2::Digest; const VP_USER_WASM: &str = "vp_user.wasm"; @@ -277,7 +277,7 @@ fn vp_implicit(c: &mut Criterion) { shell.commit(); } - if bench_name == "transfer" { + if bench_name == "transfer" || bench_name == "pos" { // Transfer some tokens to the implicit address shell.execute_tx(&received_transfer); shell.wl_storage.commit_tx(); diff --git a/core/Cargo.toml b/core/Cargo.toml index ebbb35f383..05433958e8 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -25,11 +25,6 @@ ferveo-tpke = [ wasm-runtime = [ "rayon", ] -# secp256k1 key signing, disabled in WASM build by default as it bloats the -# build a lot -secp256k1-sign = [ - "libsecp256k1/hmac", -] abciplus = [ "ibc", @@ -61,6 +56,7 @@ ark-serialize.workspace = true arse-merkle-tree.workspace = true bech32.workspace = true borsh.workspace = true +borsh-ext.workspace = true chrono.workspace = true data-encoding.workspace = true derivative.workspace = true @@ -72,13 +68,13 @@ ferveo = {optional = true, git = "https://github.com/anoma/ferveo", rev = "e5abd ferveo-common = {git = "https://github.com/anoma/ferveo", rev = "e5abd0acc938da90140351a65a26472eb495ce4d"} tpke = {package = "group-threshold-cryptography", optional = true, git = "https://github.com/anoma/ferveo", rev = "e5abd0acc938da90140351a65a26472eb495ce4d"} # TODO using the same version of tendermint-rs as we do here. 
-ibc = { git = "https://github.com/heliaxdev/cosmos-ibc-rs.git", rev = "38a827d3901e590b2935ee5b6b81b4d67c399560", features = ["serde"], optional = true} +ibc = { git = "https://github.com/heliaxdev/cosmos-ibc-rs.git", rev = "206cb5fa74a7ca38038b937d202ae39fbbd63c19", features = ["serde"], optional = true} ibc-proto = {git = "https://github.com/heliaxdev/ibc-proto-rs.git", rev = "31892ee743b2af017d5629b2af419ee20b6100c7", default-features = false, optional = true} ics23.workspace = true impl-num-traits = "0.1.2" index-set.workspace = true itertools.workspace = true -libsecp256k1.workspace = true +k256.workspace = true masp_primitives.workspace = true num256.workspace = true num-integer = "0.1.45" @@ -104,7 +100,6 @@ zeroize.workspace = true [dev-dependencies] assert_matches.workspace = true -libsecp256k1 = {workspace = true, features = ["hmac"]} pretty_assertions.workspace = true proptest.workspace = true rand.workspace = true diff --git a/core/src/ledger/governance/cli/offline.rs b/core/src/ledger/governance/cli/offline.rs index fb56a1270a..64b13e29db 100644 --- a/core/src/ledger/governance/cli/offline.rs +++ b/core/src/ledger/governance/cli/offline.rs @@ -3,6 +3,7 @@ use std::fs::{File, ReadDir}; use std::path::PathBuf; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use serde::{Deserialize, Serialize}; use super::onchain::ProposalVote; @@ -214,14 +215,8 @@ impl OfflineVote { keypairs: Vec, account_public_keys_map: &AccountPublicKeysMap, ) -> Self { - let proposal_vote_data = self - .vote - .try_to_vec() - .expect("Conversion to bytes shouldn't fail."); - let delegations_hash = self - .delegations - .try_to_vec() - .expect("Conversion to bytes shouldn't fail."); + let proposal_vote_data = self.vote.serialize_to_vec(); + let delegations_hash = self.delegations.serialize_to_vec(); let vote_hash = Hash::sha256( [ @@ -248,18 +243,9 @@ impl OfflineVote { /// compute the hash of a proposal pub fn compute_hash(&self) -> Hash { - let 
proposal_hash_data = self - .proposal_hash - .try_to_vec() - .expect("Conversion to bytes shouldn't fail."); - let proposal_vote_data = self - .vote - .try_to_vec() - .expect("Conversion to bytes shouldn't fail."); - let delegations_hash = self - .delegations - .try_to_vec() - .expect("Conversion to bytes shouldn't fail."); + let proposal_hash_data = self.proposal_hash.serialize_to_vec(); + let proposal_vote_data = self.vote.serialize_to_vec(); + let delegations_hash = self.delegations.serialize_to_vec(); let vote_serialized = &[proposal_hash_data, proposal_vote_data, delegations_hash] .concat(); diff --git a/core/src/ledger/governance/storage/keys.rs b/core/src/ledger/governance/storage/keys.rs index a975b6541f..92beb9da36 100644 --- a/core/src/ledger/governance/storage/keys.rs +++ b/core/src/ledger/governance/storage/keys.rs @@ -26,6 +26,7 @@ struct Keys { min_grace_epoch: &'static str, counter: &'static str, pending: &'static str, + result: &'static str, } /// Check if key is inside governance address space @@ -459,6 +460,15 @@ pub fn get_proposal_execution_key(id: u64) -> Key { .expect("Cannot obtain a storage key") } +/// Get the proposal result key +pub fn get_proposal_result_key(id: u64) -> Key { + proposal_prefix() + .push(&id.to_string()) + .expect("Cannot obtain a storage key") + .push(&Keys::VALUES.result.to_owned()) + .expect("Cannot obtain a storage key") +} + /// Get proposal id from key pub fn get_proposal_id(key: &Key) -> Option { match key.get_at(2) { diff --git a/core/src/ledger/governance/storage/proposal.rs b/core/src/ledger/governance/storage/proposal.rs index 72a15b8631..c4a59389ef 100644 --- a/core/src/ledger/governance/storage/proposal.rs +++ b/core/src/ledger/governance/storage/proposal.rs @@ -55,6 +55,7 @@ impl StoragePgfFunding { PartialEq, Eq, PartialOrd, + Ord, BorshSerialize, BorshDeserialize, Serialize, diff --git a/core/src/ledger/governance/utils.rs b/core/src/ledger/governance/utils.rs index 1a4bf0fc7c..33f032def1 100644 --- 
a/core/src/ledger/governance/utils.rs +++ b/core/src/ledger/governance/utils.rs @@ -75,6 +75,7 @@ impl TallyType { } /// The result of a proposal +#[derive(Copy, Clone, BorshSerialize, BorshDeserialize)] pub enum TallyResult { /// Proposal was accepted with the associated value Passed, @@ -126,6 +127,7 @@ impl TallyResult { } /// The result with votes of a proposal +#[derive(Clone, Copy, BorshDeserialize, BorshSerialize)] pub struct ProposalResult { /// The result of a proposal pub result: TallyResult, diff --git a/core/src/ledger/ibc/context/common.rs b/core/src/ledger/ibc/context/common.rs index 5e963e7a5f..a6018d231d 100644 --- a/core/src/ledger/ibc/context/common.rs +++ b/core/src/ledger/ibc/context/common.rs @@ -1,6 +1,7 @@ //! IbcCommonContext implementation for IBC -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use prost::Message; use sha2::Digest; @@ -358,26 +359,24 @@ pub trait IbcCommonContext: IbcStorageContext { }) } - /// Write the IBC denom + /// Write the IBC denom. The given address could be a non-Namada token. 
fn store_ibc_denom( &mut self, + addr: impl AsRef, trace_hash: impl AsRef, denom: impl AsRef, ) -> Result<(), ContextError> { - let key = storage::ibc_denom_key(trace_hash.as_ref()); + let key = storage::ibc_denom_key(addr, trace_hash.as_ref()); let has_key = self.has_key(&key).map_err(|_| { ContextError::ChannelError(ChannelError::Other { description: format!( "Reading the IBC denom failed: Key {}", - key + key, ), }) })?; if !has_key { - let bytes = denom - .as_ref() - .try_to_vec() - .expect("encoding shouldn't fail"); + let bytes = denom.as_ref().serialize_to_vec(); self.write(&key, bytes).map_err(|_| { ContextError::ChannelError(ChannelError::Other { description: format!( @@ -434,7 +433,7 @@ pub trait IbcCommonContext: IbcStorageContext { if !has_key { // IBC denomination should be zero for U256 let denom = token::Denomination::from(0); - let bytes = denom.try_to_vec().expect("encoding shouldn't fail"); + let bytes = denom.serialize_to_vec(); self.write(&key, bytes).map_err(|_| { ContextError::ChannelError(ChannelError::Other { description: format!( diff --git a/core/src/ledger/ibc/context/execution.rs b/core/src/ledger/ibc/context/execution.rs index ec0708ce2a..0160cb1d29 100644 --- a/core/src/ledger/ibc/context/execution.rs +++ b/core/src/ledger/ibc/context/execution.rs @@ -1,6 +1,7 @@ //! ExecutionContext implementation for IBC -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use super::super::{IbcActions, IbcCommonContext}; use crate::ibc::core::events::IbcEvent; @@ -179,7 +180,7 @@ where }))? 
} }; - let bytes = list.try_to_vec().expect("encoding shouldn't fail"); + let bytes = list.serialize_to_vec(); self.ctx.borrow_mut().write(&key, bytes).map_err(|_| { ContextError::ConnectionError(ConnectionError::Other { description: format!( diff --git a/core/src/ledger/ibc/context/storage.rs b/core/src/ledger/ibc/context/storage.rs index 2d1c8afdb3..ef24cd94f2 100644 --- a/core/src/ledger/ibc/context/storage.rs +++ b/core/src/ledger/ibc/context/storage.rs @@ -7,7 +7,7 @@ pub use ics23::ProofSpec; use super::super::Error; use crate::ledger::storage_api; use crate::types::address::Address; -use crate::types::ibc::IbcEvent; +use crate::types::ibc::{IbcEvent, IbcShieldedTransfer}; use crate::types::storage::{BlockHeight, Header, Key}; use crate::types::token::DenominatedAmount; @@ -55,11 +55,11 @@ pub trait IbcStorageContext { /// Emit an IBC event fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), Self::Error>; - /// Get an IBC event - fn get_ibc_event( + /// Get IBC events + fn get_ibc_events( &self, event_type: impl AsRef, - ) -> Result, Self::Error>; + ) -> Result, Self::Error>; /// Transfer token fn transfer_token( @@ -70,6 +70,12 @@ pub trait IbcStorageContext { amount: DenominatedAmount, ) -> Result<(), Self::Error>; + /// Handle masp tx + fn handle_masp_tx( + &mut self, + shielded: &IbcShieldedTransfer, + ) -> Result<(), Self::Error>; + /// Mint token fn mint_token( &mut self, diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index fcabcee745..c78afeabd6 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -7,6 +7,7 @@ use std::cell::RefCell; use std::collections::HashMap; use std::fmt::Debug; use std::rc::Rc; +use std::str::FromStr; use std::time::Duration; pub use context::common::IbcCommonContext; @@ -18,15 +19,24 @@ use thiserror::Error; use crate::ibc::applications::transfer::error::TokenTransferError; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use 
crate::ibc::applications::transfer::{ - send_transfer_execute, send_transfer_validate, + is_receiver_chain_source, send_transfer_execute, send_transfer_validate, + PrefixedDenom, TracePrefix, }; use crate::ibc::core::ics04_channel::msgs::PacketMsg; use crate::ibc::core::ics23_commitment::specs::ProofSpecs; -use crate::ibc::core::ics24_host::identifier::{ChainId as IbcChainId, PortId}; +use crate::ibc::core::ics24_host::identifier::{ + ChainId as IbcChainId, ChannelId, PortId, +}; use crate::ibc::core::router::{Module, ModuleId, Router}; use crate::ibc::core::{execute, validate, MsgEnvelope, RouterError}; use crate::ibc_proto::google::protobuf::Any; +use crate::types::address::{masp, Address}; use crate::types::chain::ChainId; +use crate::types::ibc::{ + get_shielded_transfer, is_ibc_denom, EVENT_TYPE_DENOM_TRACE, + EVENT_TYPE_PACKET, +}; +use crate::types::masp::PaymentAddress; #[allow(missing_docs)] #[derive(Error, Debug)] @@ -49,6 +59,8 @@ pub enum Error { Denom(String), #[error("Invalid chain ID: {0}")] ChainId(ChainId), + #[error("Handling MASP transaction error: {0}")] + MaspTx(String), } /// IBC actions to handle IBC operations @@ -124,31 +136,21 @@ where let envelope = MsgEnvelope::try_from(any_msg).map_err(Error::Execution)?; execute(self, envelope.clone()).map_err(Error::Execution)?; + // For receiving the token to a shielded address + self.handle_masp_tx(&envelope)?; // the current ibc-rs execution doesn't store the denom for the // token hash when transfer with MsgRecvPacket - self.store_denom(envelope) + self.store_denom(&envelope) } } } /// Store the denom when transfer with MsgRecvPacket - fn store_denom(&mut self, envelope: MsgEnvelope) -> Result<(), Error> { + fn store_denom(&mut self, envelope: &MsgEnvelope) -> Result<(), Error> { match envelope { MsgEnvelope::Packet(PacketMsg::Recv(_)) => { - let result = self - .ctx - .borrow() - .get_ibc_event("denomination_trace") - .map_err(|_| { - Error::Denom("Reading the IBC event failed".to_string()) - 
})?; - if let Some((trace_hash, ibc_denom)) = - result.as_ref().and_then(|event| { - event - .attributes - .get("trace_hash") - .zip(event.attributes.get("denom")) - }) + if let Some((trace_hash, ibc_denom, receiver)) = + self.get_minted_token_info()? { // If the denomination trace event has the trace hash and // the IBC denom, a token has been minted. The raw IBC denom @@ -157,13 +159,24 @@ where // denomination is also set for the minting. self.ctx .borrow_mut() - .store_ibc_denom(trace_hash, ibc_denom) + .store_ibc_denom(&receiver, &trace_hash, &ibc_denom) .map_err(|e| { Error::Denom(format!( "Writing the IBC denom failed: {}", e )) })?; + if let Some((_, base_token)) = is_ibc_denom(&ibc_denom) { + self.ctx + .borrow_mut() + .store_ibc_denom(base_token, trace_hash, &ibc_denom) + .map_err(|e| { + Error::Denom(format!( + "Writing the IBC denom failed: {}", + e + )) + })?; + } let token = storage::ibc_token(ibc_denom); self.ctx.borrow_mut().store_token_denom(&token).map_err( |e| { @@ -182,6 +195,57 @@ where } } + /// Get the minted IBC denom, the trace hash, and the receiver from IBC + /// events + fn get_minted_token_info( + &self, + ) -> Result, Error> { + let receive_event = self + .ctx + .borrow() + .get_ibc_events(EVENT_TYPE_PACKET) + .map_err(|_| { + Error::Denom("Reading the IBC event failed".to_string()) + })?; + // The receiving event should be only one in the single IBC transaction + let receiver = match receive_event + .first() + .as_ref() + .and_then(|event| event.attributes.get("receiver")) + { + // Check the receiver address + Some(receiver) => Some( + Address::decode(receiver) + .or_else(|_| { + // Replace it with MASP address when the receiver is a + // payment address + PaymentAddress::from_str(receiver).map(|_| masp()) + }) + .map_err(|_| { + Error::Denom(format!( + "Decoding the receiver address failed: {:?}", + receive_event + )) + })? 
+ .to_string(), + ), + None => None, + }; + let denom_event = self + .ctx + .borrow() + .get_ibc_events(EVENT_TYPE_DENOM_TRACE) + .map_err(|_| { + Error::Denom("Reading the IBC event failed".to_string()) + })?; + // The denom event should be only one in the single IBC transaction + Ok(denom_event.first().as_ref().and_then(|event| { + let trace_hash = event.attributes.get("trace_hash").cloned()?; + let denom = event.attributes.get("denom").cloned()?; + Some((trace_hash, denom, receiver?)) + })) + } + /// Validate according to the message in IBC VP pub fn validate(&self, tx_data: &[u8]) -> Result<(), Error> { let any_msg = Any::decode(tx_data).map_err(Error::DecodingData)?; @@ -204,6 +268,40 @@ where } } } + + /// Handle the MASP transaction if needed + fn handle_masp_tx(&mut self, envelope: &MsgEnvelope) -> Result<(), Error> { + let shielded_transfer = match envelope { + MsgEnvelope::Packet(PacketMsg::Recv(_)) => { + let event = self + .ctx + .borrow() + .get_ibc_events(EVENT_TYPE_PACKET) + .map_err(|_| { + Error::MaspTx( + "Reading the IBC event failed".to_string(), + ) + })?; + // The receiving event should be only one in the single IBC + // transaction + match event.first() { + Some(event) => get_shielded_transfer(event) + .map_err(|e| Error::MaspTx(e.to_string()))?, + None => return Ok(()), + } + } + _ => return Ok(()), + }; + if let Some(shielded_transfer) = shielded_transfer { + self.ctx + .borrow_mut() + .handle_masp_tx(&shielded_transfer) + .map_err(|_| { + Error::MaspTx("Writing MASP components failed".to_string()) + })?; + } + Ok(()) + } } #[derive(Debug, Default)] @@ -218,3 +316,28 @@ pub struct ValidationParams { /// Upgrade path pub upgrade_path: Vec, } + +/// Get the IbcToken from the source/destination ports and channels +pub fn received_ibc_token( + ibc_denom: &PrefixedDenom, + src_port_id: &PortId, + src_channel_id: &ChannelId, + dest_port_id: &PortId, + dest_channel_id: &ChannelId, +) -> Result { + let mut ibc_denom = ibc_denom.clone(); + if 
is_receiver_chain_source( + src_port_id.clone(), + src_channel_id.clone(), + &ibc_denom, + ) { + let prefix = + TracePrefix::new(src_port_id.clone(), src_channel_id.clone()); + ibc_denom.remove_trace_prefix(&prefix); + } else { + let prefix = + TracePrefix::new(dest_port_id.clone(), dest_channel_id.clone()); + ibc_denom.add_trace_prefix(prefix); + } + Ok(storage::ibc_token(ibc_denom.to_string())) +} diff --git a/core/src/ledger/ibc/storage.rs b/core/src/ledger/ibc/storage.rs index 317aa108bf..717991b9f1 100644 --- a/core/src/ledger/ibc/storage.rs +++ b/core/src/ledger/ibc/storage.rs @@ -367,10 +367,32 @@ pub fn port_id(key: &Key) -> Result { } } -/// The storage key to get the denom name from the hashed token -pub fn ibc_denom_key(token_hash: impl AsRef) -> Key { - let path = format!("{}/{}", DENOM, token_hash.as_ref()); - ibc_key(path).expect("Creating a key for the denom key shouldn't fail") +/// The storage key prefix to get the denom name with the hashed IBC denom. The +/// address is given as string because the given address could be non-Namada +/// token. +pub fn ibc_denom_key_prefix(addr: Option) -> Key { + let prefix = Key::from(Address::Internal(InternalAddress::Ibc).to_db_key()) + .push(&DENOM.to_string().to_db_key()) + .expect("Cannot obtain a storage key"); + + if let Some(addr) = addr { + prefix + .push(&addr.to_db_key()) + .expect("Cannot obtain a storage key") + } else { + prefix + } +} + +/// The storage key to get the denom name with the hashed IBC denom. The address +/// is given as string because the given address could be non-Namada token. 
+pub fn ibc_denom_key( + addr: impl AsRef, + token_hash: impl AsRef, +) -> Key { + ibc_denom_key_prefix(Some(addr.as_ref().to_string())) + .push(&token_hash.as_ref().to_string().to_db_key()) + .expect("Cannot obtain a storage key") } /// Hash the denom @@ -392,20 +414,19 @@ pub fn is_ibc_key(key: &Key) -> bool { DbKeySeg::AddressSeg(addr) if *addr == Address::Internal(InternalAddress::Ibc)) } -/// Returns the token hash if the given key is the denom key -pub fn is_ibc_denom_key(key: &Key) -> Option { +/// Returns the owner and the token hash if the given key is the denom key +pub fn is_ibc_denom_key(key: &Key) -> Option<(String, String)> { match &key.segments[..] { [ DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(Address::Internal(InternalAddress::IbcToken( - hash, - ))), + DbKeySeg::StringSeg(owner), + DbKeySeg::StringSeg(hash), ] => { if addr == &Address::Internal(InternalAddress::Ibc) && prefix == DENOM { - Some(hash.clone()) + Some((owner.clone(), hash.clone())) } else { None } diff --git a/shared/src/ledger/inflation.rs b/core/src/ledger/inflation.rs similarity index 70% rename from shared/src/ledger/inflation.rs rename to core/src/ledger/inflation.rs index da2d5ed1e1..3e1902a445 100644 --- a/shared/src/ledger/inflation.rs +++ b/core/src/ledger/inflation.rs @@ -2,9 +2,8 @@ //! proof-of-stake, providing liquity to shielded asset pools, and public goods //! funding. 
-use namada_core::types::dec::Dec; - -use crate::types::token; +use crate::types::dec::Dec; +use crate::types::uint::Uint; /// The domains of inflation pub enum RewardsType { @@ -20,16 +19,18 @@ pub enum RewardsType { #[allow(missing_docs)] pub struct ValsToUpdate { pub locked_ratio: Dec, - pub inflation: token::Amount, + pub inflation: Uint, } /// PD controller used to dynamically adjust the rewards rates #[derive(Debug, Clone)] pub struct RewardsController { /// Locked token amount in the relevant system - pub locked_tokens: token::Amount, + pub locked_tokens: Uint, /// Total token supply - pub total_tokens: token::Amount, + pub total_tokens: Uint, + /// Total native token supply + pub total_native_tokens: Uint, /// PD target locked ratio pub locked_ratio_target: Dec, /// PD last locked ratio @@ -37,7 +38,7 @@ pub struct RewardsController { /// Maximum reward rate pub max_reward_rate: Dec, /// Last inflation amount - pub last_inflation_amount: token::Amount, + pub last_inflation_amount: Uint, /// Nominal proportional gain pub p_gain_nom: Dec, /// Nominal derivative gain @@ -52,6 +53,7 @@ impl RewardsController { let Self { locked_tokens, total_tokens, + total_native_tokens, locked_ratio_target, locked_ratio_last, max_reward_rate, @@ -63,14 +65,16 @@ impl RewardsController { // Token amounts must be expressed in terms of the raw amount (namnam) // to properly run the PD controller - let locked = Dec::try_from(locked_tokens.raw_amount()) - .expect("Should not fail to convert token Amount to Dec"); - let total = Dec::try_from(total_tokens.raw_amount()) - .expect("Should not fail to convert token Amount to Dec"); + let locked = Dec::try_from(locked_tokens) + .expect("Should not fail to convert Uint to Dec"); + let total = Dec::try_from(total_tokens) + .expect("Should not fail to convert Uint to Dec"); + let total_native = Dec::try_from(total_native_tokens) + .expect("Should not fail to convert Uint to Dec"); let epochs_py: Dec = epochs_per_year.into(); let 
locked_ratio = locked / total; - let max_inflation = total * max_reward_rate / epochs_py; + let max_inflation = total_native * max_reward_rate / epochs_py; let p_gain = p_gain_nom * max_inflation; let d_gain = d_gain_nom * max_inflation; @@ -78,29 +82,20 @@ impl RewardsController { let delta_error = locked_ratio_last - locked_ratio; let control_val = p_gain * error - d_gain * delta_error; - let last_inflation_amount = - Dec::try_from(last_inflation_amount.raw_amount()) - .expect("Should not fail to convert token Amount to Dec"); + let last_inflation_amount = Dec::try_from(last_inflation_amount) + .expect("Should not fail to convert Uint to Dec"); let new_inflation_amount_raw = last_inflation_amount + control_val; let new_inflation_amount = if new_inflation_amount_raw.is_negative() { - token::Amount::zero() + Uint::zero() } else { - token::Amount::from_uint( - new_inflation_amount_raw - .to_uint() - .expect("Should not fail to convert Dec to Uint"), - 0, - ) - .expect("Should not fail to convert Uint to Amount") + new_inflation_amount_raw + .to_uint() + .expect("Should not fail to convert Dec to Uint") }; - let max_inflation = token::Amount::from_uint( - max_inflation - .to_uint() - .expect("Should not fail to convert Dec to Uint"), - 0, - ) - .expect("Should not fail to convert Uint to Amount"); + let max_inflation = max_inflation + .to_uint() + .expect("Should not fail to convert Dec to Uint"); let inflation = std::cmp::min(new_inflation_amount, max_inflation); ValsToUpdate { @@ -114,27 +109,18 @@ impl RewardsController { mod test { use std::str::FromStr; - use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; - use super::*; #[test] fn test_inflation_calc_up() { let mut controller = RewardsController { - locked_tokens: token::Amount::from_uint( - 2_000, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), - total_tokens: token::Amount::from_uint( - 4_000, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), + locked_tokens: Uint::from(2_000_000_000), + total_tokens: 
Uint::from(4_000_000_000_u64), + total_native_tokens: Uint::from(4_000_000_000_u64), locked_ratio_target: Dec::from_str("0.66666666").unwrap(), locked_ratio_last: Dec::from_str("0.5").unwrap(), max_reward_rate: Dec::from_str("0.1").unwrap(), - last_inflation_amount: token::Amount::zero(), + last_inflation_amount: Uint::zero(), p_gain_nom: Dec::from_str("0.1").unwrap(), d_gain_nom: Dec::from_str("0.1").unwrap(), epochs_per_year: 365, @@ -146,11 +132,10 @@ mod test { inflation: inflation_0, } = controller.clone().run(); println!( - "Round 0: Locked ratio: {locked_ratio_0}, inflation: {}", - inflation_0.to_string_native() + "Round 0: Locked ratio: {locked_ratio_0}, inflation: {inflation_0}" ); assert_eq!(locked_ratio_0, Dec::from_str("0.5").unwrap()); - assert_eq!(inflation_0, token::Amount::from_uint(18_264, 0).unwrap()); + assert_eq!(inflation_0, Uint::from(18_264)); controller.locked_ratio_last = locked_ratio_0; controller.last_inflation_amount = inflation_0; @@ -162,13 +147,12 @@ mod test { inflation: inflation_1, } = controller.clone().run(); println!( - "Round 1: Locked ratio: {locked_ratio_1}, inflation: {}", - inflation_1.to_string_native() + "Round 1: Locked ratio: {locked_ratio_1}, inflation: {inflation_1}" ); assert!(locked_ratio_1 > locked_ratio_0); assert!(locked_ratio_1 > Dec::from_str("0.5").unwrap()); assert!(locked_ratio_1 < Dec::from_str("0.51").unwrap()); - assert_eq!(inflation_1, token::Amount::from_uint(36_528, 0).unwrap()); + assert_eq!(inflation_1, Uint::from(36_528)); controller.locked_ratio_last = locked_ratio_1; controller.last_inflation_amount = inflation_1; @@ -180,32 +164,24 @@ mod test { inflation: inflation_2, } = controller.run(); println!( - "Round 2: Locked ratio: {locked_ratio_2}, inflation: {}", - inflation_2.to_string_native() + "Round 2: Locked ratio: {locked_ratio_2}, inflation: {inflation_2}", ); assert!(locked_ratio_2 > locked_ratio_1); assert!(locked_ratio_2 > Dec::from_str("0.5").unwrap()); assert!(locked_ratio_2 < 
Dec::from_str("0.51").unwrap()); - assert_eq!(inflation_2, token::Amount::from_uint(54_792, 0).unwrap()); + assert_eq!(inflation_2, Uint::from(54_792)); } #[test] fn test_inflation_calc_down() { let mut controller = RewardsController { - locked_tokens: token::Amount::from_uint( - 900, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), - total_tokens: token::Amount::from_uint( - 1_000, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), + locked_tokens: Uint::from(900_000_000), + total_tokens: Uint::from(1_000_000_000), + total_native_tokens: Uint::from(1_000_000_000), locked_ratio_target: Dec::from_str("0.66666666").unwrap(), locked_ratio_last: Dec::from_str("0.9").unwrap(), max_reward_rate: Dec::from_str("0.1").unwrap(), - last_inflation_amount: token::Amount::from_uint(10_000, 0).unwrap(), + last_inflation_amount: Uint::from(10_000), p_gain_nom: Dec::from_str("0.1").unwrap(), d_gain_nom: Dec::from_str("0.1").unwrap(), epochs_per_year: 365, @@ -217,11 +193,10 @@ mod test { inflation: inflation_0, } = controller.clone().run(); println!( - "Round 0: Locked ratio: {locked_ratio_0}, inflation: {}", - inflation_0.to_string_native() + "Round 0: Locked ratio: {locked_ratio_0}, inflation: {inflation_0}", ); assert_eq!(locked_ratio_0, Dec::from_str("0.9").unwrap()); - assert_eq!(inflation_0, token::Amount::from_uint(3_607, 0).unwrap()); + assert_eq!(inflation_0, Uint::from(3_607)); controller.locked_ratio_last = locked_ratio_0; controller.last_inflation_amount = inflation_0; @@ -233,13 +208,12 @@ mod test { inflation: inflation_1, } = controller.clone().run(); println!( - "Round 1: Locked ratio: {locked_ratio_1}, inflation: {}", - inflation_1.to_string_native() + "Round 1: Locked ratio: {locked_ratio_1}, inflation: {inflation_1}", ); assert!(locked_ratio_1 > locked_ratio_0); assert!(locked_ratio_1 > Dec::from_str("0.9").unwrap()); assert!(locked_ratio_1 < Dec::from_str("0.91").unwrap()); - assert_eq!(inflation_1, token::Amount::zero()); + assert_eq!(inflation_1, Uint::zero()); 
controller.locked_ratio_last = locked_ratio_1; controller.last_inflation_amount = inflation_1; @@ -251,10 +225,9 @@ mod test { inflation: inflation_2, } = controller.run(); println!( - "Round 2: Locked ratio: {locked_ratio_2}, inflation: {}", - inflation_2.to_string_native() + "Round 2: Locked ratio: {locked_ratio_2}, inflation: {inflation_2}", ); assert_eq!(locked_ratio_2, locked_ratio_1); - assert_eq!(inflation_2, token::Amount::zero()); + assert_eq!(inflation_2, Uint::zero()); } } diff --git a/core/src/ledger/mod.rs b/core/src/ledger/mod.rs index 890d58044d..301cf78e08 100644 --- a/core/src/ledger/mod.rs +++ b/core/src/ledger/mod.rs @@ -5,6 +5,7 @@ pub mod gas; pub mod governance; #[cfg(any(feature = "abciplus", feature = "abcipp"))] pub mod ibc; +pub mod inflation; pub mod parameters; pub mod pgf; pub mod replay_protection; diff --git a/core/src/ledger/replay_protection.rs b/core/src/ledger/replay_protection.rs index 56537dfbaf..71332d295c 100644 --- a/core/src/ledger/replay_protection.rs +++ b/core/src/ledger/replay_protection.rs @@ -1,21 +1,32 @@ //! 
Replay protection storage -use crate::types::address::{Address, InternalAddress}; use crate::types::hash::Hash; -use crate::types::storage::{DbKeySeg, Key, KeySeg}; +use crate::types::storage::Key; -/// Internal replay protection address -pub const ADDRESS: Address = - Address::Internal(InternalAddress::ReplayProtection); +const ERROR_MSG: &str = "Cannot obtain a valid db key"; -/// Check if a key is a replay protection key -pub fn is_replay_protection_key(key: &Key) -> bool { - matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS) +/// Get the transaction hash key under the `last` subkey +pub fn get_replay_protection_last_subkey(hash: &Hash) -> Key { + Key::parse("last") + .expect(ERROR_MSG) + .push(&hash.to_string()) + .expect(ERROR_MSG) +} + +/// Get the transaction hash key under the `all` subkey +pub fn get_replay_protection_all_subkey(hash: &Hash) -> Key { + Key::parse("all") + .expect(ERROR_MSG) + .push(&hash.to_string()) + .expect(ERROR_MSG) } -/// Get the transaction hash key -pub fn get_replay_protection_key(hash: &Hash) -> Key { - Key::from(ADDRESS.to_db_key()) +/// Get the full transaction hash key under the `last` subkey +pub fn get_replay_protection_last_key(hash: &Hash) -> Key { + Key::parse("replay_protection") + .expect(ERROR_MSG) + .push(&"last".to_string()) + .expect(ERROR_MSG) .push(&hash.to_string()) - .expect("Cannot obtain a valid db key") + .expect(ERROR_MSG) } diff --git a/core/src/ledger/storage/masp_conversions.rs b/core/src/ledger/storage/masp_conversions.rs index 624fe2aa1f..830068220e 100644 --- a/core/src/ledger/storage/masp_conversions.rs +++ b/core/src/ledger/storage/masp_conversions.rs @@ -3,20 +3,28 @@ use std::collections::BTreeMap; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; use masp_primitives::convert::AllowedConversion; use masp_primitives::merkle_tree::FrozenCommitmentTree; use masp_primitives::sapling::Node; +use 
crate::ledger::inflation::{RewardsController, ValsToUpdate}; +use crate::ledger::parameters; +use crate::ledger::storage_api::token::read_denom; +use crate::ledger::storage_api::{StorageRead, StorageWrite}; use crate::types::address::Address; +use crate::types::dec::Dec; use crate::types::storage::Epoch; use crate::types::token::MaspDenom; +use crate::types::uint::Uint; +use crate::types::{address, token}; /// A representation of the conversion state #[derive(Debug, Default, BorshSerialize, BorshDeserialize)] pub struct ConversionState { - /// The merkle root from the previous epoch - pub prev_root: Node, + /// The last amount of the native token distributed + pub normed_inflation: Option, /// The tree currently containing all the conversions pub tree: FrozenCommitmentTree, /// Map assets to their latest conversion and position in Merkle tree @@ -27,6 +35,159 @@ pub struct ConversionState { >, } +/// Compute the MASP rewards by applying the PD-controller to the genesis +/// parameters and the last inflation and last locked rewards ratio values. +pub fn calculate_masp_rewards( + wl_storage: &mut super::WlStorage, + addr: &Address, +) -> crate::ledger::storage_api::Result<(u128, u128)> +where + D: 'static + super::DB + for<'iter> super::DBIter<'iter>, + H: 'static + super::StorageHasher, +{ + let denomination = read_denom(wl_storage, addr)? + .expect("failed to read token denomination"); + // Inflation is implicitly denominated by this value. The lower this + // figure, the less precise inflation computations are. This is especially + // problematic when inflation is coming from a token with much higher + // denomination than the native token. The higher this figure, the higher + // the threshold of holdings required in order to receive non-zero rewards. + // This value should be fixed constant for each asset type. Here we choose + // a thousandth of the given asset. 
+ let precision = 10u128.pow(std::cmp::max(u32::from(denomination.0), 3) - 3); + + let masp_addr = address::masp(); + // Query the storage for information + + //// information about the amount of tokens on the chain + let total_tokens: token::Amount = wl_storage + .read(&token::minted_balance_key(addr))? + .expect("the total supply key should be here"); + + //// information about the amount of native tokens on the chain + let total_native_tokens: token::Amount = wl_storage + .read(&token::minted_balance_key(&wl_storage.storage.native_token))? + .expect("the total supply key should be here"); + + // total staked amount in the Shielded pool + let total_token_in_masp: token::Amount = wl_storage + .read(&token::balance_key(addr, &masp_addr))? + .unwrap_or_default(); + + let epochs_per_year: u64 = wl_storage + .read(¶meters::storage::get_epochs_per_year_key())? + .expect("epochs per year should properly decode"); + + //// Values from the last epoch + let last_inflation: token::Amount = wl_storage + .read(&token::masp_last_inflation_key(addr))? + .expect("failure to read last inflation"); + + let last_locked_ratio: Dec = wl_storage + .read(&token::masp_last_locked_ratio_key(addr))? + .expect("failure to read last inflation"); + + //// Parameters for each token + let max_reward_rate: Dec = wl_storage + .read(&token::masp_max_reward_rate_key(addr))? + .expect("max reward should properly decode"); + + let kp_gain_nom: Dec = wl_storage + .read(&token::masp_kp_gain_key(addr))? + .expect("kp_gain_nom reward should properly decode"); + + let kd_gain_nom: Dec = wl_storage + .read(&token::masp_kd_gain_key(addr))? + .expect("kd_gain_nom reward should properly decode"); + + let locked_target_ratio: Dec = wl_storage + .read(&token::masp_locked_ratio_target_key(addr))? 
+ .expect("locked ratio target should properly decode"); + + // Creating the PD controller for handing out tokens + let controller = RewardsController { + locked_tokens: total_token_in_masp.raw_amount(), + total_tokens: total_tokens.raw_amount(), + total_native_tokens: total_native_tokens.raw_amount(), + locked_ratio_target: locked_target_ratio, + locked_ratio_last: last_locked_ratio, + max_reward_rate, + last_inflation_amount: last_inflation.raw_amount(), + p_gain_nom: kp_gain_nom, + d_gain_nom: kd_gain_nom, + epochs_per_year, + }; + + let ValsToUpdate { + locked_ratio, + inflation, + } = RewardsController::run(controller); + + // inflation-per-token = inflation / locked tokens = n/PRECISION + // ∴ n = (inflation * PRECISION) / locked tokens + // Since we must put the notes in a compatible format with the + // note format, we must make the inflation amount discrete. + let noterized_inflation = if total_token_in_masp.is_zero() { + 0u128 + } else { + inflation + .checked_mul_div( + Uint::from(precision), + total_token_in_masp.raw_amount(), + ) + .and_then(|x| x.0.try_into().ok()) + .unwrap_or_else(|| { + tracing::warn!( + "MASP inflation for {} assumed to be 0 because the \ + computed value is too large. 
Please check the inflation \ + parameters.", + *addr + ); + 0u128 + }) + }; + + tracing::debug!( + "Controller, call: total_in_masp {:?}, total_tokens {:?}, \ + total_native_tokens {:?}, locked_target_ratio {:?}, \ + last_locked_ratio {:?}, max_reward_rate {:?}, last_inflation {:?}, \ + kp_gain_nom {:?}, kd_gain_nom {:?}, epochs_per_year {:?}", + total_token_in_masp, + total_tokens, + total_native_tokens, + locked_target_ratio, + last_locked_ratio, + max_reward_rate, + last_inflation, + kp_gain_nom, + kd_gain_nom, + epochs_per_year, + ); + tracing::debug!("Please give me: {:?}", addr); + tracing::debug!("Ratio {:?}", locked_ratio); + tracing::debug!("inflation from the pd controller {:?}", inflation); + tracing::debug!("total in the masp {:?}", total_token_in_masp); + tracing::debug!("Please give me inflation: {:?}", noterized_inflation); + + // Is it fine to write the inflation rate, this is accurate, + // but we should make sure the return value's ratio matches + // this new inflation rate in 'update_allowed_conversions', + // otherwise we will have an inaccurate view of inflation + wl_storage.write( + &token::masp_last_inflation_key(addr), + token::Amount::from_uint( + (total_token_in_masp.raw_amount() / precision) + * Uint::from(noterized_inflation), + 0, + ) + .unwrap(), + )?; + + wl_storage.write(&token::masp_last_locked_ratio_key(addr), locked_ratio)?; + + Ok((noterized_inflation, precision)) +} + // This is only enabled when "wasm-runtime" is on, because we're using rayon #[cfg(feature = "wasm-runtime")] /// Update the MASP's allowed conversions @@ -37,45 +198,66 @@ where D: 'static + super::DB + for<'iter> super::DBIter<'iter>, H: 'static + super::StorageHasher, { + use std::cmp::Ordering; + use masp_primitives::ff::PrimeField; - use masp_primitives::transaction::components::I32Sum as MaspAmount; + use masp_primitives::transaction::components::I128Sum as MaspAmount; use rayon::iter::{ IndexedParallelIterator, IntoParallelIterator, ParallelIterator, }; use 
rayon::prelude::ParallelSlice; - use crate::ledger::storage_api::{ResultExt, StorageRead, StorageWrite}; + use crate::ledger::storage_api::ResultExt; use crate::types::storage::{self, KeySeg}; - use crate::types::{address, token}; // The derived conversions will be placed in MASP address space let masp_addr = address::masp(); let key_prefix: storage::Key = masp_addr.to_db_key().into(); - let masp_rewards = address::masp_rewards(); + let tokens = address::tokens(); + let mut masp_reward_keys: Vec<_> = tokens.into_keys().collect(); + // Put the native rewards first because other inflation computations depend + // on it + let native_token = wl_storage.storage.native_token.clone(); + masp_reward_keys.sort_unstable_by(|x, y| { + if (*x == native_token) == (*y == native_token) { + Ordering::Equal + } else if *x == native_token { + Ordering::Less + } else { + Ordering::Greater + } + }); // The total transparent value of the rewards being distributed let mut total_reward = token::Amount::native_whole(0); - // Construct MASP asset type for rewards. Always timestamp reward tokens - // with the zeroth epoch to minimize the number of convert notes clients - // have to use. This trick works under the assumption that reward tokens - // from different epochs are exactly equivalent. - let reward_asset = - encode_asset_type(address::nam(), MaspDenom::Zero, Epoch(0)); + // Construct MASP asset type for rewards. Always deflate and timestamp + // reward tokens with the zeroth epoch to minimize the number of convert + // notes clients have to use. This trick works under the assumption that + // reward tokens will then be reinflated back to the current epoch. 
+ let reward_assets = [ + encode_asset_type(native_token.clone(), MaspDenom::Zero, Epoch(0)), + encode_asset_type(native_token.clone(), MaspDenom::One, Epoch(0)), + encode_asset_type(native_token.clone(), MaspDenom::Two, Epoch(0)), + encode_asset_type(native_token.clone(), MaspDenom::Three, Epoch(0)), + ]; // Conversions from the previous to current asset for each address let mut current_convs = BTreeMap::<(Address, MaspDenom), AllowedConversion>::new(); + // Native token inflation values are always with respect to this + let mut ref_inflation = 0; // Reward all tokens according to above reward rates - for (addr, reward) in &masp_rewards { + for addr in &masp_reward_keys { + let reward = calculate_masp_rewards(wl_storage, addr)?; + if *addr == native_token { + // The reference inflation is the denominator of the native token + // inflation, which is always a constant + ref_inflation = reward.1; + } // Dispense a transparent reward in parallel to the shielded rewards let addr_bal: token::Amount = wl_storage .read(&token::balance_key(addr, &masp_addr))? .unwrap_or_default(); - // The reward for each reward.1 units of the current asset is - // reward.0 units of the reward token - // Since floor(a) + floor(b) <= floor(a+b), there will always be - // enough rewards to reimburse users - total_reward += (addr_bal * *reward).0; for denom in token::MaspDenom::iter() { // Provide an allowed conversion from previous timestamp. 
The // negative sign allows each instance of the old asset to be @@ -90,15 +272,102 @@ where denom, wl_storage.storage.block.epoch, ); - current_convs.insert( - (addr.clone(), denom), - (MaspAmount::from_pair(old_asset, -(reward.1 as i32)).unwrap() - + MaspAmount::from_pair(new_asset, reward.1 as i32) + // Get the last rewarded amount of the native token + let normed_inflation = wl_storage + .storage + .conversion_state + .normed_inflation + .get_or_insert(ref_inflation); + if *addr == native_token { + // The amount that will be given of the new native token for + // every amount of the native token given in the + // previous epoch + let new_normed_inflation = Uint::from(*normed_inflation) + .checked_add( + (Uint::from(*normed_inflation) * Uint::from(reward.0)) + / reward.1, + ) + .and_then(|x| x.try_into().ok()) + .unwrap_or_else(|| { + tracing::warn!( + "MASP reward for {} assumed to be 0 because the \ + computed value is too large. Please check the \ + inflation parameters.", + *addr + ); + *normed_inflation + }); + // The conversion is computed such that if consecutive + // conversions are added together, the + // intermediate native tokens cancel/ + // telescope out + current_convs.insert( + (addr.clone(), denom), + (MaspAmount::from_pair( + old_asset, + -(*normed_inflation as i128), + ) + .unwrap() + + MaspAmount::from_pair( + new_asset, + new_normed_inflation as i128, + ) + .unwrap()) + .into(), + ); + // Operations that happen exactly once for each token + if denom == MaspDenom::Three { + // The reward for each reward.1 units of the current asset + // is reward.0 units of the reward token + total_reward += (addr_bal + * (new_normed_inflation, *normed_inflation)) + .0 + - addr_bal; + // Save the new normed inflation + *normed_inflation = new_normed_inflation; + } + } else { + // Express the inflation reward in real terms, that is, with + // respect to the native asset in the zeroth + // epoch + let real_reward = ((Uint::from(reward.0) + * 
Uint::from(ref_inflation)) + / *normed_inflation) + .try_into() + .unwrap_or_else(|_| { + tracing::warn!( + "MASP reward for {} assumed to be 0 because the \ + computed value is too large. Please check the \ + inflation parameters.", + *addr + ); + 0u128 + }); + // The conversion is computed such that if consecutive + // conversions are added together, the + // intermediate tokens cancel/ telescope out + current_convs.insert( + (addr.clone(), denom), + (MaspAmount::from_pair(old_asset, -(reward.1 as i128)) .unwrap() - + MaspAmount::from_pair(reward_asset, reward.0 as i32) + + MaspAmount::from_pair(new_asset, reward.1 as i128) + .unwrap() + + MaspAmount::from_pair( + reward_assets[denom as usize], + real_reward as i128, + ) .unwrap()) - .into(), - ); + .into(), + ); + // Operations that happen exactly once for each token + if denom == MaspDenom::Three { + // The reward for each reward.1 units of the current asset + // is reward.0 units of the reward token + total_reward += ((addr_bal * (real_reward, reward.1)).0 + * (*normed_inflation, ref_inflation)) + .0; + } + } // Add a conversion from the previous asset type wl_storage.storage.conversion_state.assets.insert( old_asset, @@ -145,7 +414,7 @@ where // Update the MASP's transparent reward token balance to ensure that it // is sufficiently backed to redeem rewards - let reward_key = token::balance_key(&address::nam(), &masp_addr); + let reward_key = token::balance_key(&native_token, &masp_addr); let addr_bal: token::Amount = wl_storage.read(&reward_key)?.unwrap_or_default(); let new_bal = addr_bal + total_reward; @@ -163,11 +432,6 @@ where .map(FrozenCommitmentTree::new) .collect(); - // Keep the merkle root from the old tree for transactions constructed - // close to the epoch boundary - wl_storage.storage.conversion_state.prev_root = - wl_storage.storage.conversion_state.tree.root(); - // Convert conversion vector into tree so that Merkle paths can be // obtained wl_storage.storage.conversion_state.tree = @@ -175,7 
+439,7 @@ where // Add purely decoding entries to the assets map. These will be // overwritten before the creation of the next commitment tree - for addr in masp_rewards.keys() { + for addr in masp_reward_keys { for denom in token::MaspDenom::iter() { // Add the decoding entry for the new asset type. An uncommited // node position is used since this is not a conversion. @@ -203,11 +467,7 @@ where .into_storage_result()?; // We cannot borrow `conversion_state` at the same time as when we call // `wl_storage.write`, so we encode it manually first - let conv_bytes = wl_storage - .storage - .conversion_state - .try_to_vec() - .into_storage_result()?; + let conv_bytes = wl_storage.storage.conversion_state.serialize_to_vec(); wl_storage.write_bytes(&state_key, conv_bytes)?; Ok(()) } @@ -218,9 +478,7 @@ pub fn encode_asset_type( denom: MaspDenom, epoch: Epoch, ) -> AssetType { - let new_asset_bytes = (addr, denom, epoch.0) - .try_to_vec() - .expect("unable to serialize address and epoch"); + let new_asset_bytes = (addr, denom, epoch.0).serialize_to_vec(); AssetType::new(new_asset_bytes.as_ref()) .expect("unable to derive asset identifier") } diff --git a/core/src/ledger/storage/merkle_tree.rs b/core/src/ledger/storage/merkle_tree.rs index eb4e34e20b..961cbc3c35 100644 --- a/core/src/ledger/storage/merkle_tree.rs +++ b/core/src/ledger/storage/merkle_tree.rs @@ -8,6 +8,7 @@ use arse_merkle_tree::{ Hash as SmtHash, Key as TreeKey, SparseMerkleTree as ArseMerkleTree, H256, }; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use ics23::commitment_proof::Proof as Ics23Proof; use ics23::{CommitmentProof, ExistenceProof, NonExistenceProof}; use thiserror::Error; @@ -152,13 +153,12 @@ impl<'a> StoreRef<'a> { /// Borsh Seriliaze the backing stores of our Merkle tree. 
pub fn encode(&self) -> Vec { match self { - Self::Base(store) => store.try_to_vec(), - Self::Account(store) => store.try_to_vec(), - Self::Ibc(store) => store.try_to_vec(), - Self::PoS(store) => store.try_to_vec(), - Self::BridgePool(store) => store.try_to_vec(), + Self::Base(store) => store.serialize_to_vec(), + Self::Account(store) => store.serialize_to_vec(), + Self::Ibc(store) => store.serialize_to_vec(), + Self::PoS(store) => store.serialize_to_vec(), + Self::BridgePool(store) => store.serialize_to_vec(), } - .expect("Serialization failed") } } diff --git a/core/src/ledger/storage/mockdb.rs b/core/src/ledger/storage/mockdb.rs index 971584e742..11154ca5ce 100644 --- a/core/src/ledger/storage/mockdb.rs +++ b/core/src/ledger/storage/mockdb.rs @@ -6,7 +6,8 @@ use std::ops::Bound::{Excluded, Included}; use std::path::Path; use std::str::FromStr; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use super::merkle_tree::{MerkleTreeStoresRead, StoreType}; use super::{ @@ -14,6 +15,7 @@ use super::{ }; use crate::ledger::storage::types::{self, KVBytes, PrefixIterator}; use crate::types::ethereum_structs; +use crate::types::hash::Hash; #[cfg(feature = "ferveo-tpke")] use crate::types::internal::TxQueue; use crate::types::storage::{ @@ -295,10 +297,9 @@ impl DB for MockDB { let key = prefix_key .push(&"header".to_owned()) .map_err(Error::KeyError)?; - self.0.borrow_mut().insert( - key.to_string(), - h.try_to_vec().expect("serialization failed"), - ); + self.0 + .borrow_mut() + .insert(key.to_string(), h.serialize_to_vec()); } } // Block hash @@ -413,6 +414,24 @@ impl DB for MockDB { Ok(Some((height, merkle_tree_stores))) } + fn has_replay_protection_entry(&self, hash: &Hash) -> Result { + let prefix_key = + Key::parse("replay_protection").map_err(Error::KeyError)?; + for prefix in ["last", "all"] { + let key = prefix_key + .push(&prefix.to_string()) + .map_err(Error::KeyError)? 
+ .push(&hash.to_string()) + .map_err(Error::KeyError)?; + + if self.0.borrow().contains_key(&key.to_string()) { + return Ok(true); + } + } + + Ok(false) + } + fn read_subspace_val(&self, key: &Key) -> Result>> { let key = Key::parse("subspace").map_err(Error::KeyError)?.join(key); Ok(self.0.borrow().get(&key.to_string()).cloned()) @@ -540,6 +559,37 @@ impl DB for MockDB { None => Ok(()), } } + + fn write_replay_protection_entry( + &mut self, + _batch: &mut Self::WriteBatch, + key: &Key, + ) -> Result<()> { + let key = Key::parse("replay_protection") + .map_err(Error::KeyError)? + .join(key); + + match self.0.borrow_mut().insert(key.to_string(), vec![]) { + Some(_) => Err(Error::DBError(format!( + "Replay protection key {key} already in storage" + ))), + None => Ok(()), + } + } + + fn delete_replay_protection_entry( + &mut self, + _batch: &mut Self::WriteBatch, + key: &Key, + ) -> Result<()> { + let key = Key::parse("replay_protection") + .map_err(Error::KeyError)? + .join(key); + + self.0.borrow_mut().remove(&key.to_string()); + + Ok(()) + } } impl<'iter> DBIter<'iter> for MockDB { @@ -581,6 +631,18 @@ impl<'iter> DBIter<'iter> for MockDB { // Mock DB can read only the latest value for now unimplemented!() } + + fn iter_replay_protection(&'iter self) -> Self::PrefixIter { + let db_prefix = "replay_protection/".to_owned(); + let iter = self.0.borrow().clone().into_iter(); + MockPrefixIterator::new( + MockIterator { + prefix: "last".to_string(), + iter, + }, + db_prefix, + ) + } } /// A prefix iterator base for the [`MockPrefixIterator`]. 
diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 81be7e48a6..4f506a61be 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -15,6 +15,7 @@ use std::cmp::Ordering; use std::format; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; pub use merkle_tree::{ MerkleTree, MerkleTreeStoresRead, MerkleTreeStoresWrite, StoreType, }; @@ -26,8 +27,9 @@ pub use wl_storage::{ #[cfg(feature = "wasm-runtime")] pub use self::masp_conversions::update_allowed_conversions; -pub use self::masp_conversions::{encode_asset_type, ConversionState}; -use super::replay_protection::is_replay_protection_key; +pub use self::masp_conversions::{ + calculate_masp_rewards, encode_asset_type, ConversionState, +}; use crate::ledger::eth_bridge::storage::bridge_pool::is_pending_transfer_key; use crate::ledger::gas::{ STORAGE_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE, @@ -286,6 +288,9 @@ pub trait DB: std::fmt::Debug { height: BlockHeight, ) -> Result>; + /// Check if the given replay protection entry exists + fn has_replay_protection_entry(&self, hash: &Hash) -> Result; + /// Read the latest value for account subspace key from the DB fn read_subspace_val(&self, key: &Key) -> Result>>; @@ -353,6 +358,20 @@ pub trait DB: std::fmt::Debug { pruned_epoch: Epoch, pred_epochs: &Epochs, ) -> Result<()>; + + /// Write a replay protection entry + fn write_replay_protection_entry( + &mut self, + batch: &mut Self::WriteBatch, + key: &Key, + ) -> Result<()>; + + /// Delete a replay protection entry + fn delete_replay_protection_entry( + &mut self, + batch: &mut Self::WriteBatch, + key: &Key, + ) -> Result<()>; } /// A database prefix iterator. 
@@ -376,6 +395,9 @@ pub trait DBIter<'iter> { /// Read subspace new diffs at a given height fn iter_new_diffs(&'iter self, height: BlockHeight) -> Self::PrefixIter; + + /// Read replay protection storage from the last block + fn iter_replay_protection(&'iter self) -> Self::PrefixIter; } /// Atomic batch write. @@ -570,19 +592,10 @@ where /// Check if the given key is present in storage. Returns the result and the /// gas cost. pub fn has_key(&self, key: &Key) -> Result<(bool, u64)> { - if is_replay_protection_key(key) { - // Replay protection keys are not included in the merkle - // tree - Ok(( - self.db.read_subspace_val(key)?.is_some(), - key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, - )) - } else { - Ok(( - self.block.tree.has_key(key)?, - key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, - )) - } + Ok(( + self.block.tree.has_key(key)?, + key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, + )) } /// Returns a value from the specified subspace and the gas cost @@ -664,11 +677,10 @@ where if is_pending_transfer_key(key) { // The tree of the bright pool stores the current height for the // pending transfer - let height = - self.block.height.try_to_vec().expect("Encoding failed"); + let height = self.block.height.serialize_to_vec(); self.block.tree.update(key, height)?; - } else if !is_replay_protection_key(key) { - // Update the merkle tree for all but replay-protection entries + } else { + // Update the merkle tree self.block.tree.update(key, value)?; } @@ -686,9 +698,7 @@ where // but with gas and storage bytes len diff accounting let mut deleted_bytes_len = 0; if self.has_key(key)?.0 { - if !is_replay_protection_key(key) { - self.block.tree.delete(key)?; - } + self.block.tree.delete(key)?; deleted_bytes_len = self.db.delete_subspace_val(self.block.height, key)?; } @@ -797,44 +807,32 @@ where match old.0.cmp(&new.0) { Ordering::Equal => { // the value was updated - if !is_replay_protection_key(&new_key) { - tree.update( - &new_key, - if 
is_pending_transfer_key(&new_key) { - target_height.try_to_vec().expect( - "Serialization should never \ - fail", - ) - } else { - new.1.clone() - }, - )? - }; + tree.update( + &new_key, + if is_pending_transfer_key(&new_key) { + target_height.serialize_to_vec() + } else { + new.1.clone() + }, + )?; old_diff = old_diff_iter.next(); new_diff = new_diff_iter.next(); } Ordering::Less => { // the value was deleted - if !is_replay_protection_key(&old_key) { - tree.delete(&old_key)?; - } + tree.delete(&old_key)?; old_diff = old_diff_iter.next(); } Ordering::Greater => { // the value was inserted - if !is_replay_protection_key(&new_key) { - tree.update( - &new_key, - if is_pending_transfer_key(&new_key) { - target_height.try_to_vec().expect( - "Serialization should never \ - fail", - ) - } else { - new.1.clone() - }, - )?; - } + tree.update( + &new_key, + if is_pending_transfer_key(&new_key) { + target_height.serialize_to_vec() + } else { + new.1.clone() + }, + )?; new_diff = new_diff_iter.next(); } } @@ -843,9 +841,7 @@ where // the value was deleted let key = Key::parse(old.0.clone()) .expect("the key should be parsable"); - if !is_replay_protection_key(&key) { - tree.delete(&key)?; - } + tree.delete(&key)?; old_diff = old_diff_iter.next(); } (None, Some(new)) => { @@ -853,18 +849,14 @@ where let key = Key::parse(new.0.clone()) .expect("the key should be parsable"); - if !is_replay_protection_key(&key) { - tree.update( - &key, - if is_pending_transfer_key(&key) { - target_height.try_to_vec().expect( - "Serialization should never fail", - ) - } else { - new.1.clone() - }, - )? 
- }; + tree.update( + &key, + if is_pending_transfer_key(&key) { + target_height.serialize_to_vec() + } else { + new.1.clone() + }, + )?; new_diff = new_diff_iter.next(); } (None, None) => break, @@ -1055,11 +1047,10 @@ where if is_pending_transfer_key(key) { // The tree of the bright pool stores the current height for the // pending transfer - let height = - self.block.height.try_to_vec().expect("Encoding failed"); + let height = self.block.height.serialize_to_vec(); self.block.tree.update(key, height)?; - } else if !is_replay_protection_key(key) { - // Update the merkle tree for all but replay-protection entries + } else { + // Update the merkle tree self.block.tree.update(key, value)?; } self.db @@ -1074,10 +1065,8 @@ where batch: &mut D::WriteBatch, key: &Key, ) -> Result { - if !is_replay_protection_key(key) { - // Update the merkle tree for all but replay-protection entries - self.block.tree.delete(key)?; - } + // Update the merkle tree + self.block.tree.delete(key)?; self.db .batch_delete_subspace_val(batch, self.block.height, key) } @@ -1121,6 +1110,42 @@ where .map(|b| b.height) .unwrap_or_default() } + + /// Check it the given transaction's hash is already present in storage + pub fn has_replay_protection_entry(&self, hash: &Hash) -> Result { + self.db.has_replay_protection_entry(hash) + } + + /// Write the provided tx hash to storage + pub fn write_replay_protection_entry( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + ) -> Result<()> { + self.db.write_replay_protection_entry(batch, key) + } + + /// Delete the provided tx hash from storage + pub fn delete_replay_protection_entry( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + ) -> Result<()> { + self.db.delete_replay_protection_entry(batch, key) + } + + /// Iterate the replay protection storage from the last block + pub fn iter_replay_protection( + &self, + ) -> Box + '_> { + Box::new(self.db.iter_replay_protection().map(|(key, _, _)| { + key.rsplit_once('/') + .expect("Missing tx 
hash in storage key") + .1 + .parse() + .expect("Failed hash conversion") + })) + } } impl From for Error { diff --git a/core/src/ledger/storage/traits.rs b/core/src/ledger/storage/traits.rs index 2892110480..40b094e411 100644 --- a/core/src/ledger/storage/traits.rs +++ b/core/src/ledger/storage/traits.rs @@ -5,7 +5,8 @@ use std::fmt; use arse_merkle_tree::traits::{Hasher, Value}; use arse_merkle_tree::{Key as TreeKey, H256}; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use ics23::commitment_proof::Proof as Ics23Proof; use ics23::{CommitmentProof, ExistenceProof}; use sha2::{Digest, Sha256}; @@ -199,7 +200,7 @@ impl<'a> SubTreeRead for &'a BridgePoolTree { fn subtree_get(&self, key: &Key) -> Result, Error> { match self.get(key) { - Ok(height) => Ok(height.try_to_vec().expect("Encoding failed")), + Ok(height) => Ok(height.serialize_to_vec()), Err(err) => Err(Error::MerkleTree(err.to_string())), } } diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index 87107a35c9..6d30e77c73 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -9,6 +9,7 @@ use crate::ledger::storage::{DBIter, Storage, StorageHasher, DB}; use crate::ledger::storage_api::{ResultExt, StorageRead, StorageWrite}; use crate::ledger::{gas, parameters, storage_api}; use crate::types::address::Address; +use crate::types::hash::Hash; use crate::types::storage::{self, BlockHeight}; use crate::types::time::DateTimeUtc; @@ -55,6 +56,19 @@ where storage, } } + + /// Check if the given tx hash has already been processed + pub fn has_replay_protection_entry( + &self, + hash: &Hash, + ) -> Result { + if let Some(present) = self.write_log.has_replay_protection_entry(hash) + { + return Ok(present); + } + + self.storage.has_replay_protection_entry(hash) + } } /// Common trait for [`WlStorage`] and [`TempWlStorage`], used to implement @@ -78,6 +92,12 @@ pub trait 
WriteLogAndStorage { /// reference to `WriteLog` when in need of both (avoids complain from the /// borrow checker) fn split_borrow(&mut self) -> (&mut WriteLog, &Storage); + + /// Write the provided tx hash to storage. + fn write_tx_hash( + &mut self, + hash: Hash, + ) -> crate::ledger::storage::write_log::Result<()>; } impl WriteLogAndStorage for WlStorage @@ -103,6 +123,13 @@ where fn split_borrow(&mut self) -> (&mut WriteLog, &Storage) { (&mut self.write_log, &self.storage) } + + fn write_tx_hash( + &mut self, + hash: Hash, + ) -> crate::ledger::storage::write_log::Result<()> { + self.write_log.write_tx_hash(hash) + } } impl WriteLogAndStorage for TempWlStorage<'_, D, H> @@ -128,6 +155,13 @@ where fn split_borrow(&mut self) -> (&mut WriteLog, &Storage) { (&mut self.write_log, (self.storage)) } + + fn write_tx_hash( + &mut self, + hash: Hash, + ) -> crate::ledger::storage::write_log::Result<()> { + self.write_log.write_tx_hash(hash) + } } impl WlStorage @@ -221,6 +255,14 @@ where } Ok(new_epoch) } + + /// Delete the provided transaction's hash from storage. + pub fn delete_tx_hash( + &mut self, + hash: Hash, + ) -> crate::ledger::storage::write_log::Result<()> { + self.write_log.delete_tx_hash(hash) + } } /// Prefix iterator for [`WlStorage`]. 
@@ -504,7 +546,8 @@ where mod tests { use std::collections::BTreeMap; - use borsh::{BorshDeserialize, BorshSerialize}; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use proptest::prelude::*; use proptest::test_runner::Config; // Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to @@ -638,16 +681,16 @@ mod tests { | Level::BlockWriteLog(WlMod::Delete | WlMod::DeletePrefix) => { } Level::TxWriteLog(WlMod::Write(val)) => { - s.write_log.write(key, val.try_to_vec().unwrap()).unwrap(); + s.write_log.write(key, val.serialize_to_vec()).unwrap(); } Level::BlockWriteLog(WlMod::Write(val)) => { s.write_log // protocol only writes at block level - .protocol_write(key, val.try_to_vec().unwrap()) + .protocol_write(key, val.serialize_to_vec()) .unwrap(); } Level::Storage(val) => { - s.storage.write(key, val.try_to_vec().unwrap()).unwrap(); + s.storage.write(key, val.serialize_to_vec()).unwrap(); } } } diff --git a/core/src/ledger/storage/write_log.rs b/core/src/ledger/storage/write_log.rs index 84edb8f56d..e563374146 100644 --- a/core/src/ledger/storage/write_log.rs +++ b/core/src/ledger/storage/write_log.rs @@ -10,6 +10,9 @@ use crate::ledger; use crate::ledger::gas::{ STORAGE_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE, }; +use crate::ledger::replay_protection::{ + get_replay_protection_all_subkey, get_replay_protection_last_subkey, +}; use crate::ledger::storage::traits::StorageHasher; use crate::ledger::storage::Storage; use crate::types::address::{Address, EstablishedAddressGen, InternalAddress}; @@ -36,6 +39,8 @@ pub enum Error { DeleteVp, #[error("Trying to write a temporary value after deleting")] WriteTempAfterDelete, + #[error("Replay protection key: {0}")] + ReplayProtection(String), } /// Result for functions that may fail @@ -66,6 +71,17 @@ pub enum StorageModification { }, } +#[derive(Debug, Clone)] +/// A replay protection storage modification +enum ReProtStorageModification { + /// Write an entry + Write, + /// Delete an 
entry + Delete, + /// Finalize an entry + Finalize, +} + /// The write log storage #[derive(Debug, Clone)] pub struct WriteLog { @@ -87,6 +103,9 @@ pub struct WriteLog { tx_precommit_write_log: HashMap, /// The IBC events for the current transaction ibc_events: BTreeSet, + /// Storage modifications for the replay protection storage, always + /// committed regardless of the result of the transaction + replay_protection: HashMap, } /// Write log prefix iterator @@ -113,6 +132,7 @@ impl Default for WriteLog { tx_write_log: HashMap::with_capacity(100), tx_precommit_write_log: HashMap::with_capacity(100), ibc_events: BTreeSet::new(), + replay_protection: HashMap::with_capacity(1_000), } } } @@ -511,10 +531,47 @@ impl WriteLog { StorageModification::Temp { .. } => {} } } + + for (hash, entry) in self.replay_protection.iter() { + match entry { + ReProtStorageModification::Write => storage + .write_replay_protection_entry( + batch, + // Can only write tx hashes to the previous block, no + // further + &get_replay_protection_last_subkey(hash), + ) + .map_err(Error::StorageError)?, + ReProtStorageModification::Delete => storage + .delete_replay_protection_entry( + batch, + // Can only delete tx hashes from the previous block, + // no further + &get_replay_protection_last_subkey(hash), + ) + .map_err(Error::StorageError)?, + ReProtStorageModification::Finalize => { + storage + .write_replay_protection_entry( + batch, + &get_replay_protection_all_subkey(hash), + ) + .map_err(Error::StorageError)?; + storage + .delete_replay_protection_entry( + batch, + &get_replay_protection_last_subkey(hash), + ) + .map_err(Error::StorageError)? + } + } + } + if let Some(address_gen) = self.address_gen.take() { storage.address_gen = address_gen } self.block_write_log.clear(); + self.replay_protection.clear(); Ok(()) } @@ -608,6 +665,71 @@ impl WriteLog { let iter = matches.into_iter(); PrefixIter { iter } } + + /// Check if the given tx hash has already been processed. 
Returns `None` if + /// the key is not known. + pub fn has_replay_protection_entry(&self, hash: &Hash) -> Option { + self.replay_protection + .get(hash) + .map(|action| !matches!(action, ReProtStorageModification::Delete)) + } + + /// Write the transaction hash + pub(crate) fn write_tx_hash(&mut self, hash: Hash) -> Result<()> { + if self + .replay_protection + .insert(hash, ReProtStorageModification::Write) + .is_some() + { + // Cannot write an hash if other requests have already been + // committed for the same hash + return Err(Error::ReplayProtection(format!( + "Requested a write on hash {hash} over a previous request" + ))); + } + + Ok(()) + } + + /// Remove the transaction hash + pub(crate) fn delete_tx_hash(&mut self, hash: Hash) -> Result<()> { + match self + .replay_protection + .insert(hash, ReProtStorageModification::Delete) + { + None => Ok(()), + // Allow overwriting a previous finalize request + Some(ReProtStorageModification::Finalize) => Ok(()), + Some(_) => + // Cannot delete an hash that still has to be written to + // storage or has already been deleted + { + Err(Error::ReplayProtection(format!( + "Requested a delete on hash {hash} not yet committed to \ + storage" + ))) + } + } + } + + /// Move the transaction hash of the previous block to the list of all + /// blocks. 
This functions should be called at the beginning of the block + /// processing, before any other replay protection operation is done + pub fn finalize_tx_hash(&mut self, hash: Hash) -> Result<()> { + if self + .replay_protection + .insert(hash, ReProtStorageModification::Finalize) + .is_some() + { + // Cannot finalize an hash if other requests have already been + // committed for the same hash + return Err(Error::ReplayProtection(format!( + "Requested a finalize on hash {hash} over a previous request" + ))); + } + + Ok(()) + } } #[cfg(test)] @@ -834,6 +956,98 @@ mod tests { assert_eq!(value, None); } + #[test] + fn test_replay_protection_commit() { + let mut storage = + crate::ledger::storage::testing::TestStorage::default(); + let mut write_log = WriteLog::default(); + let mut batch = crate::ledger::storage::testing::TestStorage::batch(); + + // write some replay protection keys + write_log + .write_tx_hash(Hash::sha256("tx1".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx2".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx3".as_bytes())) + .unwrap(); + + // commit a block + write_log + .commit_block(&mut storage, &mut batch) + .expect("commit failed"); + + assert!(write_log.replay_protection.is_empty()); + for tx in ["tx1", "tx2", "tx3"] { + assert!( + storage + .has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) + .expect("read failed") + ); + } + + // write some replay protection keys + write_log + .write_tx_hash(Hash::sha256("tx4".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx5".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx6".as_bytes())) + .unwrap(); + + // delete previous hash + write_log + .delete_tx_hash(Hash::sha256("tx1".as_bytes())) + .unwrap(); + + // finalize previous hashes + for tx in ["tx2", "tx3"] { + write_log + .finalize_tx_hash(Hash::sha256(tx.as_bytes())) + .unwrap(); + } + + // commit a block + write_log + .commit_block(&mut storage, &mut 
batch) + .expect("commit failed"); + + assert!(write_log.replay_protection.is_empty()); + for tx in ["tx2", "tx3", "tx4", "tx5", "tx6"] { + assert!( + storage + .has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) + .expect("read failed") + ); + } + assert!( + !storage + .has_replay_protection_entry(&Hash::sha256("tx1".as_bytes())) + .expect("read failed") + ); + + // try to delete finalized hash which shouldn't work + write_log + .delete_tx_hash(Hash::sha256("tx2".as_bytes())) + .unwrap(); + + // commit a block + write_log + .commit_block(&mut storage, &mut batch) + .expect("commit failed"); + + assert!(write_log.replay_protection.is_empty()); + assert!( + storage + .has_replay_protection_entry(&Hash::sha256("tx2".as_bytes())) + .expect("read failed") + ); + } + prop_compose! { fn arb_verifiers_changed_key_tx_all_key() (verifiers_from_tx in testing::arb_verifiers_from_tx()) diff --git a/core/src/ledger/storage_api/collections/lazy_map.rs b/core/src/ledger/storage_api/collections/lazy_map.rs index 4f9aeb426d..e17de83bef 100644 --- a/core/src/ledger/storage_api/collections/lazy_map.rs +++ b/core/src/ledger/storage_api/collections/lazy_map.rs @@ -1,6 +1,6 @@ //! Lazy map. 
-use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::fmt::Debug; use std::hash::Hash; use std::marker::PhantomData; @@ -101,6 +101,107 @@ pub enum ValidationError { InvalidNestedSubKey(storage::Key), } +// pub trait EagerMapFromIter { +// fn from_iter(iter: I) -> Self +// where +// I: IntoIterator; +// } + +// impl EagerMapFromIter for HashMap { +// fn from_iter(iter: I) -> Self +// where +// I: IntoIterator, +// { +// iter.into_iter().collect() +// } +// } + +// impl EagerMapFromIter for BTreeMap { +// fn from_iter(iter: I) -> Self +// where +// K: Eq + Hash + Ord, +// I: IntoIterator, +// { +// iter.into_iter().collect() +// } +// } + +/// Trait used to facilitate collection of lazy maps into eager maps +pub trait Collectable { + /// The type of the value of the lazy map + type Collected; + + /// Collect the lazy map into an eager map + fn collect_map( + &self, + storage: &S, + ) -> storage_api::Result; +} + +impl Collectable for LazyMap +where + K: Hash + Eq + Clone + Debug + storage::KeySeg + Ord, + V: Collectable + LazyCollection + Debug, +{ + type Collected = BTreeMap; + + fn collect_map( + &self, + storage: &S, + ) -> storage_api::Result + where + S: StorageRead, + { + let mut map = BTreeMap::::new(); + for res in self.iter(storage)? { + let ( + NestedSubKey::Data { + key, + nested_sub_key: _, + }, + _, + ) = res?; + let next_layer = self.at(&key).collect_map(storage)?; + map.insert(key, next_layer); + } + Ok(map) + } +} + +impl Collectable for LazyMap +where + K: Hash + Eq + Clone + Debug + storage::KeySeg + Ord, + V: BorshSerialize + BorshDeserialize + Clone + Debug + 'static, +{ + type Collected = BTreeMap; + + fn collect_map( + &self, + storage: &S, + ) -> storage_api::Result + where + S: StorageRead, + { + let mut map = BTreeMap::::new(); + for res in self.iter(storage)? 
{ + let (key, value) = res?; + map.insert(key, value); + } + Ok(map) + } +} + +// impl Collectable for V { +// type Collected = Self; + +// fn collect_map( +// &self, +// _storage: &S, +// ) -> storage_api::Result { +// Ok(self.clone()) +// } +// } + /// [`LazyMap`] validation result pub type ValidationResult = std::result::Result; @@ -359,14 +460,6 @@ impl LazyMap where K: storage::KeySeg, { - /// Returns whether the set contains a value. - pub fn contains(&self, storage: &S, key: &K) -> Result - where - S: StorageRead, - { - storage.has_key(&self.get_data_key(key)) - } - /// Get the prefix of set's elements storage fn get_data_prefix(&self) -> storage::Key { self.key.push(&DATA_SUBKEY.to_owned()).unwrap() @@ -392,6 +485,16 @@ where V::open(self.get_data_key(key)) } + /// Returns whether the nested map contains a certain key with data inside. + pub fn contains(&self, storage: &S, key: &K) -> Result + where + S: StorageRead, + { + let prefix = self.get_data_key(key); + let mut iter = storage_api::iter_prefix_bytes(storage, &prefix)?; + Ok(iter.next().is_some()) + } + /// Remove all map entries at a given key prefix pub fn remove_all(&self, storage: &mut S, key: &K) -> Result where @@ -482,16 +585,18 @@ where Ok(previous) } - /// Removes a key from the map, returning the value at the key if the key - /// was previously in the map. + /// Removes a key from the map if it's present, returning the value at the + /// key if the key was previously in the map. pub fn remove(&self, storage: &mut S, key: &K) -> Result> where S: StorageWrite + StorageRead, { let value = self.get(storage, key)?; - let data_key = self.get_data_key(key); - storage.delete(&data_key)?; + if value.is_some() { + let data_key = self.get_data_key(key); + storage.delete(&data_key)?; + } Ok(value) } @@ -505,6 +610,28 @@ where Self::read_key_val(storage, &data_key) } + /// Update a value at the given key with the given function. If no existing + /// value exists, the closure's argument will be `None`. 
+ pub fn update(&self, storage: &mut S, key: K, f: F) -> Result<()> + where + S: StorageWrite + StorageRead, + F: FnOnce(Option) -> V, + { + let data_key = self.get_data_key(&key); + let current = Self::read_key_val(storage, &data_key)?; + let new = f(current); + Self::write_key_val(storage, &data_key, new)?; + Ok(()) + } + + /// Returns whether the map contains a key with a value. + pub fn contains(&self, storage: &S, key: &K) -> Result + where + S: StorageRead, + { + storage.has_key(&self.get_data_key(key)) + } + /// Returns whether the map contains no elements. pub fn is_empty(&self, storage: &S) -> Result where @@ -553,6 +680,19 @@ where })) } + // /// Collect the lazy map into an eager map + // pub fn collect(&self, storage: &S) -> Result + // where + // S: StorageRead, + // M: EagerMapFromIter, + // K: Eq + Hash + Ord, + // { + // let it = self + // .iter(storage)? + // .map(|res| res.expect("Failed to unwrap a lazy map element")); + // Ok(M::from_iter(it)) + // } + /// Reads a value from storage fn read_key_val( storage: &S, @@ -619,6 +759,14 @@ mod test { assert_eq!(lazy_map.get(&storage, &key)?.unwrap(), val); assert_eq!(lazy_map.get(&storage, &key2)?.unwrap(), val2); + let eager_map: BTreeMap<_, _> = lazy_map.collect_map(&storage)?; + assert_eq!( + eager_map, + vec![(123, "Test".to_string()), (456, "Test2".to_string())] + .into_iter() + .collect::>() + ); + // Remove the values and check the map contents let removed = lazy_map.remove(&mut storage, &key)?.unwrap(); assert_eq!(removed, val); @@ -650,6 +798,20 @@ mod test { Some(SubKey::Data(key2)) ); + // Try to update a key that doesn't yet exist. + let updated_val = "updated"; + lazy_map.update(&mut storage, key, |current| { + assert!(current.is_none()); + updated_val.to_string() + })?; + // Try to update a key that exists. 
+ let updated_val_2 = "updated again"; + lazy_map.update(&mut storage, key, |current| { + assert_eq!(¤t.unwrap_or_default(), updated_val); + updated_val_2.to_string() + })?; + assert_eq!(&lazy_map.get(&storage, &key)?.unwrap(), updated_val_2); + Ok(()) } @@ -780,6 +942,7 @@ mod test { nested_map.at(&0).get(&storage, &"string2".to_string())?, None ); + assert!(nested_map.contains(&storage, &0)?); // Insert more values nested_map @@ -789,6 +952,9 @@ mod test { .at(&0) .insert(&mut storage, "string2".to_string(), 300)?; + assert!(nested_map.contains(&storage, &0)?); + assert!(nested_map.contains(&storage, &1)?); + let mut it = nested_map.iter(&storage)?; let ( NestedSubKey::Data { @@ -852,6 +1018,8 @@ mod test { assert_eq!(nested_map.at(&0).len(&storage)?, 0_u64); assert_eq!(nested_map.at(&1).len(&storage)?, 1_u64); assert_eq!(nested_map.iter(&storage)?.count(), 1); + assert!(!nested_map.contains(&storage, &0)?); + assert!(nested_map.contains(&storage, &1)?); // Start removing elements let rem = nested_map @@ -899,4 +1067,56 @@ mod test { assert!(!nested_map.contains(&storage, &1).unwrap()); assert!(nested_map.is_empty(&storage).unwrap()); } + + #[test] + fn test_lazy_map_collection() { + let mut storage = TestWlStorage::default(); + let key_s = storage::Key::parse("testing_simple").unwrap(); + let key_n = storage::Key::parse("testing_nested").unwrap(); + + let simple = LazyMap::::open(key_s); + simple + .insert(&mut storage, "bartle".to_string(), 5) + .unwrap(); + simple.insert(&mut storage, "doo".to_string(), 4).unwrap(); + + let nested_map = NestedMap::>::open(key_n); + nested_map + .at(&0) + .insert(&mut storage, "dingus".to_string(), 5) + .unwrap(); + nested_map + .at(&0) + .insert(&mut storage, "zingus".to_string(), 3) + .unwrap(); + nested_map + .at(&1) + .insert(&mut storage, "dingus".to_string(), 4) + .unwrap(); + + let exp_simple = + vec![("bartle".to_string(), 5), ("doo".to_string(), 4)] + .into_iter() + .collect::>(); + let mut exp_nested: BTreeMap> = + 
BTreeMap::new(); + exp_nested + .entry(0) + .or_default() + .insert("dingus".to_string(), 5); + exp_nested + .entry(0) + .or_default() + .insert("zingus".to_string(), 3); + exp_nested + .entry(1) + .or_default() + .insert("dingus".to_string(), 4); + + let simple_eager = simple.collect_map(&storage).unwrap(); + let nested_eager = nested_map.collect_map(&storage).unwrap(); + + assert_eq!(exp_simple, simple_eager); + assert_eq!(exp_nested, nested_eager); + } } diff --git a/core/src/ledger/storage_api/collections/lazy_set.rs b/core/src/ledger/storage_api/collections/lazy_set.rs index 038b7a87d0..9dc6747465 100644 --- a/core/src/ledger/storage_api/collections/lazy_set.rs +++ b/core/src/ledger/storage_api/collections/lazy_set.rs @@ -183,7 +183,7 @@ where storage.write(&key, ()) } - /// Removes a key from the set, returning `true` if the key + /// Removes a key from the set if it's present, returning `true` if the key /// was in the set. pub fn remove(&self, storage: &mut S, key: &K) -> Result where @@ -191,8 +191,10 @@ where { let present = self.contains(storage, key)?; - let key = self.get_key(key); - storage.delete(&key)?; + if present { + let key = self.get_key(key); + storage.delete(&key)?; + } Ok(present) } diff --git a/core/src/ledger/storage_api/error.rs b/core/src/ledger/storage_api/error.rs index f99539bc87..5644bc0a1a 100644 --- a/core/src/ledger/storage_api/error.rs +++ b/core/src/ledger/storage_api/error.rs @@ -63,6 +63,33 @@ impl Error { { Self::CustomWithMessage(msg, CustomError(error.into())) } + + /// Attempt to downgrade the inner error to `E` if any. + /// + /// If this [`enum@Error`] was constructed via [`new`] or [`wrap`] then this + /// function will attempt to perform downgrade on it, otherwise it will + /// return [`Err`]. + /// + /// [`new`]: Error::new + /// [`wrap`]: Error::wrap + /// + /// To match on the inner error type when the downcast is successful, you'll + /// typically want to [`std::ops::Deref::deref`] it out of the [`Box`]. 
+ pub fn downcast(self) -> std::result::Result, Self> + where + E: std::error::Error + Send + Sync + 'static, + { + match self { + Self::Custom(CustomError(b)) + | Self::CustomWithMessage(_, CustomError(b)) + if b.is::() => + { + let res = b.downcast::(); + Ok(res.unwrap()) + } + _ => Err(self), + } + } } /// A custom error diff --git a/core/src/ledger/storage_api/governance.rs b/core/src/ledger/storage_api/governance.rs index e03be937b5..ab4ad27b0b 100644 --- a/core/src/ledger/storage_api/governance.rs +++ b/core/src/ledger/storage_api/governance.rs @@ -245,9 +245,7 @@ where let min_proposal_voting_period: u64 = storage.read(&key)?.expect("Parameter should be definied."); - let key = governance_keys::get_max_proposal_period_key(); - let max_proposal_period: u64 = - storage.read(&key)?.expect("Parameter should be definied."); + let max_proposal_period: u64 = get_max_proposal_period(storage)?; Ok(GovernanceParameters { min_proposal_fund, @@ -258,3 +256,14 @@ where min_proposal_grace_epochs, }) } + +/// Get governance "max_proposal_period" parameter +pub fn get_max_proposal_period(storage: &S) -> storage_api::Result +where + S: storage_api::StorageRead, +{ + let key = governance_keys::get_max_proposal_period_key(); + let max_proposal_period: u64 = + storage.read(&key)?.expect("Parameter should be defined."); + Ok(max_proposal_period) +} diff --git a/core/src/ledger/storage_api/mod.rs b/core/src/ledger/storage_api/mod.rs index 1108c44e3d..3ec75843b1 100644 --- a/core/src/ledger/storage_api/mod.rs +++ b/core/src/ledger/storage_api/mod.rs @@ -11,6 +11,7 @@ pub mod token; pub mod validation; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; pub use error::{CustomError, Error, OptionExt, Result, ResultExt}; use crate::types::address::Address; @@ -109,7 +110,7 @@ pub trait StorageWrite { key: &storage::Key, val: T, ) -> Result<()> { - let bytes = val.try_to_vec().into_storage_result()?; + let bytes = val.serialize_to_vec(); 
self.write_bytes(key, bytes) } diff --git a/core/src/ledger/storage_api/token.rs b/core/src/ledger/storage_api/token.rs index 02adcc32be..5ea915d034 100644 --- a/core/src/ledger/storage_api/token.rs +++ b/core/src/ledger/storage_api/token.rs @@ -46,19 +46,22 @@ pub fn read_denom( where S: StorageRead, { - let (key, nut) = match token { + let (key, is_default_zero) = match token { Address::Internal(InternalAddress::Nut(erc20)) => { let token = Address::Internal(InternalAddress::Erc20(*erc20)); + // NB: always use the equivalent ERC20's smallest + // denomination to specify amounts, if we cannot + // find a denom in storage (token::denom_key(&token), true) } + Address::Internal(InternalAddress::IbcToken(_)) => { + (token::denom_key(token), true) + } token => (token::denom_key(token), false), }; storage.read(&key).map(|opt_denom| { Some(opt_denom.unwrap_or_else(|| { - if nut { - // NB: always use the equivalent ERC20's smallest - // denomination to specify amounts, if we cannot - // find a denom in storage + if is_default_zero { 0u8.into() } else { // FIXME: perhaps when we take this branch, we should @@ -171,7 +174,7 @@ where amount } None => { - storage.write(&key, token::Amount::default())?; + storage.write(&key, token::Amount::zero())?; balance } }; diff --git a/core/src/ledger/tx_env.rs b/core/src/ledger/tx_env.rs index 61b7824e64..ad8b23e60c 100644 --- a/core/src/ledger/tx_env.rs +++ b/core/src/ledger/tx_env.rs @@ -59,9 +59,9 @@ pub trait TxEnv: StorageRead + StorageWrite { /// Request to charge the provided amount of gas for the current transaction fn charge_gas(&mut self, used_gas: u64) -> Result<(), storage_api::Error>; - /// Get an IBC event with a event type - fn get_ibc_event( + /// Get IBC events with a event type + fn get_ibc_events( &self, event_type: impl AsRef, - ) -> Result, storage_api::Error>; + ) -> Result, storage_api::Error>; } diff --git a/core/src/ledger/vp_env.rs b/core/src/ledger/vp_env.rs index 969452b241..1241adfed7 100644 --- 
a/core/src/ledger/vp_env.rs +++ b/core/src/ledger/vp_env.rs @@ -2,14 +2,17 @@ //! inside validity predicates. use borsh::BorshDeserialize; +use masp_primitives::transaction::Transaction; -use super::storage_api::{self, StorageRead}; +use super::storage_api::{self, OptionExt, ResultExt, StorageRead}; use crate::proto::Tx; use crate::types::address::Address; use crate::types::hash::Hash; +use crate::types::ibc::{get_shielded_transfer, IbcEvent, EVENT_TYPE_PACKET}; use crate::types::storage::{ BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, }; +use crate::types::token::Transfer; /// Validity predicate's environment is available for native VPs and WASM VPs pub trait VpEnv<'view> @@ -75,6 +78,12 @@ where /// Get the address of the native token. fn get_native_token(&self) -> Result; + /// Get the IBC events. + fn get_ibc_events( + &self, + event_type: String, + ) -> Result, storage_api::Error>; + /// Storage prefix iterator, ordered by storage keys. It will try to get an /// iterator from the storage. 
fn iter_prefix<'iter>( @@ -97,6 +106,43 @@ where /// Get a tx hash fn get_tx_code_hash(&self) -> Result, storage_api::Error>; + /// Get the shielded action including the transfer and the masp tx + fn get_shielded_action( + &self, + tx_data: Tx, + ) -> Result<(Transfer, Transaction), storage_api::Error> { + let signed = tx_data; + if let Ok(transfer) = + Transfer::try_from_slice(&signed.data().unwrap()[..]) + { + let shielded_hash = transfer + .shielded + .ok_or_err_msg("unable to find shielded hash")?; + let masp_tx = signed + .get_section(&shielded_hash) + .and_then(|x| x.as_ref().masp_tx()) + .ok_or_err_msg("unable to find shielded section")?; + return Ok((transfer, masp_tx)); + } + + // Shielded transfer over IBC + let events = self.get_ibc_events(EVENT_TYPE_PACKET.to_string())?; + // The receiving event should be only one in the single IBC transaction + let event = events.first().ok_or_else(|| { + storage_api::Error::new_const( + "No IBC event for the shielded action", + ) + })?; + get_shielded_transfer(event) + .into_storage_result()? 
+ .map(|shielded| (shielded.transfer, shielded.masp_tx)) + .ok_or_else(|| { + storage_api::Error::new_const( + "No shielded transfer in the IBC event", + ) + }) + } + /// Verify a MASP transaction fn verify_masp(&self, tx: Vec) -> Result; diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index a6082fbbab..2702d4285e 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -3,14 +3,17 @@ use std::cmp::Ordering; use std::collections::{BTreeMap, HashMap, HashSet}; use std::convert::TryFrom; use std::hash::{Hash, Hasher}; +#[cfg(feature = "ferveo-tpke")] +use std::io::Read; use std::marker::PhantomData; #[cfg(feature = "ferveo-tpke")] use ark_ec::AffineCurve; #[cfg(feature = "ferveo-tpke")] use ark_ec::PairingEngine; -use borsh::schema::{Declaration, Definition}; +use borsh::schema::{add_definition, Declaration, Definition}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXUPPER; use masp_primitives::transaction::builder::Builder; use masp_primitives::transaction::components::sapling::builder::SaplingMetadata; @@ -129,8 +132,7 @@ impl Signable for SerializeWithBorsh { type Output = Vec; fn as_signable(data: &T) -> Vec { - data.try_to_vec() - .expect("Encoding data for signing shouldn't fail") + data.serialize_to_vec() } } @@ -183,17 +185,22 @@ impl PartialOrd for Signed { self.data.partial_cmp(&other.data) } } +impl Ord for Signed { + fn cmp(&self, other: &Self) -> Ordering { + self.data.cmp(&other.data) + } +} impl BorshSchema for Signed { fn add_definitions_recursively( - definitions: &mut HashMap, + definitions: &mut BTreeMap, ) { - let fields = borsh::schema::Fields::NamedFields(borsh::maybestd::vec![ + let fields = borsh::schema::Fields::NamedFields(vec![ ("data".to_string(), T::declaration()), - ("sig".to_string(), ::declaration()) + ("sig".to_string(), ::declaration()), ]); let definition = borsh::schema::Definition::Struct { fields }; - 
Self::add_definition(Self::declaration(), definition, definitions); + add_definition(Self::declaration(), definition, definitions); T::add_definitions_recursively(definitions); ::add_definitions_recursively(definitions); } @@ -265,9 +272,7 @@ impl Data { /// Hash this data section pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize data section"), - ); + hasher.update(self.serialize_to_vec()); hasher } } @@ -410,8 +415,7 @@ impl SignatureIndex { } pub fn serialize(&self) -> String { - let signature_bytes = - self.try_to_vec().expect("Signature should be serializable"); + let signature_bytes = self.serialize_to_vec(); HEXUPPER.encode(&signature_bytes) } @@ -524,10 +528,7 @@ impl Signature { /// Hash this signature section pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec() - .expect("unable to serialize multisignature section"), - ); + hasher.update(self.serialize_to_vec()); hasher } @@ -553,6 +554,7 @@ impl Signature { verified_pks: &mut HashSet, public_keys_index_map: &AccountPublicKeysMap, signer: &Option
, + gas_meter: &mut Option<&mut VpGasMeter>, ) -> std::result::Result { // Records whether there are any successful verifications let mut verifications = 0; @@ -564,6 +566,11 @@ impl Signature { if let Some(pk) = public_keys_index_map.get_public_key_from_index(*idx) { + if let Some(meter) = gas_meter { + meter + .consume(VERIFY_TX_SIG_GAS_COST) + .map_err(|_| VerifySigError::OutOfGas)?; + } common::SigScheme::verify_signature( &pk, &self.get_raw_hash(), @@ -584,6 +591,11 @@ impl Signature { if let Some(map_idx) = public_keys_index_map.get_index_from_public_key(pk) { + if let Some(meter) = gas_meter { + meter + .consume(VERIFY_TX_SIG_GAS_COST) + .map_err(|_| VerifySigError::OutOfGas)?; + } common::SigScheme::verify_signature( pk, &self.get_raw_hash(), @@ -628,6 +640,9 @@ impl CompressedSignature { if idx == 0 { // The "zeroth" section is the header targets.push(tx.header_hash()); + } else if idx == 255 { + // The 255th section is the raw header + targets.push(tx.raw_header_hash()); } else { targets.push(tx.sections[idx as usize - 1].get_hash()); } @@ -663,8 +678,7 @@ impl Ciphertext { #[cfg(feature = "ferveo-tpke")] pub fn new(sections: Vec
, pubkey: &EncryptionKey) -> Self { let mut rng = rand::thread_rng(); - let bytes = - sections.try_to_vec().expect("unable to serialize sections"); + let bytes = sections.serialize_to_vec(); Self { ciphertext: tpke::encrypt(&bytes, pubkey.0, &mut rng), } @@ -683,9 +697,7 @@ impl Ciphertext { /// Get the hash of this ciphertext section. This operation is done in such /// a way it matches the hash of the type pun pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize decrypted tx"), - ); + hasher.update(self.serialize_to_vec()); hasher } } @@ -725,34 +737,42 @@ impl borsh::ser::BorshSerialize for Ciphertext { #[cfg(feature = "ferveo-tpke")] impl borsh::BorshDeserialize for Ciphertext { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - type VecTuple = (u32, Vec, Vec, Vec); - let (_length, nonce, ciphertext, auth_tag): VecTuple = - BorshDeserialize::deserialize(buf)?; - Ok(Self { - ciphertext: tpke::Ciphertext { - nonce: ark_serialize::CanonicalDeserialize::deserialize( - &*nonce, - ) - .map_err(|err| { - std::io::Error::new(std::io::ErrorKind::InvalidData, err) - })?, - ciphertext, - auth_tag: ark_serialize::CanonicalDeserialize::deserialize( - &*auth_tag, - ) - .map_err(|err| { - std::io::Error::new(std::io::ErrorKind::InvalidData, err) - })?, - }, - }) + fn deserialize_reader(reader: &mut R) -> std::io::Result { + { + type VecTuple = (u32, Vec, Vec, Vec); + let (_length, nonce, ciphertext, auth_tag): VecTuple = + BorshDeserialize::deserialize_reader(reader)?; + Ok(Self { + ciphertext: tpke::Ciphertext { + nonce: ark_serialize::CanonicalDeserialize::deserialize( + &*nonce, + ) + .map_err(|err| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + err, + ) + })?, + ciphertext, + auth_tag: ark_serialize::CanonicalDeserialize::deserialize( + &*auth_tag, + ) + .map_err(|err| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + err, + ) + })?, + }, + }) + } } } 
#[cfg(feature = "ferveo-tpke")] impl borsh::BorshSchema for Ciphertext { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -785,9 +805,7 @@ struct SerializedCiphertext { impl From for SerializedCiphertext { fn from(tx: Ciphertext) -> Self { SerializedCiphertext { - payload: tx - .try_to_vec() - .expect("Unable to serialize encrypted transaction"), + payload: tx.serialize_to_vec(), } } } @@ -824,7 +842,7 @@ where T: From>, T: serde::Serialize, { - Into::::into(obj.try_to_vec().unwrap()).serialize(ser) + Into::::into(obj.serialize_to_vec()).serialize(ser) } fn serde_borsh<'de, T, S, U>(ser: S) -> std::result::Result @@ -901,16 +919,14 @@ impl MaspBuilder { /// Get the hash of this ciphertext section. This operation is done in such /// a way it matches the hash of the type pun pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize MASP builder"), - ); + hasher.update(self.serialize_to_vec()); hasher } } impl borsh::BorshSchema for MaspBuilder { fn add_definitions_recursively( - _definitions: &mut std::collections::HashMap< + _definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -962,8 +978,7 @@ impl Section { /// allowing transaction sections to cross reference. pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { // Get the index corresponding to this variant - let discriminant = - self.try_to_vec().expect("sections should serialize")[0]; + let discriminant = self.serialize_to_vec()[0]; // Use Borsh's discriminant in the Section's hash hasher.update([discriminant]); match self { @@ -1111,10 +1126,7 @@ impl Header { /// Get the hash of this transaction header. 
pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec() - .expect("unable to serialize transaction header"), - ); + hasher.update(self.serialize_to_vec()); hasher } @@ -1221,9 +1233,7 @@ impl Tx { /// Serialize tx to hex string pub fn serialize(&self) -> String { - let tx_bytes = self - .try_to_vec() - .expect("Transation should be serializable"); + let tx_bytes = self.serialize_to_vec(); HEXUPPER.encode(&tx_bytes) } @@ -1250,6 +1260,14 @@ impl Tx { Section::Header(self.header.clone()).get_hash() } + /// Gets the hash of the decrypted transaction's header + pub fn raw_header_hash(&self) -> crate::types::hash::Hash { + let mut raw_header = self.header(); + raw_header.tx_type = TxType::Raw; + + Section::Header(raw_header).get_hash() + } + /// Get hashes of all the sections in this transaction pub fn sechashes(&self) -> Vec { let mut hashes = vec![self.header_hash()]; @@ -1272,6 +1290,10 @@ impl Tx { ) -> Option> { if self.header_hash() == *hash { return Some(Cow::Owned(Section::Header(self.header.clone()))); + } else if self.raw_header_hash() == *hash { + let mut header = self.header(); + header.tx_type = TxType::Raw; + return Some(Cow::Owned(Section::Header(header))); } for section in &self.sections { if section.get_hash() == *hash { @@ -1351,27 +1373,13 @@ impl Tx { pub fn to_bytes(&self) -> Vec { let mut bytes = vec![]; let tx: types::Tx = types::Tx { - data: self.try_to_vec().expect("encoding a transaction failed"), + data: self.serialize_to_vec(), }; tx.encode(&mut bytes) .expect("encoding a transaction failed"); bytes } - /// Get the inner section hashes - pub fn inner_section_targets(&self) -> Vec { - let mut sections_hashes = self - .sections - .iter() - .filter_map(|section| match section { - Section::Data(_) | Section::Code(_) => Some(section.get_hash()), - _ => None, - }) - .collect::>(); - sections_hashes.sort(); - sections_hashes - } - /// Verify that the section with the given hash has been signed by the 
given /// public key pub fn verify_signatures( @@ -1381,7 +1389,7 @@ impl Tx { signer: &Option
, threshold: u8, max_signatures: Option, - mut gas_meter: Option<&mut VpGasMeter>, + gas_meter: &mut Option<&mut VpGasMeter>, ) -> std::result::Result, Error> { let max_signatures = max_signatures.unwrap_or(u8::MAX); // Records the public key indices used in successful signatures @@ -1408,26 +1416,22 @@ impl Tx { } // Finally verify that the signature itself is valid - let prev_verifieds = verified_pks.len(); let amt_verifieds = signatures .verify_signature( &mut verified_pks, &public_keys_index_map, signer, + gas_meter, ) - .map_err(|_| { - Error::InvalidSectionSignature( - "found invalid signature.".to_string(), - ) + .map_err(|e| { + if let VerifySigError::OutOfGas = e { + Error::OutOfGas + } else { + Error::InvalidSectionSignature( + "found invalid signature.".to_string(), + ) + } }); - // Compute the cost of the signature verifications - if let Some(x) = gas_meter.as_mut() { - let amt_verified = usize::from(amt_verifieds.is_err()) - + verified_pks.len() - - prev_verifieds; - x.consume(VERIFY_TX_SIG_GAS_COST * amt_verified as u64) - .map_err(|_| Error::OutOfGas)?; - } // Record the section witnessing these signatures if amt_verifieds? > 0 { witnesses.push(signatures); @@ -1458,7 +1462,7 @@ impl Tx { &None, 1, None, - None, + &mut None, ) .map(|x| *x.first().unwrap()) .map_err(|_| Error::InvalidWrapperSignature) @@ -1486,7 +1490,7 @@ impl Tx { public_keys_index_map: &AccountPublicKeysMap, signer: Option
, ) -> Vec { - let targets = self.inner_section_targets(); + let targets = vec![self.raw_header_hash()]; let mut signatures = Vec::new(); let section = Signature::new( targets, @@ -1716,7 +1720,7 @@ impl Tx { /// Add wasm data to the tx builder pub fn add_data(&mut self, data: impl BorshSerialize) -> &mut Self { - let bytes = data.try_to_vec().expect("Encoding tx data shouldn't fail"); + let bytes = data.serialize_to_vec(); self.set_data(Data::new(bytes)); self } @@ -1764,8 +1768,10 @@ impl Tx { account_public_keys_map: AccountPublicKeysMap, signer: Option
, ) -> &mut Self { + // The inner tx signer signs the Decrypted version of the Header + let hashes = vec![self.raw_header_hash()]; self.protocol_filter(); - let hashes = self.inner_section_targets(); + self.add_section(Section::Signature(Signature::new( hashes, account_public_keys_map.index_secret_keys(keypairs), @@ -1781,7 +1787,7 @@ impl Tx { ) -> &mut Self { self.protocol_filter(); let mut pk_section = Signature { - targets: self.inner_section_targets(), + targets: vec![self.raw_header_hash()], signatures: BTreeMap::new(), signer: Signer::PubKeys(vec![]), }; @@ -1792,7 +1798,7 @@ impl Tx { // Add the signature under the given multisig address let section = sections.entry(addr.clone()).or_insert_with(|| Signature { - targets: self.inner_section_targets(), + targets: vec![self.raw_header_hash()], signatures: BTreeMap::new(), signer: Signer::Address(addr.clone()), }); @@ -1987,16 +1993,13 @@ mod tests { // check that encryption doesn't do trivial things assert_ne!( encrypted.ciphertext.ciphertext, - plaintext.try_to_vec().expect("Test failed") + plaintext.serialize_to_vec() ); // decrypt the payload and check we got original data back let decrypted = encrypted.decrypt(privkey); assert_eq!( - decrypted - .expect("Test failed") - .try_to_vec() - .expect("Test failed"), - plaintext.try_to_vec().expect("Test failed"), + decrypted.expect("Test failed").serialize_to_vec(), + plaintext.serialize_to_vec(), ); } @@ -2014,7 +2017,7 @@ mod tests { ))]; let encrypted = Ciphertext::new(plaintext.clone(), &pubkey); // serialize via Borsh - let borsh = encrypted.try_to_vec().expect("Test failed"); + let borsh = encrypted.serialize_to_vec(); // deserialize again let new_encrypted: Ciphertext = BorshDeserialize::deserialize(&mut borsh.as_ref()) @@ -2022,11 +2025,8 @@ mod tests { // check that decryption works as expected let decrypted = new_encrypted.decrypt(privkey); assert_eq!( - decrypted - .expect("Test failed") - .try_to_vec() - .expect("Test failed"), - 
plaintext.try_to_vec().expect("Test failed"), + decrypted.expect("Test failed").serialize_to_vec(), + plaintext.serialize_to_vec(), ); } @@ -2051,11 +2051,8 @@ mod tests { let decrypted = new_encrypted.decrypt(privkey); // check that decryption works as expected assert_eq!( - decrypted - .expect("Test failed") - .try_to_vec() - .expect("Test failed"), - plaintext.try_to_vec().expect("Test failed"), + decrypted.expect("Test failed").serialize_to_vec(), + plaintext.serialize_to_vec(), ); } } diff --git a/core/src/types/address.rs b/core/src/types/address.rs index 416b3f059e..f7c63bb470 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -9,6 +9,7 @@ use std::str::FromStr; use bech32::{self, FromBase32, ToBase32, Variant}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXUPPER; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -82,8 +83,6 @@ mod internal { "ano::ETH Bridge Address "; pub const ETH_BRIDGE_POOL: &str = "ano::ETH Bridge Pool Address "; - pub const REPLAY_PROTECTION: &str = - "ano::Replay Protection "; pub const MULTITOKEN: &str = "ano::Multitoken "; pub const PGF: &str = @@ -243,9 +242,6 @@ impl Address { eth_addr.to_canonical().replace("0x", ""); format!("{PREFIX_NUT}::{eth_addr}") } - InternalAddress::ReplayProtection => { - internal::REPLAY_PROTECTION.to_string() - } InternalAddress::Multitoken => { internal::MULTITOKEN.to_string() } @@ -329,9 +325,6 @@ impl Address { internal::ETH_BRIDGE_POOL => { Ok(Address::Internal(InternalAddress::EthBridgePool)) } - internal::REPLAY_PROTECTION => { - Ok(Address::Internal(InternalAddress::ReplayProtection)) - } internal::MULTITOKEN => { Ok(Address::Internal(InternalAddress::Multitoken)) } @@ -450,7 +443,18 @@ impl TryFrom for Address { type Error = DecodeError; fn try_from(signer: Signer) -> Result { - Address::decode(signer.as_ref()) + // The given address should be an address or payment address. 
When + // sending a token from a spending key, it has been already + // replaced with the MASP address. + Address::decode(signer.as_ref()).or( + match crate::types::masp::PaymentAddress::from_str(signer.as_ref()) + { + Ok(_) => Ok(masp()), + Err(_) => Err(DecodeError::InvalidInnerEncodingStr(format!( + "Invalid address for IBC transfer: {signer}" + ))), + }, + ) } } @@ -494,9 +498,7 @@ impl EstablishedAddressGen { &mut self, rng_source: impl AsRef<[u8]>, ) -> Address { - let gen_bytes = self - .try_to_vec() - .expect("Encoding established addresses generator shouldn't fail"); + let gen_bytes = self.serialize_to_vec(); let bytes = [&gen_bytes, rng_source.as_ref()].concat(); let full_hash = Sha256::digest(&bytes); // take first 20 bytes of the hash @@ -572,8 +574,6 @@ pub enum InternalAddress { Erc20(EthAddress), /// Non-usable ERC20 tokens Nut(EthAddress), - /// Replay protection contains transactions' hash - ReplayProtection, /// Multitoken Multitoken, /// Pgf @@ -596,7 +596,6 @@ impl Display for InternalAddress { Self::EthBridgePool => "EthBridgePool".to_string(), Self::Erc20(eth_addr) => format!("Erc20: {}", eth_addr), Self::Nut(eth_addr) => format!("Non-usable token: {eth_addr}"), - Self::ReplayProtection => "ReplayProtection".to_string(), Self::Multitoken => "Multitoken".to_string(), Self::Pgf => "PublicGoodFundings".to_string(), } @@ -681,23 +680,6 @@ pub fn tokens() -> HashMap { .collect() } -/// Temporary helper for testing, a hash map of tokens addresses with their -/// MASP XAN incentive schedules. If the reward is (a, b) then a rewarded tokens -/// are dispensed for every b possessed tokens. 
-pub fn masp_rewards() -> HashMap { - vec![ - (nam(), (0, 100)), - (btc(), (1, 100)), - (eth(), (2, 100)), - (dot(), (3, 100)), - (schnitzel(), (4, 100)), - (apfel(), (5, 100)), - (kartoffel(), (6, 100)), - ] - .into_iter() - .collect() -} - #[cfg(test)] pub mod tests { use proptest::prelude::*; @@ -753,7 +735,7 @@ pub mod tests { #[test] fn test_established_address_bytes_length(address in testing::arb_established_address()) { let address = Address::Established(address); - let bytes = address.try_to_vec().unwrap(); + let bytes = address.serialize_to_vec(); assert_eq!(bytes.len(), ESTABLISHED_ADDRESS_BYTES_LEN); } } @@ -892,7 +874,6 @@ pub mod testing { InternalAddress::EthBridgePool => {} InternalAddress::Erc20(_) => {} InternalAddress::Nut(_) => {} - InternalAddress::ReplayProtection => {} InternalAddress::Pgf => {} InternalAddress::Multitoken => {} /* Add new addresses in the * `prop_oneof` below. */ @@ -908,7 +889,6 @@ pub mod testing { Just(InternalAddress::EthBridgePool), Just(arb_erc20()), Just(arb_nut()), - Just(InternalAddress::ReplayProtection), Just(InternalAddress::Multitoken), Just(InternalAddress::Pgf), ] diff --git a/core/src/types/chain.rs b/core/src/types/chain.rs index b14fdbbef2..43977d8812 100644 --- a/core/src/types/chain.rs +++ b/core/src/types/chain.rs @@ -95,7 +95,7 @@ impl<'de> Deserialize<'de> for ProposalBytes { impl BorshSchema for ProposalBytes { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut std::collections::BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, diff --git a/core/src/types/dec.rs b/core/src/types/dec.rs index abc80c618c..40494ffad0 100644 --- a/core/src/types/dec.rs +++ b/core/src/types/dec.rs @@ -4,7 +4,8 @@ //! precision. 
use std::fmt::{Debug, Display, Formatter}; -use std::ops::{Add, AddAssign, Div, Mul, Sub}; +use std::iter::Sum; +use std::ops::{Add, AddAssign, Div, Mul, Neg, Sub}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; @@ -330,6 +331,12 @@ impl AddAssign for Dec { } } +impl Sum for Dec { + fn sum>(iter: I) -> Self { + iter.fold(Dec::default(), |acc, next| acc + next) + } +} + impl Sub for Dec { type Output = Self; @@ -409,6 +416,14 @@ impl Div for Dec { } } +impl Neg for Dec { + type Output = Self; + + fn neg(self) -> Self::Output { + Self(self.0.neg()) + } +} + impl Display for Dec { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let is_neg = self.is_negative(); diff --git a/core/src/types/eth_bridge_pool.rs b/core/src/types/eth_bridge_pool.rs index 0f1a887345..8e533ea262 100644 --- a/core/src/types/eth_bridge_pool.rs +++ b/core/src/types/eth_bridge_pool.rs @@ -4,6 +4,7 @@ use std::borrow::Cow; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use ethabi::token::Token; use serde::{Deserialize, Serialize}; @@ -68,7 +69,7 @@ pub enum TransferToEthereumKind { Deserialize, BorshSerialize, BorshDeserialize, - BorshSchema, + /* BorshSchema, */ )] pub struct PendingTransferAppendix<'transfer> { /// The kind of the pending transfer to Ethereum. @@ -105,9 +106,7 @@ impl<'t> From<&'t PendingTransfer> for PendingTransferAppendix<'t> { impl<'transfer> PendingTransferAppendix<'transfer> { /// Calculate the checksum of this [`PendingTransferAppendix`]. 
pub fn checksum(&self) -> HashDigest { - let serialized = self - .try_to_vec() - .expect("Serializing a PendingTransferAppendix should not fail"); + let serialized = self.serialize_to_vec(); HashDigest::sha256(serialized) } } diff --git a/core/src/types/ethereum_events.rs b/core/src/types/ethereum_events.rs index 8dce0a39a4..2dc3601e5e 100644 --- a/core/src/types/ethereum_events.rs +++ b/core/src/types/ethereum_events.rs @@ -6,6 +6,7 @@ use std::ops::{Add, Sub}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use ethabi::ethereum_types::{H160, U256 as ethUint}; use ethabi::Token; use eyre::{eyre, Context}; @@ -308,7 +309,7 @@ pub enum EthereumEvent { impl EthereumEvent { /// SHA256 of the Borsh serialization of the [`EthereumEvent`]. pub fn hash(&self) -> Result { - let bytes = self.try_to_vec()?; + let bytes = self.serialize_to_vec(); Ok(Hash::sha256(bytes)) } } diff --git a/core/src/types/ethereum_structs.rs b/core/src/types/ethereum_structs.rs index bccab79d65..f029edc4b6 100644 --- a/core/src/types/ethereum_structs.rs +++ b/core/src/types/ethereum_structs.rs @@ -1,5 +1,6 @@ //! Ethereum bridge struct re-exports and types to do with ethereum. use std::fmt; +use std::io::Read; use std::num::NonZeroU64; use std::ops::{Add, AddAssign, Deref}; @@ -96,8 +97,8 @@ impl BorshSerialize for BlockHeight { } impl BorshDeserialize for BlockHeight { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let be: Vec = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let be: Vec = BorshDeserialize::deserialize_reader(reader)?; Ok(Self(Uint256::from_bytes_be(&be))) } } diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs index 7a412ecb05..a58d042eeb 100644 --- a/core/src/types/ibc.rs +++ b/core/src/types/ibc.rs @@ -1,10 +1,15 @@ -//! IBC event without IBC-related data types +//! 
IBC-related data types use std::cmp::Ordering; use std::collections::HashMap; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +/// The event type defined in ibc-rs for receiving a token +pub const EVENT_TYPE_PACKET: &str = "fungible_token_packet"; +/// The event type defined in ibc-rs for IBC denom +pub const EVENT_TYPE_DENOM_TRACE: &str = "denomination_trace"; + /// Wrapped IbcEvent #[derive( Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema, PartialEq, Eq, @@ -45,23 +50,42 @@ impl std::fmt::Display for IbcEvent { } } +/// IBC shielded transfer +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +pub struct IbcShieldedTransfer { + /// The IBC event type + pub transfer: crate::types::token::Transfer, + /// The attributes of the IBC event + pub masp_tx: masp_primitives::transaction::Transaction, +} + #[cfg(any(feature = "abciplus", feature = "abcipp"))] mod ibc_rs_conversion { use std::collections::HashMap; + use std::str::FromStr; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; + use data_encoding::HEXUPPER; use thiserror::Error; - use super::IbcEvent; + use super::{IbcEvent, IbcShieldedTransfer, EVENT_TYPE_PACKET}; + use crate::ibc::applications::transfer::{Memo, PrefixedDenom, TracePath}; use crate::ibc::core::events::{ Error as IbcEventError, IbcEvent as RawIbcEvent, }; use crate::tendermint_proto::abci::Event as AbciEvent; + use crate::types::masp::PaymentAddress; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { #[error("IBC event error: {0}")] IbcEvent(IbcEventError), + #[error("IBC transfer memo HEX decoding error: {0}")] + DecodingHex(data_encoding::DecodeError), + #[error("IBC transfer memo decoding error: {0}")] + DecodingShieldedTransfer(std::io::Error), } /// Conversion functions result @@ -84,6 +108,66 @@ mod ibc_rs_conversion { }) } } + + /// Returns the trace path and the token string if the denom is an IBC + /// denom. 
+ pub fn is_ibc_denom(denom: impl AsRef) -> Option<(TracePath, String)> { + let prefixed_denom = PrefixedDenom::from_str(denom.as_ref()).ok()?; + if prefixed_denom.trace_path.is_empty() { + return None; + } + // The base token isn't decoded because it could be non Namada token + Some(( + prefixed_denom.trace_path, + prefixed_denom.base_denom.to_string(), + )) + } + + impl From for Memo { + fn from(shielded: IbcShieldedTransfer) -> Self { + let bytes = shielded.serialize_to_vec(); + HEXUPPER.encode(&bytes).into() + } + } + + impl TryFrom for IbcShieldedTransfer { + type Error = Error; + + fn try_from(memo: Memo) -> Result { + let bytes = HEXUPPER + .decode(memo.as_ref().as_bytes()) + .map_err(Error::DecodingHex)?; + Self::try_from_slice(&bytes) + .map_err(Error::DecodingShieldedTransfer) + } + } + + /// Get the shielded transfer from the memo + pub fn get_shielded_transfer( + event: &IbcEvent, + ) -> Result> { + if event.event_type != EVENT_TYPE_PACKET { + // This event is not for receiving a token + return Ok(None); + } + let is_success = + event.attributes.get("success") == Some(&"true".to_string()); + let receiver = event.attributes.get("receiver"); + let is_shielded = if let Some(receiver) = receiver { + PaymentAddress::from_str(receiver).is_ok() + } else { + false + }; + if !is_success || !is_shielded { + return Ok(None); + } + + event + .attributes + .get("memo") + .map(|memo| IbcShieldedTransfer::try_from(Memo::from(memo.clone()))) + .transpose() + } } #[cfg(any(feature = "abciplus", feature = "abcipp"))] diff --git a/core/src/types/key/common.rs b/core/src/types/key/common.rs index b3c4f3a52f..9ca0bdaffc 100644 --- a/core/src/types/key/common.rs +++ b/core/src/types/key/common.rs @@ -5,6 +5,7 @@ use std::fmt::Display; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; @@ -48,19 +49,19 @@ impl 
super::PublicKey for PublicKey { pk: &PK, ) -> Result { if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParsePublicKeyError::InvalidEncoding) } else if PK::TYPE == ed25519::PublicKey::TYPE { Ok(Self::Ed25519( ed25519::PublicKey::try_from_slice( - pk.try_to_vec().unwrap().as_slice(), + pk.serialize_to_vec().as_slice(), ) .map_err(ParsePublicKeyError::InvalidEncoding)?, )) } else if PK::TYPE == secp256k1::PublicKey::TYPE { Ok(Self::Secp256k1( secp256k1::PublicKey::try_from_slice( - pk.try_to_vec().unwrap().as_slice(), + pk.serialize_to_vec().as_slice(), ) .map_err(ParsePublicKeyError::InvalidEncoding)?, )) @@ -72,7 +73,7 @@ impl super::PublicKey for PublicKey { impl Display for PublicKey { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", HEXLOWER.encode(&self.try_to_vec().unwrap())) + write!(f, "{}", HEXLOWER.encode(&self.serialize_to_vec())) } } @@ -174,19 +175,19 @@ impl super::SecretKey for SecretKey { sk: &SK, ) -> Result { if SK::TYPE == Self::TYPE { - Self::try_from_slice(sk.try_to_vec().unwrap().as_ref()) + Self::try_from_slice(sk.serialize_to_vec().as_ref()) .map_err(ParseSecretKeyError::InvalidEncoding) } else if SK::TYPE == ed25519::SecretKey::TYPE { Ok(Self::Ed25519( ed25519::SecretKey::try_from_slice( - sk.try_to_vec().unwrap().as_ref(), + sk.serialize_to_vec().as_ref(), ) .map_err(ParseSecretKeyError::InvalidEncoding)?, )) } else if SK::TYPE == secp256k1::SecretKey::TYPE { Ok(Self::Secp256k1( secp256k1::SecretKey::try_from_slice( - sk.try_to_vec().unwrap().as_ref(), + sk.serialize_to_vec().as_ref(), ) .map_err(ParseSecretKeyError::InvalidEncoding)?, )) @@ -207,7 +208,7 @@ impl RefTo for SecretKey { impl Display for SecretKey { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", HEXLOWER.encode(&self.try_to_vec().unwrap())) + write!(f, "{}", HEXLOWER.encode(&self.serialize_to_vec())) } } 
@@ -230,6 +231,7 @@ impl FromStr for SecretKey { Eq, PartialEq, PartialOrd, + Ord, Hash, Serialize, Deserialize, @@ -263,19 +265,19 @@ impl super::Signature for Signature { sig: &SIG, ) -> Result { if SIG::TYPE == Self::TYPE { - Self::try_from_slice(sig.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(sig.serialize_to_vec().as_slice()) .map_err(ParseSignatureError::InvalidEncoding) } else if SIG::TYPE == ed25519::Signature::TYPE { Ok(Self::Ed25519( ed25519::Signature::try_from_slice( - sig.try_to_vec().unwrap().as_slice(), + sig.serialize_to_vec().as_slice(), ) .map_err(ParseSignatureError::InvalidEncoding)?, )) } else if SIG::TYPE == secp256k1::Signature::TYPE { Ok(Self::Secp256k1( secp256k1::Signature::try_from_slice( - sig.try_to_vec().unwrap().as_slice(), + sig.serialize_to_vec().as_slice(), ) .map_err(ParseSignatureError::InvalidEncoding)?, )) diff --git a/core/src/types/key/dkg_session_keys.rs b/core/src/types/key/dkg_session_keys.rs index f2cafb639c..ccca82aeba 100644 --- a/core/src/types/key/dkg_session_keys.rs +++ b/core/src/types/key/dkg_session_keys.rs @@ -1,12 +1,14 @@ //! 
Utilities around the DKG session keys use std::cmp::Ordering; +use std::collections::BTreeMap; use std::fmt::Display; -use std::io::{Error, ErrorKind}; +use std::io::{Error, ErrorKind, Read}; use std::str::FromStr; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use serde::{Deserialize, Serialize}; @@ -51,8 +53,8 @@ impl BorshSerialize for DkgKeypair { } impl BorshDeserialize for DkgKeypair { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let kp_bytes: Vec = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let kp_bytes: Vec = BorshDeserialize::deserialize_reader(reader)?; let kp: ferveo_common::Keypair = CanonicalDeserialize::deserialize(kp_bytes.as_slice()) .map_err(|err| Error::new(ErrorKind::InvalidInput, err))?; @@ -111,8 +113,8 @@ impl BorshSerialize for DkgPublicKey { } impl BorshDeserialize for DkgPublicKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let pk_bytes: Vec = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let pk_bytes: Vec = BorshDeserialize::deserialize_reader(reader)?; let pk: ferveo_common::PublicKey = CanonicalDeserialize::deserialize(pk_bytes.as_slice()) .map_err(|err| Error::new(ErrorKind::InvalidInput, err))?; @@ -122,7 +124,7 @@ impl BorshDeserialize for DkgPublicKey { impl BorshSchema for DkgPublicKey { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -140,9 +142,7 @@ impl BorshSchema for DkgPublicKey { impl Display for DkgPublicKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let vec = self - .try_to_vec() - .expect("Encoding public key shouldn't fail"); + let vec = self.serialize_to_vec(); write!(f, "{}", HEXLOWER.encode(&vec)) 
} } diff --git a/core/src/types/key/ed25519.rs b/core/src/types/key/ed25519.rs index faf6076ea2..48460a2d2f 100644 --- a/core/src/types/key/ed25519.rs +++ b/core/src/types/key/ed25519.rs @@ -1,16 +1,19 @@ //! Ed25519 keys and related functionality +use std::cmp::Ordering; +use std::collections::BTreeMap; use std::fmt::{Debug, Display}; use std::hash::{Hash, Hasher}; -use std::io::Write; +use std::io::{Read, Write}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; use serde::{Deserialize, Serialize}; -use zeroize::Zeroize; +use zeroize::{Zeroize, ZeroizeOnDrop}; use super::{ ParsePublicKeyError, ParseSecretKeyError, ParseSignatureError, RefTo, @@ -38,7 +41,7 @@ impl super::PublicKey for PublicKey { _ => Err(ParsePublicKeyError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParsePublicKeyError::InvalidEncoding) } else { Err(ParsePublicKeyError::MismatchedScheme) @@ -47,17 +50,17 @@ impl super::PublicKey for PublicKey { } impl BorshDeserialize for PublicKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { Ok(PublicKey( ed25519_consensus::VerificationKey::try_from( - <[u8; PUBLIC_KEY_LENGTH] as BorshDeserialize>::deserialize( - buf, + <[u8; PUBLIC_KEY_LENGTH] as BorshDeserialize>::deserialize_reader( + reader, )? 
- .as_ref(), + .as_ref(), ) - .map_err(|e| { - std::io::Error::new(std::io::ErrorKind::InvalidInput, e) - })?, + .map_err(|e| { + std::io::Error::new(std::io::ErrorKind::InvalidInput, e) + })?, )) } } @@ -70,7 +73,7 @@ impl BorshSerialize for PublicKey { impl BorshSchema for PublicKey { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -125,7 +128,7 @@ impl FromStr for PublicKey { } /// Ed25519 secret key -#[derive(Debug, Serialize, Deserialize, Zeroize)] +#[derive(Debug, Serialize, Deserialize, Zeroize, ZeroizeOnDrop)] pub struct SecretKey(pub Box); impl super::SecretKey for SecretKey { @@ -142,7 +145,7 @@ impl super::SecretKey for SecretKey { _ => Err(ParseSecretKeyError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParseSecretKeyError::InvalidEncoding) } else { Err(ParseSecretKeyError::MismatchedScheme) @@ -165,17 +168,17 @@ impl Clone for SecretKey { } impl BorshDeserialize for SecretKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { Ok(SecretKey(Box::new( ed25519_consensus::SigningKey::try_from( - <[u8; SECRET_KEY_LENGTH] as BorshDeserialize>::deserialize( - buf, + <[u8; SECRET_KEY_LENGTH] as BorshDeserialize>::deserialize_reader( + reader, )? 
- .as_ref(), + .as_ref(), ) - .map_err(|e| { - std::io::Error::new(std::io::ErrorKind::InvalidInput, e) - })?, + .map_err(|e| { + std::io::Error::new(std::io::ErrorKind::InvalidInput, e) + })?, ))) } } @@ -188,7 +191,7 @@ impl BorshSerialize for SecretKey { impl BorshSchema for SecretKey { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -223,12 +226,6 @@ impl FromStr for SecretKey { } } -impl Drop for SecretKey { - fn drop(&mut self) { - self.0.zeroize(); - } -} - /// Ed25519 signature #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct Signature(pub ed25519_consensus::Signature); @@ -245,7 +242,7 @@ impl super::Signature for Signature { _ => Err(ParseSignatureError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParseSignatureError::InvalidEncoding) } else { Err(ParseSignatureError::MismatchedScheme) @@ -254,10 +251,10 @@ impl super::Signature for Signature { } impl BorshDeserialize for Signature { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { Ok(Signature( ed25519_consensus::Signature::try_from( - <[u8; SIGNATURE_LENGTH] as BorshDeserialize>::deserialize(buf)? + <[u8; SIGNATURE_LENGTH] as BorshDeserialize>::deserialize_reader(reader)? 
.as_ref(), ) .map_err(|e| { @@ -275,7 +272,7 @@ impl BorshSerialize for Signature { impl BorshSchema for Signature { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, @@ -305,6 +302,12 @@ impl PartialOrd for Signature { } } +impl Ord for Signature { + fn cmp(&self, other: &Self) -> Ordering { + self.0.to_bytes().cmp(&other.0.to_bytes()) + } +} + /// An implementation of the Ed25519 signature scheme #[derive( Debug, diff --git a/core/src/types/key/mod.rs b/core/src/types/key/mod.rs index 1287956b13..f6d226e2f4 100644 --- a/core/src/types/key/mod.rs +++ b/core/src/types/key/mod.rs @@ -11,6 +11,7 @@ use std::hash::Hash; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXUPPER; use lazy_map::LazyMap; use namada_macros::StorageKeys; @@ -123,6 +124,8 @@ pub enum VerifySigError { MissingData, #[error("Signature belongs to a different scheme from the public key.")] MismatchedScheme, + #[error("Signature verification went out of gas")] + OutOfGas, } #[allow(missing_docs)] @@ -211,7 +214,7 @@ pub trait Signature: sig: &SIG, ) -> Result { if SIG::TYPE == Self::TYPE { - let sig_arr = sig.try_to_vec().unwrap(); + let sig_arr = sig.serialize_to_vec(); let res = Self::try_from_slice(sig_arr.as_ref()); res.map_err(ParseSignatureError::InvalidEncoding) } else { @@ -247,7 +250,7 @@ pub trait PublicKey: pk: &PK, ) -> Result { if Self::TYPE == PK::TYPE { - let pk_arr = pk.try_to_vec().unwrap(); + let pk_arr = pk.serialize_to_vec(); let res = Self::try_from_slice(pk_arr.as_ref()); res.map_err(ParsePublicKeyError::InvalidEncoding) } else { @@ -283,7 +286,7 @@ pub trait SecretKey: sk: &SK, ) -> Result { if SK::TYPE == Self::TYPE { - let sk_vec = sk.try_to_vec().unwrap(); + let sk_vec = sk.serialize_to_vec(); let res = Self::try_from_slice(sk_vec.as_ref()); 
res.map_err(ParseSecretKeyError::InvalidEncoding) } else { @@ -440,8 +443,7 @@ pub enum PkhFromStringError { impl From<&PK> for PublicKeyHash { fn from(pk: &PK) -> Self { - let pk_bytes = - pk.try_to_vec().expect("Public key encoding shouldn't fail"); + let pk_bytes = pk.serialize_to_vec(); let full_hash = Sha256::digest(&pk_bytes); // take first 20 bytes of the hash let mut hash: [u8; PKH_LEN] = Default::default(); @@ -630,10 +632,7 @@ macro_rules! sigscheme_test { let mut rng: ThreadRng = thread_rng(); let keypair = <$type>::generate(&mut rng); - println!( - "keypair {:?}", - keypair.try_to_vec().unwrap().as_slice() - ); + println!("keypair {:?}", keypair.serialize_to_vec().as_slice()); } /// Run `cargo test gen_keypair -- --nocapture` to generate a /// new keypair. @@ -692,17 +691,15 @@ mod more_tests { fn zeroize_keypair_secp256k1() { use rand::thread_rng; - let mut sk = secp256k1::SigScheme::generate(&mut thread_rng()); - let sk_scalar = sk.0.to_scalar_ref(); - let len = sk_scalar.0.len(); - let ptr = sk_scalar.0.as_ref().as_ptr(); - - let original_data = sk_scalar.0; - + let sk = secp256k1::SigScheme::generate(&mut thread_rng()); + let (ptr, original_data) = { + let sk_scalar = sk.0.as_scalar_primitive().as_ref(); + (sk_scalar.as_ptr(), sk_scalar.to_owned()) + }; drop(sk); - assert_ne!(&original_data, unsafe { - core::slice::from_raw_parts(ptr, len) + assert_ne!(original_data.as_slice(), unsafe { + core::slice::from_raw_parts(ptr, secp256k1::SECRET_KEY_SIZE) }); } } diff --git a/core/src/types/key/secp256k1.rs b/core/src/types/key/secp256k1.rs index 6fde8af5cd..7d823d5305 100644 --- a/core/src/types/key/secp256k1.rs +++ b/core/src/types/key/secp256k1.rs @@ -1,17 +1,19 @@ //! 
secp256k1 keys and related functionality use std::cmp::Ordering; +use std::collections::BTreeMap; use std::fmt; use std::fmt::{Debug, Display}; use std::hash::{Hash, Hasher}; -use std::io::{ErrorKind, Write}; +use std::io::{ErrorKind, Read, Write}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; -use ethabi::ethereum_types::U256; use ethabi::Token; -use libsecp256k1::RecoveryId; +use k256::ecdsa::RecoveryId; +use k256::elliptic_curve::sec1::ToEncodedPoint; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; use serde::de::{Error, SeqAccess, Visitor}; @@ -22,7 +24,6 @@ use super::{ ParsePublicKeyError, ParseSecretKeyError, ParseSignatureError, RefTo, SchemeType, SigScheme as SigSchemeTrait, SignableBytes, VerifySigError, }; -use crate::hints; use crate::types::eth_abi::Encode; use crate::types::ethereum_events::EthAddress; use crate::types::key::StorageHasher; @@ -30,11 +31,16 @@ use crate::types::key::StorageHasher; /// The provided constant is for a traditional /// signature on this curve. For Ethereum, an extra byte is included /// that prevents malleability attacks. 
-pub const SIGNATURE_LENGTH: usize = libsecp256k1::util::SIGNATURE_SIZE + 1; +pub const SIGNATURE_SIZE: usize = 64 + 1; /// secp256k1 public key #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct PublicKey(pub libsecp256k1::PublicKey); +pub struct PublicKey(pub k256::PublicKey); + +/// Size of a compressed public key bytes +const COMPRESSED_PUBLIC_KEY_SIZE: usize = 33; +/// Size of a secret key bytes +pub(crate) const SECRET_KEY_SIZE: usize = 32; impl super::PublicKey for PublicKey { const TYPE: SchemeType = SigScheme::TYPE; @@ -48,7 +54,7 @@ impl super::PublicKey for PublicKey { _ => Err(ParsePublicKeyError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParsePublicKeyError::InvalidEncoding) } else { Err(ParsePublicKeyError::MismatchedScheme) @@ -57,42 +63,37 @@ impl super::PublicKey for PublicKey { } impl BorshDeserialize for PublicKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { // deserialize the bytes first - let pk = libsecp256k1::PublicKey::parse_compressed( - buf.get(0..libsecp256k1::util::COMPRESSED_PUBLIC_KEY_SIZE) - .ok_or_else(|| std::io::Error::from(ErrorKind::UnexpectedEof))? 
- .try_into() - .unwrap(), - ) - .map_err(|e| { + let mut key_buf = [0u8; COMPRESSED_PUBLIC_KEY_SIZE]; + reader.read_exact(&mut key_buf[..])?; + let pk = k256::PublicKey::from_sec1_bytes(&key_buf).map_err(|e| { std::io::Error::new( ErrorKind::InvalidInput, format!("Error decoding secp256k1 public key: {}", e), ) })?; - *buf = &buf[libsecp256k1::util::COMPRESSED_PUBLIC_KEY_SIZE..]; Ok(PublicKey(pk)) } } impl BorshSerialize for PublicKey { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { - writer.write_all(&self.0.serialize_compressed())?; + writer.write_all(&self.0.to_sec1_bytes())?; Ok(()) } } impl BorshSchema for PublicKey { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, ) { // Encoded as `[u8; COMPRESSED_PUBLIC_KEY_SIZE]` let elements = "u8".into(); - let length = libsecp256k1::util::COMPRESSED_PUBLIC_KEY_SIZE as u32; + let length = COMPRESSED_PUBLIC_KEY_SIZE as u32; let definition = borsh::schema::Definition::Array { elements, length }; definitions.insert(Self::declaration(), definition); } @@ -105,29 +106,25 @@ impl BorshSchema for PublicKey { #[allow(clippy::derived_hash_with_manual_eq)] impl Hash for PublicKey { fn hash(&self, state: &mut H) { - self.0.serialize_compressed().hash(state); + self.0.to_sec1_bytes().hash(state); } } impl PartialOrd for PublicKey { fn partial_cmp(&self, other: &Self) -> Option { - self.0 - .serialize_compressed() - .partial_cmp(&other.0.serialize_compressed()) + self.0.to_sec1_bytes().partial_cmp(&other.0.to_sec1_bytes()) } } impl Ord for PublicKey { fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.0 - .serialize_compressed() - .cmp(&other.0.serialize_compressed()) + self.0.to_sec1_bytes().cmp(&other.0.to_sec1_bytes()) } } impl Display for PublicKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", HEXLOWER.encode(&self.0.serialize_compressed())) + 
write!(f, "{}", HEXLOWER.encode(&self.0.to_sec1_bytes())) } } @@ -143,8 +140,8 @@ impl FromStr for PublicKey { } } -impl From for PublicKey { - fn from(pk: libsecp256k1::PublicKey) -> Self { +impl From for PublicKey { + fn from(pk: k256::PublicKey) -> Self { Self(pk) } } @@ -154,9 +151,7 @@ impl From<&PublicKey> for EthAddress { use tiny_keccak::Hasher; let mut hasher = tiny_keccak::Keccak::v256(); - // We're removing the first byte with - // `libsecp256k1::util::TAG_PUBKEY_FULL` - let pk_bytes = &pk.0.serialize()[1..]; + let pk_bytes = &pk.0.to_encoded_point(false).to_bytes()[1..]; hasher.update(pk_bytes); let mut output = [0_u8; 32]; hasher.finalize(&mut output); @@ -168,7 +163,7 @@ impl From<&PublicKey> for EthAddress { /// Secp256k1 secret key #[derive(Debug, Clone)] -pub struct SecretKey(pub Box); +pub struct SecretKey(pub Box); impl super::SecretKey for SecretKey { type PublicKey = PublicKey; @@ -184,7 +179,7 @@ impl super::SecretKey for SecretKey { _ => Err(ParseSecretKeyError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParseSecretKeyError::InvalidEncoding) } else { Err(ParseSecretKeyError::MismatchedScheme) @@ -197,7 +192,7 @@ impl Serialize for SecretKey { where S: Serializer, { - let arr = self.0.serialize(); + let arr: [u8; SECRET_KEY_SIZE] = self.0.to_bytes().into(); serde::Serialize::serialize(&arr, serializer) } } @@ -207,47 +202,46 @@ impl<'de> Deserialize<'de> for SecretKey { where D: serde::Deserializer<'de>, { - let arr_res: [u8; libsecp256k1::util::SECRET_KEY_SIZE] = + let arr_res: [u8; SECRET_KEY_SIZE] = serde::Deserialize::deserialize(deserializer)?; - let key = libsecp256k1::SecretKey::parse_slice(&arr_res) - .map_err(D::Error::custom); + let key = + k256::SecretKey::from_slice(&arr_res).map_err(D::Error::custom); Ok(SecretKey(Box::new(key.unwrap()))) } } impl BorshDeserialize for SecretKey { - fn 
deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { // deserialize the bytes first - Ok(SecretKey(Box::new( - libsecp256k1::SecretKey::parse( - &(BorshDeserialize::deserialize(buf)?), + let bytes: [u8; SECRET_KEY_SIZE] = + BorshDeserialize::deserialize_reader(reader)?; + let sk = k256::SecretKey::from_slice(&bytes).map_err(|e| { + std::io::Error::new( + ErrorKind::InvalidInput, + format!("Error decoding secp256k1 secret key: {}", e), ) - .map_err(|e| { - std::io::Error::new( - ErrorKind::InvalidInput, - format!("Error decoding secp256k1 secret key: {}", e), - ) - })?, - ))) + })?; + Ok(SecretKey(Box::new(sk))) } } impl BorshSerialize for SecretKey { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { - BorshSerialize::serialize(&self.0.serialize(), writer) + let bytes: [u8; SECRET_KEY_SIZE] = self.0.to_bytes().into(); + BorshSerialize::serialize(&bytes, writer) } } impl BorshSchema for SecretKey { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, ) { // Encoded as `[u8; SECRET_KEY_SIZE]` let elements = "u8".into(); - let length = libsecp256k1::util::SECRET_KEY_SIZE as u32; + let length = SECRET_KEY_SIZE as u32; let definition = borsh::schema::Definition::Array { elements, length }; definitions.insert(Self::declaration(), definition); } @@ -259,7 +253,7 @@ impl BorshSchema for SecretKey { impl Display for SecretKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", HEXLOWER.encode(&self.0.serialize())) + write!(f, "{}", HEXLOWER.encode(&self.0.to_bytes())) } } @@ -277,13 +271,13 @@ impl FromStr for SecretKey { impl RefTo for SecretKey { fn ref_to(&self) -> PublicKey { - PublicKey(libsecp256k1::PublicKey::from_secret_key(&self.0)) + PublicKey(self.0.public_key()) } } /// Secp256k1 signature #[derive(Clone, Debug, Eq, PartialEq)] -pub struct 
Signature(pub libsecp256k1::Signature, pub RecoveryId); +pub struct Signature(pub k256::ecdsa::Signature, pub RecoveryId); impl super::Signature for Signature { const TYPE: SchemeType = SigScheme::TYPE; @@ -297,7 +291,7 @@ impl super::Signature for Signature { _ => Err(ParseSignatureError::MismatchedScheme), }) } else if PK::TYPE == Self::TYPE { - Self::try_from_slice(pk.try_to_vec().unwrap().as_slice()) + Self::try_from_slice(pk.serialize_to_vec().as_slice()) .map_err(ParseSignatureError::InvalidEncoding) } else { Err(ParseSignatureError::MismatchedScheme) @@ -305,15 +299,14 @@ impl super::Signature for Signature { } } -// Would ideally like Serialize, Deserialize to be implemented in libsecp256k1, +// Would ideally like Serialize, Deserialize to be implemented in k256, // may try to do so and merge upstream in the future. - impl Serialize for Signature { fn serialize(&self, serializer: S) -> Result where S: Serializer, { - let arr = self.0.serialize(); + let arr = self.0.to_bytes(); // TODO: implement the line below, currently cannot support [u8; 64] // serde::Serialize::serialize(&arr, serializer) @@ -321,7 +314,7 @@ impl Serialize for Signature { for elem in &arr[..] 
{ seq.serialize_element(elem)?; } - seq.serialize_element(&self.1.serialize())?; + seq.serialize_element(&self.1.to_byte())?; seq.end() } } @@ -334,22 +327,25 @@ impl<'de> Deserialize<'de> for Signature { struct ByteArrayVisitor; impl<'de> Visitor<'de> for ByteArrayVisitor { - type Value = [u8; SIGNATURE_LENGTH]; + type Value = [u8; SIGNATURE_SIZE]; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str(&format!( "an array of length {}", - SIGNATURE_LENGTH, + SIGNATURE_SIZE, )) } - fn visit_seq(self, mut seq: A) -> Result<[u8; 65], A::Error> + fn visit_seq( + self, + mut seq: A, + ) -> Result<[u8; SIGNATURE_SIZE], A::Error> where A: SeqAccess<'de>, { - let mut arr = [0u8; SIGNATURE_LENGTH]; + let mut arr = [0u8; SIGNATURE_SIZE]; #[allow(clippy::needless_range_loop)] - for i in 0..SIGNATURE_LENGTH { + for i in 0..SIGNATURE_SIZE { arr[i] = seq .next_element()? .ok_or_else(|| Error::invalid_length(i, &self))?; @@ -358,64 +354,59 @@ impl<'de> Deserialize<'de> for Signature { } } - let arr_res = deserializer - .deserialize_tuple(SIGNATURE_LENGTH, ByteArrayVisitor)?; + let arr_res = + deserializer.deserialize_tuple(SIGNATURE_SIZE, ByteArrayVisitor)?; let sig_array: [u8; 64] = arr_res[..64].try_into().unwrap(); - let sig = libsecp256k1::Signature::parse_standard(&sig_array) + let sig = k256::ecdsa::Signature::from_slice(&sig_array) .map_err(D::Error::custom); Ok(Signature( sig.unwrap(), - RecoveryId::parse(arr_res[64]).map_err(Error::custom)?, + RecoveryId::from_byte(arr_res[64]) + .ok_or_else(|| Error::custom("Invalid recovery byte"))?, )) } } impl BorshDeserialize for Signature { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { // deserialize the bytes first - let (sig_bytes, recovery_id) = BorshDeserialize::deserialize(buf)?; + let (sig_bytes, recovery_id): ([u8; 64], u8) = + BorshDeserialize::deserialize_reader(reader)?; Ok(Signature( - 
libsecp256k1::Signature::parse_standard(&sig_bytes).map_err( - |e| { - std::io::Error::new( - ErrorKind::InvalidInput, - format!("Error decoding secp256k1 signature: {}", e), - ) - }, - )?, - RecoveryId::parse(recovery_id).map_err(|e| { + k256::ecdsa::Signature::from_slice(&sig_bytes).map_err(|e| { std::io::Error::new( ErrorKind::InvalidInput, format!("Error decoding secp256k1 signature: {}", e), ) })?, + RecoveryId::from_byte(recovery_id).ok_or_else(|| { + std::io::Error::new( + ErrorKind::InvalidInput, + "Error decoding secp256k1 signature recovery byte", + ) + })?, )) } } impl BorshSerialize for Signature { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { - BorshSerialize::serialize( - &(self.0.serialize(), self.1.serialize()), - writer, - ) + let sig_bytes: [u8; 64] = self.0.to_bytes().into(); + BorshSerialize::serialize(&(sig_bytes, self.1.to_byte()), writer) } } impl BorshSchema for Signature { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, ) { // Encoded as `([u8; SIGNATURE_SIZE], u8)` - let signature = - <[u8; libsecp256k1::util::SIGNATURE_SIZE]>::declaration(); - <[u8; libsecp256k1::util::SIGNATURE_SIZE]>::add_definitions_recursively( - definitions, - ); + let signature = <[u8; SIGNATURE_SIZE]>::declaration(); + <[u8; SIGNATURE_SIZE]>::add_definitions_recursively(definitions); let recovery = "u8".into(); let definition = borsh::schema::Definition::Tuple { elements: vec![signature, recovery], @@ -429,52 +420,42 @@ impl BorshSchema for Signature { } impl Signature { - const S_MALLEABILITY_FIX: U256 = U256([ - 13822214165235122497, - 13451932020343611451, - 18446744073709551614, - 18446744073709551615, - ]); - // these constants are pulled from OpenZeppelin's ECDSA code - const S_MALLEABILITY_THRESHOLD: U256 = U256([ - 16134479119472337056, - 6725966010171805725, - 18446744073709551615, - 9223372036854775807, - ]); + /// 
OpenZeppelin consumes v values in the range [27, 28], + /// rather than [0, 1], the latter returned by `k256`. const V_FIX: u8 = 27; + /// Given a v signature parameter, flip its value + /// (i.e. negate the input). + /// + /// __INVARIANT__: The value of `v` must be in the range [0, 1]. + #[inline(always)] + fn flip_v(v: u8) -> u8 { + debug_assert!(v == 0 || v == 1); + v ^ 1 + } + /// Returns the `r`, `s` and `v` parameters of this [`Signature`], /// destroying the original value in the process. /// /// The returned signature is unique (i.e. non-malleable). This /// ensures OpenZeppelin considers the signature valid. pub fn into_eth_rsv(self) -> ([u8; 32], [u8; 32], u8) { - // assuming the value of v is either 0 or 1, - // the output is essentially the negated input - #[inline(always)] - fn flip_v(v: u8) -> u8 { - v ^ 1 - } - - let (v, s) = { - let s1: U256 = self.0.s.b32().into(); - let v = self.1.serialize(); - let (v, non_malleable_s) = - if hints::unlikely(s1 > Self::S_MALLEABILITY_THRESHOLD) { - // this code path seems quite rare. we often - // get non-malleable signatures, which is good - (flip_v(v) + Self::V_FIX, Self::S_MALLEABILITY_FIX - s1) - } else { - (v + Self::V_FIX, s1) - }; - let mut non_malleable_s: [u8; 32] = non_malleable_s.into(); - self.0.s.fill_b32(&mut non_malleable_s); - (v, self.0.s.b32()) + // A recovery id (dubbed v) is used by secp256k1 signatures + // to signal verifying code if a signature had been malleable + // or not (based on whether the s field of the signature was odd + // or not). In the `k256` dependency, the low-bit signifies the + // y-coordinate, associated with s, being odd. + let v = self.1.to_byte() & 1; + // Check if s needs to be normalized. In case it does, + // we must flip the value of v (e.g. 0 -> 1). 
+ let (s, v) = if let Some(signature) = self.0.normalize_s() { + let normalized_s = signature.s(); + (normalized_s, Self::flip_v(v)) + } else { + (self.0.s(), v) }; - let r = self.0.r.b32(); - - (r, s, v) + let r = self.0.r(); + (r.to_bytes().into(), s.to_bytes().into(), v + Self::V_FIX) } } @@ -491,16 +472,14 @@ impl Encode<1> for Signature { #[allow(clippy::derived_hash_with_manual_eq)] impl Hash for Signature { fn hash(&self, state: &mut H) { - self.0.serialize().hash(state); + self.0.to_bytes().hash(state); } } impl PartialOrd for Signature { fn partial_cmp(&self, other: &Self) -> Option { - match self.0.serialize().partial_cmp(&other.0.serialize()) { - Some(Ordering::Equal) => { - self.1.serialize().partial_cmp(&other.1.serialize()) - } + match self.0.to_bytes().partial_cmp(&other.0.to_bytes()) { + Some(Ordering::Equal) => self.1.partial_cmp(&other.1), res => res, } } @@ -516,21 +495,20 @@ impl TryFrom<&[u8; 65]> for Signature { type Error = ParseSignatureError; fn try_from(sig: &[u8; 65]) -> Result { - let sig_bytes = sig[..64].try_into().unwrap(); - let recovery_id = RecoveryId::parse(sig[64]).map_err(|err| { + let recovery_id = RecoveryId::from_byte(sig[64]).ok_or_else(|| { ParseSignatureError::InvalidEncoding(std::io::Error::new( ErrorKind::Other, - err, + "Invalid recovery byte", )) })?; - libsecp256k1::Signature::parse_standard(&sig_bytes) - .map(|sig| Self(sig, recovery_id)) - .map_err(|err| { + let sig = + k256::ecdsa::Signature::from_slice(&sig[..64]).map_err(|err| { ParseSignatureError::InvalidEncoding(std::io::Error::new( ErrorKind::Other, err, )) - }) + })?; + Ok(Self(sig, recovery_id)) } } @@ -563,12 +541,12 @@ impl super::SigScheme for SigScheme { where R: CryptoRng + RngCore, { - SecretKey(Box::new(libsecp256k1::SecretKey::random(csprng))) + SecretKey(Box::new(k256::SecretKey::random(csprng))) } fn from_bytes(sk: [u8; 32]) -> SecretKey { SecretKey(Box::new( - libsecp256k1::SecretKey::parse_slice(&sk) + k256::SecretKey::from_slice(&sk) 
.expect("Secret key parsing should not fail."), )) } @@ -580,20 +558,12 @@ impl super::SigScheme for SigScheme { where H: 'static + StorageHasher, { - #[cfg(not(any(test, feature = "secp256k1-sign")))] - { - // to avoid `unused-variables` warn - let _ = (keypair, data); - panic!("\"secp256k1-sign\" feature must be enabled"); - } - - #[cfg(any(test, feature = "secp256k1-sign"))] - { - let message = - libsecp256k1::Message::parse(&data.signable_hash::()); - let (sig, recovery_id) = libsecp256k1::sign(&message, &keypair.0); - Signature(sig, recovery_id) - } + let sig_key = k256::ecdsa::SigningKey::from(keypair.0.as_ref()); + let msg = data.signable_hash::(); + let (sig, recovery_id) = sig_key + .sign_prehash_recoverable(&msg) + .expect("Must be able to sign"); + Signature(sig, recovery_id) } fn verify_signature_with_hasher( @@ -604,21 +574,23 @@ impl super::SigScheme for SigScheme { where H: 'static + StorageHasher, { - let message = libsecp256k1::Message::parse(&data.signable_hash::()); - let is_valid = libsecp256k1::verify(&message, &sig.0, &pk.0); - if is_valid { - Ok(()) - } else { - Err(VerifySigError::SigVerifyError(format!( + use k256::ecdsa::signature::hazmat::PrehashVerifier; + + let vrf_key = k256::ecdsa::VerifyingKey::from(&pk.0); + let msg = data.signable_hash::(); + vrf_key.verify_prehash(&msg, &sig.0).map_err(|e| { + VerifySigError::SigVerifyError(format!( "Error verifying secp256k1 signature: {}", - libsecp256k1::Error::InvalidSignature - ))) - } + e + )) + }) } } #[cfg(test)] mod test { + use k256::elliptic_curve::sec1::ToEncodedPoint; + use super::*; /// test vector from https://bitcoin.stackexchange.com/a/89848 @@ -635,9 +607,9 @@ mod test { let sk_bytes = HEXLOWER.decode(SECRET_KEY_HEX.as_bytes()).unwrap(); let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap(); let pk: PublicKey = sk.ref_to(); - // We're removing the first byte with - // `libsecp256k1::util::TAG_PUBKEY_FULL` - let pk_hex = HEXLOWER.encode(&pk.0.serialize()[1..]); + // We're 
removing the first byte with tag + let pk_hex = + HEXLOWER.encode(&pk.0.to_encoded_point(false).to_bytes()[1..]); assert_eq!(expected_pk_hex, pk_hex); let eth_addr: EthAddress = (&pk).into(); @@ -653,7 +625,7 @@ mod test { let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap(); let to_sign = "test".as_bytes(); let mut signature = SigScheme::sign(&sk, to_sign); - signature.1 = RecoveryId::parse(3).expect("Test failed"); + signature.1 = RecoveryId::from_byte(3).expect("Test failed"); let sig_json = serde_json::to_string(&signature).expect("Test failed"); let sig: Signature = serde_json::from_str(&sig_json).expect("Test failed"); @@ -668,28 +640,10 @@ mod test { let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap(); let to_sign = "test".as_bytes(); let mut signature = SigScheme::sign(&sk, to_sign); - signature.1 = RecoveryId::parse(3).expect("Test failed"); - let sig_bytes = signature.try_to_vec().expect("Test failed"); + signature.1 = RecoveryId::from_byte(3).expect("Test failed"); + let sig_bytes = signature.serialize_to_vec(); let sig = Signature::try_from_slice(sig_bytes.as_slice()) .expect("Test failed"); assert_eq!(sig, signature); } - - /// Ensures we are using the right malleability consts. 
- #[test] - fn test_signature_malleability_consts() { - let s_threshold = U256::from_str_radix( - "7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0", - 16, - ) - .unwrap(); - assert_eq!(Signature::S_MALLEABILITY_THRESHOLD, s_threshold); - - let malleable_const = U256::from_str_radix( - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", - 16, - ) - .unwrap(); - assert_eq!(Signature::S_MALLEABILITY_FIX, malleable_const); - } } diff --git a/core/src/types/masp.rs b/core/src/types/masp.rs index e5ad0202b1..9083852a81 100644 --- a/core/src/types/masp.rs +++ b/core/src/types/masp.rs @@ -6,6 +6,7 @@ use std::str::FromStr; use bech32::{FromBase32, ToBase32}; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use sha2::{Digest, Sha256}; use crate::types::address::{ @@ -147,9 +148,7 @@ impl PaymentAddress { /// Hash this payment address pub fn hash(&self) -> String { - let bytes = (self.0, self.1) - .try_to_vec() - .expect("Payment address encoding shouldn't fail"); + let bytes = (self.0, self.1).serialize_to_vec(); let mut hasher = Sha256::new(); hasher.update(bytes); // hex of the first 40 chars of the hash diff --git a/core/src/types/storage.rs b/core/src/types/storage.rs index ad0c14f499..a21de88cbe 100644 --- a/core/src/types/storage.rs +++ b/core/src/types/storage.rs @@ -2,13 +2,14 @@ use std::collections::VecDeque; use std::convert::{TryFrom, TryInto}; use std::fmt::Display; -use std::io::Write; +use std::io::{Read, Write}; use std::num::ParseIntError; use std::ops::{Add, AddAssign, Deref, Div, Drop, Mul, Rem, Sub}; use std::str::FromStr; use arse_merkle_tree::InternalKey; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::{BASE32HEX_NOPAD, HEXUPPER}; use ics23::CommitmentProof; use index_set::vec::VecIndexSet; @@ -26,7 +27,7 @@ use crate::types::keccak::{KeccakHash, TryFromError}; use crate::types::time::DateTimeUtc; /// The maximum size of 
an IBC key (in bytes) allowed in merkle-ized storage -pub const IBC_KEY_LIMIT: usize = 120; +pub const IBC_KEY_LIMIT: usize = 240; #[allow(missing_docs)] #[derive(Error, Debug, Clone)] @@ -320,7 +321,7 @@ pub struct Header { impl Header { /// The number of bytes when this header is encoded pub fn encoded_len(&self) -> usize { - self.try_to_vec().unwrap().len() + self.serialize_to_vec().len() } } @@ -404,13 +405,13 @@ impl BorshSerialize for StringKey { } impl BorshDeserialize for StringKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { use std::io::ErrorKind; let (original, tree_key, length): ( Vec, InternalKey, usize, - ) = BorshDeserialize::deserialize(buf)?; + ) = BorshDeserialize::deserialize_reader(reader)?; let original: [u8; IBC_KEY_LIMIT] = original.try_into().map_err(|_| { std::io::Error::new( @@ -1035,11 +1036,12 @@ impl Epoch { /// overflow occurred. #[must_use = "this returns the result of the operation, without modifying \ the original"] - pub fn checked_sub(self, rhs: Epoch) -> Option { - if rhs.0 > self.0 { + pub fn checked_sub(self, rhs: impl Into) -> Option { + let Epoch(rhs) = rhs.into(); + if rhs > self.0 { None } else { - Some(Self(self.0 - rhs.0)) + Some(Self(self.0 - rhs)) } } diff --git a/core/src/types/time.rs b/core/src/types/time.rs index 0670392f94..341a81411c 100644 --- a/core/src/types/time.rs +++ b/core/src/types/time.rs @@ -1,7 +1,9 @@ //! Types for dealing with time and durations. 
+use std::collections::BTreeMap; use std::convert::{TryFrom, TryInto}; use std::fmt::Display; +use std::io::Read; use std::ops::{Add, Sub}; use std::str::FromStr; @@ -183,9 +185,9 @@ impl BorshSerialize for DateTimeUtc { } impl BorshDeserialize for DateTimeUtc { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { + fn deserialize_reader(reader: &mut R) -> std::io::Result { use std::io::{Error, ErrorKind}; - let raw: String = BorshDeserialize::deserialize(buf)?; + let raw: String = BorshDeserialize::deserialize_reader(reader)?; let actual = DateTime::parse_from_rfc3339(&raw) .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; Ok(Self(actual.into())) @@ -194,7 +196,7 @@ impl BorshDeserialize for DateTimeUtc { impl BorshSchema for DateTimeUtc { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, diff --git a/core/src/types/token.rs b/core/src/types/token.rs index 0ee60b4326..9e7c2d0ee7 100644 --- a/core/src/types/token.rs +++ b/core/src/types/token.rs @@ -13,8 +13,9 @@ use thiserror::Error; use super::dec::POS_DECIMAL_PRECISION; use crate::ibc::applications::transfer::Amount as IbcAmount; +use crate::ledger::storage as ledger_storage; use crate::ledger::storage_api::token::read_denom; -use crate::ledger::storage_api::{self, StorageRead}; +use crate::ledger::storage_api::{self, StorageRead, StorageWrite}; use crate::types::address::{ masp, Address, DecodeError as AddressError, InternalAddress, }; @@ -148,6 +149,7 @@ impl Amount { } /// Checked subtraction. Returns `None` on underflow. + #[must_use] pub fn checked_sub(&self, amount: Amount) -> Option { self.raw .checked_sub(amount.raw) @@ -248,6 +250,26 @@ impl Amount { pub fn from_string_precise(string: &str) -> Result { DenominatedAmount::from_str(string).map(|den| den.amount) } + + /// Multiply by a decimal [`Dec`] with the result rounded up. 
+ /// + /// # Panics + /// Panics when the `dec` is negative. + #[must_use] + pub fn mul_ceil(&self, dec: Dec) -> Self { + assert!(!dec.is_negative()); + let tot = self.raw * dec.abs(); + let denom = Uint::from(10u64.pow(POS_DECIMAL_PRECISION as u32)); + let floor_div = tot / denom; + let rem = tot % denom; + // dbg!(tot, denom, floor_div, rem); + let raw = if !rem.is_zero() { + floor_div + Self::from(1_u64) + } else { + floor_div + }; + Self { raw } + } } /// Given a number represented as `M*B^D`, then @@ -595,6 +617,20 @@ impl Mul for Amount { } } +/// A combination of Euclidean division and fractions: +/// x*(a,b) = (a*(x//b), x%b). +impl Mul<(u128, u128)> for Amount { + type Output = (Amount, Amount); + + fn mul(mut self, rhs: (u128, u128)) -> Self::Output { + let amt = Amount { + raw: (self.raw / rhs.1) * Uint::from(rhs.0), + }; + self.raw %= rhs.1; + (amt, self) + } +} + /// A combination of Euclidean division and fractions: /// x*(a,b) = (a*(x//b), x%b). impl Mul<(u64, u64)> for Amount { @@ -749,6 +785,7 @@ impl From for Uint { )] #[repr(u8)] #[allow(missing_docs)] +#[borsh(use_discriminant = true)] pub enum MaspDenom { Zero = 0, One, @@ -814,6 +851,31 @@ pub const TX_KEY_PREFIX: &str = "tx-"; pub const CONVERSION_KEY_PREFIX: &str = "conv"; /// Key segment prefix for pinned shielded transactions pub const PIN_KEY_PREFIX: &str = "pin-"; +/// Last calculated inflation value handed out +pub const MASP_LAST_INFLATION_KEY: &str = "last_inflation"; +/// The last locked ratio +pub const MASP_LAST_LOCKED_RATIO_KEY: &str = "last_locked_ratio"; +/// The key for the nominal proportional gain of a shielded pool for a given +/// asset +pub const MASP_KP_GAIN_KEY: &str = "proportional_gain"; +/// The key for the nominal derivative gain of a shielded pool for a given asset +pub const MASP_KD_GAIN_KEY: &str = "derivative_gain"; +/// The key for the locked ratio target for a given asset +pub const MASP_LOCKED_RATIO_TARGET_KEY: &str = "locked_ratio_target"; +/// The key for 
the max reward rate for a given asset +pub const MASP_MAX_REWARD_RATE_KEY: &str = "max_reward_rate"; + +/// Gets the key for the given token address, error with the given +/// message to expect if the key is not in the address +pub fn key_of_token( + token_addr: &Address, + specific_key: &str, + expect_message: &str, +) -> Key { + Key::from(token_addr.to_db_key()) + .push(&specific_key.to_owned()) + .expect(expect_message) +} /// Obtain a storage key for user's balance. pub fn balance_key(token_addr: &Address, owner: &Address) -> Key { @@ -847,6 +909,98 @@ pub fn minted_balance_key(token_addr: &Address) -> Key { .expect("Cannot obtain a storage key") } +/// Obtain the nominal proportional key for the given token +pub fn masp_kp_gain_key(token_addr: &Address) -> Key { + key_of_token(token_addr, MASP_KP_GAIN_KEY, "nominal proproitonal gains") +} + +/// Obtain the nominal derivative key for the given token +pub fn masp_kd_gain_key(token_addr: &Address) -> Key { + key_of_token(token_addr, MASP_KD_GAIN_KEY, "nominal proproitonal gains") +} + +/// The max reward rate key for the given token +pub fn masp_max_reward_rate_key(token_addr: &Address) -> Key { + key_of_token(token_addr, MASP_MAX_REWARD_RATE_KEY, "max reward rate") +} + +/// Obtain the locked target ratio key for the given token +pub fn masp_locked_ratio_target_key(token_addr: &Address) -> Key { + key_of_token( + token_addr, + MASP_LOCKED_RATIO_TARGET_KEY, + "nominal proproitonal gains", + ) +} + +/// Token parameters for each kind of asset held on chain +#[derive( + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + BorshSerialize, + BorshDeserialize, + BorshSchema, + Deserialize, + Serialize, +)] +pub struct Parameters { + /// Maximum reward rate + pub max_reward_rate: Dec, + /// Shielded Pool nominal derivative gain + pub kd_gain_nom: Dec, + /// Shielded Pool nominal proportional gain for the given token + pub kp_gain_nom: Dec, + /// Locked ratio for the given token + pub locked_ratio_target: 
Dec, +} + +impl Parameters { + /// Initialize parameters for the token in storage during the genesis block. + pub fn init_storage( + &self, + address: &Address, + wl_storage: &mut ledger_storage::WlStorage, + ) where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, + { + let Self { + max_reward_rate: max_rate, + kd_gain_nom, + kp_gain_nom, + locked_ratio_target: locked_target, + } = self; + wl_storage + .write(&masp_max_reward_rate_key(address), max_rate) + .expect("max reward rate for the given asset must be initialized"); + wl_storage + .write(&masp_locked_ratio_target_key(address), locked_target) + .expect("locked ratio must be initialized"); + wl_storage + .write(&masp_kp_gain_key(address), kp_gain_nom) + .expect("The nominal proportional gain must be initialized"); + wl_storage + .write(&masp_kd_gain_key(address), kd_gain_nom) + .expect("The nominal derivative gain must be initialized"); + } +} + +impl Default for Parameters { + fn default() -> Self { + Self { + max_reward_rate: Dec::from_str("0.1").unwrap(), + kp_gain_nom: Dec::from_str("0.1").unwrap(), + kd_gain_nom: Dec::from_str("0.1").unwrap(), + locked_ratio_target: Dec::from_str("0.1").unwrap(), + } + } +} + /// Check if the given storage key is balance key for the given token. If it is, /// returns the owner. For minted balances, use [`is_any_minted_balance_key()`]. 
pub fn is_balance_key<'a>( @@ -914,6 +1068,24 @@ pub fn is_masp_key(key: &Key) -> bool { || key.starts_with(PIN_KEY_PREFIX))) } +/// Obtain the storage key for the last locked ratio of a token +pub fn masp_last_locked_ratio_key(token_address: &Address) -> Key { + key_of_token( + token_address, + MASP_LAST_LOCKED_RATIO_KEY, + "cannot obtain storage key for the last locked ratio", + ) +} + +/// Obtain the storage key for the last inflation of a token +pub fn masp_last_inflation_key(token_address: &Address) -> Key { + key_of_token( + token_address, + MASP_LAST_INFLATION_KEY, + "cannot obtain storage key for the last inflation rate", + ) +} + /// Check if the given storage key is for a minter of a unspecified token. /// If it is, returns the token. pub fn is_any_minter_key(key: &Key) -> Option<&Address> { @@ -1159,6 +1331,17 @@ mod tests { let non_zero = Amount::from_uint(1, 0).expect("Test failed"); assert!(!non_zero.is_zero()); } + + #[test] + fn test_token_amount_mul_ceil() { + let one = Amount::from(1); + let two = Amount::from(2); + let three = Amount::from(3); + let dec = Dec::from_str("0.34").unwrap(); + assert_eq!(one.mul_ceil(dec), one); + assert_eq!(two.mul_ceil(dec), one); + assert_eq!(three.mul_ceil(dec), two); + } } /// Helpers for testing with addresses. @@ -1185,4 +1368,50 @@ pub mod testing { ) -> impl Strategy { (1..=max).prop_map(|val| Amount::from_uint(val, 0).unwrap()) } + + /// init_token_storage is useful when the initialization of the network is + /// not properly made. This properly sets up the storage such that + /// inflation calculations can be ran on the token addresses. 
We assume + /// a total supply that may not be real + pub fn init_token_storage( + wl_storage: &mut ledger_storage::WlStorage, + epochs_per_year: u64, + ) where + D: 'static + + ledger_storage::DB + + for<'iter> ledger_storage::DBIter<'iter>, + H: 'static + ledger_storage::StorageHasher, + { + use crate::ledger::parameters::storage::get_epochs_per_year_key; + use crate::types::address::tokens; + + let tokens = tokens(); + let masp_reward_keys: Vec<_> = tokens.keys().collect(); + + wl_storage + .write(&get_epochs_per_year_key(), epochs_per_year) + .unwrap(); + let params = Parameters { + max_reward_rate: Dec::from_str("0.1").unwrap(), + kd_gain_nom: Dec::from_str("0.1").unwrap(), + kp_gain_nom: Dec::from_str("0.1").unwrap(), + locked_ratio_target: Dec::zero(), + }; + + for address in masp_reward_keys { + params.init_storage(address, wl_storage); + wl_storage + .write( + &minted_balance_key(address), + Amount::native_whole(5), // arbitrary amount + ) + .unwrap(); + wl_storage + .write(&masp_last_inflation_key(address), Amount::zero()) + .expect("inflation ought to be written"); + wl_storage + .write(&masp_last_locked_ratio_key(address), Dec::zero()) + .expect("last locked set default"); + } + } } diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index bbebc85e77..3a7ba2f335 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -7,6 +7,7 @@ pub mod decrypted_tx { #[cfg(feature = "ferveo-tpke")] use ark_ec::PairingEngine; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; + use borsh_ext::BorshSerializeExt; use sha2::{Digest, Sha256}; #[derive( @@ -32,9 +33,7 @@ pub mod decrypted_tx { impl DecryptedTx { /// Produce a SHA-256 hash of this header pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize decrypted tx"), - ); + hasher.update(self.serialize_to_vec()); hasher } } diff --git 
a/core/src/types/transaction/encrypted.rs b/core/src/types/transaction/encrypted.rs index 277ec6d3fd..8b547fea88 100644 --- a/core/src/types/transaction/encrypted.rs +++ b/core/src/types/transaction/encrypted.rs @@ -3,7 +3,7 @@ /// *Not wasm compatible* #[cfg(feature = "ferveo-tpke")] pub mod encrypted_tx { - use std::io::{Error, ErrorKind, Write}; + use std::io::{Error, ErrorKind, Read, Write}; use ark_ec::PairingEngine; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -32,8 +32,10 @@ pub mod encrypted_tx { } impl borsh::de::BorshDeserialize for EncryptionKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let key: Vec = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader( + reader: &mut R, + ) -> std::io::Result { + let key: Vec = BorshDeserialize::deserialize_reader(reader)?; Ok(EncryptionKey( CanonicalDeserialize::deserialize(&*key) .map_err(|err| Error::new(ErrorKind::InvalidData, err))?, diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 8acb9e6c7e..3b50e55e05 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -22,6 +22,7 @@ use std::collections::BTreeSet; use std::fmt; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh_ext::BorshSerializeExt; pub use decrypted::*; #[cfg(feature = "ferveo-tpke")] pub use encrypted::EncryptionKey; @@ -160,7 +161,7 @@ pub enum TxType { impl TxType { /// Produce a SHA-256 hash of this header pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update(self.try_to_vec().expect("unable to serialize header")); + hasher.update(self.serialize_to_vec()); hasher } } @@ -234,7 +235,7 @@ mod test_process_tx { .set_data(Data::new("transaction data".as_bytes().to_owned())) .clone(); tx.add_section(Section::Signature(Signature::new( - vec![*tx.code_sechash(), *tx.data_sechash()], + vec![tx.raw_header_hash()], [(0, gen_keypair())].into_iter().collect(), None, ))); diff --git 
a/core/src/types/transaction/pos.rs b/core/src/types/transaction/pos.rs index fa0e3d0891..e3ea9d3a21 100644 --- a/core/src/types/transaction/pos.rs +++ b/core/src/types/transaction/pos.rs @@ -95,6 +95,30 @@ pub struct Withdraw { pub source: Option
, } +/// A redelegation of bonded tokens from one validator to another. +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshSchema, + Hash, + Eq, + Serialize, + Deserialize, +)] +pub struct Redelegation { + /// Source validator address + pub src_validator: Address, + /// Destination validator address + pub dest_validator: Address, + /// Owner (delegator) of the bonds to be redelegated + pub owner: Address, + /// The amount of tokens + pub amount: token::Amount, +} + /// A change to the validator commission rate. #[derive( Debug, diff --git a/core/src/types/transaction/protocol.rs b/core/src/types/transaction/protocol.rs index 1a51434b29..db5d8f9a23 100644 --- a/core/src/types/transaction/protocol.rs +++ b/core/src/types/transaction/protocol.rs @@ -24,10 +24,12 @@ pub struct UpdateDkgSessionKey { #[cfg(feature = "ferveo-tpke")] mod protocol_txs { - use std::io::{ErrorKind, Write}; + use std::collections::BTreeMap; + use std::io::{ErrorKind, Read, Write}; use std::path::Path; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; + use borsh_ext::BorshSerializeExt; use ferveo::dkg::pv::Message; use serde_json; @@ -77,9 +79,7 @@ mod protocol_txs { /// Produce a SHA-256 hash of this section pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize protocol"), - ); + hasher.update(self.serialize_to_vec()); hasher } } @@ -213,7 +213,7 @@ mod protocol_txs { ( $( $type:ident ),* $(,)?) => { match self { $( EthereumTxData::$type(x) => - x.try_to_vec().map(|data| (data, ProtocolTxType::$type))),* + (x.serialize_to_vec(), ProtocolTxType::$type)),* } } } @@ -225,7 +225,6 @@ mod protocol_txs { BridgePoolVext, ValSetUpdateVext, } - .expect("Should be able to borsh-serialize tx data") } /// Deserialize Ethereum protocol transaction data.
@@ -330,16 +329,9 @@ mod protocol_txs { }))); outer_tx.header.chain_id = chain_id; outer_tx.set_code(Code::new(code)); - outer_tx.set_data(Data::new( - data.try_to_vec() - .expect("Serializing request should not fail"), - )); + outer_tx.set_data(Data::new(data.serialize_to_vec())); outer_tx.add_section(Section::Signature(Signature::new( - vec![ - outer_tx.header_hash(), - *outer_tx.code_sechash(), - *outer_tx.data_sechash(), - ], + vec![outer_tx.header_hash()], [(0, signing_key.clone())].into_iter().collect(), None, ))); @@ -375,8 +367,10 @@ mod protocol_txs { } impl BorshDeserialize for DkgMessage { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let blob: Vec = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader( + reader: &mut R, + ) -> std::io::Result { + let blob: Vec = BorshDeserialize::deserialize_reader(reader)?; let json = String::from_utf8(blob).map_err(|err| { std::io::Error::new(ErrorKind::InvalidData, err) })?; @@ -389,7 +383,7 @@ mod protocol_txs { impl BorshSchema for DkgMessage { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index e9b49b0c07..4603081d3f 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -10,6 +10,7 @@ pub mod wrapper_tx { #[cfg(feature = "ferveo-tpke")] pub use ark_ec::{AffineCurve, PairingEngine}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; + use borsh_ext::BorshSerializeExt; use masp_primitives::transaction::Transaction; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -164,6 +165,21 @@ pub mod wrapper_tx { } } + /// A degenerate PoW solution type + #[derive( + Debug, + Clone, + BorshSerialize, + BorshDeserialize, + BorshSchema, + Serialize, + Deserialize, + )] + pub enum Solution { + /// No PoW solution + None, + } + /// A 
transaction with an encrypted payload, an optional shielded pool /// unshielding tx for fee payment and some non-encrypted metadata for /// inclusion and / or verification purposes @@ -190,6 +206,8 @@ pub mod wrapper_tx { /// The hash of the optional, unencrypted, unshielding transaction for /// fee payment pub unshield_section_hash: Option, + /// Mandatory 0x00 byte for deprecated field + pub pow_solution: Solution, } impl WrapperTx { @@ -211,6 +229,7 @@ pub mod wrapper_tx { epoch, gas_limit, unshield_section_hash: unshield_hash, + pow_solution: Solution::None, } } @@ -225,9 +244,7 @@ pub mod wrapper_tx { /// Produce a SHA-256 hash of this section pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec().expect("unable to serialize wrapper"), - ); + hasher.update(self.serialize_to_vec()); hasher } @@ -305,12 +322,7 @@ pub mod wrapper_tx { key: None, shielded: Some(masp_hash), }; - let data = transfer.try_to_vec().map_err(|_| { - WrapperTxErr::InvalidUnshield( - "Error while serializing the unshield transfer data" - .to_string(), - ) - })?; + let data = transfer.serialize_to_vec(); tx.set_data(Data::new(data)); tx.set_code(Code::from_hash(transfer_code_hash)); @@ -349,7 +361,7 @@ pub mod wrapper_tx { ); // Test borsh roundtrip - let borsh = limit.try_to_vec().expect("Test failed"); + let borsh = limit.serialize_to_vec(); assert_eq!( limit, BorshDeserialize::deserialize(&mut borsh.as_ref()) diff --git a/core/src/types/uint.rs b/core/src/types/uint.rs index ee14e67ad1..e6aeedc252 100644 --- a/core/src/types/uint.rs +++ b/core/src/types/uint.rs @@ -11,6 +11,7 @@ use num_integer::Integer; use num_traits::CheckedMul; use uint::construct_uint; +use super::dec::{Dec, POS_DECIMAL_PRECISION}; use crate::types::token; use crate::types::token::{Amount, AmountParseError, MaspDenom}; @@ -21,10 +22,276 @@ pub const ZERO: Uint = Uint::from_u64(0); pub const ONE: Uint = Uint::from_u64(1); impl Uint { + const N_WORDS: usize = 4; + /// 
Convert a [`u64`] to a [`Uint`]. pub const fn from_u64(x: u64) -> Uint { Uint([x.to_le(), 0, 0, 0]) } + + /// Return the least number of bits needed to represent the number + #[inline] + pub fn bits_512(arr: &[u64; 2 * Self::N_WORDS]) -> usize { + for i in 1..arr.len() { + if arr[arr.len() - i] > 0 { + return (0x40 * (arr.len() - i + 1)) + - arr[arr.len() - i].leading_zeros() as usize; + } + } + 0x40 - arr[0].leading_zeros() as usize + } + + fn div_mod_small_512( + mut slf: [u64; 2 * Self::N_WORDS], + other: u64, + ) -> ([u64; 2 * Self::N_WORDS], Self) { + let mut rem = 0u64; + slf.iter_mut().rev().for_each(|d| { + let (q, r) = Self::div_mod_word(rem, *d, other); + *d = q; + rem = r; + }); + (slf, rem.into()) + } + + fn shr_512( + original: [u64; 2 * Self::N_WORDS], + shift: u32, + ) -> [u64; 2 * Self::N_WORDS] { + let shift = shift as usize; + let mut ret = [0u64; 2 * Self::N_WORDS]; + let word_shift = shift / 64; + let bit_shift = shift % 64; + + // shift + for i in word_shift..original.len() { + ret[i - word_shift] = original[i] >> bit_shift; + } + + // Carry + if bit_shift > 0 { + for i in word_shift + 1..original.len() { + ret[i - word_shift - 1] += original[i] << (64 - bit_shift); + } + } + + ret + } + + fn full_shl_512( + slf: [u64; 2 * Self::N_WORDS], + shift: u32, + ) -> [u64; 2 * Self::N_WORDS + 1] { + debug_assert!(shift < Self::WORD_BITS as u32); + let mut u = [0u64; 2 * Self::N_WORDS + 1]; + let u_lo = slf[0] << shift; + let u_hi = Self::shr_512(slf, Self::WORD_BITS as u32 - shift); + u[0] = u_lo; + u[1..].copy_from_slice(&u_hi[..]); + u + } + + fn full_shr_512( + u: [u64; 2 * Self::N_WORDS + 1], + shift: u32, + ) -> [u64; 2 * Self::N_WORDS] { + debug_assert!(shift < Self::WORD_BITS as u32); + let mut res = [0; 2 * Self::N_WORDS]; + for i in 0..res.len() { + res[i] = u[i] >> shift; + } + // carry + if shift > 0 { + for i in 1..=res.len() { + res[i - 1] |= u[i] << (Self::WORD_BITS as u32 - shift); + } + } + res + } + + // See Knuth, TAOCP, Volume 2, 
section 4.3.1, Algorithm D. + fn div_mod_knuth_512( + slf: [u64; 2 * Self::N_WORDS], + mut v: Self, + n: usize, + m: usize, + ) -> ([u64; 2 * Self::N_WORDS], Self) { + debug_assert!(Self::bits_512(&slf) >= v.bits() && !v.fits_word()); + debug_assert!(n + m <= slf.len()); + // D1. + // Make sure 64th bit in v's highest word is set. + // If we shift both self and v, it won't affect the quotient + // and the remainder will only need to be shifted back. + let shift = v.0[n - 1].leading_zeros(); + v <<= shift; + // u will store the remainder (shifted) + let mut u = Self::full_shl_512(slf, shift); + + // quotient + let mut q = [0; 2 * Self::N_WORDS]; + let v_n_1 = v.0[n - 1]; + let v_n_2 = v.0[n - 2]; + + // D2. D7. + // iterate from m downto 0 + for j in (0..=m).rev() { + let u_jn = u[j + n]; + + // D3. + // q_hat is our guess for the j-th quotient digit + // q_hat = min(b - 1, (u_{j+n} * b + u_{j+n-1}) / v_{n-1}) + // b = 1 << WORD_BITS + // Theorem B: q_hat >= q_j >= q_hat - 2 + let mut q_hat = if u_jn < v_n_1 { + let (mut q_hat, mut r_hat) = + Self::div_mod_word(u_jn, u[j + n - 1], v_n_1); + // this loop takes at most 2 iterations + loop { + // check if q_hat * v_{n-2} > b * r_hat + u_{j+n-2} + let (hi, lo) = + Self::split_u128(u128::from(q_hat) * u128::from(v_n_2)); + if (hi, lo) <= (r_hat, u[j + n - 2]) { + break; + } + // then iterate till it doesn't hold + q_hat -= 1; + let (new_r_hat, overflow) = r_hat.overflowing_add(v_n_1); + r_hat = new_r_hat; + // if r_hat overflowed, we're done + if overflow { + break; + } + } + q_hat + } else { + // here q_hat >= q_j >= q_hat - 1 + u64::max_value() + }; + + // ex. 20: + // since q_hat * v_{n-2} <= b * r_hat + u_{j+n-2}, + // either q_hat == q_j, or q_hat == q_j + 1 + + // D4. + // let's assume optimistically q_hat == q_j + // subtract (q_hat * v) from u[j..] + let q_hat_v = v.full_mul_u64(q_hat); + // u[j..] -= q_hat_v; + let c = Self::sub_slice(&mut u[j..], &q_hat_v[..n + 1]); + + // D6. 
+ // actually, q_hat == q_j + 1 and u[j..] has overflowed + // highly unlikely ~ (1 / 2^63) + if c { + q_hat -= 1; + // add v to u[j..] + let c = Self::add_slice(&mut u[j..], &v.0[..n]); + u[j + n] = u[j + n].wrapping_add(u64::from(c)); + } + + // D5. + q[j] = q_hat; + } + + // D8. + let remainder = Self::full_shr_512(u, shift); + // The remainder should never exceed the capacity of Self + debug_assert!( + Self::bits_512(&remainder) <= Self::N_WORDS * Self::WORD_BITS + ); + (q, Self(remainder[..Self::N_WORDS].try_into().unwrap())) + } + + /// Returns a pair `(self / other, self % other)`. + /// + /// # Panics + /// + /// Panics if `other` is zero. + pub fn div_mod_512( + slf: [u64; 2 * Self::N_WORDS], + other: Self, + ) -> ([u64; 2 * Self::N_WORDS], Self) { + let my_bits = Self::bits_512(&slf); + let your_bits = other.bits(); + + assert!(your_bits != 0, "division by zero"); + + // Early return in case we are dividing by a larger number than us + if my_bits < your_bits { + return ( + [0; 2 * Self::N_WORDS], + Self(slf[..Self::N_WORDS].try_into().unwrap()), + ); + } + + if your_bits <= Self::WORD_BITS { + return Self::div_mod_small_512(slf, other.low_u64()); + } + + let (n, m) = { + let my_words = Self::words(my_bits); + let your_words = Self::words(your_bits); + (your_words, my_words - your_words) + }; + + Self::div_mod_knuth_512(slf, other, n, m) + } + + /// Returns a pair `(Some((self * num) / denom), (self * num) % denom)` if + /// the quotient fits into Self. Otherwise `(None, (self * num) % denom)` is + /// returned. + /// + /// # Panics + /// + /// Panics if `denom` is zero. + pub fn checked_mul_div( + &self, + num: Self, + denom: Self, + ) -> Option<(Self, Self)> { + if denom.is_zero() { + None + } else { + let prod = uint::uint_full_mul_reg!(Uint, 4, self, num); + let (quotient, remainder) = Self::div_mod_512(prod, denom); + // The compiler WILL NOT inline this if you remove this annotation. 
+ #[inline(always)] + fn any_nonzero(arr: &[u64]) -> bool { + use uint::unroll; + unroll! { + for i in 0..4 { + if arr[i] != 0 { + return true; + } + } + } + + false + } + if any_nonzero(&quotient[Self::N_WORDS..]) { + None + } else { + Some(( + Self(quotient[0..Self::N_WORDS].try_into().unwrap()), + remainder, + )) + } + } + } + + /// Returns a pair `((self * num) / denom, (self * num) % denom)`. + /// + /// # Panics + /// + /// Panics if `denom` is zero. + pub fn mul_div(&self, num: Self, denom: Self) -> (Self, Self) { + let prod = uint::uint_full_mul_reg!(Uint, 4, self, num); + let (quotient, remainder) = Self::div_mod_512(prod, denom); + ( + Self(quotient[0..Self::N_WORDS].try_into().unwrap()), + remainder, + ) + } } construct_uint! { @@ -171,10 +438,10 @@ impl Uint { /// * `self` * 10^(`denom`) overflows 256 bits /// * `other` is zero (`checked_div` will return `None`). pub fn fixed_precision_div(&self, rhs: &Self, denom: u8) -> Option<Self> { - let lhs = Uint::from(10) + Uint::from(10) .checked_pow(Uint::from(denom)) - .and_then(|res| res.checked_mul(*self))?; - lhs.checked_div(*rhs) + .and_then(|res| res.checked_mul_div(*self, *rhs)) + .map(|x| x.0) } /// Compute the two's complement of a number. @@ -337,6 +604,22 @@ impl I256 { Err(AmountParseError::InvalidRange) } } + + /// Multiply by a decimal [`Dec`] with the result rounded up.
+ #[must_use] + pub fn mul_ceil(&self, dec: Dec) -> Self { + let is_res_negative = self.is_negative() ^ dec.is_negative(); + let tot = self.abs() * dec.0.abs(); + let denom = Uint::from(10u64.pow(POS_DECIMAL_PRECISION as u32)); + let floor_div = tot / denom; + let rem = tot % denom; + let abs_res = Self(if !rem.is_zero() && !is_res_negative { + floor_div + Uint::from(1_u64) + } else { + floor_div + }); + if is_res_negative { -abs_res } else { abs_res } + } } impl From for I256 { @@ -554,6 +837,8 @@ impl TryFrom for i128 { #[cfg(test)] mod test_uint { + use std::str::FromStr; + use super::*; /// Test that dividing two [`Uint`]s with the specified precision @@ -581,8 +866,14 @@ mod test_uint { two.fixed_precision_div(&three, 3).expect("Satan lives"), Uint::from(666) ); - assert!(two.fixed_precision_div(&three, 77).is_none()); - assert!(Uint::from(20).fixed_precision_div(&three, 76).is_none()); + assert_eq!( + two.fixed_precision_div(&three, 77).expect("Test failed"), + Uint::from_str("9363ff047551e60c314a09cf62a269d471bafcf44a8c6aaaaaaaaaaaaaaaaaaa").unwrap() + ); + assert_eq!( + Uint::from(20).fixed_precision_div(&three, 76).expect("Test failed"), + Uint::from_str("9363ff047551e60c314a09cf62a269d471bafcf44a8c6aaaaaaaaaaaaaaaaaaa").unwrap() + ); } /// Test that adding one to the max signed @@ -710,4 +1001,55 @@ mod test_uint { let amount: Result = serde_json::from_str(r#""1000000000.2""#); assert!(amount.is_err()); } + + #[test] + fn test_i256_mul_ceil() { + let one = I256::from(1); + let two = I256::from(2); + let dec = Dec::from_str("0.25").unwrap(); + assert_eq!(one.mul_ceil(dec), one); + assert_eq!(two.mul_ceil(dec), one); + assert_eq!(I256::from(4).mul_ceil(dec), one); + assert_eq!(I256::from(5).mul_ceil(dec), two); + + assert_eq!((-one).mul_ceil(-dec), one); + + assert_eq!((-one).mul_ceil(dec), I256::zero()); + assert_eq!(one.mul_ceil(-dec), I256::zero()); + } + + #[test] + fn test_mul_div() { + use std::str::FromStr; + let a: Uint = Uint::from_str( + 
"0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + ).unwrap(); + let b: Uint = Uint::from_str( + "0x8000000000000000000000000000000000000000000000000000000000000000", + ).unwrap(); + let c: Uint = Uint::from_str( + "0x4000000000000000000000000000000000000000000000000000000000000000", + ).unwrap(); + let d: Uint = Uint::from_str( + "0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + ).unwrap(); + let e: Uint = Uint::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ).unwrap(); + let f: Uint = Uint::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ).unwrap(); + assert_eq!(a.mul_div(a, a), (a, Uint::zero())); + assert_eq!(b.mul_div(c, b), (c, Uint::zero())); + assert_eq!(a.mul_div(c, b), (d, c)); + assert_eq!(a.mul_div(e, e), (a, Uint::zero())); + assert_eq!(e.mul_div(c, b), (Uint::zero(), c)); + assert_eq!(f.mul_div(a, e), (Uint::zero(), Uint::zero())); + assert_eq!(a.checked_mul_div(a, a), Some((a, Uint::zero()))); + assert_eq!(b.checked_mul_div(c, b), Some((c, Uint::zero()))); + assert_eq!(a.checked_mul_div(c, b), Some((d, c))); + assert_eq!(a.checked_mul_div(e, e), Some((a, Uint::zero()))); + assert_eq!(e.checked_mul_div(c, b), Some((Uint::zero(), c))); + assert_eq!(d.checked_mul_div(a, e), None); + } } diff --git a/core/src/types/vote_extensions/bridge_pool_roots.rs b/core/src/types/vote_extensions/bridge_pool_roots.rs index 5670d3967c..22718521f3 100644 --- a/core/src/types/vote_extensions/bridge_pool_roots.rs +++ b/core/src/types/vote_extensions/bridge_pool_roots.rs @@ -21,6 +21,7 @@ use crate::types::storage::BlockHeight; Clone, PartialEq, PartialOrd, + Ord, Eq, Hash, BorshSerialize, diff --git a/core/src/types/voting_power.rs b/core/src/types/voting_power.rs index a28eedc1a4..292fadb6f9 100644 --- a/core/src/types/voting_power.rs +++ b/core/src/types/voting_power.rs @@ -1,6 +1,8 @@ //! 
This module contains types related with validator voting power calculations. +use std::collections::BTreeMap; use std::fmt::{Display, Formatter}; +use std::io::Read; use std::iter::Sum; use std::ops::{Add, AddAssign, Mul}; @@ -247,26 +249,30 @@ impl BorshSerialize for FractionalVotingPower { } impl BorshDeserialize for FractionalVotingPower { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let (numer, denom): (Uint, Uint) = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let (numer, denom): (Uint, Uint) = + BorshDeserialize::deserialize_reader(reader)?; Ok(FractionalVotingPower(Ratio::::new(numer, denom))) } } impl BorshSchema for FractionalVotingPower { fn add_definitions_recursively( - definitions: &mut std::collections::HashMap< + definitions: &mut BTreeMap< borsh::schema::Declaration, borsh::schema::Definition, >, ) { - let fields = - borsh::schema::Fields::UnnamedFields(borsh::maybestd::vec![ - Uint::declaration(), - Uint::declaration() - ]); + let fields = borsh::schema::Fields::UnnamedFields(vec![ + Uint::declaration(), + Uint::declaration(), + ]); let definition = borsh::schema::Definition::Struct { fields }; - Self::add_definition(Self::declaration(), definition, definitions); + borsh::schema::add_definition( + Self::declaration(), + definition, + definitions, + ); } fn declaration() -> borsh::schema::Declaration { diff --git a/encoding_spec/src/main.rs b/encoding_spec/src/main.rs index 5889b03b8d..4365fd4a01 100644 --- a/encoding_spec/src/main.rs +++ b/encoding_spec/src/main.rs @@ -15,10 +15,12 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] -use std::collections::HashSet; +use std::collections::{BTreeMap, HashSet}; use std::io::Write; +use std::iter::Extend; -use borsh::{schema, BorshSchema}; +use borsh::schema::{BorshSchemaContainer, Declaration, Definition}; +use borsh::{schema, schema_container_of}; use itertools::Itertools; use 
lazy_static::lazy_static; use madato::types::TableRow; @@ -56,179 +58,188 @@ lazy_static! { ]); } +fn btree(b: &BorshSchemaContainer) -> BTreeMap { + b.definitions() + .map(|(x, y)| (x.clone(), y.clone())) + .collect() +} + fn main() -> Result<(), Box> { let mut file = std::fs::File::create(OUTPUT_PATH).unwrap(); write_generated_code_notice(&mut file)?; // Top-level definitions are displayed at the top - let address_schema = Address::schema_container(); - let token_amount_schema = token::Amount::schema_container(); - let epoch_schema = Epoch::schema_container(); - let parameters_schema = Parameters::schema_container(); + + let address_schema = schema_container_of::
(); + let token_amount_schema = schema_container_of::(); + let epoch_schema = schema_container_of::(); + let parameters_schema = schema_container_of::(); // TODO update after - let public_key_schema = PublicKey::schema_container(); + let public_key_schema = schema_container_of::(); // TODO update after - let signature_schema = Signature::schema_container(); + let signature_schema = schema_container_of::(); let init_account_schema = - transaction::account::InitAccount::schema_container(); + schema_container_of::(); let init_validator_schema = - transaction::pos::InitValidator::schema_container(); - let token_transfer_schema = token::Transfer::schema_container(); + schema_container_of::(); + let token_transfer_schema = schema_container_of::(); let update_account = - transaction::account::UpdateAccount::schema_container(); - let pos_bond_schema = pos::Bond::schema_container(); - let pos_withdraw_schema = pos::Withdraw::schema_container(); - let wrapper_tx_schema = transaction::WrapperTx::schema_container(); + schema_container_of::(); + let pos_bond_schema = schema_container_of::(); + let pos_withdraw_schema = schema_container_of::(); + let wrapper_tx_schema = schema_container_of::(); // TODO derive BorshSchema after - // let tx_result_schema = transaction::TxResult::schema_container(); - let tx_type_schema = transaction::TxType::schema_container(); - let prefix_value_schema = storage::PrefixValue::schema_container(); + // let tx_result_schema = schema_container_of::(); + let tx_type_schema = schema_container_of::(); + let prefix_value_schema = schema_container_of::(); // PoS // TODO add after // TODO imported from `use namada::ledger::pos::Bonds;` - // let pos_bonds_schema = Bonds::schema_container(); + // let pos_bonds_schema = schema_container_of::(); // Merge type definitions - let mut definitions = address_schema.definitions; + + let mut definitions = btree(&address_schema); + // TODO check for conflicts (same name, different declaration) - 
definitions.extend(token_amount_schema.definitions); - definitions.extend(epoch_schema.definitions); - definitions.extend(parameters_schema.definitions); - definitions.extend(public_key_schema.definitions); - definitions.extend(signature_schema.definitions); - definitions.extend(init_account_schema.definitions); - definitions.extend(init_validator_schema.definitions); - definitions.extend(token_transfer_schema.definitions); - definitions.extend(update_account.definitions); - definitions.extend(pos_bond_schema.definitions); - definitions.extend(pos_withdraw_schema.definitions); - definitions.extend(wrapper_tx_schema.definitions); - // definitions.extend(tx_result_schema.definitions); - definitions.extend(tx_type_schema.definitions); - definitions.extend(prefix_value_schema.definitions); - // definitions.extend(pos_bonds_schema.definitions); + definitions.extend(btree(&token_amount_schema)); + definitions.extend(btree(&epoch_schema)); + definitions.extend(btree(¶meters_schema)); + definitions.extend(btree(&public_key_schema)); + definitions.extend(btree(&signature_schema)); + definitions.extend(btree(&init_account_schema)); + definitions.extend(btree(&init_validator_schema)); + definitions.extend(btree(&token_transfer_schema)); + definitions.extend(btree(&update_account)); + definitions.extend(btree(&pos_bond_schema)); + definitions.extend(btree(&pos_withdraw_schema)); + definitions.extend(btree(&wrapper_tx_schema)); + // definitions.extend(btree(&tx_result_schema)); + definitions.extend(btree(&tx_type_schema)); + definitions.extend(btree(&prefix_value_schema)); + // definitions.extend(btree(&pos_bonds_schema)); let mut tables: Vec = Vec::with_capacity(definitions.len()); // Add the top-level definitions first let address_definition = - definitions.remove(&address_schema.declaration).unwrap(); + definitions.remove(address_schema.declaration()).unwrap(); let address_table = - definition_to_table(address_schema.declaration, 
address_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/address/enum.Address.html"); + definition_to_table( address_schema.declaration(), address_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/address/enum.Address.html"); tables.push(address_table); let token_amount_definition = definitions - .remove(&token_amount_schema.declaration) + .remove(token_amount_schema.declaration()) .unwrap(); let token_amount_table = definition_to_table( - token_amount_schema.declaration, + token_amount_schema.declaration(), token_amount_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/token/struct.Amount.html"); tables.push(token_amount_table); let epoch_definition = - definitions.remove(&epoch_schema.declaration).unwrap(); + definitions.remove(epoch_schema.declaration()).unwrap(); let epoch_table = - definition_to_table(epoch_schema.declaration, epoch_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/storage/struct.Epoch.html"); + definition_to_table(epoch_schema.declaration(), epoch_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/storage/struct.Epoch.html"); tables.push(epoch_table); let parameters_definition = - definitions.remove(¶meters_schema.declaration).unwrap(); + definitions.remove(parameters_schema.declaration()).unwrap(); let parameters_table = - definition_to_table(parameters_schema.declaration, parameters_definition).with_rust_doc_link("file:///Users/tz/dev/namada/target/doc/namada/ledger/parameters/struct.Parameters.html"); + definition_to_table(parameters_schema.declaration(), parameters_definition).with_rust_doc_link("file:///Users/tz/dev/namada/target/doc/namada/ledger/parameters/struct.Parameters.html"); tables.push(parameters_table); let public_key_definition = - definitions.remove(&public_key_schema.declaration).unwrap(); + definitions.remove(public_key_schema.declaration()).unwrap(); let 
public_key_table = - definition_to_table(public_key_schema.declaration, public_key_definition).with_rust_doc_link( + definition_to_table(public_key_schema.declaration(), public_key_definition).with_rust_doc_link( // TODO update after "https://dev.namada.net/master/rustdoc/namada/types/key/ed25519/struct.PublicKey.html"); tables.push(public_key_table); let signature_definition = - definitions.remove(&signature_schema.declaration).unwrap(); + definitions.remove(signature_schema.declaration()).unwrap(); let signature_table = - definition_to_table(signature_schema.declaration, signature_definition).with_rust_doc_link( + definition_to_table(signature_schema.declaration(), signature_definition).with_rust_doc_link( // TODO update after "https://dev.namada.net/master/rustdoc/namada/types/key/ed25519/struct.Signature.html"); tables.push(signature_table); let init_account_definition = definitions - .remove(&init_account_schema.declaration) + .remove(init_account_schema.declaration()) .unwrap(); let init_account_table = definition_to_table( - init_account_schema.declaration, + init_account_schema.declaration(), init_account_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/struct.InitAccount.html"); tables.push(init_account_table); let init_validator_definition = definitions - .remove(&init_validator_schema.declaration) + .remove(init_validator_schema.declaration()) .unwrap(); let init_validator_table = definition_to_table( - init_validator_schema.declaration, + init_validator_schema.declaration(), init_validator_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/struct.InitValidator.html"); tables.push(init_validator_table); let token_transfer_definition = definitions - .remove(&token_transfer_schema.declaration) + .remove(token_transfer_schema.declaration()) .unwrap(); let token_transfer_table = definition_to_table( - token_transfer_schema.declaration, + 
token_transfer_schema.declaration(), token_transfer_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/token/struct.Transfer.html"); tables.push(token_transfer_table); let update_account_definition = - definitions.remove(&update_account.declaration).unwrap(); + definitions.remove(update_account.declaration()).unwrap(); let update_accoun_table = - definition_to_table(update_account.declaration, update_account_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/struct.UpdateVp.html"); + definition_to_table(update_account.declaration(), update_account_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/struct.UpdateVp.html"); tables.push(update_accoun_table); let pos_bond_definition = - definitions.remove(&pos_bond_schema.declaration).unwrap(); + definitions.remove(pos_bond_schema.declaration()).unwrap(); let pos_bond_table = - definition_to_table(pos_bond_schema.declaration, pos_bond_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/pos/struct.Bond.html"); + definition_to_table(pos_bond_schema.declaration(), pos_bond_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/pos/struct.Bond.html"); tables.push(pos_bond_table); let pos_withdraw_definition = definitions - .remove(&pos_withdraw_schema.declaration) + .remove(pos_withdraw_schema.declaration()) .unwrap(); let pos_withdraw_table = definition_to_table( - pos_withdraw_schema.declaration, + pos_withdraw_schema.declaration(), pos_withdraw_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/pos/struct.Withdraw.html"); tables.push(pos_withdraw_table); let wrapper_tx_definition = - definitions.remove(&wrapper_tx_schema.declaration).unwrap(); + definitions.remove(wrapper_tx_schema.declaration()).unwrap(); let wrapper_tx_table = definition_to_table( - 
wrapper_tx_schema.declaration, + wrapper_tx_schema.declaration(), wrapper_tx_definition, ).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/wrapper/wrapper_tx/struct.WrapperTx.html"); tables.push(wrapper_tx_table); // let tx_result_definition = - // definitions.remove(&tx_result_schema.declaration).unwrap(); + // definitions.remove(tx_result_schema.declaration()).unwrap(); // let tx_result_table = - // definition_to_table(tx_result_schema.declaration, + // definition_to_table(tx_result_schema.declaration(), // tx_result_definition).with_rust_doc_link("TODO"); // tables.push(tx_result_table); let tx_type_definition = - definitions.remove(&tx_type_schema.declaration).unwrap(); + definitions.remove(tx_type_schema.declaration()).unwrap(); let tx_type_table = - definition_to_table(tx_type_schema.declaration, tx_type_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/tx_types/enum.TxType.html"); + definition_to_table(tx_type_schema.declaration(), tx_type_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/tx_types/enum.TxType.html"); tables.push(tx_type_table); let prefix_value_definition = definitions - .remove(&prefix_value_schema.declaration) + .remove(prefix_value_schema.declaration()) .unwrap(); let prefix_value_table = - definition_to_table(prefix_value_schema.declaration, prefix_value_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/prefix_values/enum.TxType.html"); + definition_to_table(prefix_value_schema.declaration(), prefix_value_definition).with_rust_doc_link("https://dev.namada.net/master/rustdoc/namada/types/transaction/prefix_values/enum.TxType.html"); tables.push(prefix_value_table); // Add PoS definitions @@ -243,7 +254,7 @@ fn main() -> Result<(), Box> { .into_iter() .sorted_by_key(|(key, _val)| key.clone()) { - tables.push(definition_to_table(declaration, defition)) + 
tables.push(definition_to_table(&declaration, defition)) } // Print the tables to markdown @@ -271,7 +282,7 @@ struct Table { rows: Option>, } -fn definition_to_table(name: String, def: schema::Definition) -> Table { +fn definition_to_table(name: &Declaration, def: schema::Definition) -> Table { let (desc, rows) = match def { schema::Definition::Array { length, elements } => { let rows = None; @@ -345,7 +356,11 @@ fn definition_to_table(name: String, def: schema::Definition) -> Table { } } }; - Table { name, desc, rows } + Table { + name: name.to_string(), + desc, + rows, + } } /// Format a type to markdown. For internal types, adds anchors. diff --git a/ethereum_bridge/Cargo.toml b/ethereum_bridge/Cargo.toml index 1354c36a8c..40d8e6f422 100644 --- a/ethereum_bridge/Cargo.toml +++ b/ethereum_bridge/Cargo.toml @@ -29,10 +29,11 @@ testing = [ ] [dependencies] -namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign", "ferveo-tpke", "ethers-derive"]} +namada_core = {path = "../core", default-features = false, features = ["ferveo-tpke", "ethers-derive"]} namada_macros = {path = "../macros"} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} borsh.workspace = true +borsh-ext.workspace = true ethers.workspace = true eyre.workspace = true itertools.workspace = true @@ -46,7 +47,7 @@ tracing = "0.1.30" [dev-dependencies] # Added "testing" feature. 
-namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign", "ferveo-tpke", "ethers-derive", "testing"]} +namada_core = {path = "../core", default-features = false, features = ["ferveo-tpke", "ethers-derive", "testing"]} assert_matches.workspace = true data-encoding.workspace = true ethabi.workspace = true diff --git a/ethereum_bridge/src/parameters.rs b/ethereum_bridge/src/parameters.rs index c86ef1e6ed..599ef0c5f8 100644 --- a/ethereum_bridge/src/parameters.rs +++ b/ethereum_bridge/src/parameters.rs @@ -367,6 +367,7 @@ where #[cfg(test)] mod tests { + use borsh_ext::BorshSerializeExt; use eyre::Result; use namada_core::ledger::storage::testing::TestWlStorage; use namada_core::types::ethereum_events::EthAddress; @@ -474,7 +475,7 @@ mod tests { wl_storage .write_bytes( &bridge_storage::min_confirmations_key(), - MinimumConfirmations::default().try_to_vec().unwrap(), + MinimumConfirmations::default().serialize_to_vec(), ) .unwrap(); diff --git a/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs b/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs index 3271efeed5..a5dda2d206 100644 --- a/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs +++ b/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs @@ -18,7 +18,6 @@ use crate::protocol::transactions::{utils, votes, ChangedKeys}; use crate::storage::eth_bridge_queries::EthBridgeQueries; use crate::storage::proof::BridgePoolRootProof; use crate::storage::vote_tallies::{self, BridgePoolRoot}; - /// Applies a tally of signatures on over the Ethereum /// bridge pool root and nonce. 
Note that every signature /// passed into this function will be for the same @@ -222,7 +221,8 @@ mod test_apply_bp_roots_to_storage { use std::collections::BTreeSet; use assert_matches::assert_matches; - use borsh::{BorshDeserialize, BorshSerialize}; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::eth_bridge::storage::bridge_pool::{ get_key_from_hash, get_nonce_key, }; @@ -235,7 +235,7 @@ mod test_apply_bp_roots_to_storage { use namada_core::types::storage::Key; use namada_core::types::vote_extensions::bridge_pool_roots; use namada_core::types::voting_power::FractionalVotingPower; - use namada_proof_of_stake::parameters::PosParams; + use namada_proof_of_stake::parameters::OwnedPosParams; use namada_proof_of_stake::write_pos_params; use super::*; @@ -281,7 +281,7 @@ mod test_apply_bp_roots_to_storage { &KeccakHash([1; 32]), 100.into(), ); - let value = BlockHeight(101).try_to_vec().expect("Test failed"); + let value = BlockHeight(101).serialize_to_vec(); wl_storage .storage .block @@ -289,10 +289,7 @@ mod test_apply_bp_roots_to_storage { .update(&get_key_from_hash(&KeccakHash([1; 32])), value) .expect("Test failed"); wl_storage - .write_bytes( - &get_nonce_key(), - Uint::from(42).try_to_vec().expect("Test failed"), - ) + .write_bytes(&get_nonce_key(), Uint::from(42).serialize_to_vec()) .expect("Test failed"); TestPackage { validators: [validator_a, validator_b, validator_c], @@ -757,11 +754,11 @@ mod test_apply_bp_roots_to_storage { ); // update the pos params - let params = PosParams { + let params = OwnedPosParams { pipeline_len: 1, ..Default::default() }; - write_pos_params(&mut wl_storage, params).expect("Test failed"); + write_pos_params(&mut wl_storage, ¶ms).expect("Test failed"); // insert validators 2 and 3 at epoch 1 test_utils::append_validators_to_storage( diff --git a/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs index 
d878a56d11..fa2be67104 100644 --- a/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs +++ b/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -564,7 +564,7 @@ mod tests { use std::collections::HashMap; use assert_matches::assert_matches; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use eyre::Result; use namada_core::ledger::eth_bridge::storage::bridge_pool::get_pending_key; use namada_core::ledger::parameters::{ @@ -702,7 +702,7 @@ mod tests { let key = get_pending_key(&transfer); wl_storage .storage - .write(&key, transfer.try_to_vec().expect("Test failed")) + .write(&key, transfer.serialize_to_vec()) .expect("Test failed"); pending_transfers.push(transfer); @@ -743,10 +743,7 @@ mod tests { let payer_key = balance_key(&transfer.gas_fee.token, &payer); let payer_balance = Amount::from(0); wl_storage - .write_bytes( - &payer_key, - payer_balance.try_to_vec().expect("Test failed"), - ) + .write_bytes(&payer_key, payer_balance.serialize_to_vec()) .expect("Test failed"); let escrow_key = balance_key(&transfer.gas_fee.token, &BRIDGE_POOL_ADDRESS); @@ -761,36 +758,24 @@ mod tests { let sender_key = balance_key(&nam(), &transfer.transfer.sender); let sender_balance = Amount::from(0); wl_storage - .write_bytes( - &sender_key, - sender_balance.try_to_vec().expect("Test failed"), - ) + .write_bytes(&sender_key, sender_balance.serialize_to_vec()) .expect("Test failed"); let escrow_key = balance_key(&nam(), &BRIDGE_ADDRESS); let escrow_balance = Amount::from(10); wl_storage - .write_bytes( - &escrow_key, - escrow_balance.try_to_vec().expect("Test failed"), - ) + .write_bytes(&escrow_key, escrow_balance.serialize_to_vec()) .expect("Test failed"); } else { let token = transfer.token_address(); let sender_key = balance_key(&token, &transfer.transfer.sender); let sender_balance = Amount::from(0); wl_storage - .write_bytes( - &sender_key, - sender_balance.try_to_vec().expect("Test failed"), - ) + .write_bytes(&sender_key, 
sender_balance.serialize_to_vec()) .expect("Test failed"); let escrow_key = balance_key(&token, &BRIDGE_POOL_ADDRESS); let escrow_balance = Amount::from(10); wl_storage - .write_bytes( - &escrow_key, - escrow_balance.try_to_vec().expect("Test failed"), - ) + .write_bytes(&escrow_key, escrow_balance.serialize_to_vec()) .expect("Test failed"); update::amount( wl_storage, @@ -1162,7 +1147,7 @@ mod tests { let key = get_pending_key(&transfer); wl_storage .storage - .write(&key, transfer.try_to_vec().expect("Test failed")) + .write(&key, transfer.serialize_to_vec()) .expect("Test failed"); wl_storage .storage diff --git a/ethereum_bridge/src/protocol/transactions/read.rs b/ethereum_bridge/src/protocol/transactions/read.rs index 550be149ca..257c045e33 100644 --- a/ethereum_bridge/src/protocol/transactions/read.rs +++ b/ethereum_bridge/src/protocol/transactions/read.rs @@ -55,7 +55,7 @@ where #[cfg(test)] mod tests { use assert_matches::assert_matches; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::storage::testing::TestWlStorage; use namada_core::ledger::storage_api::StorageWrite; use namada_core::types::storage; @@ -81,7 +81,7 @@ mod tests { let amount = Amount::from(1_000_000); let mut fake_storage = TestWlStorage::default(); fake_storage - .write_bytes(&key, amount.try_to_vec().unwrap()) + .write_bytes(&key, amount.serialize_to_vec()) .unwrap(); let amt = read::amount_or_default(&fake_storage, &key).unwrap(); diff --git a/ethereum_bridge/src/protocol/transactions/update.rs b/ethereum_bridge/src/protocol/transactions/update.rs index 8316a72df4..d14c20ec35 100644 --- a/ethereum_bridge/src/protocol/transactions/update.rs +++ b/ethereum_bridge/src/protocol/transactions/update.rs @@ -18,7 +18,7 @@ where { let mut amount = super::read::amount_or_default(wl_storage, key)?; update(&mut amount); - wl_storage.write_bytes(key, amount.try_to_vec()?)?; + wl_storage.write_bytes(key, borsh::to_vec(&amount)?)?; Ok(amount) } @@ -35,13 +35,14 
@@ where { let mut value = super::read::value(wl_storage, key)?; update(&mut value); - wl_storage.write_bytes(key, value.try_to_vec()?)?; + wl_storage.write_bytes(key, borsh::to_vec(&value)?)?; Ok(value) } #[cfg(test)] mod tests { - use borsh::{BorshDeserialize, BorshSerialize}; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use eyre::{eyre, Result}; use namada_core::ledger::storage::testing::TestWlStorage; use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; @@ -52,9 +53,9 @@ mod tests { fn test_value() -> Result<()> { let key = storage::Key::parse("some arbitrary key") .expect("could not set up test"); - let value = 21; + let value = 21i32; let mut wl_storage = TestWlStorage::default(); - let serialized = value.try_to_vec().expect("could not set up test"); + let serialized = value.serialize_to_vec(); wl_storage .write_bytes(&key, serialized) .expect("could not set up test"); diff --git a/ethereum_bridge/src/protocol/transactions/votes.rs b/ethereum_bridge/src/protocol/transactions/votes.rs index c3a82bd370..cc029e28f5 100644 --- a/ethereum_bridge/src/protocol/transactions/votes.rs +++ b/ethereum_bridge/src/protocol/transactions/votes.rs @@ -189,7 +189,7 @@ mod tests { use namada_core::types::storage::BlockHeight; use namada_core::types::{address, token}; - use namada_proof_of_stake::parameters::PosParams; + use namada_proof_of_stake::parameters::OwnedPosParams; use namada_proof_of_stake::write_pos_params; use super::*; @@ -321,11 +321,11 @@ mod tests { ); // update the pos params - let params = PosParams { + let params = OwnedPosParams { pipeline_len: 1, ..Default::default() }; - write_pos_params(&mut wl_storage, params).expect("Test failed"); + write_pos_params(&mut wl_storage, ¶ms).expect("Test failed"); // insert validators 2 and 3 at epoch 1 test_utils::append_validators_to_storage( diff --git a/ethereum_bridge/src/protocol/transactions/votes/storage.rs b/ethereum_bridge/src/protocol/transactions/votes/storage.rs index 
832797ae1b..fe43c37c80 100644 --- a/ethereum_bridge/src/protocol/transactions/votes/storage.rs +++ b/ethereum_bridge/src/protocol/transactions/votes/storage.rs @@ -1,4 +1,5 @@ use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use eyre::{Result, WrapErr}; use namada_core::hints; use namada_core::ledger::storage::{ @@ -23,16 +24,19 @@ where H: 'static + StorageHasher + Sync, T: BorshSerialize, { - wl_storage.write_bytes(&keys.body(), &body.try_to_vec()?)?; - wl_storage.write_bytes(&keys.seen(), &tally.seen.try_to_vec()?)?; - wl_storage.write_bytes(&keys.seen_by(), &tally.seen_by.try_to_vec()?)?; + wl_storage.write_bytes(&keys.body(), &body.serialize_to_vec())?; + wl_storage.write_bytes(&keys.seen(), &tally.seen.serialize_to_vec())?; wl_storage - .write_bytes(&keys.voting_power(), &tally.voting_power.try_to_vec()?)?; + .write_bytes(&keys.seen_by(), &tally.seen_by.serialize_to_vec())?; + wl_storage.write_bytes( + &keys.voting_power(), + &tally.voting_power.serialize_to_vec(), + )?; if !already_present { // add the current epoch for the inserted event wl_storage.write_bytes( &keys.voting_started_epoch(), - &wl_storage.storage.get_current_epoch().0.try_to_vec()?, + &wl_storage.storage.get_current_epoch().0.serialize_to_vec(), )?; } Ok(()) @@ -205,28 +209,18 @@ mod tests { assert!(result.is_ok()); let body = wl_storage.read_bytes(&keys.body()).unwrap(); - assert_eq!(body, Some(event.try_to_vec().unwrap())); + assert_eq!(body, Some(event.serialize_to_vec())); let seen = wl_storage.read_bytes(&keys.seen()).unwrap(); - assert_eq!(seen, Some(tally.seen.try_to_vec().unwrap())); + assert_eq!(seen, Some(tally.seen.serialize_to_vec())); let seen_by = wl_storage.read_bytes(&keys.seen_by()).unwrap(); - assert_eq!(seen_by, Some(tally.seen_by.try_to_vec().unwrap())); + assert_eq!(seen_by, Some(tally.seen_by.serialize_to_vec())); let voting_power = wl_storage.read_bytes(&keys.voting_power()).unwrap(); - assert_eq!( - voting_power, - 
Some(tally.voting_power.try_to_vec().unwrap()) - ); + assert_eq!(voting_power, Some(tally.voting_power.serialize_to_vec())); let epoch = wl_storage.read_bytes(&keys.voting_started_epoch()).unwrap(); assert_eq!( epoch, - Some( - wl_storage - .storage - .get_current_epoch() - .0 - .try_to_vec() - .unwrap() - ) + Some(wl_storage.storage.get_current_epoch().0.serialize_to_vec()) ); } @@ -249,29 +243,24 @@ mod tests { seen: false, }; wl_storage - .write_bytes(&keys.body(), &event.try_to_vec().unwrap()) + .write_bytes(&keys.body(), &event.serialize_to_vec()) .unwrap(); wl_storage - .write_bytes(&keys.seen(), &tally.seen.try_to_vec().unwrap()) + .write_bytes(&keys.seen(), &tally.seen.serialize_to_vec()) .unwrap(); wl_storage - .write_bytes(&keys.seen_by(), &tally.seen_by.try_to_vec().unwrap()) + .write_bytes(&keys.seen_by(), &tally.seen_by.serialize_to_vec()) .unwrap(); wl_storage .write_bytes( &keys.voting_power(), - &tally.voting_power.try_to_vec().unwrap(), + &tally.voting_power.serialize_to_vec(), ) .unwrap(); wl_storage .write_bytes( &keys.voting_started_epoch(), - &wl_storage - .storage - .get_block_height() - .0 - .try_to_vec() - .unwrap(), + &wl_storage.storage.get_block_height().0.serialize_to_vec(), ) .unwrap(); diff --git a/ethereum_bridge/src/protocol/transactions/votes/update.rs b/ethereum_bridge/src/protocol/transactions/votes/update.rs index c1173bdf12..a98be1859d 100644 --- a/ethereum_bridge/src/protocol/transactions/votes/update.rs +++ b/ethereum_bridge/src/protocol/transactions/votes/update.rs @@ -263,6 +263,7 @@ mod tests { votes, total_stake, } = self; + let keys = vote_tallies::Keys::from(event); let seen_voting_power: token::Amount = votes .iter() @@ -376,6 +377,7 @@ mod tests { #[test] fn test_apply_duplicate_votes() -> Result<()> { let mut wl_storage = TestWlStorage::default(); + test_utils::init_default_storage(&mut wl_storage); let validator = address::testing::established_address_1(); let already_voted_height = BlockHeight(100); @@ -411,6 +413,7 
@@ mod tests { #[test] fn test_calculate_already_seen() -> Result<()> { let mut wl_storage = TestWlStorage::default(); + test_utils::init_default_storage(&mut wl_storage); let event = default_event(); let keys = vote_tallies::Keys::from(&event); let tally_pre = TallyParams { diff --git a/ethereum_bridge/src/storage/vote_tallies.rs b/ethereum_bridge/src/storage/vote_tallies.rs index ec03c498d6..edb60114e8 100644 --- a/ethereum_bridge/src/storage/vote_tallies.rs +++ b/ethereum_bridge/src/storage/vote_tallies.rs @@ -1,6 +1,6 @@ //! Functionality for accessing keys to do with tallying votes -use std::io::Write; +use std::io::{Read, Write}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; @@ -199,11 +199,11 @@ impl BorshSerialize for BridgePoolRoot { } impl BorshDeserialize for BridgePoolRoot { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - as BorshDeserialize>::deserialize( - buf, + fn deserialize_reader(reader: &mut R) -> std::io::Result { + as BorshDeserialize>::deserialize_reader( + reader, ) - .map(BridgePoolRoot) + .map(BridgePoolRoot) } } diff --git a/ethereum_bridge/src/test_utils.rs b/ethereum_bridge/src/test_utils.rs index 9c24e9edfa..cc5370360d 100644 --- a/ethereum_bridge/src/test_utils.rs +++ b/ethereum_bridge/src/test_utils.rs @@ -3,11 +3,13 @@ use std::collections::HashMap; use std::num::NonZeroU64; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use namada_core::ledger::eth_bridge::storage::bridge_pool::get_key_from_hash; use namada_core::ledger::eth_bridge::storage::whitelist; +use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_core::ledger::storage::mockdb::MockDBWriteBatch; use namada_core::ledger::storage::testing::{TestStorage, TestWlStorage}; +use namada_core::ledger::storage_api::token::credit_tokens; use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::types::address::{self, wnam, Address}; use namada_core::types::dec::Dec; @@ -16,11 +18,12 
@@ use namada_core::types::keccak::KeccakHash; use namada_core::types::key::{self, protocol_pk_key, RefTo}; use namada_core::types::storage::{BlockHeight, Key}; use namada_core::types::token; -use namada_proof_of_stake::parameters::PosParams; +use namada_proof_of_stake::parameters::OwnedPosParams; use namada_proof_of_stake::pos_queries::PosQueries; use namada_proof_of_stake::types::GenesisValidator; use namada_proof_of_stake::{ - become_validator, bond_tokens, store_total_consensus_stake, BecomeValidator, + become_validator, bond_tokens, staking_token_address, + store_total_consensus_stake, BecomeValidator, }; use crate::parameters::{ @@ -197,6 +200,7 @@ pub fn init_storage_with_validators( .map(|(address, tokens)| { let keys = TestValidatorKeys::generate(); let consensus_key = keys.consensus.ref_to(); + let protocol_key = keys.protocol.ref_to(); let eth_cold_key = keys.eth_gov.ref_to(); let eth_hot_key = keys.eth_bridge.ref_to(); all_keys.insert(address.clone(), keys); @@ -204,6 +208,7 @@ pub fn init_storage_with_validators( address, tokens, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate: Dec::new(5, 2).unwrap(), @@ -212,9 +217,11 @@ pub fn init_storage_with_validators( }) .collect(); + let gov_params = GovernanceParameters::default(); + gov_params.init_storage(wl_storage).unwrap(); namada_proof_of_stake::init_genesis( wl_storage, - &PosParams::default(), + &OwnedPosParams::default(), validators.into_iter(), 0.into(), ) @@ -241,7 +248,7 @@ pub fn commit_bridge_pool_root_at_height( root: &KeccakHash, height: BlockHeight, ) { - let value = height.try_to_vec().expect("Encoding failed"); + let value = height.serialize_to_vec(); storage .block .tree @@ -263,10 +270,13 @@ pub fn append_validators_to_storage( let mut all_keys = HashMap::new(); let params = wl_storage.pos_queries().get_pos_params(); + let staking_token = staking_token_address(wl_storage); + for (validator, stake) in consensus_validators { let keys = 
TestValidatorKeys::generate(); let consensus_key = &keys.consensus.ref_to(); + let protocol_key = &&keys.protocol.ref_to(); let eth_cold_key = &keys.eth_gov.ref_to(); let eth_hot_key = &keys.eth_bridge.ref_to(); @@ -275,6 +285,7 @@ pub fn append_validators_to_storage( params: ¶ms, address: &validator, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, current_epoch, @@ -282,6 +293,8 @@ pub fn append_validators_to_storage( max_commission_rate_change: Dec::new(1, 2).unwrap(), }) .expect("Test failed"); + credit_tokens(wl_storage, &staking_token, &validator, stake) + .expect("Test failed"); bond_tokens(wl_storage, None, &validator, stake, current_epoch) .expect("Test failed"); diff --git a/ethereum_bridge/src/vp.rs b/ethereum_bridge/src/vp.rs index 1c06de83a1..ed678ff03b 100644 --- a/ethereum_bridge/src/vp.rs +++ b/ethereum_bridge/src/vp.rs @@ -1,4 +1,4 @@ -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use namada_core::ledger::storage::{self as ledger_storage, StorageHasher}; use namada_core::ledger::storage_api::StorageWrite; use namada_core::types::token::{balance_key, Amount}; @@ -17,12 +17,7 @@ where &namada_core::ledger::eth_bridge::ADDRESS, ); wl_storage - .write_bytes( - &escrow_key, - Amount::default() - .try_to_vec() - .expect("Serializing an amount shouldn't fail."), - ) + .write_bytes(&escrow_key, Amount::default().serialize_to_vec()) .expect( "Initializing the escrow balance of the Ethereum Bridge VP \ shouldn't fail.", diff --git a/genesis/e2e-tests-single-node.toml b/genesis/e2e-tests-single-node.toml index 4a3c02b805..1096be0d28 100644 --- a/genesis/e2e-tests-single-node.toml +++ b/genesis/e2e-tests-single-node.toml @@ -36,6 +36,11 @@ Christel = "1000000" Daewon = "1000000" Ester = "1000000" "validator-0.public_key" = "100" +[token.NAM.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target = "0.6667" [token.BTC] address = 
"atest1v4ehgw36xdzryve5gsc52veeg5cnsv2yx5eygvp38qcrvd29xy6rys6p8yc5xvp4xfpy2v694wgwcp" @@ -46,6 +51,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.BTC.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target = "0.6667" [token.ETH] address = "atest1v4ehgw36xqmr2d3nx3ryvd2xxgmrq33j8qcns33sxezrgv6zxdzrydjrxveygd2yxumrsdpsf9jc2p" @@ -56,6 +66,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.ETH.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target = "0.6667" [token.DOT] address = "atest1v4ehgw36gg6nvs2zgfpyxsfjgc65yv6pxy6nwwfsxgungdzrggeyzv35gveyxsjyxymyz335hur2jn" @@ -66,6 +81,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.DOT.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target = "0.6667" [token.Schnitzel] address = "atest1v4ehgw36xue5xvf5xvuyzvpjx5un2v3k8qeyvd3cxdqns32p89rrxd6xx9zngvpegccnzs699rdnnt" @@ -76,6 +96,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.Schnitzel.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target = "0.6667" [token.Apfel] address = "atest1v4ehgw36gfryydj9g3p5zv3kg9znyd358ycnzsfcggc5gvecgc6ygs2rxv6ry3zpg4zrwdfeumqcz9" @@ -86,6 +111,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.Apfel.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target = "0.6667" [token.Kartoffel] address = "atest1v4ehgw36gep5ysecxq6nyv3jg3zygv3e89qn2vp48pryxsf4xpznvve5gvmy23fs89pryvf5a6ht90" @@ -97,6 +127,11 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" +[token.Kartoffel.parameters] +max_reward_rate = "0.1" +kd_gain_nom = "0.1" +kp_gain_nom = "0.1" +locked_ratio_target = "0.6667" [established.Albert] vp 
= "vp_user" diff --git a/proof_of_stake/Cargo.toml b/proof_of_stake/Cargo.toml index a5b407df36..5506ec5174 100644 --- a/proof_of_stake/Cargo.toml +++ b/proof_of_stake/Cargo.toml @@ -33,8 +33,12 @@ tracing.workspace = true [dev-dependencies] namada_core = {path = "../core", features = ["testing"]} +assert_matches.workspace = true itertools.workspace = true proptest.workspace = true proptest-state-machine.workspace = true test-log.workspace = true tracing-subscriber.workspace = true +pretty_assertions.workspace = true +derivative.workspace = true +yansi.workspace = true diff --git a/proof_of_stake/proptest-regressions/tests/state_machine.txt b/proof_of_stake/proptest-regressions/tests/state_machine.txt index 4c02bc0ede..341ba3ff3d 100644 --- a/proof_of_stake/proptest-regressions/tests/state_machine.txt +++ b/proof_of_stake/proptest-regressions/tests/state_machine.txt @@ -4,5 +4,3 @@ # # It is recommended to check this file in to source control so that # everyone who runs the test benefits from these saved cases. 
-cc 3076c8509d56c546d5915febcf429f218ab79a7bac34c75c288f531b88110bc3 # shrinks to (initial_state, transitions) = (AbstractPosState { epoch: Epoch(0), params: PosParams { max_validator_slots: 4, pipeline_len: 2, unbonding_len: 4, tm_votes_per_token: 0.0614, block_proposer_reward: 0.125, block_vote_reward: 0.1, max_inflation_rate: 0.1, target_staked_ratio: 0.6667, duplicate_vote_min_slash_rate: 0.001, light_client_attack_min_slash_rate: 0.001, cubic_slashing_window_length: 1 }, genesis_validators: [GenesisValidator { address: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, tokens: Amount { micro: 9185807 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6, tokens: Amount { micro: 5025206 }, consensus_key: Ed25519(PublicKey(VerificationKey("17888c2ca502371245e5e35d5bcf35246c3bc36878e859938c9ead3c54db174f"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc, tokens: Amount { micro: 4424807 }, consensus_key: Ed25519(PublicKey(VerificationKey("478243aed376da313d7cf3a60637c264cb36acc936efb341ff8d3d712092d244"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, tokens: Amount { micro: 4119410 }, consensus_key: Ed25519(PublicKey(VerificationKey("c5bbbb60e412879bbec7bb769804fa8e36e68af10d5477280b63deeaca931bed"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, 
tokens: Amount { micro: 3619078 }, consensus_key: Ed25519(PublicKey(VerificationKey("4f44e6c7bdfed3d9f48d86149ee3d29382cae8c83ca253e06a70be54a301828b"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, tokens: Amount { micro: 2691447 }, consensus_key: Ed25519(PublicKey(VerificationKey("ff87a0b0a3c7c0ce827e9cada5ff79e75a44a0633bfcb5b50f99307ddb26b337"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, tokens: Amount { micro: 224944 }, consensus_key: Ed25519(PublicKey(VerificationKey("191fc38f134aaf1b7fdb1f86330b9d03e94bd4ba884f490389de964448e89b3f"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }, GenesisValidator { address: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, tokens: Amount { micro: 142614 }, consensus_key: Ed25519(PublicKey(VerificationKey("e2e8aa145e1ec5cb01ebfaa40e10e12f0230c832fd8135470c001cb86d77de00"))), commission_rate: 0.05, max_commission_rate_change: 0.01 }], bonds: {BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }: {Epoch(0): 142614}, BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }: {Epoch(0): 4119410}, BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }: {Epoch(0): 9185807}, BondId { 
source: Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6, validator: Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6 }: {Epoch(0): 5025206}, BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }: {Epoch(0): 2691447}, BondId { source: Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc, validator: Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc }: {Epoch(0): 4424807}, BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }: {Epoch(0): 224944}, BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }: {Epoch(0): 3619078}}, validator_stakes: {Epoch(0): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 142614, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 4119410, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 9185807, Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: 5025206, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 2691447, Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: 4424807, Established: 
atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 224944, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3619078}, Epoch(1): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 142614, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 4119410, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 9185807, Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: 5025206, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 2691447, Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: 4424807, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 224944, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3619078}, Epoch(2): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 142614, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 4119410, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 9185807, Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: 5025206, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 2691447, Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: 4424807, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 224944, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3619078}}, consensus_set: {Epoch(0): {Amount { micro: 
4119410 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 4424807 }: [Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc], Amount { micro: 5025206 }: [Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6], Amount { micro: 9185807 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(1): {Amount { micro: 4119410 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 4424807 }: [Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc], Amount { micro: 5025206 }: [Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6], Amount { micro: 9185807 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(2): {Amount { micro: 4119410 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 4424807 }: [Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc], Amount { micro: 5025206 }: [Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6], Amount { micro: 9185807 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}}, below_capacity_set: {Epoch(0): {ReverseOrdTokenAmount(Amount { micro: 142614 }): [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6], ReverseOrdTokenAmount(Amount { micro: 224944 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { micro: 2691447 }): [Established: 
atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], ReverseOrdTokenAmount(Amount { micro: 3619078 }): [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd]}, Epoch(1): {ReverseOrdTokenAmount(Amount { micro: 142614 }): [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6], ReverseOrdTokenAmount(Amount { micro: 224944 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { micro: 2691447 }): [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], ReverseOrdTokenAmount(Amount { micro: 3619078 }): [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd]}, Epoch(2): {ReverseOrdTokenAmount(Amount { micro: 142614 }): [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6], ReverseOrdTokenAmount(Amount { micro: 224944 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { micro: 2691447 }): [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], ReverseOrdTokenAmount(Amount { micro: 3619078 }): [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd]}}, validator_states: {Epoch(0): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: BelowCapacity, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: Consensus, Established: 
atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: BelowCapacity, Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: BelowCapacity}, Epoch(1): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: BelowCapacity, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: Consensus, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: BelowCapacity, Established: atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: BelowCapacity}, Epoch(2): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: BelowCapacity, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gfzrydfsx9zryv6pxcmng32xg9zyvve3xveyxvf58pzyzd2p8qmr23fsggensve3v7a7y6: Consensus, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: BelowCapacity, Established: 
atest1v4ehgw36gvcn23zyx3zngw2pgv6nxvfjx9pyyv2p8ye5vvpjxcenvv3ng3przvpnxqur2vzpkrazgc: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: BelowCapacity}}, unbonds: {}, validator_slashes: {}, enqueued_slashes: {}, validator_last_slash_epochs: {}, unbond_records: {} }, [InitValidator { address: Established: atest1v4ehgw36xgunxvj9xqmny3jyxycnzdzxxqeng33ngvunqsfsx5mnwdfjgvenvwfk89prwdpjd0cjrk, consensus_key: Ed25519(PublicKey(VerificationKey("bea04de1e5be8ca0ae27be8ad935df8d757e96c1e067e96aedeba0ded0df997d"))), commission_rate: 0.39428, max_commission_rate_change: 0.12485 }]) -cc c0ffe7b368967ea0c456da20046f7d8a78c232c066ea116d3a123c945b7882fb # shrinks to (initial_state, transitions) = (AbstractPosState { epoch: Epoch(0), params: PosParams { max_validator_slots: 4, pipeline_len: 2, unbonding_len: 7, tm_votes_per_token: Dec(900700.000000), block_proposer_reward: Dec(125000.000000), block_vote_reward: Dec(100000.000000), max_inflation_rate: Dec(100000.000000), target_staked_ratio: Dec(666700.000000), duplicate_vote_min_slash_rate: Dec(1000.000000), light_client_attack_min_slash_rate: Dec(1000.000000), cubic_slashing_window_length: 1 }, genesis_validators: [GenesisValidator { address: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, tokens: Amount { raw: 8937727 }, consensus_key: Ed25519(PublicKey(VerificationKey("e2e8aa145e1ec5cb01ebfaa40e10e12f0230c832fd8135470c001cb86d77de00"))), commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }, GenesisValidator { address: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, tokens: Amount { raw: 8738693 }, consensus_key: Ed25519(PublicKey(VerificationKey("ff87a0b0a3c7c0ce827e9cada5ff79e75a44a0633bfcb5b50f99307ddb26b337"))), 
commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }, GenesisValidator { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, tokens: Amount { raw: 8373784 }, consensus_key: Ed25519(PublicKey(VerificationKey("c5bbbb60e412879bbec7bb769804fa8e36e68af10d5477280b63deeaca931bed"))), commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }, GenesisValidator { address: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, tokens: Amount { raw: 3584214 }, consensus_key: Ed25519(PublicKey(VerificationKey("4f44e6c7bdfed3d9f48d86149ee3d29382cae8c83ca253e06a70be54a301828b"))), commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }, GenesisValidator { address: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, tokens: Amount { raw: 553863 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }, GenesisValidator { address: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, tokens: Amount { raw: 218044 }, consensus_key: Ed25519(PublicKey(VerificationKey("191fc38f134aaf1b7fdb1f86330b9d03e94bd4ba884f490389de964448e89b3f"))), commission_rate: Dec(50000.000000), max_commission_rate_change: Dec(10000.000000) }], bonds: {BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }: {Epoch(0): 8.937727}, BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: 
atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }: {Epoch(0): 8.373784}, BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }: {Epoch(0): 0.553863}, BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }: {Epoch(0): 8.738693}, BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }: {Epoch(0): 0.218044}, BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }: {Epoch(0): 3.584214}}, validator_stakes: {Epoch(0): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 8.937727, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 8.373784, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 0.553863, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 8.738693, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 0.218044, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3.584214}, Epoch(1): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 8.937727, Established: 
atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 8.373784, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 0.553863, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 8.738693, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 0.218044, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3.584214}, Epoch(2): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: 8.937727, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 8.373784, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 0.553863, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 8.738693, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 0.218044, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 3.584214}}, consensus_set: {Epoch(0): {Amount { raw: 3584214 }: [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd], Amount { raw: 8373784 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { raw: 8738693 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { raw: 8937727 }: [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6]}, Epoch(1): {Amount { raw: 3584214 }: [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd], Amount { raw: 8373784 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], 
Amount { raw: 8738693 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { raw: 8937727 }: [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6]}, Epoch(2): {Amount { raw: 3584214 }: [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd], Amount { raw: 8373784 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { raw: 8738693 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { raw: 8937727 }: [Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6]}}, below_capacity_set: {Epoch(0): {ReverseOrdTokenAmount(Amount { raw: 218044 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { raw: 553863 }): [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(1): {ReverseOrdTokenAmount(Amount { raw: 218044 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { raw: 553863 }): [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(2): {ReverseOrdTokenAmount(Amount { raw: 218044 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { raw: 553863 }): [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}}, validator_states: {Epoch(0): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: 
atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: BelowCapacity, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: Consensus}, Epoch(1): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: BelowCapacity, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: Consensus}, Epoch(2): {Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: BelowCapacity, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: Consensus}}, unbonds: {}, validator_slashes: {}, enqueued_slashes: {}, validator_last_slash_epochs: {}, unbond_records: {} }, [Unbond { id: BondId { source: Established: 
atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 267 } }, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 7610143 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 9863718 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 7102818 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 63132 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 9663084 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 2694963 } }, Bond { id: BondId { source: Established: 
atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 7453740 } }, NextEpoch, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 14974324 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2628172 } }, NextEpoch, NextEpoch, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 282055 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 11228090 } }, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 2027105 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2034080 } }, Unbond { id: BondId { source: Established: 
atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 3329590 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 854661 } }, Misbehavior { address: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, slash_type: DuplicateVote, infraction_epoch: Epoch(1), height: 0 }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 227931 } }, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 2701887 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 1776100 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 3717491 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: 
atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 5281559 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 2426117 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 2005749 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 7883312 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 7300122 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 3388459 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 195542 } }, NextEpoch, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: 
atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2251455 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 1237777 } }, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 691613 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 1244599 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2645543 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 8384136 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 590662 } }, NextEpoch, InitValidator { address: Established: atest1v4ehgw368qcrqd2ygvmyyvf4g9qnvv3kxucrwv3hxg6ryve4x56r233cxucnysjrxsmygdj9yer4pz, consensus_key: 
Ed25519(PublicKey(VerificationKey("afa2335747c0249f66eca84e88fba1a0e3ccec6a8f6f97f3177a42ffbb216492"))), commission_rate: Dec(195450.000000), max_commission_rate_change: Dec(954460.000000) }, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 1687952 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 12754717 } }, Misbehavior { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, slash_type: LightClientAttack, infraction_epoch: Epoch(4), height: 0 }, Bond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 8952712 } }, NextEpoch, Withdraw { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 519835 } }, UnjailValidator { address: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, 
validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 2207493 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 236124 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 71122 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 1158688 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 267618 } }, InitValidator { address: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch, consensus_key: Ed25519(PublicKey(VerificationKey("822cfec1ec829a50306424ac3d11115e880b952f5f54ac9a624277898991ee70"))), commission_rate: Dec(614520.000000), max_commission_rate_change: Dec(369920.000000) }, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 8634884 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: 
atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 8660668 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 8436873 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 515615 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 46481 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 4153966 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 2272563 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g5eyzwf3xqc5gwzxg3pnq3jpgsenxwp3x56rjvz9x5crwsf3gerrgwphxqen2sjz4hscvd, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 7491749 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: 
atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 1921487 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 8316111 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 11873152 } }, NextEpoch, Withdraw { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv } }, Withdraw { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk } }, Withdraw { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 4728535 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g5eyzwf3xqc5gwzxg3pnq3jpgsenxwp3x56rjvz9x5crwsf3gerrgwphxqen2sjz4hscvd, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 2828807 } }, Unbond { id: BondId { source: 
Established: atest1v4ehgw36g5eyzwf3xqc5gwzxg3pnq3jpgsenxwp3x56rjvz9x5crwsf3gerrgwphxqen2sjz4hscvd, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 655500 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 234416 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 330322 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 222600 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 2538059 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 168498 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 510701 } }, Misbehavior { address: Established: 
atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, slash_type: DuplicateVote, infraction_epoch: Epoch(8), height: 0 }, InitValidator { address: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r, consensus_key: Ed25519(PublicKey(VerificationKey("afc853489cf37abedeb6a97d036f3dc60934194af7169a2cc15fb3f85e4e287c"))), commission_rate: Dec(52690.000000), max_commission_rate_change: Dec(56470.000000) }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 7098849 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 2180088 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 243441 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 1621261 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 7650954 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: 
Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 1201023 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 9702706 } }, InitValidator { address: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, consensus_key: Ed25519(PublicKey(VerificationKey("f8506f129faaf3bac1397ad0ab3bfa6d1a00d5c1064c4fafe740f2844be8fb04"))), commission_rate: Dec(575190.000000), max_commission_rate_change: Dec(602710.000000) }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 347187 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 5536481 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36xc6nvvf4g9znxvf3xdrrgvfexuen2dek8qmnqse58q6ygdpkxeznz3j9xyeyydfht747xe, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 1859243 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 1907757 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: 
atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 3007741 } }, Misbehavior { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, slash_type: DuplicateVote, infraction_epoch: Epoch(9), height: 0 }, Bond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 8226972 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 602759 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 8350223 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 3787232 } }, InitValidator { address: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595, consensus_key: Ed25519(PublicKey(VerificationKey("0b88c50c1b9b5b1e83c89110e388908dc3cc18ce0551494ab1c82bece24b2714"))), commission_rate: Dec(674000.000000), max_commission_rate_change: Dec(247230.000000) }, Bond { id: BondId { source: Established: atest1v4ehgw36gdp52wp4xv6yyd3nx9pnysfn89znjsen8quyvwfkgycnjs29x9ryxveh8prygsfecye5dj, validator: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r }, amount: Amount { raw: 
1391049 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36gve5zdf4gccygv6zxgcnxwzrgv65x32rg4zrxv34g9prvs2pxqmnzve5xvuns33czq9awp, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 4008194 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368pq5g3f3gceygvpjxuenyveexary2wzx8ycnw3zpg9zrvvp4xger2dzyxuunwvjz4n93ww, validator: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 9368360 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 9140634 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 600383 } }, Misbehavior { address: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, slash_type: DuplicateVote, infraction_epoch: Epoch(7), height: 0 }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 8599835 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 345454 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: 
atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 12448069 } }, NextEpoch, Withdraw { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 5151682 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 1862578 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 10904134 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368pq5g3f3gceygvpjxuenyveexary2wzx8ycnw3zpg9zrvvp4xger2dzyxuunwvjz4n93ww, validator: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 773655 } }, Bond { id: BondId { source: Implicit: atest1d9khqw3689rrqdp58pznydecgyu5xs3cxdznvd6xxsmng32zxumrxvpj8qenydejgfzygwzxlu6r7s, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 8927299 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: 
Amount { raw: 1288039 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2861830 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 445593 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368pq5g3f3gceygvpjxuenyveexary2wzx8ycnw3zpg9zrvvp4xger2dzyxuunwvjz4n93ww, validator: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 8204875 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 602527 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 5812026 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw3689rrqdp58pznydecgyu5xs3cxdznvd6xxsmng32zxumrxvpj8qenydejgfzygwzxlu6r7s, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 211165 } }, NextEpoch, Bond { id: BondId { source: Implicit: atest1d9khqw36xsun2decx9p52v2xg5cr2vphxym5vve58yerqve5x5c5yve3gepyzs3ngycy233eufckzz, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 350302 } }, Unbond { id: BondId { source: Established: 
atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 4560437 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36xqunjdeegge5xdpcg5mnqwzp8yerzde58pq5g3pcxu6yvvphg3zr23z9gg6yvs3cmzdz9u, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 3515009 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch, validator: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch }, amount: Amount { raw: 4956849 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xsun2decx9p52v2xg5cr2vphxym5vve58yerqve5x5c5yve3gepyzs3ngycy233eufckzz, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 290427 } }, NextEpoch, Unbond { id: BondId { source: Implicit: atest1d9khqw36gve5zdf4gccygv6zxgcnxwzrgv65x32rg4zrxv34g9prvs2pxqmnzve5xvuns33czq9awp, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 3261985 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch, validator: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch }, amount: Amount { raw: 8946479 } }, Withdraw { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 } }, NextEpoch, InitValidator { address: Established: atest1v4ehgw36gvcrgdeex5ensvfkgccyxve3x3pnys6xxpzr2s6rxuurv3j9g4pyysjzxq6ygdzyt2wxa3, 
consensus_key: Ed25519(PublicKey(VerificationKey("a856fc650a2404e2d0c152d89c1c221bd9056a6103980e1d821b0cbae213ff44"))), commission_rate: Dec(324920.000000), max_commission_rate_change: Dec(512260.000000) }, Withdraw { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 82795 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 128956 } }, Bond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 2043203 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 6764953 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g5eyzwf3xqc5gwzxg3pnq3jpgsenxwp3x56rjvz9x5crwsf3gerrgwphxqen2sjz4hscvd, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 6413168 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368pq5g3f3gceygvpjxuenyveexary2wzx8ycnw3zpg9zrvvp4xger2dzyxuunwvjz4n93ww, validator: Established: 
atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 6384185 } }, Misbehavior { address: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595, slash_type: LightClientAttack, infraction_epoch: Epoch(13), height: 0 }, Bond { id: BondId { source: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595, validator: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 8314982 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36xscrsve3geqnwd2x8qmrzwpe89z5zsekgvenqwp5x4p5ydzp8qmrz3zpgcmnydjptyfc40, validator: Established: atest1v4ehgw36gvcrgdeex5ensvfkgccyxve3x3pnys6xxpzr2s6rxuurv3j9g4pyysjzxq6ygdzyt2wxa3 }, amount: Amount { raw: 9139532 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 34693 } }, Bond { id: BondId { source: Implicit: atest1d9khqw3689rrqdp58pznydecgyu5xs3cxdznvd6xxsmng32zxumrxvpj8qenydejgfzygwzxlu6r7s, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 9487215 } }, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 799953 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xscrsve3geqnwd2x8qmrzwpe89z5zsekgvenqwp5x4p5ydzp8qmrz3zpgcmnydjptyfc40, validator: Established: atest1v4ehgw36gvcrgdeex5ensvfkgccyxve3x3pnys6xxpzr2s6rxuurv3j9g4pyysjzxq6ygdzyt2wxa3 }, amount: Amount { raw: 3334636 } }, NextEpoch, Withdraw { 
id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv } }, Unbond { id: BondId { source: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch, validator: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch }, amount: Amount { raw: 7942329 } }, NextEpoch, Unbond { id: BondId { source: Established: atest1v4ehgw36gdp52wp4xv6yyd3nx9pnysfn89znjsen8quyvwfkgycnjs29x9ryxveh8prygsfecye5dj, validator: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r }, amount: Amount { raw: 878389 } }, Withdraw { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 } }, UnjailValidator { address: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, UnjailValidator { address: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, Bond { id: BondId { source: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 5376602 } }, UnjailValidator { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, Unbond { id: BondId { source: Implicit: atest1d9khqw36xc6nvvf4g9znxvf3xdrrgvfexuen2dek8qmnqse58q6ygdpkxeznz3j9xyeyydfht747xe, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { raw: 1118174 } }, Unbond { id: BondId { 
source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 286221 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 73579 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 2010212 } }, Bond { id: BondId { source: Implicit: atest1d9khqw3689rrqdp58pznydecgyu5xs3cxdznvd6xxsmng32zxumrxvpj8qenydejgfzygwzxlu6r7s, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 4276553 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 54860 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gdp52wp4xv6yyd3nx9pnysfn89znjsen8quyvwfkgycnjs29x9ryxveh8prygsfecye5dj, validator: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r }, amount: Amount { raw: 145154 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { raw: 1941194 } }, Unbond { id: BondId { source: Established: 
atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 93 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw3689rrqdp58pznydecgyu5xs3cxdznvd6xxsmng32zxumrxvpj8qenydejgfzygwzxlu6r7s, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { raw: 9992596 } }, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { raw: 504024 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 5640962 } }, InitValidator { address: Established: atest1v4ehgw368qmnzsfeg5urqw2p8pq5gsf4ggcnqdz9xvc5vsfjxc6nvsekgsmyv3jp8ym52wph0hm33r, consensus_key: Ed25519(PublicKey(VerificationKey("2bccbdf7490f98b2e258a399b75c74bd1b71e9f6f4cc2160edbe3186e23d30e4"))), commission_rate: Dec(427420.000000), max_commission_rate_change: Dec(574220.000000) }, Misbehavior { address: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, slash_type: DuplicateVote, infraction_epoch: Epoch(12), height: 0 }, Bond { id: BondId { source: Implicit: atest1d9khqw368pq5g3f3gceygvpjxuenyveexary2wzx8ycnw3zpg9zrvvp4xger2dzyxuunwvjz4n93ww, validator: Established: atest1v4ehgw36gc6njdpcxycnwv2zx9zrsdjxg9zrqvjzxuurxve5x3rryde48pqnjsekg3przs2z8dz595 }, amount: Amount { raw: 4019468 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36xscrsve3geqnwd2x8qmrzwpe89z5zsekgvenqwp5x4p5ydzp8qmrz3zpgcmnydjptyfc40, validator: Established: 
atest1v4ehgw36gvcrgdeex5ensvfkgccyxve3x3pnys6xxpzr2s6rxuurv3j9g4pyysjzxq6ygdzyt2wxa3 }, amount: Amount { raw: 5683219 } }, Bond { id: BondId { source: Implicit: atest1d9khqw368pz5zd3sgeqnxve4g9ryv3zzggerqdf3xqmrywfng4zrs3pkx5enydesg5mr2v6p4v8rst, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 6886837 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 7852494 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 749047 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gdp52wp4xv6yyd3nx9pnysfn89znjsen8quyvwfkgycnjs29x9ryxveh8prygsfecye5dj, validator: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r }, amount: Amount { raw: 9097957 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g9rryv3sx5c5v33sgsmrsd3egerrgdenx3zy2sfex4prvsehxcurydjx8qu5zdz9f2npes, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 6781624 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw36gve5zdf4gccygv6zxgcnxwzrgv65x32rg4zrxv34g9prvs2pxqmnzve5xvuns33czq9awp, validator: Established: atest1v4ehgw36xsuy2vzx89pygd35gsurs3f3xsenz3pnxgmnws29xfrrzvp3xeq5yvjygsmnz33crlu8uu }, amount: Amount { raw: 123577 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gvmrzsf58yurxsjxgfqnqv6yg56nwv69xv6yv3zpx9znv3jpg4p5zdpnxpznzv3hq7q2az, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, 
amount: Amount { raw: 1515359 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 9136180 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368yenjvpjxcu5vv33x3zrqw2zgg6nsvzrx9prxd2pgsmyxwfjxgunvs3exerrydp3csdkvr, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 190090 } }, Unbond { id: BondId { source: Implicit: atest1d9khqw368pz5zd3sgeqnxve4g9ryv3zzggerqdf3xqmrywfng4zrs3pkx5enydesg5mr2v6p4v8rst, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 2817512 } }, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { raw: 5207922 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36x5uyvv2px4pr2d3cgdpry3zzxq6nsd6yg5mnwsjzgcervdpegsunqd3kgy6ygvpjyvyhzj, validator: Established: atest1v4ehgw368qcrqd2ygvmyyvf4g9qnvv3kxucrwv3hxg6ryve4x56r233cxucnysjrxsmygdj9yer4pz }, amount: Amount { raw: 70961 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gdzns33sgsmr2wz9x4rrxdenx3zyysfcxcmry32pgeznjw2zx4zrysjxgeryxsfc2etu33, validator: Established: atest1v4ehgw36ggcrz3zygyunqsfjggmnq33h8ycnsdphxepnsve4gerrss2pgfp5z3psgccrj33klenl5r }, amount: Amount { raw: 9056961 } }, Unbond { id: BondId { source: Established: atest1v4ehgw36gvmrzsf58yurxsjxgfqnqv6yg56nwv69xv6yv3zpx9znv3jpg4p5zdpnxpznzv3hq7q2az, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }, amount: Amount { raw: 1451932 } }, Bond { id: BondId { source: Implicit: 
atest1d9khqw36gcunwdzyxpz5xs2rxuuyxvfcgfznzd3hg9zrzdfnx5crwv69ggcnvsjpgc65gd33uuymj8, validator: Established: atest1v4ehgw36xucy2dfcxdzrxvpjx5uygwzrxpzrjs3jx4p5vvjrxdq5yvpjx5e5zs3jxdqng3pcplv2ch }, amount: Amount { raw: 1463719 } }, Withdraw { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 } }, Bond { id: BondId { source: Implicit: atest1d9khqw36x5uyvv2px4pr2d3cgdpry3zzxq6nsd6yg5mnwsjzgcervdpegsunqd3kgy6ygvpjyvyhzj, validator: Established: atest1v4ehgw368qcrqd2ygvmyyvf4g9qnvv3kxucrwv3hxg6ryve4x56r233cxucnysjrxsmygdj9yer4pz }, amount: Amount { raw: 792907 } }, InitValidator { address: Established: atest1v4ehgw36xy65xd3cgvcyxsesgsunys3hgg6nyvekxgerz3fjxaprqvfhxser2wphg5mnjdzpf7edt5, consensus_key: Ed25519(PublicKey(VerificationKey("8f6eeade76a7ce1ccf1d3138807774696d51fcf2c8879e53aa2b082e34eec42b"))), commission_rate: Dec(592790.000000), max_commission_rate_change: Dec(854710.000000) }]) diff --git a/proof_of_stake/src/btree_set.rs b/proof_of_stake/src/btree_set.rs deleted file mode 100644 index 48460b2f0b..0000000000 --- a/proof_of_stake/src/btree_set.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! This module adds shims for BTreeSet methods that not yet stable. - -use std::collections::BTreeSet; - -/// This trait adds shims for BTreeSet methods that not yet stable. They have -/// the same behavior as their nightly counterparts, but additionally require -/// `Clone` bound on the element type (for `pop_first` and `pop_last`). -pub trait BTreeSetShims { - /// Returns a reference to the first value in the set, if any. This value is - /// always the minimum of all values in the set. - fn first_shim(&self) -> Option<&T>; - - /// Returns a reference to the last value in the set, if any. This value is - /// always the maximum of all values in the set. 
- fn last_shim(&self) -> Option<&T>; - - /// Removes the first value from the set and returns it, if any. The first - /// value is always the minimum value in the set. - fn pop_first_shim(&mut self) -> Option; - - /// Removes the last value from the set and returns it, if any. The last - /// value is always the maximum value in the set. - fn pop_last_shim(&mut self) -> Option; -} - -impl BTreeSetShims for BTreeSet { - fn first_shim(&self) -> Option<&T> { - let mut iter = self.iter(); - iter.next() - } - - fn last_shim(&self) -> Option<&T> { - let iter = self.iter(); - iter.last() - } - - fn pop_first_shim(&mut self) -> Option { - let mut iter = self.iter(); - let first = iter.next().cloned(); - if let Some(first) = first { - return self.take(&first); - } - None - } - - fn pop_last_shim(&mut self) -> Option { - let iter = self.iter(); - let last = iter.last().cloned(); - if let Some(last) = last { - return self.take(&last); - } - None - } -} diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index d5a567fc94..796827de1f 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use std::fmt::Debug; use std::marker::PhantomData; -use std::ops; +use std::{cmp, ops}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use namada_core::ledger::storage_api; @@ -16,6 +16,7 @@ use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::types::storage::{self, Epoch}; use crate::parameters::PosParams; +use crate::read_pos_params; /// Sub-key holding a lazy map in storage pub const LAZY_MAP_SUB_KEY: &str = "lazy_map"; @@ -25,60 +26,51 @@ pub const LAST_UPDATE_SUB_KEY: &str = "last_update"; pub const OLDEST_EPOCH_SUB_KEY: &str = "oldest_epoch"; /// Default number of past epochs to keep. 
-const DEFAULT_NUM_PAST_EPOCHS: u64 = 2; +pub const DEFAULT_NUM_PAST_EPOCHS: u64 = 2; /// Discrete epoched data handle -pub struct Epoched< - Data, - FutureEpochs, - const NUM_PAST_EPOCHS: u64 = DEFAULT_NUM_PAST_EPOCHS, - SON = collections::Simple, -> { +pub struct Epoched { storage_prefix: storage::Key, future_epochs: PhantomData, + past_epochs: PhantomData, data: PhantomData, phantom_son: PhantomData, } /// Discrete epoched data handle with nested lazy structure -pub type NestedEpoched< - Data, - FutureEpochs, - const NUM_PAST_EPOCHS: u64 = DEFAULT_NUM_PAST_EPOCHS, -> = Epoched; +pub type NestedEpoched = + Epoched; /// Delta epoched data handle -pub struct EpochedDelta { +pub struct EpochedDelta { storage_prefix: storage::Key, future_epochs: PhantomData, + past_epochs: PhantomData, data: PhantomData, } -impl - Epoched +impl + Epoched where FutureEpochs: EpochOffset, + PastEpochs: EpochOffset, { /// Open the handle pub fn open(key: storage::Key) -> Self { Self { storage_prefix: key, future_epochs: PhantomData, + past_epochs: PhantomData, data: PhantomData, phantom_son: PhantomData, } } - - /// Return the number of past epochs to keep data for - pub fn get_num_past_epochs() -> u64 { - NUM_PAST_EPOCHS - } } -impl - Epoched +impl Epoched where FutureEpochs: EpochOffset, + PastEpochs: EpochOffset, Data: BorshSerialize + BorshDeserialize + 'static + Debug, { /// Initialize new epoched data. Sets the head to the given value. @@ -125,7 +117,8 @@ where Some(_) => return Ok(res), None => { if epoch.0 > 0 - && epoch > Self::sub_past_epochs(last_update) + && epoch + > Self::sub_past_epochs(params, last_update) { epoch = Epoch(epoch.0 - 1); } else { @@ -149,7 +142,8 @@ where where S: StorageWrite + StorageRead, { - self.update_data(storage, current_epoch)?; + let params = read_pos_params(storage)?; + self.update_data(storage, ¶ms, current_epoch)?; self.set_at_epoch(storage, value, current_epoch, offset) } @@ -174,9 +168,10 @@ where /// kept is dropped. 
If the oldest stored epoch is not already /// associated with some value, the latest value from the dropped /// values, if any, is associated with it. - fn update_data( + pub fn update_data( &self, storage: &mut S, + params: &PosParams, current_epoch: Epoch, ) -> storage_api::Result<()> where @@ -188,11 +183,10 @@ where (last_update, oldest_epoch) { let oldest_to_keep = current_epoch - .0 - .checked_sub(NUM_PAST_EPOCHS) + .checked_sub(PastEpochs::value(params)) .unwrap_or_default(); - if oldest_epoch.0 < oldest_to_keep { - let diff = oldest_to_keep - oldest_epoch.0; + if oldest_epoch < oldest_to_keep { + let diff = u64::from(oldest_to_keep - oldest_epoch); // Go through the epochs before the expected oldest epoch and // keep the latest one tracing::debug!( @@ -211,7 +205,8 @@ where } } if let Some(latest_value) = latest_value { - let new_oldest_epoch = Self::sub_past_epochs(current_epoch); + let new_oldest_epoch = + Self::sub_past_epochs(params, current_epoch); // TODO we can add `contains_key` to LazyMap if data_handler.get(storage, &new_oldest_epoch)?.is_none() { tracing::debug!( @@ -269,8 +264,10 @@ where LazyMap::open(key) } - fn sub_past_epochs(epoch: Epoch) -> Epoch { - Epoch(epoch.0.checked_sub(NUM_PAST_EPOCHS).unwrap_or_default()) + fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { + epoch + .checked_sub(PastEpochs::value(params)) + .unwrap_or_default() } fn get_oldest_epoch_storage_key(&self) -> storage::Key { @@ -303,10 +300,11 @@ where } } -impl - Epoched +impl + Epoched where FutureEpochs: EpochOffset, + PastEpochs: EpochOffset, Data: LazyCollection + Debug, { /// Get the inner LazyCollection value by the outer key @@ -323,7 +321,7 @@ where NestedMap::open(key) } - /// Initialize new nested data at the given epoch offset. + /// Initialize new nested data at the given epoch. 
pub fn init( &self, storage: &mut S, @@ -333,7 +331,8 @@ where S: StorageWrite + StorageRead, { let key = self.get_last_update_storage_key(); - storage.write(&key, epoch) + storage.write(&key, epoch)?; + self.set_oldest_epoch(storage, epoch) } fn get_last_update_storage_key(&self) -> storage::Key { @@ -342,7 +341,7 @@ where .unwrap() } - /// TODO + /// Get the epoch of the most recent update pub fn get_last_update( &self, storage: &S, @@ -354,7 +353,7 @@ where storage.read(&key) } - /// TODO + /// Set the epoch of the most recent update pub fn set_last_update( &self, storage: &mut S, @@ -367,36 +366,106 @@ where storage.write(&key, current_epoch) } - /// TODO - pub fn sub_past_epochs(epoch: Epoch) -> Epoch { - Epoch(epoch.0.checked_sub(NUM_PAST_EPOCHS).unwrap_or_default()) + fn get_oldest_epoch_storage_key(&self) -> storage::Key { + self.storage_prefix + .push(&OLDEST_EPOCH_SUB_KEY.to_owned()) + .unwrap() + } + + fn get_oldest_epoch( + &self, + storage: &S, + ) -> storage_api::Result> + where + S: StorageRead, + { + let key = self.get_oldest_epoch_storage_key(); + storage.read(&key) + } + + fn set_oldest_epoch( + &self, + storage: &mut S, + new_oldest_epoch: Epoch, + ) -> storage_api::Result<()> + where + S: StorageRead + StorageWrite, + { + let key = self.get_oldest_epoch_storage_key(); + storage.write(&key, new_oldest_epoch) + } + + fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { + epoch + .checked_sub(PastEpochs::value(params)) + .unwrap_or_default() } - // pub fn get_inner_by_epoch(&self) -> storage_api::Result {} + /// Update data by removing old epochs + // TODO: should we consider more complex handling of empty epochs in the + // data below? 
+ pub fn update_data( + &self, + storage: &mut S, + params: &PosParams, + current_epoch: Epoch, + ) -> storage_api::Result<()> + where + S: StorageRead + StorageWrite, + { + let last_update = self.get_last_update(storage)?; + let oldest_epoch = self.get_oldest_epoch(storage)?; + if let (Some(last_update), Some(oldest_epoch)) = + (last_update, oldest_epoch) + { + let oldest_to_keep = current_epoch + .checked_sub(PastEpochs::value(params)) + .unwrap_or_default(); + if oldest_epoch < oldest_to_keep { + let diff = u64::from(oldest_to_keep - oldest_epoch); + // Go through the epochs before the expected oldest epoch and + // keep the latest one + tracing::debug!( + "Trimming nested epoched data in epoch {current_epoch}, \ + last updated at {last_update}." + ); + let data_handler = self.get_data_handler(); + // Remove data before the new oldest epoch, keep the latest + // value + for epoch in oldest_epoch.iter_range(diff) { + let was_data = data_handler.remove_all(storage, &epoch)?; + if was_data { + tracing::debug!( + "Removed inner map data at epoch {epoch}" + ); + } else { + tracing::debug!("WARNING: was no data in {epoch}"); + } + } + let new_oldest_epoch = + Self::sub_past_epochs(params, current_epoch); + + // if !data_handler.contains(storage, &new_oldest_epoch)? { + // panic!("WARNING: no data existing in + // {new_oldest_epoch}"); } + self.set_oldest_epoch(storage, new_oldest_epoch)?; + + // Update the epoch of the last update to the current epoch + let key = self.get_last_update_storage_key(); + storage.write(&key, current_epoch)?; + return Ok(()); + } + } - // TODO: we may need an update_data() method, figure out when it should be - // called (in at()?) 
+ Ok(()) + } } -// impl -// Epoched< -// LazyMap, -// FutureEpochs, -// NUM_PAST_EPOCHS, -// collections::Nested, -// > -// where -// FutureEpochs: EpochOffset, -// { -// pub fn get_inner_by_epoch(&self, epoch: &Epoch) -> LazyMap { -// self.at() -// } -// } - -impl - EpochedDelta +impl + EpochedDelta where FutureEpochs: EpochOffset, + PastEpochs: EpochOffset, Data: BorshSerialize + BorshDeserialize + ops::Add @@ -409,6 +478,7 @@ where Self { storage_prefix: key, future_epochs: PhantomData, + past_epochs: PhantomData, data: PhantomData, } } @@ -434,7 +504,6 @@ where &self, storage: &S, epoch: Epoch, - _params: &PosParams, ) -> storage_api::Result> where S: StorageRead, @@ -457,7 +526,7 @@ where None => Ok(None), Some(last_update) => { let data_handler = self.get_data_handler(); - let start_epoch = Self::sub_past_epochs(last_update); + let start_epoch = Self::sub_past_epochs(params, last_update); let future_most_epoch = last_update + FutureEpochs::value(params); @@ -482,6 +551,27 @@ where } } + /// Initialize or add a value to the current delta value at the given epoch + /// offset. + pub fn add( + &self, + storage: &mut S, + value: Data, + current_epoch: Epoch, + offset: u64, + ) -> storage_api::Result<()> + where + S: StorageWrite + StorageRead, + Data: Default, + { + let params = read_pos_params(storage)?; + self.update_data(storage, ¶ms, current_epoch)?; + let cur_value = self + .get_delta_val(storage, current_epoch + offset)? + .unwrap_or_default(); + self.set_at_epoch(storage, cur_value + value, current_epoch, offset) + } + /// Initialize or set the value at the given epoch offset. 
pub fn set( &self, @@ -493,7 +583,8 @@ where where S: StorageWrite + StorageRead, { - self.update_data(storage, current_epoch)?; + let params = read_pos_params(storage)?; + self.update_data(storage, ¶ms, current_epoch)?; self.set_at_epoch(storage, value, current_epoch, offset) } @@ -519,6 +610,7 @@ where fn update_data( &self, storage: &mut S, + params: &PosParams, current_epoch: Epoch, ) -> storage_api::Result<()> where @@ -530,11 +622,10 @@ where (last_update, oldest_epoch) { let oldest_to_keep = current_epoch - .0 - .checked_sub(NUM_PAST_EPOCHS) + .checked_sub(PastEpochs::value(params)) .unwrap_or_default(); - if oldest_epoch.0 < oldest_to_keep { - let diff = oldest_to_keep - oldest_epoch.0; + if oldest_epoch < oldest_to_keep { + let diff = u64::from(oldest_to_keep - oldest_epoch); // Go through the epochs before the expected oldest epoch and // sum them into it tracing::debug!( @@ -557,7 +648,8 @@ where } } if let Some(sum) = sum { - let new_oldest_epoch = Self::sub_past_epochs(current_epoch); + let new_oldest_epoch = + Self::sub_past_epochs(params, current_epoch); let new_oldest_epoch_data = match data_handler.get(storage, &new_oldest_epoch)? 
{ Some(oldest_epoch_data) => oldest_epoch_data + sum, @@ -631,8 +723,10 @@ where handle.iter(storage)?.collect() } - fn sub_past_epochs(epoch: Epoch) -> Epoch { - Epoch(epoch.0.checked_sub(NUM_PAST_EPOCHS).unwrap_or_default()) + fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch { + epoch + .checked_sub(PastEpochs::value(params)) + .unwrap_or_default() } fn get_oldest_epoch_storage_key(&self) -> storage::Key { @@ -679,7 +773,7 @@ where )] pub struct OffsetZero; impl EpochOffset for OffsetZero { - fn value(_paras: &PosParams) -> u64 { + fn value(_params: &PosParams) -> u64 { 0 } @@ -688,6 +782,29 @@ impl EpochOffset for OffsetZero { } } +/// Default offset +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetDefaultNumPastEpochs; +impl EpochOffset for OffsetDefaultNumPastEpochs { + fn value(_params: &PosParams) -> u64 { + DEFAULT_NUM_PAST_EPOCHS + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::DefaultNumPastEpoch + } +} + /// Offset at pipeline length. #[derive( Debug, @@ -757,11 +874,182 @@ impl EpochOffset for OffsetPipelinePlusUnbondingLen { } } +/// Offset at the slash processing delay. +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetSlashProcessingLen; +impl EpochOffset for OffsetSlashProcessingLen { + fn value(params: &PosParams) -> u64 { + params.slash_processing_epoch_offset() + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::SlashProcessingLen + } +} + +/// Offset at the slash processing delay plus the default num past epochs. 
+#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetSlashProcessingLenPlus; +impl EpochOffset for OffsetSlashProcessingLenPlus { + fn value(params: &PosParams) -> u64 { + params.slash_processing_epoch_offset() + DEFAULT_NUM_PAST_EPOCHS + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::SlashProcessingLenPlus + } +} + +/// Maximum offset. +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetMaxU64; +impl EpochOffset for OffsetMaxU64 { + fn value(_params: &PosParams) -> u64 { + u64::MAX + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::MaxU64 + } +} + +/// Offset at max proposal period. +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetMaxProposalPeriod; +impl EpochOffset for OffsetMaxProposalPeriod { + fn value(params: &PosParams) -> u64 { + params.max_proposal_period + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::MaxProposalPeriod + } +} + +/// Offset at the max proposal period, plus the default num past epochs. +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetMaxProposalPeriodPlus; +impl EpochOffset for OffsetMaxProposalPeriodPlus { + fn value(params: &PosParams) -> u64 { + params.max_proposal_period + DEFAULT_NUM_PAST_EPOCHS + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::MaxProposalPeriodPlus + } +} + +/// Offset at the larger of the slash processing length and the max proposal +/// period. 
+#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetMaxProposalPeriodOrSlashProcessingLen; +impl EpochOffset for OffsetMaxProposalPeriodOrSlashProcessingLen { + fn value(params: &PosParams) -> u64 { + cmp::max( + params.slash_processing_epoch_offset(), + params.max_proposal_period, + ) + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::MaxProposalPeriodOrSlashProcessingLen + } +} + +/// Offset at the larger of the slash processing length and the max proposal +/// period, plus the default num past epochs. +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetMaxProposalPeriodOrSlashProcessingLenPlus; +impl EpochOffset for OffsetMaxProposalPeriodOrSlashProcessingLenPlus { + fn value(params: &PosParams) -> u64 { + cmp::max( + params.slash_processing_epoch_offset(), + params.max_proposal_period, + ) + DEFAULT_NUM_PAST_EPOCHS + } + + fn dyn_offset() -> DynEpochOffset { + DynEpochOffset::MaxProposalPeriodOrSlashProcessingLenPlus + } +} + /// Offset length dynamic choice. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub enum DynEpochOffset { /// Zero offset Zero, + /// Offset at the const default num past epochs (above) + DefaultNumPastEpoch, /// Offset at pipeline length - 1 PipelineLenMinusOne, /// Offset at pipeline length. @@ -770,6 +1058,22 @@ pub enum DynEpochOffset { UnbondingLen, /// Offset at pipeline + unbonding length. PipelinePlusUnbondingLen, + /// Offset at slash processing delay (unbonding + + /// cubic_slashing_window + 1). 
+ SlashProcessingLen, + /// Offset at slash processing delay plus the defaul num past epochs + SlashProcessingLenPlus, + /// Offset at the max proposal period + MaxProposalPeriod, + /// Offset at the max proposal period plus the default num past epochs + MaxProposalPeriodPlus, + /// Offset at the larger of max proposal period or slash processing delay + MaxProposalPeriodOrSlashProcessingLen, + /// Offset at the larger of max proposal period or slash processing delay, + /// plus the default num past epochs + MaxProposalPeriodOrSlashProcessingLenPlus, + /// Offset of the max u64 value + MaxU64, } /// Which offset should be used to set data. The value is read from @@ -777,7 +1081,7 @@ pub enum DynEpochOffset { pub trait EpochOffset: Debug + Clone + BorshDeserialize + BorshSerialize + BorshSchema { - /// Find the value of a given offset from PoS parameters. + /// Find the value of a given offset from PoS and Gov parameters. fn value(params: &PosParams) -> u64; /// Convert to [`DynEpochOffset`] fn dyn_offset() -> DynEpochOffset; @@ -786,19 +1090,23 @@ pub trait EpochOffset: #[cfg(test)] mod test { use namada_core::ledger::storage::testing::TestWlStorage; + use namada_core::types::address::testing::established_address_1; + use namada_core::types::dec::Dec; + use namada_core::types::{key, token}; use test_log::test; use super::*; + use crate::types::GenesisValidator; #[test] fn test_epoched_data_trimming() -> storage_api::Result<()> { - let mut s = TestWlStorage::default(); + let mut s = init_storage()?; - const NUM_PAST_EPOCHS: u64 = 2; let key_prefix = storage::Key::parse("test").unwrap(); - let epoched = Epoched::::open( - key_prefix, - ); + let epoched = + Epoched::::open( + key_prefix, + ); let data_handler = epoched.get_data_handler(); assert!(epoched.get_last_update(&s)?.is_none()); assert!(epoched.get_oldest_epoch(&s)?.is_none()); @@ -863,13 +1171,11 @@ mod test { #[test] fn test_epoched_without_data_trimming() -> storage_api::Result<()> { - let mut s = 
TestWlStorage::default(); + let mut s = init_storage()?; - const NUM_PAST_EPOCHS: u64 = u64::MAX; let key_prefix = storage::Key::parse("test").unwrap(); - let epoched = Epoched::::open( - key_prefix, - ); + let epoched = + Epoched::::open(key_prefix); let data_handler = epoched.get_data_handler(); assert!(epoched.get_last_update(&s)?.is_none()); assert!(epoched.get_oldest_epoch(&s)?.is_none()); @@ -933,12 +1239,11 @@ mod test { #[test] fn test_epoched_delta_data_trimming() -> storage_api::Result<()> { - let mut s = TestWlStorage::default(); + let mut s = init_storage()?; - const NUM_PAST_EPOCHS: u64 = 2; let key_prefix = storage::Key::parse("test").unwrap(); let epoched = - EpochedDelta::::open( + EpochedDelta::::open( key_prefix, ); let data_handler = epoched.get_data_handler(); @@ -1007,13 +1312,12 @@ mod test { #[test] fn test_epoched_delta_without_data_trimming() -> storage_api::Result<()> { - let mut s = TestWlStorage::default(); + let mut s = init_storage()?; // Nothing should ever get trimmed - const NUM_PAST_EPOCHS: u64 = u64::MAX; let key_prefix = storage::Key::parse("test").unwrap(); let epoched = - EpochedDelta::::open( + EpochedDelta::::open( key_prefix, ); let data_handler = epoched.get_data_handler(); @@ -1074,91 +1378,44 @@ mod test { assert_eq!(data_handler.get(&s, &Epoch(9))?, None); assert_eq!(data_handler.get(&s, &Epoch(10))?, Some(6)); + epoched.add(&mut s, 15, Epoch(10), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(10))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(4)); + assert_eq!(data_handler.get(&s, &Epoch(5))?, Some(5)); + assert_eq!(data_handler.get(&s, &Epoch(6))?, None); + assert_eq!(data_handler.get(&s, &Epoch(7))?, None); + assert_eq!(data_handler.get(&s, &Epoch(8))?, None); + 
assert_eq!(data_handler.get(&s, &Epoch(9))?, None); + assert_eq!(data_handler.get(&s, &Epoch(10))?, Some(21)); + Ok(()) } - // use namada_core::ledger::storage::testing::TestStorage; - // use namada_core::types::address::{self, Address}; - // use namada_core::types::storage::Key; - // - // use super::{ - // storage, storage_api, Epoch, LazyMap, NestedEpoched, NestedMap, - // OffsetPipelineLen, - // }; - // - // #[test] - // fn testing_epoched_new() -> storage_api::Result<()> { - // let mut storage = TestStorage::default(); - // - // let key1 = storage::Key::parse("test_nested1").unwrap(); - // let nested1 = - // NestedEpoched::, OffsetPipelineLen>::open( - // key1, - // ); - // nested1.init(&mut storage, Epoch(0))?; - // - // let key2 = storage::Key::parse("test_nested2").unwrap(); - // let nested2 = NestedEpoched::< - // NestedMap>, - // OffsetPipelineLen, - // >::open(key2); - // nested2.init(&mut storage, Epoch(0))?; - // - // dbg!(&nested1.get_last_update_storage_key()); - // dbg!(&nested1.get_last_update(&storage)); - // - // nested1.at(&Epoch(0)).insert( - // &mut storage, - // address::testing::established_address_1(), - // 1432, - // )?; - // dbg!(&nested1.at(&Epoch(0)).iter(&mut storage)?.next()); - // dbg!(&nested1.at(&Epoch(1)).iter(&mut storage)?.next()); - // - // nested2.at(&Epoch(0)).at(&100).insert( - // &mut storage, - // 1, - // address::testing::established_address_2(), - // )?; - // dbg!(&nested2.at(&Epoch(0)).iter(&mut storage)?.next()); - // dbg!(&nested2.at(&Epoch(1)).iter(&mut storage)?.next()); - // - // dbg!(&nested_epoched.get_epoch_key(&Epoch::from(0))); - // - // let epoch = Epoch::from(0); - // let addr = address::testing::established_address_1(); - // let amount: u64 = 234235; - // - // nested_epoched - // .at(&epoch) - // .insert(&mut storage, addr.clone(), amount)?; - // - // let epoch = epoch + 3_u64; - // nested_epoched.at(&epoch).insert( - // &mut storage, - // addr.clone(), - // 999_u64, - // )?; - // - // 
dbg!(nested_epoched.contains_epoch(&storage, &Epoch::from(0))?); - // dbg!( - // nested_epoched - // .get_data_handler() - // .get_data_key(&Epoch::from(3)) - // ); - // dbg!(nested_epoched.contains_epoch(&storage, &Epoch::from(3))?); - // dbg!( - // nested_epoched - // .at(&Epoch::from(0)) - // .get(&storage, &addr.clone())? - // ); - // dbg!( - // nested_epoched - // .at(&Epoch::from(3)) - // .get(&storage, &addr.clone())? - // ); - // dbg!(nested_epoched.at(&Epoch::from(3)).get_data_key(&addr)); - // - // Ok(()) - // } + fn init_storage() -> storage_api::Result { + let mut s = TestWlStorage::default(); + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + gov_params.init_storage(&mut s)?; + crate::init_genesis( + &mut s, + &PosParams::default(), + [GenesisValidator { + address: established_address_1(), + tokens: token::Amount::native_whole(1_000), + consensus_key: key::testing::keypair_1().to_public(), + protocol_key: key::testing::keypair_2().to_public(), + eth_hot_key: key::testing::keypair_3().to_public(), + eth_cold_key: key::testing::keypair_3().to_public(), + commission_rate: Dec::new(1, 1).expect("Dec creation failed"), + max_commission_rate_change: Dec::new(1, 1) + .expect("Dec creation failed"), + }] + .into_iter(), + Epoch::default(), + )?; + Ok(s) + } } diff --git a/proof_of_stake/src/error.rs b/proof_of_stake/src/error.rs new file mode 100644 index 0000000000..d3eeecb3c8 --- /dev/null +++ b/proof_of_stake/src/error.rs @@ -0,0 +1,189 @@ +/// Custom error types +use std::num::TryFromIntError; + +use namada_core::ledger::storage_api; +use namada_core::types::address::Address; +use namada_core::types::dec::Dec; +use namada_core::types::storage::Epoch; +use thiserror::Error; + +use crate::rewards; +use crate::types::{BondId, ValidatorState}; + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum GenesisError { + #[error("Voting power overflow: {0}")] + VotingPowerOverflow(TryFromIntError), +} + 
+#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum InflationError { + #[error("Error in calculating rewards: {0}")] + Rewards(rewards::RewardsError), + #[error("Expected validator {0} to be in consensus set but got: {1:?}")] + ExpectedValidatorInConsensus(Address, Option), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum BecomeValidatorError { + #[error("The given address {0} is already a validator")] + AlreadyValidator(Address), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum BondError { + #[error("The given address {0} is not a validator address")] + NotAValidator(Address), + #[error( + "The given source address {0} is a validator address. Validators may \ + not delegate." + )] + SourceMustNotBeAValidator(Address), + #[error("The given validator address {0} is inactive")] + InactiveValidator(Address), + #[error("Voting power overflow: {0}")] + VotingPowerOverflow(TryFromIntError), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum UnbondError { + #[error("No bond could be found")] + NoBondFound, + #[error( + "Trying to withdraw more tokens ({0}) than the amount bonded ({0})" + )] + UnbondAmountGreaterThanBond(String, String), + #[error("No bonds found for the validator {0}")] + ValidatorHasNoBonds(Address), + #[error("Voting power not found for the validator {0}")] + ValidatorHasNoVotingPower(Address), + #[error("Voting power overflow: {0}")] + VotingPowerOverflow(TryFromIntError), + #[error("Trying to unbond from a frozen validator: {0}")] + ValidatorIsFrozen(Address), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum WithdrawError { + #[error("No unbond could be found for {0}")] + NoUnbondFound(BondId), + #[error("No unbond may be withdrawn yet for {0}")] + NoWithdrawableUnbond(BondId), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum SlashError { + #[error("The validator {0} has no total deltas value")] + ValidatorHasNoTotalDeltas(Address), + #[error("The validator {0} has no 
voting power")] + ValidatorHasNoVotingPower(Address), + #[error("Unexpected slash token change")] + InvalidSlashChange(i128), + #[error("Voting power overflow: {0}")] + VotingPowerOverflow(TryFromIntError), + #[error("Unexpected negative stake {0} for validator {1}")] + NegativeStake(i128, Address), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum CommissionRateChangeError { + #[error("Unexpected negative commission rate {0} for validator {1}")] + NegativeRate(Dec, Address), + #[error( + "Unexpected commission rate {0} larger than 1.0 for validator {1}" + )] + LargerThanOne(Dec, Address), + #[error("Rate change of {0} is too large for validator {1}")] + RateChangeTooLarge(Dec, Address), + #[error( + "There is no maximum rate change written in storage for validator {0}" + )] + NoMaxSetInStorage(Address), + #[error("Cannot write to storage for validator {0}")] + CannotWrite(Address), + #[error("Cannot read storage for validator {0}")] + CannotRead(Address), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum UnjailValidatorError { + #[error("The given address {0} is not a validator address")] + NotAValidator(Address), + #[error("The given address {0} is not jailed in epoch {1}")] + NotJailed(Address, Epoch), + #[error( + "The given address {0} is not eligible for unnjailing until epoch \ + {1}: current epoch is {2}" + )] + NotEligible(Address, Epoch, Epoch), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum RedelegationError { + #[error("The redelegation is chained")] + IsChainedRedelegation, + #[error("The source and destination validator must be different")] + RedelegationSrcEqDest, + #[error("The delegator must not be a validator")] + DelegatorIsValidator, + #[error("The address {0} must be a validator")] + NotAValidator(Address), +} + +impl From for storage_api::Error { + fn from(err: BecomeValidatorError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: BondError) -> Self { + 
Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: UnbondError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: WithdrawError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: CommissionRateChangeError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: InflationError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: UnjailValidatorError) -> Self { + Self::new(err) + } +} + +impl From for storage_api::Error { + fn from(err: RedelegationError) -> Self { + Self::new(err) + } +} diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 0fbbf2231b..8a1f2f0b26 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -12,7 +12,6 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] -pub mod btree_set; pub mod epoched; pub mod parameters; pub mod pos_queries; @@ -21,54 +20,56 @@ pub mod storage; pub mod types; // pub mod validation; +mod error; #[cfg(test)] mod tests; use core::fmt::Debug; use std::cmp::{self, Reverse}; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -use std::num::TryFromIntError; use borsh::BorshDeserialize; +pub use error::*; use namada_core::ledger::storage_api::collections::lazy_map::{ - NestedSubKey, SubKey, + Collectable, LazyMap, NestedMap, NestedSubKey, SubKey, }; use namada_core::ledger::storage_api::collections::{LazyCollection, LazySet}; -use namada_core::ledger::storage_api::token::credit_tokens; use namada_core::ledger::storage_api::{ - self, ResultExt, StorageRead, StorageWrite, + self, governance, token, ResultExt, StorageRead, StorageWrite, }; use namada_core::types::address::{Address, InternalAddress}; use namada_core::types::dec::Dec; use namada_core::types::key::{ - common, tm_consensus_key_raw_hash, PublicKeyTmRawHash, + common, protocol_pk_key, tm_consensus_key_raw_hash, 
PublicKeyTmRawHash, }; pub use namada_core::types::storage::{Epoch, Key, KeySeg}; -use namada_core::types::token; use once_cell::unsync::Lazy; -use parameters::PosParams; +pub use parameters::{OwnedPosParams, PosParams}; use rewards::PosRewardsCalculator; use storage::{ bonds_for_source_prefix, bonds_prefix, consensus_keys_key, - get_validator_address_from_bond, into_tm_voting_power, is_bond_key, - is_unbond_key, is_validator_slashes_key, last_block_proposer_key, - params_key, slashes_prefix, unbonds_for_source_prefix, unbonds_prefix, + get_validator_address_from_bond, is_bond_key, is_unbond_key, + is_validator_slashes_key, last_block_proposer_key, params_key, + slashes_prefix, unbonds_for_source_prefix, unbonds_prefix, validator_address_raw_hash_key, validator_last_slash_key, - validator_max_commission_rate_change_key, BondDetails, - BondsAndUnbondsDetail, BondsAndUnbondsDetails, EpochedSlashes, - ReverseOrdTokenAmount, RewardsAccumulator, SlashedAmount, - TotalConsensusStakes, UnbondDetails, ValidatorAddresses, - ValidatorUnbondRecords, + validator_max_commission_rate_change_key, }; -use thiserror::Error; use types::{ - BelowCapacityValidatorSet, BelowCapacityValidatorSets, BondId, Bonds, - CommissionRates, ConsensusValidator, ConsensusValidatorSet, - ConsensusValidatorSets, GenesisValidator, Position, RewardsProducts, Slash, - SlashType, Slashes, TotalDeltas, Unbonds, ValidatorConsensusKeys, - ValidatorDeltas, ValidatorEthColdKeys, ValidatorEthHotKeys, - ValidatorPositionAddresses, ValidatorSetPositions, ValidatorSetUpdate, - ValidatorState, ValidatorStates, VoteInfo, WeightedValidator, + into_tm_voting_power, BelowCapacityValidatorSet, + BelowCapacityValidatorSets, BondDetails, BondId, Bonds, + BondsAndUnbondsDetail, BondsAndUnbondsDetails, CommissionRates, + ConsensusValidator, ConsensusValidatorSet, ConsensusValidatorSets, + DelegatorRedelegatedBonded, DelegatorRedelegatedUnbonded, + EagerRedelegatedBondsMap, EpochedSlashes, GenesisValidator, + 
IncomingRedelegations, OutgoingRedelegations, Position, + RedelegatedBondsOrUnbonds, RedelegatedTokens, ReverseOrdTokenAmount, + RewardsAccumulator, RewardsProducts, Slash, SlashType, SlashedAmount, + Slashes, TotalConsensusStakes, TotalDeltas, TotalRedelegatedBonded, + TotalRedelegatedUnbonded, UnbondDetails, Unbonds, ValidatorAddresses, + ValidatorConsensusKeys, ValidatorDeltas, ValidatorEthColdKeys, + ValidatorEthHotKeys, ValidatorPositionAddresses, ValidatorProtocolKeys, + ValidatorSetPositions, ValidatorSetUpdate, ValidatorState, ValidatorStates, + ValidatorTotalUnbonded, VoteInfo, WeightedValidator, }; /// Address of the PoS account implemented as a native VP @@ -85,165 +86,6 @@ pub fn staking_token_address(storage: &impl StorageRead) -> Address { .expect("Must be able to read native token address") } -/// Number of epochs below the current epoch for which full validator sets are -/// stored -const STORE_VALIDATOR_SETS_LEN: u64 = 2; - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum GenesisError { - #[error("Voting power overflow: {0}")] - VotingPowerOverflow(TryFromIntError), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum InflationError { - #[error("Error in calculating rewards: {0}")] - Rewards(rewards::RewardsError), - #[error("Expected validator {0} to be in consensus set but got: {1:?}")] - ExpectedValidatorInConsensus(Address, Option), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum BecomeValidatorError { - #[error("The given address {0} is already a validator")] - AlreadyValidator(Address), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum BondError { - #[error("The given address {0} is not a validator address")] - NotAValidator(Address), - #[error( - "The given source address {0} is a validator address. Validators may \ - not delegate." 
- )] - SourceMustNotBeAValidator(Address), - #[error("The given validator address {0} is inactive")] - InactiveValidator(Address), - #[error("Voting power overflow: {0}")] - VotingPowerOverflow(TryFromIntError), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum UnbondError { - #[error("No bond could be found")] - NoBondFound, - #[error( - "Trying to withdraw more tokens ({0}) than the amount bonded ({0})" - )] - UnbondAmountGreaterThanBond(String, String), - #[error("No bonds found for the validator {0}")] - ValidatorHasNoBonds(Address), - #[error("Voting power not found for the validator {0}")] - ValidatorHasNoVotingPower(Address), - #[error("Voting power overflow: {0}")] - VotingPowerOverflow(TryFromIntError), - #[error("Trying to unbond from a frozen validator: {0}")] - ValidatorIsFrozen(Address), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum WithdrawError { - #[error("No unbond could be found for {0}")] - NoUnbondFound(BondId), - #[error("No unbond may be withdrawn yet for {0}")] - NoWithdrawableUnbond(BondId), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum SlashError { - #[error("The validator {0} has no total deltas value")] - ValidatorHasNoTotalDeltas(Address), - #[error("The validator {0} has no voting power")] - ValidatorHasNoVotingPower(Address), - #[error("Unexpected slash token change")] - InvalidSlashChange(i128), - #[error("Voting power overflow: {0}")] - VotingPowerOverflow(TryFromIntError), - #[error("Unexpected negative stake {0} for validator {1}")] - NegativeStake(i128, Address), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum CommissionRateChangeError { - #[error("Unexpected negative commission rate {0} for validator {1}")] - NegativeRate(Dec, Address), - #[error("Rate change of {0} is too large for validator {1}")] - RateChangeTooLarge(Dec, Address), - #[error( - "There is no maximum rate change written in storage for validator {0}" - )] - NoMaxSetInStorage(Address), - 
#[error("Cannot write to storage for validator {0}")] - CannotWrite(Address), - #[error("Cannot read storage for validator {0}")] - CannotRead(Address), -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum UnjailValidatorError { - #[error("The given address {0} is not a validator address")] - NotAValidator(Address), - #[error("The given address {0} is not jailed in epoch {1}")] - NotJailed(Address, Epoch), - #[error( - "The given address {0} is not eligible for unnjailing until epoch \ - {1}: current epoch is {2}" - )] - NotEligible(Address, Epoch, Epoch), -} - -impl From for storage_api::Error { - fn from(err: BecomeValidatorError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: BondError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: UnbondError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: WithdrawError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: CommissionRateChangeError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: InflationError) -> Self { - Self::new(err) - } -} - -impl From for storage_api::Error { - fn from(err: UnjailValidatorError) -> Self { - Self::new(err) - } -} - /// Get the storage handle to the epoched consensus validator set pub fn consensus_validator_set_handle() -> ConsensusValidatorSets { let key = storage::consensus_validator_set_key(); @@ -265,6 +107,14 @@ pub fn validator_consensus_key_handle( ValidatorConsensusKeys::open(key) } +/// Get the storage handle to a PoS validator's protocol key key. +pub fn validator_protocol_key_handle( + validator: &Address, +) -> ValidatorProtocolKeys { + let key = protocol_pk_key(validator); + ValidatorProtocolKeys::open(key) +} + /// Get the storage handle to a PoS validator's eth hot key. 
pub fn validator_eth_hot_key_handle( validator: &Address, @@ -319,7 +169,8 @@ pub fn validator_commission_rate_handle( CommissionRates::open(key) } -/// Get the storage handle to a bond +/// Get the storage handle to a bond, which is dynamically updated when +/// unbonding pub fn bond_handle(source: &Address, validator: &Address) -> Bonds { let bond_id = BondId { source: source.clone(), @@ -329,7 +180,8 @@ pub fn bond_handle(source: &Address, validator: &Address) -> Bonds { Bonds::open(key) } -/// Get the storage handle to a validator's total bonds +/// Get the storage handle to a validator's total bonds, which are not updated +/// due to unbonding pub fn total_bonded_handle(validator: &Address) -> Bonds { let key = storage::validator_total_bonded_key(validator); Bonds::open(key) @@ -346,9 +198,9 @@ pub fn unbond_handle(source: &Address, validator: &Address) -> Unbonds { } /// Get the storage handle to a validator's total-unbonded map -pub fn unbond_records_handle(validator: &Address) -> ValidatorUnbondRecords { +pub fn total_unbonded_handle(validator: &Address) -> ValidatorTotalUnbonded { let key = storage::validator_total_unbonded_key(validator); - ValidatorUnbondRecords::open(key) + ValidatorTotalUnbonded::open(key) } /// Get the storage handle to a PoS validator's deltas @@ -394,10 +246,58 @@ pub fn delegator_rewards_products_handle( RewardsProducts::open(key) } -/// Init genesis +/// Get the storage handle to a validator's incoming redelegations +pub fn validator_incoming_redelegations_handle( + validator: &Address, +) -> IncomingRedelegations { + let key = storage::validator_incoming_redelegations_key(validator); + IncomingRedelegations::open(key) +} + +/// Get the storage handle to a validator's outgoing redelegations +pub fn validator_outgoing_redelegations_handle( + validator: &Address, +) -> OutgoingRedelegations { + let key: Key = storage::validator_outgoing_redelegations_key(validator); + OutgoingRedelegations::open(key) +} + +/// Get the storage 
handle to a validator's total redelegated bonds +pub fn validator_total_redelegated_bonded_handle( + validator: &Address, +) -> TotalRedelegatedBonded { + let key: Key = storage::validator_total_redelegated_bonded_key(validator); + TotalRedelegatedBonded::open(key) +} + +/// Get the storage handle to a validator's total redelegated unbonds +pub fn validator_total_redelegated_unbonded_handle( + validator: &Address, +) -> TotalRedelegatedUnbonded { + let key: Key = storage::validator_total_redelegated_unbonded_key(validator); + TotalRedelegatedUnbonded::open(key) +} + +/// Get the storage handle to a delegator's redelegated bonds information +pub fn delegator_redelegated_bonds_handle( + delegator: &Address, +) -> DelegatorRedelegatedBonded { + let key: Key = storage::delegator_redelegated_bonds_key(delegator); + DelegatorRedelegatedBonded::open(key) +} + +/// Get the storage handle to a delegator's redelegated unbonds information +pub fn delegator_redelegated_unbonds_handle( + delegator: &Address, +) -> DelegatorRedelegatedUnbonded { + let key: Key = storage::delegator_redelegated_unbonds_key(delegator); + DelegatorRedelegatedUnbonded::open(key) +} + +/// Init genesis. Requires that the governance parameters are initialized. 
pub fn init_genesis( storage: &mut S, - params: &PosParams, + params: &OwnedPosParams, validators: impl Iterator + Clone, current_epoch: namada_core::types::storage::Epoch, ) -> storage_api::Result<()> @@ -405,9 +305,10 @@ where S: StorageRead + StorageWrite, { tracing::debug!("Initializing PoS genesis"); - write_pos_params(storage, params.clone())?; + write_pos_params(storage, params)?; + let params = read_non_pos_owned_params(storage, params.clone())?; - let mut total_bonded = token::Amount::default(); + let mut total_bonded = token::Amount::zero(); consensus_validator_set_handle().init(storage, current_epoch)?; below_capacity_validator_set_handle().init(storage, current_epoch)?; validator_set_positions_handle().init(storage, current_epoch)?; @@ -417,6 +318,7 @@ where address, tokens, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, @@ -433,7 +335,7 @@ where // validator data insert_validator_into_validator_set( storage, - params, + ¶ms, &address, tokens, current_epoch, @@ -456,6 +358,11 @@ where consensus_key, current_epoch, )?; + validator_protocol_key_handle(&address).init_at_genesis( + storage, + protocol_key, + current_epoch, + )?; validator_eth_hot_key_handle(&address).init_at_genesis( storage, eth_hot_key, @@ -466,20 +373,19 @@ where eth_cold_key, current_epoch, )?; - let delta = token::Change::from(tokens); validator_deltas_handle(&address).init_at_genesis( storage, - delta, + tokens.change(), current_epoch, )?; bond_handle(&address, &address).init_at_genesis( storage, - delta, + tokens, current_epoch, )?; total_bonded_handle(&address).init_at_genesis( storage, - delta, + tokens, current_epoch, )?; validator_commission_rate_handle(&address).init_at_genesis( @@ -501,10 +407,15 @@ where // Credit bonded token amount to the PoS account let staking_token = staking_token_address(storage); - credit_tokens(storage, &staking_token, &ADDRESS, total_bonded)?; + token::credit_tokens(storage, &staking_token, &ADDRESS, total_bonded)?; // Copy 
the genesis validator set into the pipeline epoch as well for epoch in (current_epoch.next()).iter_range(params.pipeline_len) { - copy_validator_sets_and_positions(storage, current_epoch, epoch)?; + copy_validator_sets_and_positions( + storage, + ¶ms, + current_epoch, + epoch, + )?; } tracing::debug!("Genesis initialized"); @@ -517,16 +428,33 @@ pub fn read_pos_params(storage: &S) -> storage_api::Result where S: StorageRead, { - storage + let params = storage .read(¶ms_key()) .transpose() - .expect("PosParams should always exist in storage after genesis") + .expect("PosParams should always exist in storage after genesis")?; + read_non_pos_owned_params(storage, params) +} + +/// Read non-PoS-owned parameters to add them to `OwnedPosParams` to construct +/// `PosParams`. +pub fn read_non_pos_owned_params( + storage: &S, + owned: OwnedPosParams, +) -> storage_api::Result +where + S: StorageRead, +{ + let max_proposal_period = governance::get_max_proposal_period(storage)?; + Ok(PosParams { + owned, + max_proposal_period, + }) } /// Write PoS parameters pub fn write_pos_params( storage: &mut S, - params: PosParams, + params: &OwnedPosParams, ) -> storage_api::Result<()> where S: StorageRead + StorageWrite, @@ -634,42 +562,44 @@ where } /// Read PoS validator's delta value. -pub fn read_validator_delta_value( +pub fn read_validator_deltas_value( storage: &S, - params: &PosParams, validator: &Address, - epoch: namada_core::types::storage::Epoch, + epoch: &namada_core::types::storage::Epoch, ) -> storage_api::Result> where S: StorageRead, { let handle = validator_deltas_handle(validator); - handle.get_delta_val(storage, epoch, params) + handle.get_delta_val(storage, *epoch) } /// Read PoS validator's stake (sum of deltas). -/// Returns `None` when the given address is not a validator address. For a -/// validator with `0` stake, this returns `Ok(token::Amount::default())`. 
+/// For non-validators and validators with `0` stake, this returns the default - +/// `token::Amount::zero()`. pub fn read_validator_stake( storage: &S, params: &PosParams, validator: &Address, epoch: namada_core::types::storage::Epoch, -) -> storage_api::Result> +) -> storage_api::Result where S: StorageRead, { let handle = validator_deltas_handle(validator); let amount = handle .get_sum(storage, epoch, params)? - .map(token::Amount::from_change); + .map(|change| { + debug_assert!(change.non_negative()); + token::Amount::from_change(change) + }) + .unwrap_or_default(); Ok(amount) } /// Add or remove PoS validator's stake delta value pub fn update_validator_deltas( storage: &mut S, - params: &PosParams, validator: &Address, delta: token::Change, current_epoch: namada_core::types::storage::Epoch, @@ -680,7 +610,7 @@ where { let handle = validator_deltas_handle(validator); let val = handle - .get_delta_val(storage, current_epoch + offset, params)? + .get_delta_val(storage, current_epoch + offset)? .unwrap_or_default(); handle.set(storage, val + delta, current_epoch, offset) } @@ -697,7 +627,10 @@ where let handle = total_deltas_handle(); let amnt = handle .get_sum(storage, epoch, params)? - .map(token::Amount::from_change) + .map(|change| { + debug_assert!(change.non_negative()); + token::Amount::from_change(change) + }) .unwrap_or_default(); Ok(amnt) } @@ -848,7 +781,6 @@ where /// Note: for EpochedDelta, write the value to change storage by pub fn update_total_deltas( storage: &mut S, - params: &PosParams, delta: token::Change, current_epoch: namada_core::types::storage::Epoch, offset: u64, @@ -858,7 +790,7 @@ where { let handle = total_deltas_handle(); let val = handle - .get_delta_val(storage, current_epoch + offset, params)? + .get_delta_val(storage, current_epoch + offset)? 
.unwrap_or_default(); handle.set(storage, val + delta, current_epoch, offset) } @@ -920,13 +852,18 @@ pub fn bond_tokens( where S: StorageRead + StorageWrite, { - let amount = amount.change(); tracing::debug!( "Bonding token amount {} at epoch {current_epoch}", amount.to_string_native() ); + if amount.is_zero() { + return Ok(()); + } + let params = read_pos_params(storage)?; let pipeline_epoch = current_epoch + params.pipeline_len; + + // Check that the source is not a validator if let Some(source) = source { if source != validator && is_validator(storage, source)? { return Err( @@ -934,6 +871,8 @@ where ); } } + + // Check that the validator is actually a validator let validator_state_handle = validator_state_handle(validator); let state = validator_state_handle.get(storage, pipeline_epoch, ¶ms)?; if state.is_none() { @@ -942,6 +881,7 @@ where let source = source.unwrap_or(validator); tracing::debug!("Source {} --> Validator {}", source, validator); + let bond_handle = bond_handle(source, validator); let total_bonded_handle = total_bonded_handle(validator); @@ -955,52 +895,27 @@ where } } - tracing::debug!("\nBonds before incrementing:"); - for ep in Epoch::default().iter_range(current_epoch.0 + 3) { - let delta = bond_handle - .get_delta_val(storage, ep, ¶ms)? - .unwrap_or_default(); - if !delta.is_zero() { - tracing::debug!( - "bond ∆ at epoch {}: {}", - ep, - delta.to_string_native() - ); - } + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds before incrementing: {bonds:#?}"); } // Initialize or update the bond at the pipeline offset - let offset = params.pipeline_len; - let cur_remain = bond_handle - .get_delta_val(storage, current_epoch + offset, ¶ms)? - .unwrap_or_default(); - bond_handle.set(storage, cur_remain + amount, current_epoch, offset)?; - let cur_remain_global = total_bonded_handle - .get_delta_val(storage, current_epoch + offset, ¶ms)? 
- .unwrap_or_default(); - total_bonded_handle.set( + bond_handle.add(storage, amount, current_epoch, params.pipeline_len)?; + total_bonded_handle.add( storage, - cur_remain_global + amount, + amount, current_epoch, - offset, + params.pipeline_len, )?; - tracing::debug!("\nBonds after incrementing:"); - for ep in Epoch::default().iter_range(current_epoch.0 + 3) { - let delta = bond_handle - .get_delta_val(storage, ep, ¶ms)? - .unwrap_or_default(); - if !delta.is_zero() { - tracing::debug!( - "bond ∆ at epoch {}: {}", - ep, - delta.to_string_native() - ); - } + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds after incrementing: {bonds:#?}"); } // Update the validator set - // We allow bonding if the validator is jailed, however if jailed, there + // Allow bonding even if the validator is jailed. However, if jailed, there // must be no changes to the validator set. Check at the pipeline epoch. let is_jailed_at_pipeline = matches!( validator_state_handle @@ -1013,32 +928,30 @@ where storage, ¶ms, validator, - amount, - current_epoch, + amount.change(), + pipeline_epoch, )?; } // Update the validator and total deltas update_validator_deltas( storage, - ¶ms, validator, - amount, + amount.change(), current_epoch, - offset, + params.pipeline_len, )?; - update_total_deltas(storage, ¶ms, amount, current_epoch, offset)?; + update_total_deltas( + storage, + amount.change(), + current_epoch, + params.pipeline_len, + )?; // Transfer the bonded tokens from the source to PoS let staking_token = staking_token_address(storage); - transfer_tokens( - storage, - &staking_token, - token::Amount::from_change(amount), - source, - &ADDRESS, - )?; + token::transfer(storage, &staking_token, source, &ADDRESS, amount)?; Ok(()) } @@ -1155,7 +1068,7 @@ fn update_validator_set( params: &PosParams, validator: &Address, token_change: token::Change, - current_epoch: Epoch, + epoch: Epoch, ) -> 
storage_api::Result<()> where S: StorageRead + StorageWrite, @@ -1163,26 +1076,23 @@ where if token_change.is_zero() { return Ok(()); } - let pipeline_epoch = current_epoch + params.pipeline_len; + // let pipeline_epoch = current_epoch + params.pipeline_len; tracing::debug!( - "Update epoch for validator set: {pipeline_epoch}, validator: \ - {validator}" + "Update epoch for validator set: {epoch}, validator: {validator}" ); let consensus_validator_set = consensus_validator_set_handle(); let below_capacity_validator_set = below_capacity_validator_set_handle(); // Validator sets at the pipeline offset - let consensus_val_handle = consensus_validator_set.at(&pipeline_epoch); - let below_capacity_val_handle = - below_capacity_validator_set.at(&pipeline_epoch); + let consensus_val_handle = consensus_validator_set.at(&epoch); + let below_capacity_val_handle = below_capacity_validator_set.at(&epoch); - let tokens_pre = - read_validator_stake(storage, params, validator, pipeline_epoch)? - .unwrap_or_default(); + let tokens_pre = read_validator_stake(storage, params, validator, epoch)?; // tracing::debug!("VALIDATOR STAKE BEFORE UPDATE: {}", tokens_pre); let tokens_post = tokens_pre.change() + token_change; + debug_assert!(tokens_post.non_negative()); let tokens_post = token::Amount::from_change(tokens_post); // If token amounts both before and after the action are below the threshold @@ -1195,12 +1105,8 @@ where // The position is only set when the validator is in consensus or // below_capacity set (not in below_threshold set) - let position = read_validator_set_position( - storage, - validator, - pipeline_epoch, - params, - )?; + let position = + read_validator_set_position(storage, validator, epoch, params)?; if let Some(position) = position { let consensus_vals_pre = consensus_val_handle.at(&tokens_pre); @@ -1234,13 +1140,13 @@ where validator_state_handle(validator).set( storage, ValidatorState::BelowThreshold, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; 
// Remove the validator's position from storage validator_set_positions_handle() - .at(&pipeline_epoch) + .at(&epoch) .remove(storage, validator)?; // Promote the next below-cap validator if there is one @@ -1265,14 +1171,14 @@ where insert_validator_into_set( &consensus_val_handle.at(&max_bc_amount), storage, - &pipeline_epoch, + &epoch, &removed_max_below_capacity, )?; validator_state_handle(&removed_max_below_capacity).set( storage, ValidatorState::Consensus, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; } } else if tokens_post < max_below_capacity_validator_amount { @@ -1300,28 +1206,28 @@ where &consensus_val_handle .at(&max_below_capacity_validator_amount), storage, - &pipeline_epoch, + &epoch, &removed_max_below_capacity, )?; validator_state_handle(&removed_max_below_capacity).set( storage, ValidatorState::Consensus, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; // Insert the current validator into the below-capacity set insert_validator_into_set( &below_capacity_val_handle.at(&tokens_post.into()), storage, - &pipeline_epoch, + &epoch, validator, )?; validator_state_handle(validator).set( storage, ValidatorState::BelowCapacity, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; } else { tracing::debug!("Validator remains in consensus set"); @@ -1330,7 +1236,7 @@ where insert_validator_into_set( &consensus_val_handle.at(&tokens_post), storage, - &pipeline_epoch, + &epoch, validator, )?; } @@ -1361,11 +1267,10 @@ where insert_into_consensus_and_demote_to_below_cap( storage, - params, validator, tokens_post, min_consensus_validator_amount, - current_epoch, + epoch, &consensus_val_handle, &below_capacity_val_handle, )?; @@ -1375,14 +1280,14 @@ where insert_validator_into_set( &below_capacity_val_handle.at(&tokens_post.into()), storage, - &pipeline_epoch, + &epoch, validator, )?; validator_state_handle(validator).set( storage, ValidatorState::BelowCapacity, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; } else { // The 
current validator is demoted to the below-threshold set @@ -1393,13 +1298,13 @@ where validator_state_handle(validator).set( storage, ValidatorState::BelowThreshold, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; // Remove the validator's position from storage validator_set_positions_handle() - .at(&pipeline_epoch) + .at(&epoch) .remove(storage, validator)?; } } @@ -1411,7 +1316,7 @@ where // Move the validator into the appropriate set let num_consensus_validators = - get_num_consensus_validators(storage, pipeline_epoch)?; + get_num_consensus_validators(storage, epoch)?; if num_consensus_validators < params.max_validator_slots { // Just insert into the consensus set tracing::debug!("Inserting validator into the consensus set"); @@ -1419,14 +1324,14 @@ where insert_validator_into_set( &consensus_val_handle.at(&tokens_post), storage, - &pipeline_epoch, + &epoch, validator, )?; validator_state_handle(validator).set( storage, ValidatorState::Consensus, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; } else { let min_consensus_validator_amount = @@ -1444,11 +1349,10 @@ where insert_into_consensus_and_demote_to_below_cap( storage, - params, validator, tokens_post, min_consensus_validator_amount, - current_epoch, + epoch, &consensus_val_handle, &below_capacity_val_handle, )?; @@ -1461,14 +1365,14 @@ where insert_validator_into_set( &below_capacity_val_handle.at(&tokens_post.into()), storage, - &pipeline_epoch, + &epoch, validator, )?; validator_state_handle(validator).set( storage, ValidatorState::BelowCapacity, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; } } @@ -1480,11 +1384,10 @@ where #[allow(clippy::too_many_arguments)] fn insert_into_consensus_and_demote_to_below_cap( storage: &mut S, - params: &PosParams, validator: &Address, tokens_post: token::Amount, min_consensus_amount: token::Amount, - current_epoch: Epoch, + epoch: Epoch, consensus_set: &ConsensusValidatorSet, below_capacity_set: &BelowCapacityValidatorSet, ) -> 
storage_api::Result<()> @@ -1500,35 +1403,35 @@ where .remove(storage, &last_position_of_min_consensus_vals)? .expect("There must be always be at least 1 consensus validator"); - let pipeline_epoch = current_epoch + params.pipeline_len; + // let pipeline_epoch = current_epoch + params.pipeline_len; // Insert the min consensus validator into the below-capacity // set insert_validator_into_set( &below_capacity_set.at(&min_consensus_amount.into()), storage, - &pipeline_epoch, + &epoch, &removed_min_consensus, )?; validator_state_handle(&removed_min_consensus).set( storage, ValidatorState::BelowCapacity, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; // Insert the current validator into the consensus set insert_validator_into_set( &consensus_set.at(&tokens_post), storage, - &pipeline_epoch, + &epoch, validator, )?; validator_state_handle(validator).set( storage, ValidatorState::Consensus, - current_epoch, - params.pipeline_len, + epoch, + 0, )?; Ok(()) } @@ -1536,6 +1439,7 @@ where /// Validator sets and positions copying into a future epoch pub fn copy_validator_sets_and_positions( storage: &mut S, + params: &PosParams, current_epoch: Epoch, target_epoch: Epoch, ) -> storage_api::Result<()> @@ -1583,8 +1487,6 @@ where below_cap_in_mem.insert((stake, position), address); } - tracing::debug!("{consensus_in_mem:?}"); - for ((val_stake, val_position), val_address) in consensus_in_mem.into_iter() { consensus_validator_set @@ -1592,11 +1494,6 @@ where .at(&val_stake) .insert(storage, val_position, val_address)?; } - tracing::debug!("New validator set should be inserted:"); - tracing::debug!( - "{:?}", - read_consensus_validator_set_addresses(storage, target_epoch)? 
- ); for ((val_stake, val_position), val_address) in below_cap_in_mem.into_iter() { @@ -1605,6 +1502,9 @@ where .at(&val_stake) .insert(storage, val_position, val_address)?; } + // Purge consensus and below-capacity validator sets + consensus_validator_set.update_data(storage, params, current_epoch)?; + below_capacity_validator_set.update_data(storage, params, current_epoch)?; // Copy validator positions let mut positions = HashMap::::default(); @@ -1623,6 +1523,13 @@ where } validator_set_positions_handle.set_last_update(storage, current_epoch)?; + // Purge old epochs of validator positions + validator_set_positions_handle.update_data( + storage, + params, + current_epoch, + )?; + // Copy set of all validator addresses let mut all_validators = HashSet::
::default(); let validator_addresses_handle = validator_addresses_handle(); @@ -1638,6 +1545,9 @@ where debug_assert!(!was_in); } + // Purge old epochs of all validator addresses + validator_addresses_handle.update_data(storage, params, current_epoch)?; + Ok(()) } @@ -1682,27 +1592,6 @@ where total_consensus_stake_key_handle().set(storage, total, epoch, 0) } -/// Purge the validator sets from the epochs older than the current epoch minus -/// `STORE_VALIDATOR_SETS_LEN` -pub fn purge_validator_sets_for_old_epoch( - storage: &mut S, - epoch: Epoch, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - if Epoch(STORE_VALIDATOR_SETS_LEN) < epoch { - let old_epoch = epoch - STORE_VALIDATOR_SETS_LEN - 1; - consensus_validator_set_handle() - .get_data_handler() - .remove_all(storage, &old_epoch)?; - below_capacity_validator_set_handle() - .get_data_handler() - .remove_all(storage, &old_epoch)?; - } - Ok(()) -} - /// Read the position of the validator in the subset of validators that have the /// same bonded stake. This information is held in its own epoched structure in /// addition to being inside the validator sets. @@ -1842,23 +1731,42 @@ struct BondAndUnbondUpdates { unbond_value: token::Change, } +/// Temp: In quint this is from `ResultUnbondTx` field `resultSlashing: {sum: +/// int, epochMap: Epoch -> int}` +#[derive(Debug, Default)] +pub struct ResultSlashing { + /// The token amount unbonded from the validator stake after accounting for + /// slashes + pub sum: token::Amount, + /// Map from bond start epoch to token amount after slashing + pub epoch_map: BTreeMap, +} + /// Unbond tokens that are bonded between a validator and a source (self or -/// delegator) +/// delegator). +/// +/// This fn is also called during redelegation for a source validator, in +/// which case the `is_redelegation` param must be true. 
pub fn unbond_tokens( storage: &mut S, source: Option<&Address>, validator: &Address, amount: token::Amount, current_epoch: Epoch, -) -> storage_api::Result<()> + is_redelegation: bool, +) -> storage_api::Result where S: StorageRead + StorageWrite, { - let amount = amount.change(); tracing::debug!( - "Unbonding token amount {} at epoch {current_epoch}", - amount.to_string_native() + "Unbonding token amount {} at epoch {}", + amount.to_string_native(), + current_epoch ); + if amount.is_zero() { + return Ok(ResultSlashing::default()); + } + let params = read_pos_params(storage)?; let pipeline_epoch = current_epoch + params.pipeline_len; @@ -1879,150 +1787,256 @@ where return Err(UnbondError::ValidatorIsFrozen(validator.clone()).into()); } - // Should be able to unbond inactive validators - - // Check that validator is not inactive at anywhere between the current - // epoch and pipeline offset - // let validator_state_handle = validator_state_handle(validator); - // for epoch in current_epoch.iter_range(params.pipeline_len) { - // if let Some(ValidatorState::Inactive) = - // validator_state_handle.get(storage, epoch, ¶ms)? - // { - // return - // Err(BondError::InactiveValidator(validator.clone()).into()); } - // } + // TODO: check that validator is not inactive (when implemented)! let source = source.unwrap_or(validator); let bonds_handle = bond_handle(source, validator); - tracing::debug!("\nBonds before decrementing:"); - for ep in Epoch::default().iter_range(current_epoch.0 + 3) { - let delta = bonds_handle - .get_delta_val(storage, ep, ¶ms)? - .unwrap_or_default(); - if !delta.is_zero() { - tracing::debug!( - "bond ∆ at epoch {}: {}", - ep, - delta.to_string_native() - ); - } - } - // Make sure there are enough tokens left in the bond at the pipeline offset let remaining_at_pipeline = bonds_handle .get_sum(storage, pipeline_epoch, ¶ms)? 
.unwrap_or_default(); if amount > remaining_at_pipeline { return Err(UnbondError::UnbondAmountGreaterThanBond( - token::Amount::from_change(amount).to_string_native(), - token::Amount::from_change(remaining_at_pipeline) - .to_string_native(), + amount.to_string_native(), + remaining_at_pipeline.to_string_native(), ) .into()); } + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds before decrementing: {bonds:#?}"); + } + let unbonds = unbond_handle(source, validator); - // TODO: think if this should be +1 or not!!! let withdrawable_epoch = current_epoch + params.withdrawable_epoch_offset(); - let mut remaining = amount; - let mut amount_after_slashing = token::Change::default(); + let redelegated_bonds = + delegator_redelegated_bonds_handle(source).at(validator); - // Iterate thru bonds, find non-zero delta entries starting from - // future-most, then decrement those values. For every val that - // gets decremented down to 0, need a unique unbond object. - // Read all matched bonds into memory to do reverse iteration - #[allow(clippy::needless_collect)] - let bonds: Vec> = - bonds_handle.get_data_handler().iter(storage)?.collect(); + #[cfg(debug_assertions)] + let redel_bonds_pre = redelegated_bonds.collect_map(storage)?; - let mut bond_iter = bonds.into_iter().rev(); - let mut new_bond_values = HashSet::::new(); + // `resultUnbonding` + // Find the bonds to fully unbond (remove) and one to partially unbond, if + // necessary + let bonds_to_unbond = find_bonds_to_remove( + storage, + &bonds_handle.get_data_handler(), + amount, + )?; - while remaining > token::Change::default() { - let bond = bond_iter.next().transpose()?; - if bond.is_none() { - continue; + // `modifiedRedelegation` + // A bond may have both redelegated and non-redelegated tokens in it. If + // this is the case, compute the modified state of the redelegation. 
+ let modified_redelegation = match bonds_to_unbond.new_entry { + Some((bond_epoch, new_bond_amount)) => { + if redelegated_bonds.contains(storage, &bond_epoch)? { + let cur_bond_amount = bonds_handle + .get_delta_val(storage, bond_epoch)? + .unwrap_or_default(); + compute_modified_redelegation( + storage, + &redelegated_bonds.at(&bond_epoch), + bond_epoch, + cur_bond_amount - new_bond_amount, + )? + } else { + ModifiedRedelegation::default() + } } - let (bond_epoch, bond_amount) = bond.unwrap(); - // println!("\nBond (epoch, amnt) = ({}, {})", bond_epoch, bond_amount); - // println!("remaining = {}", remaining); + None => ModifiedRedelegation::default(), + }; - let to_unbond = cmp::min(bond_amount, remaining); - new_bond_values.insert(BondAndUnbondUpdates { - bond_start: bond_epoch, - new_bond_value: bond_amount - to_unbond, - unbond_value: to_unbond, - }); - // println!("to_unbond (init) = {}", to_unbond); + // Compute the new unbonds eagerly + // `keysUnbonds` + // Get a set of epochs from which we're unbonding (fully and partially). + let bond_epochs_to_unbond = + if let Some((start_epoch, _)) = bonds_to_unbond.new_entry { + let mut to_remove = bonds_to_unbond.epochs.clone(); + to_remove.insert(start_epoch); + to_remove + } else { + bonds_to_unbond.epochs.clone() + }; - let slashes_for_this_bond = - find_slashes_in_range(storage, bond_epoch, None, validator)?; + // `newUnbonds` + // For each epoch we're unbonding, find the amount that's being unbonded. + // For full unbonds, this is the current bond value. For partial unbonds + // it is a difference between the current and new bond amount. 
+ let new_unbonds_map = bond_epochs_to_unbond + .into_iter() + .map(|epoch| { + let cur_bond_value = bonds_handle + .get_delta_val(storage, epoch) + .unwrap() + .unwrap_or_default(); + let value = if let Some((start_epoch, new_bond_amount)) = + bonds_to_unbond.new_entry + { + if start_epoch == epoch { + cur_bond_value - new_bond_amount + } else { + cur_bond_value + } + } else { + cur_bond_value + }; + (epoch, value) + }) + .collect::>(); - amount_after_slashing += get_slashed_amount( - ¶ms, - token::Amount::from_change(to_unbond), - &slashes_for_this_bond, - )?; - // println!("Cur amnt after slashing = {}", &amount_after_slashing); + // `updatedBonded` + // Remove bonds for all the full unbonds. + for epoch in &bonds_to_unbond.epochs { + bonds_handle.get_data_handler().remove(storage, epoch)?; + } + // Replace bond amount for partial unbond, if any. + if let Some((bond_epoch, new_bond_amount)) = bonds_to_unbond.new_entry { + bonds_handle.set(storage, new_bond_amount, bond_epoch, 0)?; + } - // Update the unbond records - let cur_amnt = unbond_records_handle(validator) - .at(&pipeline_epoch) - .get(storage, &bond_epoch)? 
- .unwrap_or_default(); - unbond_records_handle(validator) - .at(&pipeline_epoch) - .insert( + // `updatedUnbonded` + // Update the unbonds in storage using the eager map computed above + if !is_redelegation { + for (start_epoch, &unbond_amount) in new_unbonds_map.iter() { + unbonds.at(start_epoch).update( storage, - bond_epoch, - cur_amnt + token::Amount::from_change(to_unbond), + withdrawable_epoch, + |cur_val| cur_val.unwrap_or_default() + unbond_amount, )?; - - remaining -= to_unbond; + } } - drop(bond_iter); - // Write the in-memory bond and unbond values back to storage - for BondAndUnbondUpdates { - bond_start, - new_bond_value, - unbond_value, - } in new_bond_values.into_iter() - { - bonds_handle.set(storage, new_bond_value, bond_start, 0)?; - update_unbond( - &unbonds, - storage, - &withdrawable_epoch, - &bond_start, - token::Amount::from_change(unbond_value), - )?; + // `newRedelegatedUnbonds` + // This is what the delegator's redelegated unbonds would look like if this + // was the only unbond in the PoS system. We need to add these redelegated + // unbonds to the existing redelegated unbonds + let new_redelegated_unbonds = compute_new_redelegated_unbonds( + storage, + &redelegated_bonds, + &bonds_to_unbond.epochs, + &modified_redelegation, + )?; + + // `updatedRedelegatedBonded` + // NOTE: for now put this here after redelegated unbonds calc bc that one + // uses the pre-modified redelegated bonds from storage! + // First remove redelegation entries in epochs with full unbonds. 
+ for epoch_to_remove in &bonds_to_unbond.epochs { + redelegated_bonds.remove_all(storage, epoch_to_remove)?; + } + if let Some(epoch) = modified_redelegation.epoch { + tracing::debug!("\nIs modified redelegation"); + if modified_redelegation.validators_to_remove.is_empty() { + redelegated_bonds.remove_all(storage, &epoch)?; + } else { + // Then update the redelegated bonds at this epoch + let rbonds = redelegated_bonds.at(&epoch); + update_redelegated_bonds(storage, &rbonds, &modified_redelegation)?; + } } - tracing::debug!("Bonds after decrementing:"); - for ep in Epoch::default().iter_range(current_epoch.0 + 3) { - let delta = bonds_handle - .get_delta_val(storage, ep, ¶ms)? - .unwrap_or_default(); - if !delta.is_zero() { - tracing::debug!( - "bond ∆ at epoch {}: {}", - ep, - delta.to_string_native() - ); + if !is_redelegation { + // `val updatedRedelegatedUnbonded` with updates applied below + // Delegator's redelegated unbonds to this validator. + let delegator_redelegated_unbonded = + delegator_redelegated_unbonds_handle(source).at(validator); + + // Quint `def updateRedelegatedUnbonded` with `val + // updatedRedelegatedUnbonded` together with last statement + // in `updatedDelegator.with("redelegatedUnbonded", ...` updated + // directly in storage + for (start, unbonds) in &new_redelegated_unbonds { + let this_redelegated_unbonded = delegator_redelegated_unbonded + .at(start) + .at(&withdrawable_epoch); + + // Update the delegator's redelegated unbonds with the change + for (src_validator, redelegated_unbonds) in unbonds { + let redelegated_unbonded = + this_redelegated_unbonded.at(src_validator); + for (&redelegation_epoch, &change) in redelegated_unbonds { + redelegated_unbonded.update( + storage, + redelegation_epoch, + |current| current.unwrap_or_default() + change, + )?; + } + } } } - tracing::debug!( - "Token change including slashes on unbond = {}", - (-amount_after_slashing).to_string_native() - ); + // all `val updatedDelegator` changes are applied 
at this point + + // `val updatedTotalBonded` and `val updatedTotalUnbonded` with updates + // Update the validator's total bonded and unbonded amounts + let total_bonded = total_bonded_handle(validator).get_data_handler(); + let total_unbonded = total_unbonded_handle(validator).at(&pipeline_epoch); + for (&start_epoch, &amount) in &new_unbonds_map { + total_bonded.update(storage, start_epoch, |current| { + current.unwrap_or_default() - amount + })?; + total_unbonded.update(storage, start_epoch, |current| { + current.unwrap_or_default() + amount + })?; + } - // Update the validator set at the pipeline offset. Since unbonding from a - // jailed validator who is no longer frozen is allowed, only update the - // validator set if the validator is not jailed - let is_jailed_at_pipeline = matches!( + let total_redelegated_bonded = + validator_total_redelegated_bonded_handle(validator); + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(validator); + for (redelegation_start_epoch, unbonds) in &new_redelegated_unbonds { + for (src_validator, changes) in unbonds { + for (bond_start_epoch, change) in changes { + // total redelegated bonded + let bonded_sub_map = total_redelegated_bonded + .at(redelegation_start_epoch) + .at(src_validator); + bonded_sub_map.update( + storage, + *bond_start_epoch, + |current| current.unwrap_or_default() - *change, + )?; + + // total redelegated unbonded + let unbonded_sub_map = total_redelegated_unbonded + .at(&pipeline_epoch) + .at(redelegation_start_epoch) + .at(src_validator); + unbonded_sub_map.update( + storage, + *bond_start_epoch, + |current| current.unwrap_or_default() + *change, + )?; + } + } + } + + let slashes = find_validator_slashes(storage, validator)?; + // `val resultSlashing` + let result_slashing = compute_amount_after_slashing_unbond( + storage, + ¶ms, + &new_unbonds_map, + &new_redelegated_unbonds, + slashes, + )?; + #[cfg(debug_assertions)] + let redel_bonds_post = 
redelegated_bonds.collect_map(storage)?; + debug_assert!( + result_slashing.sum <= amount, + "Amount after slashing ({}) must be <= requested amount to unbond \ + ({}).", + result_slashing.sum.to_string_native(), + amount.to_string_native(), + ); + + let change_after_slashing = -result_slashing.sum.change(); + // Update the validator set at the pipeline offset. Since unbonding from a + // jailed validator who is no longer frozen is allowed, only update the + // validator set if the validator is not jailed + let is_jailed_at_pipeline = matches!( validator_state_handle(validator) .get(storage, pipeline_epoch, ¶ms)? .unwrap(), @@ -2033,31 +2047,530 @@ where storage, ¶ms, validator, - -amount_after_slashing, - current_epoch, + change_after_slashing, + pipeline_epoch, )?; } // Update the validator and total deltas at the pipeline offset update_validator_deltas( storage, - ¶ms, validator, - -amount_after_slashing, + change_after_slashing, current_epoch, params.pipeline_len, )?; update_total_deltas( storage, - ¶ms, - -amount_after_slashing, + change_after_slashing, current_epoch, params.pipeline_len, )?; + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds after decrementing: {bonds:#?}"); + } + + // Invariant: in the affected epochs, the delta of bonds must be >= delta of + // redelegated bonds deltas sum + #[cfg(debug_assertions)] + { + let mut epochs = bonds_to_unbond.epochs.clone(); + if let Some((epoch, _)) = bonds_to_unbond.new_entry { + epochs.insert(epoch); + } + for epoch in epochs { + let cur_bond = bonds_handle + .get_delta_val(storage, epoch)? + .unwrap_or_default(); + let redelegated_deltas = redelegated_bonds + .at(&epoch) + // Sum of redelegations from any src validator + .collect_map(storage)? 
+ .into_values() + .map(|redeleg| redeleg.into_values().sum()) + .sum(); + debug_assert!( + cur_bond >= redelegated_deltas, + "After unbonding, in epoch {epoch} the bond amount {} must be \ + >= redelegated deltas at pipeline {}.\n\nredelegated_bonds \ + pre: {redel_bonds_pre:#?}\nredelegated_bonds post: \ + {redel_bonds_post:#?},\nmodified_redelegation: \ + {modified_redelegation:#?},\nbonds_to_unbond: \ + {bonds_to_unbond:#?}", + cur_bond.to_string_native(), + redelegated_deltas.to_string_native() + ); + } + } + + Ok(result_slashing) +} + +#[derive(Debug, Default, Eq, PartialEq)] +struct FoldRedelegatedBondsResult { + total_redelegated: token::Amount, + total_after_slashing: token::Amount, +} + +/// Iterates over a `redelegated_unbonds` and computes the both the sum of all +/// redelegated tokens and how much is left after applying all relevant slashes. +// `def foldAndSlashRedelegatedBondsMap` +fn fold_and_slash_redelegated_bonds( + storage: &S, + params: &OwnedPosParams, + redelegated_unbonds: &EagerRedelegatedBondsMap, + start_epoch: Epoch, + list_slashes: &[Slash], + slash_epoch_filter: impl Fn(Epoch) -> bool, +) -> FoldRedelegatedBondsResult +where + S: StorageRead, +{ + let mut result = FoldRedelegatedBondsResult::default(); + for (src_validator, bonds_map) in redelegated_unbonds { + for (bond_start, &change) in bonds_map { + // Merge the two lists of slashes + let mut merged: Vec = + // Look-up slashes for this validator ... + validator_slashes_handle(src_validator) + .iter(storage) + .unwrap() + .map(Result::unwrap) + .filter(|slash| { + params.in_redelegation_slashing_window( + slash.epoch, + params.redelegation_start_epoch_from_end( + start_epoch, + ), + start_epoch, + ) && *bond_start <= slash.epoch + && slash_epoch_filter(slash.epoch) + }) + // ... 
and add `list_slashes` + .chain(list_slashes.iter().cloned()) + .collect(); + + // Sort slashes by epoch + merged.sort_by(|s1, s2| s1.epoch.partial_cmp(&s2.epoch).unwrap()); + + result.total_redelegated += change; + result.total_after_slashing += + apply_list_slashes(params, &merged, change); + } + } + result +} + +/// Computes how much remains from an amount of tokens after applying a list of +/// slashes. +/// +/// - `slashes` - a list of slashes ordered by misbehaving epoch. +/// - `amount` - the amount of slashable tokens. +// `def applyListSlashes` +fn apply_list_slashes( + params: &OwnedPosParams, + slashes: &[Slash], + amount: token::Amount, +) -> token::Amount { + let mut final_amount = amount; + let mut computed_slashes = BTreeMap::::new(); + for slash in slashes { + let slashed_amount = + compute_slashable_amount(params, slash, amount, &computed_slashes); + final_amount = + final_amount.checked_sub(slashed_amount).unwrap_or_default(); + computed_slashes.insert(slash.epoch, slashed_amount); + } + final_amount +} + +/// Computes how much is left from a bond or unbond after applying a slash given +/// that a set of slashes may have been previously applied. +// `def computeSlashableAmount` +fn compute_slashable_amount( + params: &OwnedPosParams, + slash: &Slash, + amount: token::Amount, + computed_slashes: &BTreeMap, +) -> token::Amount { + let updated_amount = computed_slashes + .iter() + .filter(|(&epoch, _)| { + // Keep slashes that have been applied and processed before the + // current slash occurred. We use `<=` because slashes processed at + // `slash.epoch` (at the start of the epoch) are also processed + // before this slash occurred. + epoch + params.slash_processing_epoch_offset() <= slash.epoch + }) + .fold(amount, |acc, (_, &amnt)| { + acc.checked_sub(amnt).unwrap_or_default() + }); + updated_amount.mul_ceil(slash.rate) +} + +/// Epochs for full and partial unbonds. 
+#[derive(Debug, Default)] +struct BondsForRemovalRes { + /// Full unbond epochs + pub epochs: BTreeSet, + /// Partial unbond epoch associated with the new bond amount + pub new_entry: Option<(Epoch, token::Amount)>, +} + +/// In decreasing epoch order, decrement the non-zero bond amount entries until +/// the full `amount` has been removed. Returns a `BondsForRemovalRes` object +/// that contains the epochs for which the full bond amount is removed and +/// additionally information for the one epoch whose bond amount is partially +/// removed, if any. +fn find_bonds_to_remove( + storage: &S, + bonds_handle: &LazyMap, + amount: token::Amount, +) -> storage_api::Result +where + S: StorageRead, +{ + #[allow(clippy::needless_collect)] + let bonds: Vec> = bonds_handle.iter(storage)?.collect(); + + let mut bonds_for_removal = BondsForRemovalRes::default(); + let mut remaining = amount; + + for bond in bonds.into_iter().rev() { + let (bond_epoch, bond_amount) = bond?; + let to_unbond = cmp::min(bond_amount, remaining); + if to_unbond == bond_amount { + bonds_for_removal.epochs.insert(bond_epoch); + } else { + bonds_for_removal.new_entry = + Some((bond_epoch, bond_amount - to_unbond)); + } + remaining -= to_unbond; + if remaining.is_zero() { + break; + } + } + Ok(bonds_for_removal) +} + +#[derive(Debug, Default, PartialEq, Eq)] +struct ModifiedRedelegation { + epoch: Option, + validators_to_remove: BTreeSet
, + validator_to_modify: Option
, + epochs_to_remove: BTreeSet, + epoch_to_modify: Option, + new_amount: Option, +} + +/// Used in `fn unbond_tokens` to compute the modified state of a redelegation +/// if redelegated tokens are being unbonded. +fn compute_modified_redelegation( + storage: &S, + redelegated_bonds: &RedelegatedTokens, + start_epoch: Epoch, + amount_to_unbond: token::Amount, +) -> storage_api::Result +where + S: StorageRead, +{ + let mut modified_redelegation = ModifiedRedelegation::default(); + + let mut src_validators = BTreeSet::
::new(); + let mut total_redelegated = token::Amount::zero(); + for rb in redelegated_bonds.iter(storage)? { + let ( + NestedSubKey::Data { + key: src_validator, + nested_sub_key: _, + }, + amount, + ) = rb?; + total_redelegated += amount; + src_validators.insert(src_validator); + } + + modified_redelegation.epoch = Some(start_epoch); + + // If the total amount of redelegated bonds is less than the target amount, + // then all redelegated bonds must be unbonded. + if total_redelegated <= amount_to_unbond { + return Ok(modified_redelegation); + } + + let mut remaining = amount_to_unbond; + for src_validator in src_validators.into_iter() { + if remaining.is_zero() { + break; + } + let rbonds = redelegated_bonds.at(&src_validator); + let total_src_val_amount = rbonds + .iter(storage)? + .map(|res| { + let (_, amount) = res?; + Ok(amount) + }) + .sum::>()?; + + // TODO: move this into the `if total_redelegated <= remaining` branch + // below, then we don't have to remove it in `fn + // update_redelegated_bonds` when `validator_to_modify` is Some (and + // avoid `modified_redelegation.validators_to_remove.clone()`). + // It affects assumption 2. in `fn compute_new_redelegated_unbonds`, but + // that looks trivial to change. + // NOTE: not sure if this TODO is still relevant... 
+ modified_redelegation + .validators_to_remove + .insert(src_validator.clone()); + if total_src_val_amount <= remaining { + remaining -= total_src_val_amount; + } else { + let bonds_to_remove = + find_bonds_to_remove(storage, &rbonds, remaining)?; + + remaining = token::Amount::zero(); + + // NOTE: When there are multiple `src_validators` from which we're + // unbonding, `validator_to_modify` cannot get overridden, because + // only one of them can be a partial unbond (`new_entry` + // is partial unbond) + if let Some((bond_epoch, new_bond_amount)) = + bonds_to_remove.new_entry + { + modified_redelegation.validator_to_modify = Some(src_validator); + modified_redelegation.epochs_to_remove = { + let mut epochs = bonds_to_remove.epochs; + // TODO: remove this insertion then we don't have to remove + // it again in `fn update_redelegated_bonds` + // when `epoch_to_modify` is Some (and avoid + // `modified_redelegation.epochs_to_remove.clone`) + // It affects assumption 3. in `fn + // compute_new_redelegated_unbonds`, but that also looks + // trivial to change. 
+ epochs.insert(bond_epoch); + epochs + }; + modified_redelegation.epoch_to_modify = Some(bond_epoch); + modified_redelegation.new_amount = Some(new_bond_amount); + } else { + modified_redelegation.validator_to_modify = Some(src_validator); + modified_redelegation.epochs_to_remove = bonds_to_remove.epochs; + } + } + } + Ok(modified_redelegation) +} + +fn update_redelegated_bonds( + storage: &mut S, + redelegated_bonds: &RedelegatedTokens, + modified_redelegation: &ModifiedRedelegation, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + if let Some(val_to_modify) = &modified_redelegation.validator_to_modify { + let mut updated_vals_to_remove = + modified_redelegation.validators_to_remove.clone(); + updated_vals_to_remove.remove(val_to_modify); + + // Remove the updated_vals_to_remove keys from the + // redelegated_bonds map + for val in &updated_vals_to_remove { + redelegated_bonds.remove_all(storage, val)?; + } + + if let Some(epoch_to_modify) = modified_redelegation.epoch_to_modify { + let mut updated_epochs_to_remove = + modified_redelegation.epochs_to_remove.clone(); + updated_epochs_to_remove.remove(&epoch_to_modify); + let val_bonds_to_modify = redelegated_bonds.at(val_to_modify); + for epoch in updated_epochs_to_remove { + val_bonds_to_modify.remove(storage, &epoch)?; + } + val_bonds_to_modify.insert( + storage, + epoch_to_modify, + modified_redelegation.new_amount.unwrap(), + )?; + } else { + // Then remove the epochs_to_remove from the redelegated bonds of the + // val_to_modify + let val_bonds_to_modify = redelegated_bonds.at(val_to_modify); + for epoch in &modified_redelegation.epochs_to_remove { + val_bonds_to_modify.remove(storage, epoch)?; + } + } + } else { + // Remove all validators in modified_redelegation.validators_to_remove + // from redelegated_bonds + for val in &modified_redelegation.validators_to_remove { + redelegated_bonds.remove_all(storage, val)?; + } + } Ok(()) } +/// Temp helper type to match quint model. 
+/// Result of `compute_new_redelegated_unbonds` that contains a map of +/// redelegated unbonds. +/// The map keys from outside in are: +/// +/// - redelegation end epoch where redeleg stops contributing to src validator +/// - src validator address +/// - src bond start epoch where it started contributing to src validator +// TODO: refactor out +type EagerRedelegatedUnbonds = BTreeMap; + +/// Computes a map of redelegated unbonds from a set of redelegated bonds. +/// +/// - `redelegated_bonds` - a map of redelegated bonds from epoch to +/// `RedelegatedTokens`. +/// - `epochs_to_remove` - a set of epochs that indicate the set of epochs +/// unbonded. +/// - `modified` record that represents a redelegated bond that is only +/// partially unbonded. +/// +/// The function assumes that: +/// +/// 1. `modified.epoch` is not in the `epochs_to_remove` set. +/// 2. `modified.validator_to_modify` is in `modified.vals_to_remove`. +/// 3. `modified.epoch_to_modify` is in `modified.epochs_to_remove`. +// TODO: try to optimize this by only writing to storage via Lazy! +// `def computeNewRedelegatedUnbonds` from Quint +fn compute_new_redelegated_unbonds( + storage: &S, + redelegated_bonds: &RedelegatedBondsOrUnbonds, + epochs_to_remove: &BTreeSet, + modified: &ModifiedRedelegation, +) -> storage_api::Result +where + S: StorageRead + StorageWrite, +{ + let unbonded_epochs = if let Some(epoch) = modified.epoch { + debug_assert!( + !epochs_to_remove.contains(&epoch), + "1. 
assumption in `fn compute_new_redelegated_unbonds` doesn't \ + hold" + ); + let mut epochs = epochs_to_remove.clone(); + epochs.insert(epoch); + epochs + .iter() + .cloned() + .filter(|e| redelegated_bonds.contains(storage, e).unwrap()) + .collect::>() + } else { + epochs_to_remove + .iter() + .cloned() + .filter(|e| redelegated_bonds.contains(storage, e).unwrap()) + .collect::>() + }; + debug_assert!( + modified + .validator_to_modify + .as_ref() + .map(|validator| modified.validators_to_remove.contains(validator)) + .unwrap_or(true), + "2. assumption in `fn compute_new_redelegated_unbonds` doesn't hold" + ); + debug_assert!( + modified + .epoch_to_modify + .as_ref() + .map(|epoch| modified.epochs_to_remove.contains(epoch)) + .unwrap_or(true), + "3. assumption in `fn compute_new_redelegated_unbonds` doesn't hold" + ); + + // quint `newRedelegatedUnbonds` returned from + // `computeNewRedelegatedUnbonds` + let new_redelegated_unbonds: EagerRedelegatedUnbonds = unbonded_epochs + .into_iter() + .map(|start| { + let mut rbonds = EagerRedelegatedBondsMap::default(); + if modified + .epoch + .map(|redelegation_epoch| start != redelegation_epoch) + .unwrap_or(true) + || modified.validators_to_remove.is_empty() + { + for res in redelegated_bonds.at(&start).iter(storage).unwrap() { + let ( + NestedSubKey::Data { + key: validator, + nested_sub_key: SubKey::Data(epoch), + }, + amount, + ) = res.unwrap(); + rbonds + .entry(validator.clone()) + .or_default() + .insert(epoch, amount); + } + (start, rbonds) + } else { + for src_validator in &modified.validators_to_remove { + if modified + .validator_to_modify + .as_ref() + .map(|validator| src_validator != validator) + .unwrap_or(true) + { + let raw_bonds = + redelegated_bonds.at(&start).at(src_validator); + for res in raw_bonds.iter(storage).unwrap() { + let (bond_epoch, bond_amount) = res.unwrap(); + rbonds + .entry(src_validator.clone()) + .or_default() + .insert(bond_epoch, bond_amount); + } + } else { + for bond_start in 
&modified.epochs_to_remove { + let cur_redel_bond_amount = redelegated_bonds + .at(&start) + .at(src_validator) + .get(storage, bond_start) + .unwrap() + .unwrap_or_default(); + let raw_bonds = rbonds + .entry(src_validator.clone()) + .or_default(); + if modified + .epoch_to_modify + .as_ref() + .map(|epoch| bond_start != epoch) + .unwrap_or(true) + { + raw_bonds + .insert(*bond_start, cur_redel_bond_amount); + } else { + raw_bonds.insert( + *bond_start, + cur_redel_bond_amount + - modified + .new_amount + // Safe unwrap - it shouldn't + // get to + // this if it's None + .unwrap(), + ); + } + } + } + } + (start, rbonds) + } + }) + .collect(); + + Ok(new_redelegated_unbonds) +} + /// Compute a token amount after slashing, given the initial amount and a set of /// slashes. It is assumed that the input `slashes` are those commited while the /// `amount` was contributing to voting power. @@ -2065,22 +2578,17 @@ fn get_slashed_amount( params: &PosParams, amount: token::Amount, slashes: &BTreeMap, -) -> storage_api::Result { - // println!("FN `get_slashed_amount`"); - +) -> storage_api::Result { let mut updated_amount = amount; let mut computed_amounts = Vec::::new(); - for (infraction_epoch, slash_rate) in slashes { - // println!("Slash epoch: {}, rate: {}", infraction_epoch, slash_rate); + for (&infraction_epoch, &slash_rate) in slashes { let mut computed_to_remove = BTreeSet::>::new(); for (ix, slashed_amount) in computed_amounts.iter().enumerate() { // Update amount with slashes that happened more than unbonding_len // epochs before this current slash - // TODO: understand this better (from Informal) - // TODO: do bounds of this need to be changed with a +/- 1?? 
if slashed_amount.epoch + params.slash_processing_epoch_offset() - <= *infraction_epoch + <= infraction_epoch { updated_amount = updated_amount .checked_sub(slashed_amount.amount) @@ -2095,13 +2603,10 @@ fn get_slashed_amount( computed_amounts.remove(item.0); } computed_amounts.push(SlashedAmount { - amount: *slash_rate * updated_amount, - epoch: *infraction_epoch, + amount: updated_amount.mul_ceil(slash_rate), + epoch: infraction_epoch, }); } - // println!("Finished loop over slashes in `get_slashed_amount`"); - // println!("Updated amount: {:?}", &updated_amount); - // println!("Computed amounts: {:?}", &computed_amounts); let total_computed_amounts = computed_amounts .into_iter() @@ -2112,29 +2617,126 @@ fn get_slashed_amount( .checked_sub(total_computed_amounts) .unwrap_or_default(); - Ok(final_amount.change()) + Ok(final_amount) } -fn update_unbond( - handle: &Unbonds, - storage: &mut S, - withdraw_epoch: &Epoch, - start_epoch: &Epoch, - amount: token::Amount, -) -> storage_api::Result<()> +// `def computeAmountAfterSlashingUnbond` +fn compute_amount_after_slashing_unbond( + storage: &S, + params: &OwnedPosParams, + unbonds: &BTreeMap, + redelegated_unbonds: &EagerRedelegatedUnbonds, + slashes: Vec, +) -> storage_api::Result where - S: StorageRead + StorageWrite, + S: StorageRead, { - let current = handle - .at(withdraw_epoch) - .get(storage, start_epoch)? 
- .unwrap_or_default(); - handle.at(withdraw_epoch).insert( - storage, - *start_epoch, - current + amount, - )?; - Ok(()) + let mut result_slashing = ResultSlashing::default(); + for (&start_epoch, amount) in unbonds { + // `val listSlashes` + let list_slashes: Vec = slashes + .iter() + .filter(|slash| slash.epoch >= start_epoch) + .cloned() + .collect(); + // `val resultFold` + let result_fold = if let Some(redelegated_unbonds) = + redelegated_unbonds.get(&start_epoch) + { + fold_and_slash_redelegated_bonds( + storage, + params, + redelegated_unbonds, + start_epoch, + &list_slashes, + |_| true, + ) + } else { + FoldRedelegatedBondsResult::default() + }; + // `val totalNoRedelegated` + let total_not_redelegated = amount + .checked_sub(result_fold.total_redelegated) + .unwrap_or_default(); + // `val afterNoRedelegated` + let after_not_redelegated = + apply_list_slashes(params, &list_slashes, total_not_redelegated); + // `val amountAfterSlashing` + let amount_after_slashing = + after_not_redelegated + result_fold.total_after_slashing; + // Accumulation step + result_slashing.sum += amount_after_slashing; + result_slashing + .epoch_map + .insert(start_epoch, amount_after_slashing); + } + Ok(result_slashing) +} + +/// Compute from a set of unbonds (both redelegated and not) how much is left +/// after applying all relevant slashes. 
+// `def computeAmountAfterSlashingWithdraw` +fn compute_amount_after_slashing_withdraw( + storage: &S, + params: &OwnedPosParams, + unbonds_and_redelegated_unbonds: &BTreeMap< + (Epoch, Epoch), + (token::Amount, EagerRedelegatedBondsMap), + >, + slashes: Vec, +) -> storage_api::Result +where + S: StorageRead, +{ + let mut result_slashing = ResultSlashing::default(); + + for ((start_epoch, withdraw_epoch), (amount, redelegated_unbonds)) in + unbonds_and_redelegated_unbonds.iter() + { + // TODO: check if slashes in the same epoch can be + // folded into one effective slash + let end_epoch = *withdraw_epoch + - params.unbonding_len + - params.cubic_slashing_window_length; + // Find slashes that apply to `start_epoch..end_epoch` + let list_slashes = slashes + .iter() + .filter(|slash| { + // Started before the slash occurred + start_epoch <= &slash.epoch + // Ends after the slash + && end_epoch > slash.epoch + }) + .cloned() + .collect::>(); + + // Find the sum and the sum after slashing of the redelegated unbonds + let result_fold = fold_and_slash_redelegated_bonds( + storage, + params, + redelegated_unbonds, + *start_epoch, + &list_slashes, + |_| true, + ); + + // Unbond amount that didn't come from a redelegation + let total_not_redelegated = *amount - result_fold.total_redelegated; + // Find how much remains after slashing non-redelegated amount + let after_not_redelegated = + apply_list_slashes(params, &list_slashes, total_not_redelegated); + + // Add back the unbond and redelegated unbond amount after slashing + let amount_after_slashing = + after_not_redelegated + result_fold.total_after_slashing; + + result_slashing.sum += amount_after_slashing; + result_slashing + .epoch_map + .insert(*start_epoch, amount_after_slashing); + } + + Ok(result_slashing) } /// Arguments to [`become_validator`]. @@ -2147,6 +2749,8 @@ pub struct BecomeValidator<'a, S> { pub address: &'a Address, /// The validator's consensus key, used by Tendermint. 
pub consensus_key: &'a common::PublicKey, + /// The validator's protocol key. + pub protocol_key: &'a common::PublicKey, /// The validator's Ethereum bridge cold key. pub eth_cold_key: &'a common::PublicKey, /// The validator's Ethereum bridge hot key. @@ -2171,6 +2775,7 @@ where params, address, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, current_epoch, @@ -2201,6 +2806,12 @@ where current_epoch, params.pipeline_len, )?; + validator_protocol_key_handle(address).set( + storage, + protocol_key.clone(), + current_epoch, + params.pipeline_len, + )?; validator_eth_hot_key_handle(address).set( storage, eth_hot_key.clone(), @@ -2221,7 +2832,7 @@ where )?; validator_deltas_handle(address).set( storage, - token::Change::default(), + token::Change::zero(), current_epoch, params.pipeline_len, )?; @@ -2248,12 +2859,17 @@ pub fn withdraw_tokens( where S: StorageRead + StorageWrite, { - tracing::debug!("Withdrawing tokens in epoch {current_epoch}"); let params = read_pos_params(storage)?; let source = source.unwrap_or(validator); + + tracing::debug!("Withdrawing tokens in epoch {current_epoch}"); tracing::debug!("Source {} --> Validator {}", source, validator); - let unbond_handle = unbond_handle(source, validator); + let unbond_handle: Unbonds = unbond_handle(source, validator); + let redelegated_unbonds = + delegator_redelegated_unbonds_handle(source).at(validator); + + // Check that there are unbonded tokens available for withdrawal if unbond_handle.is_empty(storage)? 
{ return Err(WithdrawError::NoUnbondFound(BondId { source: source.clone(), @@ -2262,84 +2878,109 @@ where .into()); } - // let mut total_slashed = token::Amount::default(); - let mut withdrawable_amount = token::Amount::default(); - // (withdraw_epoch, start_epoch) - let mut unbonds_to_remove: Vec<(Epoch, Epoch)> = Vec::new(); + let mut unbonds_and_redelegated_unbonds: BTreeMap< + (Epoch, Epoch), + (token::Amount, EagerRedelegatedBondsMap), + > = BTreeMap::new(); for unbond in unbond_handle.iter(storage)? { let ( NestedSubKey::Data { - key: withdraw_epoch, - nested_sub_key: SubKey::Data(start_epoch), + key: start_epoch, + nested_sub_key: SubKey::Data(withdraw_epoch), }, amount, ) = unbond?; + // Logging tracing::debug!( "Unbond delta ({start_epoch}..{withdraw_epoch}), amount {}", amount.to_string_native() ); - - // TODO: adding slash rates in same epoch, applying cumulatively in dif - // epochs if withdraw_epoch > current_epoch { tracing::debug!( "Not yet withdrawable until epoch {withdraw_epoch}" ); continue; } - let slashes_for_this_unbond = find_slashes_in_range( - storage, - start_epoch, - Some( - withdraw_epoch - - params.unbonding_len - - params.cubic_slashing_window_length, - ), - validator, - )?; - let amount_after_slashing = - get_slashed_amount(¶ms, amount, &slashes_for_this_unbond)?; + let mut eager_redelegated_unbonds = EagerRedelegatedBondsMap::default(); + let matching_redelegated_unbonds = + redelegated_unbonds.at(&start_epoch).at(&withdraw_epoch); + for ub in matching_redelegated_unbonds.iter(storage)? 
{ + let ( + NestedSubKey::Data { + key: address, + nested_sub_key: SubKey::Data(epoch), + }, + amount, + ) = ub?; + eager_redelegated_unbonds + .entry(address) + .or_default() + .entry(epoch) + .or_insert(amount); + } - // total_slashed += amount - token::Amount::from(amount_after_slashing); - withdrawable_amount += token::Amount::from(amount_after_slashing); - unbonds_to_remove.push((withdraw_epoch, start_epoch)); + unbonds_and_redelegated_unbonds.insert( + (start_epoch, withdraw_epoch), + (amount, eager_redelegated_unbonds), + ); } + let slashes = find_validator_slashes(storage, validator)?; + + // `val resultSlashing` + let result_slashing = compute_amount_after_slashing_withdraw( + storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + )?; + + let withdrawable_amount = result_slashing.sum; tracing::debug!( "Withdrawing total {}", withdrawable_amount.to_string_native() ); - // Remove the unbond data from storage - for (withdraw_epoch, start_epoch) in unbonds_to_remove { + // `updateDelegator` with `unbonded` and `redelegeatedUnbonded` + for ((start_epoch, withdraw_epoch), _unbond_and_redelegations) in + unbonds_and_redelegated_unbonds + { tracing::debug!("Remove ({start_epoch}..{withdraw_epoch}) from unbond"); unbond_handle - .at(&withdraw_epoch) - .remove(storage, &start_epoch)?; - // TODO: check if the `end_epoch` layer is now empty and remove it if - // so, may need to implement remove/delete for nested map + .at(&start_epoch) + .remove(storage, &withdraw_epoch)?; + redelegated_unbonds + .at(&start_epoch) + .remove_all(storage, &withdraw_epoch)?; + + if unbond_handle.at(&start_epoch).is_empty(storage)? { + unbond_handle.remove_all(storage, &start_epoch)?; + } + if redelegated_unbonds.at(&start_epoch).is_empty(storage)? 
{ + redelegated_unbonds.remove_all(storage, &start_epoch)?; + } } // Transfer the withdrawable tokens from the PoS address back to the source let staking_token = staking_token_address(storage); - transfer_tokens( + token::transfer( storage, &staking_token, - withdrawable_amount, &ADDRESS, source, + withdrawable_amount, )?; // TODO: Transfer the slashed tokens from the PoS address to the Slash Pool // address - // transfer_tokens( + // token::transfer( // storage, // &staking_token, - // total_slashed, // &ADDRESS, // &SLASH_POOL_ADDRESS, + // total_slashed, // )?; Ok(withdrawable_amount) @@ -2355,13 +2996,21 @@ pub fn change_validator_commission_rate( where S: StorageRead + StorageWrite, { - // if new_rate < Uint::zero() { - // return Err(CommissionRateChangeError::NegativeRate( - // new_rate, - // validator.clone(), - // ) - // .into()); - // } + if new_rate.is_negative() { + return Err(CommissionRateChangeError::NegativeRate( + new_rate, + validator.clone(), + ) + .into()); + } + + if new_rate > Dec::one() { + return Err(CommissionRateChangeError::LargerThanOne( + new_rate, + validator.clone(), + ) + .into()); + } let max_change = read_validator_max_commission_rate_change(storage, validator)?; @@ -2386,14 +3035,7 @@ where .get(storage, pipeline_epoch.prev(), ¶ms)? 
.expect("Could not find a rate in given epoch"); - // TODO: change this back if we use `Dec` type with a signed integer - // let change_from_prev = new_rate - rate_before_pipeline; - // if change_from_prev.abs() > max_change.unwrap() { - let change_from_prev = if new_rate > rate_before_pipeline { - new_rate - rate_before_pipeline - } else { - rate_before_pipeline - new_rate - }; + let change_from_prev = new_rate.abs_diff(&rate_before_pipeline); if change_from_prev > max_change.unwrap() { return Err(CommissionRateChangeError::RateChangeTooLarge( change_from_prev, @@ -2405,53 +3047,6 @@ where commission_handle.set(storage, new_rate, current_epoch, params.pipeline_len) } -/// Transfer tokens between accounts -/// TODO: may want to move this into core crate -pub fn transfer_tokens( - storage: &mut S, - token: &Address, - amount: token::Amount, - src: &Address, - dest: &Address, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let src_key = token::balance_key(token, src); - let dest_key = token::balance_key(token, dest); - if let Some(mut src_balance) = storage.read::(&src_key)? { - // let mut src_balance: token::Amount = - // decode(src_balance).unwrap_or_default(); - if src_balance < amount { - tracing::error!( - "PoS system transfer error, the source doesn't have \ - sufficient balance. It has {}, but {} is required", - src_balance.to_string_native(), - amount.to_string_native(), - ); - } - src_balance.spend(&amount); - let mut dest_balance = storage - .read::(&dest_key)? 
- .unwrap_or_default(); - - // let dest_balance = storage.read_bytes(&dest_key).unwrap_or_default(); - // let mut dest_balance: token::Amount = dest_balance - // .and_then(|b| decode(b).ok()) - // .unwrap_or_default(); - dest_balance.receive(&amount); - storage - .write(&src_key, src_balance) - .expect("Unable to write token balance for PoS system"); - storage - .write(&dest_key, dest_balance) - .expect("Unable to write token balance for PoS system"); - } else { - tracing::error!("PoS system transfer error, the source has no balance"); - } - Ok(()) -} - /// Check if the given consensus key is already being used to ensure uniqueness. /// /// If it's not being used, it will be inserted into the set that's being used @@ -2481,54 +3076,272 @@ where } /// Get the total bond amount, including slashes, for a given bond ID and epoch. -/// Returns a two-element tuple of the raw bond amount and the post-slashed bond -/// amount, respectively. -/// -/// TODO: does epoch of discovery need to be considered for precise accuracy? +/// Returns the bond amount after slashing. For future epochs the value is +/// subject to change. pub fn bond_amount( storage: &S, bond_id: &BondId, epoch: Epoch, -) -> storage_api::Result<(token::Amount, token::Amount)> +) -> storage_api::Result where S: StorageRead, { - // TODO: review this logic carefully, apply rewards - let slashes = find_validator_slashes(storage, &bond_id.validator)?; - let slash_rates = slashes.into_iter().fold( - BTreeMap::::new(), - |mut map, slash| { - let tot_rate = map.entry(slash.epoch).or_default(); - *tot_rate = cmp::min(Dec::one(), *tot_rate + slash.rate); - map - }, - ); + // TODO: our method of applying slashes is not correct! This needs review - let bonds = - bond_handle(&bond_id.source, &bond_id.validator).get_data_handler(); - let mut total = token::Amount::default(); - let mut total_active = token::Amount::default(); - for next in bonds.iter(storage)? 
{ - let (bond_epoch, delta) = next?; + let params = read_pos_params(storage)?; + + // TODO: apply rewards + let slashes = find_validator_slashes(storage, &bond_id.validator)?; + // dbg!(&slashes); + // let slash_rates = + // slashes + // .iter() + // .fold(BTreeMap::::new(), |mut map, slash| { + // let tot_rate = map.entry(slash.epoch).or_default(); + // *tot_rate = cmp::min(Dec::one(), *tot_rate + slash.rate); + // map + // }); + // dbg!(&slash_rates); + + // Accumulate incoming redelegations slashes from source validator, if any. + // This ensures that if there're slashes on both src validator and dest + // validator, they're combined correctly. + let mut redelegation_slashes = BTreeMap::::new(); + for res in delegator_redelegated_bonds_handle(&bond_id.source) + .at(&bond_id.validator) + .iter(storage)? + { + let ( + NestedSubKey::Data { + key: redelegation_end, + nested_sub_key: + NestedSubKey::Data { + key: src_validator, + nested_sub_key: SubKey::Data(start), + }, + }, + delta, + ) = res?; + + let list_slashes = validator_slashes_handle(&src_validator) + .iter(storage)? 
+ .map(Result::unwrap) + .filter(|slash| { + let slash_processing_epoch = + slash.epoch + params.slash_processing_epoch_offset(); + start <= slash.epoch + && redelegation_end > slash.epoch + && slash_processing_epoch + > redelegation_end - params.pipeline_len + }) + .collect::>(); + + let slashed_delta = apply_list_slashes(¶ms, &list_slashes, delta); + + // let mut slashed_delta = delta; + // let slashes = find_slashes_in_range( + // storage, + // start, + // Some(redelegation_end), + // &src_validator, + // )?; + // for (slash_epoch, rate) in slashes { + // let slash_processing_epoch = + // slash_epoch + params.slash_processing_epoch_offset(); + // // If the slash was processed after redelegation was submitted + // // it has to be slashed now + // if slash_processing_epoch > redelegation_end - + // params.pipeline_len { let slashed = + // slashed_delta.mul_ceil(rate); slashed_delta -= + // slashed; } + // } + *redelegation_slashes.entry(redelegation_end).or_default() += + delta - slashed_delta; + } + // dbg!(&redelegation_slashes); + + let bonds = + bond_handle(&bond_id.source, &bond_id.validator).get_data_handler(); + let mut total_active = token::Amount::zero(); + for next in bonds.iter(storage)? 
{ + let (bond_epoch, delta) = next?; if bond_epoch > epoch { continue; } - total += token::Amount::from(delta); - total_active += token::Amount::from(delta); + let list_slashes = slashes + .iter() + .filter(|slash| bond_epoch <= slash.epoch) + .cloned() + .collect::>(); + + let mut slashed_delta = + apply_list_slashes(¶ms, &list_slashes, delta); + + // Deduct redelegation src validator slash, if any + if let Some(&redelegation_slash) = redelegation_slashes.get(&bond_epoch) + { + slashed_delta -= redelegation_slash; + } + + // let list_slashes = slashes + // .iter() + // .map(Result::unwrap) + // .filter(|slash| bond_epoch <= slash.epoch) + // .collect::>(); + + // for (&slash_epoch, &rate) in &slash_rates { + // if slash_epoch < bond_epoch { + // continue; + // } + // // TODO: think about truncation + // let current_slash = slashed_delta.mul_ceil(rate); + // slashed_delta -= current_slash; + // } + total_active += slashed_delta; + } + // dbg!(&total_active); + + // Add unbonds that are still contributing to stake + let unbonds = unbond_handle(&bond_id.source, &bond_id.validator); + for next in unbonds.iter(storage)? 
{ + let ( + NestedSubKey::Data { + key: start, + nested_sub_key: SubKey::Data(withdrawable_epoch), + }, + delta, + ) = next?; + let end = withdrawable_epoch - params.withdrawable_epoch_offset() + + params.pipeline_len; + + if start <= epoch && end > epoch { + let list_slashes = slashes + .iter() + .filter(|slash| start <= slash.epoch && end > slash.epoch) + .cloned() + .collect::>(); + + let slashed_delta = + apply_list_slashes(¶ms, &list_slashes, delta); + + // let mut slashed_delta = delta; + // for (&slash_epoch, &rate) in &slash_rates { + // if start <= slash_epoch && end > slash_epoch { + // // TODO: think about truncation + // let current_slash = slashed_delta.mul_ceil(rate); + // slashed_delta -= current_slash; + // } + // } + total_active += slashed_delta; + } + } + // dbg!(&total_active); + + if bond_id.validator != bond_id.source { + // Add outgoing redelegations that are still contributing to the source + // validator's stake + let redelegated_bonds = + delegator_redelegated_bonds_handle(&bond_id.source); + for res in redelegated_bonds.iter(storage)? 
{ + let ( + NestedSubKey::Data { + key: _dest_validator, + nested_sub_key: + NestedSubKey::Data { + key: end, + nested_sub_key: + NestedSubKey::Data { + key: src_validator, + nested_sub_key: SubKey::Data(start), + }, + }, + }, + delta, + ) = res?; + if src_validator == bond_id.validator + && start <= epoch + && end > epoch + { + let list_slashes = slashes + .iter() + .filter(|slash| start <= slash.epoch && end > slash.epoch) + .cloned() + .collect::>(); + + let slashed_delta = + apply_list_slashes(¶ms, &list_slashes, delta); + + // let mut slashed_delta = delta; + // for (&slash_epoch, &rate) in &slash_rates { + // if start <= slash_epoch && end > slash_epoch { + // // TODO: think about truncation + // let current_slash = delta.mul_ceil(rate); + // slashed_delta -= current_slash; + // } + // } + total_active += slashed_delta; + } + } + // dbg!(&total_active); - for (slash_epoch, rate) in &slash_rates { - if *slash_epoch < bond_epoch { - continue; + // Add outgoing redelegation unbonds that are still contributing to + // the source validator's stake + let redelegated_unbonds = + delegator_redelegated_unbonds_handle(&bond_id.source); + for res in redelegated_unbonds.iter(storage)? { + let ( + NestedSubKey::Data { + key: _dest_validator, + nested_sub_key: + NestedSubKey::Data { + key: redelegation_epoch, + nested_sub_key: + NestedSubKey::Data { + key: withdraw_epoch, + nested_sub_key: + NestedSubKey::Data { + key: src_validator, + nested_sub_key: SubKey::Data(start), + }, + }, + }, + }, + delta, + ) = res?; + let end = withdraw_epoch - params.withdrawable_epoch_offset() + + params.pipeline_len; + if src_validator == bond_id.validator + // If the unbonded bond was redelegated after this epoch ... + && redelegation_epoch > epoch + // ... the start was before or at this epoch ... + && start <= epoch + // ... 
and the end after this epoch + && end > epoch + { + let list_slashes = slashes + .iter() + .filter(|slash| start <= slash.epoch && end > slash.epoch) + .cloned() + .collect::>(); + + let slashed_delta = + apply_list_slashes(¶ms, &list_slashes, delta); + + // let mut slashed_delta = delta; + // for (&slash_epoch, &rate) in &slash_rates { + // if start <= slash_epoch && end > slash_epoch { + // let current_slash = delta.mul_ceil(rate); + // slashed_delta -= current_slash; + // } + // } + total_active += slashed_delta; } - // TODO: think about truncation - let current_slashed = *rate * delta; - total_active - .checked_sub(token::Amount::from(current_slashed)) - .unwrap_or_default(); } } - Ok((total, total_active)) + // dbg!(&total_active); + + Ok(total_active) } /// Get the genesis consensus validators stake and consensus key for Tendermint, @@ -2618,8 +3431,7 @@ where &address, current_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); into_tm_voting_power( params.tm_votes_per_token, prev_validator_stake, @@ -2642,7 +3454,7 @@ where } // If both previous and current voting powers are 0, and the // validator_stake_threshold is 0, skip update - if params.validator_stake_threshold == token::Amount::default() + if params.validator_stake_threshold.is_zero() && *prev_tm_voting_power == 0 && *new_tm_voting_power == 0 { @@ -2690,8 +3502,7 @@ where &address, current_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); into_tm_voting_power( params.tm_votes_per_token, prev_validator_stake, @@ -2702,8 +3513,7 @@ where // it in the `new_consensus_validators` iterator above if matches!(new_state, Some(ValidatorState::Consensus)) { return None; - } else if params.validator_stake_threshold - == token::Amount::default() + } else if params.validator_stake_threshold.is_zero() && *prev_tm_voting_power == 0 { // If the new state is not Consensus but its prev voting power @@ -2782,11 +3592,10 @@ where "Delegation key should contain validator address.", ) })?; - let amount = 
bond_handle(owner, &validator_address) + let deltas_sum = bond_handle(owner, &validator_address) .get_sum(storage, *epoch, ¶ms)? .unwrap_or_default(); - delegations - .insert(validator_address, token::Amount::from_change(amount)); + delegations.insert(validator_address, deltas_sum); } Ok(delegations) } @@ -2807,7 +3616,7 @@ pub fn find_bonds( storage: &S, source: &Address, validator: &Address, -) -> storage_api::Result> +) -> storage_api::Result> where S: StorageRead, { @@ -2831,8 +3640,8 @@ where .map(|next_result| { let ( NestedSubKey::Data { - key: withdraw_epoch, - nested_sub_key: SubKey::Data(start_epoch), + key: start_epoch, + nested_sub_key: SubKey::Data(withdraw_epoch), }, amount, ) = next_result?; @@ -2976,7 +3785,7 @@ where { return None; } - let change: token::Change = + let change: token::Amount = BorshDeserialize::try_from_slice(&val_bytes).ok()?; if change.is_zero() { return None; @@ -3101,12 +3910,12 @@ where let bonds = find_bonds(storage, &source, &validator)? .into_iter() - .filter(|(_start, change)| *change > token::Change::default()) - .map(|(start, change)| { + .filter(|(_start, amount)| *amount > token::Amount::zero()) + .map(|(start, amount)| { make_bond_details( params, &validator, - change, + amount, start, &slashes, &mut applied_slashes, @@ -3140,7 +3949,7 @@ where fn make_bond_details( params: &PosParams, validator: &Address, - change: token::Change, + deltas_sum: token::Amount, start: Epoch, slashes: &[Slash], applied_slashes: &mut HashMap>, @@ -3150,7 +3959,7 @@ fn make_bond_details( .get(validator) .cloned() .unwrap_or_default(); - let amount = token::Amount::from_change(change); + let mut slash_rates_by_epoch = BTreeMap::::new(); let validator_slashes = @@ -3169,15 +3978,15 @@ fn make_bond_details( let slashed_amount = if slash_rates_by_epoch.is_empty() { None } else { - let amount_after_slashing = token::Amount::from_change( - get_slashed_amount(params, amount, &slash_rates_by_epoch).unwrap(), - ); - Some(amount - 
amount_after_slashing) + let amount_after_slashing = + get_slashed_amount(params, deltas_sum, &slash_rates_by_epoch) + .unwrap(); + Some(deltas_sum - amount_after_slashing) }; BondDetails { start, - amount, + amount: deltas_sum, slashed_amount, } } @@ -3203,10 +4012,10 @@ fn make_unbond_details( if slash.epoch >= start && slash.epoch < withdraw - .checked_sub(Epoch( + .checked_sub( params.unbonding_len + params.cubic_slashing_window_length, - )) + ) .unwrap_or_default() { let cur_rate = slash_rates_by_epoch.entry(slash.epoch).or_default(); @@ -3221,9 +4030,8 @@ fn make_unbond_details( let slashed_amount = if slash_rates_by_epoch.is_empty() { None } else { - let amount_after_slashing = token::Amount::from_change( - get_slashed_amount(params, amount, &slash_rates_by_epoch).unwrap(), - ); + let amount_after_slashing = + get_slashed_amount(params, amount, &slash_rates_by_epoch).unwrap(); Some(amount - amount_after_slashing) }; @@ -3255,7 +4063,7 @@ where let consensus_validators = consensus_validator_set_handle().at(&epoch); // Get total stake of the consensus validator set - let mut total_consensus_stake = token::Amount::default(); + let mut total_consensus_stake = token::Amount::zero(); for validator in consensus_validators.iter(storage)? { let ( NestedSubKey::Data { @@ -3270,7 +4078,7 @@ where // Get set of signing validator addresses and the combined stake of // these signers let mut signer_set: HashSet
= HashSet::new(); - let mut total_signing_stake = token::Amount::default(); + let mut total_signing_stake = token::Amount::zero(); for VoteInfo { validator_address, validator_vp, @@ -3291,8 +4099,7 @@ where } let stake_from_deltas = - read_validator_stake(storage, ¶ms, &validator_address, epoch)? - .unwrap_or_default(); + read_validator_stake(storage, ¶ms, &validator_address, epoch)?; // Ensure TM stake updates properly with a debug_assert if cfg!(debug_assertions) { @@ -3325,7 +4132,7 @@ where "PoS rewards coefficients {coeffs:?}, inputs: {rewards_calculator:?}." ); - // println!( + // tracing::debug!( // "TOTAL SIGNING STAKE (LOGGING BLOCK REWARDS) = {}", // signing_stake // ); @@ -3348,13 +4155,13 @@ where // When below-threshold validator set is added, this shouldn't be needed // anymore since some minimal stake will be required to be in at least // the consensus set - if stake == token::Amount::default() { + if stake.is_zero() { continue; } let mut rewards_frac = Dec::zero(); let stake_unscaled: Dec = stake.into(); - // println!( + // tracing::debug!( // "NAMADA VALIDATOR STAKE (LOGGING BLOCK REWARDS) OF EPOCH {} = // {}", epoch, stake // ); @@ -3396,7 +4203,7 @@ pub fn compute_cubic_slash_rate( where S: StorageRead, { - // println!("COMPUTING CUBIC SLASH RATE"); + // tracing::debug!("COMPUTING CUBIC SLASH RATE"); let mut sum_vp_fraction = Dec::zero(); let (start_epoch, end_epoch) = params.cubic_slash_epoch_window(infraction_epoch); @@ -3424,9 +4231,9 @@ where ) = res?; let validator_stake = - read_validator_stake(storage, params, &validator, epoch)? 
- .unwrap_or_default(); - // println!("Val {} stake: {}", &validator, validator_stake); + read_validator_stake(storage, params, &validator, epoch)?; + // tracing::debug!("Val {} stake: {}", &validator, + // validator_stake); Ok(acc + Dec::from(validator_stake)) // TODO: does something more complex need to be done @@ -3436,7 +4243,7 @@ where )?; sum_vp_fraction += infracting_stake / consensus_stake; } - // println!("sum_vp_fraction: {}", sum_vp_fraction); + // tracing::debug!("sum_vp_fraction: {}", sum_vp_fraction); Ok(Dec::new(9, 0).unwrap() * sum_vp_fraction * sum_vp_fraction) } @@ -3495,16 +4302,15 @@ where .expect("Expected to find a valid validator."); match prev_state { ValidatorState::Consensus => { - let amount_pre = validator_deltas_handle(validator) - .get_sum(storage, epoch, params)? - .unwrap_or_default(); + let amount_pre = + read_validator_stake(storage, params, validator, epoch)?; let val_position = validator_set_positions_handle() .at(&epoch) .get(storage, validator)? .expect("Could not find validator's position in storage."); let _ = consensus_validator_set_handle() .at(&epoch) - .at(&token::Amount::from_change(amount_pre)) + .at(&amount_pre) .remove(storage, &val_position)?; validator_set_positions_handle() .at(&epoch) @@ -3557,6 +4363,7 @@ where let amount_pre = validator_deltas_handle(validator) .get_sum(storage, epoch, params)? .unwrap_or_default(); + debug_assert!(amount_pre.non_negative()); let val_position = validator_set_positions_handle() .at(&epoch) .get(storage, validator)? @@ -3570,10 +4377,10 @@ where .remove(storage, validator)?; } ValidatorState::BelowThreshold => { - println!("Below-threshold"); + tracing::debug!("Below-threshold"); } ValidatorState::Inactive => { - println!("INACTIVE"); + tracing::debug!("INACTIVE"); panic!( "Shouldn't be here - haven't implemented inactive vals yet" ) @@ -3604,11 +4411,7 @@ where Ok(()) } -/// Process slashes that have been queued up after discovery. 
Calculate the -/// cubic slashing rate, store the finalized slashes, update the deltas, then -/// transfer slashed tokens from PoS to the Slash Pool. This function is called -/// at the beginning of the epoch that is `unbonding_length + 1 + -/// cubic_slashing_window_length` epochs after the infraction epoch. +/// Process slashes NEW pub fn process_slashes( storage: &mut S, current_epoch: Epoch, @@ -3641,8 +4444,11 @@ where compute_cubic_slash_rate(storage, ¶ms, infraction_epoch)?; // Collect the enqueued slashes and update their rates - let mut validators_and_slashes: HashMap> = - HashMap::new(); + let mut eager_validator_slashes: BTreeMap> = + BTreeMap::new(); // TODO: will need to update this in storage later + let mut eager_validator_slash_rates: HashMap = HashMap::new(); + + // `slashPerValidator` and `slashesMap` while also updating in storage for enqueued_slash in enqueued_slashes.iter(storage)? { let ( NestedSubKey::Data { @@ -3666,254 +4472,586 @@ where r#type: enqueued_slash.r#type, rate: slash_rate, }; - tracing::debug!( - "Slash for validator {} committed in epoch {} has rate {}", - &validator, - enqueued_slash.epoch, - slash_rate - ); - let cur_slashes = validators_and_slashes.entry(validator).or_default(); + let cur_slashes = eager_validator_slashes + .entry(validator.clone()) + .or_default(); cur_slashes.push(updated_slash); + let cur_rate = + eager_validator_slash_rates.entry(validator).or_default(); + *cur_rate = cmp::min(Dec::one(), *cur_rate + slash_rate); } - let mut deltas_for_update: HashMap> = - HashMap::new(); + // Update the epochs of enqueued slashes in storage + enqueued_slashes_handle().update_data(storage, ¶ms, current_epoch)?; - // Store the final processed slashes to their corresponding validators, then - // update the deltas - for (validator, enqueued_slashes) in validators_and_slashes.into_iter() { - let validator_stake_at_infraction = read_validator_stake( + // `resultSlashing` + let mut map_validator_slash: 
EagerRedelegatedBondsMap = BTreeMap::new(); + for (validator, slash_rate) in eager_validator_slash_rates { + process_validator_slash( storage, ¶ms, &validator, - infraction_epoch, - )? - .unwrap_or_default(); - - tracing::debug!( - "Validator {} stake at infraction epoch {} = {}", - &validator, - infraction_epoch, - validator_stake_at_infraction.to_string_native() - ); - - let mut total_rate = Dec::zero(); + slash_rate, + current_epoch, + &mut map_validator_slash, + )?; + } + tracing::debug!("Slashed amounts for validators: {map_validator_slash:#?}"); - for enqueued_slash in &enqueued_slashes { - // Add this slash to the list of validator's slashes in storage - validator_slashes_handle(&validator) - .push(storage, enqueued_slash.clone())?; + // Now update the remaining parts of storage - total_rate += enqueued_slash.rate; + // Write slashes themselves into storage + for (validator, slashes) in eager_validator_slashes { + let validator_slashes = validator_slashes_handle(&validator); + for slash in slashes { + validator_slashes.push(storage, slash)?; } - total_rate = cmp::min(Dec::one(), total_rate); - - // Find the total amount deducted from the deltas due to unbonds that - // became active after the infraction epoch, accounting for slashes - let mut total_unbonded = token::Amount::default(); - - let total_bonded_handle = total_bonded_handle(&validator); - let mut sum_post_bonds = token::Change::default(); - - // Start from after the infraction epoch up thru last epoch before - // processing - tracing::debug!("Iterating over unbonds after the infraction epoch"); - for epoch in Epoch::iter_bounds_inclusive( - infraction_epoch.next(), - current_epoch.prev(), - ) { - tracing::debug!("Epoch {}", epoch); - let mut recent_unbonds = token::Change::default(); - let unbonds = unbond_records_handle(&validator).at(&epoch); - for unbond in unbonds.iter(storage)? 
{ - let (start, unbond_amount) = unbond?; - tracing::debug!( - "UnbondRecord: amount = {}, start_epoch {}", - unbond_amount.to_string_native(), - &start - ); - if start <= infraction_epoch { - let prev_slashes = find_slashes_in_range( - storage, - start, - Some( - infraction_epoch - .checked_sub(Epoch( - params.unbonding_len - + params.cubic_slashing_window_length, - )) - .unwrap_or_default(), - ), - &validator, - )?; - tracing::debug!( - "Slashes for this unbond: {:?}", - prev_slashes - ); + } - total_unbonded += - token::Amount::from_change(get_slashed_amount( - ¶ms, - unbond_amount, - &prev_slashes, - )?); - } else { - recent_unbonds += unbond_amount.change(); - } + // Update the validator stakes + for (validator, slash_amounts) in map_validator_slash { + let mut slash_acc = token::Amount::zero(); - tracing::debug!( - "Total unbonded (epoch {}) w slashing = {}", + // Update validator sets first because it needs to be able to read + // validator stake before we make any changes to it + for (&epoch, &slash_amount) in &slash_amounts { + let state = validator_state_handle(&validator) + .get(storage, epoch, ¶ms)? + .unwrap(); + if state != ValidatorState::Jailed { + update_validator_set( + storage, + ¶ms, + &validator, + -slash_amount.change(), epoch, - total_unbonded.to_string_native() - ); + )?; } + } + // Then update validator and total deltas + for (epoch, slash_amount) in slash_amounts { + let slash_delta = slash_amount - slash_acc; + slash_acc += slash_delta; - sum_post_bonds += total_bonded_handle - .get_delta_val(storage, epoch, ¶ms)? 
- .unwrap_or_default() - - recent_unbonds; + update_validator_deltas( + storage, + &validator, + -slash_delta.change(), + epoch, + 0, + )?; + update_total_deltas(storage, -slash_delta.change(), epoch, 0)?; } - // Compute the adjusted validator deltas and slashed amounts from the - // current up until the pipeline epoch - let mut last_slash = token::Change::default(); - for offset in 0..params.pipeline_len { - tracing::debug!( - "Epoch {}\nLast slash = {}", - current_epoch + offset, - last_slash.to_string_native() - ); - let mut recent_unbonds = token::Change::default(); - let unbonds = - unbond_records_handle(&validator).at(&(current_epoch + offset)); + // TODO: should we clear some storage here as is done in Quint?? + // Possibly make the `unbonded` LazyMaps epoched so that it is done + // automatically? + } - for unbond in unbonds.iter(storage)? { - let (start, unbond_amount) = unbond?; - tracing::debug!( - "UnbondRecord: amount = {}, start_epoch {}", - unbond_amount.to_string_native(), - &start - ); - if start <= infraction_epoch { - let prev_slashes = find_slashes_in_range( - storage, - start, - Some( - infraction_epoch - .checked_sub(Epoch( - params.unbonding_len - + params.cubic_slashing_window_length, - )) - .unwrap_or_default(), - ), - &validator, - )?; - tracing::debug!( - "Slashes for this unbond: {:?}", - prev_slashes - ); + Ok(()) +} - total_unbonded += - token::Amount::from_change(get_slashed_amount( - ¶ms, - unbond_amount, - &prev_slashes, - )?); - } else { - recent_unbonds += unbond_amount.change(); - } +/// Process a slash by (i) slashing the misbehaving validator; and (ii) any +/// validator to which it has redelegated some tokens and the slash misbehaving +/// epoch is wihtin the redelegation slashing window. +/// +/// `validator` - the misbehaving validator. +/// `slash_rate` - the slash rate. +/// `slashed_amounts_map` - a map from validator address to a map from epoch to +/// already processed slash amounts. 
+/// +/// Adds any newly processed slash amount of any involved validator to +/// `slashed_amounts_map`. +// Quint `processSlash` +fn process_validator_slash( + storage: &mut S, + params: &PosParams, + validator: &Address, + slash_rate: Dec, + current_epoch: Epoch, + slashed_amount_map: &mut EagerRedelegatedBondsMap, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + // `resultSlashValidator + let result_slash = slash_validator( + storage, + params, + validator, + slash_rate, + current_epoch, + &slashed_amount_map + .get(validator) + .cloned() + .unwrap_or_default(), + )?; - tracing::debug!( - "Total unbonded (offset {}) w slashing = {}", - offset, - total_unbonded.to_string_native() - ); - } + // `updatedSlashedAmountMap` + let validator_slashes = + slashed_amount_map.entry(validator.clone()).or_default(); + *validator_slashes = result_slash; - let this_slash = total_rate - * (validator_stake_at_infraction - total_unbonded).change(); - let diff_slashed_amount = last_slash - this_slash; - last_slash = this_slash; - // println!("This slash = {}", this_slash); - // println!("Diff slashed amount = {}", diff_slashed_amount); - // total_slashed -= diff_slashed_amount; - // total_unbonded = token::Amount::default(); - - sum_post_bonds += total_bonded_handle - .get_delta_val(storage, current_epoch + offset, ¶ms)? - .unwrap_or_default() - - recent_unbonds; - - let validator_stake_at_offset = read_validator_stake( - storage, - ¶ms, - &validator, - current_epoch + offset, - )? 
- .unwrap_or_default() - .change(); - let slashable_stake_at_offset = - validator_stake_at_offset - sum_post_bonds; - assert!(slashable_stake_at_offset >= token::Change::default()); - - let change = - cmp::max(-slashable_stake_at_offset, diff_slashed_amount); - - let val_updates = - deltas_for_update.entry(validator.clone()).or_default(); - val_updates.push((offset, change)); - } + // `outgoingRedelegation` + let outgoing_redelegations = + validator_outgoing_redelegations_handle(validator); + + // Final loop in `processSlash` + let dest_validators = outgoing_redelegations + .iter(storage)? + .map(|res| { + let ( + NestedSubKey::Data { + key: dest_validator, + nested_sub_key: _, + }, + _redelegation, + ) = res?; + Ok(dest_validator) + }) + .collect::>>()?; + + for dest_validator in dest_validators { + let to_modify = slashed_amount_map + .entry(dest_validator.clone()) + .or_default(); + + tracing::debug!( + "Slashing {} redelegation to {}", + validator, + &dest_validator + ); + + // `slashValidatorRedelegation` + slash_validator_redelegation( + storage, + params, + validator, + current_epoch, + &outgoing_redelegations.at(&dest_validator), + &validator_slashes_handle(validator), + &validator_total_redelegated_unbonded_handle(&dest_validator), + slash_rate, + to_modify, + )?; } - // println!("\nUpdating deltas"); - // Update the deltas in storage - // let mut total_slashed = token::Change::default(); - for (validator, updates) in deltas_for_update { - for (offset, delta) in updates { - // println!("Val {}, offset {}, delta {}", &validator, offset, - // delta); - tracing::debug!( - "Deltas change = {} at offset {} for validator {}", - delta.to_string_native(), - offset, - &validator - ); - // total_slashed -= change; + Ok(()) +} - update_validator_deltas( - storage, - ¶ms, - &validator, - delta, - current_epoch, - offset, - )?; - update_total_deltas( +/// In the context of a redelegation, the function computes how much a validator +/// (the destination validator of the 
redelegation) should be slashed due to the +/// misbehaving of a second validator (the source validator of the +/// redelegation). The function computes how much the validator whould be +/// slashed at all epochs between the current epoch (curEpoch) + 1 and the +/// current epoch + 1 + PIPELINE_OFFSET, accounting for any tokens of the +/// redelegation already unbonded. +/// +/// - `src_validator` - the source validator +/// - `outgoing_redelegations` - a map from pair of epochs to int that includes +/// all the redelegations from the source validator to the destination +/// validator. +/// - The outer key is epoch at which the bond started at the source +/// validator. +/// - The inner key is epoch at which the redelegation started (the epoch at +/// which was issued). +/// - `slashes` a list of slashes of the source validator. +/// - `dest_total_redelegated_unbonded` - a map of unbonded redelegated tokens +/// at the destination validator. +/// - `slash_rate` - the rate of the slash being processed. +/// - `dest_slashed_amounts` - a map from epoch to already processed slash +/// amounts. +/// +/// Adds any newly processed slash amount to `dest_slashed_amounts`. +#[allow(clippy::too_many_arguments)] +fn slash_validator_redelegation( + storage: &S, + params: &OwnedPosParams, + src_validator: &Address, + current_epoch: Epoch, + outgoing_redelegations: &NestedMap>, + slashes: &Slashes, + dest_total_redelegated_unbonded: &TotalRedelegatedUnbonded, + slash_rate: Dec, + dest_slashed_amounts: &mut BTreeMap, +) -> storage_api::Result<()> +where + S: StorageRead, +{ + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + for res in outgoing_redelegations.iter(storage)? 
{ + let ( + NestedSubKey::Data { + key: bond_start, + nested_sub_key: SubKey::Data(redel_start), + }, + amount, + ) = res?; + + if params.in_redelegation_slashing_window( + infraction_epoch, + redel_start, + params.redelegation_end_epoch_from_start(redel_start), + ) && bond_start <= infraction_epoch + { + slash_redelegation( storage, - ¶ms, - delta, + params, + amount, + bond_start, + params.redelegation_end_epoch_from_start(redel_start), + src_validator, current_epoch, - offset, + slashes, + dest_total_redelegated_unbonded, + slash_rate, + dest_slashed_amounts, )?; } } - // debug_assert!(total_slashed >= token::Change::default()); + Ok(()) +} - // TODO: Transfer all slashed tokens from PoS account to Slash Pool address - // let staking_token = staking_token_address(storage); - // transfer_tokens( - // storage, - // &staking_token, - // token::Amount::from_change(total_slashed), - // &ADDRESS, - // &SLASH_POOL_ADDRESS, - // )?; +#[allow(clippy::too_many_arguments)] +fn slash_redelegation( + storage: &S, + params: &OwnedPosParams, + amount: token::Amount, + bond_start: Epoch, + redel_bond_start: Epoch, + src_validator: &Address, + current_epoch: Epoch, + slashes: &Slashes, + total_redelegated_unbonded: &TotalRedelegatedUnbonded, + slash_rate: Dec, + slashed_amounts: &mut BTreeMap, +) -> storage_api::Result<()> +where + S: StorageRead, +{ + tracing::debug!( + "\nSlashing redelegation amount {} - bond start {} and \ + redel_bond_start {} - at rate {}\n", + amount.to_string_native(), + bond_start, + redel_bond_start, + slash_rate + ); + + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + // Slash redelegation destination validator from the next epoch only + // as they won't be jailed + let set_update_epoch = current_epoch.next(); + + let mut init_tot_unbonded = + Epoch::iter_bounds_inclusive(infraction_epoch.next(), set_update_epoch) + .map(|epoch| { + let redelegated_unbonded = total_redelegated_unbonded + .at(&epoch) + 
.at(&redel_bond_start) + .at(src_validator) + .get(storage, &bond_start)? + .unwrap_or_default(); + Ok(redelegated_unbonded) + }) + .sum::>()?; + + for epoch in Epoch::iter_range(set_update_epoch, params.pipeline_len) { + let updated_total_unbonded = { + let redelegated_unbonded = total_redelegated_unbonded + .at(&epoch) + .at(&redel_bond_start) + .at(src_validator) + .get(storage, &bond_start)? + .unwrap_or_default(); + init_tot_unbonded + redelegated_unbonded + }; + + let list_slashes = slashes + .iter(storage)? + .map(Result::unwrap) + .filter(|slash| { + params.in_redelegation_slashing_window( + slash.epoch, + params.redelegation_start_epoch_from_end(redel_bond_start), + redel_bond_start, + ) && bond_start <= slash.epoch + && slash.epoch + params.slash_processing_epoch_offset() + // TODO this may need to be `<=` as in `fn compute_total_unbonded` + // + // NOTE(Tomas): Agreed and changed to `<=`. We're looking + // for slashes that were processed before or in the epoch + // in which slashes that are currently being processed + // occurred. Because we're slashing in the beginning of an + // epoch, we're also taking slashes that were processed in + // the infraction epoch as they would still be processed + // before any infraction occurred. + <= infraction_epoch + }) + .collect::>(); + + let slashable_amount = amount + .checked_sub(updated_total_unbonded) + .unwrap_or_default(); + + let slashed = + apply_list_slashes(params, &list_slashes, slashable_amount) + .mul_ceil(slash_rate); + + let list_slashes = slashes + .iter(storage)? 
+ .map(Result::unwrap) + .filter(|slash| { + params.in_redelegation_slashing_window( + slash.epoch, + params.redelegation_start_epoch_from_end(redel_bond_start), + redel_bond_start, + ) && bond_start <= slash.epoch + }) + .collect::>(); + + let slashable_stake = + apply_list_slashes(params, &list_slashes, slashable_amount) + .mul_ceil(slash_rate); + + init_tot_unbonded = updated_total_unbonded; + let to_slash = cmp::min(slashed, slashable_stake); + if !to_slash.is_zero() { + let map_value = slashed_amounts.entry(epoch).or_default(); + *map_value += to_slash; + } + } Ok(()) } +/// Computes for a given validator and a slash how much should be slashed at all +/// epochs between the currentÃ¥ epoch (curEpoch) + 1 and the current epoch + 1 + +/// PIPELINE_OFFSET, accounting for any tokens already unbonded. +/// +/// - `validator` - the misbehaving validator. +/// - `slash_rate` - the rate of the slash being processed. +/// - `slashed_amounts_map` - a map from epoch to already processed slash +/// amounts. +/// +/// Returns a map that adds any newly processed slash amount to +/// `slashed_amounts_map`. +// `def slashValidator` +fn slash_validator( + storage: &S, + params: &OwnedPosParams, + validator: &Address, + slash_rate: Dec, + current_epoch: Epoch, + slashed_amounts_map: &BTreeMap, +) -> storage_api::Result> +where + S: StorageRead, +{ + tracing::debug!("Slashing validator {} at rate {}", validator, slash_rate); + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + let total_unbonded = total_unbonded_handle(validator); + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(validator); + let total_bonded = total_bonded_handle(validator); + let total_redelegated_bonded = + validator_total_redelegated_bonded_handle(validator); + + let mut slashed_amounts = slashed_amounts_map.clone(); + + let mut tot_bonds = total_bonded + .get_data_handler() + .iter(storage)? 
+ .map(Result::unwrap) + .filter(|&(epoch, bonded)| { + epoch <= infraction_epoch && bonded > 0.into() + }) + .collect::>(); + + let mut redelegated_bonds = tot_bonds + .keys() + .filter(|&epoch| { + !total_redelegated_bonded + .at(epoch) + .is_empty(storage) + .unwrap() + }) + .map(|epoch| { + let tot_redel_bonded = total_redelegated_bonded + .at(epoch) + .collect_map(storage) + .unwrap(); + (*epoch, tot_redel_bonded) + }) + .collect::>(); + + let mut sum = token::Amount::zero(); + + let eps = current_epoch + .iter_range(params.pipeline_len) + .collect::>(); + for epoch in eps.into_iter().rev() { + let amount = tot_bonds.iter().fold( + token::Amount::zero(), + |acc, (bond_start, bond_amount)| { + acc + compute_slash_bond_at_epoch( + storage, + params, + validator, + epoch, + infraction_epoch, + *bond_start, + *bond_amount, + redelegated_bonds.get(bond_start), + slash_rate, + ) + .unwrap() + }, + ); + + let new_bonds = total_unbonded.at(&epoch); + tot_bonds = new_bonds + .collect_map(storage) + .unwrap() + .into_iter() + .filter(|(ep, _)| *ep <= infraction_epoch) + .collect::>(); + + let new_redelegated_bonds = tot_bonds + .keys() + .filter(|&ep| { + !total_redelegated_unbonded.at(ep).is_empty(storage).unwrap() + }) + .map(|ep| { + ( + *ep, + total_redelegated_unbonded + .at(&epoch) + .at(ep) + .collect_map(storage) + .unwrap(), + ) + }) + .collect::>(); + + redelegated_bonds = new_redelegated_bonds; + + // `newSum` + sum += amount; + + // `newSlashesMap` + let cur = slashed_amounts.entry(epoch).or_default(); + *cur += sum; + } + // Hack - should this be done differently? (think this is safe) + let pipeline_epoch = current_epoch + params.pipeline_len; + let last_amt = slashed_amounts + .get(&pipeline_epoch.prev()) + .cloned() + .unwrap(); + slashed_amounts.insert(pipeline_epoch, last_amt); + + Ok(slashed_amounts) +} + +/// Get the remaining token amount in a bond after applying a set of slashes. 
+/// +/// - `validator` - the bond's validator +/// - `epoch` - the latest slash epoch to consider. +/// - `start` - the start epoch of the bond +/// - `redelegated_bonds` +fn compute_bond_at_epoch( + storage: &S, + params: &OwnedPosParams, + validator: &Address, + epoch: Epoch, + start: Epoch, + amount: token::Amount, + redelegated_bonds: Option<&EagerRedelegatedBondsMap>, +) -> storage_api::Result +where + S: StorageRead, +{ + let list_slashes = validator_slashes_handle(validator) + .iter(storage)? + .map(Result::unwrap) + .filter(|slash| { + // TODO: check bounds on second arg + start <= slash.epoch + && slash.epoch + params.slash_processing_epoch_offset() <= epoch + }) + .collect::>(); + + let slash_epoch_filter = + |e: Epoch| e + params.slash_processing_epoch_offset() <= epoch; + + let result_fold = redelegated_bonds + .map(|redelegated_bonds| { + fold_and_slash_redelegated_bonds( + storage, + params, + redelegated_bonds, + start, + &list_slashes, + slash_epoch_filter, + ) + }) + .unwrap_or_default(); + + let total_not_redelegated = amount - result_fold.total_redelegated; + let after_not_redelegated = + apply_list_slashes(params, &list_slashes, total_not_redelegated); + + Ok(after_not_redelegated + result_fold.total_after_slashing) +} + +/// Uses `fn compute_bond_at_epoch` to compute the token amount to slash in +/// order to prevent overslashing. +#[allow(clippy::too_many_arguments)] +fn compute_slash_bond_at_epoch( + storage: &S, + params: &OwnedPosParams, + validator: &Address, + epoch: Epoch, + infraction_epoch: Epoch, + bond_start: Epoch, + bond_amount: token::Amount, + redelegated_bonds: Option<&EagerRedelegatedBondsMap>, + slash_rate: Dec, +) -> storage_api::Result +where + S: StorageRead, +{ + let amount_due = compute_bond_at_epoch( + storage, + params, + validator, + infraction_epoch, + bond_start, + bond_amount, + redelegated_bonds, + )? 
+ .mul_ceil(slash_rate); + let slashable_amount = compute_bond_at_epoch( + storage, + params, + validator, + epoch, + bond_start, + bond_amount, + redelegated_bonds, + )?; + Ok(cmp::min(amount_due, slashable_amount)) +} + /// Unjail a validator that is currently jailed pub fn unjail_validator( storage: &mut S, @@ -3963,8 +5101,7 @@ where // Re-insert the validator into the validator set and update its state let pipeline_epoch = current_epoch + params.pipeline_len; let stake = - read_validator_stake(storage, ¶ms, validator, pipeline_epoch)? - .unwrap_or_default(); + read_validator_stake(storage, ¶ms, validator, pipeline_epoch)?; insert_validator_into_validator_set( storage, @@ -4017,6 +5154,7 @@ where /// Find slashes applicable to a validator with inclusive `start` and exclusive /// `end` epoch. +#[allow(dead_code)] fn find_slashes_in_range( storage: &S, start: Epoch, @@ -4032,13 +5170,233 @@ where if start <= slash.epoch && end.map(|end| slash.epoch < end).unwrap_or(true) { - // println!( - // "Slash (epoch, rate) = ({}, {})", - // &slash.epoch, &slash.rate - // ); let cur_rate = slashes.entry(slash.epoch).or_default(); *cur_rate = cmp::min(*cur_rate + slash.rate, Dec::one()); } } Ok(slashes) } + +/// Redelegate bonded tokens from a source validator to a destination validator +pub fn redelegate_tokens( + storage: &mut S, + delegator: &Address, + src_validator: &Address, + dest_validator: &Address, + current_epoch: Epoch, + amount: token::Amount, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + tracing::debug!( + "Delegator {} redelegating {} tokens from {} to {}", + delegator, + amount.to_string_native(), + src_validator, + dest_validator + ); + if amount.is_zero() { + return Ok(()); + } + + // The src and dest validators must be different + if src_validator == dest_validator { + return Err(RedelegationError::RedelegationSrcEqDest.into()); + } + + // The delegator must not be a validator + if is_validator(storage, delegator)? 
{ + return Err(RedelegationError::DelegatorIsValidator.into()); + } + + // The src and dest validators must actually be validators + if !is_validator(storage, src_validator)? { + return Err( + RedelegationError::NotAValidator(src_validator.clone()).into() + ); + } + if !is_validator(storage, dest_validator)? { + return Err( + RedelegationError::NotAValidator(dest_validator.clone()).into() + ); + } + + let params = read_pos_params(storage)?; + let pipeline_epoch = current_epoch + params.pipeline_len; + let src_redel_end_epoch = + validator_incoming_redelegations_handle(src_validator) + .get(storage, delegator)?; + + // Forbid chained redelegations. A redelegation is "chained" if: + // 1. the source validator holds bonded tokens that themselves were + // redelegated to the src validator + // 2. given the latest epoch at which the most recently redelegated tokens + // started contributing to the src validator's voting power, these tokens + // cannot be slashed anymore + let is_not_chained = if let Some(end_epoch) = src_redel_end_epoch { + // TODO: check bounds for correctness (> and presence of cubic offset) + let last_contrib_epoch = end_epoch.prev(); + // If the source validator's slashes that would cause slash on + // redelegation are now outdated (would have to be processed before or + // on start of the current epoch), the redelegation can be redelegated + // again + last_contrib_epoch + params.slash_processing_epoch_offset() + <= current_epoch + } else { + true + }; + if !is_not_chained { + return Err(RedelegationError::IsChainedRedelegation.into()); + } + + // Unbond the redelegated tokens from the src validator. + // `resultUnbond` in quint + let result_unbond = unbond_tokens( + storage, + Some(delegator), + src_validator, + amount, + current_epoch, + true, + )?; + + // The unbonded amount after slashing is what is going to be redelegated. 
+ // `amountAfterSlashing` + let amount_after_slashing = result_unbond.sum; + tracing::debug!( + "Redelegated amount after slashing: {}", + amount_after_slashing.to_string_native() + ); + + // Add incoming redelegated bonds to the dest validator. + // `updatedRedelegatedBonds` with updates to delegatorState + // `redelegatedBonded` + let redelegated_bonds = delegator_redelegated_bonds_handle(delegator) + .at(dest_validator) + .at(&pipeline_epoch) + .at(src_validator); + for (&epoch, &unbonded_amount) in result_unbond.epoch_map.iter() { + redelegated_bonds.update(storage, epoch, |current| { + current.unwrap_or_default() + unbonded_amount + })?; + } + + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, delegator, dest_validator)?; + tracing::debug!("\nRedeleg dest bonds before incrementing: {bonds:#?}"); + } + + // Add a bond delta to the destination. + if !amount_after_slashing.is_zero() { + // `updatedDelegator` with updates to `bonded` + let bond_handle = bond_handle(delegator, dest_validator); + bond_handle.add( + storage, + amount_after_slashing, + current_epoch, + params.pipeline_len, + )?; + // `updatedDestValidator` --> `with("totalVBonded")` + // Add the amount to the dest validator total bonded + let dest_total_bonded = total_bonded_handle(dest_validator); + dest_total_bonded.add( + storage, + amount_after_slashing, + current_epoch, + params.pipeline_len, + )?; + } + + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, delegator, dest_validator)?; + tracing::debug!("\nRedeleg dest bonds after incrementing: {bonds:#?}"); + } + + // Add outgoing redelegation to the src validator. 
+ // `updateOutgoingRedelegations` with `updatedSrcValidator` + let outgoing_redelegations = + validator_outgoing_redelegations_handle(src_validator) + .at(dest_validator); + for (start, &unbonded_amount) in result_unbond.epoch_map.iter() { + outgoing_redelegations.at(start).update( + storage, + current_epoch, + |current| current.unwrap_or_default() + unbonded_amount, + )?; + } + + // Add the amount to the dest validator total redelegated bonds. + let dest_total_redelegated_bonded = + validator_total_redelegated_bonded_handle(dest_validator) + .at(&pipeline_epoch) + .at(src_validator); + for (&epoch, &amount) in &result_unbond.epoch_map { + dest_total_redelegated_bonded.update(storage, epoch, |current| { + current.unwrap_or_default() + amount + })?; + } + + // Set the epoch of the validator incoming redelegation from this delegator + let dest_incoming_redelegations = + validator_incoming_redelegations_handle(dest_validator); + dest_incoming_redelegations.insert( + storage, + delegator.clone(), + pipeline_epoch, + )?; + + // Update validator set for dest validator + let is_jailed_at_pipeline = matches!( + validator_state_handle(dest_validator).get( + storage, + pipeline_epoch, + ¶ms + )?, + Some(ValidatorState::Jailed) + ); + if !is_jailed_at_pipeline { + update_validator_set( + storage, + ¶ms, + dest_validator, + amount_after_slashing.change(), + pipeline_epoch, + )?; + } + + // Update deltas + update_validator_deltas( + storage, + dest_validator, + amount_after_slashing.change(), + current_epoch, + params.pipeline_len, + )?; + update_total_deltas( + storage, + amount_after_slashing.change(), + current_epoch, + params.pipeline_len, + )?; + + Ok(()) +} + +/// Init PoS genesis wrapper helper that also initializes gov params that are +/// used in PoS with default values. 
+#[cfg(any(test, feature = "testing"))] +pub fn test_init_genesis( + storage: &mut S, + owned: OwnedPosParams, + validators: impl Iterator + Clone, + current_epoch: namada_core::types::storage::Epoch, +) -> storage_api::Result +where + S: StorageRead + StorageWrite, +{ + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + gov_params.init_storage(storage)?; + crate::init_genesis(storage, &owned, validators, current_epoch)?; + crate::read_non_pos_owned_params(storage, owned) +} diff --git a/proof_of_stake/src/parameters.rs b/proof_of_stake/src/parameters.rs index 8501aff379..0c173c9261 100644 --- a/proof_of_stake/src/parameters.rs +++ b/proof_of_stake/src/parameters.rs @@ -1,16 +1,28 @@ //! Proof-of-Stake system parameters use borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_core::types::dec::Dec; use namada_core::types::storage::Epoch; use namada_core::types::token; use namada_core::types::uint::Uint; use thiserror::Error; -/// Proof-of-Stake system parameters, set at genesis and can only be changed via -/// governance +/// Proof-of-Stake system parameters. This includes parameters that are used in +/// PoS but are read from other accounts storage (governance). #[derive(Debug, Clone, BorshDeserialize, BorshSerialize)] pub struct PosParams { + /// PoS-owned params + pub owned: OwnedPosParams, + /// Governance param - Maximum proposal voting period in epochs. + /// This param is stored in governance. 
+ pub max_proposal_period: u64, +} + +/// Proof-of-Stake system parameters owned by the PoS address, set at genesis +/// and can only be changed via governance +#[derive(Debug, Clone, BorshDeserialize, BorshSerialize)] +pub struct OwnedPosParams { /// A maximum number of consensus validators pub max_validator_slots: u64, /// Any change applied during an epoch `n` will become active at the @@ -49,6 +61,17 @@ pub struct PosParams { } impl Default for PosParams { + fn default() -> Self { + let owned = OwnedPosParams::default(); + let gov = GovernanceParameters::default(); + Self { + owned, + max_proposal_period: gov.max_proposal_period, + } + } +} + +impl Default for OwnedPosParams { fn default() -> Self { Self { max_validator_slots: 100, @@ -102,7 +125,7 @@ const MAX_TOTAL_VOTING_POWER: i64 = i64::MAX / 8; /// Assuming token amount is `u64` in micro units. const TOKEN_MAX_AMOUNT: u64 = u64::MAX / TOKENS_PER_NAM; -impl PosParams { +impl OwnedPosParams { /// Validate PoS parameters values. Returns an empty list if the values are /// valid. #[must_use] @@ -173,6 +196,55 @@ impl PosParams { let end = infraction_epoch + self.cubic_slashing_window_length; (start, end) } + + /// Get the redelegation end epoch from the start epoch + pub fn redelegation_end_epoch_from_start(&self, end: Epoch) -> Epoch { + end + self.pipeline_len + } + + /// Get the redelegation start epoch from the end epoch + pub fn redelegation_start_epoch_from_end(&self, end: Epoch) -> Epoch { + end - self.pipeline_len + } + + /// Determine if the infraction is in the lazy slashing window for a + /// redelegation source validator. Any source validator slashes that + /// were processed before redelegation was applied will be applied + /// eagerly on the redelegation amount, so this function will only return + /// `true` for applicable infractions that were processed after + /// the redelegation was applied. 
+ /// + /// The `redel_start` is the epoch in which the redelegation was applied and + /// `redel_end` the epoch in which it no longer contributed to source + /// validator's stake. + pub fn in_redelegation_slashing_window( + &self, + infraction_epoch: Epoch, + redel_start: Epoch, + redel_end: Epoch, + ) -> bool { + let processing_epoch = + infraction_epoch + self.slash_processing_epoch_offset(); + redel_start < processing_epoch && infraction_epoch < redel_end + } + + /// A test helper to add the default gov params to PoS params. + #[cfg(any(test, feature = "testing"))] + pub fn with_default_gov_params(self) -> PosParams { + let gov = GovernanceParameters::default(); + PosParams { + owned: self, + max_proposal_period: gov.max_proposal_period, + } + } +} + +impl std::ops::Deref for PosParams { + type Target = OwnedPosParams; + + fn deref(&self) -> &Self::Target { + &self.owned + } } #[cfg(test)] @@ -214,8 +286,8 @@ pub mod testing { unbonding_len in pipeline_len + 1..pipeline_len + 8, pipeline_len in Just(pipeline_len), tm_votes_per_token in 1..10_001_i128) - -> PosParams { - PosParams { + -> OwnedPosParams { + OwnedPosParams { max_validator_slots, pipeline_len, unbonding_len, diff --git a/proof_of_stake/src/pos_queries.rs b/proof_of_stake/src/pos_queries.rs index 190548570b..b694897aa8 100644 --- a/proof_of_stake/src/pos_queries.rs +++ b/proof_of_stake/src/pos_queries.rs @@ -1,7 +1,6 @@ //! Storage API for querying data about Proof-of-stake related //! data. This includes validator and epoch related data. 
-use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::ledger::parameters::storage::get_max_proposal_bytes_key; use namada_core::ledger::parameters::EpochDuration; use namada_core::ledger::storage::WlStorage; @@ -172,17 +171,17 @@ where pk: &key::common::PublicKey, epoch: Option, ) -> Result { - let pk_bytes = pk - .try_to_vec() - .expect("Serializing public key should not fail"); + let params = crate::read_pos_params(self.wl_storage) + .expect("Failed to fetch Pos params"); let epoch = epoch .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); self.get_consensus_validators(Some(epoch)) .iter() .find(|validator| { - let pk_key = key::protocol_pk_key(&validator.address); - match self.wl_storage.storage.read(&pk_key) { - Ok((Some(bytes), _)) => bytes == pk_bytes, + let protocol_keys = + crate::validator_protocol_key_handle(&validator.address); + match protocol_keys.get(self.wl_storage, epoch, ¶ms) { + Ok(Some(key)) => key == *pk, _ => false, } }) @@ -195,26 +194,24 @@ where address: &Address, epoch: Option, ) -> Result<(token::Amount, key::common::PublicKey)> { + let params = crate::read_pos_params(self.wl_storage) + .expect("Failed to fetch Pos params"); let epoch = epoch .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); self.get_consensus_validators(Some(epoch)) .iter() .find(|validator| address == &validator.address) .map(|validator| { - let protocol_pk_key = key::protocol_pk_key(&validator.address); - // TODO: rewrite this, to use `StorageRead::read` - let bytes = self - .wl_storage - .storage - .read(&protocol_pk_key) - .expect("Validator should have public protocol key") - .0 - .expect("Validator should have public protocol key"); - let protocol_pk: key::common::PublicKey = - BorshDeserialize::deserialize(&mut bytes.as_ref()).expect( - "Protocol public key in storage should be \ - deserializable", + let protocol_keys = + crate::validator_protocol_key_handle(&validator.address); + let protocol_pk = protocol_keys + 
.get(self.wl_storage, epoch, ¶ms) + .unwrap() + .expect( + "Protocol public key should be set in storage after \ + genesis.", ); + (validator.bonded_stake, protocol_pk) }) .ok_or_else(|| Error::NotValidatorAddress(address.clone(), epoch)) diff --git a/proof_of_stake/src/storage.rs b/proof_of_stake/src/storage.rs index 54bd7cfe6b..fe7e6c8d7e 100644 --- a/proof_of_stake/src/storage.rs +++ b/proof_of_stake/src/storage.rs @@ -6,7 +6,7 @@ use namada_core::types::storage::{DbKeySeg, Epoch, Key, KeySeg}; use super::ADDRESS; use crate::epoched::LAZY_MAP_SUB_KEY; -pub use crate::types::*; // TODO: not sure why this needs to be public +use crate::types::BondId; const PARAMS_STORAGE_KEY: &str = "params"; const VALIDATOR_ADDRESSES_KEY: &str = "validator_addresses"; @@ -43,6 +43,13 @@ const CONSENSUS_KEYS: &str = "consensus_keys"; const LAST_BLOCK_PROPOSER_STORAGE_KEY: &str = "last_block_proposer"; const CONSENSUS_VALIDATOR_SET_ACCUMULATOR_STORAGE_KEY: &str = "validator_rewards_accumulator"; +const VALIDATOR_INCOMING_REDELEGATIONS_KEY: &str = "incoming_redelegations"; +const VALIDATOR_OUTGOING_REDELEGATIONS_KEY: &str = "outgoing_redelegations"; +const VALIDATOR_TOTAL_REDELEGATED_BONDED_KEY: &str = "total_redelegated_bonded"; +const VALIDATOR_TOTAL_REDELEGATED_UNBONDED_KEY: &str = + "total_redelegated_unbonded"; +const DELEGATOR_REDELEGATED_BONDS_KEY: &str = "delegator_redelegated_bonds"; +const DELEGATOR_REDELEGATED_UNBONDS_KEY: &str = "delegator_redelegated_unbonds"; /// Is the given key a PoS storage key? pub fn is_pos_key(key: &Key) -> bool { @@ -257,6 +264,66 @@ pub fn validator_delegation_rewards_product_key(validator: &Address) -> Key { .expect("Cannot obtain a storage key") } +/// Storage key for a validator's incoming redelegations, where the prefixed +/// validator is the destination validator. 
+pub fn validator_incoming_redelegations_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_INCOMING_REDELEGATIONS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a validator's outgoing redelegations, where the prefixed +/// validator is the source validator. +pub fn validator_outgoing_redelegations_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_OUTGOING_REDELEGATIONS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for validator's total-redelegated-bonded amount to track for +/// slashing +pub fn validator_total_redelegated_bonded_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_TOTAL_REDELEGATED_BONDED_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for validator's total-redelegated-unbonded amount to track for +/// slashing +pub fn validator_total_redelegated_unbonded_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_TOTAL_REDELEGATED_UNBONDED_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key prefix for all delegators' redelegated bonds. +pub fn delegator_redelegated_bonds_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&DELEGATOR_REDELEGATED_BONDS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a particular delegator's redelegated bond information. +pub fn delegator_redelegated_bonds_key(delegator: &Address) -> Key { + delegator_redelegated_bonds_prefix() + .push(&delegator.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Storage key prefix for all delegators' redelegated unbonds. +pub fn delegator_redelegated_unbonds_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&DELEGATOR_REDELEGATED_UNBONDS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a particular delegator's redelegated unbond information. 
+pub fn delegator_redelegated_unbonds_key(delegator: &Address) -> Key { + delegator_redelegated_unbonds_prefix() + .push(&delegator.to_db_key()) + .expect("Cannot obtain a storage key") +} + /// Is storage key for validator's delegation rewards products? pub fn is_validator_delegation_rewards_product_key( key: &Key, @@ -521,9 +588,9 @@ pub fn is_unbond_key(key: &Key) -> Option<(BondId, Epoch, Epoch)> { DbKeySeg::AddressSeg(source), DbKeySeg::AddressSeg(validator), DbKeySeg::StringSeg(data_1), - DbKeySeg::StringSeg(withdraw_epoch_str), - DbKeySeg::StringSeg(data_2), DbKeySeg::StringSeg(start_epoch_str), + DbKeySeg::StringSeg(data_2), + DbKeySeg::StringSeg(withdraw_epoch_str), ] if addr == &ADDRESS && prefix == UNBOND_STORAGE_KEY && data_1 == lazy_map::DATA_SUBKEY diff --git a/proof_of_stake/src/tests.rs b/proof_of_stake/src/tests.rs index b7463c8ea5..ad0a2cccd9 100644 --- a/proof_of_stake/src/tests.rs +++ b/proof_of_stake/src/tests.rs @@ -1,16 +1,25 @@ //! PoS system tests mod state_machine; +mod state_machine_v2; +mod utils; -use std::cmp::min; -use std::ops::Range; +use std::cmp::{max, min}; +use std::collections::{BTreeMap, BTreeSet}; +use std::ops::{Deref, Range}; +use std::str::FromStr; +use assert_matches::assert_matches; use namada_core::ledger::storage::testing::TestWlStorage; -use namada_core::ledger::storage_api::collections::lazy_map; +use namada_core::ledger::storage_api::collections::lazy_map::{ + self, Collectable, NestedMap, +}; +use namada_core::ledger::storage_api::collections::LazyCollection; use namada_core::ledger::storage_api::token::{credit_tokens, read_balance}; use namada_core::ledger::storage_api::StorageRead; use namada_core::types::address::testing::{ - address_from_simple_seed, arb_established_address, + address_from_simple_seed, arb_established_address, established_address_1, + established_address_2, established_address_3, }; use namada_core::types::address::{Address, EstablishedAddressGen}; use namada_core::types::dec::Dec; @@ -19,9 
+28,9 @@ use namada_core::types::key::testing::{ arb_common_keypair, common_sk_from_simple_seed, }; use namada_core::types::key::RefTo; -use namada_core::types::storage::{BlockHeight, Epoch}; +use namada_core::types::storage::{BlockHeight, Epoch, Key}; +use namada_core::types::token::testing::arb_amount_non_zero_ceiled; use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; -use namada_core::types::uint::Uint; use namada_core::types::{address, key, token}; use proptest::prelude::*; use proptest::test_runner::Config; @@ -29,55 +38,67 @@ use proptest::test_runner::Config; // `tracing` logs from tests use test_log::test; +use crate::epoched::DEFAULT_NUM_PAST_EPOCHS; use crate::parameters::testing::arb_pos_params; -use crate::parameters::PosParams; +use crate::parameters::{OwnedPosParams, PosParams}; use crate::types::{ into_tm_voting_power, BondDetails, BondId, BondsAndUnbondsDetails, - ConsensusValidator, GenesisValidator, Position, ReverseOrdTokenAmount, - SlashType, UnbondDetails, ValidatorSetUpdate, ValidatorState, - WeightedValidator, + ConsensusValidator, EagerRedelegatedBondsMap, GenesisValidator, Position, + RedelegatedTokens, ReverseOrdTokenAmount, Slash, SlashType, UnbondDetails, + ValidatorSetUpdate, ValidatorState, WeightedValidator, }; use crate::{ - become_validator, below_capacity_validator_set_handle, bond_handle, - bond_tokens, bonds_and_unbonds, consensus_validator_set_handle, - copy_validator_sets_and_positions, find_validator_by_raw_hash, - get_num_consensus_validators, init_genesis, + apply_list_slashes, become_validator, below_capacity_validator_set_handle, + bond_handle, bond_tokens, bonds_and_unbonds, + compute_amount_after_slashing_unbond, + compute_amount_after_slashing_withdraw, compute_bond_at_epoch, + compute_modified_redelegation, compute_new_redelegated_unbonds, + compute_slash_bond_at_epoch, compute_slashable_amount, + consensus_validator_set_handle, copy_validator_sets_and_positions, + delegator_redelegated_bonds_handle, 
delegator_redelegated_unbonds_handle, + find_bonds_to_remove, find_validator_by_raw_hash, + fold_and_slash_redelegated_bonds, get_num_consensus_validators, insert_validator_into_validator_set, is_validator, process_slashes, - purge_validator_sets_for_old_epoch, read_below_capacity_validator_set_addresses_with_stake, read_below_threshold_validator_set_addresses, read_consensus_validator_set_addresses_with_stake, read_total_stake, - read_validator_delta_value, read_validator_stake, slash, - staking_token_address, store_total_consensus_stake, total_deltas_handle, + read_validator_deltas_value, read_validator_stake, slash, + slash_redelegation, slash_validator, slash_validator_redelegation, + staking_token_address, store_total_consensus_stake, test_init_genesis, + total_bonded_handle, total_deltas_handle, total_unbonded_handle, unbond_handle, unbond_tokens, unjail_validator, update_validator_deltas, update_validator_set, validator_consensus_key_handle, - validator_set_positions_handle, validator_set_update_tendermint, - validator_slashes_handle, validator_state_handle, withdraw_tokens, - write_validator_address_raw_hash, BecomeValidator, - STORE_VALIDATOR_SETS_LEN, + validator_incoming_redelegations_handle, + validator_outgoing_redelegations_handle, validator_set_positions_handle, + validator_set_update_tendermint, validator_slashes_handle, + validator_state_handle, validator_total_redelegated_bonded_handle, + validator_total_redelegated_unbonded_handle, withdraw_tokens, + write_pos_params, write_validator_address_raw_hash, BecomeValidator, + EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, ModifiedRedelegation, + RedelegationError, }; proptest! { - // Generate arb valid input for `test_init_genesis_aux` + // Generate arb valid input for `test_test_init_genesis_aux` #![proptest_config(Config { - cases: 1, + cases: 100, .. 
Config::default() })] #[test] - fn test_init_genesis( + fn test_test_init_genesis( (pos_params, genesis_validators) in arb_params_and_genesis_validators(Some(5), 1..10), start_epoch in (0_u64..1000).prop_map(Epoch), ) { - test_init_genesis_aux(pos_params, start_epoch, genesis_validators) + test_test_init_genesis_aux(pos_params, start_epoch, genesis_validators) } } proptest! { // Generate arb valid input for `test_bonds_aux` #![proptest_config(Config { - cases: 1, + cases: 100, .. Config::default() })] #[test] @@ -93,7 +114,7 @@ proptest! { proptest! { // Generate arb valid input for `test_become_validator_aux` #![proptest_config(Config { - cases: 1, + cases: 100, .. Config::default() })] #[test] @@ -112,7 +133,7 @@ proptest! { proptest! { // Generate arb valid input for `test_slashes_with_unbonding_aux` #![proptest_config(Config { - cases: 5, + cases: 100, .. Config::default() })] #[test] @@ -128,7 +149,7 @@ proptest! { proptest! { // Generate arb valid input for `test_unjail_validator_aux` #![proptest_config(Config { - cases: 5, + cases: 100, .. Config::default() })] #[test] @@ -141,10 +162,76 @@ proptest! { } } +proptest! { + // Generate arb valid input for `test_simple_redelegation_aux` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_simple_redelegation( + + genesis_validators in arb_genesis_validators(2..4, None), + (amount_delegate, amount_redelegate, amount_unbond) in arb_redelegation_amounts(20) + + ) { + test_simple_redelegation_aux(genesis_validators, amount_delegate, amount_redelegate, amount_unbond) + } +} + +proptest! { + // Generate arb valid input for `test_simple_redelegation_aux` + #![proptest_config(Config { + cases: 100, + .. 
Config::default() + })] + #[test] + fn test_redelegation_with_slashing( + + genesis_validators in arb_genesis_validators(2..4, None), + (amount_delegate, amount_redelegate, amount_unbond) in arb_redelegation_amounts(20) + + ) { + test_redelegation_with_slashing_aux(genesis_validators, amount_delegate, amount_redelegate, amount_unbond) + } +} + +proptest! { + // Generate arb valid input for `test_chain_redelegations_aux` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_chain_redelegations( + + genesis_validators in arb_genesis_validators(3..4, None), + + ) { + test_chain_redelegations_aux(genesis_validators) + } +} + +proptest! { + // Generate arb valid input for `test_overslashing_aux` + #![proptest_config(Config { + cases: 1, + .. Config::default() + })] + #[test] + fn test_overslashing( + + genesis_validators in arb_genesis_validators(4..5, None), + + ) { + test_overslashing_aux(genesis_validators) + } +} + fn arb_params_and_genesis_validators( num_max_validator_slots: Option, val_size: Range, -) -> impl Strategy)> { +) -> impl Strategy)> { let params = arb_pos_params(num_max_validator_slots); params.prop_flat_map(move |params| { let validators = arb_genesis_validators( @@ -156,7 +243,7 @@ fn arb_params_and_genesis_validators( } fn test_slashes_with_unbonding_params() --> impl Strategy, u64)> { +-> impl Strategy, u64)> { let params = arb_pos_params(Some(5)); params.prop_flat_map(|params| { let unbond_delay = 0..(params.slash_processing_epoch_offset() * 2); @@ -168,8 +255,8 @@ fn test_slashes_with_unbonding_params() } /// Test genesis initialization -fn test_init_genesis_aux( - params: PosParams, +fn test_test_init_genesis_aux( + params: OwnedPosParams, start_epoch: Epoch, mut validators: Vec, ) { @@ -181,8 +268,13 @@ fn test_init_genesis_aux( s.storage.block.epoch = start_epoch; validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); - init_genesis(&mut s, ¶ms, validators.clone().into_iter(), start_epoch) - .unwrap(); + let 
params = test_init_genesis( + &mut s, + params, + validators.clone().into_iter(), + start_epoch, + ) + .unwrap(); let mut bond_details = bonds_and_unbonds(&s, None, None).unwrap(); assert!(bond_details.iter().all(|(_id, details)| { @@ -250,7 +342,7 @@ fn test_init_genesis_aux( /// Test bonding /// NOTE: copy validator sets each time we advance the epoch -fn test_bonds_aux(params: PosParams, validators: Vec) { +fn test_bonds_aux(params: OwnedPosParams, validators: Vec) { // This can be useful for debugging: // params.pipeline_len = 2; // params.unbonding_len = 4; @@ -260,9 +352,9 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { // Genesis let start_epoch = s.storage.block.epoch; let mut current_epoch = s.storage.block.epoch; - init_genesis( + let params = test_init_genesis( &mut s, - ¶ms, + params, validators.clone().into_iter(), current_epoch, ) @@ -303,10 +395,8 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { // Check the bond delta let self_bond = bond_handle(&validator.address, &validator.address); - let delta = self_bond - .get_delta_val(&s, pipeline_epoch, ¶ms) - .unwrap(); - assert_eq!(delta, Some(amount_self_bond.change())); + let delta = self_bond.get_delta_val(&s, pipeline_epoch).unwrap(); + assert_eq!(delta, Some(amount_self_bond)); // Check the validator in the validator set let set = @@ -322,13 +412,9 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { } )); - let val_deltas = read_validator_delta_value( - &s, - ¶ms, - &validator.address, - pipeline_epoch, - ) - .unwrap(); + let val_deltas = + read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) + .unwrap(); assert_eq!(val_deltas, Some(amount_self_bond.change())); let total_deltas_handle = total_deltas_handle(); @@ -423,12 +509,10 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { &validator.address, pipeline_epoch.prev(), ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let val_stake_post = read_validator_stake(&s, ¶ms, &validator.address, 
pipeline_epoch) - .unwrap() - .unwrap_or_default(); + .unwrap(); assert_eq!(validator.tokens + amount_self_bond, val_stake_pre); assert_eq!( validator.tokens + amount_self_bond + amount_del, @@ -440,14 +524,14 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { .get_sum(&s, pipeline_epoch.prev(), ¶ms) .unwrap() .unwrap_or_default(), - token::Change::default() + token::Amount::zero() ); assert_eq!( delegation .get_sum(&s, pipeline_epoch, ¶ms) .unwrap() .unwrap_or_default(), - amount_del.change() + amount_del ); // Check delegation bonds details after delegation @@ -532,7 +616,7 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { amount_self_bond + (validator.tokens / 2); // When the difference is 0, only the non-genesis self-bond is unbonded let unbonded_genesis_self_bond = - amount_self_unbond - amount_self_bond != token::Amount::default(); + amount_self_unbond - amount_self_bond != token::Amount::zero(); dbg!( amount_self_unbond, amount_self_bond, @@ -546,6 +630,7 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { &validator.address, amount_self_unbond, current_epoch, + false, ) .unwrap(); @@ -561,22 +646,21 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) .unwrap(); - let val_delta = read_validator_delta_value( - &s, - ¶ms, - &validator.address, - pipeline_epoch, - ) - .unwrap(); + let val_delta = + read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) + .unwrap(); let unbond = unbond_handle(&validator.address, &validator.address); assert_eq!(val_delta, Some(-amount_self_unbond.change())); assert_eq!( unbond - .at(&(pipeline_epoch - + params.unbonding_len - + params.cubic_slashing_window_length)) - .get(&s, &Epoch::default()) + .at(&Epoch::default()) + .get( + &s, + &(pipeline_epoch + + params.unbonding_len + + params.cubic_slashing_window_length) + ) .unwrap(), if unbonded_genesis_self_bond { Some(amount_self_unbond - amount_self_bond) @@ -586,23 
+670,23 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { ); assert_eq!( unbond - .at(&(pipeline_epoch - + params.unbonding_len - + params.cubic_slashing_window_length)) - .get(&s, &(self_bond_epoch + params.pipeline_len)) + .at(&(self_bond_epoch + params.pipeline_len)) + .get( + &s, + &(pipeline_epoch + + params.unbonding_len + + params.cubic_slashing_window_length) + ) .unwrap(), Some(amount_self_bond) ); assert_eq!( val_stake_pre, - Some(validator.tokens + amount_self_bond + amount_del) + validator.tokens + amount_self_bond + amount_del ); assert_eq!( val_stake_post, - Some( - validator.tokens + amount_self_bond + amount_del - - amount_self_unbond - ) + validator.tokens + amount_self_bond + amount_del - amount_self_unbond ); // Check all bond and unbond details (self-bonds and delegation) @@ -680,6 +764,7 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { &validator.address, amount_undel, current_epoch, + false, ) .unwrap(); @@ -693,13 +778,9 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { let val_stake_post = read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) .unwrap(); - let val_delta = read_validator_delta_value( - &s, - ¶ms, - &validator.address, - pipeline_epoch, - ) - .unwrap(); + let val_delta = + read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) + .unwrap(); let unbond = unbond_handle(&delegator, &validator.address); assert_eq!( @@ -708,24 +789,24 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { ); assert_eq!( unbond - .at(&(pipeline_epoch - + params.unbonding_len - + params.cubic_slashing_window_length)) - .get(&s, &(delegation_epoch + params.pipeline_len)) + .at(&(delegation_epoch + params.pipeline_len)) + .get( + &s, + &(pipeline_epoch + + params.unbonding_len + + params.cubic_slashing_window_length) + ) .unwrap(), Some(amount_undel) ); assert_eq!( val_stake_pre, - Some(validator.tokens + amount_self_bond + amount_del) + validator.tokens + amount_self_bond + amount_del ); 
assert_eq!( val_stake_post, - Some( - validator.tokens + amount_self_bond - amount_self_unbond - + amount_del - - amount_undel - ) + validator.tokens + amount_self_bond - amount_self_unbond + amount_del + - amount_undel ); let withdrawable_offset = params.unbonding_len @@ -801,7 +882,7 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { /// Test validator initialization. fn test_become_validator_aux( - params: PosParams, + params: OwnedPosParams, new_validator: Address, new_validator_consensus_key: SecretKey, validators: Vec, @@ -814,10 +895,10 @@ fn test_become_validator_aux( let mut s = TestWlStorage::default(); // Genesis - let mut current_epoch = dbg!(s.storage.block.epoch); - init_genesis( + let mut current_epoch = s.storage.block.epoch; + let params = test_init_genesis( &mut s, - ¶ms, + params, validators.clone().into_iter(), current_epoch, ) @@ -848,6 +929,8 @@ fn test_become_validator_aux( // Initialize the validator account let consensus_key = new_validator_consensus_key.to_public(); + let protocol_sk = common_sk_from_simple_seed(0); + let protocol_key = protocol_sk.to_public(); let eth_hot_key = key::common::PublicKey::Secp256k1( key::testing::gen_keypair::().ref_to(), ); @@ -859,6 +942,7 @@ fn test_become_validator_aux( params: ¶ms, address: &new_validator, consensus_key: &consensus_key, + protocol_key: &protocol_key, eth_cold_key: ð_cold_key, eth_hot_key: ð_hot_key, current_epoch, @@ -888,10 +972,8 @@ fn test_become_validator_aux( // Check the bond delta let bond_handle = bond_handle(&new_validator, &new_validator); let pipeline_epoch = current_epoch + params.pipeline_len; - let delta = bond_handle - .get_delta_val(&s, pipeline_epoch, ¶ms) - .unwrap(); - assert_eq!(delta, Some(amount.change())); + let delta = bond_handle.get_delta_val(&s, pipeline_epoch).unwrap(); + assert_eq!(delta, Some(amount)); // Check the validator in the validator set - // If the consensus validator slots are full and all the genesis validators @@ -935,7 +1017,8 @@ fn 
test_become_validator_aux( current_epoch = advance_epoch(&mut s, ¶ms); // Unbond the self-bond - unbond_tokens(&mut s, None, &new_validator, amount, current_epoch).unwrap(); + unbond_tokens(&mut s, None, &new_validator, amount, current_epoch, false) + .unwrap(); let withdrawable_offset = params.unbonding_len + params.pipeline_len; @@ -949,7 +1032,7 @@ fn test_become_validator_aux( } fn test_slashes_with_unbonding_aux( - mut params: PosParams, + mut params: OwnedPosParams, validators: Vec, unbond_delay: u64, ) { @@ -975,9 +1058,9 @@ fn test_slashes_with_unbonding_aux( // Genesis // let start_epoch = s.storage.block.epoch; let mut current_epoch = s.storage.block.epoch; - init_genesis( + let params = test_init_genesis( &mut s, - ¶ms, + params, validators.clone().into_iter(), current_epoch, ) @@ -1022,7 +1105,8 @@ fn test_slashes_with_unbonding_aux( let unbond_amount = Dec::new(5, 1).unwrap() * val_tokens; println!("Going to unbond {}", unbond_amount.to_string_native()); let unbond_epoch = current_epoch; - unbond_tokens(&mut s, None, val_addr, unbond_amount, unbond_epoch).unwrap(); + unbond_tokens(&mut s, None, val_addr, unbond_amount, unbond_epoch, false) + .unwrap(); // Discover second slash let slash_1_evidence_epoch = current_epoch; @@ -1128,7 +1212,7 @@ fn test_validator_raw_hash() { fn test_validator_sets() { let mut s = TestWlStorage::default(); // Only 3 consensus validator slots - let params = PosParams { + let params = OwnedPosParams { max_validator_slots: 3, ..Default::default() }; @@ -1145,39 +1229,6 @@ fn test_validator_sets() { res }; - // A helper to insert a non-genesis validator - let insert_validator = |s: &mut TestWlStorage, - addr, - pk: &PublicKey, - stake: token::Amount, - epoch: Epoch| { - insert_validator_into_validator_set( - s, - ¶ms, - addr, - stake, - epoch, - params.pipeline_len, - ) - .unwrap(); - - update_validator_deltas( - s, - ¶ms, - addr, - stake.change(), - epoch, - params.pipeline_len, - ) - .unwrap(); - - // Set their consensus key 
(needed for - // `validator_set_update_tendermint` fn) - validator_consensus_key_handle(addr) - .set(s, pk.clone(), epoch, params.pipeline_len) - .unwrap(); - }; - // Create genesis validators let ((val1, pk1), stake1) = (gen_validator(), token::Amount::native_whole(1)); @@ -1204,14 +1255,18 @@ fn test_validator_sets() { let start_epoch = Epoch::default(); let epoch = start_epoch; - init_genesis( + let protocol_sk_1 = common_sk_from_simple_seed(0); + let protocol_sk_2 = common_sk_from_simple_seed(1); + + let params = test_init_genesis( &mut s, - ¶ms, + params, [ GenesisValidator { address: val1.clone(), tokens: stake1, consensus_key: pk1.clone(), + protocol_key: protocol_sk_1.to_public(), eth_hot_key: key::common::PublicKey::Secp256k1( key::testing::gen_keypair::() .ref_to(), @@ -1228,6 +1283,7 @@ fn test_validator_sets() { address: val2.clone(), tokens: stake2, consensus_key: pk2.clone(), + protocol_key: protocol_sk_2.to_public(), eth_hot_key: key::common::PublicKey::Secp256k1( key::testing::gen_keypair::() .ref_to(), @@ -1246,6 +1302,38 @@ fn test_validator_sets() { ) .unwrap(); + // A helper to insert a non-genesis validator + let insert_validator = |s: &mut TestWlStorage, + addr, + pk: &PublicKey, + stake: token::Amount, + epoch: Epoch| { + insert_validator_into_validator_set( + s, + ¶ms, + addr, + stake, + epoch, + params.pipeline_len, + ) + .unwrap(); + + update_validator_deltas( + s, + addr, + stake.change(), + epoch, + params.pipeline_len, + ) + .unwrap(); + + // Set their consensus key (needed for + // `validator_set_update_tendermint` fn) + validator_consensus_key_handle(addr) + .set(s, pk.clone(), epoch, params.pipeline_len) + .unwrap(); + }; + // Advance to EPOCH 1 // // We cannot call `get_tendermint_set_updates` for the genesis state as @@ -1459,13 +1547,18 @@ fn test_validator_sets() { // Because `update_validator_set` and `update_validator_deltas` are // effective from pipeline offset, we use pipeline epoch for the rest of the // checks - 
update_validator_set(&mut s, ¶ms, &val1, -unbond.change(), epoch) - .unwrap(); - update_validator_deltas( + update_validator_set( &mut s, ¶ms, &val1, -unbond.change(), + pipeline_epoch, + ) + .unwrap(); + update_validator_deltas( + &mut s, + &val1, + -unbond.change(), epoch, params.pipeline_len, ) @@ -1655,10 +1748,10 @@ fn test_validator_sets() { let bond = token::Amount::from_uint(500_000, 0).unwrap(); let stake6 = stake6 + bond; println!("val6 {val6} new stake {}", stake6.to_string_native()); - update_validator_set(&mut s, ¶ms, &val6, bond.change(), epoch).unwrap(); + update_validator_set(&mut s, ¶ms, &val6, bond.change(), pipeline_epoch) + .unwrap(); update_validator_deltas( &mut s, - ¶ms, &val6, bond.change(), epoch, @@ -1771,16 +1864,17 @@ fn test_validator_sets() { ); assert_eq!(tm_updates[1], ValidatorSetUpdate::Deactivated(pk4)); - // Check that the validator sets were purged for the old epochs + // Check that the below-capacity validator set was purged for the old epochs + // but that the consensus_validator_set was not let last_epoch = epoch; for e in Epoch::iter_bounds_inclusive( start_epoch, last_epoch - .sub_or_default(Epoch(STORE_VALIDATOR_SETS_LEN)) + .sub_or_default(Epoch(DEFAULT_NUM_PAST_EPOCHS)) .sub_or_default(Epoch(1)), ) { assert!( - consensus_validator_set_handle() + !consensus_validator_set_handle() .at(&e) .is_empty(&s) .unwrap() @@ -1804,15 +1898,16 @@ fn test_validator_sets() { fn test_validator_sets_swap() { let mut s = TestWlStorage::default(); // Only 2 consensus validator slots - let params = PosParams { + let params = OwnedPosParams { max_validator_slots: 2, // Set the stake threshold to 0 so no validators are in the // below-threshold set - validator_stake_threshold: token::Amount::default(), + validator_stake_threshold: token::Amount::zero(), // Set 0.1 votes per token tm_votes_per_token: Dec::new(1, 1).expect("Dec creation failed"), ..Default::default() }; + let addr_seed = "seed"; let mut address_gen = 
EstablishedAddressGen::new(addr_seed); let mut sk_seed = 0; @@ -1826,39 +1921,6 @@ fn test_validator_sets_swap() { res }; - // A helper to insert a non-genesis validator - let insert_validator = |s: &mut TestWlStorage, - addr, - pk: &PublicKey, - stake: token::Amount, - epoch: Epoch| { - insert_validator_into_validator_set( - s, - ¶ms, - addr, - stake, - epoch, - params.pipeline_len, - ) - .unwrap(); - - update_validator_deltas( - s, - ¶ms, - addr, - stake.change(), - epoch, - params.pipeline_len, - ) - .unwrap(); - - // Set their consensus key (needed for - // `validator_set_update_tendermint` fn) - validator_consensus_key_handle(addr) - .set(s, pk.clone(), epoch, params.pipeline_len) - .unwrap(); - }; - // Start with two genesis validators, one with 1 voting power and other 0 let epoch = Epoch::default(); // 1M voting power @@ -1874,14 +1936,18 @@ fn test_validator_sets_swap() { println!("val2: {val2}, {pk2}, {}", stake2.to_string_native()); println!("val3: {val3}, {pk3}, {}", stake3.to_string_native()); - init_genesis( + let protocol_sk_1 = common_sk_from_simple_seed(0); + let protocol_sk_2 = common_sk_from_simple_seed(1); + + let params = test_init_genesis( &mut s, - ¶ms, + params, [ GenesisValidator { address: val1, tokens: stake1, consensus_key: pk1, + protocol_key: protocol_sk_1.to_public(), eth_hot_key: key::common::PublicKey::Secp256k1( key::testing::gen_keypair::() .ref_to(), @@ -1898,6 +1964,7 @@ fn test_validator_sets_swap() { address: val2.clone(), tokens: stake2, consensus_key: pk2, + protocol_key: protocol_sk_2.to_public(), eth_hot_key: key::common::PublicKey::Secp256k1( key::testing::gen_keypair::() .ref_to(), @@ -1916,6 +1983,38 @@ fn test_validator_sets_swap() { ) .unwrap(); + // A helper to insert a non-genesis validator + let insert_validator = |s: &mut TestWlStorage, + addr, + pk: &PublicKey, + stake: token::Amount, + epoch: Epoch| { + insert_validator_into_validator_set( + s, + ¶ms, + addr, + stake, + epoch, + params.pipeline_len, + ) + 
.unwrap(); + + update_validator_deltas( + s, + addr, + stake.change(), + epoch, + params.pipeline_len, + ) + .unwrap(); + + // Set their consensus key (needed for + // `validator_set_update_tendermint` fn) + validator_consensus_key_handle(addr) + .set(s, pk.clone(), epoch, params.pipeline_len) + .unwrap(); + }; + // Advance to EPOCH 1 let epoch = advance_epoch(&mut s, ¶ms); let pipeline_epoch = epoch + params.pipeline_len; @@ -1936,31 +2035,41 @@ fn test_validator_sets_swap() { assert_eq!(into_tm_voting_power(params.tm_votes_per_token, stake2), 0); assert_eq!(into_tm_voting_power(params.tm_votes_per_token, stake3), 0); - update_validator_set(&mut s, ¶ms, &val2, bond2.change(), epoch) - .unwrap(); - update_validator_deltas( + update_validator_set( &mut s, ¶ms, &val2, bond2.change(), - epoch, - params.pipeline_len, + pipeline_epoch, ) .unwrap(); - - update_validator_set(&mut s, ¶ms, &val3, bond3.change(), epoch) - .unwrap(); update_validator_deltas( &mut s, - ¶ms, - &val3, - bond3.change(), + &val2, + bond2.change(), epoch, params.pipeline_len, ) .unwrap(); - // Advance to EPOCH 2 + update_validator_set( + &mut s, + ¶ms, + &val3, + bond3.change(), + pipeline_epoch, + ) + .unwrap(); + update_validator_deltas( + &mut s, + &val3, + bond3.change(), + epoch, + params.pipeline_len, + ) + .unwrap(); + + // Advance to EPOCH 2 let epoch = advance_epoch(&mut s, ¶ms); let pipeline_epoch = epoch + params.pipeline_len; @@ -1975,25 +2084,35 @@ fn test_validator_sets_swap() { into_tm_voting_power(params.tm_votes_per_token, stake3) ); - update_validator_set(&mut s, ¶ms, &val2, bonds.change(), epoch) - .unwrap(); - update_validator_deltas( + update_validator_set( &mut s, ¶ms, &val2, bonds.change(), + pipeline_epoch, + ) + .unwrap(); + update_validator_deltas( + &mut s, + &val2, + bonds.change(), epoch, params.pipeline_len, ) .unwrap(); - update_validator_set(&mut s, ¶ms, &val3, bonds.change(), epoch) - .unwrap(); - update_validator_deltas( + update_validator_set( &mut s, ¶ms, &val3, 
bonds.change(), + pipeline_epoch, + ) + .unwrap(); + update_validator_deltas( + &mut s, + &val3, + bonds.change(), epoch, params.pipeline_len, ) @@ -2049,11 +2168,12 @@ fn advance_epoch(s: &mut TestWlStorage, params: &PosParams) -> Epoch { store_total_consensus_stake(s, current_epoch).unwrap(); copy_validator_sets_and_positions( s, + params, current_epoch, current_epoch + params.pipeline_len, ) .unwrap(); - purge_validator_sets_for_old_epoch(s, current_epoch).unwrap(); + // purge_validator_sets_for_old_epoch(s, current_epoch).unwrap(); // process_slashes(s, current_epoch).unwrap(); // dbg!(current_epoch); current_epoch @@ -2063,16 +2183,15 @@ fn arb_genesis_validators( size: Range, threshold: Option, ) -> impl Strategy> { + let threshold = threshold + .unwrap_or_else(|| PosParams::default().validator_stake_threshold); let tokens: Vec<_> = (0..size.end) .map(|ix| { if ix == 0 { - // If there's a threshold, make sure that at least one validator - // has at least a stake greater or equal to the threshold to - // avoid having an empty consensus set. - threshold - .map(|token| token.raw_amount()) - .unwrap_or(Uint::one()) - .as_u64()..=10_000_000_u64 + // Make sure that at least one validator has at least a stake + // greater or equal to the threshold to avoid having an empty + // consensus set. 
+ threshold.raw_amount().as_u64()..=10_000_000_u64 } else { 1..=10_000_000_u64 } @@ -2091,6 +2210,9 @@ fn arb_genesis_validators( let consensus_sk = common_sk_from_simple_seed(seed); let consensus_key = consensus_sk.to_public(); + let protocol_sk = common_sk_from_simple_seed(seed); + let protocol_key = protocol_sk.to_public(); + let eth_hot_key = key::common::PublicKey::Secp256k1( key::testing::gen_keypair::( ) @@ -2109,6 +2231,7 @@ fn arb_genesis_validators( address, tokens, consensus_key, + protocol_key, eth_hot_key, eth_cold_key, commission_rate, @@ -2121,17 +2244,13 @@ fn arb_genesis_validators( "Must have at least one genesis validator with stake above the \ provided threshold, if any.", move |gen_vals: &Vec| { - if let Some(thresh) = threshold { - gen_vals.iter().any(|val| val.tokens >= thresh) - } else { - true - } + gen_vals.iter().any(|val| val.tokens >= threshold) }, ) } fn test_unjail_validator_aux( - params: PosParams, + params: OwnedPosParams, mut validators: Vec, ) { println!("\nTest inputs: {params:?}, genesis validators: {validators:#?}"); @@ -2153,9 +2272,9 @@ fn test_unjail_validator_aux( // Genesis let mut current_epoch = s.storage.block.epoch; - init_genesis( + let params = test_init_genesis( &mut s, - ¶ms, + params, validators.clone().into_iter(), current_epoch, ) @@ -2253,3 +2372,3400 @@ fn test_unjail_validator_aux( let second_att = unjail_validator(&mut s, val_addr, current_epoch); assert!(second_att.is_err()); } + +/// `iterateBondsUpToAmountTest` +#[test] +fn test_find_bonds_to_remove() { + let mut storage = TestWlStorage::default(); + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + gov_params.init_storage(&mut storage).unwrap(); + write_pos_params(&mut storage, &OwnedPosParams::default()).unwrap(); + + let source = established_address_1(); + let validator = established_address_2(); + let bond_handle = bond_handle(&source, &validator); + + let (e1, e2, e6) = (Epoch(1), Epoch(2), Epoch(6)); + 
+ bond_handle + .set(&mut storage, token::Amount::from(5), e1, 0) + .unwrap(); + bond_handle + .set(&mut storage, token::Amount::from(3), e2, 0) + .unwrap(); + bond_handle + .set(&mut storage, token::Amount::from(8), e6, 0) + .unwrap(); + + // Test 1 + let bonds_for_removal = find_bonds_to_remove( + &storage, + &bond_handle.get_data_handler(), + token::Amount::from(8), + ) + .unwrap(); + assert_eq!( + bonds_for_removal.epochs, + vec![e6].into_iter().collect::>() + ); + assert!(bonds_for_removal.new_entry.is_none()); + + // Test 2 + let bonds_for_removal = find_bonds_to_remove( + &storage, + &bond_handle.get_data_handler(), + token::Amount::from(10), + ) + .unwrap(); + assert_eq!( + bonds_for_removal.epochs, + vec![e6].into_iter().collect::>() + ); + assert_eq!( + bonds_for_removal.new_entry, + Some((Epoch(2), token::Amount::from(1))) + ); + + // Test 3 + let bonds_for_removal = find_bonds_to_remove( + &storage, + &bond_handle.get_data_handler(), + token::Amount::from(11), + ) + .unwrap(); + assert_eq!( + bonds_for_removal.epochs, + vec![e6, e2].into_iter().collect::>() + ); + assert!(bonds_for_removal.new_entry.is_none()); + + // Test 4 + let bonds_for_removal = find_bonds_to_remove( + &storage, + &bond_handle.get_data_handler(), + token::Amount::from(12), + ) + .unwrap(); + assert_eq!( + bonds_for_removal.epochs, + vec![e6, e2].into_iter().collect::>() + ); + assert_eq!( + bonds_for_removal.new_entry, + Some((Epoch(1), token::Amount::from(4))) + ); +} + +/// `computeModifiedRedelegationTest` +#[test] +fn test_compute_modified_redelegation() { + let mut storage = TestWlStorage::default(); + let validator1 = established_address_1(); + let validator2 = established_address_2(); + let owner = established_address_3(); + let outer_epoch = Epoch(0); + + let mut alice = validator1.clone(); + let mut bob = validator2.clone(); + + // Ensure a ranking order of alice > bob + // TODO: check why this needs to be > (am I just confusing myself?) 
+ if bob > alice { + alice = validator2; + bob = validator1; + } + println!("\n\nalice = {}\nbob = {}\n", &alice, &bob); + + // Fill redelegated bonds in storage + let redelegated_bonds_map = delegator_redelegated_bonds_handle(&owner) + .at(&alice) + .at(&outer_epoch); + redelegated_bonds_map + .at(&alice) + .insert(&mut storage, Epoch(2), token::Amount::from(6)) + .unwrap(); + redelegated_bonds_map + .at(&alice) + .insert(&mut storage, Epoch(4), token::Amount::from(7)) + .unwrap(); + redelegated_bonds_map + .at(&bob) + .insert(&mut storage, Epoch(1), token::Amount::from(5)) + .unwrap(); + redelegated_bonds_map + .at(&bob) + .insert(&mut storage, Epoch(4), token::Amount::from(7)) + .unwrap(); + + // Test cases 1 and 2 + let mr1 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + token::Amount::from(25), + ) + .unwrap(); + let mr2 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + token::Amount::from(30), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + ..Default::default() + }; + + assert_eq!(mr1, exp_mr); + assert_eq!(mr2, exp_mr); + + // Test case 3 + let mr3 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + token::Amount::from(7), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(4)]), + ..Default::default() + }; + assert_eq!(mr3, exp_mr); + + // Test case 4 + let mr4 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + token::Amount::from(8), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(1), Epoch(4)]), + epoch_to_modify: 
Some(Epoch(1)), + new_amount: Some(4.into()), + }; + assert_eq!(mr4, exp_mr); + + // Test case 5 + let mr5 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 12.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([bob.clone()]), + ..Default::default() + }; + assert_eq!(mr5, exp_mr); + + // Test case 6 + let mr6 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 14.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: Some(alice.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(4)]), + epoch_to_modify: Some(Epoch(4)), + new_amount: Some(5.into()), + }; + assert_eq!(mr6, exp_mr); + + // Test case 7 + let mr7 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 19.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: Some(alice.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(4)]), + ..Default::default() + }; + assert_eq!(mr7, exp_mr); + + // Test case 8 + let mr8 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 21.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob]), + validator_to_modify: Some(alice), + epochs_to_remove: BTreeSet::from_iter([Epoch(2), Epoch(4)]), + epoch_to_modify: Some(Epoch(2)), + new_amount: Some(4.into()), + }; + assert_eq!(mr8, exp_mr); +} + +/// `computeBondAtEpochTest` +#[test] +fn test_compute_bond_at_epoch() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + pipeline_len: 2, + unbonding_len: 4, + 
cubic_slashing_window_length: 1, + ..Default::default() + }; + let alice = established_address_1(); + let bob = established_address_2(); + + // Test 1 + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&Default::default()), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 23.into()); + + // Test 2 + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: 4.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&Default::default()), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); + + // Test 3 + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + let mut redel_bonds = EagerRedelegatedBondsMap::default(); + redel_bonds.insert( + alice.clone(), + BTreeMap::from_iter([(Epoch(1), token::Amount::from(5))]), + ); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 23.into()); + + // Test 4 + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: 4.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); + + // Test 5 + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push( + &mut storage, + Slash { + epoch: 6.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 
23.into()); + + // Test 6 + validator_slashes_handle(&alice).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push( + &mut storage, + Slash { + epoch: 4.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 18.into(), + 9.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 18.into()); +} + +/// `computeSlashBondAtEpochTest` +#[test] +fn test_compute_slash_bond_at_epoch() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + pipeline_len: 2, + unbonding_len: 4, + cubic_slashing_window_length: 1, + ..Default::default() + }; + let alice = established_address_1(); + let bob = established_address_2(); + + let current_epoch = Epoch(20); + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + let redelegated_bond = BTreeMap::from_iter([( + alice, + BTreeMap::from_iter([(infraction_epoch - 4, token::Amount::from(10))]), + )]); + + // Test 1 + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + Some(&Default::default()), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 30.into()); + + // Test 2 + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + Some(&redelegated_bond), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 30.into()); + + // Test 3 + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: infraction_epoch.prev(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + 
Some(&Default::default()), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); + + // Test 4 + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + Some(&redelegated_bond), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); +} + +/// `computeNewRedelegatedUnbondsTest` +#[test] +fn test_compute_new_redelegated_unbonds() { + let mut storage = TestWlStorage::default(); + let alice = established_address_1(); + let bob = established_address_2(); + + let key = Key::parse("testing").unwrap(); + let redelegated_bonds = NestedMap::::open(key); + + // Populate the lazy and eager maps + let (ep1, ep2, ep4, ep5, ep6, ep7) = + (Epoch(1), Epoch(2), Epoch(4), Epoch(5), Epoch(6), Epoch(7)); + let keys_and_values = vec![ + (ep5, alice.clone(), ep2, 1), + (ep5, alice.clone(), ep4, 1), + (ep7, alice.clone(), ep2, 1), + (ep7, alice.clone(), ep4, 1), + (ep5, bob.clone(), ep1, 1), + (ep5, bob.clone(), ep4, 2), + (ep7, bob.clone(), ep1, 1), + (ep7, bob.clone(), ep4, 2), + ]; + let mut eager_map = BTreeMap::::new(); + for (outer_ep, address, inner_ep, amount) in keys_and_values { + redelegated_bonds + .at(&outer_ep) + .at(&address) + .insert(&mut storage, inner_ep, token::Amount::from(amount)) + .unwrap(); + eager_map + .entry(outer_ep) + .or_default() + .entry(address.clone()) + .or_default() + .insert(inner_ep, token::Amount::from(amount)); + } + + // Different ModifiedRedelegation objects for testing + let empty_mr = ModifiedRedelegation::default(); + let all_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: None, + epochs_to_remove: Default::default(), + epoch_to_modify: None, + new_amount: None, + }; + let mod_val_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice.clone()]), + 
validator_to_modify: None, + epochs_to_remove: Default::default(), + epoch_to_modify: None, + new_amount: None, + }; + let mod_val_partial_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([ep1]), + epoch_to_modify: None, + new_amount: None, + }; + let mod_epoch_partial_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice, bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([ep1, ep4]), + epoch_to_modify: Some(ep4), + new_amount: Some(token::Amount::from(1)), + }; + + // Test case 1 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &Default::default(), + &empty_mr, + ) + .unwrap(); + assert_eq!(res, Default::default()); + + let set5 = BTreeSet::::from_iter([ep5]); + let set56 = BTreeSet::::from_iter([ep5, ep6]); + + // Test case 2 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set5, + &empty_mr, + ) + .unwrap(); + let mut exp_res = eager_map.clone(); + exp_res.remove(&ep7); + assert_eq!(res, exp_res); + + // Test case 3 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &empty_mr, + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 4 + println!("\nTEST CASE 4\n"); + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &all_mr, + ) + .unwrap(); + assert_eq!(res, eager_map); + + // Test case 5 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &mod_val_mr, + ) + .unwrap(); + exp_res = eager_map.clone(); + exp_res.entry(ep7).or_default().remove(&bob); + assert_eq!(res, exp_res); + + // Test case 6 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &mod_val_partial_mr, + ) + .unwrap(); + exp_res = 
eager_map.clone(); + exp_res + .entry(ep7) + .or_default() + .entry(bob.clone()) + .or_default() + .remove(&ep4); + assert_eq!(res, exp_res); + + // Test case 7 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &mod_epoch_partial_mr, + ) + .unwrap(); + exp_res + .entry(ep7) + .or_default() + .entry(bob) + .or_default() + .insert(ep4, token::Amount::from(1)); + assert_eq!(res, exp_res); +} + +/// `applyListSlashesTest` +#[test] +fn test_apply_list_slashes() { + let init_epoch = Epoch(2); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + // let unbonding_len = 4u64; + // let cubic_offset = 1u64; + + let slash1 = Slash { + epoch: init_epoch, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slash2 = Slash { + epoch: init_epoch + + params.unbonding_len + + params.cubic_slashing_window_length + + 1u64, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let list1 = vec![slash1.clone()]; + let list2 = vec![slash1.clone(), slash2.clone()]; + let list3 = vec![slash1.clone(), slash1.clone()]; + let list4 = vec![slash1.clone(), slash1, slash2]; + + let res = apply_list_slashes(¶ms, &[], token::Amount::from(100)); + assert_eq!(res, token::Amount::from(100)); + + let res = apply_list_slashes(¶ms, &list1, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); + + let res = apply_list_slashes(¶ms, &list2, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); + + let res = apply_list_slashes(¶ms, &list3, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); + + let res = apply_list_slashes(¶ms, &list4, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); +} + +/// `computeSlashableAmountTest` +#[test] +fn test_compute_slashable_amount() { + let init_epoch = Epoch(2); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + 
let slash1 = Slash { + epoch: init_epoch + + params.unbonding_len + + params.cubic_slashing_window_length, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let slash2 = Slash { + epoch: init_epoch + + params.unbonding_len + + params.cubic_slashing_window_length + + 1u64, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let test_map = vec![(init_epoch, token::Amount::from(50))] + .into_iter() + .collect::>(); + + let res = compute_slashable_amount( + ¶ms, + &slash1, + token::Amount::from(100), + &BTreeMap::new(), + ); + assert_eq!(res, token::Amount::from(100)); + + let res = compute_slashable_amount( + ¶ms, + &slash2, + token::Amount::from(100), + &test_map, + ); + assert_eq!(res, token::Amount::from(50)); + + let res = compute_slashable_amount( + ¶ms, + &slash1, + token::Amount::from(100), + &test_map, + ); + assert_eq!(res, token::Amount::from(100)); +} + +/// `foldAndSlashRedelegatedBondsMapTest` +#[test] +fn test_fold_and_slash_redelegated_bonds() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + let start_epoch = Epoch(7); + + let alice = established_address_1(); + let bob = established_address_2(); + + println!("\n\nAlice: {}", alice); + println!("Bob: {}\n", bob); + + let test_slash = Slash { + epoch: Default::default(), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let test_data = vec![ + (alice.clone(), vec![(2, 1), (4, 1)]), + (bob, vec![(1, 1), (4, 2)]), + ]; + let mut eager_redel_bonds = EagerRedelegatedBondsMap::default(); + for (address, pair) in test_data { + for (epoch, amount) in pair { + eager_redel_bonds + .entry(address.clone()) + .or_default() + .insert(Epoch(epoch), token::Amount::from(amount)); + } + } + + // Test case 1 + let res = fold_and_slash_redelegated_bonds( + &storage, + ¶ms, + &eager_redel_bonds, + 
start_epoch, + &[], + |_| true, + ); + assert_eq!( + res, + FoldRedelegatedBondsResult { + total_redelegated: token::Amount::from(5), + total_after_slashing: token::Amount::from(5), + } + ); + + // Test case 2 + let res = fold_and_slash_redelegated_bonds( + &storage, + ¶ms, + &eager_redel_bonds, + start_epoch, + &[test_slash], + |_| true, + ); + assert_eq!( + res, + FoldRedelegatedBondsResult { + total_redelegated: token::Amount::from(5), + total_after_slashing: token::Amount::zero(), + } + ); + + // Test case 3 + let alice_slash = Slash { + epoch: Epoch(6), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + + let res = fold_and_slash_redelegated_bonds( + &storage, + ¶ms, + &eager_redel_bonds, + start_epoch, + &[], + |_| true, + ); + assert_eq!( + res, + FoldRedelegatedBondsResult { + total_redelegated: token::Amount::from(5), + total_after_slashing: token::Amount::from(3), + } + ); +} + +/// `slashRedelegationTest` +#[test] +fn test_slash_redelegation() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + let alice = established_address_1(); + + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(&alice); + total_redelegated_unbonded + .at(&Epoch(13)) + .at(&Epoch(10)) + .at(&alice) + .insert(&mut storage, Epoch(7), token::Amount::from(2)) + .unwrap(); + + let slashes = validator_slashes_handle(&alice); + + let mut slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::zero()), + (Epoch(16), token::Amount::zero()), + ]); + let empty_slash_amounts = slashed_amounts_map.clone(); + + // Test case 1 + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + 
assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(5)), + (Epoch(16), token::Amount::from(5)), + ]) + ); + + // Test case 2 + slashed_amounts_map = empty_slash_amounts.clone(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(11), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(7)), + ]) + ); + + // Test case 3 + slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(2)), + (Epoch(16), token::Amount::from(3)), + ]); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(8)), + ]) + ); + + // Test case 4 + slashes + .push( + &mut storage, + Slash { + epoch: Epoch(8), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slashed_amounts_map = empty_slash_amounts.clone(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 5 + slashes.pop(&mut storage).unwrap(); + slashes + .push( + &mut storage, + Slash { + epoch: Epoch(9), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + 
&total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 6 + slashes + .push( + &mut storage, + Slash { + epoch: Epoch(8), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); +} + +/// `slashValidatorRedelegationTest` +#[test] +fn test_slash_validator_redelegation() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + gov_params.init_storage(&mut storage).unwrap(); + write_pos_params(&mut storage, ¶ms).unwrap(); + + let alice = established_address_1(); + let bob = established_address_2(); + + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(&alice); + total_redelegated_unbonded + .at(&Epoch(13)) + .at(&Epoch(10)) + .at(&alice) + .insert(&mut storage, Epoch(7), token::Amount::from(2)) + .unwrap(); + + let outgoing_redelegations = + validator_outgoing_redelegations_handle(&alice).at(&bob); + + let slashes = validator_slashes_handle(&alice); + + let mut slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::zero()), + (Epoch(16), token::Amount::zero()), + ]); + let empty_slash_amounts = slashed_amounts_map.clone(); + + // Test case 1 + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 2 + 
total_redelegated_unbonded + .remove_all(&mut storage, &Epoch(13)) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 3 + total_redelegated_unbonded + .at(&Epoch(13)) + .at(&Epoch(10)) + .at(&alice) + .insert(&mut storage, Epoch(7), token::Amount::from(2)) + .unwrap(); + outgoing_redelegations + .at(&Epoch(6)) + .insert(&mut storage, Epoch(8), token::Amount::from(7)) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(7)), + ]) + ); + + // Test case 4 + slashed_amounts_map = empty_slash_amounts.clone(); + outgoing_redelegations + .remove_all(&mut storage, &Epoch(6)) + .unwrap(); + outgoing_redelegations + .at(&Epoch(7)) + .insert(&mut storage, Epoch(8), token::Amount::from(7)) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(5)), + (Epoch(16), token::Amount::from(5)), + ]) + ); + + // Test case 5 + slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(2)), + (Epoch(16), token::Amount::from(3)), + ]); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ 
+ (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(8)), + ]) + ); + + // Test case 6 + slashed_amounts_map = empty_slash_amounts.clone(); + slashes + .push( + &mut storage, + Slash { + epoch: Epoch(8), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); +} + +/// `slashValidatorTest` +#[test] +fn test_slash_validator() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + gov_params.init_storage(&mut storage).unwrap(); + write_pos_params(&mut storage, ¶ms).unwrap(); + + let alice = established_address_1(); + let bob = established_address_2(); + + let total_bonded = total_bonded_handle(&bob); + let total_unbonded = total_unbonded_handle(&bob); + let total_redelegated_bonded = + validator_total_redelegated_bonded_handle(&bob); + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(&bob); + + let infraction_stake = token::Amount::from(23); + + let initial_stakes = BTreeMap::from_iter([ + (Epoch(11), infraction_stake), + (Epoch(12), infraction_stake), + (Epoch(13), infraction_stake), + ]); + let mut exp_res = initial_stakes.clone(); + + let current_epoch = Epoch(10); + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + let processing_epoch = current_epoch.next(); + let slash_rate = Dec::one(); + + // Test case 1 + println!("\nTEST 1:"); + + total_bonded + .set(&mut storage, 23.into(), infraction_epoch - 2, 0) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + 
processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 2 + println!("\nTEST 2:"); + total_bonded + .set(&mut storage, 17.into(), infraction_epoch - 2, 0) + .unwrap(); + total_unbonded + .at(&(current_epoch + params.pipeline_len)) + .insert(&mut storage, infraction_epoch - 2, 6.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + exp_res.insert(Epoch(12), 17.into()); + exp_res.insert(Epoch(13), 17.into()); + assert_eq!(res, exp_res); + + // Test case 3 + println!("\nTEST 3:"); + total_redelegated_bonded + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(2), 5.into()) + .unwrap(); + total_redelegated_bonded + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(3), 1.into()) + .unwrap(); + + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 4 + println!("\nTEST 4:"); + total_unbonded_handle(&bob) + .at(&(current_epoch + params.pipeline_len)) + .remove(&mut storage, &(infraction_epoch - 2)) + .unwrap(); + total_unbonded_handle(&bob) + .at(&(current_epoch + params.pipeline_len)) + .insert(&mut storage, infraction_epoch - 1, 6.into()) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(2), 5.into()) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(3), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 5 + println!("\nTEST 5:"); + total_bonded_handle(&bob) + .set(&mut storage, 19.into(), 
infraction_epoch - 2, 0) + .unwrap(); + total_unbonded_handle(&bob) + .at(&(current_epoch + params.pipeline_len)) + .insert(&mut storage, infraction_epoch - 1, 4.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, Epoch(2), token::Amount::from(1)) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + .at(&infraction_epoch.prev()) + .at(&alice) + .remove(&mut storage, &Epoch(3)) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(2), 4.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + exp_res.insert(Epoch(12), 19.into()); + exp_res.insert(Epoch(13), 19.into()); + assert_eq!(res, exp_res); + + // Test case 6 + println!("\nTEST 6:"); + total_unbonded_handle(&bob) + .remove_all(&mut storage, &(current_epoch + params.pipeline_len)) + .unwrap(); + total_redelegated_unbonded + .remove_all(&mut storage, &(current_epoch + params.pipeline_len)) + .unwrap(); + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch) + .unwrap(); + total_bonded_handle(&bob) + .set(&mut storage, 23.into(), infraction_epoch - 2, 0) + .unwrap(); + total_bonded_handle(&bob) + .set(&mut storage, 6.into(), current_epoch, 0) + .unwrap(); + + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + exp_res = initial_stakes; + assert_eq!(res, exp_res); + + // Test case 7 + println!("\nTEST 7:"); + total_bonded + .get_data_handler() + .remove(&mut storage, ¤t_epoch) + .unwrap(); + total_unbonded + .at(¤t_epoch.next()) + .insert(&mut storage, current_epoch, 6.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, 
exp_res); + + // Test case 8 + println!("\nTEST 8:"); + total_bonded + .get_data_handler() + .insert(&mut storage, current_epoch, 3.into()) + .unwrap(); + total_unbonded + .at(¤t_epoch.next()) + .insert(&mut storage, current_epoch, 3.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 9 + println!("\nTEST 9:"); + total_unbonded + .remove_all(&mut storage, ¤t_epoch.next()) + .unwrap(); + total_bonded + .set(&mut storage, 6.into(), current_epoch, 0) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 5.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 10 + println!("\nTEST 10:"); + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch) + .unwrap(); + total_bonded + .get_data_handler() + .remove(&mut storage, ¤t_epoch) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 5.into()) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 11 + println!("\nTEST 11:"); + total_bonded + .set(&mut storage, 2.into(), current_epoch, 0) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 4.into()) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .remove(&mut 
storage, &3.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 1.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 12 + println!("\nTEST 12:"); + total_bonded + .set(&mut storage, 6.into(), current_epoch, 0) + .unwrap(); + total_bonded + .set(&mut storage, 2.into(), current_epoch.next(), 0) + .unwrap(); + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch.next()) + .at(&alice) + .insert(&mut storage, 2.into(), 1.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch.next()) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 13 + println!("\nTEST 13:"); + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: infraction_epoch.prev(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + total_redelegated_unbonded + .remove_all(&mut storage, ¤t_epoch.next()) + .unwrap(); + total_bonded + .get_data_handler() + .remove(&mut storage, ¤t_epoch.next()) + .unwrap(); + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch.next()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + exp_res.insert(Epoch(11), 0.into()); + exp_res.insert(Epoch(12), 0.into()); + exp_res.insert(Epoch(13), 0.into()); + assert_eq!(res, exp_res); +} + +/// `computeAmountAfterSlashingUnbondTest` +#[test] +fn compute_amount_after_slashing_unbond_test() { + 
let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Test data + let alice = established_address_1(); + let bob = established_address_2(); + let unbonds: BTreeMap = BTreeMap::from_iter([ + ((Epoch(2)), token::Amount::from(5)), + ((Epoch(4)), token::Amount::from(6)), + ]); + let redelegated_unbonds: EagerRedelegatedUnbonds = BTreeMap::from_iter([( + Epoch(2), + BTreeMap::from_iter([( + alice.clone(), + BTreeMap::from_iter([(Epoch(1), token::Amount::from(1))]), + )]), + )]); + + // Test case 1 + let slashes = vec![]; + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 2 + let bob_slash = Slash { + epoch: Epoch(5), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![bob_slash.clone()]; + validator_slashes_handle(&bob) + .push(&mut storage, bob_slash) + .unwrap(); + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 0.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 0.into()), (4.into(), 0.into())], + ); + + // Test case 3 + let alice_slash = Slash { + epoch: Epoch(0), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + 
[(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 4 + let alice_slash = Slash { + epoch: Epoch(1), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 10.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 4.into()), (4.into(), 6.into())], + ); +} + +/// `computeAmountAfterSlashingWithdrawTest` +#[test] +fn compute_amount_after_slashing_withdraw_test() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Test data + let alice = established_address_1(); + let bob = established_address_2(); + let unbonds_and_redelegated_unbonds: BTreeMap< + (Epoch, Epoch), + (token::Amount, EagerRedelegatedBondsMap), + > = BTreeMap::from_iter([ + ( + (Epoch(2), Epoch(20)), + ( + // unbond + token::Amount::from(5), + // redelegations + BTreeMap::from_iter([( + alice.clone(), + BTreeMap::from_iter([(Epoch(1), token::Amount::from(1))]), + )]), + ), + ), + ( + (Epoch(4), Epoch(20)), + ( + // unbond + token::Amount::from(6), + // redelegations + BTreeMap::default(), + ), + ), + ]); + + // Test case 1 + let slashes = vec![]; + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 2 + let bob_slash = Slash { + epoch: Epoch(5), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = 
vec![bob_slash.clone()]; + validator_slashes_handle(&bob) + .push(&mut storage, bob_slash) + .unwrap(); + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 0.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 0.into()), (4.into(), 0.into())], + ); + + // Test case 3 + let alice_slash = Slash { + epoch: Epoch(0), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 4 + let alice_slash = Slash { + epoch: Epoch(1), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 10.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 4.into()), (4.into(), 6.into())], + ); +} + +fn arb_redelegation_amounts( + max_delegation: u64, +) -> impl Strategy { + let arb_delegation = arb_amount_non_zero_ceiled(max_delegation); + let amounts = arb_delegation.prop_flat_map(move |amount_delegate| { + let amount_redelegate = arb_amount_non_zero_ceiled(max( + 1, + u64::try_from(amount_delegate.raw_amount()).unwrap() - 1, + )); + 
(Just(amount_delegate), amount_redelegate) + }); + amounts.prop_flat_map(move |(amount_delegate, amount_redelegate)| { + let amount_unbond = arb_amount_non_zero_ceiled(max( + 1, + u64::try_from(amount_redelegate.raw_amount()).unwrap() - 1, + )); + ( + Just(amount_delegate), + Just(amount_redelegate), + amount_unbond, + ) + }) +} + +fn test_simple_redelegation_aux( + mut validators: Vec, + amount_delegate: token::Amount, + amount_redelegate: token::Amount, + amount_unbond: token::Amount, +) { + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); + + let src_validator = validators[0].address.clone(); + let dest_validator = validators[1].address.clone(); + + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = staking_token_address(&storage); + let delegator = address::testing::gen_implicit_address(); + let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator, del_balance) + .unwrap(); + + // Ensure that we cannot redelegate with the same src and dest validator + let err = super::redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + &src_validator, + current_epoch, + amount_redelegate, + ) + .unwrap_err(); + let err_str = err.to_string(); + assert_matches!( + err.downcast::().unwrap().deref(), + RedelegationError::RedelegationSrcEqDest, + "Redelegation with the same src and dest validator must be rejected, \ + got {err_str}", + ); + + for _ in 0..5 { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + } + + let init_epoch = current_epoch; + + // Delegate in epoch 
1 to src_validator + println!( + "\nBONDING {} TOKENS TO {}\n", + amount_delegate.to_string_native(), + &src_validator + ); + super::bond_tokens( + &mut storage, + Some(&delegator), + &src_validator, + amount_delegate, + current_epoch, + ) + .unwrap(); + + println!("\nAFTER DELEGATION\n"); + let bonds = bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds = unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds = total_bonded_handle(&src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds = total_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + dbg!(&bonds, &bonds_dest, &unbonds, &tot_bonds, &tot_unbonds); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Redelegate in epoch 3 + println!( + "\nREDELEGATING {} TOKENS TO {}\n", + amount_redelegate.to_string_native(), + &dest_validator + ); + + super::redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + &dest_validator, + current_epoch, + amount_redelegate, + ) + .unwrap(); + + println!("\nAFTER REDELEGATION\n"); + println!("\nDELEGATOR\n"); + let bonds_src = bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds_src = unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let unbonds_dest = 
unbond_handle(&delegator, &dest_validator) + .collect_map(&storage) + .unwrap(); + let redel_bonds = delegator_redelegated_bonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + + dbg!( + &bonds_src, + &bonds_dest, + &unbonds_src, + &unbonds_dest, + &redel_bonds, + &redel_unbonds + ); + + // Dest val + println!("\nDEST VALIDATOR\n"); + + let incoming_redels_dest = + validator_incoming_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_dest = + validator_outgoing_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_dest = total_bonded_handle(&dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds_dest = total_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_dest = + validator_total_redelegated_bonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_unbonds_dest = + validator_total_redelegated_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_dest, + &outgoing_redels_dest, + &tot_bonds_dest, + &tot_unbonds_dest, + &tot_redel_bonds_dest, + &tot_redel_unbonds_dest + ); + + // Src val + println!("\nSRC VALIDATOR\n"); + + let incoming_redels_src = + validator_incoming_redelegations_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_src = + validator_outgoing_redelegations_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_src = total_bonded_handle(&src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds_src = total_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_src = + validator_total_redelegated_bonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let 
tot_redel_unbonds_src = + validator_total_redelegated_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_src, + &outgoing_redels_src, + &tot_bonds_src, + &tot_unbonds_src, + &tot_redel_bonds_src, + &tot_redel_unbonds_src + ); + + // Checks + let redelegated = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&(current_epoch + params.pipeline_len)) + .at(&src_validator) + .get(&storage, &(init_epoch + params.pipeline_len)) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + let redel_start_epoch = + validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap() + .unwrap(); + assert_eq!(redel_start_epoch, current_epoch + params.pipeline_len); + + let redelegated = validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(¤t_epoch.prev()) + .get(&storage, ¤t_epoch) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Unbond in epoch 5 from dest_validator + println!( + "\nUNBONDING {} TOKENS FROM {}\n", + amount_unbond.to_string_native(), + &dest_validator + ); + let _ = unbond_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + amount_unbond, + current_epoch, + false, + ) + .unwrap(); + + println!("\nAFTER UNBONDING\n"); + println!("\nDELEGATOR\n"); + + let bonds_src = bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds_src = 
unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let unbonds_dest = unbond_handle(&delegator, &dest_validator) + .collect_map(&storage) + .unwrap(); + let redel_bonds = delegator_redelegated_bonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + + dbg!( + &bonds_src, + &bonds_dest, + &unbonds_src, + &unbonds_dest, + &redel_bonds, + &redel_unbonds + ); + + println!("\nDEST VALIDATOR\n"); + + let incoming_redels_dest = + validator_incoming_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_dest = + validator_outgoing_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_dest = total_bonded_handle(&dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds_dest = total_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_dest = + validator_total_redelegated_bonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_unbonds_dest = + validator_total_redelegated_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_dest, + &outgoing_redels_dest, + &tot_bonds_dest, + &tot_unbonds_dest, + &tot_redel_bonds_dest, + &tot_redel_unbonds_dest + ); + + let bond_start = init_epoch + params.pipeline_len; + let redelegation_end = bond_start + params.pipeline_len + 1u64; + let unbond_end = + redelegation_end + params.withdrawable_epoch_offset() + 1u64; + let unbond_materialized = redelegation_end + params.pipeline_len + 1u64; + + // Checks + let redelegated_remaining = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(redelegated_remaining, amount_redelegate - 
amount_unbond); + + let redel_unbonded = delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&unbond_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(redel_unbonded, amount_unbond); + + dbg!(unbond_materialized, redelegation_end, bond_start); + let total_redel_unbonded = + validator_total_redelegated_unbonded_handle(&dest_validator) + .at(&unbond_materialized) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(total_redel_unbonded, amount_unbond); + + // Advance to withdrawal epoch + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == unbond_end { + break; + } + } + + // Withdraw + withdraw_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + current_epoch, + ) + .unwrap(); + + assert!( + delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .is_empty(&storage) + .unwrap() + ); + + let delegator_balance = storage + .read::(&token::balance_key(&staking_token, &delegator)) + .unwrap() + .unwrap_or_default(); + assert_eq!( + delegator_balance, + del_balance - amount_delegate + amount_unbond + ); +} + +fn test_redelegation_with_slashing_aux( + mut validators: Vec, + amount_delegate: token::Amount, + amount_redelegate: token::Amount, + amount_unbond: token::Amount, +) { + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); + + let src_validator = validators[0].address.clone(); + let dest_validator = validators[1].address.clone(); + + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + // Avoid empty consensus set by removing the threshold + validator_stake_threshold: token::Amount::zero(), + ..Default::default() + }; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, 
+ validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = staking_token_address(&storage); + let delegator = address::testing::gen_implicit_address(); + let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator, del_balance) + .unwrap(); + + for _ in 0..5 { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + } + + let init_epoch = current_epoch; + + // Delegate in epoch 5 to src_validator + println!( + "\nBONDING {} TOKENS TO {}\n", + amount_delegate.to_string_native(), + &src_validator + ); + super::bond_tokens( + &mut storage, + Some(&delegator), + &src_validator, + amount_delegate, + current_epoch, + ) + .unwrap(); + + println!("\nAFTER DELEGATION\n"); + let bonds = bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds = unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds = total_bonded_handle(&src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds = total_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + dbg!(&bonds, &bonds_dest, &unbonds, &tot_bonds, &tot_unbonds); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Redelegate in epoch 8 + println!( + "\nREDELEGATING {} TOKENS TO {}\n", + 
amount_redelegate.to_string_native(), + &dest_validator + ); + + super::redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + &dest_validator, + current_epoch, + amount_redelegate, + ) + .unwrap(); + + println!("\nAFTER REDELEGATION\n"); + println!("\nDELEGATOR\n"); + let bonds_src = bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds_src = unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let unbonds_dest = unbond_handle(&delegator, &dest_validator) + .collect_map(&storage) + .unwrap(); + let redel_bonds = delegator_redelegated_bonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + + dbg!( + &bonds_src, + &bonds_dest, + &unbonds_src, + &unbonds_dest, + &redel_bonds, + &redel_unbonds + ); + + // Dest val + println!("\nDEST VALIDATOR\n"); + + let incoming_redels_dest = + validator_incoming_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_dest = + validator_outgoing_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_dest = total_bonded_handle(&dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds_dest = total_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_dest = + validator_total_redelegated_bonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_unbonds_dest = + validator_total_redelegated_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_dest, + &outgoing_redels_dest, + &tot_bonds_dest, + &tot_unbonds_dest, + &tot_redel_bonds_dest, + &tot_redel_unbonds_dest + ); + + // Src 
val + println!("\nSRC VALIDATOR\n"); + + let incoming_redels_src = + validator_incoming_redelegations_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_src = + validator_outgoing_redelegations_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_src = total_bonded_handle(&src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds_src = total_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_src = + validator_total_redelegated_bonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_unbonds_src = + validator_total_redelegated_unbonded_handle(&src_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_src, + &outgoing_redels_src, + &tot_bonds_src, + &tot_unbonds_src, + &tot_redel_bonds_src, + &tot_redel_unbonds_src + ); + + // Checks + let redelegated = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&(current_epoch + params.pipeline_len)) + .at(&src_validator) + .get(&storage, &(init_epoch + params.pipeline_len)) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + let redel_start_epoch = + validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap() + .unwrap(); + assert_eq!(redel_start_epoch, current_epoch + params.pipeline_len); + + let redelegated = validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(¤t_epoch.prev()) + .get(&storage, ¤t_epoch) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, 
current_epoch).unwrap(); + + // Unbond in epoch 11 from dest_validator + println!( + "\nUNBONDING {} TOKENS FROM {}\n", + amount_unbond.to_string_native(), + &dest_validator + ); + let _ = unbond_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + amount_unbond, + current_epoch, + false, + ) + .unwrap(); + + println!("\nAFTER UNBONDING\n"); + println!("\nDELEGATOR\n"); + + let bonds_src = bond_handle(&delegator, &src_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let bonds_dest = bond_handle(&delegator, &dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let unbonds_src = unbond_handle(&delegator, &src_validator) + .collect_map(&storage) + .unwrap(); + let unbonds_dest = unbond_handle(&delegator, &dest_validator) + .collect_map(&storage) + .unwrap(); + let redel_bonds = delegator_redelegated_bonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) + .collect_map(&storage) + .unwrap(); + + dbg!( + &bonds_src, + &bonds_dest, + &unbonds_src, + &unbonds_dest, + &redel_bonds, + &redel_unbonds + ); + + println!("\nDEST VALIDATOR\n"); + + let incoming_redels_dest = + validator_incoming_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let outgoing_redels_dest = + validator_outgoing_redelegations_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_bonds_dest = total_bonded_handle(&dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let tot_unbonds_dest = total_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_bonds_dest = + validator_total_redelegated_bonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + let tot_redel_unbonds_dest = + validator_total_redelegated_unbonded_handle(&dest_validator) + .collect_map(&storage) + .unwrap(); + dbg!( + &incoming_redels_dest, + &outgoing_redels_dest, + &tot_bonds_dest, 
+ &tot_unbonds_dest, + &tot_redel_bonds_dest, + &tot_redel_unbonds_dest + ); + + // Advance one epoch + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Discover evidence + slash( + &mut storage, + ¶ms, + current_epoch, + init_epoch + 2 * params.pipeline_len, + 0u64, + SlashType::DuplicateVote, + &src_validator, + current_epoch.next(), + ) + .unwrap(); + + let bond_start = init_epoch + params.pipeline_len; + let redelegation_end = bond_start + params.pipeline_len + 1u64; + let unbond_end = + redelegation_end + params.withdrawable_epoch_offset() + 1u64; + let unbond_materialized = redelegation_end + params.pipeline_len + 1u64; + + // Checks + let redelegated_remaining = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(redelegated_remaining, amount_redelegate - amount_unbond); + + let redel_unbonded = delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&unbond_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(redel_unbonded, amount_unbond); + + dbg!(unbond_materialized, redelegation_end, bond_start); + let total_redel_unbonded = + validator_total_redelegated_unbonded_handle(&dest_validator) + .at(&unbond_materialized) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(total_redel_unbonded, amount_unbond); + + // Advance to withdrawal epoch + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == unbond_end { + break; + } + } + + // Withdraw + withdraw_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + current_epoch, + ) + .unwrap(); + + assert!( + 
delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .is_empty(&storage) + .unwrap() + ); + + let delegator_balance = storage + .read::(&token::balance_key(&staking_token, &delegator)) + .unwrap() + .unwrap_or_default(); + assert_eq!(delegator_balance, del_balance - amount_delegate); +} + +fn test_chain_redelegations_aux(mut validators: Vec) { + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); + + let src_validator = validators[0].address.clone(); + let _init_stake_src = validators[0].tokens; + let dest_validator = validators[1].address.clone(); + let _init_stake_dest = validators[1].tokens; + let dest_validator_2 = validators[2].address.clone(); + let _init_stake_dest_2 = validators[2].tokens; + + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = staking_token_address(&storage); + let delegator = address::testing::gen_implicit_address(); + let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator, del_balance) + .unwrap(); + + // Delegate in epoch 0 to src_validator + let bond_amount: token::Amount = 100.into(); + super::bond_tokens( + &mut storage, + Some(&delegator), + &src_validator, + bond_amount, + current_epoch, + ) + .unwrap(); + + let bond_start = current_epoch + params.pipeline_len; + + // Advance one epoch + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Redelegate in epoch 1 to dest_validator + let redel_amount_1: token::Amount = 58.into(); + super::redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + 
&dest_validator, + current_epoch, + redel_amount_1, + ) + .unwrap(); + + let redel_start = current_epoch; + let redel_end = current_epoch + params.pipeline_len; + + // Checks ---------------- + + // Dest validator should have an incoming redelegation + let incoming_redelegation = + validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap(); + assert_eq!(incoming_redelegation, Some(redel_end)); + + // Src validator should have an outoging redelegation + let outgoing_redelegation = + validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(&bond_start) + .get(&storage, &redel_start) + .unwrap(); + assert_eq!(outgoing_redelegation, Some(redel_amount_1)); + + // Delegator should have redelegated bonds + let del_total_redelegated_bonded = + delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(del_total_redelegated_bonded, redel_amount_1); + + // There should be delegator bonds for both src and dest validators + let bonded_src = bond_handle(&delegator, &src_validator); + let bonded_dest = bond_handle(&delegator, &dest_validator); + assert_eq!( + bonded_src + .get_delta_val(&storage, bond_start) + .unwrap() + .unwrap_or_default(), + bond_amount - redel_amount_1 + ); + assert_eq!( + bonded_dest + .get_delta_val(&storage, redel_end) + .unwrap() + .unwrap_or_default(), + redel_amount_1 + ); + + // The dest validator should have total redelegated bonded tokens + let dest_total_redelegated_bonded = + validator_total_redelegated_bonded_handle(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(dest_total_redelegated_bonded, redel_amount_1); + + // The dest validator's total bonded should have an entry for the genesis + // bond and the redelegation + let dest_total_bonded = 
total_bonded_handle(&dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + assert!( + dest_total_bonded.len() == 2 + && dest_total_bonded.contains_key(&Epoch::default()) + ); + assert_eq!( + dest_total_bonded + .get(&redel_end) + .cloned() + .unwrap_or_default(), + redel_amount_1 + ); + + // The src validator should have a total bonded entry for the original bond + // accounting for the redelegation + assert_eq!( + total_bonded_handle(&src_validator) + .get_delta_val(&storage, bond_start) + .unwrap() + .unwrap_or_default(), + bond_amount - redel_amount_1 + ); + + // The src validator should have a total unbonded entry due to the + // redelegation + let src_total_unbonded = total_unbonded_handle(&src_validator) + .at(&redel_end) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(src_total_unbonded, redel_amount_1); + + // Attempt to redelegate in epoch 3 to dest_validator + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + let redel_amount_2: token::Amount = 23.into(); + let redel_att = super::redelegate_tokens( + &mut storage, + &delegator, + &dest_validator, + &dest_validator_2, + current_epoch, + redel_amount_2, + ); + assert!(redel_att.is_err()); + + // Advance to right before the redelegation can be redelegated again + assert_eq!(redel_end, current_epoch); + let epoch_can_redel = + redel_end.prev() + params.slash_processing_epoch_offset(); + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == epoch_can_redel.prev() { + break; + } + } + + // Attempt to redelegate in epoch before we actually are able to + let redel_att = super::redelegate_tokens( + &mut storage, + &delegator, + &dest_validator, + &dest_validator_2, + current_epoch, + 
redel_amount_2, + ); + assert!(redel_att.is_err()); + + // Advance one more epoch + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + + // Redelegate from dest_validator to dest_validator_2 now + super::redelegate_tokens( + &mut storage, + &delegator, + &dest_validator, + &dest_validator_2, + current_epoch, + redel_amount_2, + ) + .unwrap(); + + let redel_2_start = current_epoch; + let redel_2_end = current_epoch + params.pipeline_len; + + // Checks ----------------------------------- + + // Both the dest validator and dest validator 2 should have incoming + // redelegations + let incoming_redelegation_1 = + validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap(); + assert_eq!(incoming_redelegation_1, Some(redel_end)); + let incoming_redelegation_2 = + validator_incoming_redelegations_handle(&dest_validator_2) + .get(&storage, &delegator) + .unwrap(); + assert_eq!(incoming_redelegation_2, Some(redel_2_end)); + + // Both the src validator and dest validator should have outgoing + // redelegations + let outgoing_redelegation_1 = + validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(&bond_start) + .get(&storage, &redel_start) + .unwrap(); + assert_eq!(outgoing_redelegation_1, Some(redel_amount_1)); + + let outgoing_redelegation_2 = + validator_outgoing_redelegations_handle(&dest_validator) + .at(&dest_validator_2) + .at(&redel_end) + .get(&storage, &redel_2_start) + .unwrap(); + assert_eq!(outgoing_redelegation_2, Some(redel_amount_2)); + + // All three validators should have bonds + let bonded_dest2 = bond_handle(&delegator, &dest_validator_2); + assert_eq!( + bonded_src + .get_delta_val(&storage, bond_start) + .unwrap() + .unwrap_or_default(), + bond_amount - redel_amount_1 + ); + assert_eq!( + bonded_dest + .get_delta_val(&storage, redel_end) + .unwrap() + .unwrap_or_default(), + redel_amount_1 - redel_amount_2 + ); + 
assert_eq!( + bonded_dest2 + .get_delta_val(&storage, redel_2_end) + .unwrap() + .unwrap_or_default(), + redel_amount_2 + ); + + // There should be no unbond entries + let unbond_src = unbond_handle(&delegator, &src_validator); + let unbond_dest = unbond_handle(&delegator, &dest_validator); + assert!(unbond_src.is_empty(&storage).unwrap()); + assert!(unbond_dest.is_empty(&storage).unwrap()); + + // The dest validator should have some total unbonded due to the second + // redelegation + let dest_total_unbonded = total_unbonded_handle(&dest_validator) + .at(&redel_2_end) + .get(&storage, &redel_end) + .unwrap(); + assert_eq!(dest_total_unbonded, Some(redel_amount_2)); + + // Delegator should have redelegated bonds due to both redelegations + let del_redelegated_bonds = delegator_redelegated_bonds_handle(&delegator); + assert_eq!( + Some(redel_amount_1 - redel_amount_2), + del_redelegated_bonds + .at(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + ); + assert_eq!( + Some(redel_amount_2), + del_redelegated_bonds + .at(&dest_validator_2) + .at(&redel_2_end) + .at(&dest_validator) + .get(&storage, &redel_end) + .unwrap() + ); + + // Delegator redelegated unbonds should be empty + assert!( + delegator_redelegated_unbonds_handle(&delegator) + .is_empty(&storage) + .unwrap() + ); + + // Both the dest validator and dest validator 2 should have total + // redelegated bonds + let dest_redelegated_bonded = + validator_total_redelegated_bonded_handle(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + let dest2_redelegated_bonded = + validator_total_redelegated_bonded_handle(&dest_validator_2) + .at(&redel_2_end) + .at(&dest_validator) + .get(&storage, &redel_end) + .unwrap() + .unwrap_or_default(); + assert_eq!(dest_redelegated_bonded, redel_amount_1 - redel_amount_2); + assert_eq!(dest2_redelegated_bonded, redel_amount_2); + + // Total redelegated 
unbonded should be empty for src_validator and + // dest_validator_2 + assert!( + validator_total_redelegated_unbonded_handle(&dest_validator_2) + .is_empty(&storage) + .unwrap() + ); + assert!( + validator_total_redelegated_unbonded_handle(&src_validator) + .is_empty(&storage) + .unwrap() + ); + + // The dest_validator should have total_redelegated unbonded + let tot_redel_unbonded = + validator_total_redelegated_unbonded_handle(&dest_validator) + .at(&redel_2_end) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(tot_redel_unbonded, redel_amount_2); +} + +/// SM test case 1 from Brent +#[test] +fn test_from_sm_case_1() { + use namada_core::types::address::testing::established_address_4; + + let mut storage = TestWlStorage::default(); + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + gov_params.init_storage(&mut storage).unwrap(); + write_pos_params(&mut storage, &OwnedPosParams::default()).unwrap(); + + let validator = established_address_1(); + let redeleg_src_1 = established_address_2(); + let redeleg_src_2 = established_address_3(); + let owner = established_address_4(); + let unbond_amount = token::Amount::from(3130688); + println!( + "Owner: {owner}\nValidator: {validator}\nRedeleg src 1: \ + {redeleg_src_1}\nRedeleg src 2: {redeleg_src_2}" + ); + + // Validator's incoming redelegations + let outer_epoch_1 = Epoch(27); + // from redeleg_src_1 + let epoch_1_redeleg_1 = token::Amount::from(8516); + // from redeleg_src_2 + let epoch_1_redeleg_2 = token::Amount::from(5704386); + let outer_epoch_2 = Epoch(30); + // from redeleg_src_2 + let epoch_2_redeleg_2 = token::Amount::from(1035191); + + // Insert the data - bonds and redelegated bonds + let bonds_handle = bond_handle(&owner, &validator); + bonds_handle + .add( + &mut storage, + epoch_1_redeleg_1 + epoch_1_redeleg_2, + outer_epoch_1, + 0, + ) + .unwrap(); + bonds_handle + .add(&mut storage, 
epoch_2_redeleg_2, outer_epoch_2, 0) + .unwrap(); + + let redelegated_bonds_map_1 = delegator_redelegated_bonds_handle(&owner) + .at(&validator) + .at(&outer_epoch_1); + redelegated_bonds_map_1 + .at(&redeleg_src_1) + .insert(&mut storage, Epoch(14), epoch_1_redeleg_1) + .unwrap(); + redelegated_bonds_map_1 + .at(&redeleg_src_2) + .insert(&mut storage, Epoch(18), epoch_1_redeleg_2) + .unwrap(); + let redelegated_bonds_map_1 = delegator_redelegated_bonds_handle(&owner) + .at(&validator) + .at(&outer_epoch_1); + + let redelegated_bonds_map_2 = delegator_redelegated_bonds_handle(&owner) + .at(&validator) + .at(&outer_epoch_2); + redelegated_bonds_map_2 + .at(&redeleg_src_2) + .insert(&mut storage, Epoch(18), epoch_2_redeleg_2) + .unwrap(); + + // Find the modified redelegation the same way as `unbond_tokens` + let bonds_to_unbond = find_bonds_to_remove( + &storage, + &bonds_handle.get_data_handler(), + unbond_amount, + ) + .unwrap(); + dbg!(&bonds_to_unbond); + + let (new_entry_epoch, new_bond_amount) = bonds_to_unbond.new_entry.unwrap(); + assert_eq!(outer_epoch_1, new_entry_epoch); + // The modified bond should be sum of all redelegations less the unbonded + // amouunt + assert_eq!( + epoch_1_redeleg_1 + epoch_1_redeleg_2 + epoch_2_redeleg_2 + - unbond_amount, + new_bond_amount + ); + // The current bond should be sum of redelegations fom the modified epoch + let cur_bond_amount = bonds_handle + .get_delta_val(&storage, new_entry_epoch) + .unwrap() + .unwrap_or_default(); + assert_eq!(epoch_1_redeleg_1 + epoch_1_redeleg_2, cur_bond_amount); + + let mr = compute_modified_redelegation( + &storage, + &redelegated_bonds_map_1, + new_entry_epoch, + cur_bond_amount - new_bond_amount, + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(27)), + validators_to_remove: BTreeSet::from_iter([redeleg_src_2.clone()]), + validator_to_modify: Some(redeleg_src_2), + epochs_to_remove: BTreeSet::from_iter([Epoch(18)]), + epoch_to_modify: Some(Epoch(18)), + 
new_amount: Some(token::Amount::from(3608889)), + }; + + pretty_assertions::assert_eq!(mr, exp_mr); +} + +/// Test precisely that we are not overslashing, as originally discovered by Tomas in this issue: https://github.com/informalsystems/partnership-heliax/issues/74 +fn test_overslashing_aux(mut validators: Vec) { + assert_eq!(validators.len(), 4); + + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + let offending_stake = token::Amount::native_whole(110); + let other_stake = token::Amount::native_whole(100); + + // Set stakes so we know we will get a slashing rate between 0.5 -1.0 + validators[0].tokens = offending_stake; + validators[1].tokens = other_stake; + validators[2].tokens = other_stake; + validators[3].tokens = other_stake; + + // Get the offending validator + let validator = validators[0].address.clone(); + + println!("\nTest inputs: {params:?}, genesis validators: {validators:#?}"); + let mut storage = TestWlStorage::default(); + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = storage.storage.native_token.clone(); + let delegator = address::testing::gen_implicit_address(); + let amount_del = token::Amount::native_whole(5); + credit_tokens(&mut storage, &staking_token, &delegator, amount_del) + .unwrap(); + + // Delegate tokens in epoch 0 to validator + bond_tokens( + &mut storage, + Some(&delegator), + &validator, + amount_del, + current_epoch, + ) + .unwrap(); + + let self_bond_epoch = current_epoch; + let delegation_epoch = current_epoch + params.pipeline_len; + + // Advance to pipeline epoch + for _ in 0..params.pipeline_len { + current_epoch = advance_epoch(&mut storage, ¶ms); + } + assert_eq!(delegation_epoch, current_epoch); + + // Find a misbehavior committed in epoch 0 + 
slash( + &mut storage, + ¶ms, + current_epoch, + self_bond_epoch, + 0_u64, + SlashType::DuplicateVote, + &validator, + current_epoch.next(), + ) + .unwrap(); + + // Find a misbehavior committed in current epoch + slash( + &mut storage, + ¶ms, + current_epoch, + delegation_epoch, + 0_u64, + SlashType::DuplicateVote, + &validator, + current_epoch.next(), + ) + .unwrap(); + + let processing_epoch_1 = + self_bond_epoch + params.slash_processing_epoch_offset(); + let processing_epoch_2 = + delegation_epoch + params.slash_processing_epoch_offset(); + + // Advance to processing epoch 1 + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == processing_epoch_1 { + break; + } + } + + let total_stake_1 = offending_stake + 3 * other_stake; + let stake_frac = Dec::from(offending_stake) / Dec::from(total_stake_1); + let slash_rate_1 = Dec::from_str("9.0").unwrap() * stake_frac * stake_frac; + dbg!(&slash_rate_1); + + let exp_slashed_1 = offending_stake.mul_ceil(slash_rate_1); + + // Check that the proper amount was slashed + let epoch = current_epoch.next(); + let validator_stake = + read_validator_stake(&storage, ¶ms, &validator, epoch).unwrap(); + let exp_validator_stake = offending_stake - exp_slashed_1 + amount_del; + assert_eq!(validator_stake, exp_validator_stake); + + let total_stake = read_total_stake(&storage, ¶ms, epoch).unwrap(); + let exp_total_stake = + offending_stake - exp_slashed_1 + amount_del + 3 * other_stake; + assert_eq!(total_stake, exp_total_stake); + + let self_bond_id = BondId { + source: validator.clone(), + validator: validator.clone(), + }; + let bond_amount = + crate::bond_amount(&storage, &self_bond_id, epoch).unwrap(); + let exp_bond_amount = offending_stake - exp_slashed_1; + assert_eq!(bond_amount, exp_bond_amount); + + // Advance to processing epoch 2 + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + super::process_slashes(&mut storage, 
current_epoch).unwrap(); + if current_epoch == processing_epoch_2 { + break; + } + } + + let total_stake_2 = offending_stake + amount_del + 3 * other_stake; + let stake_frac = + Dec::from(offending_stake + amount_del) / Dec::from(total_stake_2); + let slash_rate_2 = Dec::from_str("9.0").unwrap() * stake_frac * stake_frac; + dbg!(&slash_rate_2); + + let exp_slashed_from_delegation = amount_del.mul_ceil(slash_rate_2); + + // Check that the proper amount was slashed. We expect that all of the + // validator self-bond has been slashed and some of the delegation has been + // slashed due to the second infraction. + let epoch = current_epoch.next(); + + let validator_stake = + read_validator_stake(&storage, ¶ms, &validator, epoch).unwrap(); + let exp_validator_stake = amount_del - exp_slashed_from_delegation; + assert_eq!(validator_stake, exp_validator_stake); + + let total_stake = read_total_stake(&storage, ¶ms, epoch).unwrap(); + let exp_total_stake = + amount_del - exp_slashed_from_delegation + 3 * other_stake; + assert_eq!(total_stake, exp_total_stake); + + let delegation_id = BondId { + source: delegator.clone(), + validator: validator.clone(), + }; + let delegation_amount = + crate::bond_amount(&storage, &delegation_id, epoch).unwrap(); + let exp_del_amount = amount_del - exp_slashed_from_delegation; + assert_eq!(delegation_amount, exp_del_amount); + + let self_bond_amount = + crate::bond_amount(&storage, &self_bond_id, epoch).unwrap(); + let exp_bond_amount = token::Amount::zero(); + assert_eq!(self_bond_amount, exp_bond_amount); +} diff --git a/proof_of_stake/src/tests/state_machine.rs b/proof_of_stake/src/tests/state_machine.rs index 6c9968c519..a067037232 100644 --- a/proof_of_stake/src/tests/state_machine.rs +++ b/proof_of_stake/src/tests/state_machine.rs @@ -2,10 +2,14 @@ use std::cmp; use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque}; +use std::ops::Deref; +use assert_matches::assert_matches; use itertools::Itertools; use 
namada_core::ledger::storage::testing::TestWlStorage; -use namada_core::ledger::storage_api::collections::lazy_map::NestedSubKey; +use namada_core::ledger::storage_api::collections::lazy_map::{ + Collectable, NestedSubKey, SubKey, +}; use namada_core::ledger::storage_api::token::read_balance; use namada_core::ledger::storage_api::{token, StorageRead}; use namada_core::types::address::{self, Address}; @@ -27,27 +31,72 @@ use crate::parameters::testing::arb_rate; use crate::parameters::PosParams; use crate::tests::arb_params_and_genesis_validators; use crate::types::{ - BondId, GenesisValidator, ReverseOrdTokenAmount, Slash, SlashType, - SlashedAmount, ValidatorState, WeightedValidator, + BondId, EagerRedelegatedBondsMap, GenesisValidator, ReverseOrdTokenAmount, + Slash, SlashType, ValidatorState, WeightedValidator, }; use crate::{ below_capacity_validator_set_handle, consensus_validator_set_handle, enqueued_slashes_handle, read_below_threshold_validator_set_addresses, - read_pos_params, validator_deltas_handle, validator_slashes_handle, - validator_state_handle, + read_pos_params, redelegate_tokens, validator_deltas_handle, + validator_slashes_handle, validator_state_handle, BondsForRemovalRes, + EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, ModifiedRedelegation, + RedelegationError, ResultSlashing, }; prop_state_machine! { #![proptest_config(Config { cases: 2, - verbose: 1, .. 
Config::default() })] #[test] /// A `StateMachineTest` implemented on `PosState` - fn pos_state_machine_test(sequential 200 => ConcretePosState); + fn pos_state_machine_test(sequential 500 => ConcretePosState); } +type AbstractDelegatorRedelegatedBonded = BTreeMap< + Address, + BTreeMap< + Address, + BTreeMap>>, + >, +>; + +type AbstractDelegatorRedelegatedUnbonded = BTreeMap< + Address, + BTreeMap< + Address, + BTreeMap< + (Epoch, Epoch), + BTreeMap>, + >, + >, +>; + +type AbstractValidatorTotalRedelegatedBonded = BTreeMap< + Address, + BTreeMap>>, +>; + +type AbstractTotalRedelegatedUnbonded = BTreeMap< + Epoch, + BTreeMap>>, +>; + +type AbstractValidatorTotalRedelegatedUnbonded = BTreeMap< + Address, + BTreeMap< + Epoch, + BTreeMap>>, + >, +>; + +type AbstractIncomingRedelegations = + BTreeMap>; +type AbstractOutgoingRedelegations = BTreeMap< + Address, + BTreeMap>, +>; + /// Abstract representation of a state of PoS system #[derive(Clone, Debug)] struct AbstractPosState { @@ -59,13 +108,13 @@ struct AbstractPosState { genesis_validators: Vec, /// Bonds delta values. The outer key for Epoch is pipeline offset from /// epoch in which the bond is applied - bonds: BTreeMap>, + bonds: BTreeMap>, /// Total bonded tokens to a validator in each epoch. This is never /// decremented and used for slashing computations. - total_bonded: BTreeMap>, + total_bonded: BTreeMap>, /// Validator stakes. These are NOT deltas. /// Pipelined. - validator_stakes: BTreeMap>, + validator_stakes: BTreeMap>, /// Consensus validator set. Pipelined. consensus_set: BTreeMap>>, /// Below-capacity validator set. Pipelined. @@ -75,20 +124,30 @@ struct AbstractPosState { below_threshold_set: BTreeMap>, /// Validator states. Pipelined. validator_states: BTreeMap>, - /// Unbonded bonds. The outer key for Epoch is pipeline + unbonding offset - /// from epoch in which the unbond is applied. - unbonds: BTreeMap>, + /// Unbonded bonds. 
The outer key for Epoch is pipeline + unbonding + + /// cubic_window offset from epoch in which the unbond transition + /// occurs. + unbonds: BTreeMap<(Epoch, Epoch), BTreeMap>, /// Validator slashes post-processing validator_slashes: BTreeMap>, /// Enqueued slashes pre-processing enqueued_slashes: BTreeMap>>, /// The last epoch in which a validator committed an infraction validator_last_slash_epochs: BTreeMap, - /// Unbond records required for slashing. + /// Validator's total unbonded required for slashing. /// Inner `Epoch` is the epoch in which the unbond became active. /// Outer `Epoch` is the epoch in which the underlying bond became active. - unbond_records: + total_unbonded: BTreeMap>>, + /// The outer key is the epoch in which redelegation became active + /// (pipeline offset). The next key is the address of the delegator. + delegator_redelegated_bonded: AbstractDelegatorRedelegatedBonded, + delegator_redelegated_unbonded: AbstractDelegatorRedelegatedUnbonded, + validator_total_redelegated_bonded: AbstractValidatorTotalRedelegatedBonded, + validator_total_redelegated_unbonded: + AbstractValidatorTotalRedelegatedUnbonded, + incoming_redelegations: AbstractIncomingRedelegations, + outgoing_redelegations: AbstractOutgoingRedelegations, } /// The PoS system under test @@ -106,6 +165,7 @@ enum Transition { InitValidator { address: Address, consensus_key: PublicKey, + protocol_key: PublicKey, eth_cold_key: PublicKey, eth_hot_key: PublicKey, commission_rate: Dec, @@ -122,6 +182,13 @@ enum Transition { Withdraw { id: BondId, }, + Redelegate { + /// A chained redelegation must fail + is_chained: bool, + id: BondId, + new_validator: Address, + amount: token::Amount, + }, Misbehavior { address: Address, slash_type: SlashType, @@ -140,9 +207,8 @@ impl StateMachineTest for ConcretePosState { fn init_test( initial_state: &::State, ) -> Self::SystemUnderTest { - println!(); - println!("New test case"); - println!( + tracing::debug!("New test case"); + tracing::debug!( 
"Genesis validators: {:#?}", initial_state .genesis_validators @@ -151,9 +217,9 @@ impl StateMachineTest for ConcretePosState { .collect::>() ); let mut s = TestWlStorage::default(); - crate::init_genesis( + crate::test_init_genesis( &mut s, - &initial_state.params, + initial_state.params.owned.clone(), initial_state.genesis_validators.clone().into_iter(), initial_state.epoch, ) @@ -163,7 +229,7 @@ impl StateMachineTest for ConcretePosState { fn apply( mut state: Self::SystemUnderTest, - _ref_state: &::State, + ref_state: &::State, transition: ::Transition, ) -> Self::SystemUnderTest { let params = crate::read_pos_params(&state.s).unwrap(); @@ -173,10 +239,10 @@ impl StateMachineTest for ConcretePosState { &crate::ADDRESS, ) .unwrap(); - println!("PoS balance: {}", pos_balance.to_string_native()); + tracing::debug!("PoS balance: {}", pos_balance.to_string_native()); match transition { Transition::NextEpoch => { - println!("\nCONCRETE Next epoch"); + tracing::debug!("\nCONCRETE Next epoch"); super::advance_epoch(&mut state.s, ¶ms); // Need to apply some slashing @@ -189,12 +255,13 @@ impl StateMachineTest for ConcretePosState { Transition::InitValidator { address, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, max_commission_rate_change, } => { - println!("\nCONCRETE Init validator"); + tracing::debug!("\nCONCRETE Init validator"); let current_epoch = state.current_epoch(); super::become_validator(super::BecomeValidator { @@ -202,6 +269,7 @@ impl StateMachineTest for ConcretePosState { params: ¶ms, address: &address, consensus_key: &consensus_key, + protocol_key: &protocol_key, eth_cold_key: ð_cold_key, eth_hot_key: ð_hot_key, current_epoch, @@ -218,7 +286,7 @@ impl StateMachineTest for ConcretePosState { ) } Transition::Bond { id, amount } => { - println!("\nCONCRETE Bond"); + tracing::debug!("\nCONCRETE Bond"); let current_epoch = state.current_epoch(); let pipeline = current_epoch + params.pipeline_len; let 
validator_stake_before_bond_cur = @@ -228,8 +296,7 @@ impl StateMachineTest for ConcretePosState { &id.validator, current_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let validator_stake_before_bond_pipeline = crate::read_validator_stake( &state.s, @@ -237,8 +304,7 @@ impl StateMachineTest for ConcretePosState { &id.validator, pipeline, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Credit tokens to ensure we can apply the bond let native_token = state.s.get_native_token().unwrap(); @@ -299,9 +365,11 @@ impl StateMachineTest for ConcretePosState { pos_balance_post - pos_balance_pre, src_balance_pre - src_balance_post ); + + state.check_multistate_bond_post_conditions(ref_state, &id); } Transition::Unbond { id, amount } => { - println!("\nCONCRETE Unbond"); + tracing::debug!("\nCONCRETE Unbond"); let current_epoch = state.current_epoch(); let pipeline = current_epoch + params.pipeline_len; let native_token = state.s.get_native_token().unwrap(); @@ -319,8 +387,7 @@ impl StateMachineTest for ConcretePosState { &id.validator, current_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); let validator_stake_before_unbond_pipeline = crate::read_validator_stake( &state.s, @@ -328,8 +395,7 @@ impl StateMachineTest for ConcretePosState { &id.validator, pipeline, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Apply the unbond super::unbond_tokens( @@ -338,6 +404,7 @@ impl StateMachineTest for ConcretePosState { &id.validator, amount, current_epoch, + false, ) .unwrap(); @@ -361,11 +428,13 @@ impl StateMachineTest for ConcretePosState { assert_eq!(pos_balance_pre, pos_balance_post); // Post-condition: Source balance should not change assert_eq!(src_balance_post, src_balance_pre); + + state.check_multistate_unbond_post_conditions(ref_state, &id); } Transition::Withdraw { id: BondId { source, validator }, } => { - println!("\nCONCRETE Withdraw"); + tracing::debug!("\nCONCRETE Withdraw"); let current_epoch = state.current_epoch(); let native_token 
= state.s.get_native_token().unwrap(); let pos = address::POS; @@ -411,6 +480,218 @@ impl StateMachineTest for ConcretePosState { // Post-condition: The increment in source balance should be // equal to the withdrawn amount assert_eq!(src_balance_post - src_balance_pre, withdrawn); + + state.check_multistate_withdraw_post_conditions( + ref_state, + &BondId { source, validator }, + ); + } + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + tracing::debug!("\nCONCRETE Redelegate"); + + let current_epoch = state.current_epoch(); + let pipeline = current_epoch + params.pipeline_len; + + // Read data prior to applying the transition + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + let slash_pool = address::POS_SLASH_POOL; + let slash_balance_pre = + token::read_balance(&state.s, &native_token, &slash_pool) + .unwrap(); + + // Read src validator stakes + let src_validator_stake_cur_pre = crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + let _src_validator_stake_pipeline_pre = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + + // Read dest validator stakes + let dest_validator_stake_cur_pre = crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + current_epoch, + ) + .unwrap(); + let _dest_validator_stake_pipeline_pre = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + pipeline, + ) + .unwrap(); + + // Find delegations + let delegations_pre = + crate::find_delegations(&state.s, &id.source, &pipeline) + .unwrap(); + + // Apply redelegation + let result = redelegate_tokens( + &mut state.s, + &id.source, + &id.validator, + &new_validator, + current_epoch, + amount, + ); + + state.check_multistate_redelegation_post_conditions( + ref_state, + &id.source, + &id.validator, + &new_validator, + 
); + + if is_chained && !amount.is_zero() { + assert!(result.is_err()); + let err = result.unwrap_err(); + let err_str = err.to_string(); + assert_matches!( + err.downcast::().unwrap().deref(), + RedelegationError::IsChainedRedelegation, + "A chained redelegation must be rejected, got \ + {err_str}", + ); + } else { + result.unwrap(); + + // Post-condition: PoS balance is unchanged + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos) + .unwrap(); + assert_eq!(pos_balance_pre, pos_balance_post); + + // Find slash pool balance difference + let slash_balance_post = token::read_balance( + &state.s, + &native_token, + &slash_pool, + ) + .unwrap(); + let slashed = slash_balance_post - slash_balance_pre; + + // Post-condition: Source validator stake at current epoch + // is unchanged + let src_validator_stake_cur_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + assert_eq!( + src_validator_stake_cur_pre, + src_validator_stake_cur_post + ); + + // Post-condition: Source validator stake at pipeline epoch + // is reduced by the redelegation amount + + // TODO: shouldn't this be reduced by the redelegation + // amount post-slashing tho? 
+ // NOTE: We changed it to reduce it, check again later + let _amount_after_slash = amount - slashed; + let _src_validator_stake_pipeline_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + // assert_eq!( + // src_validator_stake_pipeline_pre - + // amount_after_slash, + // src_validator_stake_pipeline_post + // ); + + // Post-condition: Destination validator stake at current + // epoch is unchanged + let dest_validator_stake_cur_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + current_epoch, + ) + .unwrap(); + assert_eq!( + dest_validator_stake_cur_pre, + dest_validator_stake_cur_post + ); + + // Post-condition: Destination validator stake at pipeline + // epoch is increased by the redelegation amount, less any + // slashes + let _dest_validator_stake_pipeline_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + pipeline, + ) + .unwrap(); + // assert_eq!( + // dest_validator_stake_pipeline_pre + + // amount_after_slash, + // dest_validator_stake_pipeline_post + // ); + + // Post-condition: The delegator's delegations should be + // updated with redelegation. For the source reduced by the + // redelegation amount and for the destination increased by + // the redelegation amount, less any slashes. 
+ let delegations_post = crate::find_delegations( + &state.s, &id.source, &pipeline, + ) + .unwrap(); + let src_delegation_pre = delegations_pre + .get(&id.validator) + .cloned() + .unwrap_or_default(); + let src_delegation_post = delegations_post + .get(&id.validator) + .cloned() + .unwrap_or_default(); + assert_eq!( + src_delegation_pre - src_delegation_post, + amount + ); + let _dest_delegation_pre = delegations_pre + .get(&new_validator) + .cloned() + .unwrap_or_default(); + let _dest_delegation_post = delegations_post + .get(&new_validator) + .cloned() + .unwrap_or_default(); + // assert_eq!( + // dest_delegation_post - dest_delegation_pre, + // amount_after_slash + // ); + } } Transition::Misbehavior { address, @@ -418,7 +699,7 @@ impl StateMachineTest for ConcretePosState { infraction_epoch, height, } => { - println!("\nCONCRETE Misbehavior"); + tracing::debug!("\nCONCRETE Misbehavior"); let current_epoch = state.current_epoch(); // Record the slash evidence super::slash( @@ -443,10 +724,10 @@ impl StateMachineTest for ConcretePosState { &address, ); - // TODO: Any others? 
+ state.check_multistate_misbehavior_post_conditions(ref_state); } Transition::UnjailValidator { address } => { - println!("\nCONCRETE UnjailValidator"); + tracing::debug!("\nCONCRETE UnjailValidator"); let current_epoch = state.current_epoch(); // Unjail the validator @@ -566,8 +847,7 @@ impl ConcretePosState { &id.validator, submit_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Post-condition: the validator stake at the current epoch should not // change @@ -579,8 +859,7 @@ impl ConcretePosState { &id.validator, pipeline, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Post-condition: the validator stake at the pipeline should be // incremented by the bond amount @@ -597,6 +876,29 @@ impl ConcretePosState { ); } + fn check_multistate_bond_post_conditions( + &self, + ref_state: &AbstractPosState, + id: &BondId, + ) { + // Check that the bonds are the same + let abs_bonds = ref_state.bonds.get(id).cloned().unwrap(); + let conc_bonds = crate::bond_handle(&id.source, &id.validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_bonds, conc_bonds); + + // Check that the total bonded is the same + let abs_tot_bonded = + ref_state.total_bonded.get(&id.validator).cloned().unwrap(); + let conc_tot_bonded = crate::total_bonded_handle(&id.validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_tot_bonded, conc_tot_bonded); + } + fn check_unbond_post_conditions( &self, submit_epoch: Epoch, @@ -614,8 +916,7 @@ impl ConcretePosState { &id.validator, submit_epoch, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Post-condition: the validator stake at the current epoch should not // change @@ -627,8 +928,7 @@ impl ConcretePosState { &id.validator, pipeline, ) - .unwrap() - .unwrap_or_default(); + .unwrap(); // Post-condition: the validator stake at the pipeline should be // decremented at most by the bond amount (because slashing can reduce @@ -651,6 +951,172 @@ impl ConcretePosState { ); } + fn 
check_multistate_unbond_post_conditions( + &self, + ref_state: &AbstractPosState, + id: &BondId, + ) { + // Check that the bonds are the same + let abs_bonds = ref_state.bonds.get(id).cloned().unwrap(); + let conc_bonds = crate::bond_handle(&id.source, &id.validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_bonds, conc_bonds); + + // Check that the total bonded is the same + let abs_tot_bonded = + ref_state.total_bonded.get(&id.validator).cloned().unwrap(); + let conc_tot_bonded = crate::total_bonded_handle(&id.validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_tot_bonded, conc_tot_bonded); + + // Check that the unbonds are the same + let mut abs_unbonds: BTreeMap> = + BTreeMap::new(); + ref_state.unbonds.iter().for_each( + |((start_epoch, withdraw_epoch), inner)| { + let amount = inner.get(id).cloned().unwrap_or_default(); + if !amount.is_zero() { + abs_unbonds + .entry(*start_epoch) + .or_default() + .insert(*withdraw_epoch, amount); + } + }, + ); + let conc_unbonds = crate::unbond_handle(&id.source, &id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_unbonds, conc_unbonds); + + // Check that the total_unbonded are the same + // TODO: figure out how we get entries with 0 amount in the + // abstract version (and prevent) + let mut abs_total_unbonded = ref_state + .total_unbonded + .get(&id.validator) + .cloned() + .unwrap_or_default(); + abs_total_unbonded.retain(|_, inner_map| { + inner_map.retain(|_, value| !value.is_zero()); + !inner_map.is_empty() + }); + let conc_total_unbonded = crate::total_unbonded_handle(&id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_total_unbonded, conc_total_unbonded); + + // Check that the delegator redelegated bonds are the same + let abs_del_redel_bonds = ref_state + .delegator_redelegated_bonded + .get(&id.source) + .cloned() + .unwrap_or_default() + .get(&id.validator) + .cloned() + .unwrap_or_default(); + let 
conc_del_redel_bonds = + crate::delegator_redelegated_bonds_handle(&id.source) + .at(&id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_del_redel_bonds, conc_del_redel_bonds); + + // Check that the delegator redelegated unbonds are the same + #[allow(clippy::type_complexity)] + let mut abs_del_redel_unbonds: BTreeMap< + Epoch, + BTreeMap>>, + > = BTreeMap::new(); + ref_state + .delegator_redelegated_unbonded + .get(&id.source) + .cloned() + .unwrap_or_default() + .get(&id.validator) + .cloned() + .unwrap_or_default() + .iter() + .for_each(|((redel_end_epoch, withdraw_epoch), inner)| { + let abs_map = abs_del_redel_unbonds + .entry(*redel_end_epoch) + .or_default() + .entry(*withdraw_epoch) + .or_default(); + for (src, bonds) in inner { + for (start, amount) in bonds { + abs_map + .entry(src.clone()) + .or_default() + .insert(*start, *amount); + } + } + }); + let conc_del_redel_unbonds = + crate::delegator_redelegated_unbonds_handle(&id.source) + .at(&id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_del_redel_unbonds, conc_del_redel_unbonds); + + // Check the validator total redelegated bonded + let abs_total_redel_bonded = ref_state + .validator_total_redelegated_bonded + .get(&id.validator) + .cloned() + .unwrap_or_default(); + let mut conc_total_redel_bonded: BTreeMap< + Epoch, + BTreeMap>, + > = BTreeMap::new(); + crate::validator_total_redelegated_bonded_handle(&id.validator) + .iter(&self.s) + .unwrap() + .for_each(|res| { + let ( + NestedSubKey::Data { + key: redel_end_epoch, + nested_sub_key: + NestedSubKey::Data { + key: src_val, + nested_sub_key: SubKey::Data(bond_start), + }, + }, + amount, + ) = res.unwrap(); + conc_total_redel_bonded + .entry(redel_end_epoch) + .or_default() + .entry(src_val) + .or_default() + .insert(bond_start, amount); + }); + assert_eq!(abs_total_redel_bonded, conc_total_redel_bonded); + + // Check the validator total redelegated unbonded + let mut abs_total_redel_unbonded = ref_state + 
.validator_total_redelegated_unbonded + .get(&id.validator) + .cloned() + .unwrap_or_default(); + abs_total_redel_unbonded.retain(|_, inner1| { + inner1.retain(|_, inner2| { + inner2.retain(|_, inner3| !inner3.is_empty()); + !inner2.is_empty() + }); + !inner1.is_empty() + }); + + let conc_total_redel_unbonded = + crate::validator_total_redelegated_unbonded_handle(&id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_total_redel_unbonded, conc_total_redel_unbonded); + } + /// These post-conditions apply to bonding and unbonding fn check_bond_and_unbond_post_conditions( &self, @@ -760,6 +1226,68 @@ impl ConcretePosState { } } + fn check_multistate_withdraw_post_conditions( + &self, + ref_state: &AbstractPosState, + id: &BondId, + ) { + // Check that the unbonds are the same + let mut abs_unbonds: BTreeMap> = + BTreeMap::new(); + ref_state.unbonds.iter().for_each( + |((start_epoch, withdraw_epoch), inner)| { + let amount = inner.get(id).cloned().unwrap_or_default(); + if !amount.is_zero() { + abs_unbonds + .entry(*start_epoch) + .or_default() + .insert(*withdraw_epoch, amount); + } + }, + ); + let conc_unbonds = crate::unbond_handle(&id.source, &id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_unbonds, conc_unbonds); + + // Check the delegator redelegated unbonds + #[allow(clippy::type_complexity)] + let mut abs_del_redel_unbonds: BTreeMap< + Epoch, + BTreeMap>>, + > = BTreeMap::new(); + ref_state + .delegator_redelegated_unbonded + .get(&id.source) + .cloned() + .unwrap_or_default() + .get(&id.validator) + .cloned() + .unwrap_or_default() + .iter() + .for_each(|((redel_end_epoch, withdraw_epoch), inner)| { + let abs_map = abs_del_redel_unbonds + .entry(*redel_end_epoch) + .or_default() + .entry(*withdraw_epoch) + .or_default(); + for (src, bonds) in inner { + for (start, amount) in bonds { + abs_map + .entry(src.clone()) + .or_default() + .insert(*start, *amount); + } + } + }); + let conc_del_redel_unbonds = + 
crate::delegator_redelegated_unbonds_handle(&id.source) + .at(&id.validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_del_redel_unbonds, conc_del_redel_unbonds); + } + fn check_init_validator_post_conditions( &self, submit_epoch: Epoch, @@ -823,7 +1351,7 @@ impl ConcretePosState { slash_type: SlashType, validator: &Address, ) { - println!( + tracing::debug!( "\nChecking misbehavior post conditions for validator: \n{}", validator ); @@ -831,13 +1359,6 @@ impl ConcretePosState { // Validator state jailed and validator removed from the consensus set // starting at the next epoch for offset in 1..=params.pipeline_len { - // dbg!( - // crate::read_consensus_validator_set_addresses_with_stake( - // &self.s, - // current_epoch + offset - // ) - // .unwrap() - // ); assert_eq!( validator_state_handle(validator) .get(&self.s, current_epoch + offset, params) @@ -850,7 +1371,6 @@ impl ConcretePosState { .unwrap() .any(|res| { let (_, val_address) = res.unwrap(); - // dbg!(&val_address); val_address == validator.clone() }); assert!(!in_consensus); @@ -877,6 +1397,40 @@ impl ConcretePosState { // TODO: Any others? 
} + fn check_multistate_misbehavior_post_conditions( + &self, + ref_state: &AbstractPosState, + ) { + // Check the enqueued slashes + let abs_enqueued = ref_state.enqueued_slashes.clone(); + let mut conc_enqueued: BTreeMap>> = + BTreeMap::new(); + crate::enqueued_slashes_handle() + .get_data_handler() + .iter(&self.s) + .unwrap() + .for_each(|res| { + let ( + NestedSubKey::Data { + key: epoch, + nested_sub_key: + NestedSubKey::Data { + key: address, + nested_sub_key: _, + }, + }, + slash, + ) = res.unwrap(); + let slashes = conc_enqueued + .entry(epoch) + .or_default() + .entry(address) + .or_default(); + slashes.push(slash); + }); + assert_eq!(abs_enqueued, conc_enqueued); + } + fn check_unjail_validator_post_conditions( &self, params: &PosParams, @@ -950,58 +1504,299 @@ impl ConcretePosState { ); } - fn check_global_post_conditions( + fn check_multistate_redelegation_post_conditions( &self, - params: &PosParams, - current_epoch: Epoch, ref_state: &AbstractPosState, + delegator: &Address, + src_validator: &Address, + dest_validator: &Address, ) { - // Ensure that every validator in each set has the proper state - for epoch in Epoch::iter_bounds_inclusive( - current_epoch, - current_epoch + params.pipeline_len, - ) { - tracing::debug!("Epoch {epoch}"); - let mut vals = HashSet::
::new(); - for WeightedValidator { - bonded_stake, - address: validator, - } in crate::read_consensus_validator_set_addresses_with_stake( - &self.s, epoch, - ) - .unwrap() - { - let deltas_stake = validator_deltas_handle(&validator) - .get_sum(&self.s, epoch, params) - .unwrap() - .unwrap_or_default(); - tracing::debug!( - "Consensus val {}, stake: {} ({})", - &validator, - bonded_stake.to_string_native(), - deltas_stake.to_string_native(), - ); - assert!(!deltas_stake.is_negative()); - assert_eq!( - bonded_stake, - token::Amount::from_change(deltas_stake) - ); - assert_eq!( - bonded_stake.change(), - ref_state - .validator_stakes - .get(&epoch) - .unwrap() - .get(&validator) - .cloned() - .unwrap() - ); + let src_id = BondId { + source: delegator.clone(), + validator: src_validator.clone(), + }; + let dest_id = BondId { + source: delegator.clone(), + validator: dest_validator.clone(), + }; - let state = crate::validator_state_handle(&validator) - .get(&self.s, epoch, params) - .unwrap(); + // Check the src bonds + let abs_src_bonds = + ref_state.bonds.get(&src_id).cloned().unwrap_or_default(); + let conc_src_bonds = crate::bond_handle(delegator, src_validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_src_bonds, conc_src_bonds); + + // Check the dest bonds + let abs_dest_bonds = + ref_state.bonds.get(&dest_id).cloned().unwrap_or_default(); + let conc_dest_bonds = crate::bond_handle(delegator, dest_validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_dest_bonds, conc_dest_bonds); - assert_eq!(state, Some(ValidatorState::Consensus)); + // Check the src total bonded + let abs_src_tot_bonded = ref_state + .total_bonded + .get(src_validator) + .cloned() + .unwrap_or_default(); + let conc_src_tot_bonded = crate::total_bonded_handle(src_validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_src_tot_bonded, conc_src_tot_bonded); + + // Check the dest total bonded + let 
abs_dest_tot_bonded = ref_state + .total_bonded + .get(dest_validator) + .cloned() + .unwrap_or_default(); + let conc_dest_tot_bonded = crate::total_bonded_handle(dest_validator) + .get_data_handler() + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_dest_tot_bonded, conc_dest_tot_bonded); + + // NOTE: Unbonds are not updated by redelegation + + // Check the src total_unbonded + let mut abs_src_total_unbonded = ref_state + .total_unbonded + .get(src_validator) + .cloned() + .unwrap_or_default(); + abs_src_total_unbonded.retain(|_, inner_map| { + inner_map.retain(|_, value| !value.is_zero()); + !inner_map.is_empty() + }); + let conc_src_total_unbonded = + crate::total_unbonded_handle(src_validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_src_total_unbonded, conc_src_total_unbonded); + + // Check the delegator redelegated bonds to the src + let abs_del_redel_bonds_src = ref_state + .delegator_redelegated_bonded + .get(delegator) + .cloned() + .unwrap_or_default() + .get(src_validator) + .cloned() + .unwrap_or_default(); + let conc_del_redel_bonds_src = + crate::delegator_redelegated_bonds_handle(delegator) + .at(src_validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_del_redel_bonds_src, conc_del_redel_bonds_src); + + // Check the delegator redelegated bonds to the dest + let abs_del_redel_bonds_dest = ref_state + .delegator_redelegated_bonded + .get(delegator) + .cloned() + .unwrap_or_default() + .get(dest_validator) + .cloned() + .unwrap_or_default(); + let conc_del_redel_bonds_dest = + crate::delegator_redelegated_bonds_handle(delegator) + .at(dest_validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_del_redel_bonds_dest, conc_del_redel_bonds_dest); + + // NOTE: Delegator redelegated unbonds are not updated by redelegation + + // Check the src total redelegated bonded + let abs_src_total_redel_bonded = ref_state + .validator_total_redelegated_bonded + .get(src_validator) + .cloned() + .unwrap_or_default(); + let mut 
conc_src_total_redel_bonded: BTreeMap< + Epoch, + BTreeMap>, + > = BTreeMap::new(); + crate::validator_total_redelegated_bonded_handle(src_validator) + .iter(&self.s) + .unwrap() + .for_each(|res| { + let ( + NestedSubKey::Data { + key: redel_end_epoch, + nested_sub_key: + NestedSubKey::Data { + key: src_val, + nested_sub_key: SubKey::Data(bond_start), + }, + }, + amount, + ) = res.unwrap(); + conc_src_total_redel_bonded + .entry(redel_end_epoch) + .or_default() + .entry(src_val) + .or_default() + .insert(bond_start, amount); + }); + assert_eq!(abs_src_total_redel_bonded, conc_src_total_redel_bonded); + + // Check the dest total redelegated bonded + let abs_dest_total_redel_bonded = ref_state + .validator_total_redelegated_bonded + .get(dest_validator) + .cloned() + .unwrap_or_default(); + let mut conc_dest_total_redel_bonded: BTreeMap< + Epoch, + BTreeMap>, + > = BTreeMap::new(); + crate::validator_total_redelegated_bonded_handle(dest_validator) + .iter(&self.s) + .unwrap() + .for_each(|res| { + let ( + NestedSubKey::Data { + key: redel_end_epoch, + nested_sub_key: + NestedSubKey::Data { + key: src_val, + nested_sub_key: SubKey::Data(bond_start), + }, + }, + amount, + ) = res.unwrap(); + conc_dest_total_redel_bonded + .entry(redel_end_epoch) + .or_default() + .entry(src_val) + .or_default() + .insert(bond_start, amount); + }); + assert_eq!(abs_dest_total_redel_bonded, conc_dest_total_redel_bonded); + + // Check the src validator's total redelegated unbonded + let mut abs_src_total_redel_unbonded = ref_state + .validator_total_redelegated_unbonded + .get(src_validator) + .cloned() + .unwrap_or_default(); + abs_src_total_redel_unbonded.retain(|_, inner1| { + inner1.retain(|_, inner2| { + inner2.retain(|_, inner3| !inner3.is_empty()); + !inner2.is_empty() + }); + !inner1.is_empty() + }); + + let conc_src_total_redel_unbonded = + crate::validator_total_redelegated_unbonded_handle(src_validator) + .collect_map(&self.s) + .unwrap(); + 
assert_eq!(abs_src_total_redel_unbonded, conc_src_total_redel_unbonded); + + // Check the src validator's outgoing redelegations + let mut abs_src_outgoing: BTreeMap< + Address, + BTreeMap>, + > = BTreeMap::new(); + ref_state + .outgoing_redelegations + .get(src_validator) + .cloned() + .unwrap_or_default() + .iter() + .for_each(|(address, amounts)| { + for ((bond_start, redel_start), amount) in amounts { + abs_src_outgoing + .entry(address.clone()) + .or_default() + .entry(*bond_start) + .or_default() + .insert(*redel_start, *amount); + } + }); + let conc_src_outgoing = + crate::validator_outgoing_redelegations_handle(src_validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_src_outgoing, conc_src_outgoing); + + // Check the dest validator's incoming redelegations + let abs_dest_incoming = ref_state + .incoming_redelegations + .get(dest_validator) + .cloned() + .unwrap_or_default(); + let conc_dest_incoming = + crate::validator_incoming_redelegations_handle(dest_validator) + .collect_map(&self.s) + .unwrap(); + assert_eq!(abs_dest_incoming, conc_dest_incoming); + } + + fn check_global_post_conditions( + &self, + params: &PosParams, + current_epoch: Epoch, + ref_state: &AbstractPosState, + ) { + for epoch in Epoch::iter_bounds_inclusive( + current_epoch, + current_epoch + params.pipeline_len, + ) { + tracing::debug!("Epoch {epoch}"); + let mut vals = HashSet::
::new(); + + // Consensus validators + for WeightedValidator { + bonded_stake, + address: validator, + } in crate::read_consensus_validator_set_addresses_with_stake( + &self.s, epoch, + ) + .unwrap() + { + let deltas_stake = validator_deltas_handle(&validator) + .get_sum(&self.s, epoch, params) + .unwrap() + .unwrap_or_default(); + tracing::debug!( + "Consensus val {}, stake: {} ({})", + &validator, + bonded_stake.to_string_native(), + deltas_stake.to_string_native(), + ); + assert!(!deltas_stake.is_negative()); + + // Checks on stake + assert_eq!( + bonded_stake, + token::Amount::from_change(deltas_stake) + ); + assert_eq!( + bonded_stake, + ref_state + .validator_stakes + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap() + ); + + // Checks on validator state + let state = crate::validator_state_handle(&validator) + .get(&self.s, epoch, params) + .unwrap(); + assert_eq!(state, Some(ValidatorState::Consensus)); assert_eq!( state.unwrap(), ref_state @@ -1012,9 +1807,12 @@ impl ConcretePosState { .cloned() .unwrap() ); + assert!(!vals.contains(&validator)); vals.insert(validator); } + + // Below-capacity validators for WeightedValidator { bonded_stake, address: validator, @@ -1039,7 +1837,7 @@ impl ConcretePosState { token::Amount::from_change(deltas_stake) ); assert_eq!( - bonded_stake.change(), + bonded_stake, ref_state .validator_stakes .get(&epoch) @@ -1052,23 +1850,7 @@ impl ConcretePosState { let state = crate::validator_state_handle(&validator) .get(&self.s, epoch, params) .unwrap(); - if state.is_none() { - dbg!( - crate::validator_state_handle(&validator) - .get(&self.s, current_epoch, params) - .unwrap() - ); - dbg!( - crate::validator_state_handle(&validator) - .get(&self.s, current_epoch.next(), params) - .unwrap() - ); - dbg!( - crate::validator_state_handle(&validator) - .get(&self.s, current_epoch.next(), params) - .unwrap() - ); - } + assert_eq!(state, Some(ValidatorState::BelowCapacity)); assert_eq!( state.unwrap(), @@ -1080,6 
+1862,7 @@ impl ConcretePosState { .cloned() .unwrap() ); + assert!(!vals.contains(&validator)); vals.insert(validator); } @@ -1090,10 +1873,10 @@ impl ConcretePosState { ) .unwrap() { - let stake = validator_deltas_handle(&validator) - .get_sum(&self.s, epoch, params) - .unwrap() - .unwrap_or_default(); + let stake = crate::read_validator_stake( + &self.s, params, &validator, epoch, + ) + .unwrap(); tracing::debug!( "Below-thresh val {}, stake {}", &validator, @@ -1126,6 +1909,7 @@ impl ConcretePosState { .cloned() .unwrap() ); + assert!(!vals.contains(&validator)); vals.insert(validator); } @@ -1134,8 +1918,8 @@ impl ConcretePosState { let all_validators = crate::read_all_validator_addresses(&self.s, epoch).unwrap(); - for val in all_validators { - let state = validator_state_handle(&val) + for validator in all_validators { + let state = validator_state_handle(&validator) .get(&self.s, epoch, params) .unwrap() .unwrap(); @@ -1147,17 +1931,17 @@ impl ConcretePosState { .validator_states .get(&epoch) .unwrap() - .get(&val) + .get(&validator) .cloned() .unwrap() ); - let stake = validator_deltas_handle(&val) - .get_sum(&self.s, epoch, params) - .unwrap() - .unwrap_or_default(); + let stake = crate::read_validator_stake( + &self.s, params, &validator, epoch, + ) + .unwrap(); tracing::debug!( "Jailed val {}, stake {}", - &val, + &validator, stake.to_string_native() ); @@ -1167,7 +1951,7 @@ impl ConcretePosState { .validator_states .get(&epoch) .unwrap() - .get(&val) + .get(&validator) .cloned() .unwrap() ); @@ -1177,11 +1961,12 @@ impl ConcretePosState { .validator_stakes .get(&epoch) .unwrap() - .get(&val) + .get(&validator) .cloned() .unwrap() ); - assert!(!vals.contains(&val)); + + assert!(!vals.contains(&validator)); } } } @@ -1194,9 +1979,10 @@ impl ReferenceStateMachine for AbstractPosState { type Transition = Transition; fn init_state() -> BoxedStrategy { - println!("\nInitializing abstract state machine"); + tracing::debug!("\nInitializing abstract state 
machine"); arb_params_and_genesis_validators(Some(8), 8..10) .prop_map(|(params, genesis_validators)| { + let params = params.with_default_gov_params(); let epoch = Epoch::default(); let mut state = Self { epoch, @@ -1218,13 +2004,20 @@ impl ReferenceStateMachine for AbstractPosState { validator_slashes: Default::default(), enqueued_slashes: Default::default(), validator_last_slash_epochs: Default::default(), - unbond_records: Default::default(), + total_unbonded: Default::default(), + delegator_redelegated_bonded: Default::default(), + delegator_redelegated_unbonded: Default::default(), + validator_total_redelegated_bonded: Default::default(), + validator_total_redelegated_unbonded: Default::default(), + incoming_redelegations: Default::default(), + outgoing_redelegations: Default::default(), }; for GenesisValidator { address, tokens, consensus_key: _, + protocol_key: _, eth_cold_key: _, eth_hot_key: _, commission_rate: _, @@ -1238,12 +2031,15 @@ impl ReferenceStateMachine for AbstractPosState { validator: address.clone(), }) .or_default(); - bonds.insert(epoch, token::Change::from(tokens)); + bonds.insert(epoch, tokens); + + let total_bonded = + state.total_bonded.entry(address.clone()).or_default(); + total_bonded.insert(epoch, tokens); let total_stakes = state.validator_stakes.entry(epoch).or_default(); - total_stakes - .insert(address.clone(), token::Change::from(tokens)); + total_stakes.insert(address.clone(), tokens); let consensus_set = state.consensus_set.entry(epoch).or_default(); @@ -1302,7 +2098,6 @@ impl ReferenceStateMachine for AbstractPosState { { state.copy_discrete_epoched_data(epoch) } - // dbg!(&state); state }) .boxed() @@ -1312,6 +2107,24 @@ impl ReferenceStateMachine for AbstractPosState { fn transitions(state: &Self::State) -> BoxedStrategy { // Let preconditions filter out what unbonds are not allowed let unbondable = state.bond_sums().into_iter().collect::>(); + let redelegatable = unbondable + .iter() + // Self-bonds cannot be redelegated 
+ .filter(|(id, _)| id.source != id.validator) + .cloned() + .collect::>(); + + for (id, amt) in &redelegatable { + if *amt <= 0.into() { + tracing::debug!( + "Source: {}\nValidator: {}\nAmount: {}", + &id.source, + &id.validator, + amt.to_string_native() + ); + panic!("Should have no bonds with 0 amount or less!"); + } + } let withdrawable = state.withdrawable_unbonds().into_iter().collect::>(); @@ -1348,6 +2161,7 @@ impl ReferenceStateMachine for AbstractPosState { 1 => ( address::testing::arb_established_address(), key::testing::arb_common_keypair(), + key::testing::arb_common_keypair(), key::testing::arb_common_secp256k1_keypair(), key::testing::arb_common_secp256k1_keypair(), arb_rate(), @@ -1357,6 +2171,7 @@ impl ReferenceStateMachine for AbstractPosState { |( addr, consensus_key, + protocol_key, eth_hot_key, eth_cold_key, commission_rate, @@ -1365,6 +2180,7 @@ impl ReferenceStateMachine for AbstractPosState { Transition::InitValidator { address: Address::Established(addr), consensus_key: consensus_key.to_public(), + protocol_key: protocol_key.to_public(), eth_hot_key: eth_hot_key.to_public(), eth_cold_key: eth_cold_key.to_public(), commission_rate, @@ -1394,10 +2210,14 @@ impl ReferenceStateMachine for AbstractPosState { } else { let arb_unbondable = prop::sample::select(unbondable); let arb_unbond = - arb_unbondable.prop_flat_map(|(id, deltas_sum)| { - let deltas_sum = i128::try_from(deltas_sum).unwrap(); + arb_unbondable.prop_flat_map(move |(id, deltas_sum)| { + let deltas_sum = + i128::try_from(deltas_sum.change()).unwrap(); // Generate an amount to unbond, up to the sum - assert!(deltas_sum > 0); + assert!( + deltas_sum > 0, + "Bond {id} deltas_sum must be non-zero" + ); (0..deltas_sum).prop_map(move |to_unbond| { let id = id.clone(); let amount = @@ -1409,7 +2229,7 @@ impl ReferenceStateMachine for AbstractPosState { }; // Add withdrawals, if any - if withdrawable.is_empty() { + let transitions = if withdrawable.is_empty() { transitions } else { let 
arb_withdrawable = prop::sample::select(withdrawable); @@ -1417,6 +2237,63 @@ impl ReferenceStateMachine for AbstractPosState { .prop_map(|(id, _)| Transition::Withdraw { id }); prop_oneof![transitions, arb_withdrawal].boxed() + }; + + // Add redelegations, if any + if redelegatable.is_empty() { + transitions + } else { + let arb_redelegatable = prop::sample::select(redelegatable); + let validators = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .keys() + .cloned() + .collect::>(); + let epoch = state.epoch; + let params = state.params.clone(); + let incoming_redelegations = state.incoming_redelegations.clone(); + let arb_redelegation = + arb_redelegatable.prop_flat_map(move |(id, deltas_sum)| { + let deltas_sum = + i128::try_from(deltas_sum.change()).unwrap(); + // Generate an amount to redelegate, up to the sum + assert!( + deltas_sum > 0, + "Bond {id} deltas_sum must be non-zero" + ); + let arb_amount = (0..deltas_sum).prop_map(|to_unbond| { + token::Amount::from_change(Change::from(to_unbond)) + }); + // Generate a new validator for redelegation + let current_validator = id.validator.clone(); + let new_validators = validators + .iter() + // The validator must be other than the current + .filter(|validator| *validator != ¤t_validator) + .cloned() + .collect::>(); + let arb_new_validator = + prop::sample::select(new_validators); + let params = params.clone(); + let incoming_redelegations = incoming_redelegations.clone(); + (arb_amount, arb_new_validator).prop_map( + move |(amount, new_validator)| Transition::Redelegate { + is_chained: Self::is_chained_redelegation( + epoch, + ¶ms, + &incoming_redelegations, + &id.source, + &id.validator, + ), + id: id.clone(), + new_validator, + amount, + }, + ) + }); + prop_oneof![transitions, arb_redelegation].boxed() } } @@ -1426,7 +2303,7 @@ impl ReferenceStateMachine for AbstractPosState { ) -> Self::State { match transition { Transition::NextEpoch => { - println!("\nABSTRACT Next Epoch"); + 
tracing::debug!("\nABSTRACT Next Epoch"); state.epoch = state.epoch.next(); @@ -1442,14 +2319,16 @@ impl ReferenceStateMachine for AbstractPosState { Transition::InitValidator { address, consensus_key: _, + protocol_key: _, eth_cold_key: _, eth_hot_key: _, commission_rate: _, max_commission_rate_change: _, } => { - println!( + tracing::debug!( "\nABSTRACT Init Validator {} in epoch {}", - address, state.epoch + address, + state.epoch ); let pipeline: Epoch = state.pipeline(); @@ -1458,7 +2337,7 @@ impl ReferenceStateMachine for AbstractPosState { .validator_stakes .entry(pipeline) .or_default() - .insert(address.clone(), 0_i128.into()); + .insert(address.clone(), token::Amount::zero()); // Insert into the below-threshold set at pipeline since the // initial stake is 0 @@ -1476,14 +2355,13 @@ impl ReferenceStateMachine for AbstractPosState { state.debug_validators(); } Transition::Bond { id, amount } => { - println!( + tracing::debug!( "\nABSTRACT Bond {} tokens, id = {}", amount.to_string_native(), id ); - if *amount != token::Amount::default() { - let change = token::Change::from(*amount); + if !amount.is_zero() { let pipeline_state = state .validator_states .get(&state.pipeline()) @@ -1493,54 +2371,95 @@ impl ReferenceStateMachine for AbstractPosState { // Validator sets need to be updated first!! 
if *pipeline_state != ValidatorState::Jailed { - state.update_validator_sets(&id.validator, change); + state.update_validator_sets( + state.pipeline(), + &id.validator, + amount.change(), + ); } - state.update_bond(id, change); - state.update_validator_total_stake(&id.validator, change); + state.update_bond(id, *amount); + state.update_validator_total_stake( + &id.validator, + amount.change(), + ); } state.debug_validators(); } Transition::Unbond { id, amount } => { - println!( + tracing::debug!( "\nABSTRACT Unbond {} tokens, id = {}", amount.to_string_native(), id ); - if *amount != token::Amount::default() { - let change = token::Change::from(*amount); - state.update_state_with_unbond(id, change); + // `totalBonded` + let sum_bonded = state + .bonds + .get(id) + .map(|a| { + a.iter() + .fold(token::Amount::zero(), |acc, (_, amount)| { + acc + *amount + }) + }) + .unwrap_or_default(); - // Validator sets need to be updated first!! - // state.update_validator_sets(&id.validator, change); - // state.update_bond(id, change); - // state.update_validator_total_stake(&id.validator, - // change); - - // let withdrawal_epoch = - // state.pipeline() + state.params.unbonding_len; - // // + 1_u64; - // let unbonds = - // state.unbonds.entry(withdrawal_epoch).or_default(); - // let unbond = unbonds.entry(id.clone()).or_default(); - // *unbond += *amount; + if !amount.is_zero() && *amount <= sum_bonded { + state.update_state_with_unbond(id, *amount); } state.debug_validators(); } Transition::Withdraw { id } => { - println!("\nABSTRACT Withdraw, id = {}", id); + tracing::debug!("\nABSTRACT Withdraw, id = {}", id); + + let redel_unbonds = state + .delegator_redelegated_unbonded + .entry(id.source.clone()) + .or_default() + .entry(id.validator.clone()) + .or_default(); // Remove all withdrawable unbonds with this bond ID - for (epoch, unbonds) in state.unbonds.iter_mut() { - if *epoch <= state.epoch { + for ((start_epoch, withdraw_epoch), unbonds) in + state.unbonds.iter_mut() + 
{ + if *withdraw_epoch <= state.epoch { unbonds.remove(id); + redel_unbonds.remove(&(*start_epoch, *withdraw_epoch)); } } // Remove any epochs that have no unbonds left - state.unbonds.retain(|_epoch, unbonds| !unbonds.is_empty()); + state.unbonds.retain(|_epochs, unbonds| !unbonds.is_empty()); + + // Remove the redel unbonds if empty now + redel_unbonds.retain(|_epochs, unbonds| !unbonds.is_empty()); // TODO: should we do anything here for slashing? } + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + tracing::debug!( + "\nABSTRACT Redelegation, id = {id}, new validator = \ + {new_validator}, amount = {}, is_chained = {is_chained}", + amount.to_string_native(), + ); + if *is_chained { + return state; + } + if !amount.is_zero() { + // Remove the amount from source validator + state.update_state_with_redelegation( + id, + new_validator, + *amount, + ); + } + state.debug_validators(); + } Transition::Misbehavior { address, slash_type, @@ -1548,10 +2467,12 @@ impl ReferenceStateMachine for AbstractPosState { height, } => { let current_epoch = state.epoch; - println!( + tracing::debug!( "\nABSTRACT Misbehavior in epoch {} by validator {}, \ found in epoch {}", - infraction_epoch, address, current_epoch + infraction_epoch, + address, + current_epoch ); let processing_epoch = *infraction_epoch @@ -1580,15 +2501,13 @@ impl ReferenceStateMachine for AbstractPosState { // Remove from the validator set starting at the next epoch and // up thru the pipeline for offset in 1..=state.params.pipeline_len { - let real_stake = token::Amount::from_change( - state - .validator_stakes - .get(&(current_epoch + offset)) - .unwrap() - .get(address) - .cloned() - .unwrap_or_default(), - ); + let real_stake = state + .validator_stakes + .get(&(current_epoch + offset)) + .unwrap() + .get(address) + .cloned() + .unwrap_or_default(); if let Some((index, stake)) = state .is_in_consensus_w_info(address, current_epoch + offset) @@ -1719,7 +2638,7 @@ impl 
ReferenceStateMachine for AbstractPosState { Transition::UnjailValidator { address } => { let pipeline_epoch = state.pipeline(); - println!( + tracing::debug!( "\nABSTRACT Unjail validator {} starting in epoch {}", address.clone(), pipeline_epoch @@ -1745,9 +2664,7 @@ impl ReferenceStateMachine for AbstractPosState { sum + validators.len() as u64 }); - if pipeline_stake - < state.params.validator_stake_threshold.change() - { + if pipeline_stake < state.params.validator_stake_threshold { // Place into the below-threshold set let below_threshold_set_pipeline = state .below_threshold_set @@ -1768,7 +2685,7 @@ impl ReferenceStateMachine for AbstractPosState { .is_empty() ); consensus_set_pipeline - .entry(token::Amount::from_change(pipeline_stake)) + .entry(pipeline_stake) .or_default() .push_back(address.clone()); validator_states_pipeline @@ -1782,7 +2699,7 @@ impl ReferenceStateMachine for AbstractPosState { .or_default(); let min_consensus_stake = *min_consensus.key(); - if pipeline_stake > min_consensus_stake.change() { + if pipeline_stake > min_consensus_stake { // Place into the consensus set and demote the last // min_consensus validator let min_validators = min_consensus.get_mut(); @@ -1800,7 +2717,7 @@ impl ReferenceStateMachine for AbstractPosState { .insert(last_val, ValidatorState::BelowCapacity); consensus_set_pipeline - .entry(token::Amount::from_change(pipeline_stake)) + .entry(pipeline_stake) .or_default() .push_back(address.clone()); validator_states_pipeline @@ -1808,10 +2725,7 @@ impl ReferenceStateMachine for AbstractPosState { } else { // Just place into the below-capacity set below_capacity_set_pipeline - .entry( - token::Amount::from_change(pipeline_stake) - .into(), - ) + .entry(pipeline_stake.into()) .or_default() .push_back(address.clone()); validator_states_pipeline.insert( @@ -1838,6 +2752,7 @@ impl ReferenceStateMachine for AbstractPosState { Transition::InitValidator { address, consensus_key: _, + protocol_key: _, eth_cold_key: _, 
eth_hot_key: _, commission_rate: _, @@ -1867,7 +2782,7 @@ impl ReferenceStateMachine for AbstractPosState { let is_unbondable = state .bond_sums() .get(id) - .map(|sum| *sum >= token::Change::from(*amount)) + .map(|sum| *sum >= *amount) .unwrap_or_default(); // The validator must not be frozen currently @@ -1883,13 +2798,6 @@ impl ReferenceStateMachine for AbstractPosState { false }; - // if is_frozen { - // println!( - // "\nVALIDATOR {} IS FROZEN - CANNOT UNBOND\n", - // &id.validator - // ); - // } - // The validator must be known state.is_validator(&id.validator, pipeline) // The amount must be available to unbond and the validator not jailed @@ -1901,7 +2809,7 @@ impl ReferenceStateMachine for AbstractPosState { let is_withdrawable = state .withdrawable_unbonds() .get(id) - .map(|amount| *amount >= token::Amount::default()) + .map(|amount| *amount >= token::Amount::zero()) .unwrap_or_default(); // The validator must not be jailed currently @@ -1918,6 +2826,71 @@ impl ReferenceStateMachine for AbstractPosState { // The amount must be available to unbond && is_withdrawable && !is_jailed } + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + let pipeline = state.pipeline(); + + if *is_chained { + Self::is_chained_redelegation( + state.epoch, + &state.params, + &state.incoming_redelegations, + &id.source, + new_validator, + ) + } else { + // The src and dest validator must be known + if !state.is_validator(&id.validator, pipeline) + || !state.is_validator(new_validator, pipeline) + { + return false; + } + + // The amount must be available to redelegate + if !state + .bond_sums() + .get(id) + .map(|sum| *sum >= *amount) + .unwrap_or_default() + { + return false; + } + + // The src validator must not be frozen + if let Some(last_epoch) = + state.validator_last_slash_epochs.get(&id.validator) + { + if *last_epoch + + state.params.unbonding_len + + 1u64 + + state.params.cubic_slashing_window_length + > state.epoch + { + return false; + 
} + } + + // The dest validator must not be frozen + if let Some(last_epoch) = + state.validator_last_slash_epochs.get(new_validator) + { + if *last_epoch + + state.params.unbonding_len + + 1u64 + + state.params.cubic_slashing_window_length + > state.epoch + { + return false; + } + } + + true + } + } Transition::Misbehavior { address, slash_type: _, @@ -1935,27 +2908,43 @@ impl ReferenceStateMachine for AbstractPosState { <= state.params.unbonding_len; // Only misbehave when there is more than 3 validators that's - // not jailed, so there's always at least one honest left + // not jailed or about to be slashed, so there's always at least + // one honest left let enough_honest_validators = || { - state + let num_of_honest = state .validator_states .get(&state.pipeline()) .unwrap() .iter() .filter(|(_addr, val_state)| match val_state { ValidatorState::Consensus - | ValidatorState::BelowCapacity - | ValidatorState::BelowThreshold => true, + | ValidatorState::BelowCapacity => true, ValidatorState::Inactive - | ValidatorState::Jailed => false, + | ValidatorState::Jailed + // Below threshold cannot be in consensus + | ValidatorState::BelowThreshold => false, + }) + .count(); + + // Find the number of enqueued slashes to unique validators + let num_of_enquequed_slashes = state + .enqueued_slashes + .iter() + // find all validators with any enqueued slashes + .fold(BTreeSet::new(), |mut acc, (&epoch, slashes)| { + if epoch > current_epoch { + acc.extend(slashes.keys().cloned()); + } + acc }) - .count() - > 3 + .len(); + + num_of_honest - num_of_enquequed_slashes > 3 }; // Ensure that the validator is in consensus when it misbehaves // TODO: possibly also test allowing below-capacity validators - // println!("\nVal to possibly misbehave: {}", &address); + // tracing::debug!("\nVal to possibly misbehave: {}", &address); let state_at_infraction = state .validator_states .get(infraction_epoch) @@ -2060,7 +3049,7 @@ impl AbstractPosState { } /// Update a bond with bonded or 
unbonded change at the pipeline epoch - fn update_bond(&mut self, id: &BondId, change: token::Change) { + fn update_bond(&mut self, id: &BondId, change: token::Amount) { let pipeline_epoch = self.pipeline(); let bonds = self.bonds.entry(id.clone()).or_default(); let bond = bonds.entry(pipeline_epoch).or_default(); @@ -2079,32 +3068,59 @@ impl AbstractPosState { *total_bonded += change; } - fn update_state_with_unbond(&mut self, id: &BondId, change: token::Change) { + fn update_state_with_unbond(&mut self, id: &BondId, change: token::Amount) { + self.unbond_tokens(id, change, false); + } + + fn unbond_tokens( + &mut self, + id: &BondId, + change: token::Amount, + is_redelegation: bool, + ) -> ResultSlashing { + // TODO: check in here too that the amount is less or equal to bond sum + let pipeline_epoch = self.pipeline(); let withdraw_epoch = pipeline_epoch + self.params.unbonding_len + self.params.cubic_slashing_window_length; + let bonds = self.bonds.entry(id.clone()).or_default(); - let unbond_records = self - .unbond_records + + let total_bonded = + self.total_bonded.entry(id.validator.clone()).or_default(); + let total_unbonded = self + .total_unbonded .entry(id.validator.clone()) .or_default() .entry(pipeline_epoch) .or_default(); - let unbonds = self - .unbonds - .entry(withdraw_epoch) + + let delegator_redelegated_bonds = self + .delegator_redelegated_bonded + .entry(id.source.clone()) + .or_default() + .entry(id.validator.clone()) + .or_default(); + let delegator_redelegated_unbonds = self + .delegator_redelegated_unbonded + .entry(id.source.clone()) .or_default() - .entry(id.clone()) + .entry(id.validator.clone()) .or_default(); - let validator_slashes = self - .validator_slashes - .get(&id.validator) - .cloned() - .unwrap_or_default(); - let mut remaining = change; - let mut amount_after_slashing = token::Change::default(); + let validator_total_redelegated_bonded = self + .validator_total_redelegated_bonded + .entry(id.validator.clone()) + .or_default(); 
+ let validator_total_redelegated_unbonded = self + .validator_total_redelegated_unbonded + .entry(id.validator.clone()) + .or_default() + .entry(pipeline_epoch) + .or_default(); + + let validator_slashes = &self.validator_slashes; tracing::debug!("Bonds before decrementing"); for (start, amnt) in bonds.iter() { @@ -2115,52 +3131,79 @@ impl AbstractPosState { ); } - for (bond_epoch, bond_amnt) in bonds.iter_mut().rev() { - tracing::debug!("remaining {}", remaining.to_string_native()); - tracing::debug!( - "Bond epoch {} - amnt {}", - bond_epoch, - bond_amnt.to_string_native() - ); - let to_unbond = cmp::min(*bond_amnt, remaining); - tracing::debug!( - "to_unbond (init) = {}", - to_unbond.to_string_native() - ); - *bond_amnt -= to_unbond; - *unbonds += token::Amount::from_change(to_unbond); - - let slashes_for_this_bond: BTreeMap = validator_slashes - .iter() - .cloned() - .filter(|s| *bond_epoch <= s.epoch) - .fold(BTreeMap::new(), |mut acc, s| { - let cur = acc.entry(s.epoch).or_default(); - *cur += s.rate; - acc - }); - tracing::debug!( - "Slashes for this bond{:?}", - slashes_for_this_bond.clone() - ); - amount_after_slashing += compute_amount_after_slashing( - &slashes_for_this_bond, - token::Amount::from_change(to_unbond), - self.params.unbonding_len, - self.params.cubic_slashing_window_length, - ) - .change(); - tracing::debug!( - "Cur amnt after slashing = {}", - &amount_after_slashing.to_string_native() - ); + // `resultUnbonding` + // Get the bonds for removal + let bonds_to_remove = Self::find_bonds_to_remove(bonds, change); + + // `modifiedRedelegation` + // Modified redelegation + // The unbond may need to partially unbond redelegated tokens, so + // compute if necessary + let modified_redelegation = match bonds_to_remove.new_entry { + Some((bond_epoch, new_bond_amount)) => { + if delegator_redelegated_bonds.contains_key(&bond_epoch) { + let cur_bond_amount = + bonds.get(&bond_epoch).cloned().unwrap_or_default(); + Self::compute_modified_redelegation( + 
delegator_redelegated_bonds, + bond_epoch, + cur_bond_amount - new_bond_amount, + ) + } else { + ModifiedRedelegation::default() + } + } + None => ModifiedRedelegation::default(), + }; - let amt = unbond_records.entry(*bond_epoch).or_default(); - *amt += token::Amount::from_change(to_unbond); + // `keysUnbonds` + // New unbonds. This will be needed for a couple things + let unbonded_bond_starts = + if let Some((start_epoch, _)) = bonds_to_remove.new_entry { + let mut to_remove = bonds_to_remove.epochs.clone(); + to_remove.insert(start_epoch); + to_remove + } else { + bonds_to_remove.epochs.clone() + }; + // `newUnbonds` + let new_unbonds = unbonded_bond_starts + .into_iter() + .map(|start| { + let cur_bond_amnt = bonds.get(&start).cloned().unwrap(); + let new_value = if let Some((start_epoch, new_bond_amount)) = + bonds_to_remove.new_entry + { + if start_epoch == start { + cur_bond_amnt - new_bond_amount + } else { + cur_bond_amnt + } + } else { + cur_bond_amnt + }; + ((start, withdraw_epoch), new_value) + }) + .collect::>(); - remaining -= to_unbond; - if remaining.is_zero() { - break; + // Update the bonds and unbonds in the AbstractState + // `updatedBonded` + updates to `updatedDelegator` + for bond_epoch in &bonds_to_remove.epochs { + bonds.remove(bond_epoch); + } + if let Some((bond_epoch, new_bond_amt)) = bonds_to_remove.new_entry { + bonds.insert(bond_epoch, new_bond_amt); + } + // `updatedUnbonded` + updates to `updatedDelegator` + if !is_redelegation { + for (epoch_pair, amount) in &new_unbonds { + let unbonds = self + .unbonds + .entry(*epoch_pair) + .or_default() + .entry(id.clone()) + .or_default(); + *unbonds += *amount; } } @@ -2173,27 +3216,323 @@ impl AbstractPosState { ); } + // `newRedelegatedUnbonds` + // Compute new redelegated unbonds (which requires unmodified + // redelegated bonds) + let new_redelegated_unbonds = Self::compute_new_redelegated_unbonds( + delegator_redelegated_bonds, + &bonds_to_remove.epochs, + &modified_redelegation, + ); + 
+ // `updatedRedelegatedBonded` + // Update the delegator's redelegated bonds in the state + for epoch_to_remove in &bonds_to_remove.epochs { + delegator_redelegated_bonds.remove(epoch_to_remove); + } + if let Some(epoch) = modified_redelegation.epoch { + if modified_redelegation.validators_to_remove.is_empty() { + delegator_redelegated_bonds.remove(&epoch); + } else { + let rbonds = + delegator_redelegated_bonds.entry(epoch).or_default(); + + if let Some(val_to_modify) = + &modified_redelegation.validator_to_modify + { + let mut updated_vals_to_remove = + modified_redelegation.validators_to_remove.clone(); + updated_vals_to_remove.remove(val_to_modify); + + // Remove the updated_vals_to_remove keys from the + // redelegated_bonds map first + for val in &updated_vals_to_remove { + rbonds.remove(val); + } + + if let Some(epoch_to_modify) = + modified_redelegation.epoch_to_modify + { + let mut updated_epochs_to_remove = + modified_redelegation.epochs_to_remove.clone(); + updated_epochs_to_remove.remove(&epoch_to_modify); + let val_bonds_to_modify = + rbonds.entry(val_to_modify.clone()).or_default(); + for epoch in updated_epochs_to_remove { + val_bonds_to_modify.remove(&epoch); + } + val_bonds_to_modify.insert( + epoch_to_modify, + modified_redelegation.new_amount.unwrap(), + ); + } else { + // Then remove to epochs_to_remove from the redelegated + // bonds of the val_to_modify + let val_bonds_to_modify = + rbonds.entry(val_to_modify.clone()).or_default(); + for epoch in &modified_redelegation.epochs_to_remove { + val_bonds_to_modify.remove(epoch); + } + } + } else { + // Remove all validators in + // modified_redelegation.validators_to_remove + // from redelegated_bonds + for val in &modified_redelegation.validators_to_remove { + rbonds.remove(val); + } + } + } + } + + // `updatedRedelegatedUnbonded + if !is_redelegation { + // Get all the epoch pairs that should exist in the state now + let new_unbond_epoch_pairs = new_redelegated_unbonds + .keys() + 
.map(|start_epoch| (*start_epoch, withdraw_epoch)) + .collect::>(); + + // Update the state for delegator's redelegated unbonds now + // NOTE: can maybe do this by only looking at those inside the new + // epoch pairs? + for unbond_pair in new_unbond_epoch_pairs { + for (src_val, redel_unbonds) in + new_redelegated_unbonds.get(&unbond_pair.0).unwrap() + { + for (src_start, unbonded) in redel_unbonds { + let existing_unbonded = delegator_redelegated_unbonds + .entry(unbond_pair) + .or_default() + .entry(src_val.clone()) + .or_default() + .entry(*src_start) + .or_default(); + *existing_unbonded += *unbonded; + } + } + } + } + + // `updatedTotalBonded` and `updatedTotalUnbonded` + // Update the validator's total bonded and total unbonded + for ((start_epoch, _), unbonded) in &new_unbonds { + let cur_total_bonded = + total_bonded.entry(*start_epoch).or_default(); + *cur_total_bonded -= *unbonded; + let cur_total_unbonded = + total_unbonded.entry(*start_epoch).or_default(); + *cur_total_unbonded += *unbonded; + } + + // `updatedTotalRedelegatedBonded` and `updatedTotalRedelegatedUnbonded` + // Update the validator's total redelegated bonded and unbonded + for (dest_start, r_unbonds) in &new_redelegated_unbonds { + for (src_val, changes) in r_unbonds { + for (bond_start, change) in changes { + let cur_total_bonded = validator_total_redelegated_bonded + .entry(*dest_start) + .or_default() + .entry(src_val.clone()) + .or_default() + .entry(*bond_start) + .or_default(); + *cur_total_bonded -= *change; + + let cur_total_unbonded = + validator_total_redelegated_unbonded + .entry(*dest_start) + .or_default() + .entry(src_val.clone()) + .or_default() + .entry(*bond_start) + .or_default(); + *cur_total_unbonded += *change; + } + } + } + + // `resultSlashing` + // Get the slashed amount of the unbond now + let result_slashing = Self::compute_amount_after_slashing_unbond( + &self.params, + validator_slashes, + &id.validator, + &new_unbonds, + &new_redelegated_unbonds, + ); + // 
`amountAfterSlashing` + let amount_after_slashing = result_slashing.sum.change(); + let pipeline_state = self .validator_states .get(&self.pipeline()) .unwrap() .get(&id.validator) .unwrap(); - // let pipeline_stake = self - // .validator_stakes - // .get(&self.pipeline()) - // .unwrap() - // .get(&id.validator) - // .unwrap(); - // let token_change = cmp::min(*pipeline_stake, amount_after_slashing); if *pipeline_state != ValidatorState::Jailed { - self.update_validator_sets(&id.validator, -amount_after_slashing); + self.update_validator_sets( + self.pipeline(), + &id.validator, + -amount_after_slashing, + ); } self.update_validator_total_stake( &id.validator, -amount_after_slashing, ); + + result_slashing + } + + fn update_state_with_redelegation( + &mut self, + id: &BondId, + new_validator: &Address, + change: token::Amount, + ) { + // First need to unbond the redelegated tokens + // NOTE: same logic as unbond transition but with some things left out + let pipeline_epoch = self.pipeline(); + + // `resultUnbond` + let result_unbond = self.unbond_tokens(id, change, true); + + // `amountAfterSlashing` + let amount_after_slashing = result_unbond.sum; + + // `updatedRedelegatedBonds` + // Update the delegator's redelegated bonded + let delegator_redelegated_bonded = self + .delegator_redelegated_bonded + .entry(id.source.clone()) + .or_default() + .entry(new_validator.clone()) + .or_default() + .entry(pipeline_epoch) + .or_default() + .entry(id.validator.clone()) + .or_default(); + for (start_epoch, bonded) in &result_unbond.epoch_map { + *delegator_redelegated_bonded + .entry(*start_epoch) + .or_default() += *bonded; + } + + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = self + .bonds + .get(&BondId { + source: id.source.clone(), + validator: new_validator.clone(), + }) + .cloned() + .unwrap_or_default(); + tracing::debug!( + "\nRedeleg dest bonds before incrementing: {bonds:#?}" + ); + } + + if !amount_after_slashing.is_zero() { + // 
`updatedDelegator` --> `with("bonded")` + // Update the delegator's bonds + let bonds = self + .bonds + .entry(BondId { + source: id.source.clone(), + validator: new_validator.clone(), + }) + .or_default(); + *bonds.entry(pipeline_epoch).or_default() += amount_after_slashing; + + // `updatedDestValidator` --> `with("totalBonded")` + // Update the dest validator's total bonded + let dest_total_bonded = self + .total_bonded + .entry(new_validator.clone()) + .or_default() + .entry(pipeline_epoch) + .or_default(); + *dest_total_bonded += amount_after_slashing; + } + + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = self + .bonds + .get(&BondId { + source: id.source.clone(), + validator: new_validator.clone(), + }) + .cloned() + .unwrap_or_default(); + tracing::debug!( + "\nRedeleg dest bonds after incrementing: {bonds:#?}" + ); + } + + // `updatedOutgoingRedelegations` and `updatedSrcValidator` + // Update the src validator's outgoing redelegations + let outgoing_redelegations = self + .outgoing_redelegations + .entry(id.validator.clone()) + .or_default() + .entry(new_validator.clone()) + .or_default(); + for (start_epoch, bonded) in &result_unbond.epoch_map { + let cur_outgoing = outgoing_redelegations + .entry((*start_epoch, self.epoch)) + .or_default(); + *cur_outgoing += *bonded; + } + + // `updatedDestValidator` --> `with("totalRedelegatedBonded")` + // Update the dest validator's total redelegated bonded + let dest_total_redelegated_bonded = self + .validator_total_redelegated_bonded + .entry(new_validator.clone()) + .or_default() + .entry(pipeline_epoch) + .or_default() + .entry(id.validator.clone()) + .or_default(); + for (start_epoch, bonded) in &result_unbond.epoch_map { + let cur_tot_bonded = dest_total_redelegated_bonded + .entry(*start_epoch) + .or_default(); + *cur_tot_bonded += *bonded; + } + + // `updatedDestValidator` --> `with("incomingRedelegations")` + // Update the dest validator's incoming redelegations + let 
incoming_redelegations = self + .incoming_redelegations + .entry(new_validator.clone()) + .or_default(); + incoming_redelegations.insert(id.source.clone(), pipeline_epoch); + + // `updatedDestValidator` --> `with("stake")` + // Update validator set and stake + let pipeline_state = self + .validator_states + .get(&self.pipeline()) + .unwrap() + .get(new_validator) + .unwrap(); + + if !amount_after_slashing.is_zero() { + if *pipeline_state != ValidatorState::Jailed { + self.update_validator_sets( + self.pipeline(), + new_validator, + amount_after_slashing.change(), + ); + } + self.update_validator_total_stake( + new_validator, + amount_after_slashing.change(), + ); + } } /// Update validator's total stake with bonded or unbonded change at the @@ -2209,32 +3548,39 @@ impl AbstractPosState { .or_default() .entry(validator.clone()) .or_default(); - *total_stakes += change; + *total_stakes = token::Amount::from(total_stakes.change() + change); } /// Update validator in sets with bonded or unbonded change fn update_validator_sets( &mut self, + epoch: Epoch, validator: &Address, change: token::Change, ) { - let pipeline = self.pipeline(); - let consensus_set = self.consensus_set.entry(pipeline).or_default(); - let below_cap_set = - self.below_capacity_set.entry(pipeline).or_default(); + tracing::debug!( + "\nUpdating set for validator {} in epoch {} with amount {}\n", + validator, + epoch, + change + ); + if change.is_zero() { + return; + } + // let pipeline = self.pipeline(); + let consensus_set = self.consensus_set.entry(epoch).or_default(); + let below_cap_set = self.below_capacity_set.entry(epoch).or_default(); let below_thresh_set = - self.below_threshold_set.entry(pipeline).or_default(); + self.below_threshold_set.entry(epoch).or_default(); - let validator_stakes = self.validator_stakes.get(&pipeline).unwrap(); - let validator_states = - self.validator_states.get_mut(&pipeline).unwrap(); + let validator_stakes = self.validator_stakes.get(&epoch).unwrap(); + let 
validator_states = self.validator_states.get_mut(&epoch).unwrap(); let state_pre = validator_states.get(validator).unwrap(); let this_val_stake_pre = *validator_stakes.get(validator).unwrap(); let this_val_stake_post = - token::Amount::from_change(this_val_stake_pre + change); - let this_val_stake_pre = token::Amount::from_change(this_val_stake_pre); + token::Amount::from_change(this_val_stake_pre.change() + change); let threshold = self.params.validator_stake_threshold; if this_val_stake_pre < threshold && this_val_stake_post < threshold { @@ -2246,12 +3592,9 @@ impl AbstractPosState { match state_pre { ValidatorState::Consensus => { - // println!("Validator initially in consensus"); // Remove from the prior stake let vals = consensus_set.entry(this_val_stake_pre).or_default(); - // dbg!(&vals); vals.retain(|addr| addr != validator); - // dbg!(&vals); if vals.is_empty() { consensus_set.remove(&this_val_stake_pre); @@ -2290,7 +3633,7 @@ impl AbstractPosState { // If unbonding, check the max below-cap validator's state if we // need to do a swap - if change < token::Change::default() { + if change < token::Change::zero() { if let Some(mut max_below_cap) = below_cap_set.last_entry() { let max_below_cap_stake = *max_below_cap.key(); @@ -2333,7 +3676,7 @@ impl AbstractPosState { .push_back(validator.clone()); } ValidatorState::BelowCapacity => { - // println!("Validator initially in below-cap"); + // tracing::debug!("Validator initially in below-cap"); // Remove from the prior stake let vals = @@ -2356,11 +3699,9 @@ impl AbstractPosState { // If bonding, check the min consensus validator's state if we // need to do a swap - if change >= token::Change::default() { - // dbg!(&consensus_set); + if change >= token::Change::zero() { if let Some(mut min_consensus) = consensus_set.first_entry() { - // dbg!(&min_consensus); let min_consensus_stake = *min_consensus.key(); if this_val_stake_post > min_consensus_stake { // Swap this validator with the max consensus @@ -2423,7 
+3764,6 @@ impl AbstractPosState { } // Determine which set to place the validator into if let Some(mut min_consensus) = consensus_set.first_entry() { - // dbg!(&min_consensus); let min_consensus_stake = *min_consensus.key(); if this_val_stake_post > min_consensus_stake { // Swap this validator with the max consensus @@ -2478,288 +3818,630 @@ impl AbstractPosState { .get(&self.epoch) .cloned() .unwrap_or_default(); - if !slashes_this_epoch.is_empty() { - let infraction_epoch = self.epoch - - self.params.unbonding_len - - self.params.cubic_slashing_window_length - - 1; - // Now need to basically do the end_of_epoch() procedure - // from the Informal Systems model - let cubic_rate = self.cubic_slash_rate(); - for (validator, slashes) in slashes_this_epoch { - let stake_at_infraction = self - .validator_stakes - .get(&infraction_epoch) - .unwrap() - .get(&validator) - .cloned() - .unwrap_or_default(); - tracing::debug!( - "Val {} stake at infraction {}", + + if slashes_this_epoch.is_empty() { + return; + } + + let infraction_epoch = + self.epoch - self.params.slash_processing_epoch_offset(); + let cubic_rate = self.cubic_slash_rate(); + + // Get effective slash rate per validator and update the slashes in the + // Abstract state + let slash_rates = slashes_this_epoch.iter().fold( + BTreeMap::::new(), + |mut acc, (validator, slashes)| { + let mut tot_rate = + acc.get(validator).cloned().unwrap_or_default(); + for slash in slashes { + debug_assert_eq!(slash.epoch, infraction_epoch); + let rate = cmp::max( + slash.r#type.get_slash_rate(&self.params), + cubic_rate, + ); + tot_rate = cmp::min(Dec::one(), tot_rate + rate); + } + acc.insert(validator.clone(), tot_rate); + acc + }, + ); + + let mut map_validator_slash: EagerRedelegatedBondsMap = BTreeMap::new(); + for (validator, rate) in slash_rates { + self.process_validator_slash( + &validator, + rate, + &mut map_validator_slash, + ); + } + tracing::debug!( + "Slashed amounts for validators: {map_validator_slash:#?}" + ); 
+ + for (validator, slash_amounts) in map_validator_slash { + for (update_epoch, delta) in slash_amounts { + let state = self + .validator_states + .get(&update_epoch) + .unwrap() + .get(&validator) + .unwrap(); + if *state != ValidatorState::Jailed { + self.update_validator_sets( + update_epoch, + &validator, + -delta.change(), + ); + } + + let stake = self + .validator_stakes + .entry(update_epoch) + .or_default() + .entry(validator.clone()) + .or_default(); + *stake -= delta; + } + + let next_state = self + .validator_states + .get(&self.epoch.next()) + .unwrap() + .get(&validator) + .cloned() + .unwrap(); + + let pipeline_state = self + .validator_states + .get(&self.pipeline()) + .unwrap() + .get(&validator) + .cloned() + .unwrap(); + + debug_assert_eq!(next_state, pipeline_state); + } + + // Update the slashes in the Abstract state ONLY AFTER processing them + for (validator, slashes) in slashes_this_epoch { + let cur_slashes = + self.validator_slashes.entry(validator.clone()).or_default(); + + for slash in slashes { + let rate = cmp::max( + slash.r#type.get_slash_rate(&self.params), + cubic_rate, + ); + cur_slashes.push(Slash { + epoch: slash.epoch, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate, + }); + } + } + } + + fn process_validator_slash( + &mut self, + validator: &Address, + slash_rate: Dec, + val_slash_amounts: &mut EagerRedelegatedBondsMap, + ) { + let slash_amounts = val_slash_amounts + .get(validator) + .cloned() + .unwrap_or_default(); + let result_slash = + self.slash_validator(validator, slash_rate, &slash_amounts); + + // `updatedSlashedAmountMap` + let validator_slashes = + val_slash_amounts.entry(validator.clone()).or_default(); + *validator_slashes = result_slash; + + let dest_validators = self + .outgoing_redelegations + .get(validator) + .cloned() + .unwrap_or_default() + .keys() + .cloned() + .collect::>(); + + for dest_val in dest_validators { + let to_modify = + 
val_slash_amounts.entry(dest_val.clone()).or_default(); + + tracing::debug!( + "Slashing {} redelegation to {}", + validator, + &dest_val + ); + + // `slashValidatorRedelegation` + self.slash_validator_redelegation( + validator, &dest_val, slash_rate, to_modify, + ); + + if to_modify.is_empty() { + val_slash_amounts.remove(&dest_val); + }; + } + } + + fn slash_validator( + &self, + validator: &Address, + slash_rate: Dec, + val_slash_amounts: &BTreeMap, + ) -> BTreeMap { + tracing::debug!( + "Slashing validator {} at rate {}", + validator, + slash_rate + ); + + let infraction_epoch = + self.epoch - self.params.slash_processing_epoch_offset(); + + let total_unbonded = self + .total_unbonded + .get(validator) + .cloned() + .unwrap_or_default(); + let total_redelegated_unbonded = self + .validator_total_redelegated_unbonded + .get(validator) + .cloned() + .unwrap_or_default(); + + // `val bonds` + let mut total_bonded = self + .total_bonded + .get(validator) + .cloned() + .unwrap_or_default() + .into_iter() + .filter(|&(epoch, _amount)| epoch <= infraction_epoch) + .collect::>(); + + // `val redelegatedBonds` + let mut total_redelegated_bonded = total_bonded + .keys() + .filter(|&epoch| { + self.validator_total_redelegated_bonded + .get(validator) + .cloned() + .unwrap_or_default() + .contains_key(epoch) + }) + .map(|epoch| { + ( + *epoch, + self.validator_total_redelegated_bonded + .get(validator) + .unwrap() + .get(epoch) + .cloned() + .unwrap(), + ) + }) + .collect::>(); + + let mut slashed_amounts = val_slash_amounts.clone(); + let mut sum = token::Amount::zero(); + + let eps = self + .epoch + .iter_range(self.params.pipeline_len) + .collect::>(); + for epoch in eps.into_iter().rev() { + let amount = total_bonded.iter().fold( + token::Amount::zero(), + |acc, (bond_start, bond_amount)| { + let redel_bonds = total_redelegated_bonded + .get(bond_start) + .cloned() + .unwrap_or_default(); + acc + self.compute_slash_bond_at_epoch( + epoch, + infraction_epoch, + 
*bond_start, + *bond_amount, + &redel_bonds, + slash_rate, + validator, + ) + }, + ); + + let new_bonds = total_unbonded + .get(&epoch) + .cloned() + .unwrap_or_default() + .into_iter() + .filter(|(ep, _)| *ep <= infraction_epoch) + .collect::>(); + + let new_redelegated_bonds = new_bonds + .keys() + .filter(|&ep| { + total_redelegated_unbonded + .get(&epoch) + .cloned() + .unwrap_or_default() + .contains_key(ep) + }) + .map(|ep| { + ( + *ep, + total_redelegated_unbonded + .get(&epoch) + .unwrap() + .get(ep) + .cloned() + .unwrap(), + ) + }) + .collect::>(); + + total_bonded = new_bonds; + total_redelegated_bonded = new_redelegated_bonds; + sum += amount; + + let cur = slashed_amounts.entry(epoch).or_default(); + *cur += sum; + } + // Hack - should this be done differently? (think this is safe) + let last_amt = slashed_amounts + .get(&self.pipeline().prev()) + .cloned() + .unwrap(); + slashed_amounts.insert(self.pipeline(), last_amt); + + slashed_amounts + } + + fn fold_and_slash_redelegated_bonds( + &self, + redel_bonds: &BTreeMap>, + start: Epoch, + list_slashes: &[Slash], + slash_epoch_filter: impl Fn(Epoch) -> bool, + ) -> FoldRedelegatedBondsResult { + let mut result = FoldRedelegatedBondsResult::default(); + for (src_validator, bonds) in redel_bonds { + for (bond_start, bonded) in bonds { + let src_slashes = self + .validator_slashes + .get(src_validator) + .cloned() + .unwrap_or_default() + .iter() + .filter(|&s| { + self.params.in_redelegation_slashing_window( + s.epoch, + self.params + .redelegation_start_epoch_from_end(start), + start, + ) && *bond_start <= s.epoch + && slash_epoch_filter(s.epoch) + }) + .cloned() + .collect::>(); + + let mut merged = list_slashes + .iter() + .chain(src_slashes.iter()) + .cloned() + .collect::>(); + merged + .sort_by(|s1, s2| s1.epoch.partial_cmp(&s2.epoch).unwrap()); + + result.total_redelegated += *bonded; + result.total_after_slashing += Self::apply_slashes_to_amount( + &self.params, + &merged, + *bonded, + ); + } + } 
+ result + } + + fn compute_bond_at_epoch( + &self, + epoch: Epoch, + start: Epoch, + amount: token::Amount, + redel_bonds: &BTreeMap>, + validator: &Address, + ) -> token::Amount { + // `val list_slashes` + let list_slashes = self + .validator_slashes + .get(validator) + .cloned() + .unwrap_or_default() + .iter() + .filter(|&slash| { + // TODO: check bounds! + start <= slash.epoch + && slash.epoch + self.params.slash_processing_epoch_offset() + <= epoch + }) + .cloned() + .collect::>(); + + // `val filteredSlashMap` and `val resultFold` + // `fold_and_slash_redelegated_bonds` + let slash_epoch_filter = + |e: Epoch| e + self.params.slash_processing_epoch_offset() <= epoch; + let result_fold = self.fold_and_slash_redelegated_bonds( + redel_bonds, + start, + &list_slashes, + slash_epoch_filter, + ); + + // `val totalNoRedelegated` + let total_not_redelegated = amount - result_fold.total_redelegated; + // `val afterNoRedelegated` + let after_not_redelegated = Self::apply_slashes_to_amount( + &self.params, + &list_slashes, + total_not_redelegated, + ); + + after_not_redelegated + result_fold.total_after_slashing + } + + #[allow(clippy::too_many_arguments)] + fn compute_slash_bond_at_epoch( + &self, + epoch: Epoch, + infraction_epoch: Epoch, + bond_start: Epoch, + bond_amount: token::Amount, + redel_bonds: &BTreeMap>, + slash_rate: Dec, + validator: &Address, + ) -> token::Amount { + let amount_due = self + .compute_bond_at_epoch( + infraction_epoch, + bond_start, + bond_amount, + redel_bonds, + validator, + ) + .mul_ceil(slash_rate); + let slashable_amount = self.compute_bond_at_epoch( + epoch, + bond_start, + bond_amount, + redel_bonds, + validator, + ); + + cmp::min(amount_due, slashable_amount) + } + + fn slash_validator_redelegation( + &self, + validator: &Address, + dest_validator: &Address, + slash_rate: Dec, + slash_amounts: &mut BTreeMap, + ) { + let infraction_epoch = + self.epoch - self.params.slash_processing_epoch_offset(); + + let 
dest_total_redelegated_unbonded = self + .validator_total_redelegated_unbonded + .get(dest_validator) + .cloned() + .unwrap_or_default(); + let validator_slashes = self + .validator_slashes + .get(validator) + .cloned() + .unwrap_or_default(); + + // Loop over outgoing redelegations of validator -> dest_validator + let outgoing_redelegations = if let Some(outgoing_redels) = + self.outgoing_redelegations.get(validator) + { + outgoing_redels + .get(dest_validator) + .cloned() + .unwrap_or_default() + } else { + BTreeMap::<(Epoch, Epoch), token::Amount>::new() + }; + + for ((src_start_epoch, redel_start), amount) in outgoing_redelegations { + if self.params.in_redelegation_slashing_window( + infraction_epoch, + redel_start, + self.params.redelegation_end_epoch_from_start(redel_start), + ) && src_start_epoch <= infraction_epoch + { + self.slash_redelegation( + amount, + src_start_epoch, + self.params.redelegation_end_epoch_from_start(redel_start), validator, - stake_at_infraction.to_string_native(), + slash_rate, + &validator_slashes, + &dest_total_redelegated_unbonded, + slash_amounts, ); + } + } + } - let mut total_rate = Dec::zero(); + #[allow(clippy::too_many_arguments)] + fn slash_redelegation( + &self, + amount: token::Amount, + bond_start: Epoch, + redel_bond_start: Epoch, + src_validator: &Address, + slash_rate: Dec, + slashes: &[Slash], + dest_total_redelegated_unbonded: &AbstractTotalRedelegatedUnbonded, + slash_amounts: &mut BTreeMap, + ) { + tracing::debug!( + "\nSlashing redelegation amount {} - bond start {} and \ + redel_bond_start {} - at rate {}\n", + amount.to_string_native(), + bond_start, + redel_bond_start, + slash_rate + ); - for slash in slashes { - debug_assert_eq!(slash.epoch, infraction_epoch); - let rate = cmp::max( - slash.r#type.get_slash_rate(&self.params), - cubic_rate, - ); - let processed_slash = Slash { - epoch: slash.epoch, - block_height: slash.block_height, - r#type: slash.r#type, - rate, - }; - let cur_slashes = self - 
.validator_slashes - .entry(validator.clone()) - .or_default(); - cur_slashes.push(processed_slash.clone()); + let infraction_epoch = + self.epoch - self.params.slash_processing_epoch_offset(); - total_rate += rate; - } - total_rate = cmp::min(total_rate, Dec::one()); - tracing::debug!("Total rate: {}", total_rate); - - let mut total_unbonded = token::Amount::default(); - let mut sum_post_bonds = token::Change::default(); - - for epoch in (infraction_epoch.0 + 1)..self.epoch.0 { - tracing::debug!("\nEpoch {}", epoch); - let mut recent_unbonds = token::Change::default(); - let unbond_records = self - .unbond_records - .entry(validator.clone()) - .or_default() - .get(&Epoch(epoch)) - .cloned() - .unwrap_or_default(); - for (start, unbond_amount) in unbond_records { - tracing::debug!( - "UnbondRecord: amount = {}, start_epoch {}", - &unbond_amount.to_string_native(), - &start - ); - if start <= infraction_epoch { - let slashes_for_this_unbond = self - .validator_slashes - .get(&validator) - .cloned() - .unwrap_or_default() - .iter() - .filter(|&s| { - start <= s.epoch - && s.epoch - + self.params.unbonding_len - + self - .params - .cubic_slashing_window_length - < infraction_epoch - }) - .cloned() - .fold( - BTreeMap::::new(), - |mut acc, s| { - let cur = - acc.entry(s.epoch).or_default(); - *cur += s.rate; - acc - }, - ); - tracing::debug!( - "Slashes for this unbond: {:?}", - slashes_for_this_unbond - ); - total_unbonded += compute_amount_after_slashing( - &slashes_for_this_unbond, - unbond_amount, - self.params.unbonding_len, - self.params.cubic_slashing_window_length, - ); - } else { - recent_unbonds += unbond_amount.change(); - } + // Slash redelegation destination validator from the next epoch only + // as they won't be jailed + let set_update_epoch = self.epoch.next(); - tracing::debug!( - "Total unbonded (epoch {}) w slashing = {}", - epoch, - total_unbonded.to_string_native() - ); - } - sum_post_bonds += self - .total_bonded - .get(&validator) - 
.and_then(|bonded| bonded.get(&Epoch(epoch))) + // Do initial computation of total unbonded + let mut tot_unbonded = token::Amount::zero(); + for epoch in Epoch::iter_bounds_inclusive( + infraction_epoch.next(), + set_update_epoch, + ) { + let total_redelegated_unbonded = + dest_total_redelegated_unbonded.get(&epoch); + if let Some(tot_redel_unbonded) = total_redelegated_unbonded { + if Self::has_redelegation( + tot_redel_unbonded, + bond_start, + redel_bond_start, + src_validator, + ) { + tot_unbonded += tot_redel_unbonded + .get(&redel_bond_start) + .unwrap() + .get(src_validator) + .unwrap() + .get(&bond_start) .cloned() - .unwrap_or_default() - - recent_unbonds; + .unwrap(); } - tracing::debug!("Computing adjusted amounts now"); + } + } - let mut last_slash = token::Change::default(); - for offset in 0..self.params.pipeline_len { - tracing::debug!( - "Epoch {}\nLast slash = {}", - self.epoch + offset, - last_slash.to_string_native(), - ); - let mut recent_unbonds = token::Change::default(); - let unbond_records = self - .unbond_records - .get(&validator) + for epoch in + Epoch::iter_range(set_update_epoch, self.params.pipeline_len) + { + let total_redelegated_unbonded = dest_total_redelegated_unbonded + .get(&epoch) + .cloned() + .unwrap_or_default(); + let updated_total_unbonded = if !Self::has_redelegation( + &total_redelegated_unbonded, + bond_start, + redel_bond_start, + src_validator, + ) { + tot_unbonded + } else { + tot_unbonded + + total_redelegated_unbonded + .get(&redel_bond_start) .unwrap() - .get(&(self.epoch + offset)) + .get(src_validator) + .unwrap() + .get(&bond_start) .cloned() - .unwrap_or_default(); - for (start, unbond_amount) in unbond_records { - tracing::debug!( - "UnbondRecord: amount = {}, start_epoch {}", - unbond_amount.to_string_native(), - &start - ); - if start <= infraction_epoch { - let slashes_for_this_unbond = self - .validator_slashes - .get(&validator) - .cloned() - .unwrap_or_default() - .iter() - .filter(|&s| { - start <= 
s.epoch - && s.epoch - + self.params.unbonding_len - + self - .params - .cubic_slashing_window_length - < infraction_epoch - }) - .cloned() - .fold( - BTreeMap::::new(), - |mut acc, s| { - let cur = - acc.entry(s.epoch).or_default(); - *cur += s.rate; - acc - }, - ); - tracing::debug!( - "Slashes for this unbond: {:?}", - slashes_for_this_unbond - ); + .unwrap() + }; - total_unbonded += compute_amount_after_slashing( - &slashes_for_this_unbond, - unbond_amount, - self.params.unbonding_len, - self.params.cubic_slashing_window_length, - ); - } else { - recent_unbonds += unbond_amount.change(); - } + let list_slashes = slashes + .iter() + .filter(|&slash| { + self.params.in_redelegation_slashing_window( + slash.epoch, + self.params.redelegation_start_epoch_from_end( + redel_bond_start, + ), + redel_bond_start, + ) && bond_start <= slash.epoch + && slash.epoch + + self.params.slash_processing_epoch_offset() + <= infraction_epoch + }) + .cloned() + .collect::>(); - tracing::debug!( - "Total unbonded (offset {}) w slashing = {}", - offset, - total_unbonded.to_string_native() - ); - } - tracing::debug!( - "stake at infraction {}", - stake_at_infraction.to_string_native(), - ); - tracing::debug!( - "total unbonded {}", - total_unbonded.to_string_native() - ); - let this_slash = total_rate - * (stake_at_infraction - total_unbonded.change()); - let diff_slashed_amount = last_slash - this_slash; - tracing::debug!( - "Offset {} diff_slashed_amount {}", - offset, - diff_slashed_amount.to_string_native(), - ); - last_slash = this_slash; - // total_unbonded = token::Amount::default(); - - // Update the voting powers (consider that the stake is - // discrete) let validator_stake = self - // .validator_stakes - // .entry(self.epoch + offset) - // .or_default() - // .entry(validator.clone()) - // .or_default(); - // *validator_stake -= diff_slashed_amount; - - tracing::debug!("Updating ABSTRACT voting powers"); - sum_post_bonds += self - .total_bonded - .get(&validator) - 
.and_then(|bonded| bonded.get(&(self.epoch + offset))) - .cloned() - .unwrap_or_default() - - recent_unbonds; + let slashable_amount = amount + .checked_sub(updated_total_unbonded) + .unwrap_or_default(); - tracing::debug!( - "\nUnslashable bonds = {}", - sum_post_bonds.to_string_native() - ); - let validator_stake_at_offset = self - .validator_stakes - .entry(self.epoch + offset) - .or_default() - .entry(validator.clone()) - .or_default(); + let slashed = Self::apply_slashes_to_amount( + &self.params, + &list_slashes, + slashable_amount, + ) + .mul_ceil(slash_rate); - let slashable_stake_at_offset = - *validator_stake_at_offset - sum_post_bonds; - tracing::debug!( - "Val stake pre (epoch {}) = {}", - self.epoch + offset, - validator_stake_at_offset.to_string_native(), - ); - tracing::debug!( - "Slashable stake at offset = {}", - slashable_stake_at_offset.to_string_native(), - ); - let change = cmp::max( - -slashable_stake_at_offset, - diff_slashed_amount, - ); + let list_slashes = slashes + .iter() + .filter(|&slash| { + self.params.in_redelegation_slashing_window( + slash.epoch, + self.params.redelegation_start_epoch_from_end( + redel_bond_start, + ), + redel_bond_start, + ) && bond_start <= slash.epoch + }) + .cloned() + .collect::>(); - tracing::debug!("Change = {}", change.to_string_native()); - *validator_stake_at_offset += change; + let slashable_stake = Self::apply_slashes_to_amount( + &self.params, + &list_slashes, + slashable_amount, + ) + .mul_ceil(slash_rate); - for os in (offset + 1)..=self.params.pipeline_len { - tracing::debug!("Adjust epoch {}", self.epoch + os); - let offset_stake = self - .validator_stakes - .entry(self.epoch + os) - .or_default() - .entry(validator.clone()) - .or_default(); - *offset_stake += change; - // let mut new_stake = - // *validator_stake - diff_slashed_amount; - // if new_stake < 0_i128 { - // new_stake = 0_i128; - // } - - // *validator_stake = new_stake; - tracing::debug!( - "New stake at epoch {} = {}", - self.epoch + 
os, - offset_stake.to_string_native() - ); - } + tot_unbonded = updated_total_unbonded; + + let to_slash = cmp::min(slashed, slashable_stake); + if !to_slash.is_zero() { + let slashed_amt = slash_amounts.entry(epoch).or_default(); + *slashed_amt += to_slash; + } + } + } + + fn has_redelegation( + total_redelegated_unbonded: &BTreeMap< + Epoch, + BTreeMap>, + >, + bond_start: Epoch, + redel_start: Epoch, + src_validator: &Address, + ) -> bool { + if let Some(redel_unbonded) = + total_redelegated_unbonded.get(&redel_start) + { + if let Some(unbonded) = redel_unbonded.get(src_validator) { + if unbonded.contains_key(&bond_start) { + return true; } } } + false } /// Get the pipeline epoch @@ -2826,9 +4508,9 @@ impl AbstractPosState { } /// Find the sums of the bonds across all epochs - fn bond_sums(&self) -> BTreeMap { + fn bond_sums(&self) -> BTreeMap { self.bonds.iter().fold( - BTreeMap::::new(), + BTreeMap::::new(), |mut acc, (id, bonds)| { for delta in bonds.values() { let entry = acc.entry(id.clone()).or_default(); @@ -2843,10 +4525,10 @@ impl AbstractPosState { fn withdrawable_unbonds(&self) -> BTreeMap { self.unbonds.iter().fold( BTreeMap::::new(), - |mut acc, (epoch, unbonds)| { - if *epoch <= self.epoch { + |mut acc, ((_start_epoch, withdraw_epoch), unbonds)| { + if *withdraw_epoch <= self.epoch { for (id, amount) in unbonds { - if *amount > token::Amount::default() { + if *amount > token::Amount::zero() { *acc.entry(id.clone()).or_default() += *amount; } } @@ -2858,11 +4540,13 @@ impl AbstractPosState { /// Compute the cubic slashing rate for the current epoch fn cubic_slash_rate(&self) -> Dec { - let infraction_epoch = self.epoch - - self.params.unbonding_len - - 1_u64 - - self.params.cubic_slashing_window_length; - tracing::debug!("Infraction epoch: {}", infraction_epoch); + let infraction_epoch = + self.epoch - self.params.slash_processing_epoch_offset(); + tracing::debug!( + "Infraction epoch: {}, Current epoch: {}", + infraction_epoch, + self.epoch + ); 
let window_width = self.params.cubic_slashing_window_length; let epoch_start = Epoch::from( infraction_epoch @@ -2877,7 +4561,7 @@ impl AbstractPosState { for epoch in Epoch::iter_bounds_inclusive(epoch_start, epoch_end) { let consensus_stake = self.consensus_set.get(&epoch).unwrap().iter().fold( - token::Amount::default(), + token::Amount::zero(), |sum, (val_stake, validators)| { sum + *val_stake * validators.len() as u64 }, @@ -2895,14 +4579,13 @@ impl AbstractPosState { let enqueued_slashes = self.enqueued_slashes.get(&processing_epoch); if let Some(enqueued_slashes) = enqueued_slashes { for (validator, slashes) in enqueued_slashes.iter() { - let val_stake = token::Amount::from_change( - self.validator_stakes - .get(&epoch) - .unwrap() - .get(validator) - .cloned() - .unwrap_or_default(), - ); + let val_stake = self + .validator_stakes + .get(&epoch) + .unwrap() + .get(validator) + .cloned() + .unwrap_or_default(); tracing::debug!( "Val {} stake epoch {}: {}", &validator, @@ -2957,14 +4640,11 @@ impl AbstractPosState { deltas_stake.to_string_native(), val_state ); - debug_assert_eq!( - *amount, - token::Amount::from_change(*deltas_stake) - ); + debug_assert_eq!(*amount, *deltas_stake); debug_assert_eq!(*val_state, ValidatorState::Consensus); } } - let mut max_bc = token::Amount::default(); + let mut max_bc = token::Amount::zero(); let bc = self.below_capacity_set.get(&epoch).unwrap(); for (amount, vals) in bc { if token::Amount::from(*amount) > max_bc { @@ -2993,13 +4673,13 @@ impl AbstractPosState { ); debug_assert_eq!( token::Amount::from(*amount), - token::Amount::from_change(deltas_stake) + deltas_stake ); debug_assert_eq!(*val_state, ValidatorState::BelowCapacity); } } if max_bc > min_consensus { - println!( + tracing::debug!( "min_consensus = {}, max_bc = {}", min_consensus.to_string_native(), max_bc.to_string_native() @@ -3069,6 +4749,364 @@ impl AbstractPosState { } } } + + fn is_chained_redelegation( + current_epoch: Epoch, + params: &PosParams, + 
incoming_redelegations: &AbstractIncomingRedelegations, + delegator: &Address, + src_validator: &Address, + ) -> bool { + let src_incoming_redelegations = + incoming_redelegations.get(src_validator); + if let Some(incoming) = src_incoming_redelegations { + if let Some(redel_end_epoch) = incoming.get(delegator) { + return redel_end_epoch.prev() + + params.slash_processing_epoch_offset() + > current_epoch; + } + } + false + } + + fn find_bonds_to_remove( + bonds: &BTreeMap, + amount: token::Amount, + ) -> BondsForRemovalRes { + let mut bonds_for_removal = BondsForRemovalRes::default(); + let mut remaining = amount; + + for (&bond_epoch, &bond_amount) in bonds.iter().rev() { + let to_unbond = cmp::min(bond_amount, remaining); + if to_unbond == bond_amount { + bonds_for_removal.epochs.insert(bond_epoch); + } else { + bonds_for_removal.new_entry = + Some((bond_epoch, bond_amount - to_unbond)); + } + remaining -= to_unbond; + if remaining.is_zero() { + break; + } + } + bonds_for_removal + } + + fn compute_modified_redelegation( + delegator_redelegated_bonds: &mut BTreeMap< + Epoch, + BTreeMap>, + >, + bond_epoch: Epoch, + amount: token::Amount, + ) -> ModifiedRedelegation { + let mut modified_redelegation = ModifiedRedelegation::default(); + + let redelegated_bonds = + delegator_redelegated_bonds.entry(bond_epoch).or_default(); + let (src_validators, total_redelegated) = + redelegated_bonds.iter().fold( + (BTreeSet::
::new(), token::Amount::zero()), + |mut acc, (src_val, redel_bonds)| { + acc.0.insert(src_val.clone()); + acc.1 += redel_bonds + .values() + .fold(token::Amount::zero(), |sum, val| sum + *val); + acc + }, + ); + + modified_redelegation.epoch = Some(bond_epoch); + + if total_redelegated <= amount { + return modified_redelegation; + } + + let mut remaining = amount; + for src_val in src_validators { + if remaining.is_zero() { + break; + } + let bonds = redelegated_bonds.get(&src_val).unwrap(); + let total_src_amount = + bonds.values().cloned().sum::(); + + modified_redelegation + .validators_to_remove + .insert(src_val.clone()); + + if total_src_amount <= remaining { + remaining -= total_src_amount; + } else { + let src_bonds_to_remove = + Self::find_bonds_to_remove(bonds, remaining); + + remaining = token::Amount::zero(); + + if let Some((bond_epoch, new_bond_amount)) = + src_bonds_to_remove.new_entry + { + modified_redelegation.validator_to_modify = Some(src_val); + modified_redelegation.epochs_to_remove = { + let mut epochs = src_bonds_to_remove.epochs; + epochs.insert(bond_epoch); + epochs + }; + modified_redelegation.epoch_to_modify = Some(bond_epoch); + modified_redelegation.new_amount = Some(new_bond_amount); + } else { + modified_redelegation.validator_to_modify = Some(src_val); + modified_redelegation.epochs_to_remove = + src_bonds_to_remove.epochs; + } + } + } + + modified_redelegation + } + + fn compute_new_redelegated_unbonds( + redelegated_bonds: &mut BTreeMap< + Epoch, + BTreeMap>, + >, + epochs_to_remove: &BTreeSet, + modified_redelegation: &ModifiedRedelegation, + ) -> BTreeMap>> + { + let unbonded_epochs = if let Some(epoch) = modified_redelegation.epoch { + let mut epochs = epochs_to_remove.clone(); + epochs.insert(epoch); + epochs + .iter() + .cloned() + .filter(|e| redelegated_bonds.contains_key(e)) + .collect::>() + } else { + epochs_to_remove + .iter() + .cloned() + .filter(|e| redelegated_bonds.contains_key(e)) + .collect::>() + }; + + let 
new_redelegated_unbonds: EagerRedelegatedUnbonds = unbonded_epochs + .into_iter() + .map(|start| { + let mut rbonds = EagerRedelegatedBondsMap::default(); + if modified_redelegation + .epoch + .map(|redelegation_epoch| start != redelegation_epoch) + .unwrap_or(true) + || modified_redelegation.validators_to_remove.is_empty() + { + for (src_val, bonds) in + redelegated_bonds.get(&start).unwrap() + { + for (bond_epoch, bond_amount) in bonds { + rbonds + .entry(src_val.clone()) + .or_default() + .insert(*bond_epoch, *bond_amount); + } + } + (start, rbonds) + } else { + for src_validator in + &modified_redelegation.validators_to_remove + { + if modified_redelegation + .validator_to_modify + .as_ref() + .map(|validator| src_validator != validator) + .unwrap_or(true) + { + let raw_bonds = redelegated_bonds + .entry(start) + .or_default() + .entry(src_validator.clone()) + .or_default(); + for (bond_epoch, bond_amount) in raw_bonds { + rbonds + .entry(src_validator.clone()) + .or_default() + .insert(*bond_epoch, *bond_amount); + } + } else { + for bond_start in + &modified_redelegation.epochs_to_remove + { + let cur_redel_bond_amount = redelegated_bonds + .entry(start) + .or_default() + .entry(src_validator.clone()) + .or_default() + .entry(*bond_start) + .or_default(); + + let raw_bonds = rbonds + .entry(src_validator.clone()) + .or_default(); + if modified_redelegation + .epoch_to_modify + .as_ref() + .map(|epoch| bond_start != epoch) + .unwrap_or(true) + { + raw_bonds.insert( + *bond_start, + *cur_redel_bond_amount, + ); + } else { + raw_bonds.insert( + *bond_start, + *cur_redel_bond_amount + - modified_redelegation + .new_amount + // Safe unwrap - it shouldn't + // get to + // this if it's None + .unwrap(), + ); + } + } + } + } + (start, rbonds) + } + }) + .collect(); + new_redelegated_unbonds + } + + fn compute_amount_after_slashing_unbond( + params: &PosParams, + all_slashes: &BTreeMap>, + validator: &Address, + new_unbonds: &BTreeMap<(Epoch, Epoch), token::Amount>, + 
new_redelegated_unbonded: &BTreeMap< + Epoch, + BTreeMap>, + >, + ) -> ResultSlashing { + let mut result_slashing = ResultSlashing::default(); + let validator_slashes = + all_slashes.get(validator).cloned().unwrap_or_default(); + for ((start_epoch, _withdraw_epoch), to_unbond) in new_unbonds { + let slashes = validator_slashes + .iter() + .filter(|&s| s.epoch >= *start_epoch) + .cloned() + .collect::>(); + + // Begin the logic for `fold_and_slash_redelegated_bonds` + let result_fold = { + let (mut total_redelegated, mut total_after_slashing) = + (token::Amount::zero(), token::Amount::zero()); + + for (src_validator, unbonded_map) in new_redelegated_unbonded + .get(start_epoch) + .cloned() + .unwrap_or_default() + { + for (bond_start, unbonded) in unbonded_map { + let src_slashes = all_slashes + .get(&src_validator) + .cloned() + .unwrap_or_default() + .iter() + .filter(|&s| { + params.in_redelegation_slashing_window( + s.epoch, + params.redelegation_start_epoch_from_end( + *start_epoch, + ), + *start_epoch, + ) && bond_start <= s.epoch + }) + .cloned() + .collect::>(); + + let mut merged = slashes + .iter() + .chain(src_slashes.iter()) + .cloned() + .collect::>(); + merged.sort_by(|s1, s2| { + s1.epoch.partial_cmp(&s2.epoch).unwrap() + }); + + total_redelegated += unbonded; + total_after_slashing += Self::apply_slashes_to_amount( + params, &merged, unbonded, + ); + } + } + + FoldRedelegatedBondsResult { + total_redelegated, + total_after_slashing, + } + }; + + let total_not_redelegated = + *to_unbond - result_fold.total_redelegated; + let after_not_redelegated = Self::apply_slashes_to_amount( + params, + &slashes, + total_not_redelegated, + ); + let amount_after_slashing = + after_not_redelegated + result_fold.total_after_slashing; + result_slashing.sum += amount_after_slashing; + result_slashing + .epoch_map + .insert(*start_epoch, amount_after_slashing); + } + + result_slashing + } + + fn apply_slashes_to_amount( + params: &PosParams, + slashes: &[Slash], + 
amount: token::Amount, + ) -> token::Amount { + let mut final_amount = amount; + let mut computed_slashes = BTreeMap::::new(); + for slash in slashes { + let slashed_amount = Self::compute_slashable_amount( + params, + slash, + amount, + &computed_slashes, + ); + final_amount = + final_amount.checked_sub(slashed_amount).unwrap_or_default(); + + computed_slashes.insert(slash.epoch, slashed_amount); + } + final_amount + } + + fn compute_slashable_amount( + params: &PosParams, + slash: &Slash, + amount: token::Amount, + computed_slashes: &BTreeMap, + ) -> token::Amount { + let updated_amount = computed_slashes + .iter() + .filter(|(&epoch, _)| { + // TODO: check if bounds correct! + // slashes that have already been applied and processed + epoch + params.slash_processing_epoch_offset() <= slash.epoch + }) + .fold(amount, |acc, (_, amnt)| { + acc.checked_sub(*amnt).unwrap_or_default() + }); + updated_amount.mul_ceil(slash.rate) + } } /// Arbitrary bond transition that adds tokens to an existing bond @@ -3175,44 +5213,3 @@ fn arb_slash(state: &AbstractPosState) -> impl Strategy { }, ) } - -fn compute_amount_after_slashing( - slashes: &BTreeMap, - amount: token::Amount, - unbonding_len: u64, - cubic_slash_window_len: u64, -) -> token::Amount { - let mut computed_amounts = Vec::::new(); - let mut updated_amount = amount; - - for (infraction_epoch, slash_rate) in slashes { - let mut indices_to_remove = BTreeSet::::new(); - - for (idx, slashed_amount) in computed_amounts.iter().enumerate() { - if slashed_amount.epoch + unbonding_len + cubic_slash_window_len - < *infraction_epoch - { - updated_amount = updated_amount - .checked_sub(slashed_amount.amount) - .unwrap_or_default(); - indices_to_remove.insert(idx); - } - } - for idx in indices_to_remove.into_iter().rev() { - computed_amounts.remove(idx); - } - computed_amounts.push(SlashedAmount { - amount: *slash_rate * updated_amount, - epoch: *infraction_epoch, - }); - } - updated_amount - .checked_sub( - computed_amounts - 
.iter() - .fold(token::Amount::default(), |sum, computed| { - sum + computed.amount - }), - ) - .unwrap_or_default() -} diff --git a/proof_of_stake/src/tests/state_machine_v2.rs b/proof_of_stake/src/tests/state_machine_v2.rs new file mode 100644 index 0000000000..02df1b39a0 --- /dev/null +++ b/proof_of_stake/src/tests/state_machine_v2.rs @@ -0,0 +1,4597 @@ +//! Test PoS transitions with a state machine + +use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque}; +use std::ops::{AddAssign, Deref}; +use std::{cmp, mem}; + +use assert_matches::assert_matches; +use derivative::Derivative; +use itertools::Itertools; +use namada_core::ledger::storage::testing::TestWlStorage; +use namada_core::ledger::storage_api::collections::lazy_map::{ + NestedSubKey, SubKey, +}; +use namada_core::ledger::storage_api::token::read_balance; +use namada_core::ledger::storage_api::{token, StorageRead}; +use namada_core::types::address::{self, Address}; +use namada_core::types::dec::Dec; +use namada_core::types::key; +use namada_core::types::key::common::PublicKey; +use namada_core::types::storage::Epoch; +use namada_core::types::token::Change; +use proptest::prelude::*; +use proptest::test_runner::Config; +use proptest_state_machine::{ + prop_state_machine, ReferenceStateMachine, StateMachineTest, +}; +// Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to see +// `tracing` logs from tests +use test_log::test; +use yansi::Paint; + +use super::utils::DbgPrintDiff; +use crate::parameters::testing::arb_rate; +use crate::parameters::PosParams; +use crate::tests::arb_params_and_genesis_validators; +use crate::tests::utils::pause_for_enter; +use crate::types::{ + BondId, GenesisValidator, ReverseOrdTokenAmount, Slash, SlashType, + ValidatorState, WeightedValidator, +}; +use crate::{ + below_capacity_validator_set_handle, bond_handle, + consensus_validator_set_handle, delegator_redelegated_bonds_handle, + enqueued_slashes_handle, find_slashes_in_range, + 
read_below_threshold_validator_set_addresses, read_pos_params, + redelegate_tokens, validator_deltas_handle, validator_slashes_handle, + validator_state_handle, RedelegationError, +}; + +prop_state_machine! { + #![proptest_config(Config { + cases: 2, + .. Config::default() + })] + #[ignore] + #[test] + /// A `StateMachineTest` implemented on `PosState` + fn pos_state_machine_test_v2(sequential 1000 => ConcretePosState); +} + +/// Abstract representation of a state of PoS system +#[derive(Clone, Derivative)] +#[derivative(Debug)] +struct AbstractPosState { + /// Current epoch + epoch: Epoch, + /// Parameters + params: PosParams, + /// Genesis validators + #[derivative(Debug = "ignore")] + genesis_validators: Vec, + /// Records of bonds, unbonds, withdrawal and redelegations with slashes, + /// if any + validator_records: BTreeMap, + /// Validator stakes. These are NOT deltas. + /// Pipelined. + validator_stakes: BTreeMap>, + /// Consensus validator set. Pipelined. + consensus_set: BTreeMap>>, + /// Below-capacity validator set. Pipelined. + below_capacity_set: + BTreeMap>>, + /// Below-threshold validator set. Pipelined. + below_threshold_set: BTreeMap>, + /// Validator states. Pipelined. 
+ validator_states: BTreeMap>, + /// Validator slashes post-processing + validator_slashes: BTreeMap>, + /// Enqueued slashes pre-processing + enqueued_slashes: BTreeMap>>, + /// The last epoch in which a validator committed an infraction + validator_last_slash_epochs: BTreeMap, +} + +impl AbstractPosState { + /// Copy validator sets and validator states at the given epoch from its + /// predecessor + fn copy_discrete_epoched_data(&mut self, epoch: Epoch) { + let prev_epoch = epoch.prev(); + // Copy the non-delta data from the last epoch into the new one + self.consensus_set.insert( + epoch, + self.consensus_set.get(&prev_epoch).unwrap().clone(), + ); + self.below_capacity_set.insert( + epoch, + self.below_capacity_set.get(&prev_epoch).unwrap().clone(), + ); + self.below_threshold_set.insert( + epoch, + self.below_threshold_set.get(&prev_epoch).unwrap().clone(), + ); + self.validator_states.insert( + epoch, + self.validator_states.get(&prev_epoch).unwrap().clone(), + ); + self.validator_stakes.insert( + epoch, + self.validator_stakes.get(&prev_epoch).unwrap().clone(), + ); + } + + /// Add a bond. + fn bond( + &mut self, + BondId { source, validator }: &BondId, + amount: token::Amount, + ) { + let start = self.pipeline(); + + let records = self.records_mut(validator, source); + let bond_at_start = records.bonds.entry(start).or_default(); + bond_at_start.tokens.amount += amount; + + let change = amount.change(); + let pipeline_state = self + .validator_states + .get(&start) + .unwrap() + .get(validator) + .unwrap(); + // Validator sets need to be updated before total stake + if *pipeline_state != ValidatorState::Jailed { + self.update_validator_sets(validator, change, self.pipeline()); + } + self.update_validator_total_stake(validator, change, self.pipeline()); + } + + /// Unbond a bond. 
+ fn unbond( + &mut self, + BondId { source, validator }: &BondId, + amount: token::Amount, + ) { + // Last epoch in which it contributes to stake + let end = self.pipeline().prev(); + let withdrawable_epoch = + self.epoch + self.params.withdrawable_epoch_offset(); + let pipeline_len = self.params.pipeline_len; + + let records = self.records_mut(validator, source); + // The amount requested is before any slashing that may be applicable + let mut to_unbond = amount; + let mut amount_after_slashing = token::Amount::zero(); + + 'bonds_iter: for (&start, bond) in records.bonds.iter_mut().rev() { + // In every loop, try to unbond redelegations first. We have to + // go in reverse order of the start epoch to match the order of + // unbond in the implementation. + for (dest_validator, redelegs) in bond.incoming_redelegs.iter_mut() + { + let _redeleg_epoch = start - pipeline_len; + + for (&src_bond_start, redeleg) in + redelegs.tokens.iter_mut().rev() + { + let amount_before_slashing = + redeleg.amount_before_slashing(); + + let unbonded = if to_unbond >= amount_before_slashing { + // Unbond the whole bond + to_unbond -= amount_before_slashing; + amount_after_slashing += redeleg.amount; + + mem::take(redeleg) + } else { + // We have to divide this bond in case there are slashes + let unbond_slash = + to_unbond.mul_ceil(redeleg.slash_rates_sum()); + let to_unbond_after_slash = to_unbond - unbond_slash; + + to_unbond = token::Amount::zero(); + amount_after_slashing += to_unbond_after_slash; + + redeleg.amount -= to_unbond_after_slash; + let removed_slashes = + redeleg.subtract_slash(unbond_slash); + + TokensWithSlashes { + amount: to_unbond_after_slash, + slashes: removed_slashes, + } + }; + + let unbond = + bond.unbonds.entry(end).or_insert_with(|| Unbond { + withdrawable_epoch, + tokens: Default::default(), + incoming_redelegs: Default::default(), + }); + debug_assert_eq!( + unbond.withdrawable_epoch, + withdrawable_epoch + ); + let redeleg_unbond = unbond + 
.incoming_redelegs + .entry(dest_validator.clone()) + .or_default(); + let redeleg_unbond_tokens = redeleg_unbond + .tokens + .entry(src_bond_start) + .or_default(); + redeleg_unbond_tokens.amount += unbonded.amount; + redeleg_unbond_tokens.add_slashes(&unbonded.slashes); + + // Stop once all is unbonded + if to_unbond.is_zero() { + break 'bonds_iter; + } + } + } + + // Then try to unbond regular bonds + if !to_unbond.is_zero() { + let amount_before_slashing = + bond.tokens.amount_before_slashing(); + + let unbonded = if to_unbond >= amount_before_slashing { + // Unbond the whole bond + to_unbond -= amount_before_slashing; + amount_after_slashing += bond.tokens.amount; + + mem::take(&mut bond.tokens) + } else { + // We have to divide this bond in case there are slashes + let unbond_slash = + to_unbond.mul_ceil(bond.tokens.slash_rates_sum()); + let to_unbond_after_slash = to_unbond - unbond_slash; + + to_unbond = token::Amount::zero(); + amount_after_slashing += to_unbond_after_slash; + + bond.tokens.amount -= to_unbond_after_slash; + let removed_slashes = + bond.tokens.subtract_slash(unbond_slash); + + TokensWithSlashes { + amount: to_unbond_after_slash, + slashes: removed_slashes, + } + }; + + let unbond = + bond.unbonds.entry(end).or_insert_with(|| Unbond { + withdrawable_epoch, + tokens: Default::default(), + incoming_redelegs: Default::default(), + }); + debug_assert_eq!(unbond.withdrawable_epoch, withdrawable_epoch); + unbond.tokens.amount += unbonded.amount; + unbond.tokens.add_slashes(&unbonded.slashes); + + // Stop once all is unbonded + if to_unbond.is_zero() { + break; + } + } + } + assert!(to_unbond.is_zero()); + + let pipeline_state = self + .validator_states + .get(&self.pipeline()) + .unwrap() + .get(validator) + .unwrap(); + if *pipeline_state != ValidatorState::Jailed { + self.update_validator_sets( + validator, + -amount_after_slashing.change(), + self.pipeline(), + ); + } + self.update_validator_total_stake( + validator, + 
-amount_after_slashing.change(), + self.pipeline(), + ); + } + + /// Redelegate a bond. + fn redelegate( + &mut self, + BondId { source, validator }: &BondId, + new_validator: &Address, + amount: token::Amount, + ) { + // Last epoch in which it contributes to stake of thhe source validator + let current_epoch = self.epoch; + let pipeline = self.pipeline(); + let src_end = pipeline.prev(); + let withdrawable_epoch_offset = self.params.withdrawable_epoch_offset(); + let pipeline_len = self.params.pipeline_len; + + let records = self.records_mut(validator, source); + + // The amount requested is before any slashing that may be applicable + let mut to_unbond = amount; + let mut amount_after_slashing = token::Amount::zero(); + // Keyed by redelegation src bond start epoch + let mut dest_incoming_redelegs = + BTreeMap::::new(); + + 'bonds_iter: for (&start, bond) in records.bonds.iter_mut().rev() { + // In every loop, try to redelegate redelegations first. We have to + // go in reverse order of the start epoch to match the order of + // redelegation in the implementation. 
+ for (_src_validator, redelegs) in + bond.incoming_redelegs.iter_mut().rev() + { + let _redeleg_epoch = start - pipeline_len; + + for (_src_bond_start, redeleg) in + redelegs.tokens.iter_mut().rev() + { + let amount_before_slashing = + redeleg.amount_before_slashing(); + + // No chained redelegations + if Epoch( + start.0.checked_sub(pipeline_len).unwrap_or_default(), + ) + withdrawable_epoch_offset + <= current_epoch + { + let unbonded = if to_unbond >= amount_before_slashing { + // Unbond the whole bond + to_unbond -= amount_before_slashing; + amount_after_slashing += redeleg.amount; + + mem::take(redeleg) + } else { + // We have to divide this bond in case there are + // slashes + let unbond_slash = + to_unbond.mul_ceil(redeleg.slash_rates_sum()); + let to_unbond_after_slash = + to_unbond - unbond_slash; + + to_unbond = token::Amount::zero(); + amount_after_slashing += to_unbond_after_slash; + + redeleg.amount -= to_unbond_after_slash; + let removed_slashes = + redeleg.subtract_slash(unbond_slash); + + TokensWithSlashes { + amount: to_unbond_after_slash, + slashes: removed_slashes, + } + }; + + let outgoing_redeleg = bond + .outgoing_redelegs + .entry(src_end) + .or_default() + .entry(new_validator.clone()) + .or_default(); + + outgoing_redeleg.amount += unbonded.amount; + outgoing_redeleg.add_slashes(&unbonded.slashes); + + let redeleg = + dest_incoming_redelegs.entry(start).or_default(); + redeleg.amount += unbonded.amount; + redeleg.add_slashes(&unbonded.slashes); + + // Stop once all is unbonded + if to_unbond.is_zero() { + break 'bonds_iter; + } + } + } + } + + // Then try to redelegate regular bonds + if !to_unbond.is_zero() { + let amount_before_slashing = + bond.tokens.amount_before_slashing(); + + let unbonded = if to_unbond >= amount_before_slashing { + // Unbond the whole bond + to_unbond -= amount_before_slashing; + amount_after_slashing += bond.tokens.amount; + + mem::take(&mut bond.tokens) + } else { + // We have to divide this bond in case there 
are slashes + let unbond_slash = + to_unbond.mul_ceil(bond.tokens.slash_rates_sum()); + let to_unbond_after_slash = to_unbond - unbond_slash; + + to_unbond = token::Amount::zero(); + amount_after_slashing += to_unbond_after_slash; + + bond.tokens.amount -= to_unbond_after_slash; + let removed_slashes = + bond.tokens.subtract_slash(unbond_slash); + + TokensWithSlashes { + amount: to_unbond_after_slash, + slashes: removed_slashes, + } + }; + + let outgoing_redeleg = bond + .outgoing_redelegs + .entry(src_end) + .or_default() + .entry(new_validator.clone()) + .or_default(); + outgoing_redeleg.amount += unbonded.amount; + outgoing_redeleg.add_slashes(&unbonded.slashes); + let dest_incoming_redeleg = + dest_incoming_redelegs.entry(start).or_default(); + dest_incoming_redeleg.amount += unbonded.amount; + dest_incoming_redeleg.add_slashes(&unbonded.slashes); + } + // Stop once all is unbonded + if to_unbond.is_zero() { + break; + } + } + assert!(to_unbond.is_zero()); + + // Record the incoming redelegations on destination validator + let dest_records = self.records_mut(new_validator, source); + let redeleg = dest_records + .bonds + .entry(pipeline) + .or_default() + .incoming_redelegs + .entry(validator.clone()) + .or_default(); + for (start, inc_redeleg) in dest_incoming_redelegs { + let redeleg_tokens = redeleg.tokens.entry(start).or_default(); + redeleg_tokens.amount += inc_redeleg.amount; + redeleg_tokens.add_slashes(&inc_redeleg.slashes); + } + + // Update stake of src validator + let src_pipeline_state = self + .validator_states + .get(&self.pipeline()) + .unwrap() + .get(validator) + .unwrap(); + if *src_pipeline_state != ValidatorState::Jailed { + self.update_validator_sets( + validator, + -amount_after_slashing.change(), + self.pipeline(), + ); + } + self.update_validator_total_stake( + validator, + -amount_after_slashing.change(), + self.pipeline(), + ); + + // Update stake of dest validator + let dest_pipeline_state = self + .validator_states + 
.get(&self.pipeline()) + .unwrap() + .get(new_validator) + .unwrap(); + if *dest_pipeline_state != ValidatorState::Jailed { + self.update_validator_sets( + new_validator, + amount_after_slashing.change(), + self.pipeline(), + ); + } + self.update_validator_total_stake( + new_validator, + amount_after_slashing.change(), + self.pipeline(), + ); + } + + /// Withdraw all unbonds that can be withdrawn. + fn withdraw(&mut self, BondId { source, validator }: &BondId) { + let epoch = self.epoch; + let records = self.records_mut(validator, source); + let mut to_store = BTreeMap::::new(); + for (_start, bond) in records.bonds.iter_mut() { + bond.unbonds.retain(|_end, unbond| { + let is_withdrawable = unbond.withdrawable_epoch <= epoch; + if is_withdrawable { + let withdrawn = to_store.entry(epoch).or_default(); + withdrawn.amount += unbond.tokens.amount; + withdrawn.add_slashes(&unbond.tokens.slashes); + for redeleg in unbond.incoming_redelegs.values() { + for tokens in redeleg.tokens.values() { + withdrawn.amount += tokens.amount; + withdrawn.add_slashes(&tokens.slashes); + } + } + } + !is_withdrawable + }) + } + records.withdrawn.extend(to_store.into_iter()); + } + + /// Get or insert default mutable records + fn records_mut( + &mut self, + validator: &Address, + source: &Address, + ) -> &mut Records { + self.validator_records + .entry(validator.clone()) + .or_default() + .per_source + .entry(source.clone()) + .or_default() + } + + /// Get records + fn records( + &self, + validator: &Address, + source: &Address, + ) -> Option<&Records> { + self.validator_records + .get(validator) + .and_then(|records| records.per_source.get(source)) + } + + /// Update validator's total stake with bonded or unbonded change at the + /// pipeline epoch + fn update_validator_total_stake( + &mut self, + validator: &Address, + change: token::Change, + epoch: Epoch, + ) { + let total_stakes = self + .validator_stakes + .entry(epoch) + .or_default() + .entry(validator.clone()) + .or_default(); + 
tracing::debug!("TOTAL {validator} stakes before {}", total_stakes); + *total_stakes += change; + tracing::debug!("TOTAL {validator} stakes after {}", total_stakes); + } + + /// Update validator in sets with bonded or unbonded change (should be + /// called with epoch at pipeline) or slashes. + fn update_validator_sets( + &mut self, + validator: &Address, + change: token::Change, + epoch: Epoch, + ) { + let consensus_set = self.consensus_set.entry(epoch).or_default(); + let below_cap_set = self.below_capacity_set.entry(epoch).or_default(); + let below_thresh_set = + self.below_threshold_set.entry(epoch).or_default(); + + let validator_stakes = self.validator_stakes.get(&epoch).unwrap(); + let validator_states = self.validator_states.get_mut(&epoch).unwrap(); + + let state_pre = validator_states.get(validator).unwrap(); + + let this_val_stake_pre = *validator_stakes.get(validator).unwrap(); + let this_val_stake_post = + token::Amount::from_change(this_val_stake_pre + change); + let this_val_stake_pre = token::Amount::from_change(this_val_stake_pre); + + let threshold = self.params.validator_stake_threshold; + if this_val_stake_pre < threshold && this_val_stake_post < threshold { + // Validator is already below-threshold and will remain there, so do + // nothing + debug_assert!(below_thresh_set.contains(validator)); + return; + } + + match state_pre { + ValidatorState::Consensus => { + // tracing::debug!("Validator initially in consensus"); + // Remove from the prior stake + let vals = consensus_set.entry(this_val_stake_pre).or_default(); + // dbg!(&vals); + vals.retain(|addr| addr != validator); + // dbg!(&vals); + + if vals.is_empty() { + consensus_set.remove(&this_val_stake_pre); + } + + // If posterior stake is below threshold, place into the + // below-threshold set + if this_val_stake_post < threshold { + below_thresh_set.insert(validator.clone()); + validator_states.insert( + validator.clone(), + ValidatorState::BelowThreshold, + ); + + // Promote the next 
below-cap validator if there is one + if let Some(mut max_below_cap) = below_cap_set.last_entry() + { + let max_below_cap_stake = *max_below_cap.key(); + let vals = max_below_cap.get_mut(); + let promoted_val = vals.pop_front().unwrap(); + // Remove the key if there's nothing left + if vals.is_empty() { + below_cap_set.remove(&max_below_cap_stake); + } + + consensus_set + .entry(max_below_cap_stake.0) + .or_default() + .push_back(promoted_val.clone()); + validator_states + .insert(promoted_val, ValidatorState::Consensus); + } + + return; + } + + // If unbonding, check the max below-cap validator's state if we + // need to do a swap + if change < token::Change::zero() { + if let Some(mut max_below_cap) = below_cap_set.last_entry() + { + let max_below_cap_stake = *max_below_cap.key(); + if max_below_cap_stake.0 > this_val_stake_post { + // Swap this validator with the max below-cap + let vals = max_below_cap.get_mut(); + let first_val = vals.pop_front().unwrap(); + // Remove the key if there's nothing left + if vals.is_empty() { + below_cap_set.remove(&max_below_cap_stake); + } + // Do the swap in the validator sets + consensus_set + .entry(max_below_cap_stake.0) + .or_default() + .push_back(first_val.clone()); + below_cap_set + .entry(this_val_stake_post.into()) + .or_default() + .push_back(validator.clone()); + + // Change the validator states + validator_states + .insert(first_val, ValidatorState::Consensus); + validator_states.insert( + validator.clone(), + ValidatorState::BelowCapacity, + ); + + // And we're done here + return; + } + } + } + + // Insert with the posterior stake + consensus_set + .entry(this_val_stake_post) + .or_default() + .push_back(validator.clone()); + } + ValidatorState::BelowCapacity => { + // tracing::debug!("Validator initially in below-cap"); + + // Remove from the prior stake + let vals = + below_cap_set.entry(this_val_stake_pre.into()).or_default(); + vals.retain(|addr| addr != validator); + if vals.is_empty() { + 
below_cap_set.remove(&this_val_stake_pre.into()); + } + + // If posterior stake is below threshold, place into the + // below-threshold set + if this_val_stake_post < threshold { + below_thresh_set.insert(validator.clone()); + validator_states.insert( + validator.clone(), + ValidatorState::BelowThreshold, + ); + return; + } + + // If bonding, check the min consensus validator's state if we + // need to do a swap + if change >= token::Change::zero() { + // dbg!(&consensus_set); + if let Some(mut min_consensus) = consensus_set.first_entry() + { + // dbg!(&min_consensus); + let min_consensus_stake = *min_consensus.key(); + if this_val_stake_post > min_consensus_stake { + // Swap this validator with the max consensus + let vals = min_consensus.get_mut(); + let last_val = vals.pop_back().unwrap(); + // Remove the key if there's nothing left + if vals.is_empty() { + consensus_set.remove(&min_consensus_stake); + } + // Do the swap in the validator sets + below_cap_set + .entry(min_consensus_stake.into()) + .or_default() + .push_back(last_val.clone()); + consensus_set + .entry(this_val_stake_post) + .or_default() + .push_back(validator.clone()); + + // Change the validator states + validator_states.insert( + validator.clone(), + ValidatorState::Consensus, + ); + validator_states.insert( + last_val, + ValidatorState::BelowCapacity, + ); + + // And we're done here + return; + } + } + } + + // Insert with the posterior stake + below_cap_set + .entry(this_val_stake_post.into()) + .or_default() + .push_back(validator.clone()); + } + ValidatorState::BelowThreshold => { + // We know that this validator will be promoted into one of the + // higher sets, so first remove from the below-threshold set. 
+ below_thresh_set.remove(validator); + + let num_consensus = + consensus_set.iter().fold(0, |sum, (_, validators)| { + sum + validators.len() as u64 + }); + if num_consensus < self.params.max_validator_slots { + // Place the validator directly into the consensus set + consensus_set + .entry(this_val_stake_post) + .or_default() + .push_back(validator.clone()); + validator_states + .insert(validator.clone(), ValidatorState::Consensus); + return; + } + // Determine which set to place the validator into + if let Some(mut min_consensus) = consensus_set.first_entry() { + // dbg!(&min_consensus); + let min_consensus_stake = *min_consensus.key(); + if this_val_stake_post > min_consensus_stake { + // Swap this validator with the max consensus + let vals = min_consensus.get_mut(); + let last_val = vals.pop_back().unwrap(); + // Remove the key if there's nothing left + if vals.is_empty() { + consensus_set.remove(&min_consensus_stake); + } + // Do the swap in the validator sets + below_cap_set + .entry(min_consensus_stake.into()) + .or_default() + .push_back(last_val.clone()); + consensus_set + .entry(this_val_stake_post) + .or_default() + .push_back(validator.clone()); + + // Change the validator states + validator_states.insert( + validator.clone(), + ValidatorState::Consensus, + ); + validator_states + .insert(last_val, ValidatorState::BelowCapacity); + } else { + // Place the validator into the below-capacity set + below_cap_set + .entry(this_val_stake_post.into()) + .or_default() + .push_back(validator.clone()); + validator_states.insert( + validator.clone(), + ValidatorState::BelowCapacity, + ); + } + } + } + ValidatorState::Inactive => { + panic!("unexpected state") + } + ValidatorState::Jailed => { + panic!("unexpected state (jailed)") + } + } + } + + fn process_enqueued_slashes(&mut self) { + let slashes_this_epoch = self + .enqueued_slashes + .get(&self.epoch) + .cloned() + .unwrap_or_default(); + if !slashes_this_epoch.is_empty() { + let infraction_epoch = 
self.epoch + - self.params.unbonding_len + - self.params.cubic_slashing_window_length + - 1; + + let cubic_rate = self.cubic_slash_rate(); + for (validator, slashes) in slashes_this_epoch { + // Slash this validator on it's full stake at infration + self.slash_a_validator( + &validator, + &slashes, + infraction_epoch, + cubic_rate, + ); + } + } + } + + fn slash_a_validator( + &mut self, + validator: &Address, + slashes: &[Slash], + infraction_epoch: Epoch, + cubic_rate: Dec, + ) { + let current_epoch = self.epoch; + let mut total_rate = Dec::zero(); + + for slash in slashes { + debug_assert_eq!(slash.epoch, infraction_epoch); + let rate = + cmp::max(slash.r#type.get_slash_rate(&self.params), cubic_rate); + let processed_slash = Slash { + epoch: slash.epoch, + block_height: slash.block_height, + r#type: slash.r#type, + rate, + }; + let cur_slashes = + self.validator_slashes.entry(validator.clone()).or_default(); + cur_slashes.push(processed_slash.clone()); + + total_rate += rate; + } + total_rate = cmp::min(total_rate, Dec::one()); + tracing::debug!("Total rate: {}", total_rate); + + // Find validator stakes before slashing for up to pipeline epoch + let mut validator_stakes_pre = + BTreeMap::>::new(); + for epoch in + Epoch::iter_bounds_inclusive(current_epoch, self.pipeline()) + { + for (validator, records) in &self.validator_records { + let stake = records.stake(epoch); + validator_stakes_pre + .entry(epoch) + .or_default() + .insert(validator.clone(), stake); + } + } + + let mut redelegations_to_slash = BTreeMap::< + Address, + BTreeMap>>, + >::new(); + for (addr, records) in self.validator_records.iter_mut() { + if addr == validator { + for (source, records) in records.per_source.iter_mut() { + // Apply slashes on non-redelegated bonds + records.slash(total_rate, infraction_epoch, current_epoch); + + // Slash tokens in the outgoing redelegation records for + // this validator + for (&start, bond) in records.bonds.iter_mut() { + for (&end, redelegs) in + 
bond.outgoing_redelegs.iter_mut() + { + if start <= infraction_epoch + && end >= infraction_epoch + { + for (dest, tokens) in redelegs.iter_mut() { + let slashed = tokens.slash( + total_rate, + infraction_epoch, + current_epoch, + ); + // Store the redelegation slashes to apply + // on destination validator + *redelegations_to_slash + .entry(dest.clone()) + .or_default() + .entry(source.clone()) + .or_default() + .entry( + // start epoch of redelegation + end.next(), + ) + .or_default() + // redelegation src bond start epoch + .entry(start) + .or_default() += TokensSlash { + amount: slashed, + rate: total_rate, + }; + } + } + } + } + } + } + } + // Apply redelegation slashes on destination validator + for (dest_validator, redelegations) in redelegations_to_slash { + for (source, tokens) in redelegations { + for (redelegation_start, slashes) in tokens { + for (src_bond_start, slash) in slashes { + let records = self + .validator_records + .get_mut(&dest_validator) + .unwrap() + .per_source + .get_mut(&source) + .unwrap(); + records.subtract_redelegation_slash( + validator, + src_bond_start, + redelegation_start, + slash, + current_epoch, + ); + } + } + } + } + + // Find validator stakes after slashing for up to pipeline epoch + let mut validator_stakes_post = + BTreeMap::>::new(); + for epoch in + Epoch::iter_bounds_inclusive(current_epoch, self.pipeline()) + { + for (validator, records) in &self.validator_records { + let stake = records.stake(epoch); + validator_stakes_post + .entry(epoch) + .or_default() + .insert(validator.clone(), stake); + } + } + + // Apply the difference in stakes to validator_stakes, states and deltas + for epoch in + Epoch::iter_bounds_inclusive(current_epoch, self.pipeline()) + { + for (validator_to_update, &stake_post) in + validator_stakes_post.get(&epoch).unwrap() + { + let stake_pre = validator_stakes_pre + .get(&epoch) + .unwrap() + .get(validator_to_update) + .cloned() + .unwrap_or_default(); + let change = stake_post.change() - 
stake_pre.change(); + + if !change.is_zero() { + let state = self + .validator_states + .get(&epoch) + .unwrap() + .get(validator_to_update) + .unwrap(); + // Validator sets need to be updated before total + // stake + if *state != ValidatorState::Jailed { + self.update_validator_sets( + validator_to_update, + change, + epoch, + ); + } + self.update_validator_total_stake( + validator_to_update, + change, + epoch, + ); + } + } + } + } + + /// Get the pipeline epoch + fn pipeline(&self) -> Epoch { + self.epoch + self.params.pipeline_len + } + + /// Check if the given address is of a known validator + fn is_validator(&self, validator: &Address, epoch: Epoch) -> bool { + self.validator_states + .get(&epoch) + .unwrap() + .keys() + .any(|val| val == validator) + } + + fn is_in_consensus_w_info( + &self, + validator: &Address, + epoch: Epoch, + ) -> Option<(usize, token::Amount)> { + for (stake, vals) in self.consensus_set.get(&epoch).unwrap() { + if let Some(index) = vals.iter().position(|val| val == validator) { + return Some((index, *stake)); + } + } + None + } + + fn is_in_below_capacity_w_info( + &self, + validator: &Address, + epoch: Epoch, + ) -> Option<(usize, token::Amount)> { + for (stake, vals) in self.below_capacity_set.get(&epoch).unwrap() { + if let Some(index) = vals.iter().position(|val| val == validator) { + return Some((index, (*stake).into())); + } + } + None + } + + fn is_in_below_threshold(&self, validator: &Address, epoch: Epoch) -> bool { + self.below_threshold_set + .get(&epoch) + .unwrap() + .iter() + .any(|val| val == validator) + } + + /// Find the sum of bonds that can be unbonded. The returned amounts are + /// prior to slashing. 
+ fn unbondable_bonds(&self) -> BTreeMap { + let mut sums = BTreeMap::::new(); + for (validator, records) in &self.validator_records { + for (source, record) in &records.per_source { + let unbondable = sums + .entry(BondId { + source: source.clone(), + validator: validator.clone(), + }) + .or_default(); + // Add bonds and incoming redelegations + for (&start, bond) in &record.bonds { + *unbondable += bond.tokens.amount_before_slashing(); + for redeleg in bond.incoming_redelegs.values() { + let redeleg_epoch = start - self.params.pipeline_len; + *unbondable += redeleg + .amount_before_slashing_after_redeleg( + redeleg_epoch, + ); + } + } + } + } + // Filter out any 0s. + sums.retain(|_id, tokens| !tokens.is_zero()); + sums + } + + /// Find the sum of bonds that can be redelegated. The returned amounts are + /// prior to slashing. + fn redelegatable_bonds(&self) -> BTreeMap { + let mut sums = BTreeMap::::new(); + for (validator, records) in &self.validator_records { + for (source, record) in &records.per_source { + // Self-bonds cannot be redelegated + if validator != source { + let unbondable = sums + .entry(BondId { + source: source.clone(), + validator: validator.clone(), + }) + .or_default(); + // Add bonds + for (&start, bond) in &record.bonds { + *unbondable += bond.tokens.amount_before_slashing(); + // Add redelegations + for redeleg in bond.incoming_redelegs.values() { + // No chained redelegations + if Epoch( + start + .0 + .checked_sub(self.params.pipeline_len) + .unwrap_or_default(), + ) + self.params.withdrawable_epoch_offset() + <= self.epoch + { + *unbondable += redeleg.amount_before_slashing(); + } + } + } + } + } + } + // Filter out any 0s. 
+ sums.retain(|_id, tokens| !tokens.is_zero()); + sums + } + + fn unchainable_redelegations(&self) -> BTreeSet { + let mut unchainable = BTreeSet::new(); + for records in self.validator_records.values() { + for (owner, records) in &records.per_source { + for bond in records.bonds.values() { + for (&end, redelegs) in &bond.outgoing_redelegs { + // If the outgoing redelegation is still slashable for + // source validator ... + if end + self.params.slash_processing_epoch_offset() + > self.epoch + { + // ... it cannot be redelegated for now + for (dest_validator, tokens) in redelegs { + if !tokens.is_zero() { + unchainable.insert(BondId { + source: owner.clone(), + validator: dest_validator.clone(), + }); + } + } + } + } + } + } + } + unchainable + } + + /// Find the sums of withdrawable unbonds + fn withdrawable_unbonds(&self) -> BTreeMap { + let mut withdrawable = BTreeMap::::new(); + for (validator, records) in &self.validator_records { + for (source, records) in &records.per_source { + for bond in records.bonds.values() { + for unbond in bond.unbonds.values() { + if unbond.withdrawable_epoch <= self.epoch { + let entry = withdrawable + .entry(BondId { + source: source.clone(), + validator: validator.clone(), + }) + .or_default(); + // Add withdrawable unbonds including redelegations + *entry += unbond.amount_before_slashing(); + } + } + } + } + } + withdrawable + } + + fn existing_bond_ids(&self) -> Vec { + let mut ids = Vec::new(); + for (validator, records) in &self.validator_records { + for source in records.per_source.keys() { + ids.push(BondId { + source: source.clone(), + validator: validator.clone(), + }); + } + } + ids + } + + /// Compute the cubic slashing rate for the current epoch + fn cubic_slash_rate(&self) -> Dec { + let infraction_epoch = self.epoch + - self.params.unbonding_len + - 1_u64 + - self.params.cubic_slashing_window_length; + tracing::debug!("Infraction epoch: {}", infraction_epoch); + let window_width = 
self.params.cubic_slashing_window_length; + let epoch_start = Epoch::from( + infraction_epoch + .0 + .checked_sub(window_width) + .unwrap_or_default(), + ); + let epoch_end = infraction_epoch + window_width; + + // Calculate cubic slashing rate with the abstract state + let mut vp_frac_sum = Dec::zero(); + for epoch in Epoch::iter_bounds_inclusive(epoch_start, epoch_end) { + let consensus_stake = + self.consensus_set.get(&epoch).unwrap().iter().fold( + token::Amount::zero(), + |sum, (val_stake, validators)| { + sum + *val_stake * validators.len() as u64 + }, + ); + tracing::debug!( + "Consensus stake in epoch {}: {}", + epoch, + consensus_stake.to_string_native() + ); + + let processing_epoch = epoch + + self.params.unbonding_len + + 1_u64 + + self.params.cubic_slashing_window_length; + let enqueued_slashes = self.enqueued_slashes.get(&processing_epoch); + if let Some(enqueued_slashes) = enqueued_slashes { + for (validator, slashes) in enqueued_slashes.iter() { + let val_stake = token::Amount::from_change( + self.validator_stakes + .get(&epoch) + .unwrap() + .get(validator) + .cloned() + .unwrap_or_default(), + ); + tracing::debug!( + "Val {} stake epoch {}: {}", + &validator, + epoch, + val_stake.to_string_native(), + ); + vp_frac_sum += Dec::from(slashes.len()) + * Dec::from(val_stake) + / Dec::from(consensus_stake); + } + } + } + let vp_frac_sum = cmp::min(Dec::one(), vp_frac_sum); + tracing::debug!("vp_frac_sum: {}", vp_frac_sum); + + cmp::min( + Dec::new(9, 0).unwrap() * vp_frac_sum * vp_frac_sum, + Dec::one(), + ) + } + + fn debug_validators(&self) { + let current_epoch = self.epoch; + for epoch in + Epoch::iter_bounds_inclusive(current_epoch, self.pipeline()) + { + let mut min_consensus = token::Amount::from(u64::MAX); + let consensus = self.consensus_set.get(&epoch).unwrap(); + for (amount, vals) in consensus { + if *amount < min_consensus { + min_consensus = *amount; + } + for val in vals { + let deltas_stake = self + .validator_stakes + .get(&epoch) + 
.unwrap() + .get(val) + .unwrap(); + let val_state = self + .validator_states + .get(&epoch) + .unwrap() + .get(val) + .unwrap(); + debug_assert_eq!( + *amount, + token::Amount::from_change(*deltas_stake) + ); + debug_assert_eq!(*val_state, ValidatorState::Consensus); + } + } + let mut max_bc = token::Amount::zero(); + let bc = self.below_capacity_set.get(&epoch).unwrap(); + for (amount, vals) in bc { + if token::Amount::from(*amount) > max_bc { + max_bc = token::Amount::from(*amount); + } + for val in vals { + let deltas_stake = self + .validator_stakes + .get(&epoch) + .unwrap() + .get(val) + .cloned() + .unwrap_or_default(); + let val_state = self + .validator_states + .get(&epoch) + .unwrap() + .get(val) + .unwrap(); + debug_assert_eq!( + token::Amount::from(*amount), + token::Amount::from_change(deltas_stake) + ); + debug_assert_eq!(*val_state, ValidatorState::BelowCapacity); + } + } + if max_bc > min_consensus { + tracing::debug!( + "min_consensus = {}, max_bc = {}", + min_consensus.to_string_native(), + max_bc.to_string_native() + ); + } + assert!(min_consensus >= max_bc); + + for addr in self.below_threshold_set.get(&epoch).unwrap() { + let state = self + .validator_states + .get(&epoch) + .unwrap() + .get(addr) + .unwrap(); + + assert_eq!(*state, ValidatorState::BelowThreshold); + } + + for addr in self + .validator_states + .get(&epoch) + .unwrap() + .keys() + .cloned() + .collect::>() + { + if let (None, None, false) = ( + self.is_in_consensus_w_info(&addr, epoch), + self.is_in_below_capacity_w_info(&addr, epoch), + self.is_in_below_threshold(&addr, epoch), + ) { + assert_eq!( + self.validator_states + .get(&epoch) + .unwrap() + .get(&addr) + .cloned(), + Some(ValidatorState::Jailed) + ); + } + } + } + } + + fn is_chained_redelegation( + unchainable_redelegations: &BTreeSet, + delegator: &Address, + src_validator: &Address, + ) -> bool { + unchainable_redelegations.contains(&BondId { + source: delegator.clone(), + validator: src_validator.clone(), + }) + 
} +} + +#[derive(Clone, Debug, Default)] +struct ValidatorRecords { + /// All records to a validator that contribute to its + /// [`ValidatorBonds::stake`]. For self-bonds the key is a validator + /// and for delegations a delegator. + per_source: BTreeMap, +} + +impl ValidatorRecords { + /// Validator's stake is a sum of bond amounts with any slashing applied. + fn stake(&self, epoch: Epoch) -> token::Amount { + let mut total = token::Amount::zero(); + for bonds in self.per_source.values() { + total += bonds.amount(epoch); + } + total + } + + /// Find how much slash rounding error at most can be tolerated for slashes + /// that were processed before or at the given epoch on a total validator's + /// stake vs sum of slashes on bond deltas, unbonded, withdrawn or + /// redelegated bonds. + /// + /// We allow `n - 1` slash rounding error for `n` number of slashes in + /// unique epochs for bonds, unbonds and withdrawals. The bond deltas, + /// unbonds and withdrawals are slashed individually and so their total + /// slashed may be more than the slash on a sum of total validator's + /// stake. + fn slash_round_err_tolerance(&self, epoch: Epoch) -> token::Amount { + let mut unique_count = 0_u64; + for record in self.per_source.values() { + unique_count += record.num_of_slashes(epoch); + } + token::Amount::from(unique_count.checked_sub(1).unwrap_or_default()) + } +} + +#[derive(Clone, Debug, Default)] +struct Records { + /// Key is a bond start epoch (when it first contributed to voting power) + /// The value contains the sum of all the bonds started at the same epoch. + bonds: BTreeMap, + /// Withdrawn tokens in the epoch + withdrawn: BTreeMap, +} + +impl Records { + /// Sum of bond amounts with any slashes that were processed before or at + /// the given epoch applied. 
+ fn amount(&self, epoch: Epoch) -> token::Amount { + let Records { + bonds, + withdrawn: _, + } = self; + let mut total = token::Amount::zero(); + for (&start, bond) in bonds { + if start <= epoch { + // Bonds + total += bond.tokens.amount; + // Add back any slashes that were processed after the given + // epoch + total += bond.tokens.slashes_sum_after_epoch(epoch); + + for (&end, unbond) in &bond.unbonds { + if end >= epoch { + // Unbonds + total += unbond.tokens.amount; + total += unbond.tokens.slashes_sum_after_epoch(epoch); + + // Unbonded incoming redelegations + for redelegs in unbond.incoming_redelegs.values() { + for tokens in redelegs.tokens.values() { + total += tokens.amount; + total += tokens.slashes_sum_after_epoch(epoch); + } + } + } + } + + // Outgoing redelegations + for (&end, redelegs) in &bond.outgoing_redelegs { + if end >= epoch { + for tokens in redelegs.values() { + total += tokens.amount; + total += tokens.slashes_sum_after_epoch(epoch); + } + } + } + + // Incoming redelegations + for redelegs in bond.incoming_redelegs.values() { + for tokens in redelegs.tokens.values() { + total += tokens.amount; + total += tokens.slashes_sum_after_epoch(epoch); + } + } + } + } + total + } + + fn slash( + &mut self, + rate: Dec, + infraction_epoch: Epoch, + processing_epoch: Epoch, + ) { + for (&start, bond) in self.bonds.iter_mut() { + if start <= infraction_epoch { + bond.slash(rate, infraction_epoch, processing_epoch); + + for (&end, unbond) in bond.unbonds.iter_mut() { + if end >= infraction_epoch { + unbond.slash(rate, infraction_epoch, processing_epoch); + } + } + } + } + } + + fn subtract_redelegation_slash( + &mut self, + src_validator: &Address, + src_bond_start: Epoch, + redelegation_start: Epoch, + mut to_sub: TokensSlash, + processing_epoch: Epoch, + ) { + // Slash redelegation destination on the next epoch + let slash_epoch = processing_epoch.next(); + let bond = self.bonds.get_mut(&redelegation_start).unwrap(); + for unbond in 
bond.unbonds.values_mut() { + if let Some(redeleg) = + unbond.incoming_redelegs.get_mut(src_validator) + { + if let Some(tokens) = redeleg.tokens.get_mut(&src_bond_start) { + if tokens.amount >= to_sub.amount { + tokens.amount -= to_sub.amount; + *tokens.slashes.entry(slash_epoch).or_default() += + to_sub; + return; + } else { + to_sub.amount -= tokens.amount; + *tokens.slashes.entry(slash_epoch).or_default() += + TokensSlash { + amount: tokens.amount, + rate: to_sub.rate, + }; + tokens.amount = token::Amount::zero(); + } + } + } + } + let redeleg = bond.incoming_redelegs.get_mut(src_validator).unwrap(); + if let Some(tokens) = redeleg.tokens.get_mut(&src_bond_start) { + tokens.amount -= to_sub.amount; + *tokens.slashes.entry(slash_epoch).or_default() += to_sub; + } else { + debug_assert!(to_sub.amount.is_zero()); + } + } + + /// Find how much slash rounding error at most can be tolerated for slashes + /// that were processed before or at the given epoch on a bond's amount vs + /// sum of slashes on bond deltas, unbonded, withdrawn or redelegated + /// bonds. + /// + /// We allow `n - 1` slash rounding error for `n` number of slashes (`fn + /// num_of_slashes`) in unique epochs for bonds, unbonds and + /// withdrawals. The bond deltas, unbonds and withdrawals are slashed + /// individually and so their total slashed may be more than the slash + /// on a sum of a bond's total amount. + fn slash_round_err_tolerance(&self, epoch: Epoch) -> token::Amount { + token::Amount::from( + self.num_of_slashes(epoch) + .checked_sub(1) + .unwrap_or_default(), + ) + } + + /// Get the number of slashes in unique epochs that were processed before or + /// at the given epoch for all bonds, unbonds, redelegs, unbonded redelegs + /// and withdrawn tokens. 
+ fn num_of_slashes(&self, epoch: Epoch) -> u64 { + let mut unique_count = 0_u64; + for bond in self.bonds.values() { + unique_count += bond.tokens.num_of_slashes(epoch); + for redeleg in bond.incoming_redelegs.values() { + for tokens in redeleg.tokens.values() { + unique_count += tokens.num_of_slashes(epoch); + } + } + for unbond in bond.unbonds.values() { + unique_count += unbond.tokens.num_of_slashes(epoch); + for redeleg in unbond.incoming_redelegs.values() { + for tokens in redeleg.tokens.values() { + unique_count += tokens.num_of_slashes(epoch); + } + } + } + } + for withdrawn in self.withdrawn.values() { + unique_count += withdrawn.num_of_slashes(epoch); + } + unique_count + } +} + +#[derive(Clone, Debug, Default)] +struct Bond { + /// Bonded amount is the amount that's been bonded originally, reduced by + /// unbonding or slashing, if any. Incoming redelegations are recorded + /// separately. + tokens: TokensWithSlashes, + /// Incoming redelegations contribute to the stake of this validator. + /// Their sum is not included in the `tokens` field. + incoming_redelegs: BTreeMap, + /// Key is end epoch in which the unbond last contributed to stake of the + /// validator. + unbonds: BTreeMap, + /// The outer key is an end epoch of the redelegated bond in which the bond + /// last contributed to voting power of this validator (the source). The + /// inner key is the redelegation destination validator. + /// + /// After a redelegation a bond transferred to destination validator is + /// liable for slashes on a source validator (key in the map) from the + /// Bond's `start` to key's `end` epoch. 
+ outgoing_redelegs: BTreeMap>, +} + +impl Bond { + fn slash( + &mut self, + rate: Dec, + infraction_epoch: Epoch, + processing_epoch: Epoch, + ) { + self.tokens.slash(rate, infraction_epoch, processing_epoch); + for (_src, redeleg) in self.incoming_redelegs.iter_mut() { + for tokens in redeleg.tokens.values_mut() { + tokens.slash(rate, infraction_epoch, processing_epoch); + } + } + } +} + +#[derive(Clone, Debug, Default)] +struct IncomingRedeleg { + /// Total amount with all slashes keyed by redelegation source bond start + tokens: BTreeMap, +} +impl IncomingRedeleg { + /// Get the token amount before any slashes that were processed after the + /// redelegation epoch. + fn amount_before_slashing_after_redeleg( + &self, + redeleg_epoch: Epoch, + ) -> token::Amount { + self.tokens + .values() + .map(|tokens| { + tokens.amount_before_slashing_after_redeleg(redeleg_epoch) + }) + .sum() + } + + // Get the token amount before any slashing. + fn amount_before_slashing(&self) -> token::Amount { + self.tokens + .values() + .map(TokensWithSlashes::amount_before_slashing) + .sum() + } +} + +#[derive(Clone, Debug, Default, PartialEq)] +struct TokensWithSlashes { + /// Token amount after any applicable slashing + amount: token::Amount, + /// Total amount that's been slashed associated with the epoch in which the + /// slash was processed. + slashes: BTreeMap, +} + +#[derive(Clone, Debug, Default, PartialEq)] +struct TokensSlash { + amount: token::Amount, + rate: Dec, +} + +impl AddAssign for TokensSlash { + fn add_assign(&mut self, rhs: Self) { + self.amount += rhs.amount; + // Cap the rate at 1 + self.rate = cmp::min(Dec::one(), self.rate + rhs.rate); + } +} + +impl TokensWithSlashes { + /// Slash on original amount before slashes that were processed after the + /// infraction epoch. Returns the slashed amount. 
+ fn slash( + &mut self, + rate: Dec, + infraction_epoch: Epoch, + processing_epoch: Epoch, + ) -> token::Amount { + // Add back slashes to slashable amount that didn't affect this epoch + // (applied after infraction epoch) + let slashable_amount = + self.amount + self.slashes_sum_after_epoch(infraction_epoch); + let amount = cmp::min(slashable_amount.mul_ceil(rate), self.amount); + if !amount.is_zero() { + self.amount -= amount; + let slash = self.slashes.entry(processing_epoch).or_default(); + *slash += TokensSlash { amount, rate }; + } + amount + } + + /// Add the given slashes at their epochs. + fn add_slashes(&mut self, slashes: &BTreeMap) { + for (&epoch, slash) in slashes { + *self.slashes.entry(epoch).or_default() += slash.clone(); + } + } + + /// Subtract the given slash amount in order of the epochs. Returns the + /// removed slashes. + fn subtract_slash( + &mut self, + mut to_slash: token::Amount, + ) -> BTreeMap { + let mut removed = BTreeMap::new(); + self.slashes.retain(|&epoch, slash| { + if to_slash.is_zero() { + return true; + } + if slash.amount > to_slash { + slash.amount -= to_slash; + removed.insert( + epoch, + TokensSlash { + amount: to_slash, + rate: slash.rate, + }, + ); + to_slash = token::Amount::zero(); + true + } else { + to_slash -= slash.amount; + removed.insert(epoch, slash.clone()); + false + } + }); + removed + } + + /// Get the token amount before any slashing. + fn amount_before_slashing(&self) -> token::Amount { + self.amount + self.slashes_sum() + } + + /// Get the token amount before any slashes that were processed after the + /// redelegation epoch. + fn amount_before_slashing_after_redeleg( + &self, + redeleg_epoch: Epoch, + ) -> token::Amount { + let mut amount = self.amount; + for (&processed_epoch, slash) in &self.slashes { + if processed_epoch > redeleg_epoch { + amount += slash.amount; + } + } + amount + } + + /// Get a sum of all slash amounts. 
+ fn slashes_sum(&self) -> token::Amount { + self.slashes + .values() + .map(|TokensSlash { amount, rate: _ }| *amount) + .sum() + } + + /// Get a sum of all slash rates, capped at 1. + fn slash_rates_sum(&self) -> Dec { + cmp::min( + Dec::one(), + self.slashes + .values() + .map(|TokensSlash { amount: _, rate }| *rate) + .sum(), + ) + } + + /// Get a sum of all slashes that were processed after the given epoch. + fn slashes_sum_after_epoch(&self, epoch: Epoch) -> token::Amount { + let mut sum = token::Amount::zero(); + for (&processed_epoch, slash) in &self.slashes { + if processed_epoch > epoch { + sum += slash.amount; + } + } + sum + } + + /// Is the sum of tokens and slashed tokens zero? I.e. Are there no tokens? + fn is_zero(&self) -> bool { + self.amount.is_zero() && self.slashes_sum().is_zero() + } + + /// Get the number of slashes in unique epochs that were processed before or + /// at the given epoch. + fn num_of_slashes(&self, epoch: Epoch) -> u64 { + self.slashes + .keys() + .filter(|&&processed| processed <= epoch) + .count() as u64 + } +} + +#[derive(Clone, Debug, Default)] +struct Unbond { + /// A first epoch from which the unbond is withdrawable. + withdrawable_epoch: Epoch, + /// Bonded amount is the amount that's been bonded originally, reduced by + /// unbonding or slashing, if any. + tokens: TokensWithSlashes, + incoming_redelegs: BTreeMap, +} + +impl Unbond { + /// Get the total unbonded amount before slashing, including any unbonded + /// redelegations. 
+ fn amount_before_slashing(&self) -> token::Amount { + self.tokens.amount_before_slashing() + + self + .incoming_redelegs + .iter() + .fold(token::Amount::zero(), |acc, (_src, redeleg)| { + acc + redeleg.amount_before_slashing() + }) + } + + fn slash( + &mut self, + rate: Dec, + infraction_epoch: Epoch, + processing_epoch: Epoch, + ) { + self.tokens.slash(rate, infraction_epoch, processing_epoch); + for (_src, redeleg) in self.incoming_redelegs.iter_mut() { + for tokens in redeleg.tokens.values_mut() { + tokens.slash(rate, infraction_epoch, processing_epoch); + } + } + } +} + +/// The PoS system under test +#[derive(Derivative)] +#[derivative(Debug)] +struct ConcretePosState { + /// Storage - contains all the PoS state + s: TestWlStorage, + /// Last reference state in debug format to print changes after transitions + #[derivative(Debug = "ignore")] + last_state_diff: DbgPrintDiff, +} + +/// State machine transitions +#[allow(clippy::large_enum_variant)] +#[derive(Clone, Derivative)] +#[derivative(Debug)] +enum Transition { + NextEpoch, + InitValidator { + address: Address, + #[derivative(Debug = "ignore")] + consensus_key: PublicKey, + #[derivative(Debug = "ignore")] + protocol_key: PublicKey, + #[derivative(Debug = "ignore")] + eth_cold_key: PublicKey, + #[derivative(Debug = "ignore")] + eth_hot_key: PublicKey, + commission_rate: Dec, + max_commission_rate_change: Dec, + }, + Bond { + id: BondId, + amount: token::Amount, + }, + Unbond { + id: BondId, + amount: token::Amount, + }, + Withdraw { + id: BondId, + }, + Redelegate { + /// A chained redelegation must fail + is_chained: bool, + id: BondId, + new_validator: Address, + amount: token::Amount, + }, + Misbehavior { + address: Address, + slash_type: SlashType, + infraction_epoch: Epoch, + height: u64, + }, + UnjailValidator { + address: Address, + }, +} + +impl StateMachineTest for ConcretePosState { + type Reference = AbstractPosState; + type SystemUnderTest = Self; + + fn init_test( + initial_state: &::State, 
+ ) -> Self::SystemUnderTest { + tracing::debug!("New test case"); + tracing::debug!( + "Genesis validators: {:#?}", + initial_state + .genesis_validators + .iter() + .map(|val| &val.address) + .collect::>() + ); + let mut s = TestWlStorage::default(); + crate::init_genesis( + &mut s, + &initial_state.params, + initial_state.genesis_validators.clone().into_iter(), + initial_state.epoch, + ) + .unwrap(); + let last_state_diff = DbgPrintDiff::new().store(initial_state); + Self { s, last_state_diff } + } + + fn apply( + mut state: Self::SystemUnderTest, + ref_state: &::State, + transition: ::Transition, + ) -> Self::SystemUnderTest { + tracing::debug!( + "{} {:#?}", + Paint::green("Transition").underline(), + Paint::yellow(&transition) + ); + + if false { + // NOTE: enable to capture and print ref state diff + let new_diff = + state.last_state_diff.print_diff_and_store(ref_state); + state.last_state_diff = new_diff; + } + + pause_for_enter(); + + let params = crate::read_pos_params(&state.s).unwrap(); + let pos_balance = read_balance( + &state.s, + &state.s.storage.native_token, + &crate::ADDRESS, + ) + .unwrap(); + tracing::debug!("PoS balance: {}", pos_balance.to_string_native()); + match transition { + Transition::NextEpoch => { + tracing::debug!("\nCONCRETE Next epoch"); + super::advance_epoch(&mut state.s, ¶ms); + + // Need to apply some slashing + let current_epoch = state.s.storage.block.epoch; + super::process_slashes(&mut state.s, current_epoch).unwrap(); + + let params = read_pos_params(&state.s).unwrap(); + state.check_next_epoch_post_conditions(¶ms); + } + Transition::InitValidator { + address, + consensus_key, + protocol_key, + eth_cold_key, + eth_hot_key, + commission_rate, + max_commission_rate_change, + } => { + tracing::debug!("\nCONCRETE Init validator"); + let current_epoch = state.current_epoch(); + + super::become_validator(super::BecomeValidator { + storage: &mut state.s, + params: ¶ms, + address: &address, + consensus_key: &consensus_key, + 
protocol_key: &protocol_key, + eth_cold_key: ð_cold_key, + eth_hot_key: ð_hot_key, + current_epoch, + commission_rate, + max_commission_rate_change, + }) + .unwrap(); + + let params = read_pos_params(&state.s).unwrap(); + state.check_init_validator_post_conditions( + current_epoch, + ¶ms, + &address, + ) + } + Transition::Bond { id, amount } => { + tracing::debug!("\nCONCRETE Bond"); + let current_epoch = state.current_epoch(); + let pipeline = current_epoch + params.pipeline_len; + let validator_stake_before_bond_cur = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + let validator_stake_before_bond_pipeline = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + + // Credit tokens to ensure we can apply the bond + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + token::credit_tokens( + &mut state.s, + &native_token, + &id.source, + amount, + ) + .unwrap(); + + let src_balance_pre = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // This must be ensured by both transitions generator and + // pre-conditions! 
+ assert!( + crate::is_validator(&state.s, &id.validator).unwrap(), + "{} is not a validator", + id.validator + ); + + // Apply the bond + super::bond_tokens( + &mut state.s, + Some(&id.source), + &id.validator, + amount, + current_epoch, + ) + .unwrap(); + + let params = read_pos_params(&state.s).unwrap(); + state.check_bond_post_conditions( + current_epoch, + ¶ms, + id.clone(), + amount, + validator_stake_before_bond_cur, + validator_stake_before_bond_pipeline, + ); + + let src_balance_post = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // Post-condition: PoS balance should increase + assert!(pos_balance_pre < pos_balance_post); + // Post-condition: The difference in PoS balance should be the + // same as in the source + assert_eq!( + pos_balance_post - pos_balance_pre, + src_balance_pre - src_balance_post + ); + } + Transition::Unbond { id, amount } => { + tracing::debug!("\nCONCRETE Unbond"); + let current_epoch = state.current_epoch(); + let pipeline = current_epoch + params.pipeline_len; + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + let src_balance_pre = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + let validator_stake_before_unbond_cur = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + let validator_stake_before_unbond_pipeline = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + + // Apply the unbond + super::unbond_tokens( + &mut state.s, + Some(&id.source), + &id.validator, + amount, + current_epoch, + false, + ) + .unwrap(); + + let params = read_pos_params(&state.s).unwrap(); + state.check_unbond_post_conditions( + current_epoch, + ¶ms, + id.clone(), + amount, + 
validator_stake_before_unbond_cur, + validator_stake_before_unbond_pipeline, + ); + + let src_balance_post = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // Post-condition: PoS balance should not change + assert_eq!(pos_balance_pre, pos_balance_post); + // Post-condition: Source balance should not change + assert_eq!(src_balance_post, src_balance_pre); + + // Check that the bonds are the same + // let abs_bonds = ref_state.bonds.get(&id).cloned().unwrap(); + // let conc_bonds = crate::bond_handle(&id.source, + // &id.validator) .get_data_handler() + // .collect_map(&state.s) + // .unwrap(); + // assert_eq!(abs_bonds, conc_bonds); + + // // Check that the unbond records are the same + // // TODO: figure out how we get entries with 0 amount in the + // // abstract version (and prevent) + // let mut abs_unbond_records = ref_state + // .unbond_records + // .get(&id.validator) + // .cloned() + // .unwrap(); + // abs_unbond_records.retain(|_, inner_map| { + // inner_map.retain(|_, value| !value.is_zero()); + // !inner_map.is_empty() + // }); + // let conc_unbond_records = + // crate::total_unbonded_handle(&id.validator) + // .collect_map(&state.s) + // .unwrap(); + // assert_eq!(abs_unbond_records, conc_unbond_records); + } + Transition::Withdraw { + id: BondId { source, validator }, + } => { + tracing::debug!("\nCONCRETE Withdraw"); + let current_epoch = state.current_epoch(); + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + // TODO: add back when slash pool is being used again + // let slash_pool = address::POS_SLASH_POOL; + let src_balance_pre = + token::read_balance(&state.s, &native_token, &source) + .unwrap(); + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + // let slash_balance_pre = + // token::read_balance(&state.s, &native_token, &slash_pool) + // .unwrap(); + + // 
Apply the withdrawal + let withdrawn = super::withdraw_tokens( + &mut state.s, + Some(&source), + &validator, + current_epoch, + ) + .unwrap(); + + let src_balance_post = + token::read_balance(&state.s, &native_token, &source) + .unwrap(); + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + // let slash_balance_post = + // token::read_balance(&state.s, &native_token, &slash_pool) + // .unwrap(); + + // Post-condition: PoS balance should decrease or not change if + // nothing was withdrawn + assert!(pos_balance_pre >= pos_balance_post); + + // Post-condition: The difference in PoS balance should be equal + // to the sum of the difference in the source and the difference + // in the slash pool + // TODO: needs slash pool + // assert_eq!( + // pos_balance_pre - pos_balance_post, + // src_balance_post - src_balance_pre + slash_balance_post + // - slash_balance_pre + // ); + + // Post-condition: The increment in source balance should be + // equal to the withdrawn amount + assert_eq!(src_balance_post - src_balance_pre, withdrawn); + + // Post-condition: The amount withdrawn must match reference + // state withdrawal + let records = ref_state.records(&validator, &source).unwrap(); + let max_slash_round_err = + records.slash_round_err_tolerance(current_epoch); + let ref_withdrawn = + records.withdrawn.get(¤t_epoch).unwrap().amount; + assert!( + ref_withdrawn <= withdrawn + && withdrawn <= ref_withdrawn + max_slash_round_err, + "Expected to withdraw from validator {validator} owner \ + {source} amount {} ({}), but withdrawn {}.", + ref_withdrawn.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +{}", + max_slash_round_err.to_string_native() + ) + }, + withdrawn.to_string_native(), + ); + } + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + tracing::debug!("\nCONCRETE Redelegate"); + + let 
current_epoch = state.current_epoch(); + let pipeline = current_epoch + params.pipeline_len; + + // Read data prior to applying the transition + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // Read validator's redelegations and bonds to find how much of + // them is slashed + let mut amount_after_slash = token::Amount::zero(); + let mut to_redelegate = amount; + + let redelegations_handle = + delegator_redelegated_bonds_handle(&id.source) + .at(&id.validator); + + let bonds: Vec> = + bond_handle(&id.source, &id.validator) + .get_data_handler() + .iter(&state.s) + .unwrap() + .collect(); + 'bonds_loop: for res in bonds.into_iter().rev() { + let (bond_start, bond_delta) = res.unwrap(); + + // Find incoming redelegations at this bond start epoch as a + // redelegation end epoch (the epoch in which it stopped to + // contributing to src) + let redeleg_end = bond_start; + let redeleg_start = + params.redelegation_start_epoch_from_end(redeleg_end); + let redelegations: Vec<_> = redelegations_handle + .at(&redeleg_end) + .iter(&state.s) + .unwrap() + .collect(); + // Iterate incoming redelegations first + for res in redelegations.into_iter().rev() { + let ( + NestedSubKey::Data { + key: src_validator, + nested_sub_key: + SubKey::Data(redeleg_src_bond_start), + }, + delta, + ) = res.unwrap(); + + // Apply slashes on this delta, if any + let mut this_amount_after_slash = delta; + + // Find redelegation source validator's slashes + let slashes = find_slashes_in_range( + &state.s, + redeleg_src_bond_start, + Some(redeleg_end), + &src_validator, + ) + .unwrap(); + for (slash_epoch, rate) in slashes { + // Only apply slashes that weren't processed before + // redelegation as those are applied eagerly + if slash_epoch + + params.slash_processing_epoch_offset() + > redeleg_start + { + let slash = delta.mul_ceil(rate); + this_amount_after_slash = + 
this_amount_after_slash + .checked_sub(slash) + .unwrap_or_default(); + } + } + // Find redelegation destination validator's slashes + let slashes = find_slashes_in_range( + &state.s, + redeleg_end, + None, + &id.validator, + ) + .unwrap(); + for (_slash_epoch, rate) in slashes { + let slash = delta.mul_ceil(rate); + this_amount_after_slash = this_amount_after_slash + .checked_sub(slash) + .unwrap_or_default(); + } + + if to_redelegate >= delta { + amount_after_slash += this_amount_after_slash; + to_redelegate -= delta; + } else { + // We have to divide this bond in case there are + // slashes + let slash_ratio = + Dec::from(this_amount_after_slash) + / Dec::from(delta); + amount_after_slash += slash_ratio * to_redelegate; + to_redelegate = token::Amount::zero(); + } + + if to_redelegate.is_zero() { + break 'bonds_loop; + } + } + + // Then if there's still something to redelegate, unbond the + // regular bonds + if !to_redelegate.is_zero() { + // Apply slashes on this bond delta, if any + let mut this_amount_after_slash = bond_delta; + + // Find validator's slashes + let slashes = find_slashes_in_range( + &state.s, + bond_start, + None, + &id.validator, + ) + .unwrap(); + for (_slash_epoch, rate) in slashes { + let slash = bond_delta.mul_ceil(rate); + this_amount_after_slash = this_amount_after_slash + .checked_sub(slash) + .unwrap_or_default(); + } + + if to_redelegate >= bond_delta { + amount_after_slash += this_amount_after_slash; + to_redelegate -= bond_delta; + } else { + // We have to divide this bond in case there are + // slashes + let slash_ratio = + Dec::from(this_amount_after_slash) + / Dec::from(bond_delta); + amount_after_slash += slash_ratio * to_redelegate; + to_redelegate = token::Amount::zero(); + } + if to_redelegate.is_zero() { + break; + } + } + } + + // Read src validator stakes + let src_validator_stake_cur_pre = crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + let 
src_validator_stake_pipeline_pre = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + + // Read dest validator stakes + let dest_validator_stake_cur_pre = crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + current_epoch, + ) + .unwrap(); + let dest_validator_stake_pipeline_pre = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + pipeline, + ) + .unwrap(); + + // Find delegations + let delegations_pre = + crate::find_delegations(&state.s, &id.source, &pipeline) + .unwrap(); + + // Apply redelegation + let result = redelegate_tokens( + &mut state.s, + &id.source, + &id.validator, + &new_validator, + current_epoch, + amount, + ); + + if !amount.is_zero() && is_chained { + assert!(result.is_err()); + let err = result.unwrap_err(); + let err_str = err.to_string(); + assert_matches!( + err.downcast::().unwrap().deref(), + RedelegationError::IsChainedRedelegation, + "A chained redelegation must be rejected, got \ + {err_str}", + ); + } else { + result.unwrap(); + + // Post-condition: PoS balance is unchanged + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos) + .unwrap(); + assert_eq!(pos_balance_pre, pos_balance_post); + + // Post-condition: Source validator stake at current epoch + // is unchanged + let src_validator_stake_cur_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + current_epoch, + ) + .unwrap(); + assert_eq!( + src_validator_stake_cur_pre, + src_validator_stake_cur_post + ); + + // Post-condition: Source validator stake at pipeline epoch + // is reduced by the redelegation amount + + // TODO: shouldn't this be reduced by the redelegation + // amount post-slashing tho? 
+ // NOTE: We changed it to reduce it, check again later + let src_validator_stake_pipeline_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &id.validator, + pipeline, + ) + .unwrap(); + let max_slash_round_err = ref_state + .validator_records + .get(&id.validator) + .map(|r| r.slash_round_err_tolerance(current_epoch)) + .unwrap_or_default(); + let expected_new_stake = src_validator_stake_pipeline_pre + .checked_sub(amount_after_slash) + .unwrap_or_default(); + assert!( + src_validator_stake_pipeline_post + <= expected_new_stake + max_slash_round_err + && expected_new_stake + <= src_validator_stake_pipeline_post + + max_slash_round_err, + "Expected src validator {} stake after redelegation \ + at pipeline to be equal to {} ({}), got {}.", + id.validator, + expected_new_stake.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +-{}", + max_slash_round_err.to_string_native() + ) + }, + src_validator_stake_pipeline_post.to_string_native() + ); + + // Post-condition: Destination validator stake at current + // epoch is unchanged + let dest_validator_stake_cur_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + current_epoch, + ) + .unwrap(); + assert_eq!( + dest_validator_stake_cur_pre, + dest_validator_stake_cur_post + ); + + // Post-condition: Destination validator stake at pipeline + // epoch is increased by the redelegation amount, less any + // slashes + let expected_new_stake = + dest_validator_stake_pipeline_pre + amount_after_slash; + let dest_validator_stake_pipeline_post = + crate::read_validator_stake( + &state.s, + ¶ms, + &new_validator, + pipeline, + ) + .unwrap(); + assert!( + expected_new_stake + <= dest_validator_stake_pipeline_post + + max_slash_round_err + && dest_validator_stake_pipeline_post + <= expected_new_stake + max_slash_round_err, + "Expected dest validator {} stake after redelegation \ + at pipeline 
to be equal to {} ({}), got {}.", + new_validator, + expected_new_stake.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +-{}", + max_slash_round_err.to_string_native() + ) + }, + dest_validator_stake_pipeline_post.to_string_native() + ); + + // Post-condition: The difference at pipeline in src + // validator stake is equal to negative difference in dest + // validator. + assert_eq!( + src_validator_stake_pipeline_pre + - src_validator_stake_pipeline_post, + dest_validator_stake_pipeline_post + - dest_validator_stake_pipeline_pre + ); + + // Post-condition: The delegator's delegations should be + // updated with redelegation. For the source reduced by the + // redelegation amount and for the destination increased by + // the redelegation amount, less any slashes. + let delegations_post = crate::find_delegations( + &state.s, &id.source, &pipeline, + ) + .unwrap(); + let src_delegation_pre = delegations_pre + .get(&id.validator) + .cloned() + .unwrap_or_default(); + let src_delegation_post = delegations_post + .get(&id.validator) + .cloned() + .unwrap_or_default(); + assert_eq!( + src_delegation_pre - src_delegation_post, + amount + ); + let dest_delegation_pre = delegations_pre + .get(&new_validator) + .cloned() + .unwrap_or_default(); + let dest_delegation_post = delegations_post + .get(&new_validator) + .cloned() + .unwrap_or_default(); + let dest_delegation_diff = + dest_delegation_post - dest_delegation_pre; + assert!( + amount_after_slash + <= dest_delegation_diff + max_slash_round_err + && dest_delegation_diff + <= amount_after_slash + max_slash_round_err, + "Expected redelegation by {} to be increased by to {} \ + ({}), but it increased by {}.", + id.source, + amount_after_slash.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +-{}", + 
max_slash_round_err.to_string_native() + ) + }, + dest_delegation_diff.to_string_native(), + ); + } + } + Transition::Misbehavior { + address, + slash_type, + infraction_epoch, + height, + } => { + tracing::debug!("\nCONCRETE Misbehavior"); + let current_epoch = state.current_epoch(); + // Record the slash evidence + super::slash( + &mut state.s, + ¶ms, + current_epoch, + infraction_epoch, + height, + slash_type, + &address, + current_epoch.next(), + ) + .unwrap(); + + // Apply some post-conditions + let params = read_pos_params(&state.s).unwrap(); + state.check_misbehavior_post_conditions( + ¶ms, + current_epoch, + infraction_epoch, + slash_type, + &address, + ); + + // TODO: Any others? + } + Transition::UnjailValidator { address } => { + tracing::debug!("\nCONCRETE UnjailValidator"); + let current_epoch = state.current_epoch(); + + // Unjail the validator + super::unjail_validator(&mut state.s, &address, current_epoch) + .unwrap(); + + // Post-conditions + let params = read_pos_params(&state.s).unwrap(); + state.check_unjail_validator_post_conditions(¶ms, &address); + } + } + state + } + + fn check_invariants( + state: &Self::SystemUnderTest, + ref_state: &::State, + ) { + let current_epoch = state.current_epoch(); + let params = read_pos_params(&state.s).unwrap(); + state.check_global_post_conditions(¶ms, current_epoch, ref_state); + } +} + +impl ConcretePosState { + fn current_epoch(&self) -> Epoch { + self.s.storage.block.epoch + } + + fn check_next_epoch_post_conditions(&self, params: &PosParams) { + let pipeline = self.current_epoch() + params.pipeline_len; + let before_pipeline = pipeline.prev(); + + // Post-condition: Consensus validator sets at pipeline offset + // must be the same as at the epoch before it. 
+ let consensus_set_before_pipeline = + crate::read_consensus_validator_set_addresses_with_stake( + &self.s, + before_pipeline, + ) + .unwrap(); + let consensus_set_at_pipeline = + crate::read_consensus_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); + itertools::assert_equal( + consensus_set_before_pipeline.into_iter().sorted(), + consensus_set_at_pipeline.into_iter().sorted(), + ); + + // Post-condition: Below-capacity validator sets at pipeline + // offset must be the same as at the epoch before it. + let below_cap_before_pipeline = + crate::read_below_capacity_validator_set_addresses_with_stake( + &self.s, + before_pipeline, + ) + .unwrap(); + let below_cap_at_pipeline = + crate::read_below_capacity_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); + itertools::assert_equal( + below_cap_before_pipeline.into_iter().sorted(), + below_cap_at_pipeline.into_iter().sorted(), + ); + + // TODO: post-conditions for processing of slashes, just throwing things + // here atm + let slashed_validators = enqueued_slashes_handle() + .at(&self.current_epoch()) + .iter(&self.s) + .unwrap() + .map(|a| { + let ( + NestedSubKey::Data { + key: address, + nested_sub_key: _, + }, + _b, + ) = a.unwrap(); + address + }) + .collect::>(); + + for validator in &slashed_validators { + assert!( + !validator_slashes_handle(validator) + .is_empty(&self.s) + .unwrap() + ); + assert_eq!( + validator_state_handle(validator) + .get(&self.s, self.current_epoch(), params) + .unwrap(), + Some(ValidatorState::Jailed) + ); + } + } + + fn check_bond_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + id: BondId, + amount: token::Amount, + validator_stake_before_bond_cur: token::Amount, + validator_stake_before_bond_pipeline: token::Amount, + ) { + let pipeline = submit_epoch + params.pipeline_len; + + let cur_stake = super::read_validator_stake( + &self.s, + params, + &id.validator, + submit_epoch, + ) + .unwrap(); + + // 
Post-condition: the validator stake at the current epoch should not + // change + assert_eq!(cur_stake, validator_stake_before_bond_cur); + + let stake_at_pipeline = super::read_validator_stake( + &self.s, + params, + &id.validator, + pipeline, + ) + .unwrap(); + + // Post-condition: the validator stake at the pipeline should be + // incremented by the bond amount + assert_eq!( + stake_at_pipeline, + validator_stake_before_bond_pipeline + amount + ); + + self.check_bond_and_unbond_post_conditions( + submit_epoch, + params, + id, + stake_at_pipeline, + ); + } + + fn check_unbond_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + id: BondId, + amount: token::Amount, + validator_stake_before_unbond_cur: token::Amount, + validator_stake_before_unbond_pipeline: token::Amount, + ) { + let pipeline = submit_epoch + params.pipeline_len; + + let cur_stake = super::read_validator_stake( + &self.s, + params, + &id.validator, + submit_epoch, + ) + .unwrap(); + + // Post-condition: the validator stake at the current epoch should not + // change + assert_eq!(cur_stake, validator_stake_before_unbond_cur); + + let stake_at_pipeline = super::read_validator_stake( + &self.s, + params, + &id.validator, + pipeline, + ) + .unwrap(); + + // Post-condition: the validator stake at the pipeline should be + // decremented at most by the bond amount (because slashing can reduce + // the actual amount unbonded) + // + // TODO: is this a weak assertion here? 
Seems cumbersome to calculate + // the exact amount considering the slashing applied can be complicated + assert!( + stake_at_pipeline + >= validator_stake_before_unbond_pipeline + .checked_sub(amount) + .unwrap_or_default() + ); + + self.check_bond_and_unbond_post_conditions( + submit_epoch, + params, + id, + stake_at_pipeline, + ); + } + + /// These post-conditions apply to bonding and unbonding + fn check_bond_and_unbond_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + id: BondId, + stake_at_pipeline: token::Amount, + ) { + let pipeline = submit_epoch + params.pipeline_len; + // Read the consensus sets data using iterator + let num_in_consensus = crate::consensus_validator_set_handle() + .at(&pipeline) + .iter(&self.s) + .unwrap() + .map(|res| res.unwrap()) + .filter(|(_keys, addr)| addr == &id.validator) + .count(); + + let num_in_below_cap = crate::below_capacity_validator_set_handle() + .at(&pipeline) + .iter(&self.s) + .unwrap() + .map(|res| res.unwrap()) + .filter(|(_keys, addr)| addr == &id.validator) + .count(); + + let num_in_below_thresh = + read_below_threshold_validator_set_addresses(&self.s, pipeline) + .unwrap() + .into_iter() + .filter(|addr| addr == &id.validator) + .count(); + + let num_occurrences = + num_in_consensus + num_in_below_cap + num_in_below_thresh; + let validator_is_jailed = crate::validator_state_handle(&id.validator) + .get(&self.s, pipeline, params) + .unwrap() + == Some(ValidatorState::Jailed); + + // Post-condition: There must only be one instance of this validator in + // the consensus + below-cap sets with some stake across all + // validator sets, OR there are no instances and this validator is + // jailed + assert!( + num_occurrences == 1 + || (num_occurrences == 0 && validator_is_jailed) + ); + + let consensus_set = + crate::read_consensus_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); + let below_cap_set = + crate::read_below_capacity_validator_set_addresses_with_stake( + 
&self.s, pipeline, + ) + .unwrap(); + let below_thresh_set = + crate::read_below_threshold_validator_set_addresses( + &self.s, pipeline, + ) + .unwrap(); + let weighted = WeightedValidator { + bonded_stake: stake_at_pipeline, + address: id.validator, + }; + let consensus_val = consensus_set.get(&weighted); + let below_cap_val = below_cap_set.get(&weighted); + let below_thresh_val = below_thresh_set.get(&weighted.address); + + // Post-condition: The validator should be updated in exactly once in + // the validator sets + let jailed_condition = validator_is_jailed + && consensus_val.is_none() + && below_cap_val.is_none() + && below_thresh_val.is_none(); + + let mut num_sets = i32::from(consensus_val.is_some()); + num_sets += i32::from(below_cap_val.is_some()); + num_sets += i32::from(below_thresh_val.is_some()); + + assert!(num_sets == 1 || jailed_condition); + + // Post-condition: The stake of the validators in the consensus set is + // greater than or equal to below-capacity validators + for WeightedValidator { + bonded_stake: consensus_stake, + address: consensus_addr, + } in consensus_set.iter() + { + for WeightedValidator { + bonded_stake: below_cap_stake, + address: below_cap_addr, + } in below_cap_set.iter() + { + assert!( + consensus_stake >= below_cap_stake, + "Consensus validator {consensus_addr} with stake {} and \ + below-capacity {below_cap_addr} with stake {} should be \ + swapped.", + consensus_stake.to_string_native(), + below_cap_stake.to_string_native() + ); + } + } + } + + fn check_init_validator_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + address: &Address, + ) { + let pipeline = submit_epoch + params.pipeline_len; + + // Post-condition: the validator should not be in the validator set + // until the pipeline epoch + for epoch in submit_epoch.iter_range(params.pipeline_len) { + assert!( + !crate::read_consensus_validator_set_addresses(&self.s, epoch) + .unwrap() + .contains(address) + ); + assert!( + 
!crate::read_below_capacity_validator_set_addresses( + &self.s, epoch + ) + .unwrap() + .contains(address) + ); + assert!( + !crate::read_below_threshold_validator_set_addresses( + &self.s, epoch + ) + .unwrap() + .contains(address) + ); + assert!( + !crate::read_all_validator_addresses(&self.s, epoch) + .unwrap() + .contains(address) + ); + } + let in_consensus = + crate::read_consensus_validator_set_addresses(&self.s, pipeline) + .unwrap() + .contains(address); + let in_bc = crate::read_below_capacity_validator_set_addresses( + &self.s, pipeline, + ) + .unwrap() + .contains(address); + let in_below_thresh = + crate::read_below_threshold_validator_set_addresses( + &self.s, pipeline, + ) + .unwrap() + .contains(address); + + assert!(in_below_thresh && !in_consensus && !in_bc); + } + + fn check_misbehavior_post_conditions( + &self, + params: &PosParams, + current_epoch: Epoch, + infraction_epoch: Epoch, + slash_type: SlashType, + validator: &Address, + ) { + tracing::debug!( + "\nChecking misbehavior post conditions for validator: \n{}", + validator + ); + + // Validator state jailed and validator removed from the consensus set + // starting at the next epoch + for offset in 1..=params.pipeline_len { + // dbg!( + // crate::read_consensus_validator_set_addresses_with_stake( + // &self.s, + // current_epoch + offset + // ) + // .unwrap() + // ); + assert_eq!( + validator_state_handle(validator) + .get(&self.s, current_epoch + offset, params) + .unwrap(), + Some(ValidatorState::Jailed) + ); + let in_consensus = consensus_validator_set_handle() + .at(&(current_epoch + offset)) + .iter(&self.s) + .unwrap() + .any(|res| { + let (_, val_address) = res.unwrap(); + // dbg!(&val_address); + val_address == validator.clone() + }); + assert!(!in_consensus); + } + + // `enqueued_slashes` contains the slash element just added + let processing_epoch = infraction_epoch + + params.unbonding_len + + 1_u64 + + params.cubic_slashing_window_length; + let slash = enqueued_slashes_handle() 
+ .at(&processing_epoch) + .at(validator) + .back(&self.s) + .unwrap(); + if let Some(slash) = slash { + assert_eq!(slash.epoch, infraction_epoch); + assert_eq!(slash.r#type, slash_type); + assert_eq!(slash.rate, Dec::zero()); + } else { + panic!("Could not find the slash enqueued"); + } + + // TODO: Any others? + } + + fn check_unjail_validator_post_conditions( + &self, + params: &PosParams, + validator: &Address, + ) { + let current_epoch = self.s.storage.block.epoch; + + // Make sure the validator is not in either set until the pipeline epoch + for epoch in current_epoch.iter_range(params.pipeline_len) { + let in_consensus = consensus_validator_set_handle() + .at(&epoch) + .iter(&self.s) + .unwrap() + .any(|res| { + let (_, val_address) = res.unwrap(); + val_address == validator.clone() + }); + + let in_bc = below_capacity_validator_set_handle() + .at(&epoch) + .iter(&self.s) + .unwrap() + .any(|res| { + let (_, val_address) = res.unwrap(); + val_address == validator.clone() + }); + assert!(!in_consensus && !in_bc); + + let val_state = validator_state_handle(validator) + .get(&self.s, epoch, params) + .unwrap(); + assert_eq!(val_state, Some(ValidatorState::Jailed)); + } + let pipeline_epoch = current_epoch + params.pipeline_len; + + let num_in_consensus = consensus_validator_set_handle() + .at(&pipeline_epoch) + .iter(&self.s) + .unwrap() + .map(|res| res.unwrap()) + .filter(|(_keys, addr)| addr == validator) + .count(); + + let num_in_bc = below_capacity_validator_set_handle() + .at(&pipeline_epoch) + .iter(&self.s) + .unwrap() + .map(|res| res.unwrap()) + .filter(|(_keys, addr)| addr == validator) + .count(); + + let num_in_bt = read_below_threshold_validator_set_addresses( + &self.s, + pipeline_epoch, + ) + .unwrap() + .into_iter() + .filter(|addr| addr == validator) + .count(); + + let num_occurrences = num_in_consensus + num_in_bc + num_in_bt; + assert_eq!(num_occurrences, 1); + + let val_state = validator_state_handle(validator) + .get(&self.s, 
current_epoch + params.pipeline_len, params) + .unwrap(); + assert!( + val_state == Some(ValidatorState::Consensus) + || val_state == Some(ValidatorState::BelowCapacity) + || val_state == Some(ValidatorState::BelowThreshold) + ); + } + + fn check_global_post_conditions( + &self, + params: &PosParams, + current_epoch: Epoch, + ref_state: &AbstractPosState, + ) { + // Ensure that every validator in each set has the proper state + for epoch in Epoch::iter_bounds_inclusive( + current_epoch, + current_epoch + params.pipeline_len, + ) { + tracing::debug!("Epoch {epoch}"); + let mut vals = HashSet::
::new(); + for WeightedValidator { + bonded_stake, + address: validator, + } in crate::read_consensus_validator_set_addresses_with_stake( + &self.s, epoch, + ) + .unwrap() + { + let deltas_stake = validator_deltas_handle(&validator) + .get_sum(&self.s, epoch, params) + .unwrap() + .unwrap_or_default(); + let max_slash_round_err = ref_state + .validator_records + .get(&validator) + .unwrap() + .slash_round_err_tolerance(epoch); + let ref_stake = ref_state + .validator_stakes + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap(); + let conc_stake = bonded_stake.change(); + let max_err_msg = if max_slash_round_err.is_zero() { + "no error expected".to_string() + } else { + format!( + "max err +-{}", + max_slash_round_err.to_string_native() + ) + }; + tracing::debug!( + "Consensus val {}, set stake: {}, deltas: {}, ref: {}, \ + {max_err_msg}", + &validator, + conc_stake.to_string_native(), + deltas_stake.to_string_native(), + ref_stake.to_string_native(), + ); + assert!(!deltas_stake.is_negative()); + assert_eq!(conc_stake, deltas_stake); + assert!( + ref_stake <= conc_stake + max_slash_round_err.change() + && conc_stake + <= ref_stake + max_slash_round_err.change(), + "Expected {} ({max_err_msg}), got {}.", + ref_stake.to_string_native(), + conc_stake.to_string_native() + ); + + let state = crate::validator_state_handle(&validator) + .get(&self.s, epoch, params) + .unwrap(); + + assert_eq!(state, Some(ValidatorState::Consensus)); + assert_eq!( + state.unwrap(), + ref_state + .validator_states + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap() + ); + assert!(!vals.contains(&validator)); + vals.insert(validator); + } + for WeightedValidator { + bonded_stake, + address: validator, + } in + crate::read_below_capacity_validator_set_addresses_with_stake( + &self.s, epoch, + ) + .unwrap() + { + let deltas_stake = validator_deltas_handle(&validator) + .get_sum(&self.s, epoch, params) + .unwrap() + .unwrap_or_default(); + let 
max_slash_round_err = ref_state + .validator_records + .get(&validator) + .unwrap() + .slash_round_err_tolerance(epoch); + let ref_stake = ref_state + .validator_stakes + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap(); + let conc_stake = bonded_stake.change(); + let max_err_msg = if max_slash_round_err.is_zero() { + "no error expected".to_string() + } else { + format!( + "max err +-{}", + max_slash_round_err.to_string_native() + ) + }; + tracing::debug!( + "Below-cap val {}, set stake: {}, deltas: {}, ref: {}, \ + {max_err_msg}", + &validator, + conc_stake.to_string_native(), + deltas_stake.to_string_native(), + ref_stake.to_string_native(), + ); + assert_eq!(conc_stake, deltas_stake); + assert!( + conc_stake <= ref_stake + max_slash_round_err.change() + && ref_stake + <= conc_stake + max_slash_round_err.change(), + "Expected {} ({max_err_msg}), got {}.", + ref_stake.to_string_native(), + bonded_stake.to_string_native() + ); + + let state = crate::validator_state_handle(&validator) + .get(&self.s, epoch, params) + .unwrap(); + // if state.is_none() { + // dbg!( + // crate::validator_state_handle(&validator) + // .get(&self.s, current_epoch, params) + // .unwrap() + // ); + // dbg!( + // crate::validator_state_handle(&validator) + // .get(&self.s, current_epoch.next(), params) + // .unwrap() + // ); + // dbg!( + // crate::validator_state_handle(&validator) + // .get(&self.s, current_epoch.next(), params) + // .unwrap() + // ); + // } + assert_eq!(state, Some(ValidatorState::BelowCapacity)); + assert_eq!( + state.unwrap(), + ref_state + .validator_states + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap() + ); + assert!(!vals.contains(&validator)); + vals.insert(validator); + } + + for validator in + crate::read_below_threshold_validator_set_addresses( + &self.s, epoch, + ) + .unwrap() + { + let conc_stake = validator_deltas_handle(&validator) + .get_sum(&self.s, epoch, params) + .unwrap() + .unwrap_or_default(); + + let state = 
crate::validator_state_handle(&validator) + .get(&self.s, epoch, params) + .unwrap() + .unwrap(); + + assert_eq!(state, ValidatorState::BelowThreshold); + assert_eq!( + state, + ref_state + .validator_states + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap() + ); + let max_slash_round_err = ref_state + .validator_records + .get(&validator) + .map(|r| r.slash_round_err_tolerance(epoch)) + .unwrap_or_default(); + let ref_stake = ref_state + .validator_stakes + .get(&epoch) + .unwrap() + .get(&validator) + .cloned() + .unwrap(); + let max_err_msg = if max_slash_round_err.is_zero() { + "no error expected".to_string() + } else { + format!( + "max err +-{}", + max_slash_round_err.to_string_native() + ) + }; + tracing::debug!( + "Below-thresh val {}, deltas: {}, ref: {}, {max_err_msg})", + &validator, + conc_stake.to_string_native(), + ref_stake.to_string_native(), + ); + assert!( + conc_stake <= ref_stake + max_slash_round_err.change() + && ref_stake + <= conc_stake + max_slash_round_err.change(), + "Expected {} ({max_err_msg}), got {}.", + ref_stake.to_string_native(), + conc_stake.to_string_native() + ); + assert!(!vals.contains(&validator)); + vals.insert(validator); + } + + // Jailed validators not in a set + let all_validators = + crate::read_all_validator_addresses(&self.s, epoch).unwrap(); + + for val in all_validators { + let state = validator_state_handle(&val) + .get(&self.s, epoch, params) + .unwrap() + .unwrap(); + + if state == ValidatorState::Jailed { + assert_eq!( + state, + ref_state + .validator_states + .get(&epoch) + .unwrap() + .get(&val) + .cloned() + .unwrap() + ); + let conc_stake = validator_deltas_handle(&val) + .get_sum(&self.s, epoch, params) + .unwrap() + .unwrap_or_default(); + let max_slash_round_err = ref_state + .validator_records + .get(&val) + .map(|r| r.slash_round_err_tolerance(epoch)) + .unwrap_or_default(); + let max_err_msg = if max_slash_round_err.is_zero() { + "no error expected".to_string() + } else { + 
format!( + "max err +-{}", + max_slash_round_err.to_string_native() + ) + }; + let ref_stake = ref_state + .validator_stakes + .get(&epoch) + .unwrap() + .get(&val) + .cloned() + .unwrap(); + tracing::debug!( + "Jailed val {}, deltas: {}, ref: {}, {max_err_msg}", + &val, + conc_stake.to_string_native(), + ref_stake.to_string_native(), + ); + + assert_eq!( + state, + ref_state + .validator_states + .get(&epoch) + .unwrap() + .get(&val) + .cloned() + .unwrap() + ); + assert!( + conc_stake <= ref_stake + max_slash_round_err.change() + && ref_stake + <= conc_stake + max_slash_round_err.change(), + "Expected {} ({}), got {}.", + ref_stake.to_string_native(), + max_err_msg, + conc_stake.to_string_native() + ); + assert!(!vals.contains(&val)); + } + } + } + + // Check that validator stakes are matching ref_state + for (validator, records) in &ref_state.validator_records { + // On every epoch from current up to pipeline + for epoch in current_epoch.iter_range(params.pipeline_len) { + let ref_stake = records.stake(epoch); + let conc_stake = crate::read_validator_stake( + &self.s, params, validator, epoch, + ) + .unwrap(); + let max_slash_round_err = + records.slash_round_err_tolerance(epoch); + assert!( + ref_stake <= conc_stake + max_slash_round_err + && conc_stake <= ref_stake + max_slash_round_err, + "Stake for validator {validator} in epoch {epoch} is not \ + matched against reference stake. 
Expected {} ({}), got \ + {}.", + ref_stake.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +-{}", + max_slash_round_err.to_string_native() + ) + }, + conc_stake.to_string_native() + ); + } + } + // TODO: expand above to include jailed validators + + for (validator, records) in &ref_state.validator_records { + for (source, records) in &records.per_source { + let bond_id = BondId { + source: source.clone(), + validator: validator.clone(), + }; + for epoch in current_epoch.iter_range(params.pipeline_len) { + let max_slash_round_err = + records.slash_round_err_tolerance(epoch); + let conc_bond_amount = + crate::bond_amount(&self.s, &bond_id, epoch).unwrap(); + let ref_bond_amount = records.amount(epoch); + assert!( + ref_bond_amount + <= conc_bond_amount + max_slash_round_err + && conc_bond_amount + <= ref_bond_amount + max_slash_round_err, + "Slashed `bond_amount` for validator {validator} in \ + epoch {epoch} is not matched against reference \ + state. 
Expected {} ({}), got {}.", + ref_bond_amount.to_string_native(), + if max_slash_round_err.is_zero() { + "no slashing rounding error expected".to_string() + } else { + format!( + "max slashing rounding error +-{}", + max_slash_round_err.to_string_native() + ) + }, + conc_bond_amount.to_string_native() + ); + } + } + } + } +} + +impl ReferenceStateMachine for AbstractPosState { + type State = Self; + type Transition = Transition; + + fn init_state() -> BoxedStrategy { + tracing::debug!("\nInitializing abstract state machine"); + arb_params_and_genesis_validators(Some(8), 8..10) + .prop_map(|(params, genesis_validators)| { + let epoch = Epoch::default(); + let mut state = Self { + epoch, + params: PosParams { + owned: params, + ..Default::default() + }, + genesis_validators: genesis_validators + .into_iter() + // Sorted by stake to fill in the consensus set first + .sorted_by(|a, b| Ord::cmp(&a.tokens, &b.tokens)) + .rev() + .collect(), + validator_records: Default::default(), + validator_stakes: Default::default(), + consensus_set: Default::default(), + below_capacity_set: Default::default(), + below_threshold_set: Default::default(), + validator_states: Default::default(), + validator_slashes: Default::default(), + enqueued_slashes: Default::default(), + validator_last_slash_epochs: Default::default(), + }; + + for GenesisValidator { + address, + tokens, + consensus_key: _, + protocol_key: _, + eth_cold_key: _, + eth_hot_key: _, + commission_rate: _, + max_commission_rate_change: _, + } in state.genesis_validators.clone() + { + let records = state.records_mut(&address, &address); + let bond_at_start = records.bonds.entry(epoch).or_default(); + bond_at_start.tokens.amount = tokens; + + let total_stakes = + state.validator_stakes.entry(epoch).or_default(); + total_stakes + .insert(address.clone(), token::Change::from(tokens)); + + let consensus_set = + state.consensus_set.entry(epoch).or_default(); + let consensus_vals_len = consensus_set + .iter() + .map(|(_stake, 
validators)| validators.len() as u64) + .sum(); + + if tokens < state.params.validator_stake_threshold { + state + .below_threshold_set + .entry(epoch) + .or_default() + .insert(address.clone()); + state + .validator_states + .entry(epoch) + .or_default() + .insert(address, ValidatorState::BelowThreshold); + } else if state.params.max_validator_slots + > consensus_vals_len + { + state + .validator_states + .entry(epoch) + .or_default() + .insert(address.clone(), ValidatorState::Consensus); + consensus_set + .entry(tokens) + .or_default() + .push_back(address); + } else { + state + .validator_states + .entry(epoch) + .or_default() + .insert( + address.clone(), + ValidatorState::BelowCapacity, + ); + let below_cap_set = + state.below_capacity_set.entry(epoch).or_default(); + below_cap_set + .entry(ReverseOrdTokenAmount(tokens)) + .or_default() + .push_back(address) + }; + } + // Ensure that below-capacity and below-threshold sets are + // initialized even if empty + state.below_capacity_set.entry(epoch).or_default(); + state.below_threshold_set.entry(epoch).or_default(); + + // Copy validator sets up to pipeline epoch + for epoch in epoch.next().iter_range(state.params.pipeline_len) + { + state.copy_discrete_epoched_data(epoch) + } + state + }) + .boxed() + } + + // TODO: allow bonding to jailed val + fn transitions(state: &Self::State) -> BoxedStrategy { + // Let preconditions filter out what unbonds are not allowed + let unbondable = + state.unbondable_bonds().into_iter().collect::>(); + let redelegatable = + state.redelegatable_bonds().into_iter().collect::>(); + + let withdrawable = + state.withdrawable_unbonds().into_iter().collect::>(); + + let eligible_for_unjail = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .iter() + .filter_map(|(addr, &val_state)| { + let last_slash_epoch = + state.validator_last_slash_epochs.get(addr); + + if let Some(last_slash_epoch) = last_slash_epoch { + if val_state == ValidatorState::Jailed + // 
`last_slash_epoch` must be unbonding_len + window_width or more epochs + // before the current + && state.epoch.0 - last_slash_epoch.0 + > state.params.unbonding_len + state.params.cubic_slashing_window_length + { + return Some(addr.clone()); + } + } + None + }) + .collect::>(); + + // Transitions that can be applied if there are no bonds and unbonds + let basic = prop_oneof![ + 4 => Just(Transition::NextEpoch), + 6 => add_arb_bond_amount(state), + 5 => arb_delegation(state), + 3 => arb_self_bond(state), + 1 => ( + address::testing::arb_established_address(), + key::testing::arb_common_keypair(), + key::testing::arb_common_keypair(), + key::testing::arb_common_secp256k1_keypair(), + key::testing::arb_common_secp256k1_keypair(), + arb_rate(), + arb_rate(), + ) + .prop_map( + |( + addr, + consensus_key, + protocol_key, + eth_hot_key, + eth_cold_key, + commission_rate, + max_commission_rate_change, + )| { + Transition::InitValidator { + address: Address::Established(addr), + consensus_key: consensus_key.to_public(), + protocol_key: protocol_key.to_public(), + eth_hot_key: eth_hot_key.to_public(), + eth_cold_key: eth_cold_key.to_public(), + commission_rate, + max_commission_rate_change, + } + }, + ), + 1 => arb_slash(state), + ]; + + // Add unjailing, if any eligible + let transitions = if eligible_for_unjail.is_empty() { + basic.boxed() + } else { + prop_oneof![ + // basic 6x more likely as it's got 6 cases + 6 => basic, + 1 => prop::sample::select(eligible_for_unjail).prop_map(|address| { + Transition::UnjailValidator { address } + }) + ] + .boxed() + }; + + // Add unbonds, if any + let transitions = if unbondable.is_empty() { + transitions + } else { + let arb_unbondable = prop::sample::select(unbondable); + let arb_unbond = + arb_unbondable.prop_flat_map(move |(id, bonds_sum)| { + let bonds_sum: i128 = + TryFrom::try_from(bonds_sum.change()).unwrap(); + (0..bonds_sum).prop_map(move |to_unbond| { + let id = id.clone(); + let amount = + 
token::Amount::from_change(Change::from(to_unbond)); + Transition::Unbond { id, amount } + }) + }); + prop_oneof![ + 7 => transitions, + 1 => arb_unbond, + ] + .boxed() + }; + + // Add withdrawals, if any + let transitions = if withdrawable.is_empty() { + transitions + } else { + let arb_withdrawable = prop::sample::select(withdrawable); + let arb_withdrawal = arb_withdrawable + .prop_map(|(id, _)| Transition::Withdraw { id }); + + prop_oneof![ + 8 => transitions, + 1 => arb_withdrawal, + ] + .boxed() + }; + + // Add redelegations, if any + if redelegatable.is_empty() { + transitions + } else { + let arb_redelegatable = prop::sample::select(redelegatable); + let validators = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .keys() + .cloned() + .collect::>(); + let unchainable_redelegations = state.unchainable_redelegations(); + let arb_redelegation = + arb_redelegatable.prop_flat_map(move |(id, deltas_sum)| { + let deltas_sum = + i128::try_from(deltas_sum.change()).unwrap(); + // Generate an amount to redelegate, up to the sum + assert!( + deltas_sum > 0, + "Bond {id} deltas_sum must be non-zero" + ); + let arb_amount = (0..deltas_sum).prop_map(|to_unbond| { + token::Amount::from_change(Change::from(to_unbond)) + }); + // Generate a new validator for redelegation + let current_validator = id.validator.clone(); + let new_validators = validators + .iter() + // The validator must be other than the current + .filter(|validator| *validator != ¤t_validator) + .cloned() + .collect::>(); + let arb_new_validator = + prop::sample::select(new_validators); + let unchainable_redelegations = + unchainable_redelegations.clone(); + (arb_amount, arb_new_validator).prop_map( + move |(amount, new_validator)| Transition::Redelegate { + is_chained: Self::is_chained_redelegation( + &unchainable_redelegations, + &id.source, + &id.validator, + ), + id: id.clone(), + new_validator, + amount, + }, + ) + }); + prop_oneof![ + 9 => transitions, + // Cranked up to make 
redelegations more common + 15 => arb_redelegation, + ] + .boxed() + } + } + + fn apply( + mut state: Self::State, + transition: &Self::Transition, + ) -> Self::State { + match transition { + Transition::NextEpoch => { + state.epoch = state.epoch.next(); + tracing::debug!("Starting epoch {}", state.epoch); + + // Copy the non-delta data into pipeline epoch from its pred. + state.copy_discrete_epoched_data(state.pipeline()); + + // Process slashes enqueued for the new epoch + state.process_enqueued_slashes(); + + // print-out the state + state.debug_validators(); + } + Transition::InitValidator { + address, + consensus_key: _, + protocol_key: _, + eth_cold_key: _, + eth_hot_key: _, + commission_rate: _, + max_commission_rate_change: _, + } => { + let pipeline: Epoch = state.pipeline(); + + // Initialize the stake at pipeline + state + .validator_stakes + .entry(pipeline) + .or_default() + .insert(address.clone(), 0_i128.into()); + + // Insert into the below-threshold set at pipeline since the + // initial stake is 0 + state + .below_threshold_set + .entry(pipeline) + .or_default() + .insert(address.clone()); + state + .validator_states + .entry(pipeline) + .or_default() + .insert(address.clone(), ValidatorState::BelowThreshold); + + state.debug_validators(); + } + Transition::Bond { id, amount } => { + if !amount.is_zero() { + state.bond(id, *amount); + state.debug_validators(); + } + } + Transition::Unbond { id, amount } => { + if !amount.is_zero() { + state.unbond(id, *amount); + state.debug_validators(); + } + } + Transition::Withdraw { id } => { + state.withdraw(id); + } + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + if *is_chained { + return state; + } + if !amount.is_zero() { + state.redelegate(id, new_validator, *amount); + state.debug_validators(); + } + } + Transition::Misbehavior { + address, + slash_type, + infraction_epoch, + height, + } => { + let current_epoch = state.epoch; + let processing_epoch = 
*infraction_epoch + + state.params.unbonding_len + + 1_u64 + + state.params.cubic_slashing_window_length; + let slash = Slash { + epoch: *infraction_epoch, + block_height: *height, + r#type: *slash_type, + rate: Dec::zero(), + }; + + // Enqueue the slash for future processing + state + .enqueued_slashes + .entry(processing_epoch) + .or_default() + .entry(address.clone()) + .or_default() + .push(slash); + + // Remove the validator from either the consensus or + // below-capacity set and place it into the jailed validator set + + // Remove from the validator set starting at the next epoch and + // up thru the pipeline + for offset in 1..=state.params.pipeline_len { + let real_stake = token::Amount::from_change( + state + .validator_stakes + .get(&(current_epoch + offset)) + .unwrap() + .get(address) + .cloned() + .unwrap_or_default(), + ); + + if let Some((index, stake)) = state + .is_in_consensus_w_info(address, current_epoch + offset) + { + debug_assert_eq!(stake, real_stake); + + let vals = state + .consensus_set + .entry(current_epoch + offset) + .or_default() + .entry(stake) + .or_default(); + let removed = vals.remove(index); + debug_assert_eq!(removed, Some(address.clone())); + if vals.is_empty() { + state + .consensus_set + .entry(current_epoch + offset) + .or_default() + .remove(&stake); + } + + // At pipeline epoch, if was consensus, replace it with + // a below-capacity validator + if offset == state.params.pipeline_len { + let below_cap_pipeline = state + .below_capacity_set + .entry(current_epoch + offset) + .or_default(); + + if let Some(mut max_below_cap) = + below_cap_pipeline.last_entry() + { + let max_bc_stake = *max_below_cap.key(); + let vals = max_below_cap.get_mut(); + let first_val = vals.pop_front().unwrap(); + if vals.is_empty() { + below_cap_pipeline.remove(&max_bc_stake); + } + state + .consensus_set + .entry(current_epoch + offset) + .or_default() + .entry(max_bc_stake.into()) + .or_default() + .push_back(first_val.clone()); + state + 
.validator_states + .entry(current_epoch + offset) + .or_default() + .insert( + first_val.clone(), + ValidatorState::Consensus, + ); + } + } + } else if let Some((index, stake)) = state + .is_in_below_capacity_w_info( + address, + current_epoch + offset, + ) + { + debug_assert_eq!(stake, real_stake); + + let vals = state + .below_capacity_set + .entry(current_epoch + offset) + .or_default() + .entry(stake.into()) + .or_default(); + + let removed = vals.remove(index); + debug_assert_eq!(removed, Some(address.clone())); + if vals.is_empty() { + state + .below_capacity_set + .entry(current_epoch + offset) + .or_default() + .remove(&stake.into()); + } + } else if state + .is_in_below_threshold(address, current_epoch + offset) + { + let removed = state + .below_threshold_set + .entry(current_epoch + offset) + .or_default() + .remove(address); + debug_assert!(removed); + } else { + // Just make sure the validator is already jailed + debug_assert_eq!( + state + .validator_states + .get(&(current_epoch + offset)) + .unwrap() + .get(address) + .cloned() + .unwrap(), + ValidatorState::Jailed + ); + } + + state + .validator_states + .entry(current_epoch + offset) + .or_default() + .insert(address.clone(), ValidatorState::Jailed); + } + + // Update the most recent infraction epoch for the validator + if let Some(last_epoch) = + state.validator_last_slash_epochs.get(address) + { + if infraction_epoch > last_epoch { + state + .validator_last_slash_epochs + .insert(address.clone(), *infraction_epoch); + } + } else { + state + .validator_last_slash_epochs + .insert(address.clone(), *infraction_epoch); + } + + state.debug_validators(); + } + Transition::UnjailValidator { address } => { + let pipeline_epoch = state.pipeline(); + let consensus_set_pipeline = + state.consensus_set.entry(pipeline_epoch).or_default(); + let pipeline_stake = state + .validator_stakes + .get(&pipeline_epoch) + .unwrap() + .get(address) + .cloned() + .unwrap_or_default(); + let validator_states_pipeline = 
+ state.validator_states.entry(pipeline_epoch).or_default(); + + // Insert the validator back into the appropriate validator set + // and update its state + let num_consensus = consensus_set_pipeline + .iter() + .fold(0, |sum, (_, validators)| { + sum + validators.len() as u64 + }); + + if pipeline_stake + < state.params.validator_stake_threshold.change() + { + // Place into the below-threshold set + let below_threshold_set_pipeline = state + .below_threshold_set + .entry(pipeline_epoch) + .or_default(); + below_threshold_set_pipeline.insert(address.clone()); + validator_states_pipeline.insert( + address.clone(), + ValidatorState::BelowThreshold, + ); + } else if num_consensus < state.params.max_validator_slots { + // Place directly into the consensus set + debug_assert!( + state + .below_capacity_set + .get(&pipeline_epoch) + .unwrap() + .is_empty() + ); + consensus_set_pipeline + .entry(token::Amount::from_change(pipeline_stake)) + .or_default() + .push_back(address.clone()); + validator_states_pipeline + .insert(address.clone(), ValidatorState::Consensus); + } else if let Some(mut min_consensus) = + consensus_set_pipeline.first_entry() + { + let below_capacity_set_pipeline = state + .below_capacity_set + .entry(pipeline_epoch) + .or_default(); + + let min_consensus_stake = *min_consensus.key(); + if pipeline_stake > min_consensus_stake.change() { + // Place into the consensus set and demote the last + // min_consensus validator + let min_validators = min_consensus.get_mut(); + let last_val = min_validators.pop_back().unwrap(); + // Remove the key if there's nothing left + if min_validators.is_empty() { + consensus_set_pipeline.remove(&min_consensus_stake); + } + // Do the swap + below_capacity_set_pipeline + .entry(min_consensus_stake.into()) + .or_default() + .push_back(last_val.clone()); + validator_states_pipeline + .insert(last_val, ValidatorState::BelowCapacity); + + consensus_set_pipeline + .entry(token::Amount::from_change(pipeline_stake)) + .or_default() 
+ .push_back(address.clone()); + validator_states_pipeline + .insert(address.clone(), ValidatorState::Consensus); + } else { + // Just place into the below-capacity set + below_capacity_set_pipeline + .entry( + token::Amount::from_change(pipeline_stake) + .into(), + ) + .or_default() + .push_back(address.clone()); + validator_states_pipeline.insert( + address.clone(), + ValidatorState::BelowCapacity, + ); + } + } else { + panic!("Should not reach here I don't think") + } + state.debug_validators(); + } + } + + state + } + + fn preconditions( + state: &Self::State, + transition: &Self::Transition, + ) -> bool { + match transition { + // TODO: should there be any slashing preconditions for `NextEpoch`? + Transition::NextEpoch => true, + Transition::InitValidator { + address, + consensus_key: _, + protocol_key: _, + eth_cold_key: _, + eth_hot_key: _, + commission_rate: _, + max_commission_rate_change: _, + } => { + let pipeline = state.pipeline(); + // The address must not belong to an existing validator + !state.is_validator(address, pipeline) && + // There must be no delegations from this address + !state.unbondable_bonds().into_iter().any(|(id, _sum)| + &id.source == address) + } + Transition::Bond { id, amount: _ } => { + let pipeline = state.pipeline(); + // The validator must be known + if !state.is_validator(&id.validator, pipeline) { + return false; + } + + id.validator == id.source + // If it's not a self-bond, the source must not be a validator + || !state.is_validator(&id.source, pipeline) + } + Transition::Unbond { id, amount } => { + let pipeline = state.pipeline(); + + let is_unbondable = state + .unbondable_bonds() + .get(id) + .map(|sum| sum >= amount) + .unwrap_or_default(); + + // The validator must not be frozen currently + let is_frozen = if let Some(last_epoch) = + state.validator_last_slash_epochs.get(&id.validator) + { + *last_epoch + + state.params.unbonding_len + + 1u64 + + state.params.cubic_slashing_window_length + > state.epoch + } else { + 
false + }; + + // if is_frozen { + // tracing::debug!( + // "\nVALIDATOR {} IS FROZEN - CANNOT UNBOND\n", + // &id.validator + // ); + // } + + // The validator must be known + state.is_validator(&id.validator, pipeline) + // The amount must be available to unbond and the validator not jailed + && is_unbondable && !is_frozen + } + Transition::Withdraw { id } => { + let pipeline = state.pipeline(); + + let is_withdrawable = state + .withdrawable_unbonds() + .get(id) + .map(|amount| *amount > token::Amount::zero()) + .unwrap_or_default(); + + // The validator must not be jailed currently + let is_jailed = state + .validator_states + .get(&state.epoch) + .unwrap() + .get(&id.validator) + .cloned() + == Some(ValidatorState::Jailed); + + // The validator must be known + state.is_validator(&id.validator, pipeline) + // The amount must be available to unbond + && is_withdrawable && !is_jailed + } + Transition::Redelegate { + is_chained, + id, + new_validator, + amount, + } => { + let pipeline = state.pipeline(); + + if *is_chained { + Self::is_chained_redelegation( + &state.unchainable_redelegations(), + &id.source, + new_validator, + ) + } else { + // The src and dest validator must be known + if !state.is_validator(&id.validator, pipeline) + || !state.is_validator(new_validator, pipeline) + { + return false; + } + + // The amount must be available to redelegate + if !state + .unbondable_bonds() + .get(id) + .map(|sum| sum >= amount) + .unwrap_or_default() + { + return false; + } + + // The src validator must not be frozen + if let Some(last_epoch) = + state.validator_last_slash_epochs.get(&id.validator) + { + if *last_epoch + + state.params.unbonding_len + + 1u64 + + state.params.cubic_slashing_window_length + > state.epoch + { + return false; + } + } + + // The dest validator must not be frozen + if let Some(last_epoch) = + state.validator_last_slash_epochs.get(new_validator) + { + if *last_epoch + + state.params.unbonding_len + + 1u64 + + 
state.params.cubic_slashing_window_length + > state.epoch + { + return false; + } + } + + true + } + } + Transition::Misbehavior { + address, + slash_type: _, + infraction_epoch, + height: _, + } => { + let is_validator = + state.is_validator(address, *infraction_epoch); + + // The infraction epoch cannot be in the future or more than + // unbonding_len epochs in the past + let current_epoch = state.epoch; + let valid_epoch = *infraction_epoch <= current_epoch + && current_epoch.0 - infraction_epoch.0 + <= state.params.unbonding_len; + + // Only misbehave when there is more than 3 validators that's + // not jailed, so there's always at least one honest left + let enough_honest_validators = || { + let num_of_honest = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .iter() + .filter(|(_addr, val_state)| match val_state { + ValidatorState::Consensus + | ValidatorState::BelowCapacity => true, + ValidatorState::Inactive + | ValidatorState::Jailed + // Below threshold cannot be in consensus + | ValidatorState::BelowThreshold => false, + }) + .count(); + + // Find the number of enqueued slashes to unique validators + let num_of_enquequed_slashes = state + .enqueued_slashes + .iter() + // find all validators with any enqueued slashes + .fold(BTreeSet::new(), |mut acc, (&epoch, slashes)| { + if epoch > current_epoch { + acc.extend(slashes.keys().cloned()); + } + acc + }) + .len(); + + num_of_honest - num_of_enquequed_slashes > 3 + }; + + // Ensure that the validator is in consensus when it misbehaves + // TODO: possibly also test allowing below-capacity validators + // tracing::debug!("\nVal to possibly misbehave: {}", &address); + let state_at_infraction = state + .validator_states + .get(infraction_epoch) + .unwrap() + .get(address); + if state_at_infraction.is_none() { + // Figure out why this happening + tracing::debug!( + "State is None at Infraction epoch {}", + infraction_epoch + ); + for epoch in Epoch::iter_bounds_inclusive( + 
infraction_epoch.next(), + state.epoch, + ) { + let state_ep = state + .validator_states + .get(infraction_epoch) + .unwrap() + .get(address) + .cloned(); + tracing::debug!( + "State at epoch {} is {:?}", + epoch, + state_ep + ); + } + } + + let can_misbehave = state_at_infraction.cloned() + == Some(ValidatorState::Consensus); + + is_validator + && valid_epoch + && enough_honest_validators() + && can_misbehave + + // TODO: any others conditions? + } + Transition::UnjailValidator { address } => { + // Validator address must be jailed thru the pipeline epoch + for epoch in + Epoch::iter_bounds_inclusive(state.epoch, state.pipeline()) + { + if state + .validator_states + .get(&epoch) + .unwrap() + .get(address) + .cloned() + .unwrap() + != ValidatorState::Jailed + { + return false; + } + } + // Most recent misbehavior is >= unbonding_len epochs away from + // current epoch + if let Some(last_slash_epoch) = + state.validator_last_slash_epochs.get(address) + { + if state.epoch.0 - last_slash_epoch.0 + < state.params.unbonding_len + { + return false; + } + } + + true + // TODO: any others? 
+ } + } + } +} + +/// Arbitrary bond transition that adds tokens to an existing bond +fn add_arb_bond_amount( + state: &AbstractPosState, +) -> impl Strategy { + let bond_ids = state.existing_bond_ids(); + let arb_bond_id = prop::sample::select(bond_ids); + (arb_bond_id, arb_bond_amount()) + .prop_map(|(id, amount)| Transition::Bond { id, amount }) +} + +/// Arbitrary delegation to one of the validators +fn arb_delegation( + state: &AbstractPosState, +) -> impl Strategy { + // Bond is allowed to any validator in any set - including jailed validators + let validators = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .keys() + .cloned() + .collect::>(); + let validator_vec = validators.clone().into_iter().collect::>(); + let arb_source = address::testing::arb_non_internal_address() + .prop_filter("Must be a non-validator address", move |addr| { + !validators.contains(addr) + }); + let arb_validator = prop::sample::select(validator_vec); + (arb_source, arb_validator, arb_bond_amount()).prop_map( + |(source, validator, amount)| Transition::Bond { + id: BondId { source, validator }, + amount, + }, + ) +} + +/// Arbitrary validator self-bond +fn arb_self_bond( + state: &AbstractPosState, +) -> impl Strategy { + // Bond is allowed to any validator in any set - including jailed validators + let validator_vec = state + .validator_states + .get(&state.pipeline()) + .unwrap() + .keys() + .cloned() + .collect::>(); + let arb_validator = prop::sample::select(validator_vec); + (arb_validator, arb_bond_amount()).prop_map(|(validator, amount)| { + Transition::Bond { + id: BondId { + source: validator.clone(), + validator, + }, + amount, + } + }) +} + +// Bond up to 10 tokens (in micro units) to avoid overflows +pub fn arb_bond_amount() -> impl Strategy { + (1_u64..10).prop_map(|val| token::Amount::from_uint(val, 0).unwrap()) +} + +/// Arbitrary validator misbehavior +fn arb_slash(state: &AbstractPosState) -> impl Strategy { + let validators = 
state.consensus_set.iter().fold( + Vec::new(), + |mut acc, (_epoch, vals)| { + for vals in vals.values() { + for validator in vals { + acc.push(validator.clone()); + } + } + acc + }, + ); + let current_epoch = state.epoch.0; + + let arb_validator = prop::sample::select(validators); + let slash_types = + vec![SlashType::LightClientAttack, SlashType::DuplicateVote]; + let arb_type = prop::sample::select(slash_types); + let arb_epoch = (current_epoch + .checked_sub(state.params.unbonding_len) + .unwrap_or_default()..=current_epoch) + .prop_map(Epoch::from); + (arb_validator, arb_type, arb_epoch).prop_map( + |(validator, slash_type, infraction_epoch)| Transition::Misbehavior { + address: validator, + slash_type, + infraction_epoch, + height: 0, + }, + ) +} diff --git a/proof_of_stake/src/tests/utils.rs b/proof_of_stake/src/tests/utils.rs new file mode 100644 index 0000000000..1e5f5acf62 --- /dev/null +++ b/proof_of_stake/src/tests/utils.rs @@ -0,0 +1,81 @@ +use std::marker::PhantomData; +use std::str::FromStr; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::Relaxed; +use std::{env, fmt}; + +// TODO: allow custom fmt fn +#[derive(Clone)] +pub struct DbgPrintDiff +where + T: fmt::Debug, +{ + last: String, + phantom_t: PhantomData, +} +impl DbgPrintDiff +where + T: fmt::Debug, +{ + pub fn new() -> Self { + Self { + last: Default::default(), + phantom_t: PhantomData, + } + } + + /// Store a state in dbg format string + pub fn store(&self, data: &T) -> Self { + Self { + last: Self::fmt_data(data), + phantom_t: PhantomData, + } + } + + /// Diff a state in dbg format string against the stored state + pub fn print_diff_and_store(&self, data: &T) -> Self { + let dbg_str = Self::fmt_data(data); + println!( + "{}", + pretty_assertions::StrComparison::new(&self.last, &dbg_str,) + ); + Self { + last: dbg_str, + phantom_t: PhantomData, + } + } + + fn fmt_data(data: &T) -> String { + format!("{:#?}", data) + } +} + +const ENV_VAR_TEST_PAUSES: &str = 
"TEST_PAUSES"; + +pub fn pause_for_enter() { + if paused_enabled() { + println!("Press Enter to continue"); + let mut input = String::new(); + std::io::stdin().read_line(&mut input).unwrap(); + } +} + +fn paused_enabled() -> bool { + // Cache the result of reading the environment variable + static ENABLED: AtomicUsize = AtomicUsize::new(0); + match ENABLED.load(Relaxed) { + 0 => {} + 1 => return false, + _ => return true, + } + let enabled: bool = matches!( + env::var(ENV_VAR_TEST_PAUSES).map(|val| { + FromStr::from_str(&val).unwrap_or_else(|_| { + panic!("Expected a bool for {ENV_VAR_TEST_PAUSES} env var.") + }) + }), + Ok(true), + ); + ENABLED.store(enabled as usize + 1, Relaxed); + enabled +} diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index 736ffe7a46..aa4edf1bcc 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -3,7 +3,7 @@ mod rev_order; use core::fmt::Debug; -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::convert::TryFrom; use std::fmt::Display; use std::hash::Hash; @@ -14,7 +14,6 @@ use namada_core::ledger::storage_api::collections::lazy_map::NestedMap; use namada_core::ledger::storage_api::collections::{ LazyMap, LazySet, LazyVec, }; -use namada_core::ledger::storage_api::{self, StorageRead}; use namada_core::types::address::Address; use namada_core::types::dec::Dec; use namada_core::types::key::common; @@ -25,82 +24,49 @@ pub use rev_order::ReverseOrdTokenAmount; use crate::parameters::PosParams; -// TODO: replace `POS_MAX_DECIMAL_PLACES` with -// core::types::token::NATIVE_MAX_DECIMAL_PLACES?? -const U64_MAX: u64 = u64::MAX; +// TODO: review the offsets for each epoched type!! 
-/// Number of epochs below the current epoch for which validator deltas and -/// slashes are stored -const VALIDATOR_DELTAS_SLASHES_LEN: u64 = 23; - -// TODO: add this to the spec /// Stored positions of validators in validator sets pub type ValidatorSetPositions = crate::epoched::NestedEpoched< LazyMap, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, >; -impl ValidatorSetPositions { - /// TODO - pub fn get_position( - &self, - storage: &S, - epoch: &Epoch, - address: &Address, - params: &PosParams, - ) -> storage_api::Result> - where - S: StorageRead, - { - let last_update = self.get_last_update(storage)?; - // dbg!(&last_update); - if last_update.is_none() { - return Ok(None); - } - let last_update = last_update.unwrap(); - let future_most_epoch: Epoch = last_update + params.pipeline_len; - // dbg!(future_most_epoch); - let mut epoch = std::cmp::min(*epoch, future_most_epoch); - loop { - // dbg!(epoch); - match self.at(&epoch).get(storage, address)? { - Some(val) => return Ok(Some(val)), - None => { - if epoch.0 > 0 && epoch > Self::sub_past_epochs(last_update) - { - epoch = Epoch(epoch.0 - 1); - } else { - return Ok(None); - } - } - } - } - } -} - -// TODO: check the offsets for each epoched type!! - /// Epoched validator's consensus key. pub type ValidatorConsensusKeys = crate::epoched::Epoched< common::PublicKey, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, +>; + +/// Epoched validator's protocol key. +pub type ValidatorProtocolKeys = crate::epoched::Epoched< + common::PublicKey, + crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetMaxProposalPeriodPlus, >; /// Epoched validator's eth hot key. pub type ValidatorEthHotKeys = crate::epoched::Epoched< common::PublicKey, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetMaxProposalPeriodPlus, >; /// Epoched validator's eth cold key. 
pub type ValidatorEthColdKeys = crate::epoched::Epoched< common::PublicKey, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetMaxProposalPeriodPlus, >; /// Epoched validator's state. -pub type ValidatorStates = - crate::epoched::Epoched; +pub type ValidatorStates = crate::epoched::Epoched< + ValidatorState, + crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, +>; /// A map from a position to an address in a Validator Set pub type ValidatorPositionAddresses = LazyMap; @@ -117,48 +83,57 @@ pub type BelowCapacityValidatorSet = pub type ConsensusValidatorSets = crate::epoched::NestedEpoched< ConsensusValidatorSet, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetMaxProposalPeriodPlus, >; /// Epoched below-capacity validator sets. pub type BelowCapacityValidatorSets = crate::epoched::NestedEpoched< BelowCapacityValidatorSet, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, >; -/// Epoched total consensus validator stake -pub type TotalConsensusStakes = - crate::epoched::Epoched; +/// Epoched total consensus validator set stake +pub type TotalConsensusStakes = crate::epoched::Epoched< + Amount, + crate::epoched::OffsetZero, + crate::epoched::OffsetMaxU64, +>; /// Epoched validator's deltas. pub type ValidatorDeltas = crate::epoched::EpochedDelta< token::Change, crate::epoched::OffsetUnbondingLen, - VALIDATOR_DELTAS_SLASHES_LEN, + crate::epoched::OffsetMaxProposalPeriodOrSlashProcessingLenPlus, >; /// Epoched total deltas. 
pub type TotalDeltas = crate::epoched::EpochedDelta< token::Change, crate::epoched::OffsetUnbondingLen, - VALIDATOR_DELTAS_SLASHES_LEN, + crate::epoched::OffsetMaxProposalPeriodOrSlashProcessingLenPlus, >; /// Epoched validator commission rate -pub type CommissionRates = - crate::epoched::Epoched; +pub type CommissionRates = crate::epoched::Epoched< + Dec, + crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, +>; /// Epoched validator's bonds pub type Bonds = crate::epoched::EpochedDelta< - token::Change, + token::Amount, crate::epoched::OffsetPipelineLen, - U64_MAX, + crate::epoched::OffsetMaxU64, >; /// An epoched lazy set of all known active validator addresses (consensus, -/// below-capacity, jailed) +/// below-capacity, below-threshold, jailed) pub type ValidatorAddresses = crate::epoched::NestedEpoched< LazySet
, crate::epoched::OffsetPipelineLen, + crate::epoched::OffsetDefaultNumPastEpochs, >; /// Slashes indexed by validator address and then block height (for easier @@ -172,10 +147,14 @@ pub type ValidatorSlashes = NestedMap; pub type EpochedSlashes = crate::epoched::NestedEpoched< ValidatorSlashes, crate::epoched::OffsetUnbondingLen, - VALIDATOR_DELTAS_SLASHES_LEN, + crate::epoched::OffsetSlashProcessingLenPlus, >; /// Epoched validator's unbonds +/// +/// The map keys from outside in are: +/// - start epoch of the bond in which it started contributing to stake +/// - withdrawable epoch of the unbond pub type Unbonds = NestedMap>; /// Consensus keys set, used to ensure uniqueness @@ -186,17 +165,104 @@ pub type ConsensusKeys = LazySet; /// (affects the deltas, pipeline after submission). The inner `Epoch` /// corresponds to the epoch from which the underlying bond became active /// (affected deltas). -pub type ValidatorUnbondRecords = +pub type ValidatorTotalUnbonded = NestedMap>; +/// A validator's incoming redelegations, where the key is the bond owner +/// address and the value is the redelegation end epoch +pub type IncomingRedelegations = LazyMap; + +/// A validator's outgoing redelegations, where the validator in question is a +/// source validator. +/// +/// The map keys from outside in are: +/// - destination validator's address +/// - bond start epoch +/// - redelegation epoch in which it started contributing to destination +/// validator +/// +/// The value is the redelegated bond amount. +pub type OutgoingRedelegations = + NestedMap>>; + +/// A validator's total redelegated unbonded tokens for any delegator. +/// The map keys from outside in are: +/// +/// - redelegation epoch in which it started contributing to destination +/// validator +/// - redelegation source validator +/// - start epoch of the bond that's been redelegated +pub type TotalRedelegatedBonded = NestedMap; + +/// A validator's total redelegated unbonded tokens for any delegator. 
+/// The map keys from outside in are: +/// +/// - unbond epoch +/// - redelegation epoch in which it started contributing to destination +/// validator +/// - redelegation source validator +/// - bond start epoch +pub type TotalRedelegatedUnbonded = NestedMap; + +/// Map of redelegated tokens. +/// The map keys from outside in are: +/// +/// - redelegation source validator +/// - start epoch of the bond that's been redelegated +pub type RedelegatedTokens = NestedMap>; + +/// Map of redelegated bonds or unbonds. +/// The map keys from outside in are: +/// +/// - for bonds redelegation epoch in which the redelegation started +/// contributing to destination validator, for unbonds it's withdrawal epoch +/// - redelegation source validator +/// - start epoch of the bond that's been redelegated +/// +/// TODO: it's a confusing that the outermost epoch is different for bonds vs +/// unbonds, can we swap withdrawal with redelegation epoch for +/// `DelegatorRedelegatedUnbonded`? +pub type RedelegatedBondsOrUnbonds = NestedMap; + +/// A delegator's redelegated bonded token amount. +/// The map keys from outside in are: +/// +/// - redelegation destination validator +/// - redelegation epoch in which the redelegation started contributing to +/// destination validator +/// - redelegation source validator +/// - start epoch of the bond that's been redelegated +pub type DelegatorRedelegatedBonded = + NestedMap; + +/// A delegator's redelegated unbonded token amounts. +/// The map keys from outside in are: +/// +/// - redelegation destination validator +/// - redelegation epoch in which the redelegation started contributing to +/// destination validator +/// - withdrawal epoch of the unbond +/// - redelegation source validator +/// - start epoch of the bond that's been redelegated +pub type DelegatorRedelegatedUnbonded = + NestedMap>; + +/// In-memory map of redelegated bonds. 
+/// The map keys from outside in are: +/// +/// - src validator address +/// - src bond start epoch where it started contributing to src validator +pub type EagerRedelegatedBondsMap = + BTreeMap>; + #[derive( Debug, Clone, BorshSerialize, BorshDeserialize, Eq, Hash, PartialEq, )] -/// TODO: slashed amount for thing +/// Slashed amount of tokens. pub struct SlashedAmount { - /// Perlangus + /// Amount of tokens that were slashed. pub amount: token::Amount, - /// Churms + /// Infraction epoch from which the tokens were slashed pub epoch: Epoch, } @@ -216,6 +282,20 @@ pub type RewardsProducts = LazyMap; /// rewards owed over the course of an epoch) pub type RewardsAccumulator = LazyMap; +/// Eager data for a generic redelegation +#[derive(Debug)] +pub struct Redelegation { + /// Start epoch of the redelegation is the first epoch in which the + /// redelegated amount no longer contributes to the stake of source + /// validator and starts contributing to destination validator. + pub redel_bond_start: Epoch, + /// Source validator + pub src_validator: Address, + /// Start epoch of the redelgated bond + pub bond_start: Epoch, + /// Redelegation amount + pub amount: token::Amount, +} // -------------------------------------------------------------------------------------------- /// A genesis validator definition. @@ -237,6 +317,8 @@ pub struct GenesisValidator { pub tokens: token::Amount, /// A public key used for signing validator's consensus actions pub consensus_key: common::PublicKey, + /// A public key used for signing protocol transactions + pub protocol_key: common::PublicKey, /// An Eth bridge governance public key pub eth_cold_key: common::PublicKey, /// An Eth bridge hot signing public key used for validator set updates and diff --git a/scripts/generator.sh b/scripts/generator.sh index 3fe1792a49..c9635d498d 100755 --- a/scripts/generator.sh +++ b/scripts/generator.sh @@ -9,8 +9,10 @@ # vectors. 
NAMADA_DIR="$(pwd)" +NAMADA_BASE_DIR_FILE="$(pwd)/namada_base_dir" export NAMADA_LEDGER_LOG_PATH="$(pwd)/vectors.json" export NAMADA_TX_LOG_PATH="$(pwd)/debugs.txt" +export NAMADA_DEV=false if [ "$#" -ne 1 ]; then echo "Illegal number of parameters" @@ -19,11 +21,14 @@ elif [ "$1" = "server" ]; then sed -i 's/^epochs_per_year = 31_536_000$/epochs_per_year = 262_800/' genesis/test-vectors-single-node.toml - NAMADA_GENESIS_FILE=$(cargo run --bin namadac -- utils init-network --genesis-path genesis/test-vectors-single-node.toml --wasm-checksums-path wasm/checksums.json --chain-prefix e2e-test --unsafe-dont-encrypt --localhost --allow-duplicate-ip | grep 'Genesis file generated at ' | sed 's/^Genesis file generated at //') + NAMADA_GENESIS_FILE=$(cargo run --bin namadac --package namada_apps --manifest-path Cargo.toml -- utils init-network --genesis-path genesis/test-vectors-single-node.toml --wasm-checksums-path wasm/checksums.json --chain-prefix e2e-test --unsafe-dont-encrypt --localhost --dont-archive --allow-duplicate-ip | grep 'Genesis file generated at ' | sed 's/^Genesis file generated at //') rm genesis/test-vectors-single-node.toml NAMADA_BASE_DIR=${NAMADA_GENESIS_FILE%.toml} + echo $NAMADA_BASE_DIR > $NAMADA_BASE_DIR_FILE + + sed -i 's/^mode = "RemoteEndpoint"$/mode = "Off"/' $NAMADA_BASE_DIR/config.toml cp wasm/*.wasm $NAMADA_BASE_DIR/wasm/ @@ -31,8 +36,14 @@ elif [ "$1" = "server" ]; then cp $NAMADA_BASE_DIR/setup/other/wallet.toml $NAMADA_BASE_DIR/wallet.toml - cargo run --bin namadan -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada/ ledger + sed -i 's/^mode = "RemoteEndpoint"$/mode = "Off"/' $NAMADA_BASE_DIR/setup/validator-0/.namada/$(basename $NAMADA_BASE_DIR)/config.toml + + cargo run --bin namadan --package namada_apps --manifest-path Cargo.toml -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada/ ledger elif [ "$1" = "client" ]; then + if test -f "$NAMADA_BASE_DIR_FILE"; then + NAMADA_BASE_DIR="$(cat $NAMADA_BASE_DIR_FILE)" + fi + echo > 
$NAMADA_TX_LOG_PATH echo $'[' > $NAMADA_LEDGER_LOG_PATH @@ -40,120 +51,49 @@ elif [ "$1" = "client" ]; then ALBERT_ADDRESS=$(cargo run --bin namadaw -- address find --alias albert | sed 's/^Found address Established: //') echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" - }, - "grace_epoch":30, - "type":{ - "Default":"'$NAMADA_DIR'/wasm_for_tests/tx_proposal_code.wasm" - }, - "voting_end_epoch":24, - "voting_start_epoch":12 -} -' > proposal_submission_valid_proposal.json - + "proposal": { + "author":"'$ALBERT_ADDRESS'", + "content":{ + "abstract":"Ut convallis eleifend orci vel venenatis. 
Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", + "authors":"test@test.com", + "created":"2022-03-10T08:54:37Z", + "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", + "discussions-to":"www.github.com/anoma/aip/1", + "license":"MIT", + "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", + "requires":"2", + "title":"TheTitle" + }, + "grace_epoch":30, + "voting_end_epoch":24, + "voting_start_epoch":12 + } + }' > proposal_default.json + echo '{ - "content": { - "abstract": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. 
Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors": "test@test.com", - "created": "2022-03-10T08:54:37Z", - "details": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to": "www.github.com/anoma/aip/1", - "license": "MIT", - "motivation": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires": "2", - "title": "TheTitle" - }, - "author": "'$ALBERT_ADDRESS'", - "tally_epoch": 18, - "signature": { - "Ed25519": { - "R_bytes": [ - 113, - 196, - 231, - 134, - 101, - 191, - 75, - 17, - 245, - 19, - 50, - 231, - 183, - 80, - 162, - 38, - 108, - 72, - 72, - 2, - 116, - 112, - 121, - 33, - 197, - 67, - 64, - 116, - 21, - 250, - 196, - 121 - ], - "s_bytes": [ - 87, - 163, - 134, - 87, - 42, - 156, - 121, - 211, - 189, - 19, - 255, - 5, - 23, - 178, - 143, - 39, - 118, - 249, - 37, - 53, - 121, - 136, - 59, - 103, - 190, - 91, - 121, - 95, - 46, - 54, - 168, - 9 - ] + "data":['$(od -An -tu1 -v wasm_for_tests/tx_proposal_code.wasm | tr '\n' ' ' | sed 's/\b\s\+\b/,/g')'], + "proposal": { + "author":"'$ALBERT_ADDRESS'", + "content":{ + "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. 
Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", + "authors":"test@test.com", + "created":"2022-03-10T08:54:37Z", + "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", + "discussions-to":"www.github.com/anoma/aip/1", + "license":"MIT", + "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", + "requires":"2", + "title":"TheTitle" + }, + "grace_epoch":30, + "voting_end_epoch":24, + "voting_start_epoch":12 } - }, - "address": "'$ALBERT_ADDRESS'" -} -' > proposal_offline_proposal + }' > proposal_default_with_data.json echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ + "author":"'$ALBERT_ADDRESS'", + "content":{ "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. 
Sed odio diam, ornare nec sapien eget, congue viverra enim.", "authors":"test@test.com", "created":"2022-03-10T08:54:37Z", @@ -164,59 +104,41 @@ elif [ "$1" = "client" ]; then "requires":"2", "title":"TheTitle" }, - "grace_epoch":18, - "type":{ - "Default":null - }, - "voting_end_epoch":9, - "voting_start_epoch":3 -}' > proposal_offline_valid_proposal.json + "tally_epoch":1 + }' > proposal_offline.json echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" + "proposal": { + "author":"'$ALBERT_ADDRESS'", + "content":{ + "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. 
Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", + "authors":"test@test.com", + "created":"2022-03-10T08:54:37Z", + "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", + "discussions-to":"www.github.com/anoma/aip/1", + "license":"MIT", + "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", + "requires":"2", + "title":"TheTitle" + }, + "grace_epoch":30, + "voting_end_epoch":24, + "voting_start_epoch":12 }, - "grace_epoch":30, - "type":"ETHBridge", - "voting_end_epoch":24, - "voting_start_epoch":12 -}' > eth_governance_proposal_valid_proposal.json + "data": {"add":"'$ALBERT_ADDRESS'","remove":[]} + }' > proposal_pgf_steward_add.json - echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. 
Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" - }, - "grace_epoch":30, - "type":"PGFCouncil", - "voting_end_epoch":24, - "voting_start_epoch":12 -}' > pgf_governance_proposal_valid_proposal.json + # proposal_default - # proposal_submission + cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-token NAM --node 127.0.0.1:27657 - cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- unjail-validator --validator Bertha --gas-token NAM --force --node 127.0.0.1:27657 - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.02 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.02 --gas-token NAM --force --node 127.0.0.1:27657 - PROPOSAL_ID_0=$(cargo run --bin 
namadac --features std -- init-proposal --force --data-path proposal_submission_valid_proposal.json --node 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + PROPOSAL_ID_0=$(cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_default.json --node 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + + cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_default_with_data.json --node 127.0.0.1:27657 cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address validator-0 --node 127.0.0.1:27657 @@ -226,41 +148,29 @@ elif [ "$1" = "client" ]; then # proposal_offline - cargo run --bin namadac --features std -- bond --validator validator-0 --source Albert --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- change-commission-rate --validator Albert --commission-rate 0.05 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_offline_valid_proposal.json --offline --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- bond --validator validator-0 --source Albert --amount 900 --gas-token NAM --node 127.0.0.1:27657 - cargo run --bin namadac --features std -- vote-proposal --data-path proposal_offline_proposal --vote yay --address Albert --offline --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Albert --commission-rate 0.05 --gas-token NAM --force --node 127.0.0.1:27657 - # eth_governance_proposal + PROPOSAL_OFFLINE_SIGNED=$(cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_offline.json --signing-keys albert-key --offline --node 127.0.0.1:27657 | grep -o -P '(?<=Proposal serialized to:\s).*') - cargo run 
--bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.07 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 - - PROPOSAL_ID_0=$(cargo run --bin namadac --features std -- init-proposal --force --data-path eth_governance_proposal_valid_proposal.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id 0 --vote yay --eth '011586062748ba53bc53155e817ec1ea708de75878dcb9a5713bf6986d87fe14e7 fd34672ab5' --address Bertha --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --eth '011586062748ba53bc53155e817ec1ea708de75878dcb9a5713bf6986d87fe14e7 fd34672ab5' --address validator-0 --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- vote-proposal --data-path $PROPOSAL_OFFLINE_SIGNED --vote yay --address Albert --offline --node 127.0.0.1:27657 # pgf_governance_proposal - cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-amount 0 --gas-limit 0 --gas-token NAM --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-token NAM --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.09 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.09 --gas-token NAM --force --node 127.0.0.1:27657 - PROPOSAL_ID_0=$(cargo run --bin 
namadac --features std -- init-proposal --force --data-path pgf_governance_proposal_valid_proposal.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + PROPOSAL_ID_0=$(cargo run --bin namadac --features std -- init-proposal --pgf-stewards --force --data-path proposal_pgf_steward_add.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - PROPOSAL_ID_1=$(cargo run --bin namadac --features std -- init-proposal --force --data-path pgf_governance_proposal_valid_proposal.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') + PROPOSAL_ID_1=$(cargo run --bin namadac --features std -- init-proposal --pgf-stewards --force --data-path proposal_pgf_steward_add.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --pgf "$ALBERT_ADDRESS 1000" --address validator-0 --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address validator-0 --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --pgf "$ALBERT_ADDRESS 900" --address Bertha --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address Bertha --signing-keys bertha-key --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_1 --vote yay --pgf "$ALBERT_ADDRESS 900" --address Bertha --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_1 --vote yay --address Bertha --signing-keys bertha-key 
--ledger-address 127.0.0.1:27657 # non-proposal tests @@ -268,24 +178,38 @@ elif [ "$1" = "client" ]; then cargo run --bin namadac --features std -- bond --validator bertha --amount 25 --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.11 --gas-amount 0 --gas-limit 0 --gas-token NAM --force --node 127.0.0.1:27657 + cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.11 --gas-token NAM --force --node 127.0.0.1:27657 cargo run --bin namadac --features std -- reveal-pk --public-key albert-key --gas-payer albert-key --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- init-validator --alias bertha-validator --account-keys bertha --commission-rate 0.05 --max-commission-rate-change 0.01 --signing-keys bertha-key --unsafe-dont-encrypt --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --public-keys albert-key,bertha-key --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --public-keys albert-key,bertha-key,christel-key --threshold 2 --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- init-validator --alias bertha-validator --account-keys bertha-key --commission-rate 0.05 --max-commission-rate-change 0.01 --signing-keys bertha-key --unsafe-dont-encrypt --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- init-validator --alias validator-mult --account-keys albert-key,bertha-key --commission-rate 0.05 --max-commission-rate-change 0.01 --signing-keys 
albert-key,bertha-key --threshold 2 --unsafe-dont-encrypt --force --ledger-address 127.0.0.1:27657 + # TODO works but panics cargo run --bin namadac --features std -- unbond --validator christel --amount 5 --signing-keys christel-key --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- withdraw --validator albert --signing-keys albert-key --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- init-account --alias albert-account --public-keys albert-key --signing-keys albert-key --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- tx --code-path $NAMADA_DIR/wasm_for_tests/tx_no_op.wasm --data-path README.md --signing-keys albert-key --owner albert --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- init-account --alias account-mul --public-keys albert-key,bertha-key,christel-key --signing-keys albert-key,bertha-key,christel-key --threshold 2 --force --ledger-address 127.0.0.1:27657 + + # TODO panics, no vector produced + # cargo run --bin namadac --features std -- tx --code-path $NAMADA_DIR/wasm_for_tests/tx_no_op.wasm --data-path README.md --signing-keys albert-key --owner albert --force --ledger-address 127.0.0.1:27657 cargo run --bin namadac --features std -- ibc-transfer --source bertha --receiver christel --token btc --amount 24 --channel-id channel-141 --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadac --features std -- ibc-transfer --source albert --receiver bertha --token nam --amount 100000 --channel-id channel-0 --port-id transfer --signing-keys albert-key --force --ledger-address 127.0.0.1:27657 + + cargo run --bin namadac --features std -- ibc-transfer --source albert --receiver bertha --token nam --amount 100000 --channel-id channel-0 --port-id transfer --signing-keys albert-key --timeout-sec-offset 5 --force --ledger-address 127.0.0.1:27657 + cargo run --bin namadaw -- masp add --alias 
a_spending_key --value xsktest1qqqqqqqqqqqqqq9v0sls5r5de7njx8ehu49pqgmqr9ygelg87l5x8y4s9r0pjlvu69au6gn3su5ewneas486hdccyayx32hxvt64p3d0hfuprpgcgv2q9gdx3jvxrn02f0nnp3jtdd6f5vwscfuyum083cvfv4jun75ak5sdgrm2pthzj3sflxc0jx0edrakx3vdcngrfjmru8ywkguru8mxss2uuqxdlglaz6undx5h8w7g70t2es850g48xzdkqay5qs0yw06rtxcvedhsv --unsafe-dont-encrypt cargo run --bin namadaw -- masp add --alias b_spending_key --value xsktest1qqqqqqqqqqqqqqpagte43rsza46v55dlz8cffahv0fnr6eqacvnrkyuf9lmndgal7c2k4r7f7zu2yr5rjwr374unjjeuzrh6mquzy6grfdcnnu5clzaq2llqhr70a8yyx0p62aajqvrqjxrht3myuyypsvm725uyt5vm0fqzrzuuedtf6fala4r4nnazm9y9hq5yu6pq24arjskmpv4mdgfn3spffxxv8ugvym36kmnj45jcvvmm227vqjm5fq8882yhjsq97p7xrwqqd82s0 --unsafe-dont-encrypt @@ -296,27 +220,31 @@ elif [ "$1" = "client" ]; then cargo run --bin namadaw -- masp add --alias bb_payment_address --value patest1vqe0vyxh6wmhahwa52gthgd6edgqxfmgyv8e94jtwn55mdvpvylcyqnp59595272qrz3zxn0ysg + # TODO vector produced only when epoch boundaries not straddled cargo run --bin namadac --features std -- transfer --source albert --target aa_payment_address --token btc --amount 20 --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- transfer --source a_spending_key --target ab_payment_address --token btc --amount 7 --force --ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target ab_payment_address --token btc --amount 7 --force --ledger-address 127.0.0.1:27657 - until cargo run --bin namadac -- epoch --ledger-address 127.0.0.1:27657 | grep -m1 "Last committed epoch: 2" ; do sleep 10 ; done; + # TODO fragile + until cargo run --bin namadac -- epoch --ledger-address 127.0.0.1:27657 | grep -m1 "Last committed epoch: 2" ; do sleep 10 ; done; - cargo run --bin namadac --features std -- transfer --source a_spending_key --target bb_payment_address --token btc --amount 7 --force 
--ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target bb_payment_address --token btc --amount 7 --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- transfer --source a_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 - cargo run --bin namadac --features std -- transfer --source b_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 + # TODO vector produced only when epoch boundaries not straddled + cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source b_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 - rm proposal_submission_valid_proposal.json - - rm proposal_offline_proposal - - rm proposal_offline_valid_proposal.json + rm -f proposal_default.json + + rm -f proposal_default_with_data.json - rm eth_governance_proposal_valid_proposal.json + rm -f proposal_offline.json - rm pgf_governance_proposal_valid_proposal.json + rm -f proposal_pgf_steward_add.json perl -0777 -i.original -pe 's/,\s*$//igs' $NAMADA_LEDGER_LOG_PATH diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml new file mode 100644 index 0000000000..f9df559c8a --- /dev/null +++ b/sdk/Cargo.toml @@ -0,0 +1,122 @@ +[package] +name = "namada_sdk" +description = "The main Namada SDK crate" +resolver = "2" +authors.workspace = true +edition.workspace = true +documentation.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true 
+repository.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] +abciplus = [ + "namada_core/abciplus", + "namada_proof_of_stake/abciplus", + "namada_ethereum_bridge/abciplus", +] + +ferveo-tpke = [ + "namada_core/ferveo-tpke", +] + +masp-tx-gen = [ + "rand", + "rand_core", +] + +multicore = ["masp_proofs/multicore"] + +namada-sdk = [ + "tendermint-rpc", + "masp-tx-gen", + "ferveo-tpke", + "masp_primitives/transparent-inputs" +] + +std = ["fd-lock"] + +# tendermint-rpc support +tendermint-rpc = [ + "async-client", + "dep:tendermint-rpc", +] + +wasm-runtime = [ + "namada_core/wasm-runtime", +] + +# Enable queries support for an async client +async-client = [ + "async-trait", +] + +ibc-mocks = [ + "namada_core/ibc-mocks", +] + +# for integration tests and test utilies +testing = [ + "namada_core/testing", + "namada_ethereum_bridge/testing", + "namada_proof_of_stake/testing", + "async-client", + "rand_core", + "rand", +] + +[dependencies] +async-trait = {version = "0.1.51", optional = true} +bimap.workspace = true +borsh.workspace = true +borsh-ext.workspace = true +circular-queue.workspace = true +data-encoding.workspace = true +derivation-path.workspace = true +ethbridge-bridge-contract.workspace = true +ethers.workspace = true +fd-lock = { workspace = true, optional = true } +futures.workspace = true +itertools.workspace = true +masp_primitives.workspace = true +masp_proofs = { workspace = true, features = ["download-params"] } +namada_core = {path = "../core", default-features = false} +namada_ethereum_bridge = {path = "../ethereum_bridge", default-features = false} +namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} +num256.workspace = true +orion.workspace = true +owo-colors = "3.5.0" +parse_duration = "2.1.1" +paste.workspace = true +prost.workspace = true +rand = {optional = true, workspace = true} +rand_core = {optional = true, 
workspace = true} +ripemd.workspace = true +serde.workspace = true +serde_json.workspace = true +sha2.workspace = true +slip10_ed25519.workspace = true +tendermint-rpc = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "b7d1e5afc6f2ccb3fd1545c2174bab1cc48d7fa7", default-features = false, features = ["trait-client"], optional = true} +thiserror.workspace = true +tiny-bip39.workspace = true +tiny-hderive.workspace = true +toml.workspace = true +tracing.workspace = true +zeroize.workspace = true + +[target.'cfg(not(target_family = "wasm"))'.dependencies] +tokio = {workspace = true, features = ["full"]} + +[target.'cfg(target_family = "wasm")'.dependencies] +tokio = {workspace = true, default-features = false, features = ["sync"]} +wasmtimer = "0.2.0" + +[dev-dependencies] +assert_matches.workspace = true +namada_test_utils = {path = "../test_utils"} +tempfile.workspace = true diff --git a/sdk/src/args.rs b/sdk/src/args.rs new file mode 100644 index 0000000000..1ef3aff0be --- /dev/null +++ b/sdk/src/args.rs @@ -0,0 +1,2016 @@ +//! 
Structures encapsulating SDK arguments + +use std::collections::HashMap; +use std::path::PathBuf; +use std::time::Duration as StdDuration; + +use namada_core::ledger::governance::cli::onchain::{ + DefaultProposal, PgfFundingProposal, PgfStewardProposal, +}; +use namada_core::types::address::Address; +use namada_core::types::chain::ChainId; +use namada_core::types::dec::Dec; +use namada_core::types::ethereum_events::EthAddress; +use namada_core::types::keccak::KeccakHash; +use namada_core::types::key::{common, SchemeType}; +use namada_core::types::masp::MaspValue; +use namada_core::types::storage::Epoch; +use namada_core::types::time::DateTimeUtc; +use namada_core::types::transaction::GasLimit; +use namada_core::types::{storage, token}; +use serde::{Deserialize, Serialize}; +use zeroize::Zeroizing; + +use crate::eth_bridge::bridge_pool; +use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::signing::SigningTxData; +use crate::{rpc, tx, Namada}; + +/// [`Duration`](StdDuration) wrapper that provides a +/// method to parse a value from a string. 
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +#[repr(transparent)] +pub struct Duration(pub StdDuration); + +impl ::std::str::FromStr for Duration { + type Err = ::parse_duration::parse::Error; + + #[inline] + fn from_str(s: &str) -> Result { + ::parse_duration::parse(s).map(Duration) + } +} + +/// Abstraction of types being used in Namada +pub trait NamadaTypes: Clone + std::fmt::Debug { + /// Represents an address on the ledger + type Address: Clone + std::fmt::Debug; + /// Represents the address of a native token + type NativeAddress: Clone + std::fmt::Debug; + /// Represents a key pair + type Keypair: Clone + std::fmt::Debug; + /// Represents the address of a Tendermint endpoint + type TendermintAddress: Clone + std::fmt::Debug; + /// Represents the address of an Ethereum endpoint + type EthereumAddress: Clone + std::fmt::Debug; + /// Represents a viewing key + type ViewingKey: Clone + std::fmt::Debug; + /// Represents the owner of a balance + type BalanceOwner: Clone + std::fmt::Debug; + /// Represents a public key + type PublicKey: Clone + std::fmt::Debug; + /// Represents the source of a Transfer + type TransferSource: Clone + std::fmt::Debug; + /// Represents the target of a Transfer + type TransferTarget: Clone + std::fmt::Debug; + /// Represents some data that is used in a transaction + type Data: Clone + std::fmt::Debug; + /// Bridge pool recommendations conversion rates table. + type BpConversionTable: Clone + std::fmt::Debug; +} + +/// The concrete types being used in Namada SDK +#[derive(Clone, Debug)] +pub struct SdkTypes; + +/// An entry in the Bridge pool recommendations conversion +/// rates table. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct BpConversionTableEntry { + /// An alias for the token, or the string representation + /// of its address if none is available. + pub alias: String, + /// Conversion rate from the given token to gwei. 
+ pub conversion_rate: f64, +} + +impl NamadaTypes for SdkTypes { + type Address = Address; + type BalanceOwner = namada_core::types::masp::BalanceOwner; + type BpConversionTable = HashMap; + type Data = Vec; + type EthereumAddress = (); + type Keypair = namada_core::types::key::common::SecretKey; + type NativeAddress = Address; + type PublicKey = namada_core::types::key::common::PublicKey; + type TendermintAddress = (); + type TransferSource = namada_core::types::masp::TransferSource; + type TransferTarget = namada_core::types::masp::TransferTarget; + type ViewingKey = namada_core::types::masp::ExtendedViewingKey; +} + +/// Common query arguments +#[derive(Clone, Debug)] +pub struct Query { + /// The address of the ledger node as host:port + pub ledger_address: C::TendermintAddress, +} + +/// Transaction associated results arguments +#[derive(Clone, Debug)] +pub struct QueryResult { + /// Common query args + pub query: Query, + /// Hash of transaction to lookup + pub tx_hash: String, +} + +/// Custom transaction arguments +#[derive(Clone, Debug)] +pub struct TxCustom { + /// Common tx arguments + pub tx: Tx, + /// Path to the tx WASM code file + pub code_path: Option, + /// Path to the data file + pub data_path: Option, + /// Path to the serialized transaction + pub serialized_tx: Option, + /// The address that correspond to the signatures/signing-keys + pub owner: C::Address, +} + +impl TxBuilder for TxCustom { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + TxCustom { + tx: func(self.tx), + ..self + } + } +} + +impl TxCustom { + /// Path to the tx WASM code file + pub fn code_path(self, code_path: PathBuf) -> Self { + Self { + code_path: Some(code_path), + ..self + } + } + + /// Path to the data file + pub fn data_path(self, data_path: C::Data) -> Self { + Self { + data_path: Some(data_path), + ..self + } + } + + /// Path to the serialized transaction + pub fn serialized_tx(self, serialized_tx: C::Data) -> Self { + Self { + serialized_tx: 
Some(serialized_tx), + ..self + } + } + + /// The address that correspond to the signatures/signing-keys + pub fn owner(self, owner: C::Address) -> Self { + Self { owner, ..self } + } +} + +impl TxCustom { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_custom(context, self).await + } +} + +/// An amount read in by the cli +#[derive(Copy, Clone, Debug)] +pub enum InputAmount { + /// An amount whose representation has been validated + /// against the allowed representation in storage + Validated(token::DenominatedAmount), + /// The parsed amount read in from the cli. It has + /// not yet been validated against the allowed + /// representation in storage. + Unvalidated(token::DenominatedAmount), +} + +impl std::str::FromStr for InputAmount { + type Err = ::Err; + + fn from_str(s: &str) -> Result { + token::DenominatedAmount::from_str(s).map(InputAmount::Unvalidated) + } +} + +impl From for InputAmount { + fn from(amt: token::DenominatedAmount) -> Self { + InputAmount::Unvalidated(amt) + } +} + +/// Transfer transaction arguments +#[derive(Clone, Debug)] +pub struct TxTransfer { + /// Common tx arguments + pub tx: Tx, + /// Transfer source address + pub source: C::TransferSource, + /// Transfer target address + pub target: C::TransferTarget, + /// Transferred token address + pub token: C::Address, + /// Transferred token amount + pub amount: InputAmount, + /// Native token address + pub native_token: C::NativeAddress, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + +impl TxBuilder for TxTransfer { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + TxTransfer { + tx: func(self.tx), + ..self + } + } +} + +impl TxTransfer { + /// Transfer source address + pub fn source(self, source: C::TransferSource) -> Self { + Self { source, ..self } + } + + /// Transfer target address + pub fn 
receiver(self, target: C::TransferTarget) -> Self { + Self { target, ..self } + } + + /// Transferred token address + pub fn token(self, token: C::Address) -> Self { + Self { token, ..self } + } + + /// Transferred token amount + pub fn amount(self, amount: InputAmount) -> Self { + Self { amount, ..self } + } + + /// Native token address + pub fn native_token(self, native_token: C::NativeAddress) -> Self { + Self { + native_token, + ..self + } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl TxTransfer { + /// Build a transaction from this builder + pub async fn build<'a>( + &mut self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_transfer(context, self).await + } +} + +/// IBC transfer transaction arguments +#[derive(Clone, Debug)] +pub struct TxIbcTransfer { + /// Common tx arguments + pub tx: Tx, + /// Transfer source address + pub source: C::Address, + /// Transfer target address + pub receiver: String, + /// Transferred token address + pub token: C::Address, + /// Transferred token amount + pub amount: InputAmount, + /// Port ID + pub port_id: PortId, + /// Channel ID + pub channel_id: ChannelId, + /// Timeout height of the destination chain + pub timeout_height: Option, + /// Timeout timestamp offset + pub timeout_sec_offset: Option, + /// Memo + pub memo: Option, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + +impl TxBuilder for TxIbcTransfer { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + TxIbcTransfer { + tx: func(self.tx), + ..self + } + } +} + +impl TxIbcTransfer { + /// Transfer source address + pub fn source(self, source: C::Address) -> Self { + Self { source, ..self } + } + + /// Transfer target address + pub fn receiver(self, receiver: String) -> Self { + Self { receiver, ..self } + } + + /// Transferred token address + pub fn 
token(self, token: C::Address) -> Self { + Self { token, ..self } + } + + /// Transferred token amount + pub fn amount(self, amount: InputAmount) -> Self { + Self { amount, ..self } + } + + /// Port ID + pub fn port_id(self, port_id: PortId) -> Self { + Self { port_id, ..self } + } + + /// Channel ID + pub fn channel_id(self, channel_id: ChannelId) -> Self { + Self { channel_id, ..self } + } + + /// Timeout height of the destination chain + pub fn timeout_height(self, timeout_height: u64) -> Self { + Self { + timeout_height: Some(timeout_height), + ..self + } + } + + /// Timeout timestamp offset + pub fn timeout_sec_offset(self, timeout_sec_offset: u64) -> Self { + Self { + timeout_sec_offset: Some(timeout_sec_offset), + ..self + } + } + + /// Memo + pub fn memo(self, memo: String) -> Self { + Self { + memo: Some(memo), + ..self + } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl TxIbcTransfer { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_ibc_transfer(context, self).await + } +} + +/// Transaction to initialize create a new proposal +#[derive(Clone, Debug)] +pub struct InitProposal { + /// Common tx arguments + pub tx: Tx, + /// The proposal data + pub proposal_data: C::Data, + /// Native token address + pub native_token: C::NativeAddress, + /// Flag if proposal should be run offline + pub is_offline: bool, + /// Flag if proposal is of type Pgf stewards + pub is_pgf_stewards: bool, + /// Flag if proposal is of type Pgf funding + pub is_pgf_funding: bool, + /// Path to the tx WASM file + pub tx_code_path: PathBuf, +} + +impl TxBuilder for InitProposal { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + InitProposal { + tx: func(self.tx), + ..self + } + } +} + +impl InitProposal { + /// The 
proposal data + pub fn proposal_data(self, proposal_data: C::Data) -> Self { + Self { + proposal_data, + ..self + } + } + + /// Native token address + pub fn native_token(self, native_token: C::NativeAddress) -> Self { + Self { + native_token, + ..self + } + } + + /// Flag if proposal should be run offline + pub fn is_offline(self, is_offline: bool) -> Self { + Self { is_offline, ..self } + } + + /// Flag if proposal is of type Pgf stewards + pub fn is_pgf_stewards(self, is_pgf_stewards: bool) -> Self { + Self { + is_pgf_stewards, + ..self + } + } + + /// Flag if proposal is of type Pgf funding + pub fn is_pgf_funding(self, is_pgf_funding: bool) -> Self { + Self { + is_pgf_funding, + ..self + } + } + + /// Path to the tx WASM file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl InitProposal { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + let current_epoch = rpc::query_epoch(context.client()).await?; + let governance_parameters = + rpc::query_governance_parameters(context.client()).await; + + if self.is_pgf_funding { + let proposal = PgfFundingProposal::try_from( + self.proposal_data.as_ref(), + ) + .map_err(|e| { + crate::error::TxError::FailedGovernaneProposalDeserialize( + e.to_string(), + ) + })? 
+ .validate(&governance_parameters, current_epoch, self.tx.force) + .map_err(|e| { + crate::error::TxError::InvalidProposal(e.to_string()) + })?; + + tx::build_pgf_funding_proposal(context, self, proposal).await + } else if self.is_pgf_stewards { + let proposal = PgfStewardProposal::try_from( + self.proposal_data.as_ref(), + ) + .map_err(|e| { + crate::error::TxError::FailedGovernaneProposalDeserialize( + e.to_string(), + ) + })?; + let nam_address = context.native_token(); + let author_balance = rpc::get_token_balance( + context.client(), + &nam_address, + &proposal.proposal.author, + ) + .await?; + let proposal = proposal + .validate( + &governance_parameters, + current_epoch, + author_balance, + self.tx.force, + ) + .map_err(|e| { + crate::error::TxError::InvalidProposal(e.to_string()) + })?; + + tx::build_pgf_stewards_proposal(context, self, proposal).await + } else { + let proposal = DefaultProposal::try_from( + self.proposal_data.as_ref(), + ) + .map_err(|e| { + crate::error::TxError::FailedGovernaneProposalDeserialize( + e.to_string(), + ) + })?; + let nam_address = context.native_token(); + let author_balance = rpc::get_token_balance( + context.client(), + &nam_address, + &proposal.proposal.author, + ) + .await?; + let proposal = proposal + .validate( + &governance_parameters, + current_epoch, + author_balance, + self.tx.force, + ) + .map_err(|e| { + crate::error::TxError::InvalidProposal(e.to_string()) + })?; + tx::build_default_proposal(context, self, proposal).await + } + } +} + +/// Transaction to vote on a proposal +#[derive(Clone, Debug)] +pub struct VoteProposal { + /// Common tx arguments + pub tx: Tx, + /// Proposal id + pub proposal_id: Option, + /// The vote + pub vote: String, + /// The address of the voter + pub voter: C::Address, + /// Flag if proposal vote should be run offline + pub is_offline: bool, + /// The proposal file path + pub proposal_data: Option, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + +impl TxBuilder 
for VoteProposal { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + VoteProposal { + tx: func(self.tx), + ..self + } + } +} + +impl VoteProposal { + /// Proposal id + pub fn proposal_id(self, proposal_id: u64) -> Self { + Self { + proposal_id: Some(proposal_id), + ..self + } + } + + /// The vote + pub fn vote(self, vote: String) -> Self { + Self { vote, ..self } + } + + /// The address of the voter + pub fn voter(self, voter: C::Address) -> Self { + Self { voter, ..self } + } + + /// Flag if proposal vote should be run offline + pub fn is_offline(self, is_offline: bool) -> Self { + Self { is_offline, ..self } + } + + /// The proposal file path + pub fn proposal_data(self, proposal_data: C::Data) -> Self { + Self { + proposal_data: Some(proposal_data), + ..self + } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl VoteProposal { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + let current_epoch = rpc::query_epoch(context.client()).await?; + tx::build_vote_proposal(context, self, current_epoch).await + } +} + +/// Transaction to initialize a new account +#[derive(Clone, Debug)] +pub struct TxInitAccount { + /// Common tx arguments + pub tx: Tx, + /// Path to the VP WASM code file for the new account + pub vp_code_path: PathBuf, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, + /// Public key for the new account + pub public_keys: Vec, + /// The account multisignature threshold + pub threshold: Option, +} + +/// Transaction to initialize a new account +#[derive(Clone, Debug)] +pub struct TxInitValidator { + /// Common tx arguments + pub tx: Tx, + /// Signature scheme + pub scheme: SchemeType, + /// Account keys + pub account_keys: Vec, + /// The account multisignature threshold + pub threshold: 
Option, + /// Consensus key + pub consensus_key: Option, + /// Ethereum cold key + pub eth_cold_key: Option, + /// Ethereum hot key + pub eth_hot_key: Option, + /// Protocol key + pub protocol_key: Option, + /// Commission rate + pub commission_rate: Dec, + /// Maximum commission rate change + pub max_commission_rate_change: Dec, + /// Path to the VP WASM code file + pub validator_vp_code_path: PathBuf, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, + /// Don't encrypt the keypair + pub unsafe_dont_encrypt: bool, +} + +/// Transaction to update a VP arguments +#[derive(Clone, Debug)] +pub struct TxUpdateAccount { + /// Common tx arguments + pub tx: Tx, + /// Path to the VP WASM code file + pub vp_code_path: Option, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, + /// Address of the account whose VP is to be updated + pub addr: C::Address, + /// Public keys + pub public_keys: Vec, + /// The account threshold + pub threshold: Option, +} + +impl TxBuilder for TxUpdateAccount { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + TxUpdateAccount { + tx: func(self.tx), + ..self + } + } +} + +impl TxUpdateAccount { + /// Path to the VP WASM code file + pub fn vp_code_path(self, vp_code_path: PathBuf) -> Self { + Self { + vp_code_path: Some(vp_code_path), + ..self + } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } + + /// Address of the account whose VP is to be updated + pub fn addr(self, addr: C::Address) -> Self { + Self { addr, ..self } + } + + /// Public keys + pub fn public_keys(self, public_keys: Vec) -> Self { + Self { + public_keys, + ..self + } + } + + /// The account threshold + pub fn threshold(self, threshold: u8) -> Self { + Self { + threshold: Some(threshold), + ..self + } + } +} + +impl TxUpdateAccount { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, 
+ ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_update_account(context, self).await + } +} + +/// Bond arguments +#[derive(Clone, Debug)] +pub struct Bond { + /// Common tx arguments + pub tx: Tx, + /// Validator address + pub validator: C::Address, + /// Amount of tokens to stake in a bond + pub amount: token::Amount, + /// Source address for delegations. For self-bonds, the validator is + /// also the source. + pub source: Option, + /// Native token address + pub native_token: C::NativeAddress, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + +impl TxBuilder for Bond { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + Bond { + tx: func(self.tx), + ..self + } + } +} + +impl Bond { + /// Validator address + pub fn validator(self, validator: C::Address) -> Self { + Self { validator, ..self } + } + + /// Amount of tokens to stake in a bond + pub fn amount(self, amount: token::Amount) -> Self { + Self { amount, ..self } + } + + /// Source address for delegations. For self-bonds, the validator is + /// also the source. 
+ pub fn source(self, source: C::Address) -> Self { + Self { + source: Some(source), + ..self + } + } + + /// Native token address + pub fn native_token(self, native_token: C::NativeAddress) -> Self { + Self { + native_token, + ..self + } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl Bond { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_bond(context, self).await + } +} + +/// Unbond arguments +#[derive(Clone, Debug)] +pub struct Unbond { + /// Common tx arguments + pub tx: Tx, + /// Validator address + pub validator: C::Address, + /// Amount of tokens to unbond from a bond + pub amount: token::Amount, + /// Source address for unbonding from delegations. For unbonding from + /// self-bonds, the validator is also the source + pub source: Option, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + +impl Unbond { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<( + crate::proto::Tx, + SigningTxData, + Option, + Option<(Epoch, token::Amount)>, + )> { + tx::build_unbond(context, self).await + } +} + +impl TxBuilder for Unbond { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + Unbond { + tx: func(self.tx), + ..self + } + } +} + +impl Unbond { + /// Validator address + pub fn validator(self, validator: C::Address) -> Self { + Self { validator, ..self } + } + + /// Amount of tokens to unbond from a bond + pub fn amount(self, amount: token::Amount) -> Self { + Self { amount, ..self } + } + + /// Source address for unbonding from delegations. 
For unbonding from + /// self-bonds, the validator is also the source + pub fn source(self, source: C::Address) -> Self { + Self { + source: Some(source), + ..self + } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +/// Redelegation arguments +#[derive(Clone, Debug)] +pub struct Redelegate { + /// Common tx arguments + pub tx: Tx, + /// Source validator address + pub src_validator: C::Address, + /// Destination validator address + pub dest_validator: C::Address, + /// Owner of the bonds that are being redelegated + pub owner: C::Address, + /// The amount of tokens to redelegate + pub amount: token::Amount, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + +impl Redelegate { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { + tx::build_redelegation(context, self).await + } +} + +/// Reveal public key +#[derive(Clone, Debug)] +pub struct RevealPk { + /// Common tx arguments + pub tx: Tx, + /// A public key to be revealed on-chain + pub public_key: C::PublicKey, +} + +impl TxBuilder for RevealPk { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + RevealPk { + tx: func(self.tx), + ..self + } + } +} + +impl RevealPk { + /// A public key to be revealed on-chain + pub fn public_key(self, public_key: C::PublicKey) -> Self { + Self { public_key, ..self } + } +} + +impl RevealPk { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_reveal_pk(context, &self.tx, &self.public_key).await + } +} + +/// Query proposal +#[derive(Clone, Debug)] +pub struct QueryProposal { + /// Common query args + pub query: Query, + /// Proposal id + pub proposal_id: Option, +} + +/// Query protocol 
parameters +#[derive(Clone, Debug)] +pub struct QueryProtocolParameters { + /// Common query args + pub query: Query, +} + +/// Query pgf data +#[derive(Clone, Debug)] +pub struct QueryPgf { + /// Common query args + pub query: Query, +} + +/// Withdraw arguments +#[derive(Clone, Debug)] +pub struct Withdraw { + /// Common tx arguments + pub tx: Tx, + /// Validator address + pub validator: C::Address, + /// Source address for withdrawing from delegations. For withdrawing + /// from self-bonds, the validator is also the source + pub source: Option, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + +impl TxBuilder for Withdraw { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + Withdraw { + tx: func(self.tx), + ..self + } + } +} + +impl Withdraw { + /// Validator address + pub fn validator(self, validator: C::Address) -> Self { + Self { validator, ..self } + } + + /// Source address for withdrawing from delegations. For withdrawing + /// from self-bonds, the validator is also the source + pub fn source(self, source: C::Address) -> Self { + Self { + source: Some(source), + ..self + } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl Withdraw { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_withdraw(context, self).await + } +} + +/// Query asset conversions +#[derive(Clone, Debug)] +pub struct QueryConversions { + /// Common query args + pub query: Query, + /// Address of a token + pub token: Option, + /// Epoch of the asset + pub epoch: Option, +} + +/// Query token balance(s) +#[derive(Clone, Debug)] +pub struct QueryAccount { + /// Common query args + pub query: Query, + /// Address of an owner + pub owner: C::Address, +} + +/// Query token balance(s) +#[derive(Clone, Debug)] 
+pub struct QueryBalance { + /// Common query args + pub query: Query, + /// Address of an owner + pub owner: Option, + /// Address of a token + pub token: Option, + /// Whether not to convert balances + pub no_conversions: bool, +} + +/// Query historical transfer(s) +#[derive(Clone, Debug)] +pub struct QueryTransfers { + /// Common query args + pub query: Query, + /// Address of an owner + pub owner: Option, + /// Address of a token + pub token: Option, +} + +/// Query PoS bond(s) +#[derive(Clone, Debug)] +pub struct QueryBonds { + /// Common query args + pub query: Query, + /// Address of an owner + pub owner: Option, + /// Address of a validator + pub validator: Option, +} + +/// Query PoS bonded stake +#[derive(Clone, Debug)] +pub struct QueryBondedStake { + /// Common query args + pub query: Query, + /// Address of a validator + pub validator: Option, + /// Epoch in which to find bonded stake + pub epoch: Option, +} + +/// Query the state of a validator (its validator set or if it is jailed) +#[derive(Clone, Debug)] +pub struct QueryValidatorState { + /// Common query args + pub query: Query, + /// Address of a validator + pub validator: C::Address, + /// Epoch in which to find the validator state + pub epoch: Option, +} + +#[derive(Clone, Debug)] +/// Commission rate change args +pub struct CommissionRateChange { + /// Common tx arguments + pub tx: Tx, + /// Validator address (should be self) + pub validator: C::Address, + /// Value to which the tx changes the commission rate + pub rate: Dec, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + +impl TxBuilder for CommissionRateChange { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + CommissionRateChange { + tx: func(self.tx), + ..self + } + } +} + +impl CommissionRateChange { + /// Validator address (should be self) + pub fn validator(self, validator: C::Address) -> Self { + Self { validator, ..self } + } + + /// Value to which the tx changes the commission rate + pub fn 
rate(self, rate: Dec) -> Self { + Self { rate, ..self } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl CommissionRateChange { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_validator_commission_change(context, self).await + } +} + +#[derive(Clone, Debug)] +/// Commission rate change args +pub struct UpdateStewardCommission { + /// Common tx arguments + pub tx: Tx, + /// Steward address + pub steward: C::Address, + /// Value to which the tx changes the commission rate + pub commission: C::Data, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + +impl TxBuilder for UpdateStewardCommission { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + UpdateStewardCommission { + tx: func(self.tx), + ..self + } + } +} + +impl UpdateStewardCommission { + /// Steward address + pub fn steward(self, steward: C::Address) -> Self { + Self { steward, ..self } + } + + /// Value to which the tx changes the commission rate + pub fn commission(self, commission: C::Data) -> Self { + Self { commission, ..self } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl UpdateStewardCommission { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_update_steward_commission(context, self).await + } +} + +#[derive(Clone, Debug)] +/// Commission rate change args +pub struct ResignSteward { + /// Common tx arguments + pub tx: Tx, + /// Validator address + pub steward: C::Address, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + +impl TxBuilder for 
ResignSteward { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + ResignSteward { + tx: func(self.tx), + ..self + } + } +} + +impl ResignSteward { + /// Validator address + pub fn steward(self, steward: C::Address) -> Self { + Self { steward, ..self } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl ResignSteward { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_resign_steward(context, self).await + } +} + +#[derive(Clone, Debug)] +/// Re-activate a jailed validator args +pub struct TxUnjailValidator { + /// Common tx arguments + pub tx: Tx, + /// Validator address (should be self) + pub validator: C::Address, + /// Path to the TX WASM code file + pub tx_code_path: PathBuf, +} + +impl TxBuilder for TxUnjailValidator { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + TxUnjailValidator { + tx: func(self.tx), + ..self + } + } +} + +impl TxUnjailValidator { + /// Validator address (should be self) + pub fn validator(self, validator: C::Address) -> Self { + Self { validator, ..self } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl TxUnjailValidator { + /// Build a transaction from this builder + pub async fn build<'a>( + &self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + tx::build_unjail_validator(context, self).await + } +} + +#[derive(Clone, Debug)] +/// Sign a transaction offline +pub struct SignTx { + /// Common tx arguments + pub tx: Tx, + /// Transaction data + pub tx_data: C::Data, + /// The account address + pub owner: C::Address, +} + +/// Query PoS commission rate +#[derive(Clone, Debug)] +pub struct 
QueryCommissionRate { + /// Common query args + pub query: Query, + /// Address of a validator + pub validator: C::Address, + /// Epoch in which to find commission rate + pub epoch: Option, +} + +/// Query PoS slashes +#[derive(Clone, Debug)] +pub struct QuerySlashes { + /// Common query args + pub query: Query, + /// Address of a validator + pub validator: Option, +} + +/// Query PoS delegations +#[derive(Clone, Debug)] +pub struct QueryDelegations { + /// Common query args + pub query: Query, + /// Address of an owner + pub owner: C::Address, +} + +/// Query PoS to find a validator +#[derive(Clone, Debug)] +pub struct QueryFindValidator { + /// Common query args + pub query: Query, + /// Tendermint address + pub tm_addr: String, +} + +/// Query the raw bytes of given storage key +#[derive(Clone, Debug)] +pub struct QueryRawBytes { + /// The storage key to query + pub storage_key: storage::Key, + /// Common query args + pub query: Query, +} + +/// Common transaction arguments +#[derive(Clone, Debug)] +pub struct Tx { + /// Simulate applying the transaction + pub dry_run: bool, + /// Simulate applying both the wrapper and inner transactions + pub dry_run_wrapper: bool, + /// Dump the transaction bytes to file + pub dump_tx: bool, + /// The output directory path to where serialize the data + pub output_folder: Option, + /// Submit the transaction even if it doesn't pass client checks + pub force: bool, + /// Do not wait for the transaction to be added to the blockchain + pub broadcast_only: bool, + /// The address of the ledger node as host:port + pub ledger_address: C::TendermintAddress, + /// If any new account is initialized by the tx, use the given alias to + /// save it in the wallet. + pub initialized_account_alias: Option, + /// Whether to force overwrite the above alias, if it is provided, in the + /// wallet. 
+ pub wallet_alias_force: bool, + /// The amount being payed (for gas unit) to include the transaction + pub fee_amount: Option, + /// The fee payer signing key + pub wrapper_fee_payer: Option, + /// The token in which the fee is being paid + pub fee_token: C::Address, + /// The optional spending key for fee unshielding + pub fee_unshield: Option, + /// The max amount of gas used to process tx + pub gas_limit: GasLimit, + /// The optional expiration of the transaction + pub expiration: Option, + /// Generate an ephimeral signing key to be used only once to sign a + /// wrapper tx + pub disposable_signing_key: bool, + /// The chain id for which the transaction is intended + pub chain_id: Option, + /// Sign the tx with the key for the given alias from your wallet + pub signing_keys: Vec, + /// List of signatures to attach to the transaction + pub signatures: Vec, + /// Path to the TX WASM code file to reveal PK + pub tx_reveal_code_path: PathBuf, + /// Sign the tx with the public key for the given alias from your wallet + pub verification_key: Option, + /// Password to decrypt key + pub password: Option>, +} + +/// Builder functions for Tx +pub trait TxBuilder: Sized { + /// Apply the given function to the Tx inside self + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx; + /// Simulate applying the transaction + fn dry_run(self, dry_run: bool) -> Self { + self.tx(|x| Tx { dry_run, ..x }) + } + /// Simulate applying both the wrapper and inner transactions + fn dry_run_wrapper(self, dry_run_wrapper: bool) -> Self { + self.tx(|x| Tx { + dry_run_wrapper, + ..x + }) + } + /// Dump the transaction bytes to file + fn dump_tx(self, dump_tx: bool) -> Self { + self.tx(|x| Tx { dump_tx, ..x }) + } + /// The output directory path to where serialize the data + fn output_folder(self, output_folder: PathBuf) -> Self { + self.tx(|x| Tx { + output_folder: Some(output_folder), + ..x + }) + } + /// Submit the transaction even if it doesn't pass client checks + fn force(self, 
force: bool) -> Self { + self.tx(|x| Tx { force, ..x }) + } + /// Do not wait for the transaction to be added to the blockchain + fn broadcast_only(self, broadcast_only: bool) -> Self { + self.tx(|x| Tx { + broadcast_only, + ..x + }) + } + /// The address of the ledger node as host:port + fn ledger_address(self, ledger_address: C::TendermintAddress) -> Self { + self.tx(|x| Tx { + ledger_address, + ..x + }) + } + /// If any new account is initialized by the tx, use the given alias to + /// save it in the wallet. + fn initialized_account_alias( + self, + initialized_account_alias: String, + ) -> Self { + self.tx(|x| Tx { + initialized_account_alias: Some(initialized_account_alias), + ..x + }) + } + /// Whether to force overwrite the above alias, if it is provided, in the + /// wallet. + fn wallet_alias_force(self, wallet_alias_force: bool) -> Self { + self.tx(|x| Tx { + wallet_alias_force, + ..x + }) + } + /// The amount being payed (for gas unit) to include the transaction + fn fee_amount(self, fee_amount: InputAmount) -> Self { + self.tx(|x| Tx { + fee_amount: Some(fee_amount), + ..x + }) + } + /// The fee payer signing key + fn wrapper_fee_payer(self, wrapper_fee_payer: C::Keypair) -> Self { + self.tx(|x| Tx { + wrapper_fee_payer: Some(wrapper_fee_payer), + ..x + }) + } + /// The token in which the fee is being paid + fn fee_token(self, fee_token: C::Address) -> Self { + self.tx(|x| Tx { fee_token, ..x }) + } + /// The optional spending key for fee unshielding + fn fee_unshield(self, fee_unshield: C::TransferSource) -> Self { + self.tx(|x| Tx { + fee_unshield: Some(fee_unshield), + ..x + }) + } + /// The max amount of gas used to process tx + fn gas_limit(self, gas_limit: GasLimit) -> Self { + self.tx(|x| Tx { gas_limit, ..x }) + } + /// The optional expiration of the transaction + fn expiration(self, expiration: DateTimeUtc) -> Self { + self.tx(|x| Tx { + expiration: Some(expiration), + ..x + }) + } + /// Generate an ephimeral signing key to be used only once to 
sign a + /// wrapper tx + fn disposable_signing_key(self, disposable_signing_key: bool) -> Self { + self.tx(|x| Tx { + disposable_signing_key, + ..x + }) + } + /// The chain id for which the transaction is intended + fn chain_id(self, chain_id: ChainId) -> Self { + self.tx(|x| Tx { + chain_id: Some(chain_id), + ..x + }) + } + /// Sign the tx with the key for the given alias from your wallet + fn signing_keys(self, signing_keys: Vec) -> Self { + self.tx(|x| Tx { signing_keys, ..x }) + } + /// List of signatures to attach to the transaction + fn signatures(self, signatures: Vec) -> Self { + self.tx(|x| Tx { signatures, ..x }) + } + /// Path to the TX WASM code file to reveal PK + fn tx_reveal_code_path(self, tx_reveal_code_path: PathBuf) -> Self { + self.tx(|x| Tx { + tx_reveal_code_path, + ..x + }) + } + /// Sign the tx with the public key for the given alias from your wallet + fn verification_key(self, verification_key: C::PublicKey) -> Self { + self.tx(|x| Tx { + verification_key: Some(verification_key), + ..x + }) + } + /// Password to decrypt key + fn password(self, password: Zeroizing) -> Self { + self.tx(|x| Tx { + password: Some(password), + ..x + }) + } +} + +impl TxBuilder for Tx { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + func(self) + } +} + +/// MASP add key or address arguments +#[derive(Clone, Debug)] +pub struct MaspAddrKeyAdd { + /// Key alias + pub alias: String, + /// Whether to force overwrite the alias + pub alias_force: bool, + /// Any MASP value + pub value: MaspValue, + /// Don't encrypt the keypair + pub unsafe_dont_encrypt: bool, +} + +/// MASP generate spending key arguments +#[derive(Clone, Debug)] +pub struct MaspSpendKeyGen { + /// Key alias + pub alias: String, + /// Whether to force overwrite the alias + pub alias_force: bool, + /// Don't encrypt the keypair + pub unsafe_dont_encrypt: bool, +} + +/// MASP generate payment address arguments +#[derive(Clone, Debug)] +pub struct MaspPayAddrGen { + /// Key alias + 
pub alias: String, + /// Whether to force overwrite the alias + pub alias_force: bool, + /// Viewing key + pub viewing_key: C::ViewingKey, + /// Pin + pub pin: bool, +} + +/// Wallet generate key and implicit address arguments +#[derive(Clone, Debug)] +pub struct KeyAndAddressGen { + /// Scheme type + pub scheme: SchemeType, + /// Key alias + pub alias: Option, + /// Whether to force overwrite the alias, if provided + pub alias_force: bool, + /// Don't encrypt the keypair + pub unsafe_dont_encrypt: bool, + /// BIP44 derivation path + pub derivation_path: Option, +} + +/// Wallet restore key and implicit address arguments +#[derive(Clone, Debug)] +pub struct KeyAndAddressRestore { + /// Scheme type + pub scheme: SchemeType, + /// Key alias + pub alias: Option, + /// Whether to force overwrite the alias, if provided + pub alias_force: bool, + /// Don't encrypt the keypair + pub unsafe_dont_encrypt: bool, + /// BIP44 derivation path + pub derivation_path: Option, +} + +/// Wallet key lookup arguments +#[derive(Clone, Debug)] +pub struct KeyFind { + /// Public key to lookup keypair with + pub public_key: Option, + /// Key alias to lookup keypair with + pub alias: Option, + /// Public key hash to lookup keypair with + pub value: Option, + /// Show secret keys to user + pub unsafe_show_secret: bool, +} + +/// Wallet find shielded address or key arguments +#[derive(Clone, Debug)] +pub struct AddrKeyFind { + /// Address/key alias + pub alias: String, + /// Show secret keys to user + pub unsafe_show_secret: bool, +} + +/// Wallet list shielded keys arguments +#[derive(Clone, Debug)] +pub struct MaspKeysList { + /// Don't decrypt spending keys + pub decrypt: bool, + /// Show secret keys to user + pub unsafe_show_secret: bool, +} + +/// Wallet list keys arguments +#[derive(Clone, Debug)] +pub struct KeyList { + /// Don't decrypt keypairs + pub decrypt: bool, + /// Show secret keys to user + pub unsafe_show_secret: bool, +} + +/// Wallet key export arguments +#[derive(Clone, 
Debug)] +pub struct KeyExport { + /// Key alias + pub alias: String, +} + +/// Wallet address lookup arguments +#[derive(Clone, Debug)] +pub struct AddressOrAliasFind { + /// Alias to find + pub alias: Option, + /// Address to find + pub address: Option
, +} + +/// Wallet address add arguments +#[derive(Clone, Debug)] +pub struct AddressAdd { + /// Address alias + pub alias: String, + /// Whether to force overwrite the alias + pub alias_force: bool, + /// Address to add + pub address: Address, +} + +/// Bridge pool batch recommendation. +#[derive(Clone, Debug)] +pub struct RecommendBatch { + /// The query parameters. + pub query: Query, + /// The maximum amount of gas to spend. + pub max_gas: Option, + /// An optional parameter indicating how much net + /// gas the relayer is willing to pay. + pub gas: Option, + /// Bridge pool recommendations conversion rates table. + pub conversion_table: C::BpConversionTable, +} + +/// A transfer to be added to the Ethereum bridge pool. +#[derive(Clone, Debug)] +pub struct EthereumBridgePool { + /// Whether the transfer is for a NUT. + /// + /// By default, we add wrapped ERC20s onto the + /// Bridge pool. + pub nut: bool, + /// The args for building a tx to the bridge pool + pub tx: Tx, + /// The type of token + pub asset: EthAddress, + /// The recipient address + pub recipient: EthAddress, + /// The sender of the transfer + pub sender: C::Address, + /// The amount to be transferred + pub amount: InputAmount, + /// The amount of gas fees + pub fee_amount: InputAmount, + /// The account of fee payer. + /// + /// If unset, it is the same as the sender. + pub fee_payer: Option, + /// The token in which the gas is being paid + pub fee_token: C::Address, + /// Path to the tx WASM code file + pub code_path: PathBuf, +} + +impl TxBuilder for EthereumBridgePool { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + EthereumBridgePool { + tx: func(self.tx), + ..self + } + } +} + +impl EthereumBridgePool { + /// Whether the transfer is for a NUT. + /// + /// By default, we add wrapped ERC20s onto the + /// Bridge pool. 
+ pub fn nut(self, nut: bool) -> Self { + Self { nut, ..self } + } + + /// The type of token + pub fn asset(self, asset: EthAddress) -> Self { + Self { asset, ..self } + } + + /// The recipient address + pub fn recipient(self, recipient: EthAddress) -> Self { + Self { recipient, ..self } + } + + /// The sender of the transfer + pub fn sender(self, sender: C::Address) -> Self { + Self { sender, ..self } + } + + /// The amount to be transferred + pub fn amount(self, amount: InputAmount) -> Self { + Self { amount, ..self } + } + + /// The amount of gas fees + pub fn fee_amount(self, fee_amount: InputAmount) -> Self { + Self { fee_amount, ..self } + } + + /// The account of fee payer. + /// + /// If unset, it is the same as the sender. + pub fn fee_payer(self, fee_payer: C::Address) -> Self { + Self { + fee_payer: Some(fee_payer), + ..self + } + } + + /// The token in which the gas is being paid + pub fn fee_token(self, fee_token: C::Address) -> Self { + Self { fee_token, ..self } + } + + /// Path to the tx WASM code file + pub fn code_path(self, code_path: PathBuf) -> Self { + Self { code_path, ..self } + } +} + +impl EthereumBridgePool { + /// Build a transaction from this builder + pub async fn build<'a>( + self, + context: &impl Namada<'a>, + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> + { + bridge_pool::build_bridge_pool_tx(context, self).await + } +} + +/// Bridge pool proof arguments. +#[derive(Debug, Clone)] +pub struct BridgePoolProof { + /// The query parameters. + pub query: Query, + /// The keccak hashes of transfers to + /// acquire a proof of. + pub transfers: Vec, + /// The address of the node responsible for relaying + /// the transfers. + /// + /// This node will receive the gas fees escrowed in + /// the Bridge pool, to compensate the Ethereum relay + /// procedure. + pub relayer: Address, +} + +/// Arguments to an Ethereum Bridge pool relay operation. 
+#[derive(Debug, Clone)] +pub struct RelayBridgePoolProof { + /// The query parameters. + pub query: Query, + /// The hashes of the transfers to be relayed + pub transfers: Vec, + /// The Namada address for receiving fees for relaying + pub relayer: Address, + /// The number of confirmations to wait for on Ethereum + pub confirmations: u64, + /// The Ethereum RPC endpoint. + pub eth_rpc_endpoint: C::EthereumAddress, + /// The Ethereum gas that can be spent during + /// the relay call. + pub gas: Option, + /// The price of Ethereum gas, during the + /// relay call. + pub gas_price: Option, + /// The address of the Ethereum wallet to pay the gas fees. + /// If unset, the default wallet is used. + pub eth_addr: Option, + /// Synchronize with the network, or exit immediately, + /// if the Ethereum node has fallen behind. + pub sync: bool, + /// Safe mode overrides keyboard interrupt signals, to ensure + /// Ethereum transfers aren't canceled midway through. + pub safe_mode: bool, +} + +/// Bridge validator set arguments. +#[derive(Debug, Clone)] +pub struct BridgeValidatorSet { + /// The query parameters. + pub query: Query, + /// The epoch to query. + pub epoch: Option, +} + +/// Governance validator set arguments. +#[derive(Debug, Clone)] +pub struct GovernanceValidatorSet { + /// The query parameters. + pub query: Query, + /// The epoch to query. + pub epoch: Option, +} + +/// Validator set proof arguments. +#[derive(Debug, Clone)] +pub struct ValidatorSetProof { + /// The query parameters. + pub query: Query, + /// The epoch to query. + pub epoch: Option, +} + +/// Validator set update relayer arguments. +#[derive(Debug, Clone)] +pub struct ValidatorSetUpdateRelay { + /// Run in daemon mode, which will continuously + /// perform validator set updates. + pub daemon: bool, + /// The query parameters. + pub query: Query, + /// The number of block confirmations on Ethereum. + pub confirmations: u64, + /// The Ethereum RPC endpoint. 
+ pub eth_rpc_endpoint: C::EthereumAddress, + /// The epoch of the validator set to relay. + pub epoch: Option, + /// The Ethereum gas that can be spent during + /// the relay call. + pub gas: Option, + /// The price of Ethereum gas, during the + /// relay call. + pub gas_price: Option, + /// The address of the Ethereum wallet to pay the gas fees. + /// If unset, the default wallet is used. + pub eth_addr: Option, + /// Synchronize with the network, or exit immediately, + /// if the Ethereum node has fallen behind. + pub sync: bool, + /// The amount of time to sleep between failed + /// daemon mode relays. + pub retry_dur: Option, + /// The amount of time to sleep between successful + /// daemon mode relays. + pub success_dur: Option, + /// Safe mode overrides keyboard interrupt signals, to ensure + /// Ethereum transfers aren't canceled midway through. + pub safe_mode: bool, +} + +/// IBC shielded transfer generation arguments +#[derive(Clone, Debug)] +pub struct GenIbcShieldedTransafer { + /// The query parameters. 
+ pub query: Query, + /// The output directory path to where serialize the data + pub output_folder: Option, + /// The target address + pub target: C::TransferTarget, + /// The token address + pub token: C::Address, + /// Transferred token amount + pub amount: InputAmount, + /// Port ID via which the token is received + pub port_id: PortId, + /// Channel ID via which the token is received + pub channel_id: ChannelId, +} diff --git a/shared/src/types/control_flow.rs b/sdk/src/control_flow/mod.rs similarity index 61% rename from shared/src/types/control_flow.rs rename to sdk/src/control_flow/mod.rs index 6b7d07532d..9b75b6e921 100644 --- a/shared/src/types/control_flow.rs +++ b/sdk/src/control_flow/mod.rs @@ -3,7 +3,6 @@ pub mod time; use std::future::Future; -use std::ops::ControlFlow; use std::pin::Pin; use std::task::{Context, Poll}; @@ -12,109 +11,6 @@ use futures::future::FutureExt; #[cfg(any(unix, windows))] use tokio::sync::oneshot; -/// A [`ControlFlow`] to control the halt status -/// of some execution context. -/// -/// No return values are assumed to exist. -pub type Halt = ControlFlow<(), T>; - -/// Halt all execution. -pub const fn halt() -> Halt { - ControlFlow::Break(()) -} - -/// Proceed execution. -pub const fn proceed(value: T) -> Halt { - ControlFlow::Continue(value) -} - -/// Convert from [`Halt`] to [`Result`]. 
-#[allow(missing_docs)] -pub trait ProceedOrElse { - fn proceed_or_else(self, error: F) -> Result - where - Self: Sized, - F: FnOnce() -> E; - - #[inline] - fn proceed_or(self, error: E) -> Result - where - Self: Sized, - { - self.proceed_or_else(move || error) - } - - #[inline] - fn proceed(self) -> T - where - Self: Sized, - { - self.proceed_or(()).expect("Halted execution") - } -} - -impl ProceedOrElse for Halt { - #[inline] - fn proceed_or_else(self, error: F) -> Result - where - Self: Sized, - F: FnOnce() -> E, - { - match self { - ControlFlow::Continue(x) => Ok(x), - ControlFlow::Break(()) => Err(error()), - } - } -} - -/// Halting abstraction to obtain [`ControlFlow`] actions. -pub trait TryHalt { - /// Possibly exit from some context, if we encounter an - /// error. We may recover from said error. - fn try_halt_or_recover(self, handle_err: F) -> Halt - where - F: FnMut(E) -> Halt; - - /// Exit from some context, if we encounter an error. - #[inline] - fn try_halt(self, mut handle_err: F) -> Halt - where - Self: Sized, - F: FnMut(E), - { - self.try_halt_or_recover(|e| { - handle_err(e); - halt() - }) - } -} - -impl TryHalt for Result { - #[inline] - fn try_halt_or_recover(self, mut handle_err: F) -> Halt - where - F: FnMut(E) -> Halt, - { - match self { - Ok(x) => proceed(x), - Err(e) => handle_err(e), - } - } -} - -impl TryHalt for itertools::Either { - #[inline] - fn try_halt_or_recover(self, mut handle_err: F) -> Halt - where - F: FnMut(L) -> Halt, - { - match self { - itertools::Either::Right(x) => proceed(x), - itertools::Either::Left(e) => handle_err(e), - } - } -} - /// A shutdown signal receiver. 
pub struct ShutdownSignal { #[cfg(not(any(unix, windows)))] diff --git a/shared/src/types/control_flow/time.rs b/sdk/src/control_flow/time.rs similarity index 100% rename from shared/src/types/control_flow/time.rs rename to sdk/src/control_flow/time.rs diff --git a/shared/src/sdk/error.rs b/sdk/src/error.rs similarity index 75% rename from shared/src/sdk/error.rs rename to sdk/src/error.rs index b103a9523f..9f84195cc2 100644 --- a/shared/src/sdk/error.rs +++ b/sdk/src/error.rs @@ -3,14 +3,14 @@ use namada_core::proto::Tx; use namada_core::types::address::Address; use namada_core::types::dec::Dec; +use namada_core::types::ethereum_events::EthAddress; use namada_core::types::storage; use namada_core::types::storage::Epoch; use prost::EncodeError; use tendermint_rpc::Error as RpcError; use thiserror::Error; -use crate::sdk::error::Error::Pinned; -use crate::vm::WasmValidationError; +use crate::error::Error::Pinned; /// The standard Result type that most code ought to return pub type Result = std::result::Result; @@ -39,6 +39,9 @@ pub enum Error { /// Errors that handle querying from storage #[error("Querying error: {0}")] Query(#[from] QueryError), + /// Ethereum bridge related errors + #[error("{0}")] + EthereumBridge(#[from] EthereumBridgeError), /// Any Other errors that are uncategorized #[error("{0}")] Other(String), @@ -89,6 +92,10 @@ pub enum QueryError { /// Wasm querying failure #[error("Wasm code path {0} does not exist on chain")] Wasm(String), + /// The queried node is outdated, and is in the process of + /// synchronizing with the network. 
+ #[error("Node is still catching up with the network")] + CatchingUp, } /// Errors that deal with Decoding, Encoding, or Conversions @@ -165,6 +172,12 @@ pub enum TxError { /// Error retrieving from storage #[error("Error retrieving from storage")] Retrieval, + /// Bond amount is zero + #[error("The requested bond amount is 0.")] + BondIsZero, + /// Unond amount is zero + #[error("The requested unbond amount is 0.")] + UnbondIsZero, /// No unbonded bonds ready to withdraw in the current epoch #[error( "There are no unbonded bonds ready to withdraw in the current epoch \ @@ -222,9 +235,6 @@ pub enum TxError { /// Error in the fee unshielding transaction #[error("Error in fee unshielding: {0}")] FeeUnshieldingError(String), - /// Wasm validation failed - #[error("Validity predicate code validation failed with {0}")] - WasmValidationFailure(WasmValidationError), /// Encoding transaction failure #[error("Encoding tx data, {0}, shouldn't fail")] EncodeTxFailure(String), @@ -278,11 +288,85 @@ pub enum TxError { /// Invalid owner account #[error("The source account {0} is not valid or doesn't exist.")] InvalidAccount(String), + /// The redelegation amount is larger than the remaining bond amount + #[error( + "The redelegation amount is larger than the remaining bond amount. \ + Amount to redelegate is {0} and the remaining bond amount is {1}." 
+ )] + RedelegationAmountTooLarge(String, String), + /// The redelegation amount is 0 + #[error("The amount requested to redelegate is 0 tokens")] + RedelegationIsZero, + /// The src and dest validators are the same + #[error("The source and destination validators are the same")] + RedelegationSrcEqDest, + /// The redelegation owner is a validator + #[error("The redelegation owner {0} is a validator")] + RedelegatorIsValidator(Address), + /// There is an incoming redelegation that is still subject to possible + /// slashing + #[error( + "An incoming redelegation from delegator {0} to validator {1} is \ + still subject to possible slashing" + )] + IncomingRedelIsStillSlashable(Address, Address), /// Other Errors that may show up when using the interface #[error("{0}")] Other(String), } +/// Ethereum bridge related errors. +#[derive(Error, Debug, Clone)] +pub enum EthereumBridgeError { + /// Error invoking smart contract function. + #[error("Smart contract call failed: {0}")] + ContractCall(String), + /// Ethereum RPC error. + #[error("RPC error: {0}")] + Rpc(String), + /// Error reading the signed Bridge pool. + #[error("Failed to read signed Bridge pool: {0}")] + ReadSignedBridgePool(String), + /// Error reading the Bridge pool. + #[error("Failed to read Bridge pool: {0}")] + ReadBridgePool(String), + /// Error querying transfer to Ethereum progress. + #[error("Failed to query transfer to Ethereum progress: {0}")] + TransferToEthProgress(String), + /// Error querying Ethereum voting powers. + #[error("Failed to query Ethereum voting powers: {0}")] + QueryVotingPowers(String), + /// Ethereum node timeout error. + #[error( + "Timed out while attempting to communicate with the Ethereum node" + )] + NodeTimeout, + /// Error generating Bridge pool proof. + #[error("Failed to generate Bridge pool proof: {0}")] + GenBridgePoolProof(String), + /// Error retrieving contract address. 
+ #[error("Failed to retrieve contract address: {0}")] + RetrieveContract(String), + /// Error calculating relay cost. + #[error("Failed to calculate relay cost: {0}")] + RelayCost(String), + /// Invalid Bridge pool nonce error. + #[error("The Bridge pool nonce is invalid")] + InvalidBpNonce, + /// Invalid fee token error. + #[error("An invalid fee token was provided: {0}")] + InvalidFeeToken(Address), + /// Not whitelisted error. + #[error("ERC20 is not whitelisted: {0}")] + Erc20NotWhitelisted(EthAddress), + /// Exceeded token caps error. + #[error("ERC20 token caps exceeded: {0}")] + Erc20TokenCapsExceeded(EthAddress), + /// Transfer already in pool error. + #[error("An identical transfer is already present in the Bridge pool")] + TransferAlreadyInPool, +} + /// Checks if the given error is an invalid viewing key pub fn is_pinned_error(err: &Result) -> bool { matches!(err, Err(Pinned(PinnedBalanceError::InvalidViewingKey))) diff --git a/shared/src/ledger/eth_bridge/bridge_pool.rs b/sdk/src/eth_bridge/bridge_pool.rs similarity index 67% rename from shared/src/ledger/eth_bridge/bridge_pool.rs rename to sdk/src/eth_bridge/bridge_pool.rs index b9573cab97..a73767cf5e 100644 --- a/shared/src/ledger/eth_bridge/bridge_pool.rs +++ b/sdk/src/eth_bridge/bridge_pool.rs @@ -5,53 +5,49 @@ use std::cmp::Ordering; use std::collections::HashMap; use std::sync::Arc; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; +use futures::future::FutureExt; +use namada_core::ledger::eth_bridge::storage::bridge_pool::get_pending_key; use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; -use namada_core::types::key::common; +use namada_core::types::address::{Address, InternalAddress}; +use namada_core::types::eth_abi::Encode; +use namada_core::types::eth_bridge_pool::{ + GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, +}; +use namada_core::types::ethereum_events::EthAddress; 
+use namada_core::types::keccak::KeccakHash; use namada_core::types::storage::Epoch; +use namada_core::types::token::{balance_key, Amount, DenominatedAmount}; +use namada_core::types::voting_power::FractionalVotingPower; use owo_colors::OwoColorize; -use serde::{Deserialize, Serialize}; +use serde::Serialize; use super::{block_on_eth_sync, eth_sync_or_exit, BlockOnEthSync}; +use crate::control_flow::install_shutdown_signal; +use crate::control_flow::time::{Duration, Instant}; +use crate::error::{ + EncodingError, Error, EthereumBridgeError, QueryError, TxError, +}; use crate::eth_bridge::ethers::abi::AbiDecode; -use crate::ledger::queries::{ +use crate::internal_macros::echo_error; +use crate::io::Io; +use crate::proto::Tx; +use crate::queries::{ Client, GenBridgePoolProofReq, GenBridgePoolProofRsp, TransferToErcArgs, RPC, }; -use crate::proto::Tx; -use crate::sdk::args; -use crate::sdk::error::Error; -use crate::sdk::masp::{ShieldedContext, ShieldedUtils}; -use crate::sdk::rpc::{query_wasm_code_hash, validate_amount}; -use crate::sdk::tx::prepare_tx; -use crate::sdk::wallet::{Wallet, WalletUtils}; -use crate::types::address::Address; -use crate::types::control_flow::time::{Duration, Instant}; -use crate::types::control_flow::{ - self, install_shutdown_signal, Halt, TryHalt, -}; -use crate::types::eth_abi::Encode; -use crate::types::eth_bridge_pool::{ - GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, +use crate::rpc::{query_storage_value, query_wasm_code_hash, validate_amount}; +use crate::signing::aux_signing_data; +use crate::tx::prepare_tx; +use crate::{ + args, display, display_line, edisplay_line, Namada, SigningTxData, }; -use crate::types::io::Io; -use crate::types::keccak::KeccakHash; -use crate::types::token::{Amount, DenominatedAmount}; -use crate::types::voting_power::FractionalVotingPower; -use crate::{display, display_line}; /// Craft a transaction that adds a transfer to the Ethereum bridge pool. 
-pub async fn build_bridge_pool_tx< - C: crate::ledger::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_bridge_pool_tx( + context: &impl Namada<'_>, args::EthereumBridgePool { tx: tx_args, nut, @@ -64,27 +60,95 @@ pub async fn build_bridge_pool_tx< fee_token, code_path, }: args::EthereumBridgePool, - wrapper_fee_payer: common::PublicKey, -) -> Result<(Tx, Option), Error> { - let fee_payer = fee_payer.unwrap_or_else(|| sender.clone()); - let DenominatedAmount { amount, .. } = validate_amount::<_, IO>( - client, - amount, - &wrapped_erc20s::token(&asset), - tx_args.force, +) -> Result<(Tx, SigningTxData, Option), Error> { + let sender_ = sender.clone(); + let (transfer, tx_code_hash, signing_data) = futures::try_join!( + validate_bridge_pool_tx( + context, + tx_args.force, + nut, + asset, + recipient, + sender, + amount, + fee_amount, + fee_payer, + fee_token, + ), + query_wasm_code_hash(context, code_path.to_string_lossy()), + aux_signing_data( + context, + &tx_args, + // token owner + Some(sender_.clone()), + // tx signer + Some(sender_), + ), + )?; + + let chain_id = tx_args + .chain_id + .clone() + .ok_or_else(|| Error::Other("No chain id available".into()))?; + + let mut tx = Tx::new(chain_id, tx_args.expiration); + tx.add_code_from_hash(tx_code_hash).add_data(transfer); + + let epoch = prepare_tx( + context, + &tx_args, + &mut tx, + signing_data.fee_payer.clone(), + None, ) - .await - .map_err(|e| Error::Other(format!("Failed to validate amount. {}", e)))?; - let DenominatedAmount { - amount: fee_amount, .. - } = validate_amount::<_, IO>(client, fee_amount, &fee_token, tx_args.force) - .await - .map_err(|e| { - Error::Other(format!( - "Failed to validate Bridge pool fee amount. {}", - e - )) - })?; + .await?; + + Ok((tx, signing_data, epoch)) +} + +/// Perform client validation checks on a Bridge pool transfer. 
+#[allow(clippy::too_many_arguments)] +async fn validate_bridge_pool_tx( + context: &impl Namada<'_>, + force: bool, + nut: bool, + asset: EthAddress, + recipient: EthAddress, + sender: Address, + amount: args::InputAmount, + fee_amount: args::InputAmount, + fee_payer: Option
, + fee_token: Address, +) -> Result { + let token_addr = wrapped_erc20s::token(&asset); + let validate_token_amount = + validate_amount(context, amount, &token_addr, force).map(|result| { + result.map_err(|e| { + Error::Other(format!( + "Failed to validate Bridge pool transfer amount: {e}" + )) + }) + }); + + let validate_fee_amount = + validate_amount(context, fee_amount, &fee_token, force).map(|result| { + result.map_err(|e| { + Error::Other(format!( + "Failed to validate Bridge pool fee amount: {e}", + )) + }) + }); + + // validate amounts + let ( + tok_denominated @ DenominatedAmount { amount, .. }, + fee_denominated @ DenominatedAmount { + amount: fee_amount, .. + }, + ) = futures::try_join!(validate_token_amount, validate_fee_amount)?; + + // build pending Bridge pool transfer + let fee_payer = fee_payer.unwrap_or_else(|| sender.clone()); let transfer = PendingTransfer { transfer: TransferToEthereum { asset, @@ -104,92 +168,219 @@ pub async fn build_bridge_pool_tx< }, }; - let tx_code_hash = - query_wasm_code_hash::<_, IO>(client, code_path.to_str().unwrap()) + if force { + return Ok(transfer); + } + + //====================================================== + // XXX: the following validations should be kept in sync + // with the validations performed by the Bridge pool VP! 
+ //====================================================== + + // check if an identical transfer is already in the Bridge pool + let transfer_in_pool = RPC + .shell() + .storage_has_key(context.client(), &get_pending_key(&transfer)) + .await + .map_err(|e| Error::Query(QueryError::General(e.to_string())))?; + if transfer_in_pool { + return Err(Error::EthereumBridge( + EthereumBridgeError::TransferAlreadyInPool, + )); + } + + let wnam_addr = RPC + .shell() + .eth_bridge() + .read_native_erc20_contract(context.client()) + .await + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::RetrieveContract( + e.to_string(), + )) + })?; + + // validate gas fee token + match &transfer.gas_fee.token { + Address::Internal(InternalAddress::Nut(_)) => { + return Err(Error::EthereumBridge( + EthereumBridgeError::InvalidFeeToken(transfer.gas_fee.token), + )); + } + fee_token if fee_token == &wrapped_erc20s::token(&wnam_addr) => { + return Err(Error::EthereumBridge( + EthereumBridgeError::InvalidFeeToken(transfer.gas_fee.token), + )); + } + _ => {} + } + + // validate wnam token caps + whitelist + if transfer.transfer.asset == wnam_addr { + let flow_control = RPC + .shell() + .eth_bridge() + .get_erc20_flow_control(context.client(), &wnam_addr) .await - .unwrap(); + .map_err(|e| { + Error::Query(QueryError::General(format!( + "Failed to read wrapped NAM flow control data: {e}" + ))) + })?; - let chain_id = tx_args.chain_id.clone().unwrap(); - let mut tx = Tx::new(chain_id, tx_args.expiration); - tx.add_code_from_hash(tx_code_hash).add_data(transfer); + if !flow_control.whitelisted { + return Err(Error::EthereumBridge( + EthereumBridgeError::Erc20NotWhitelisted(wnam_addr), + )); + } - // TODO(namada#1800): validate the tx on the client side + if flow_control.exceeds_token_caps(transfer.transfer.amount) { + return Err(Error::EthereumBridge( + EthereumBridgeError::Erc20TokenCapsExceeded(wnam_addr), + )); + } + } - let epoch = prepare_tx::( - client, - wallet, - shielded, - 
&tx_args, - &mut tx, - wrapper_fee_payer, - None, - ) - .await?; + // validate balances + let maybe_balance_error = if token_addr == transfer.gas_fee.token { + let expected_debit = transfer.transfer.amount + transfer.gas_fee.amount; + let balance: Amount = query_storage_value( + context.client(), + &balance_key(&token_addr, &transfer.transfer.sender), + ) + .await?; + + balance + .checked_sub(expected_debit) + .is_none() + .then_some((token_addr, tok_denominated)) + } else { + let check_tokens = async { + let balance: Amount = query_storage_value( + context.client(), + &balance_key(&token_addr, &transfer.transfer.sender), + ) + .await?; + Result::<_, Error>::Ok( + balance + .checked_sub(transfer.transfer.amount) + .is_none() + .then_some((token_addr, tok_denominated)), + ) + }; + let check_fees = async { + let balance: Amount = query_storage_value( + context.client(), + &balance_key( + &transfer.gas_fee.token, + &transfer.transfer.sender, + ), + ) + .await?; + Result::<_, Error>::Ok( + balance + .checked_sub(transfer.gas_fee.amount) + .is_none() + .then_some(( + transfer.gas_fee.token.clone(), + fee_denominated, + )), + ) + }; - Ok((tx, epoch)) + let (err_tokens, err_fees) = + futures::try_join!(check_tokens, check_fees)?; + err_tokens.or(err_fees) + }; + if let Some((token, amount)) = maybe_balance_error { + return Err(Error::Tx(TxError::NegativeBalanceAfterTransfer( + Box::new(transfer.transfer.sender), + amount.to_string(), + Box::new(token), + ))); + } + + Ok(transfer) } /// A json serializable representation of the Ethereum /// bridge pool. -#[derive(Serialize, Deserialize)] -struct BridgePoolResponse { - bridge_pool_contents: HashMap, +#[derive(Serialize)] +struct BridgePoolResponse<'pool> { + bridge_pool_contents: &'pool HashMap, } /// Query the contents of the Ethereum bridge pool. /// Prints out a json payload. 
-pub async fn query_bridge_pool(client: &C) -where - C: Client + Sync, -{ +pub async fn query_bridge_pool<'a>( + client: &(impl Client + Sync), + io: &impl Io, +) -> Result, Error> { let response: Vec = RPC .shell() .eth_bridge() .read_ethereum_bridge_pool(client) .await - .unwrap(); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::ReadBridgePool( + e.to_string(), + )) + })?; let pool_contents: HashMap = response .into_iter() .map(|transfer| (transfer.keccak256().to_string(), transfer)) .collect(); if pool_contents.is_empty() { - display_line!(IO, "Bridge pool is empty."); - return; + display_line!(io, "Bridge pool is empty."); + return Ok(pool_contents); } let contents = BridgePoolResponse { - bridge_pool_contents: pool_contents, + bridge_pool_contents: &pool_contents, }; - display_line!(IO, "{}", serde_json::to_string_pretty(&contents).unwrap()); + display_line!( + io, + "{}", + serde_json::to_string_pretty(&contents) + .map_err(|e| EncodingError::Serde(e.to_string()))? + ); + Ok(pool_contents) } /// Query the contents of the Ethereum bridge pool that /// is covered by the latest signed root. /// Prints out a json payload. 
-pub async fn query_signed_bridge_pool( - client: &C, -) -> Halt> -where - C: Client + Sync, -{ +pub async fn query_signed_bridge_pool<'a>( + client: &(impl Client + Sync), + io: &impl Io, +) -> Result, Error> { let response: Vec = RPC .shell() .eth_bridge() .read_signed_ethereum_bridge_pool(client) .await - .unwrap(); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::ReadSignedBridgePool( + e.to_string(), + )) + })?; let pool_contents: HashMap = response .into_iter() .map(|transfer| (transfer.keccak256().to_string(), transfer)) .collect(); if pool_contents.is_empty() { - display_line!(IO, "Bridge pool is empty."); - return control_flow::halt(); + display_line!(io, "Bridge pool is empty."); + return Ok(pool_contents); } let contents = BridgePoolResponse { - bridge_pool_contents: pool_contents.clone(), + bridge_pool_contents: &pool_contents, }; - display_line!(IO, "{}", serde_json::to_string_pretty(&contents).unwrap()); - control_flow::proceed(pool_contents) + display_line!( + io, + "{}", + serde_json::to_string_pretty(&contents) + .map_err(|e| EncodingError::Serde(e.to_string()))? + ); + Ok(pool_contents) } /// Iterates over all ethereum events @@ -197,34 +388,46 @@ where /// backing each `TransferToEthereum` event. /// /// Prints a json payload. -pub async fn query_relay_progress(client: &C) -where - C: Client + Sync, -{ +pub async fn query_relay_progress<'a>( + client: &(impl Client + Sync), + io: &impl Io, +) -> Result<(), Error> { let resp = RPC .shell() .eth_bridge() .transfer_to_ethereum_progress(client) .await - .unwrap(); - display_line!(IO, "{}", serde_json::to_string_pretty(&resp).unwrap()); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::TransferToEthProgress( + e.to_string(), + )) + })?; + display_line!( + io, + "{}", + serde_json::to_string_pretty(&resp) + .map_err(|e| EncodingError::Serde(e.to_string()))? + ); + Ok(()) } /// Internal methdod to construct a proof that a set of transfers are in the /// bridge pool. 
-async fn construct_bridge_pool_proof( - client: &C, +async fn construct_bridge_pool_proof<'a>( + client: &(impl Client + Sync), + io: &impl Io, args: GenBridgePoolProofReq<'_, '_>, -) -> Halt -where - C: Client + Sync, -{ +) -> Result { let in_progress = RPC .shell() .eth_bridge() .transfer_to_ethereum_progress(client) .await - .unwrap(); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::TransferToEthProgress( + e.to_string(), + )) + })?; let warnings: Vec<_> = in_progress .into_iter() @@ -243,43 +446,55 @@ where let warning = warning.bold(); let warning = warning.blink(); display_line!( - IO, + io, "{warning}: The following hashes correspond to transfers that \ have surpassed the security threshold in Namada, therefore have \ likely been relayed to Ethereum, but do not yet have a quorum of \ validator signatures behind them in Namada; thus they are still \ in the Bridge pool:\n{warnings:?}", ); - display!(IO, "\nDo you wish to proceed? (y/n): "); - IO::flush(); + display!(io, "\nDo you wish to proceed? (y/n): "); + io.flush(); loop { - let resp = IO::read().await.try_halt(|e| { - display_line!( - IO, + let resp = io.read().await.map_err(|e| { + Error::Other(echo_error!( + io, "Encountered error reading from STDIN: {e:?}" - ); + )) })?; match resp.trim() { "y" => break, - "n" => return control_flow::halt(), + "n" => { + return Err(Error::Other( + "Aborted generating Bridge pool proof".into(), + )); + } _ => { - display!(IO, "Expected 'y' or 'n'. Please try again: "); - IO::flush(); + display!(io, "Expected 'y' or 'n'. 
Please try again: "); + io.flush(); } } } } - let data = args.try_to_vec().unwrap(); + let data = args.serialize_to_vec(); let response = RPC .shell() .eth_bridge() .generate_bridge_pool_proof(client, Some(data), None, false) - .await; + .await + .map_err(|e| { + edisplay_line!( + io, + "Encountered error constructing proof:\n{:?}", + e + ); + Error::EthereumBridge(EthereumBridgeError::GenBridgePoolProof( + e.to_string(), + )) + })?; - response.map(|response| response.data).try_halt(|e| { - display_line!(IO, "Encountered error constructing proof:\n{:?}", e); - }) + Ok(response.data) } /// A response from construction a bridge pool proof. @@ -294,18 +509,17 @@ struct BridgePoolProofResponse { /// Construct a merkle proof of a batch of transfers in /// the bridge pool and return it to the user (as opposed /// to relaying it to ethereum). -pub async fn construct_proof( - client: &C, +pub async fn construct_proof<'a>( + client: &(impl Client + Sync), + io: &impl Io, args: args::BridgePoolProof, -) -> Halt<()> -where - C: Client + Sync, -{ +) -> Result<(), Error> { let GenBridgePoolProofRsp { abi_encoded_args, appendices, - } = construct_bridge_pool_proof::<_, IO>( + } = construct_bridge_pool_proof( client, + io, GenBridgePoolProofReq { transfers: args.transfers.as_slice().into(), relayer: Cow::Borrowed(&args.relayer), @@ -334,26 +548,32 @@ where .unwrap_or_default(), abi_encoded_args, }; - display_line!(IO, "{}", serde_json::to_string(&resp).unwrap()); - control_flow::proceed(()) + display_line!( + io, + "{}", + serde_json::to_string_pretty(&resp) + .map_err(|e| EncodingError::Serde(e.to_string()))? + ); + Ok(()) } /// Relay a validator set update, signed off for a given epoch. 
-pub async fn relay_bridge_pool_proof( +pub async fn relay_bridge_pool_proof<'a, E>( eth_client: Arc, - nam_client: &C, + client: &(impl Client + Sync), + io: &impl Io, args: args::RelayBridgePoolProof, -) -> Halt<()> +) -> Result<(), Error> where - C: Client + Sync, E: Middleware, E::Error: std::fmt::Debug + std::fmt::Display, { let _signal_receiver = args.safe_mode.then(install_shutdown_signal); if args.sync { - block_on_eth_sync::<_, IO>( + block_on_eth_sync( &*eth_client, + io, BlockOnEthSync { deadline: Instant::now() + Duration::from_secs(60), delta_sleep: Duration::from_secs(1), @@ -361,13 +581,14 @@ where ) .await?; } else { - eth_sync_or_exit::<_, IO>(&*eth_client).await?; + eth_sync_or_exit(&*eth_client, io).await?; } let GenBridgePoolProofRsp { abi_encoded_args, .. - } = construct_bridge_pool_proof::<_, IO>( - nam_client, + } = construct_bridge_pool_proof( + client, + io, GenBridgePoolProofReq { transfers: Cow::Owned(args.transfers), relayer: Cow::Owned(args.relayer), @@ -375,40 +596,43 @@ where }, ) .await?; - let bridge = match RPC - .shell() - .eth_bridge() - .read_bridge_contract(nam_client) - .await - { - Ok(address) => Bridge::new(address.address, eth_client), - Err(err_msg) => { - let error = "Error".on_red(); - let error = error.bold(); - let error = error.blink(); - display_line!( - IO, - "{error}: Failed to retrieve the Ethereum Bridge smart \ - contract address from storage with \ - reason:\n{err_msg}\n\nPerhaps the Ethereum bridge is not \ - active.", - ); - return control_flow::halt(); - } - }; + let bridge = + match RPC.shell().eth_bridge().read_bridge_contract(client).await { + Ok(address) => Bridge::new(address.address, eth_client), + Err(err_msg) => { + let error = "Error".on_red(); + let error = error.bold(); + let error = error.blink(); + display_line!( + io, + "Unable to decode the generated proof: {:?}", + error + ); + return Err(Error::EthereumBridge( + EthereumBridgeError::RetrieveContract(err_msg.to_string()), + )); + } + }; let 
(validator_set, signatures, bp_proof): TransferToErcArgs = - AbiDecode::decode(&abi_encoded_args).try_halt(|error| { - display_line!( - IO, + AbiDecode::decode(&abi_encoded_args).map_err(|error| { + EncodingError::Decoding(echo_error!( + io, "Unable to decode the generated proof: {:?}", error - ); + )) })?; // NOTE: this operation costs no gas on Ethereum - let contract_nonce = - bridge.transfer_to_erc_20_nonce().call().await.unwrap(); + let contract_nonce = bridge + .transfer_to_erc_20_nonce() + .call() + .await + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::ContractCall( + e.to_string(), + )) + })?; match bp_proof.batch_nonce.cmp(&contract_nonce) { Ordering::Equal => {} @@ -417,27 +641,31 @@ where let error = error.bold(); let error = error.blink(); display_line!( - IO, + io, "{error}: The Bridge pool nonce in the smart contract is \ {contract_nonce}, while the nonce in Namada is still {}. A \ relay of the former one has already happened, but a proof \ has yet to be crafted in Namada.", bp_proof.batch_nonce ); - return control_flow::halt(); + return Err(Error::EthereumBridge( + EthereumBridgeError::InvalidBpNonce, + )); } Ordering::Greater => { let error = "Error".on_red(); let error = error.bold(); let error = error.blink(); display_line!( - IO, + io, "{error}: The Bridge pool nonce in the smart contract is \ {contract_nonce}, while the nonce in Namada is still {}. 
\ Somehow, Namada's nonce is ahead of the contract's nonce!", bp_proof.batch_nonce ); - return control_flow::halt(); + return Err(Error::EthereumBridge( + EthereumBridgeError::InvalidBpNonce, + )); } } @@ -453,21 +681,30 @@ where relay_op.tx.set_from(eth_addr.into()); } - let pending_tx = relay_op.send().await.unwrap(); + let pending_tx = relay_op.send().await.map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::ContractCall(e.to_string())) + })?; let transf_result = pending_tx .confirmations(args.confirmations as usize) .await - .unwrap(); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::Rpc(e.to_string())) + })?; - display_line!(IO, "{transf_result:?}"); - control_flow::proceed(()) + display_line!(io, "{transf_result:?}"); + Ok(()) } mod recommendations { use std::collections::BTreeSet; use borsh::BorshDeserialize; + use namada_core::types::ethereum_events::Uint as EthUint; + use namada_core::types::storage::BlockHeight; use namada_core::types::uint::{self, Uint, I256}; + use namada_core::types::vote_extensions::validator_set_update::{ + EthAddrBook, VotingPowersMap, VotingPowersMapExt, + }; use super::*; use crate::edisplay_line; @@ -475,12 +712,7 @@ mod recommendations { get_nonce_key, get_signed_root_key, }; use crate::eth_bridge::storage::proof::BridgePoolRootProof; - use crate::types::ethereum_events::Uint as EthUint; - use crate::types::io::Io; - use crate::types::storage::BlockHeight; - use crate::types::vote_extensions::validator_set_update::{ - EthAddrBook, VotingPowersMap, VotingPowersMapExt, - }; + use crate::io::Io; const fn unsigned_transfer_fee() -> Uint { Uint::from_u64(37_500_u64) @@ -558,21 +790,22 @@ mod recommendations { /// Recommend the most economical batch of transfers to relay based /// on a conversion rate estimates from NAM to ETH and gas usage /// heuristics. 
- pub async fn recommend_batch( - client: &C, + pub async fn recommend_batch<'a>( + context: &impl Namada<'a>, args: args::RecommendBatch, - ) -> Halt<()> - where - C: Client + Sync, - { + ) -> Result<(), Error> { // get transfers that can already been relayed but are awaiting a quorum // of backing votes. let in_progress = RPC .shell() .eth_bridge() - .transfer_to_ethereum_progress(client) + .transfer_to_ethereum_progress(context.client()) .await - .unwrap() + .map_err(|e| { + Error::EthereumBridge( + EthereumBridgeError::TransferToEthProgress(e.to_string()), + ) + })? .into_keys() .map(|pending| pending.keccak256().to_string()) .collect::>(); @@ -583,49 +816,63 @@ mod recommendations { <(BridgePoolRootProof, BlockHeight)>::try_from_slice( &RPC.shell() .storage_value( - client, + context.client(), None, None, false, &get_signed_root_key(), ) .await - .try_halt(|err| { - edisplay_line!( - IO, + .map_err(|err| { + Error::Query(QueryError::General(echo_error!( + context.io(), "Failed to query Bridge pool proof: {err}" - ); + ))) })? .data, ) - .try_halt(|err| { - edisplay_line!(IO, "Failed to decode Bridge pool proof: {err}"); + .map_err(|err| { + Error::Encode(EncodingError::Decoding(echo_error!( + context.io(), + "Failed to decode Bridge pool proof: {err}" + ))) })?; // get the latest bridge pool nonce let latest_bp_nonce = EthUint::try_from_slice( &RPC.shell() - .storage_value(client, None, None, false, &get_nonce_key()) + .storage_value( + context.client(), + None, + None, + false, + &get_nonce_key(), + ) .await - .try_halt(|err| { - edisplay_line!( - IO, + .map_err(|err| { + Error::Query(QueryError::General(echo_error!( + context.io(), "Failed to query Bridge pool nonce: {err}" - ); + ))) })? 
.data, ) - .try_halt(|err| { - edisplay_line!(IO, "Failed to decode Bridge pool nonce: {err}"); + .map_err(|err| { + Error::Encode(EncodingError::Decoding(echo_error!( + context.io(), + "Failed to decode Bridge pool nonce: {err}" + ))) })?; if latest_bp_nonce != bp_root.data.1 { edisplay_line!( - IO, + context.io(), "The signed Bridge pool nonce is not up to date, repeat this \ query at a later time" ); - return control_flow::halt(); + return Err(Error::EthereumBridge( + EthereumBridgeError::InvalidBpNonce, + )); } // Get the voting powers of each of validator who signed @@ -633,9 +880,13 @@ mod recommendations { let voting_powers = RPC .shell() .eth_bridge() - .voting_powers_at_height(client, &height) + .voting_powers_at_height(context.client(), &height) .await - .unwrap(); + .map_err(|e| { + Error::EthereumBridge(EthereumBridgeError::QueryVotingPowers( + e.to_string(), + )) + })?; let valset_size = Uint::from_u64(voting_powers.len() as u64); // This is the gas cost for hashing the validator set and @@ -645,17 +896,19 @@ mod recommendations { + valset_fee() * valset_size; // we don't recommend transfers that have already been relayed - let eligible = generate_eligible::( + let eligible = generate_eligible( + context.io(), &args.conversion_table, &in_progress, - query_signed_bridge_pool::<_, IO>(client).await?, + query_signed_bridge_pool(context.client(), context.io()).await?, )?; let max_gas = args.max_gas.map(Uint::from_u64).unwrap_or(uint::MAX_VALUE); let max_cost = args.gas.map(I256::from).unwrap_or_default(); - generate_recommendations::( + generate_recommendations( + context.io(), eligible, &args.conversion_table, validator_gas, @@ -669,28 +922,34 @@ mod recommendations { net_profit, bridge_pool_gas_fees, }| { - display_line!(IO, "Recommended batch: {transfer_hashes:#?}"); display_line!( - IO, + context.io(), + "Recommended batch: {transfer_hashes:#?}" + ); + display_line!( + context.io(), "Estimated Ethereum transaction gas (in gwei): \ {ethereum_gas_fees}", 
); display_line!( - IO, + context.io(), "Estimated net profit (in gwei): {net_profit}" ); - display_line!(IO, "Total fees: {bridge_pool_gas_fees:#?}"); + display_line!( + context.io(), + "Total fees: {bridge_pool_gas_fees:#?}" + ); }, ) .unwrap_or_else(|| { display_line!( - IO, + context.io(), "Unable to find a recommendation satisfying the input \ parameters." ); }); - control_flow::proceed(()) + Ok(()) } /// Given an ordered list of signatures, figure out the size of the first @@ -717,6 +976,9 @@ mod recommendations { (*p).into(), total_power.into(), ) + // NB: this unwrap is infallible, since we calculate + // the total voting power beforehand. the fraction's + // value will never exceed 1.0 .unwrap(); true } else { @@ -729,10 +991,11 @@ mod recommendations { /// Generate eligible recommendations. fn generate_eligible( + io: &IO, conversion_table: &HashMap, in_progress: &BTreeSet, signed_pool: HashMap, - ) -> Halt> { + ) -> Result, Error> { let mut eligible: Vec<_> = signed_pool .into_iter() .filter_map(|(pending_hash, pending)| { @@ -745,7 +1008,7 @@ mod recommendations { .and_then(|entry| match entry.conversion_rate { r if r == 0.0f64 => { edisplay_line!( - IO, + io, "{}: Ignoring null conversion rate", pending.gas_fee.token, ); @@ -753,7 +1016,7 @@ mod recommendations { } r if r < 0.0f64 => { edisplay_line!( - IO, + io, "{}: Ignoring negative conversion rate: {r:.1}", pending.gas_fee.token, ); @@ -761,7 +1024,7 @@ mod recommendations { } r if r > 1e9 => { edisplay_line!( - IO, + io, "{}: Ignoring high conversion rate: {r:.1} > \ 10^9", pending.gas_fee.token, @@ -799,25 +1062,28 @@ mod recommendations { ) }) .collect::, _>>() - .try_halt(|err| { - tracing::debug!(%err, "Failed to calculate relaying cost"); + .map_err(|err| { + Error::EthereumBridge(EthereumBridgeError::RelayCost( + echo_error!(io, "Failed to calculate relaying cost: {err}"), + )) })?; // sort transfers in increasing amounts of profitability eligible.sort_by_key(|EligibleRecommendation { cost, 
.. }| *cost); - control_flow::proceed(eligible) + Ok(eligible) } /// Generates the actual recommendation from restrictions given by the /// input parameters. fn generate_recommendations( + io: &IO, contents: Vec, conversion_table: &HashMap, validator_gas: Uint, max_gas: Uint, max_cost: I256, - ) -> Halt> { + ) -> Result, Error> { let mut state = AlgorithState { profitable: true, feasible_region: false, @@ -830,8 +1096,11 @@ mod recommendations { }; let mut total_gas = validator_gas; - let mut total_cost = I256::try_from(validator_gas).try_halt(|err| { - tracing::debug!(%err, "Failed to convert value to I256"); + let mut total_cost = I256::try_from(validator_gas).map_err(|err| { + Error::Encode(EncodingError::Conversion(echo_error!( + io, + "Failed to convert value to I256: {err}" + ))) })?; let mut total_fees = HashMap::new(); let mut recommendation = vec![]; @@ -870,23 +1139,21 @@ mod recommendations { update_total_fees(&mut total_fees, transfer, conversion_table); } - control_flow::proceed( - if state.feasible_region && !recommendation.is_empty() { - Some(RecommendedBatch { - transfer_hashes: recommendation, - ethereum_gas_fees: total_gas, - net_profit: -total_cost, - bridge_pool_gas_fees: total_fees, - }) - } else { - display_line!( - IO, - "Unable to find a recommendation satisfying the input \ - parameters." - ); - None - }, - ) + Ok(if state.feasible_region && !recommendation.is_empty() { + Some(RecommendedBatch { + transfer_hashes: recommendation, + ethereum_gas_fees: total_gas, + net_profit: -total_cost, + bridge_pool_gas_fees: total_fees, + }) + } else { + edisplay_line!( + io, + "Unable to find a recommendation satisfying the input \ + parameters." 
+ ); + None + }) } fn update_total_fees( @@ -909,11 +1176,9 @@ mod recommendations { #[cfg(test)] mod test_recommendations { use namada_core::types::address::Address; - use namada_core::types::ethereum_events::EthAddress; use super::*; - use crate::types::control_flow::ProceedOrElse; - use crate::types::io::DefaultIo; + use crate::io::StdIo; /// An established user address for testing & development pub fn bertha_address() -> Address { @@ -1019,12 +1284,9 @@ mod recommendations { signed_pool: &mut signed_pool, expected_eligible: &mut expected, }); - let eligible = generate_eligible::( - &table, - &in_progress, - signed_pool, - ) - .proceed(); + let eligible = + generate_eligible(&StdIo, &table, &in_progress, signed_pool) + .unwrap(); assert_eq!(eligible, expected); eligible } @@ -1114,14 +1376,15 @@ mod recommendations { let profitable = vec![transfer(100_000); 17]; let hash = profitable[0].keccak256().to_string(); let expected = vec![hash; 17]; - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations( + &StdIo, process_transfers(profitable), &Default::default(), Uint::from_u64(800_000), uint::MAX_VALUE, I256::zero(), ) - .proceed() + .unwrap() .expect("Test failed") .transfer_hashes; assert_eq!(recommendation, expected); @@ -1133,14 +1396,15 @@ mod recommendations { let hash = transfers[0].keccak256().to_string(); transfers.push(transfer(0)); let expected: Vec<_> = vec![hash; 17]; - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations( + &StdIo, process_transfers(transfers), &Default::default(), Uint::from_u64(800_000), uint::MAX_VALUE, I256::zero(), ) - .proceed() + .unwrap() .expect("Test failed") .transfer_hashes; assert_eq!(recommendation, expected); @@ -1151,14 +1415,15 @@ mod recommendations { let transfers = vec![transfer(75_000); 4]; let hash = transfers[0].keccak256().to_string(); let expected = vec![hash; 2]; - let recommendation = generate_recommendations::( + let 
recommendation = generate_recommendations( + &StdIo, process_transfers(transfers), &Default::default(), Uint::from_u64(50_000), Uint::from_u64(150_000), I256(uint::MAX_SIGNED_VALUE), ) - .proceed() + .unwrap() .expect("Test failed") .transfer_hashes; assert_eq!(recommendation, expected); @@ -1173,14 +1438,15 @@ mod recommendations { .map(|t| t.keccak256().to_string()) .take(5) .collect(); - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations( + &StdIo, process_transfers(transfers), &Default::default(), Uint::from_u64(150_000), uint::MAX_VALUE, I256::from(20_000), ) - .proceed() + .unwrap() .expect("Test failed") .transfer_hashes; assert_eq!(recommendation, expected); @@ -1192,14 +1458,15 @@ mod recommendations { let hash = transfers[0].keccak256().to_string(); let expected = vec![hash; 4]; transfers.extend([transfer(17_500), transfer(17_500)]); - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations( + &StdIo, process_transfers(transfers), &Default::default(), Uint::from_u64(150_000), Uint::from_u64(330_000), I256::from(20_000), ) - .proceed() + .unwrap() .expect("Test failed") .transfer_hashes; assert_eq!(recommendation, expected); @@ -1208,14 +1475,15 @@ mod recommendations { #[test] fn test_wholly_infeasible() { let transfers = vec![transfer(75_000); 4]; - let recommendation = generate_recommendations::( + let recommendation = generate_recommendations( + &StdIo, process_transfers(transfers), &Default::default(), Uint::from_u64(300_000), uint::MAX_VALUE, I256::from(20_000), ) - .proceed(); + .unwrap(); assert!(recommendation.is_none()) } @@ -1289,7 +1557,8 @@ mod recommendations { const VALIDATOR_GAS_FEE: Uint = Uint::from_u64(100_000); - let recommended_batch = generate_recommendations::( + let recommended_batch = generate_recommendations( + &StdIo, eligible, &conversion_table, // gas spent by validator signature checks @@ -1299,7 +1568,7 @@ mod recommendations { // only 
profitable I256::zero(), ) - .proceed() + .unwrap() .expect("Test failed"); assert_eq!( diff --git a/shared/src/ledger/eth_bridge.rs b/sdk/src/eth_bridge/mod.rs similarity index 78% rename from shared/src/ledger/eth_bridge.rs rename to sdk/src/eth_bridge/mod.rs index a73f5efd77..b8577956ca 100644 --- a/shared/src/ledger/eth_bridge.rs +++ b/sdk/src/eth_bridge/mod.rs @@ -5,19 +5,22 @@ pub mod validator_set; use std::ops::ControlFlow; +pub use ethers; use ethers::providers::Middleware; use itertools::Either; pub use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; pub use namada_core::ledger::eth_bridge::{ADDRESS, INTERNAL_ADDRESS}; +pub use namada_core::types::ethereum_structs as structs; pub use namada_ethereum_bridge::parameters::*; pub use namada_ethereum_bridge::storage::eth_bridge_queries::*; +pub use namada_ethereum_bridge::*; use num256::Uint256; -use crate::types::control_flow::time::{ - Constant, Duration, Error as TimeoutError, Instant, LinearBackoff, Sleep, +use crate::control_flow::time::{ + Constant, Duration, Instant, LinearBackoff, Sleep, }; -use crate::types::control_flow::{self, Halt, TryHalt}; -use crate::types::io::Io; +use crate::error::{Error, EthereumBridgeError}; +use crate::io::Io; use crate::{display_line, edisplay_line}; const DEFAULT_BACKOFF: Duration = std::time::Duration::from_millis(500); @@ -40,9 +43,7 @@ impl SyncStatus { /// Fetch the sync status of an Ethereum node. #[inline] -pub async fn eth_syncing_status( - client: &C, -) -> Result +pub async fn eth_syncing_status(client: &C) -> Result where C: Middleware, { @@ -63,7 +64,7 @@ pub async fn eth_syncing_status_timeout( client: &C, backoff_duration: Duration, deadline: Instant, -) -> Result +) -> Result where C: Middleware, { @@ -89,6 +90,7 @@ where }) }) .await + .map_err(|_| Error::EthereumBridge(EthereumBridgeError::NodeTimeout)) } /// Arguments to [`block_on_eth_sync`]. @@ -102,8 +104,9 @@ pub struct BlockOnEthSync { /// Block until Ethereum finishes synchronizing. 
pub async fn block_on_eth_sync( client: &C, + io: &IO, args: BlockOnEthSync, -) -> Halt<()> +) -> Result<(), Error> where C: Middleware, { @@ -111,7 +114,7 @@ where deadline, delta_sleep, } = args; - display_line!(IO, "Attempting to synchronize with the Ethereum network"); + display_line!(io, "Attempting to synchronize with the Ethereum network"); Sleep { strategy: LinearBackoff { delta: delta_sleep }, } @@ -126,22 +129,24 @@ where } }) .await - .try_halt(|_| { + .map_err(|_| { edisplay_line!( - IO, + io, "Timed out while waiting for Ethereum to synchronize" ); + Error::EthereumBridge(EthereumBridgeError::NodeTimeout) })?; - display_line!(IO, "The Ethereum node is up to date"); - control_flow::proceed(()) + display_line!(io, "The Ethereum node is up to date"); + Ok(()) } /// Check if Ethereum has finished synchronizing. In case it has /// not, perform `action`. pub async fn eth_sync_or( client: &C, + io: &IO, mut action: F, -) -> Halt> +) -> Result, Error> where C: Middleware, F: FnMut() -> T, @@ -149,29 +154,33 @@ where let is_synchronized = eth_syncing_status(client) .await .map(|status| status.is_synchronized()) - .try_halt(|err| { + .map_err(|err| { edisplay_line!( - IO, + io, "An error occurred while fetching the Ethereum \ synchronization status: {err}" ); + err })?; if is_synchronized { - control_flow::proceed(Either::Right(())) + Ok(Either::Right(())) } else { - control_flow::proceed(Either::Left(action())) + Ok(Either::Left(action())) } } /// Check if Ethereum has finished synchronizing. In case it has /// not, end execution. -pub async fn eth_sync_or_exit(client: &C) -> Halt<()> +pub async fn eth_sync_or_exit( + client: &C, + io: &IO, +) -> Result<(), Error> where C: Middleware, { - eth_sync_or::<_, _, _, IO>(client, || { - tracing::error!("The Ethereum node has not finished synchronizing"); + eth_sync_or(client, io, || { + edisplay_line!(io, "The Ethereum node has not finished synchronizing"); }) - .await? 
- .try_halt(|_| ()) + .await?; + Ok(()) } diff --git a/shared/src/ledger/eth_bridge/validator_set.rs b/sdk/src/eth_bridge/validator_set.rs similarity index 78% rename from shared/src/ledger/eth_bridge/validator_set.rs rename to sdk/src/eth_bridge/validator_set.rs index 4ae08dd598..1b7a77466d 100644 --- a/shared/src/ledger/eth_bridge/validator_set.rs +++ b/sdk/src/eth_bridge/validator_set.rs @@ -1,6 +1,5 @@ //! Validator set updates SDK functionality. -use std::borrow::Cow; use std::cmp::Ordering; use std::future::Future; use std::pin::Pin; @@ -12,23 +11,25 @@ use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; use futures::future::{self, FutureExt}; use namada_core::hints; +use namada_core::types::eth_abi::EncodeCell; +use namada_core::types::ethereum_events::EthAddress; use namada_core::types::storage::Epoch; +use namada_core::types::vote_extensions::validator_set_update::{ + ValidatorSetArgs, VotingPowersMap, +}; +use namada_ethereum_bridge::storage::proof::EthereumProof; use super::{block_on_eth_sync, eth_sync_or, eth_sync_or_exit, BlockOnEthSync}; +use crate::control_flow::install_shutdown_signal; +use crate::control_flow::time::{self, Duration, Instant}; +use crate::error::{Error as SdkError, EthereumBridgeError, QueryError}; use crate::eth_bridge::ethers::abi::{AbiDecode, AbiType, Tokenizable}; use crate::eth_bridge::ethers::core::types::TransactionReceipt; use crate::eth_bridge::structs::Signature; -use crate::ledger::queries::RPC; -use crate::sdk::args; -use crate::sdk::queries::Client; -use crate::types::control_flow::time::{self, Duration, Instant}; -use crate::types::control_flow::{ - self, install_shutdown_signal, Halt, TryHalt, -}; -use crate::types::ethereum_events::EthAddress; -use crate::types::io::{DefaultIo, Io}; -use crate::types::vote_extensions::validator_set_update::ValidatorSetArgs; -use crate::{display_line, edisplay_line}; +use crate::internal_macros::{echo_error, trace_error}; +use crate::io::Io; +use 
crate::queries::{Client, RPC}; +use crate::{args, display_line, edisplay_line}; /// Relayer related errors. #[derive(Debug, Default)] @@ -43,7 +44,7 @@ enum Error { /// `tracing` log level. WithReason { /// The reason of the error. - reason: Cow<'static, str>, + reason: SdkError, /// The log level where to display the error message. level: tracing::Level, /// If critical, exit the relayer. @@ -57,7 +58,7 @@ impl Error { /// The error is recoverable. fn recoverable(msg: M) -> Self where - M: Into>, + M: Into, { Error::WithReason { level: tracing::Level::DEBUG, @@ -71,7 +72,7 @@ impl Error { /// The error is not recoverable. fn critical(msg: M) -> Self where - M: Into>, + M: Into, { Error::WithReason { level: tracing::Level::ERROR, @@ -80,9 +81,10 @@ impl Error { } } - /// Display the error message, and return the [`Halt`] status. - fn handle(&self) -> Halt<()> { - let critical = match self { + /// Display the error message, and return a new [`Result`], + /// with the error already handled appropriately. + fn handle(self) -> Result<(), SdkError> { + let (critical, reason) = match self { Error::WithReason { reason, critical, @@ -93,7 +95,7 @@ impl Error { %reason, "An error occurred during the relay" ); - *critical + (critical, reason) } Error::WithReason { reason, @@ -104,18 +106,18 @@ impl Error { %reason, "An error occurred during the relay" ); - *critical + (critical, reason) } // all log levels we care about are DEBUG and ERROR _ => { hints::cold(); - return control_flow::proceed(()); + return Ok(()); } }; if hints::unlikely(critical) { - control_flow::halt() + Err(reason) } else { - control_flow::proceed(()) + Ok(()) } } } @@ -172,7 +174,7 @@ trait ShouldRelay { E::Error: std::fmt::Display; /// Try to recover from an error that has happened. 
- fn try_recover(err: String) -> Error; + fn try_recover>(err: E) -> Error; } impl ShouldRelay for DoNotCheckNonce { @@ -189,7 +191,7 @@ impl ShouldRelay for DoNotCheckNonce { } #[inline] - fn try_recover(err: String) -> Error { + fn try_recover>(err: E) -> Error { Error::recoverable(err) } } @@ -228,7 +230,7 @@ impl ShouldRelay for CheckNonce { } #[inline] - fn try_recover(err: String) -> Error { + fn try_recover>(err: E) -> Error { Error::critical(err) } } @@ -268,12 +270,11 @@ impl From> for RelayResult { /// Query an ABI encoding of the validator set to be installed /// at the given epoch, and its associated proof. -pub async fn query_validator_set_update_proof( - client: &C, +pub async fn query_validator_set_update_proof<'a>( + client: &(impl Client + Sync), + io: &impl Io, args: args::ValidatorSetProof, -) where - C: Client + Sync, -{ +) -> Result>, SdkError> { let epoch = if let Some(epoch) = args.epoch { epoch } else { @@ -285,19 +286,23 @@ pub async fn query_validator_set_update_proof( .eth_bridge() .read_valset_upd_proof(client, &epoch) .await - .unwrap(); + .map_err(|err| { + SdkError::Query(QueryError::General(echo_error!( + io, + "Failed to fetch validator set update proof: {err}" + ))) + })?; - display_line!(IO, "0x{}", HEXLOWER.encode(encoded_proof.as_ref())); + display_line!(io, "0x{}", HEXLOWER.encode(encoded_proof.as_ref())); + Ok(encoded_proof) } /// Query an ABI encoding of the Bridge validator set at a given epoch. 
-pub async fn query_bridge_validator_set( - client: &C, +pub async fn query_bridge_validator_set<'a>( + client: &(impl Client + Sync), + io: &impl Io, args: args::BridgeValidatorSet, -) -> Halt<()> -where - C: Client + Sync, -{ +) -> Result { let epoch = if let Some(epoch) = args.epoch { epoch } else { @@ -309,22 +314,23 @@ where .eth_bridge() .read_bridge_valset(client, &epoch) .await - .try_halt(|err| { - tracing::error!(%err, "Failed to fetch Bridge validator set"); + .map_err(|err| { + SdkError::Query(QueryError::General(echo_error!( + io, + "Failed to fetch Bridge validator set: {err}" + ))) })?; - display_validator_set::(args); - control_flow::proceed(()) + display_validator_set(io, args.clone()); + Ok(args) } /// Query an ABI encoding of the Governance validator set at a given epoch. -pub async fn query_governnace_validator_set( - client: &C, +pub async fn query_governnace_validator_set<'a>( + client: &(impl Client + Sync), + io: &impl Io, args: args::GovernanceValidatorSet, -) -> Halt<()> -where - C: Client + Sync, -{ +) -> Result { let epoch = if let Some(epoch) = args.epoch { epoch } else { @@ -336,16 +342,19 @@ where .eth_bridge() .read_governance_valset(client, &epoch) .await - .try_halt(|err| { - tracing::error!(%err, "Failed to fetch Governance validator set"); + .map_err(|err| { + SdkError::Query(QueryError::General(echo_error!( + io, + "Failed to fetch Governance validator set: {err}" + ))) })?; - display_validator_set::(args); - control_flow::proceed(()) + display_validator_set(io, args.clone()); + Ok(args) } /// Display the given [`ValidatorSetArgs`]. -fn display_validator_set(args: ValidatorSetArgs) { +fn display_validator_set(io: &IO, args: ValidatorSetArgs) { use serde::Serialize; #[derive(Serialize)] @@ -373,28 +382,29 @@ fn display_validator_set(args: ValidatorSetArgs) { }; display_line!( - IO, + io, "{}", serde_json::to_string_pretty(&validator_set).unwrap() ); } /// Relay a validator set update, signed off for a given epoch. 
-pub async fn relay_validator_set_update( +pub async fn relay_validator_set_update<'a, E>( eth_client: Arc, - nam_client: &C, + client: &(impl Client + Sync), + io: &impl Io, args: args::ValidatorSetUpdateRelay, -) -> Halt<()> +) -> Result<(), SdkError> where - C: Client + Sync, E: Middleware, E::Error: std::fmt::Debug + std::fmt::Display, { let mut signal_receiver = args.safe_mode.then(install_shutdown_signal); if args.sync { - block_on_eth_sync::<_, IO>( + block_on_eth_sync( &*eth_client, + io, BlockOnEthSync { deadline: Instant::now() + Duration::from_secs(60), delta_sleep: Duration::from_secs(1), @@ -402,14 +412,15 @@ where ) .await?; } else { - eth_sync_or_exit::<_, IO>(&*eth_client).await?; + eth_sync_or_exit(&*eth_client, io).await?; } if args.daemon { relay_validator_set_update_daemon( args, eth_client, - nam_client, + client, + io, &mut signal_receiver, ) .await @@ -417,11 +428,11 @@ where relay_validator_set_update_once::( &args, eth_client, - nam_client, + client, |relay_result| match relay_result { RelayResult::BridgeCallError(reason) => { edisplay_line!( - IO, + io, "Calling Bridge failed due to: {reason}" ); } @@ -432,27 +443,27 @@ where Ordering::Greater => "too far ahead of", }; edisplay_line!( - IO, + io, "Argument nonce <{argument}> is {whence} contract \ nonce <{contract}>" ); } RelayResult::NoReceipt => { edisplay_line!( - IO, + io, "No transfer receipt received from the Ethereum node" ); } RelayResult::Receipt { receipt } => { if receipt.is_successful() { display_line!( - IO, + io, "Ethereum transfer succeeded: {:?}", receipt ); } else { display_line!( - IO, + io, "Ethereum transfer failed: {:?}", receipt ); @@ -461,18 +472,18 @@ where }, ) .await - .try_halt_or_recover(|error| error.handle()) } + .or_else(|err| err.handle()) } -async fn relay_validator_set_update_daemon( +async fn relay_validator_set_update_daemon<'a, E, F>( mut args: args::ValidatorSetUpdateRelay, eth_client: Arc, - nam_client: &C, + client: &(impl Client + Sync), + io: &impl 
Io, shutdown_receiver: &mut Option, -) -> Halt<()> +) -> Result<(), Error> where - C: Client + Sync, E: Middleware, E::Error: std::fmt::Debug + std::fmt::Display, F: Future + Unpin, @@ -500,7 +511,7 @@ where }; if should_exit { - return control_flow::proceed(()); + return Ok(()); } let sleep_for = if last_call_succeeded { @@ -513,9 +524,7 @@ where time::sleep(sleep_for).await; let is_synchronizing = - eth_sync_or::<_, _, _, DefaultIo>(&*eth_client, || ()) - .await - .is_break(); + eth_sync_or(&*eth_client, io, || ()).await.is_err(); if is_synchronizing { tracing::debug!("The Ethereum node is synchronizing"); last_call_succeeded = false; @@ -525,38 +534,34 @@ where // we could be racing against governance updates, // so it is best to always fetch the latest Bridge // contract address - let bridge = get_bridge_contract(nam_client, Arc::clone(ð_client)) - .await - .try_halt(|err| { - // only care about displaying errors, - // exit on all circumstances - _ = err.handle(); - })?; + let bridge = + get_bridge_contract(client, Arc::clone(ð_client)).await?; let bridge_epoch_prep_call = bridge.validator_set_nonce(); let bridge_epoch_fut = bridge_epoch_prep_call.call().map(|result| { result .map_err(|err| { - tracing::error!( + Error::critical(QueryError::General(trace_error!( + error, "Failed to fetch latest validator set nonce: {err}" - ); + ))) }) .map(|e| e.as_u64() as i128) }); let shell = RPC.shell(); - let nam_current_epoch_fut = shell.epoch(nam_client).map(|result| { + let nam_current_epoch_fut = shell.epoch(client).map(|result| { result .map_err(|err| { - tracing::error!( + Error::critical(QueryError::General(trace_error!( + error, "Failed to fetch the latest epoch in Namada: {err}" - ); + ))) }) .map(|Epoch(e)| e as i128) }); let (nam_current_epoch, gov_current_epoch) = - futures::try_join!(nam_current_epoch_fut, bridge_epoch_fut) - .try_halt(|()| ())?; + futures::try_join!(nam_current_epoch_fut, bridge_epoch_fut)?; tracing::debug!( ?nam_current_epoch, @@ -596,7 
+601,7 @@ where let result = relay_validator_set_update_once::( &args, Arc::clone(ð_client), - nam_client, + client, |transf_result| { let Some(receipt) = transf_result else { tracing::warn!("No transfer receipt received from the Ethereum node"); @@ -634,7 +639,11 @@ where .eth_bridge() .read_bridge_contract(nam_client) .await - .map_err(|err| Error::critical(err.to_string()))?; + .map_err(|err| { + Error::critical(EthereumBridgeError::RetrieveContract( + err.to_string(), + )) + })?; Ok(Bridge::new(bridge_contract.address, eth_client)) } @@ -657,27 +666,49 @@ where RPC.shell() .epoch(nam_client) .await - .map_err(|e| Error::critical(e.to_string()))? + .map_err(|e| Error::critical(QueryError::General(e.to_string())))? .next() }; if hints::unlikely(epoch_to_relay == Epoch(0)) { - return Err(Error::critical( - "There is no validator set update proof for epoch 0", - )); + return Err(Error::critical(SdkError::Other( + "There is no validator set update proof for epoch 0".into(), + ))); } let shell = RPC.shell().eth_bridge(); - let encoded_proof_fut = - shell.read_valset_upd_proof(nam_client, &epoch_to_relay); + let encoded_proof_fut = shell + .read_valset_upd_proof(nam_client, &epoch_to_relay) + .map(|result| { + result.map_err(|err| { + let msg = format!( + "Failed to fetch validator set update proof: {err}" + ); + SdkError::Query(QueryError::General(msg)) + }) + }); let bridge_current_epoch = epoch_to_relay - 1; let shell = RPC.shell().eth_bridge(); - let validator_set_args_fut = - shell.read_bridge_valset(nam_client, &bridge_current_epoch); + let validator_set_args_fut = shell + .read_bridge_valset(nam_client, &bridge_current_epoch) + .map(|result| { + result.map_err(|err| { + let msg = + format!("Failed to fetch Bridge validator set: {err}"); + SdkError::Query(QueryError::General(msg)) + }) + }); let shell = RPC.shell().eth_bridge(); - let bridge_address_fut = shell.read_bridge_contract(nam_client); + let bridge_address_fut = + 
shell.read_bridge_contract(nam_client).map(|result| { + result.map_err(|err| { + SdkError::EthereumBridge(EthereumBridgeError::RetrieveContract( + err.to_string(), + )) + }) + }); let (encoded_proof, validator_set_args, bridge_contract) = futures::try_join!( @@ -685,7 +716,7 @@ where validator_set_args_fut, bridge_address_fut ) - .map_err(|err| R::try_recover(err.to_string()))?; + .map_err(|err| R::try_recover(err))?; let (bridge_hash, gov_hash, signatures): ( [u8; 32], @@ -716,14 +747,15 @@ where relay_op.tx.set_from(eth_addr.into()); } - let pending_tx = relay_op - .send() - .await - .map_err(|e| Error::critical(e.to_string()))?; + let pending_tx = relay_op.send().await.map_err(|e| { + Error::critical(EthereumBridgeError::ContractCall(e.to_string())) + })?; let transf_result = pending_tx .confirmations(args.confirmations as usize) .await - .map_err(|err| Error::critical(err.to_string()))?; + .map_err(|e| { + Error::critical(EthereumBridgeError::Rpc(e.to_string())) + })?; let transf_result: R::RelayResult = transf_result.into(); let status = if transf_result.is_successful() { diff --git a/shared/src/ledger/events/log.rs b/sdk/src/events/log.rs similarity index 97% rename from shared/src/ledger/events/log.rs rename to sdk/src/events/log.rs index a2dc3978d0..596c23bdc9 100644 --- a/shared/src/ledger/events/log.rs +++ b/sdk/src/events/log.rs @@ -8,7 +8,7 @@ use std::default::Default; use circular_queue::CircularQueue; -use crate::ledger::events::Event; +use crate::events::Event; pub mod dumb_queries; @@ -85,9 +85,10 @@ impl EventLog { #[cfg(test)] mod tests { + use namada_core::types::hash::Hash; + use super::*; - use crate::ledger::events::{EventLevel, EventType}; - use crate::types::hash::Hash; + use crate::events::{EventLevel, EventType}; const HASH: &str = "DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"; diff --git a/shared/src/ledger/events/log/dumb_queries.rs b/sdk/src/events/log/dumb_queries.rs similarity index 96% rename from 
shared/src/ledger/events/log/dumb_queries.rs rename to sdk/src/events/log/dumb_queries.rs index 5ff7c8d54f..44988fb0dc 100644 --- a/shared/src/ledger/events/log/dumb_queries.rs +++ b/sdk/src/events/log/dumb_queries.rs @@ -8,12 +8,13 @@ use std::collections::HashMap; +use namada_core::types::hash::Hash; +use namada_core::types::storage::BlockHeight; + +use crate::events::{Event, EventType}; use crate::ibc::core::ics04_channel::packet::Sequence; use crate::ibc::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; use crate::ibc::Height as IbcHeight; -use crate::ledger::events::{Event, EventType}; -use crate::types::hash::Hash; -use crate::types::storage::BlockHeight; /// A [`QueryMatcher`] verifies if a Namada event matches a /// given Tendermint query. @@ -118,7 +119,7 @@ impl QueryMatcher { #[cfg(test)] mod tests { use super::*; - use crate::ledger::events::EventLevel; + use crate::events::EventLevel; /// Test if query matching is working as expected. #[test] diff --git a/shared/src/ledger/events.rs b/sdk/src/events/mod.rs similarity index 94% rename from shared/src/ledger/events.rs rename to sdk/src/events/mod.rs index ff5b9f108d..141867c63d 100644 --- a/shared/src/ledger/events.rs +++ b/sdk/src/events/mod.rs @@ -8,14 +8,14 @@ use std::ops::{Index, IndexMut}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::types::ibc::IbcEvent; +#[cfg(feature = "ferveo-tpke")] +use namada_core::types::transaction::TxType; use serde_json::Value; -use crate::ledger::governance::utils::ProposalEvent; -use crate::sdk::error::{EncodingError, Error, EventError}; +// use crate::ledger::governance::utils::ProposalEvent; +use crate::error::{EncodingError, Error, EventError}; use crate::tendermint_proto::abci::EventAttribute; -use crate::types::ibc::IbcEvent; -#[cfg(feature = "ferveo-tpke")] -use crate::types::transaction::TxType; /// Indicates if an event is emitted do to /// an individual Tx or the nature of a finalized block @@ -171,16 
+171,6 @@ impl From for Event { } } -impl From for Event { - fn from(proposal_event: ProposalEvent) -> Self { - Self { - event_type: EventType::Proposal, - level: EventLevel::Block, - attributes: proposal_event.attributes, - } - } -} - /// Convert our custom event into the necessary tendermint proto type impl From for crate::tendermint_proto::abci::Event { fn from(event: Event) -> Self { diff --git a/sdk/src/internal_macros.rs b/sdk/src/internal_macros.rs new file mode 100644 index 0000000000..b864faa948 --- /dev/null +++ b/sdk/src/internal_macros.rs @@ -0,0 +1,17 @@ +macro_rules! echo_error { + ($io:expr, $($arg:tt)*) => {{ + let msg = ::alloc::format!($($arg)*); + $crate::edisplay_line!($io, "{msg}"); + msg + }} +} + +macro_rules! trace_error { + ($level:ident, $($arg:tt)*) => {{ + let msg = ::alloc::format!($($arg)*); + ::tracing::$level!("{msg}"); + msg + }} +} + +pub(crate) use {echo_error, trace_error}; diff --git a/shared/src/types/io.rs b/sdk/src/io.rs similarity index 56% rename from shared/src/types/io.rs rename to sdk/src/io.rs index 462dbef95f..248f6f91d9 100644 --- a/shared/src/types/io.rs +++ b/sdk/src/io.rs @@ -2,47 +2,50 @@ //! generic IO. The defaults are the obvious Rust native //! functions. -/// Rust native I/O handling. 
-pub struct DefaultIo; - -#[async_trait::async_trait(?Send)] -impl Io for DefaultIo {} - +/// A trait that abstracts out I/O operations #[async_trait::async_trait(?Send)] -#[allow(missing_docs)] pub trait Io { - fn print(output: impl AsRef) { + /// Print the given string + fn print(&self, output: impl AsRef) { print!("{}", output.as_ref()); } - fn flush() { + /// Flush the output + fn flush(&self) { use std::io::Write; std::io::stdout().flush().unwrap(); } - fn println(output: impl AsRef) { + /// Print the given string with a newline + fn println(&self, output: impl AsRef) { println!("{}", output.as_ref()); } + /// Print the given string into the given Writer fn write( + &self, mut writer: W, output: impl AsRef, ) -> std::io::Result<()> { write!(writer, "{}", output.as_ref()) } + /// Print the given string into the given Writer and terminate with newline fn writeln( + &self, mut writer: W, output: impl AsRef, ) -> std::io::Result<()> { writeln!(writer, "{}", output.as_ref()) } - fn eprintln(output: impl AsRef) { + /// Print the given error string + fn eprintln(&self, output: impl AsRef) { eprintln!("{}", output.as_ref()); } - async fn read() -> std::io::Result { + /// Read a string from input + async fn read(&self) -> std::io::Result { #[cfg(not(target_family = "wasm"))] { read_aux(tokio::io::stdin()).await @@ -53,7 +56,8 @@ pub trait Io { } } - async fn prompt(question: impl AsRef) -> String { + /// Display the given prompt and return the string input + async fn prompt(&self, question: impl AsRef) -> String { #[cfg(not(target_family = "wasm"))] { prompt_aux( @@ -74,6 +78,50 @@ pub trait Io { } } +/// Rust native I/O handling. +pub struct StdIo; + +#[async_trait::async_trait(?Send)] +impl Io for StdIo {} + +/// Ignores all I/O operations. 
+pub struct NullIo; + +#[async_trait::async_trait(?Send)] +impl Io for NullIo { + fn print(&self, _output: impl AsRef) {} + + fn flush(&self) {} + + fn println(&self, _output: impl AsRef) {} + + fn write( + &self, + mut _writer: W, + _output: impl AsRef, + ) -> std::io::Result<()> { + Ok(()) + } + + fn writeln( + &self, + mut _writer: W, + _output: impl AsRef, + ) -> std::io::Result<()> { + Ok(()) + } + + fn eprintln(&self, _output: impl AsRef) {} + + async fn read(&self) -> std::io::Result { + panic!("Unsupported operation") + } + + async fn prompt(&self, _question: impl AsRef) -> String { + panic!("Unsupported operation") + } +} + /// A generic function for displaying a prompt to users and reading /// in their response. #[cfg(not(target_family = "wasm"))] @@ -111,14 +159,14 @@ where /// [`Io::print`] #[macro_export] macro_rules! display { - ($io:ty) => { - <$io>::print("") + ($io:expr) => { + $io.print("") }; - ($io:ty, $w:expr; $($args:tt)*) => { - <$io>::write($w, format_args!($($args)*).to_string()) + ($io:expr, $w:expr; $($args:tt)*) => { + $io.write($w, format_args!($($args)*).to_string()) }; - ($io:ty,$($args:tt)*) => { - <$io>::print(format_args!($($args)*).to_string()) + ($io:expr,$($args:tt)*) => { + $io.print(format_args!($($args)*).to_string()) }; } @@ -126,14 +174,14 @@ macro_rules! display { /// [`Io::println`] and [`Io::writeln`] #[macro_export] macro_rules! display_line { - ($io:ty) => { - <$io>::println("") + ($io:expr) => { + $io.println("") }; - ($io:ty, $w:expr; $($args:tt)*) => { - <$io>::writeln($w, format_args!($($args)*).to_string()) + ($io:expr, $w:expr; $($args:tt)*) => { + $io.writeln($w, format_args!($($args)*).to_string()) }; - ($io:ty,$($args:tt)*) => { - <$io>::println(format_args!($($args)*).to_string()) + ($io:expr,$($args:tt)*) => { + $io.println(format_args!($($args)*).to_string()) }; } @@ -141,8 +189,8 @@ macro_rules! display_line { /// [`Io::eprintln`] #[macro_export] macro_rules! 
edisplay_line { - ($io:ty,$($args:tt)*) => { - <$io>::eprintln(format_args!($($args)*).to_string()) + ($io:expr,$($args:tt)*) => { + $io.eprintln(format_args!($($args)*).to_string()) }; } @@ -150,7 +198,7 @@ macro_rules! edisplay_line { /// A convenience macro for formatting the user prompt before /// forwarding it to the [`Io::prompt`] method. macro_rules! prompt { - ($io:ty,$($arg:tt)*) => {{ - <$io>::prompt(format!("{}", format_args!($($arg)*))) + ($io:expr,$($arg:tt)*) => {{ + $io.prompt(format!("{}", format_args!($($arg)*))) }} } diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs new file mode 100644 index 0000000000..77e03c2b4c --- /dev/null +++ b/sdk/src/lib.rs @@ -0,0 +1,589 @@ +extern crate alloc; + +pub use namada_core::proto; +#[cfg(feature = "tendermint-rpc")] +pub use tendermint_rpc; +#[cfg(feature = "tendermint-rpc-abcipp")] +pub use tendermint_rpc_abcipp as tendermint_rpc; +pub use { + bip39, borsh, masp_primitives, masp_proofs, namada_core as core, + namada_proof_of_stake as proof_of_stake, zeroize, +}; +#[cfg(feature = "abcipp")] +pub use { + ibc_abcipp as ibc, ibc_proto_abcipp as ibc_proto, + tendermint_abcipp as tendermint, + tendermint_proto_abcipp as tendermint_proto, +}; +#[cfg(feature = "abciplus")] +pub use { + namada_core::ibc, namada_core::ibc_proto, namada_core::tendermint, + namada_core::tendermint_proto, +}; + +pub mod eth_bridge; + +pub mod rpc; + +pub mod args; +pub mod masp; +pub mod signing; +#[allow(clippy::result_large_err)] +pub mod tx; + +pub mod control_flow; +pub mod error; +pub mod events; +pub(crate) mod internal_macros; +pub mod io; +pub mod queries; +pub mod wallet; + +use std::path::PathBuf; +use std::str::FromStr; + +use args::{InputAmount, SdkTypes}; +use namada_core::types::address::Address; +use namada_core::types::dec::Dec; +use namada_core::types::ethereum_events::EthAddress; +use namada_core::types::key::*; +use namada_core::types::masp::{TransferSource, TransferTarget}; +use namada_core::types::token; +use 
namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; +use namada_core::types::transaction::GasLimit; +use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; + +use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::io::Io; +use crate::masp::{ShieldedContext, ShieldedUtils}; +use crate::proto::Tx; +use crate::rpc::{ + denominate_amount, format_denominated_amount, query_native_token, +}; +use crate::signing::SigningTxData; +use crate::token::DenominatedAmount; +use crate::tx::{ + ProcessTxResponse, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, + TX_CHANGE_COMMISSION_WASM, TX_IBC_WASM, TX_INIT_PROPOSAL, + TX_INIT_VALIDATOR_WASM, TX_RESIGN_STEWARD, TX_REVEAL_PK, TX_TRANSFER_WASM, + TX_UNBOND_WASM, TX_UNJAIL_VALIDATOR_WASM, TX_UPDATE_ACCOUNT_WASM, + TX_UPDATE_STEWARD_COMMISSION, TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, + VP_USER_WASM, +}; +use crate::wallet::{Wallet, WalletIo, WalletStorage}; + +#[async_trait::async_trait(?Send)] +/// An interface for high-level interaction with the Namada SDK +pub trait Namada<'a>: Sized { + /// A client with async request dispatcher method + type Client: 'a + queries::Client + Sync; + /// Captures the interactive parts of the wallet's functioning + type WalletUtils: 'a + WalletIo + WalletStorage; + /// Abstracts platform specific details away from the logic of shielded pool + /// operations. 
+ type ShieldedUtils: 'a + ShieldedUtils; + /// Captures the input/output streams used by this object + type Io: 'a + Io; + + /// Obtain the client for communicating with the ledger + fn client(&self) -> &'a Self::Client; + + /// Obtain the input/output handle for this context + fn io(&self) -> &'a Self::Io; + + /// Obtain read guard on the wallet + async fn wallet( + &self, + ) -> RwLockReadGuard<&'a mut Wallet>; + + /// Obtain write guard on the wallet + async fn wallet_mut( + &self, + ) -> RwLockWriteGuard<&'a mut Wallet>; + + /// Obtain read guard on the shielded context + async fn shielded( + &self, + ) -> RwLockReadGuard<&'a mut ShieldedContext>; + + /// Obtain write guard on the shielded context + async fn shielded_mut( + &self, + ) -> RwLockWriteGuard<&'a mut ShieldedContext>; + + /// Return the native token + fn native_token(&self) -> Address; + + /// Make a tx builder using no arguments + fn tx_builder(&self) -> args::Tx { + args::Tx { + dry_run: false, + dry_run_wrapper: false, + dump_tx: false, + output_folder: None, + force: false, + broadcast_only: false, + ledger_address: (), + initialized_account_alias: None, + wallet_alias_force: false, + fee_amount: None, + wrapper_fee_payer: None, + fee_token: self.native_token(), + fee_unshield: None, + gas_limit: GasLimit::from(20_000), + expiration: None, + disposable_signing_key: false, + chain_id: None, + signing_keys: vec![], + signatures: vec![], + tx_reveal_code_path: PathBuf::from(TX_REVEAL_PK), + verification_key: None, + password: None, + } + } + + /// Make a TxTransfer builder from the given minimum set of arguments + fn new_transfer( + &self, + source: TransferSource, + target: TransferTarget, + token: Address, + amount: InputAmount, + ) -> args::TxTransfer { + args::TxTransfer { + source, + target, + token, + amount, + tx_code_path: PathBuf::from(TX_TRANSFER_WASM), + tx: self.tx_builder(), + native_token: self.native_token(), + } + } + + /// Make a RevealPK builder from the given minimum set of 
arguments + fn new_reveal_pk(&self, public_key: common::PublicKey) -> args::RevealPk { + args::RevealPk { + public_key, + tx: self.tx_builder(), + } + } + + /// Make a Bond builder from the given minimum set of arguments + fn new_bond( + &self, + validator: Address, + amount: token::Amount, + ) -> args::Bond { + args::Bond { + validator, + amount, + source: None, + tx: self.tx_builder(), + native_token: self.native_token(), + tx_code_path: PathBuf::from(TX_BOND_WASM), + } + } + + /// Make a Unbond builder from the given minimum set of arguments + fn new_unbond( + &self, + validator: Address, + amount: token::Amount, + ) -> args::Unbond { + args::Unbond { + validator, + amount, + source: None, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_UNBOND_WASM), + } + } + + /// Make a TxIbcTransfer builder from the given minimum set of arguments + fn new_ibc_transfer( + &self, + source: Address, + receiver: String, + token: Address, + amount: InputAmount, + channel_id: ChannelId, + ) -> args::TxIbcTransfer { + args::TxIbcTransfer { + source, + receiver, + token, + amount, + channel_id, + port_id: PortId::from_str("transfer").unwrap(), + timeout_height: None, + timeout_sec_offset: None, + memo: None, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_IBC_WASM), + } + } + + /// Make a InitProposal builder from the given minimum set of arguments + fn new_init_proposal(&self, proposal_data: Vec) -> args::InitProposal { + args::InitProposal { + proposal_data, + native_token: self.native_token(), + is_offline: false, + is_pgf_stewards: false, + is_pgf_funding: false, + tx_code_path: PathBuf::from(TX_INIT_PROPOSAL), + tx: self.tx_builder(), + } + } + + /// Make a TxUpdateAccount builder from the given minimum set of arguments + fn new_update_account(&self, addr: Address) -> args::TxUpdateAccount { + args::TxUpdateAccount { + addr, + vp_code_path: None, + public_keys: vec![], + threshold: None, + tx_code_path: PathBuf::from(TX_UPDATE_ACCOUNT_WASM), + tx: 
self.tx_builder(), + } + } + + /// Make a VoteProposal builder from the given minimum set of arguments + fn new_vote_prposal( + &self, + vote: String, + voter: Address, + ) -> args::VoteProposal { + args::VoteProposal { + vote, + voter, + proposal_id: None, + is_offline: false, + proposal_data: None, + tx_code_path: PathBuf::from(TX_VOTE_PROPOSAL), + tx: self.tx_builder(), + } + } + + /// Make a CommissionRateChange builder from the given minimum set of + /// arguments + fn new_change_commission_rate( + &self, + rate: Dec, + validator: Address, + ) -> args::CommissionRateChange { + args::CommissionRateChange { + rate, + validator, + tx_code_path: PathBuf::from(TX_CHANGE_COMMISSION_WASM), + tx: self.tx_builder(), + } + } + + /// Make a TxInitValidator builder from the given minimum set of arguments + fn new_init_validator( + &self, + commission_rate: Dec, + max_commission_rate_change: Dec, + ) -> args::TxInitValidator { + args::TxInitValidator { + commission_rate, + max_commission_rate_change, + scheme: SchemeType::Ed25519, + account_keys: vec![], + threshold: None, + consensus_key: None, + eth_cold_key: None, + eth_hot_key: None, + protocol_key: None, + validator_vp_code_path: PathBuf::from(VP_USER_WASM), + unsafe_dont_encrypt: false, + tx_code_path: PathBuf::from(TX_INIT_VALIDATOR_WASM), + tx: self.tx_builder(), + } + } + + /// Make a TxUnjailValidator builder from the given minimum set of arguments + fn new_unjail_validator( + &self, + validator: Address, + ) -> args::TxUnjailValidator { + args::TxUnjailValidator { + validator, + tx_code_path: PathBuf::from(TX_UNJAIL_VALIDATOR_WASM), + tx: self.tx_builder(), + } + } + + /// Make a Withdraw builder from the given minimum set of arguments + fn new_withdraw(&self, validator: Address) -> args::Withdraw { + args::Withdraw { + validator, + source: None, + tx_code_path: PathBuf::from(TX_WITHDRAW_WASM), + tx: self.tx_builder(), + } + } + + /// Make a Withdraw builder from the given minimum set of arguments + fn 
new_add_erc20_transfer( + &self, + sender: Address, + recipient: EthAddress, + asset: EthAddress, + amount: InputAmount, + ) -> args::EthereumBridgePool { + args::EthereumBridgePool { + sender, + recipient, + asset, + amount, + fee_amount: InputAmount::Unvalidated(token::DenominatedAmount { + amount: token::Amount::default(), + denom: NATIVE_MAX_DECIMAL_PLACES.into(), + }), + fee_payer: None, + fee_token: self.native_token(), + nut: false, + code_path: PathBuf::from(TX_BRIDGE_POOL_WASM), + tx: self.tx_builder(), + } + } + + /// Make a ResignSteward builder from the given minimum set of arguments + fn new_resign_steward(&self, steward: Address) -> args::ResignSteward { + args::ResignSteward { + steward, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_RESIGN_STEWARD), + } + } + + /// Make a UpdateStewardCommission builder from the given minimum set of + /// arguments + fn new_update_steward_rewards( + &self, + steward: Address, + commission: Vec, + ) -> args::UpdateStewardCommission { + args::UpdateStewardCommission { + steward, + commission, + tx: self.tx_builder(), + tx_code_path: PathBuf::from(TX_UPDATE_STEWARD_COMMISSION), + } + } + + /// Make a TxCustom builder from the given minimum set of arguments + fn new_custom(&self, owner: Address) -> args::TxCustom { + args::TxCustom { + owner, + tx: self.tx_builder(), + code_path: None, + data_path: None, + serialized_tx: None, + } + } + + /// Sign the given transaction using the given signing data + async fn sign( + &self, + tx: &mut Tx, + args: &args::Tx, + signing_data: SigningTxData, + ) -> crate::error::Result<()> { + signing::sign_tx(*self.wallet_mut().await, args, tx, signing_data) + } + + /// Process the given transaction using the given flags + async fn submit( + &self, + tx: Tx, + args: &args::Tx, + ) -> crate::error::Result { + tx::process_tx(self, args, tx).await + } + + /// Look up the denomination of a token in order to make a correctly + /// denominated amount. 
+ async fn denominate_amount( + &self, + token: &Address, + amount: token::Amount, + ) -> DenominatedAmount { + denominate_amount(self.client(), self.io(), token, amount).await + } + + /// Look up the denomination of a token in order to format it correctly as a + /// string. + async fn format_amount( + &self, + token: &Address, + amount: token::Amount, + ) -> String { + format_denominated_amount(self.client(), self.io(), token, amount).await + } +} + +/// Provides convenience methods for common Namada interactions +pub struct NamadaImpl<'a, C, U, V, I> +where + C: queries::Client + Sync, + U: WalletIo, + V: ShieldedUtils, + I: Io, +{ + /// Used to send and receive messages from the ledger + pub client: &'a C, + /// Stores the addresses and keys required for ledger interactions + pub wallet: RwLock<&'a mut Wallet>, + /// Stores the current state of the shielded pool + pub shielded: RwLock<&'a mut ShieldedContext>, + /// Captures the input/output streams used by this object + pub io: &'a I, + /// The address of the native token + native_token: Address, + /// The default builder for a Tx + prototype: args::Tx, +} + +impl<'a, C, U, V, I> NamadaImpl<'a, C, U, V, I> +where + C: queries::Client + Sync, + U: WalletIo, + V: ShieldedUtils, + I: Io, +{ + /// Construct a new Namada context with the given native token address + pub fn native_new( + client: &'a C, + wallet: &'a mut Wallet, + shielded: &'a mut ShieldedContext, + io: &'a I, + native_token: Address, + ) -> Self { + NamadaImpl { + client, + wallet: RwLock::new(wallet), + shielded: RwLock::new(shielded), + io, + native_token: native_token.clone(), + prototype: args::Tx { + dry_run: false, + dry_run_wrapper: false, + dump_tx: false, + output_folder: None, + force: false, + broadcast_only: false, + ledger_address: (), + initialized_account_alias: None, + wallet_alias_force: false, + fee_amount: None, + wrapper_fee_payer: None, + fee_token: native_token, + fee_unshield: None, + gas_limit: GasLimit::from(20_000), + 
expiration: None, + disposable_signing_key: false, + chain_id: None, + signing_keys: vec![], + signatures: vec![], + tx_reveal_code_path: PathBuf::from(TX_REVEAL_PK), + verification_key: None, + password: None, + }, + } + } + + /// Construct a new Namada context looking up the native token address + pub async fn new( + client: &'a C, + wallet: &'a mut Wallet, + shielded: &'a mut ShieldedContext, + io: &'a I, + ) -> crate::error::Result> { + let native_token = query_native_token(client).await?; + Ok(NamadaImpl::native_new( + client, + wallet, + shielded, + io, + native_token, + )) + } +} + +#[async_trait::async_trait(?Send)] +impl<'a, C, U, V, I> Namada<'a> for NamadaImpl<'a, C, U, V, I> +where + C: queries::Client + Sync, + U: WalletIo + WalletStorage, + V: ShieldedUtils, + I: Io, +{ + type Client = C; + type Io = I; + type ShieldedUtils = V; + type WalletUtils = U; + + /// Obtain the prototypical Tx builder + fn tx_builder(&self) -> args::Tx { + self.prototype.clone() + } + + fn native_token(&self) -> Address { + self.native_token.clone() + } + + fn io(&self) -> &'a Self::Io { + self.io + } + + fn client(&self) -> &'a Self::Client { + self.client + } + + async fn wallet( + &self, + ) -> RwLockReadGuard<&'a mut Wallet> { + self.wallet.read().await + } + + async fn wallet_mut( + &self, + ) -> RwLockWriteGuard<&'a mut Wallet> { + self.wallet.write().await + } + + async fn shielded( + &self, + ) -> RwLockReadGuard<&'a mut ShieldedContext> { + self.shielded.read().await + } + + async fn shielded_mut( + &self, + ) -> RwLockWriteGuard<&'a mut ShieldedContext> { + self.shielded.write().await + } +} + +/// Allow the prototypical Tx builder to be modified +impl<'a, C, U, V, I> args::TxBuilder for NamadaImpl<'a, C, U, V, I> +where + C: queries::Client + Sync, + U: WalletIo, + V: ShieldedUtils, + I: Io, +{ + fn tx(self, func: F) -> Self + where + F: FnOnce(args::Tx) -> args::Tx, + { + Self { + prototype: func(self.prototype), + ..self + } + } +} diff --git 
a/shared/src/sdk/masp.rs b/sdk/src/masp.rs similarity index 87% rename from shared/src/sdk/masp.rs rename to sdk/src/masp.rs index 739f941b9a..5e22dba3cc 100644 --- a/shared/src/sdk/masp.rs +++ b/sdk/src/masp.rs @@ -10,6 +10,7 @@ use std::path::PathBuf; // use async_std::io::prelude::WriteExt; // use async_std::io::{self}; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use itertools::Either; use masp_primitives::asset_type::AssetType; #[cfg(feature = "mainnet")] @@ -34,8 +35,8 @@ use masp_primitives::transaction::builder::{self, *}; use masp_primitives::transaction::components::sapling::builder::SaplingMetadata; use masp_primitives::transaction::components::transparent::builder::TransparentBuilder; use masp_primitives::transaction::components::{ - ConvertDescription, I128Sum, I32Sum, OutputDescription, SpendDescription, - TxOut, U64Sum, + ConvertDescription, I128Sum, OutputDescription, SpendDescription, TxOut, + U64Sum, }; use masp_primitives::transaction::fees::fixed::FeeRule; use masp_primitives::transaction::sighash::{signature_hash, SignableInput}; @@ -49,8 +50,19 @@ use masp_proofs::bellman::groth16::PreparedVerifyingKey; use masp_proofs::bls12_381::Bls12; use masp_proofs::prover::LocalTxProver; use masp_proofs::sapling::SaplingVerificationContext; -use namada_core::types::token::{Change, MaspDenom}; -use namada_core::types::transaction::AffineCurve; +use namada_core::types::address::{masp, Address}; +use namada_core::types::masp::{ + BalanceOwner, ExtendedViewingKey, PaymentAddress, TransferSource, + TransferTarget, +}; +use namada_core::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; +use namada_core::types::token; +use namada_core::types::token::{ + Change, MaspDenom, Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, +}; +use namada_core::types::transaction::{ + AffineCurve, EllipticCurve, PairingEngine, WrapperTx, +}; #[cfg(feature = "masp-tx-gen")] use rand_core::{CryptoRng, OsRng, RngCore}; use 
ripemd::Digest as RipemdDigest; @@ -58,25 +70,17 @@ use ripemd::Digest as RipemdDigest; use sha2::Digest; use thiserror::Error; +#[cfg(feature = "testing")] +use crate::error::EncodingError; +use crate::error::{Error, PinnedBalanceError, QueryError}; +use crate::io::Io; use crate::proto::Tx; -use crate::sdk::args::InputAmount; -use crate::sdk::error::{EncodingError, Error, PinnedBalanceError, QueryError}; -use crate::sdk::queries::Client; -use crate::sdk::rpc::{query_conversion, query_storage_value}; -use crate::sdk::tx::decode_component; -use crate::sdk::{args, rpc}; +use crate::queries::Client; +use crate::rpc::{query_conversion, query_storage_value}; use crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; -use crate::types::address::{masp, Address}; -use crate::types::io::Io; -use crate::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; -use crate::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; -use crate::types::token; -use crate::types::token::{ - Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, -}; -use crate::types::transaction::{EllipticCurve, PairingEngine, WrapperTx}; -use crate::{display_line, edisplay_line}; +use crate::tx::decode_component; +use crate::{display_line, edisplay_line, rpc, Namada}; /// Env var to point to a dir with MASP parameters. When not specified, /// the default OS specific path is used. 
@@ -396,10 +400,16 @@ pub trait ShieldedUtils: fn local_tx_prover(&self) -> LocalTxProver; /// Load up the currently saved ShieldedContext - async fn load(self) -> std::io::Result>; + async fn load( + &self, + ctx: &mut ShieldedContext, + ) -> std::io::Result<()>; - /// Sace the given ShieldedContext for future loads - async fn save(&self, ctx: &ShieldedContext) -> std::io::Result<()>; + /// Save the given ShieldedContext for future loads + async fn save( + &self, + ctx: &ShieldedContext, + ) -> std::io::Result<()>; } /// Make a ViewingKey that can view notes encrypted by given ExtendedSpendingKey @@ -563,7 +573,7 @@ pub type TransactionDelta = HashMap; #[derive(BorshSerialize, BorshDeserialize, Debug)] pub struct ShieldedContext { /// Location where this shielded context is saved - #[borsh_skip] + #[borsh(skip)] pub utils: U, /// The last transaction index to be processed in this context pub last_txidx: u64, @@ -620,9 +630,7 @@ impl ShieldedContext { /// Try to load the last saved shielded context from the given context /// directory. If this fails, then leave the current context unchanged. pub async fn load(&mut self) -> std::io::Result<()> { - let new_ctx = self.utils.clone().load().await?; - *self = new_ctx; - Ok(()) + self.utils.clone().load(self).await } /// Save this shielded context into its associated context directory @@ -989,7 +997,7 @@ impl ShieldedContext { Address, MaspDenom, _, - I32Sum, + I128Sum, MerklePath, ) = rpc::query_conversion(client, asset_type).await?; self.asset_types @@ -1025,9 +1033,10 @@ impl ShieldedContext { /// context and express that value in terms of the currently timestamped /// asset types. If the key is not in the context, then we do not know the /// balance and hence we return None. 
- pub async fn compute_exchanged_balance( + pub async fn compute_exchanged_balance( &mut self, - client: &C, + client: &(impl Client + Sync), + io: &impl Io, vk: &ViewingKey, target_epoch: Epoch, ) -> Result, Error> { @@ -1035,8 +1044,9 @@ impl ShieldedContext { if let Some(balance) = self.compute_shielded_balance(client, vk).await? { let exchanged_amount = self - .compute_exchanged_amount::<_, IO>( + .compute_exchanged_amount( client, + io, balance, target_epoch, BTreeMap::new(), @@ -1058,9 +1068,10 @@ impl ShieldedContext { /// the trace amount that could not be converted is moved from input to /// output. #[allow(clippy::too_many_arguments)] - async fn apply_conversion( + async fn apply_conversion( &mut self, - client: &C, + client: &(impl Client + Sync), + io: &impl Io, conv: AllowedConversion, asset_type: (Epoch, Address, MaspDenom), value: i128, @@ -1080,7 +1091,7 @@ impl ShieldedContext { let threshold = -conv[&masp_asset]; if threshold == 0 { edisplay_line!( - IO, + io, "Asset threshold of selected conversion for asset type {} is \ 0, this is a bug, please report it.", masp_asset @@ -1110,9 +1121,10 @@ impl ShieldedContext { /// note of the conversions that were used. Note that this function does /// not assume that allowed conversions from the ledger are expressed in /// terms of the latest asset types. - pub async fn compute_exchanged_amount( + pub async fn compute_exchanged_amount( &mut self, - client: &C, + client: &(impl Client + Sync), + io: &impl Io, mut input: MaspAmount, target_epoch: Epoch, mut conversions: Conversions, @@ -1149,15 +1161,16 @@ impl ShieldedContext { (conversions.get_mut(&asset_type), at_target_asset_type) { display_line!( - IO, + io, "converting current asset type to latest asset type..." ); // Not at the target asset type, not at the latest asset // type. Apply conversion to get from // current asset type to the latest // asset type. 
- self.apply_conversion::<_, IO>( + self.apply_conversion( client, + io, conv.clone(), (asset_epoch, token_addr.clone(), denom), denom_value, @@ -1171,15 +1184,16 @@ impl ShieldedContext { at_target_asset_type, ) { display_line!( - IO, + io, "converting latest asset type to target asset type..." ); // Not at the target asset type, yet at the latest asset // type. Apply inverse conversion to get // from latest asset type to the target // asset type. - self.apply_conversion::<_, IO>( + self.apply_conversion( client, + io, conv.clone(), (asset_epoch, token_addr.clone(), denom), denom_value, @@ -1213,9 +1227,9 @@ impl ShieldedContext { /// of the specified asset type. Return the total value accumulated plus /// notes and the corresponding diversifiers/merkle paths that were used to /// achieve the total value. - pub async fn collect_unspent_notes( + pub async fn collect_unspent_notes<'a>( &mut self, - client: &C, + context: &impl Namada<'a>, vk: &ViewingKey, target: I128Sum, target_epoch: Epoch, @@ -1257,10 +1271,12 @@ impl ShieldedContext { .to_string(), ) })?; - let input = self.decode_all_amounts(client, pre_contr).await; + let input = + self.decode_all_amounts(context.client(), pre_contr).await; let (contr, proposed_convs) = self - .compute_exchanged_amount::<_, IO>( - client, + .compute_exchanged_amount( + context.client(), + context.io(), input, target_epoch, conversions.clone(), @@ -1398,31 +1414,37 @@ impl ShieldedContext { /// the epoch of the transaction or even before, so exchange all these /// amounts to the epoch of the transaction in order to get the value that /// would have been displayed in the epoch of the transaction. 
- pub async fn compute_exchanged_pinned_balance( + pub async fn compute_exchanged_pinned_balance<'a>( &mut self, - client: &C, + context: &impl Namada<'a>, owner: PaymentAddress, viewing_key: &ViewingKey, ) -> Result<(MaspAmount, Epoch), Error> { // Obtain the balance that will be exchanged let (amt, ep) = - Self::compute_pinned_balance(client, owner, viewing_key).await?; - display_line!(IO, "Pinned balance: {:?}", amt); + Self::compute_pinned_balance(context.client(), owner, viewing_key) + .await?; + display_line!(context.io(), "Pinned balance: {:?}", amt); // Establish connection with which to do exchange rate queries - let amount = self.decode_all_amounts(client, amt).await; - display_line!(IO, "Decoded pinned balance: {:?}", amount); + let amount = self.decode_all_amounts(context.client(), amt).await; + display_line!(context.io(), "Decoded pinned balance: {:?}", amount); // Finally, exchange the balance to the transaction's epoch let computed_amount = self - .compute_exchanged_amount::<_, IO>( - client, + .compute_exchanged_amount( + context.client(), + context.io(), amount, ep, BTreeMap::new(), ) .await? .0; - display_line!(IO, "Exchanged amount: {:?}", computed_amount); - Ok((self.decode_all_amounts(client, computed_amount).await, ep)) + display_line!(context.io(), "Exchanged amount: {:?}", computed_amount); + Ok(( + self.decode_all_amounts(context.client(), computed_amount) + .await, + ep, + )) } /// Convert an amount whose units are AssetTypes to one whose units are @@ -1483,10 +1505,12 @@ impl ShieldedContext { /// understood that transparent account changes are effected only by the /// amounts and signatures specified by the containing Transfer object. 
#[cfg(feature = "masp-tx-gen")] - pub async fn gen_shielded_transfer( - &mut self, - client: &C, - args: args::TxTransfer, + pub async fn gen_shielded_transfer<'a>( + context: &impl Namada<'a>, + source: &TransferSource, + target: &TransferTarget, + token: &Address, + amount: token::DenominatedAmount, ) -> Result, TransferErr> { // No shielded components are needed when neither source nor destination // are shielded @@ -1496,8 +1520,8 @@ impl ShieldedContext { use rand::rngs::StdRng; use rand_core::SeedableRng; - let spending_key = args.source.spending_key(); - let payment_address = args.target.payment_address(); + let spending_key = source.spending_key(); + let payment_address = target.payment_address(); // No shielded components are needed when neither source nor // destination are shielded if spending_key.is_none() && payment_address.is_none() { @@ -1506,13 +1530,20 @@ impl ShieldedContext { // We want to fund our transaction solely from supplied spending key let spending_key = spending_key.map(|x| x.into()); let spending_keys: Vec<_> = spending_key.into_iter().collect(); - // Load the current shielded context given the spending key we possess - let _ = self.load().await; - self.fetch(client, &spending_keys, &[]).await?; - // Save the update state so that future fetches can be short-circuited - let _ = self.save().await; + { + // Load the current shielded context given the spending key we + // possess + let mut shielded = context.shielded_mut().await; + let _ = shielded.load().await; + shielded + .fetch(context.client(), &spending_keys, &[]) + .await?; + // Save the update state so that future fetches can be + // short-circuited + let _ = shielded.save().await; + } // Determine epoch in which to submit potential shielded transaction - let epoch = rpc::query_epoch(client).await?; + let epoch = rpc::query_epoch(context.client()).await?; // Context required for storing which notes are in the source's // possesion let memo = MemoBytes::empty(); @@ -1540,23 +1571,20 
@@ impl ShieldedContext { let mut builder = Builder::::new_with_rng(NETWORK, 1.into(), rng); - // break up a transfer into a number of transfers with suitable - // denominations - let InputAmount::Validated(amt) = args.amount else { - unreachable!("The function `gen_shielded_transfer` is only called by `submit_tx` which validates amounts.") - }; // Convert transaction amount into MASP types - let (asset_types, amount) = - convert_amount(epoch, &args.token, amt.amount)?; + let (asset_types, masp_amount) = + convert_amount(epoch, token, amount.amount)?; // If there are shielded inputs if let Some(sk) = spending_key { // Locate unspent notes that can help us meet the transaction amount - let (_, unspent_notes, used_convs) = self - .collect_unspent_notes::<_, IO>( - client, + let (_, unspent_notes, used_convs) = context + .shielded_mut() + .await + .collect_unspent_notes( + context, &to_viewing_key(&sk).vk, - I128Sum::from_sum(amount), + I128Sum::from_sum(masp_amount), epoch, ) .await?; @@ -1582,20 +1610,15 @@ impl ShieldedContext { // We add a dummy UTXO to our transaction, but only the source of // the parent Transfer object is used to validate fund // availability - let source_enc = args - .source + let source_enc = source .address() .ok_or_else(|| { Error::Other( "source address should be transparent".to_string(), ) })? 
- .try_to_vec() - .map_err(|_| { - Error::from(EncodingError::Encode( - "source address".to_string(), - )) - })?; + .serialize_to_vec(); + let hash = ripemd::Ripemd160::digest(sha2::Sha256::digest( source_enc.as_ref(), )); @@ -1605,7 +1628,7 @@ impl ShieldedContext { builder .add_transparent_input(TxOut { asset_type: *asset_type, - value: denom.denominate(&amt), + value: denom.denominate(&amount), address: script, }) .map_err(builder::Error::TransparentBuild)?; @@ -1623,7 +1646,7 @@ impl ShieldedContext { ovk_opt, pa.into(), *asset_type, - denom.denominate(&amt), + denom.denominate(&amount), memo.clone(), ) .map_err(builder::Error::SaplingBuild)?; @@ -1631,26 +1654,20 @@ impl ShieldedContext { } else { // Embed the transparent target address into the shielded // transaction so that it can be signed - let target_enc = args - .target + let target_enc = target .address() .ok_or_else(|| { Error::Other( "source address should be transparent".to_string(), ) })? - .try_to_vec() - .map_err(|_| { - Error::from(EncodingError::Encode( - "target address".to_string(), - )) - })?; + .serialize_to_vec(); let hash = ripemd::Ripemd160::digest(sha2::Sha256::digest( target_enc.as_ref(), )); for (denom, asset_type) in MaspDenom::iter().zip(asset_types.iter()) { - let vout = denom.denominate(&amt); + let vout = denom.denominate(&amount); if vout != 0 { builder .add_transparent_output( @@ -1735,22 +1752,21 @@ impl ShieldedContext { let builder_clone = builder.clone().map_builder(WalletMap); #[cfg(feature = "testing")] - let builder_bytes = BorshSerialize::try_to_vec(&builder_clone) - .map_err(|e| { - Error::from(EncodingError::Conversion(e.to_string())) - })?; - - let build_transfer = - || -> Result> { - let (masp_tx, metadata) = builder.build( - &self.utils.local_tx_prover(), - &FeeRule::non_standard(U64Sum::zero()), - )?; - Ok(ShieldedTransfer { - builder: builder_clone, - masp_tx, - metadata, - epoch, + let builder_bytes = borsh::to_vec(&builder_clone).map_err(|e| { + 
Error::from(EncodingError::Conversion(e.to_string())) + })?; + + let build_transfer = |prover: LocalTxProver| -> Result< + ShieldedTransfer, + builder::Error, + > { + let (masp_tx, metadata) = builder + .build(&prover, &FeeRule::non_standard(U64Sum::zero()))?; + Ok(ShieldedTransfer { + builder: builder_clone, + masp_tx, + metadata, + epoch, }) }; @@ -1796,14 +1812,13 @@ impl ShieldedContext { Ok(Some(loaded)) } else { // Build and return the constructed transaction - let built = build_transfer()?; + let built = build_transfer( + context.shielded().await.utils.local_tx_prover(), + )?; if let LoadOrSaveProofs::Save = load_or_save { - let built_bytes = BorshSerialize::try_to_vec(&built) - .map_err(|e| { - Error::from(EncodingError::Conversion( - e.to_string(), - )) - })?; + let built_bytes = borsh::to_vec(&built).map_err(|e| { + Error::from(EncodingError::Conversion(e.to_string())) + })?; tokio::fs::write(&saved_filepath, built_bytes) .await .map_err(|e| Error::Other(e.to_string()))?; @@ -1815,7 +1830,9 @@ impl ShieldedContext { #[cfg(not(feature = "testing"))] { // Build and return the constructed transaction - let built = build_transfer()?; + let built = build_transfer( + context.shielded().await.utils.local_tx_prover(), + )?; Ok(Some(built)) } } @@ -1971,12 +1988,8 @@ pub fn make_asset_type( ) -> Result { // Typestamp the chosen token with the current epoch let token_bytes = match epoch { - None => (token, denom) - .try_to_vec() - .map_err(|e| Error::from(EncodingError::Encode(e.to_string())))?, - Some(epoch) => (token, denom, epoch.0) - .try_to_vec() - .map_err(|e| Error::from(EncodingError::Encode(e.to_string())))?, + None => (token, denom).serialize_to_vec(), + Some(epoch) => (token, denom, epoch.0).serialize_to_vec(), }; // Generate the unique asset identifier from the unique token address AssetType::new(token_bytes.as_ref()) @@ -2108,3 +2121,133 @@ mod tests { super::load_pvks(); } } + +#[cfg(feature = "std")] +/// Implementation of MASP functionality 
depending on a standard filesystem +pub mod fs { + use std::fs::{File, OpenOptions}; + use std::io::{Read, Write}; + + use async_trait::async_trait; + + use super::*; + + /// Shielded context file name + const FILE_NAME: &str = "shielded.dat"; + const TMP_FILE_NAME: &str = "shielded.tmp"; + + #[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] + /// An implementation of ShieldedUtils for standard filesystems + pub struct FsShieldedUtils { + #[borsh(skip)] + context_dir: PathBuf, + } + + impl FsShieldedUtils { + /// Initialize a shielded transaction context that identifies notes + /// decryptable by any viewing key in the given set + pub fn new(context_dir: PathBuf) -> ShieldedContext { + // Make sure that MASP parameters are downloaded to enable MASP + // transaction building and verification later on + let params_dir = get_params_dir(); + let spend_path = params_dir.join(SPEND_NAME); + let convert_path = params_dir.join(CONVERT_NAME); + let output_path = params_dir.join(OUTPUT_NAME); + if !(spend_path.exists() + && convert_path.exists() + && output_path.exists()) + { + println!("MASP parameters not present, downloading..."); + masp_proofs::download_masp_parameters(None) + .expect("MASP parameters not present or downloadable"); + println!( + "MASP parameter download complete, resuming execution..." 
+ ); + } + // Finally initialize a shielded context with the supplied directory + let utils = Self { context_dir }; + ShieldedContext { + utils, + ..Default::default() + } + } + } + + impl Default for FsShieldedUtils { + fn default() -> Self { + Self { + context_dir: PathBuf::from(FILE_NAME), + } + } + } + + #[async_trait(?Send)] + impl ShieldedUtils for FsShieldedUtils { + fn local_tx_prover(&self) -> LocalTxProver { + if let Ok(params_dir) = env::var(ENV_VAR_MASP_PARAMS_DIR) { + let params_dir = PathBuf::from(params_dir); + let spend_path = params_dir.join(SPEND_NAME); + let convert_path = params_dir.join(CONVERT_NAME); + let output_path = params_dir.join(OUTPUT_NAME); + LocalTxProver::new(&spend_path, &output_path, &convert_path) + } else { + LocalTxProver::with_default_location() + .expect("unable to load MASP Parameters") + } + } + + /// Try to load the last saved shielded context from the given context + /// directory. If this fails, then leave the current context unchanged. + async fn load( + &self, + ctx: &mut ShieldedContext, + ) -> std::io::Result<()> { + // Try to load shielded context from file + let mut ctx_file = File::open(self.context_dir.join(FILE_NAME))?; + let mut bytes = Vec::new(); + ctx_file.read_to_end(&mut bytes)?; + // Fill the supplied context with the deserialized object + *ctx = ShieldedContext { + utils: ctx.utils.clone(), + ..ShieldedContext::::deserialize(&mut &bytes[..])? + }; + Ok(()) + } + + /// Save this shielded context into its associated context directory + async fn save( + &self, + ctx: &ShieldedContext, + ) -> std::io::Result<()> { + // TODO: use mktemp crate? + let tmp_path = self.context_dir.join(TMP_FILE_NAME); + { + // First serialize the shielded context into a temporary file. + // Inability to create this file implies a simultaneuous write + // is in progress. In this case, immediately + // fail. This is unproblematic because the data + // intended to be stored can always be re-fetched + // from the blockchain. 
+ let mut ctx_file = OpenOptions::new() + .write(true) + .create_new(true) + .open(tmp_path.clone())?; + let mut bytes = Vec::new(); + ctx.serialize(&mut bytes) + .expect("cannot serialize shielded context"); + ctx_file.write_all(&bytes[..])?; + } + // Atomically update the old shielded context file with new data. + // Atomicity is required to prevent other client instances from + // reading corrupt data. + std::fs::rename( + tmp_path.clone(), + self.context_dir.join(FILE_NAME), + )?; + // Finally, remove our temporary file to allow future saving of + // shielded contexts. + std::fs::remove_file(tmp_path)?; + Ok(()) + } + } +} diff --git a/shared/src/sdk/queries.rs b/sdk/src/queries/mod.rs similarity index 59% rename from shared/src/sdk/queries.rs rename to sdk/src/queries/mod.rs index a7cb9badb1..fdd5b042a8 100644 --- a/shared/src/sdk/queries.rs +++ b/sdk/src/queries/mod.rs @@ -1,7 +1,207 @@ -//! Query functionality related to the SDK -use std::fmt::{Debug, Display}; +//! Ledger read-only queries can be handled and dispatched via the [`RPC`] +//! defined via `router!` macro. +// Re-export to show in rustdoc! +use namada_core::ledger::storage::traits::StorageHasher; +use namada_core::ledger::storage::{DBIter, DB}; +use namada_core::ledger::storage_api; use namada_core::types::storage::BlockHeight; +pub use shell::Shell; +use shell::SHELL; +pub use types::{ + EncodedResponseQuery, Error, RequestCtx, RequestQuery, ResponseQuery, + Router, +}; +use vp::{Vp, VP}; + +pub use self::shell::eth_bridge::{ + Erc20FlowControl, GenBridgePoolProofReq, GenBridgePoolProofRsp, + TransferToErcArgs, +}; + +#[macro_use] +mod router; +mod shell; +mod types; +pub mod vp; + +// Most commonly expected patterns should be declared first +router! {RPC, + // Shell provides storage read access, block metadata and can dry-run a tx + ( "shell" ) = (sub SHELL), + + // Validity-predicate's specific storage queries + ( "vp" ) = (sub VP), +} + +/// Handle RPC query request in the ledger. 
On success, returns response with +/// borsh-encoded data. +pub fn handle_path( + ctx: RequestCtx<'_, D, H, V, T>, + request: &RequestQuery, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + RPC.handle(ctx, request) +} + +// Handler helpers: + +/// For queries that only support latest height, check that the given height is +/// not different from latest height, otherwise return an error. +pub fn require_latest_height( + ctx: &RequestCtx<'_, D, H, V, T>, + request: &RequestQuery, +) -> storage_api::Result<()> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + if request.height != BlockHeight(0) + && request.height != ctx.wl_storage.storage.get_last_block_height() + { + return Err(storage_api::Error::new_const( + "This query doesn't support arbitrary block heights, only the \ + latest committed block height ('0' can be used as a special \ + value that means the latest block height)", + )); + } + Ok(()) +} + +/// For queries that do not support proofs, check that proof is not requested, +/// otherwise return an error. +pub fn require_no_proof(request: &RequestQuery) -> storage_api::Result<()> { + if request.prove { + return Err(storage_api::Error::new_const( + "This query doesn't support proofs", + )); + } + Ok(()) +} + +/// For queries that don't use request data, require that there are no data +/// attached. 
+pub fn require_no_data(request: &RequestQuery) -> storage_api::Result<()> { + if !request.data.is_empty() { + return Err(storage_api::Error::new_const( + "This query doesn't accept request data", + )); + } + Ok(()) +} + +/// Queries testing helpers +#[cfg(any(test, feature = "testing"))] +mod testing { + + use namada_core::ledger::storage::testing::TestWlStorage; + use namada_core::types::storage::BlockHeight; + use tendermint_rpc::Response; + + use super::*; + use crate::events::log::EventLog; + use crate::tendermint_rpc::error::Error as RpcError; + + /// A test client that has direct access to the storage + pub struct TestClient + where + RPC: Router, + { + /// RPC router + pub rpc: RPC, + /// storage + pub wl_storage: TestWlStorage, + /// event log + pub event_log: EventLog, + } + + impl TestClient + where + RPC: Router, + { + #[allow(dead_code)] + /// Initialize a test client for the given root RPC router + pub fn new(rpc: RPC) -> Self { + // Initialize the `TestClient` + let mut wl_storage = TestWlStorage::default(); + + // Initialize mock gas limit + let max_block_gas_key = + namada_core::ledger::parameters::storage::get_max_block_gas_key( + ); + wl_storage + .storage + .write( + &max_block_gas_key, + namada_core::ledger::storage::types::encode( + &20_000_000_u64, + ), + ) + .expect( + "Max block gas parameter must be initialized in storage", + ); + let event_log = EventLog::default(); + Self { + rpc, + wl_storage, + event_log, + } + } + } + + #[cfg_attr(feature = "async-send", async_trait::async_trait)] + #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] + impl Client for TestClient + where + RPC: Router + Sync, + { + type Error = std::io::Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height.unwrap_or_default(); + // Handle a path by invoking the `RPC.handle` directly with the + // borrowed storage + let 
request = RequestQuery { + data, + path, + height, + prove, + }; + let ctx = RequestCtx { + wl_storage: &self.wl_storage, + event_log: &self.event_log, + vp_wasm_cache: (), + tx_wasm_cache: (), + storage_read_past_height_limit: None, + }; + // TODO: this is a hack to propagate errors to the caller, we should + // really permit error types other than [`std::io::Error`] + self.rpc.handle(ctx, &request).map_err(|err| { + std::io::Error::new(std::io::ErrorKind::Other, err.to_string()) + }) + } + + async fn perform(&self, _request: R) -> Result + where + R: tendermint_rpc::SimpleRequest, + { + Response::from_string("TODO") + } + } +} + +use std::fmt::{Debug, Display}; + use tendermint_rpc::endpoint::{ abci_info, block, block_results, blockchain, commit, consensus_params, consensus_state, health, net_info, status, @@ -9,7 +209,6 @@ use tendermint_rpc::endpoint::{ use tendermint_rpc::query::Query; use tendermint_rpc::{Error as RpcError, Order}; -use crate::ledger::queries::{EncodedResponseQuery, Error}; use crate::tendermint::block::Height; /// A client with async request dispatcher method, which can be used to invoke diff --git a/shared/src/ledger/queries/router.rs b/sdk/src/queries/router.rs similarity index 92% rename from shared/src/ledger/queries/router.rs rename to sdk/src/queries/router.rs index 799a34e5bd..1251972608 100644 --- a/shared/src/ledger/queries/router.rs +++ b/sdk/src/queries/router.rs @@ -82,16 +82,16 @@ macro_rules! 
handle_match { break } // Check that the request is not sent with unsupported non-default - $crate::ledger::queries::require_latest_height(&$ctx, $request)?; - $crate::ledger::queries::require_no_proof($request)?; - $crate::ledger::queries::require_no_data($request)?; + $crate::queries::require_latest_height(&$ctx, $request)?; + $crate::queries::require_no_proof($request)?; + $crate::queries::require_no_data($request)?; // If you get a compile error from here with `expected function, found // queries::Storage`, you're probably missing the marker `(sub _)` let data = $handle($ctx, $( $matched_args ),* )?; // Encode the returned data with borsh - let data = borsh::BorshSerialize::try_to_vec(&data).into_storage_result()?; - return Ok($crate::ledger::queries::EncodedResponseQuery { + let data = borsh::to_vec(&data).into_storage_result()?; + return Ok($crate::queries::EncodedResponseQuery { data, info: Default::default(), proof: None, @@ -401,22 +401,22 @@ macro_rules! pattern_and_handler_to_method { `storage_value` and `storage_prefix`) from `storage_value`."] pub async fn storage_value(&self, client: &CLIENT, data: Option>, - height: Option<$crate::types::storage::BlockHeight>, + height: Option, prove: bool, $( $param: &$param_ty ),* ) -> std::result::Result< - $crate::ledger::queries::ResponseQuery>, - ::Error + $crate::queries::ResponseQuery>, + ::Error > - where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + where CLIENT: $crate::queries::Client + std::marker::Sync { let path = self.storage_value_path( $( $param ),* ); - let $crate::ledger::queries::ResponseQuery { + let $crate::queries::ResponseQuery { data, info, proof } = client.request(path, data, height, prove).await?; - Ok($crate::ledger::queries::ResponseQuery { + Ok($crate::queries::ResponseQuery { data, info, proof, @@ -453,25 +453,25 @@ macro_rules! 
pattern_and_handler_to_method { `storage_value` and `storage_prefix`) from `" $handle "`."] pub async fn $handle(&self, client: &CLIENT, data: Option>, - height: Option<$crate::types::storage::BlockHeight>, + height: Option, prove: bool, $( $param: &$param_ty ),* ) -> std::result::Result< - $crate::ledger::queries::ResponseQuery<$return_type>, - ::Error + $crate::queries::ResponseQuery<$return_type>, + ::Error > - where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + where CLIENT: $crate::queries::Client + std::marker::Sync { let path = self.[<$handle _path>]( $( $param ),* ); - let $crate::ledger::queries::ResponseQuery { + let $crate::queries::ResponseQuery { data, info, proof } = client.request(path, data, height, prove).await?; let decoded: $return_type = borsh::BorshDeserialize::try_from_slice(&data[..])?; - Ok($crate::ledger::queries::ResponseQuery { + Ok($crate::queries::ResponseQuery { data: decoded, info, proof, @@ -510,9 +510,9 @@ macro_rules! pattern_and_handler_to_method { ) -> std::result::Result< $return_type, - ::Error + ::Error > - where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + where CLIENT: $crate::queries::Client + std::marker::Sync { let path = self.[<$handle _path>]( $( $param ),* ); let data = client.simple_request(path).await?; @@ -783,25 +783,25 @@ macro_rules! router { router_type!{[<$name:camel>] {}, $( $pattern $( -> $return_type )? 
= $handle ),* } - impl $crate::ledger::queries::Router for [<$name:camel>] { + impl $crate::queries::Router for [<$name:camel>] { // TODO: for some patterns, there's unused assignment of `$end` #[allow(unused_assignments)] - fn internal_handle( + fn internal_handle( &self, - ctx: $crate::ledger::queries::RequestCtx<'_, D, H>, - request: &$crate::ledger::queries::RequestQuery, + ctx: $crate::queries::RequestCtx<'_, D, H, V, T>, + request: &$crate::queries::RequestQuery, start: usize - ) -> $crate::ledger::storage_api::Result<$crate::ledger::queries::EncodedResponseQuery> + ) -> namada_core::ledger::storage_api::Result<$crate::queries::EncodedResponseQuery> where - D: 'static + $crate::ledger::storage::DB + for<'iter> $crate::ledger::storage::DBIter<'iter> + Sync, - H: 'static + $crate::ledger::storage::StorageHasher + Sync, + D: 'static + namada_core::ledger::storage::DB + for<'iter> namada_core::ledger::storage::DBIter<'iter> + Sync, + H: 'static + namada_core::ledger::storage::StorageHasher + Sync, { // Import for `.into_storage_result()` - use $crate::ledger::storage_api::ResultExt; + use namada_core::ledger::storage_api::ResultExt; // Import helper from this crate used inside the macros - use $crate::ledger::queries::router::find_next_slash_index; + use $crate::queries::router::find_next_slash_index; $( // This loop never repeats, it's only used for a breaking @@ -816,7 +816,7 @@ macro_rules! router { )* return Err( - $crate::ledger::queries::router::Error::WrongPath(request.path.clone())) + $crate::queries::router::Error::WrongPath(request.path.clone())) .into_storage_result(); } } @@ -834,15 +834,15 @@ macro_rules! 
router { /// ``` #[cfg(test)] mod test_rpc_handlers { - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; + use namada_core::ledger::storage::{DBIter, StorageHasher, DB}; + use namada_core::ledger::storage_api; + use namada_core::types::storage::Epoch; + use namada_core::types::token; - use crate::ledger::queries::{ + use crate::queries::{ EncodedResponseQuery, RequestCtx, RequestQuery, ResponseQuery, }; - use crate::ledger::storage::{DBIter, StorageHasher, DB}; - use crate::ledger::storage_api::{self, ResultExt}; - use crate::types::storage::Epoch; - use crate::types::token; /// A little macro to generate boilerplate for RPC handler functions. /// These are implemented to return their name as a String, joined by @@ -854,8 +854,8 @@ mod test_rpc_handlers { // optional trailing comma $(,)? ) => { $( - pub fn $name( - _ctx: RequestCtx<'_, D, H>, + pub fn $name( + _ctx: RequestCtx<'_, D, H, V, T>, $( $( $param: $param_ty ),* )? ) -> storage_api::Result where @@ -901,8 +901,8 @@ mod test_rpc_handlers { /// This handler is hand-written, because the test helper macro doesn't /// support optional args. - pub fn b3iii( - _ctx: RequestCtx<'_, D, H>, + pub fn b3iii( + _ctx: RequestCtx<'_, D, H, V, T>, a1: token::DenominatedAmount, a2: token::DenominatedAmount, a3: Option, @@ -920,8 +920,8 @@ mod test_rpc_handlers { /// This handler is hand-written, because the test helper macro doesn't /// support optional args. - pub fn b3iiii( - _ctx: RequestCtx<'_, D, H>, + pub fn b3iiii( + _ctx: RequestCtx<'_, D, H, V, T>, a1: token::DenominatedAmount, a2: token::DenominatedAmount, a3: Option, @@ -941,15 +941,15 @@ mod test_rpc_handlers { /// This handler is hand-written, because the test helper macro doesn't /// support handlers with `with_options`. 
- pub fn c( - _ctx: RequestCtx<'_, D, H>, + pub fn c( + _ctx: RequestCtx<'_, D, H, V, T>, _request: &RequestQuery, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let data = "c".to_owned().try_to_vec().into_storage_result()?; + let data = "c".to_owned().serialize_to_vec(); Ok(ResponseQuery { data, ..ResponseQuery::default() @@ -963,9 +963,10 @@ mod test_rpc_handlers { /// ``` #[cfg(test)] mod test_rpc { + use namada_core::types::storage::Epoch; + use namada_core::types::token; + use super::test_rpc_handlers::*; - use crate::types::storage::Epoch; - use crate::types::token; // Setup an RPC router for testing router! {TEST_RPC, @@ -1000,14 +1001,14 @@ mod test_rpc { #[cfg(test)] mod test { + use namada_core::ledger::storage_api; + use namada_core::types::storage::Epoch; + use namada_core::types::token; use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; use super::test_rpc::TEST_RPC; - use crate::ledger::queries::testing::TestClient; - use crate::ledger::queries::{RequestCtx, RequestQuery, Router}; - use crate::ledger::storage_api; - use crate::types::storage::Epoch; - use crate::types::token; + use crate::queries::testing::TestClient; + use crate::queries::{RequestCtx, RequestQuery, Router}; /// Test all the possible paths in `TEST_RPC` router. 
#[tokio::test] @@ -1022,8 +1023,8 @@ mod test { let ctx = RequestCtx { event_log: &client.event_log, wl_storage: &client.wl_storage, - vp_wasm_cache: client.vp_wasm_cache.clone(), - tx_wasm_cache: client.tx_wasm_cache.clone(), + vp_wasm_cache: (), + tx_wasm_cache: (), storage_read_past_height_limit: None, }; let result = TEST_RPC.handle(ctx, &request); diff --git a/shared/src/ledger/queries/shell.rs b/sdk/src/queries/shell.rs similarity index 57% rename from shared/src/ledger/queries/shell.rs rename to sdk/src/queries/shell.rs index a766846916..1efba0083e 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/sdk/src/queries/shell.rs @@ -1,36 +1,37 @@ pub(super) mod eth_bridge; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::Node; -use namada_core::ledger::storage::LastBlock; +use namada_core::ledger::storage::traits::StorageHasher; +use namada_core::ledger::storage::{DBIter, LastBlock, DB}; +use namada_core::ledger::storage_api::{self, ResultExt, StorageRead}; use namada_core::types::account::{Account, AccountPublicKeysMap}; use namada_core::types::address::Address; use namada_core::types::hash::Hash; -use namada_core::types::storage::{BlockHeight, BlockResults, KeySeg}; +use namada_core::types::storage::{ + self, BlockHeight, BlockResults, Epoch, KeySeg, PrefixValue, +}; use namada_core::types::token::MaspDenom; +#[cfg(any(test, feature = "async-client"))] +use namada_core::types::transaction::TxResult; use self::eth_bridge::{EthBridge, ETH_BRIDGE}; +use crate::events::log::dumb_queries; +use crate::events::{Event, EventType}; use crate::ibc::core::ics04_channel::packet::Sequence; use crate::ibc::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; -use crate::ledger::events::log::dumb_queries; -use crate::ledger::events::{Event, EventType}; -use 
crate::ledger::queries::types::{RequestCtx, RequestQuery}; -use crate::ledger::queries::{require_latest_height, EncodedResponseQuery}; -use crate::ledger::storage::traits::StorageHasher; -use crate::ledger::storage::{DBIter, DB}; -use crate::ledger::storage_api::{self, ResultExt, StorageRead}; +use crate::queries::types::{RequestCtx, RequestQuery}; +use crate::queries::{require_latest_height, EncodedResponseQuery}; use crate::tendermint::merkle::proof::Proof; -use crate::types::storage::{self, Epoch, PrefixValue}; -#[cfg(any(test, feature = "async-client"))] -use crate::types::transaction::TxResult; type Conversion = ( Address, MaspDenom, Epoch, - masp_primitives::transaction::components::I32Sum, + masp_primitives::transaction::components::I128Sum, MerklePath, ); @@ -43,6 +44,9 @@ router! {SHELL, // Epoch of the last committed block ( "epoch" ) -> Epoch = epoch, + // The address of the native token + ( "native_token" ) -> Address = native_token, + // Epoch of the input block height ( "epoch_at_height" / [height: BlockHeight]) -> Option = epoch_at_height, @@ -91,114 +95,20 @@ router! 
{SHELL, // Handlers: -#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] -fn dry_run_tx( - mut ctx: RequestCtx<'_, D, H>, - request: &RequestQuery, +fn dry_run_tx( + _ctx: RequestCtx<'_, D, H, V, T>, + _request: &RequestQuery, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - use namada_core::ledger::gas::{Gas, GasMetering, TxGasMeter}; - use namada_core::ledger::storage::TempWlStorage; - use namada_core::types::transaction::DecryptedTx; - - use crate::ledger::protocol::{self, ShellParams}; - use crate::proto::Tx; - use crate::types::storage::TxIndex; - use crate::types::transaction::wrapper::wrapper_tx::PairingEngine; - use crate::types::transaction::{AffineCurve, EllipticCurve, TxType}; - - let mut tx = Tx::try_from(&request.data[..]).into_storage_result()?; - tx.validate_tx().into_storage_result()?; - - let mut temp_wl_storage = TempWlStorage::new(&ctx.wl_storage.storage); - let mut cumulated_gas = Gas::default(); - - // Wrapper dry run to allow estimating the gas cost of a transaction - let mut tx_gas_meter = match tx.header().tx_type { - TxType::Wrapper(wrapper) => { - let mut tx_gas_meter = - TxGasMeter::new(wrapper.gas_limit.to_owned()); - protocol::apply_wrapper_tx( - &wrapper, - None, - &request.data, - ShellParams::new( - &mut tx_gas_meter, - &mut temp_wl_storage, - &mut ctx.vp_wasm_cache, - &mut ctx.tx_wasm_cache, - ), - None, - ) - .into_storage_result()?; - - temp_wl_storage.write_log.commit_tx(); - cumulated_gas = tx_gas_meter.get_tx_consumed_gas(); - - // NOTE: the encryption key for a dry-run should always be an - // hardcoded, dummy one - let _privkey = - ::G2Affine::prime_subgroup_generator(); - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - TxGasMeter::new_from_sub_limit(tx_gas_meter.get_available_gas()) - } - TxType::Protocol(_) | TxType::Decrypted(_) => { - // If dry run only the inner tx, use the max block gas as the gas - // limit - 
TxGasMeter::new( - namada_core::ledger::gas::get_max_block_gas(ctx.wl_storage) - .unwrap() - .into(), - ) - } - TxType::Raw => { - // Cast tx to a decrypted for execution - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - - // If dry run only the inner tx, use the max block gas as the gas - // limit - TxGasMeter::new( - namada_core::ledger::gas::get_max_block_gas(ctx.wl_storage) - .unwrap() - .into(), - ) - } - }; - - let mut data = protocol::apply_wasm_tx( - tx, - &TxIndex(0), - ShellParams::new( - &mut tx_gas_meter, - &mut temp_wl_storage, - &mut ctx.vp_wasm_cache, - &mut ctx.tx_wasm_cache, - ), - ) - .into_storage_result()?; - cumulated_gas = cumulated_gas - .checked_add(tx_gas_meter.get_tx_consumed_gas()) - .ok_or(namada_core::ledger::storage_api::Error::SimpleMessage( - "Overflow in gas", - ))?; - // Account gas for both inner and wrapper (if available) - data.gas_used = cumulated_gas; - // NOTE: the keys changed by the wrapper transaction (if any) are not - // returned from this function - let data = data.try_to_vec().into_storage_result()?; - Ok(EncodedResponseQuery { - data, - proof: None, - info: Default::default(), - }) + unimplemented!("Dry running tx requires \"wasm-runtime\" feature.") } /// Query to read block results from storage -pub fn read_results( - ctx: RequestCtx<'_, D, H>, +pub fn read_results( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -234,8 +144,8 @@ where } /// Query to read a conversion from storage -fn read_conversion( - ctx: RequestCtx<'_, D, H>, +fn read_conversion( + ctx: RequestCtx<'_, D, H, V, T>, asset_type: AssetType, ) -> storage_api::Result where @@ -254,7 +164,7 @@ where addr.clone(), *denom, *epoch, - Into::::into( + Into::::into( conv.clone(), ), ctx.wl_storage.storage.conversion_state.tree.path(*pos), @@ -267,29 +177,30 @@ where } } -#[cfg(not(all(feature = "wasm-runtime", feature = "ferveo-tpke")))] -fn dry_run_tx( - _ctx: 
RequestCtx<'_, D, H>, - _request: &RequestQuery, -) -> storage_api::Result +fn epoch( + ctx: RequestCtx<'_, D, H, V, T>, +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - unimplemented!("Dry running tx requires \"wasm-runtime\" feature.") + let data = ctx.wl_storage.storage.last_epoch; + Ok(data) } -fn epoch(ctx: RequestCtx<'_, D, H>) -> storage_api::Result +fn native_token( + ctx: RequestCtx<'_, D, H, V, T>, +) -> storage_api::Result
where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let data = ctx.wl_storage.storage.last_epoch; + let data = ctx.wl_storage.storage.native_token.clone(); Ok(data) } -fn epoch_at_height( - ctx: RequestCtx<'_, D, H>, +fn epoch_at_height( + ctx: RequestCtx<'_, D, H, V, T>, height: BlockHeight, ) -> storage_api::Result> where @@ -299,8 +210,8 @@ where Ok(ctx.wl_storage.storage.block.pred_epochs.get_epoch(height)) } -fn last_block( - ctx: RequestCtx<'_, D, H>, +fn last_block( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -313,8 +224,8 @@ where /// borsh-encoded types, it is safe to check `data.is_empty()` to see if the /// value was found, except for unit - see `fn query_storage_value` in /// `apps/src/lib/client/rpc.rs` for unit type handling via `storage_has_key`. -fn storage_value( - ctx: RequestCtx<'_, D, H>, +fn storage_value( + ctx: RequestCtx<'_, D, H, V, T>, request: &RequestQuery, storage_key: storage::Key, ) -> storage_api::Result @@ -380,8 +291,8 @@ where } } -fn storage_prefix( - ctx: RequestCtx<'_, D, H>, +fn storage_prefix( + ctx: RequestCtx<'_, D, H, V, T>, request: &RequestQuery, storage_key: storage::Key, ) -> storage_api::Result @@ -415,7 +326,7 @@ where } else { None }; - let data = data.try_to_vec().into_storage_result()?; + let data = data.serialize_to_vec(); Ok(EncodedResponseQuery { data, proof, @@ -423,8 +334,8 @@ where }) } -fn storage_has_key( - ctx: RequestCtx<'_, D, H>, +fn storage_has_key( + ctx: RequestCtx<'_, D, H, V, T>, storage_key: storage::Key, ) -> storage_api::Result where @@ -435,8 +346,8 @@ where Ok(data) } -fn accepted( - ctx: RequestCtx<'_, D, H>, +fn accepted( + ctx: RequestCtx<'_, D, H, V, T>, tx_hash: Hash, ) -> storage_api::Result> where @@ -452,8 +363,8 @@ where .cloned()) } -fn applied( - ctx: RequestCtx<'_, D, H>, +fn applied( + ctx: RequestCtx<'_, D, H, V, T>, tx_hash: Hash, ) -> storage_api::Result> 
where @@ -469,8 +380,8 @@ where .cloned()) } -fn ibc_client_update( - ctx: RequestCtx<'_, D, H>, +fn ibc_client_update( + ctx: RequestCtx<'_, D, H, V, T>, client_id: ClientId, consensus_height: BlockHeight, ) -> storage_api::Result> @@ -490,8 +401,8 @@ where .cloned()) } -fn ibc_packet( - ctx: RequestCtx<'_, D, H>, +fn ibc_packet( + ctx: RequestCtx<'_, D, H, V, T>, event_type: EventType, source_port: PortId, source_channel: ChannelId, @@ -519,8 +430,8 @@ where .cloned()) } -fn account( - ctx: RequestCtx<'_, D, H>, +fn account( + ctx: RequestCtx<'_, D, H, V, T>, owner: Address, ) -> storage_api::Result> where @@ -545,8 +456,8 @@ where } } -fn revealed( - ctx: RequestCtx<'_, D, H>, +fn revealed( + ctx: RequestCtx<'_, D, H, V, T>, owner: Address, ) -> storage_api::Result where @@ -561,18 +472,9 @@ where #[cfg(test)] mod test { - use borsh::{BorshDeserialize, BorshSerialize}; - use namada_test_utils::TestWasms; - - use crate::ledger::queries::testing::TestClient; - use crate::ledger::queries::RPC; - use crate::ledger::storage_api::{self, StorageWrite}; - use crate::proto::{Code, Data, Tx}; - use crate::types::hash::Hash; - use crate::types::storage::Key; - use crate::types::transaction::decrypted::DecryptedTx; - use crate::types::transaction::TxType; - use crate::types::{address, token}; + use namada_core::types::{address, token}; + + use crate::queries::RPC; #[test] fn test_shell_queries_router_paths() { @@ -594,106 +496,4 @@ mod test { let path = RPC.shell().storage_has_key_path(&key); assert_eq!(format!("/shell/has_key/{}", key), path); } - - #[tokio::test] - async fn test_shell_queries_router_with_client() -> storage_api::Result<()> - { - // Initialize the `TestClient` - let mut client = TestClient::new(RPC); - // store the wasm code - let tx_no_op = TestWasms::TxNoOp.read_bytes(); - let tx_hash = Hash::sha256(&tx_no_op); - let key = Key::wasm_code(&tx_hash); - let len_key = Key::wasm_code_len(&tx_hash); - client.wl_storage.storage.write(&key, &tx_no_op).unwrap(); 
- client - .wl_storage - .storage - .write(&len_key, (tx_no_op.len() as u64).try_to_vec().unwrap()) - .unwrap(); - - // Request last committed epoch - let read_epoch = RPC.shell().epoch(&client).await.unwrap(); - let current_epoch = client.wl_storage.storage.last_epoch; - assert_eq!(current_epoch, read_epoch); - - // Request dry run tx - let mut outer_tx = - Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); - outer_tx.header.chain_id = client.wl_storage.storage.chain_id.clone(); - outer_tx.set_code(Code::from_hash(tx_hash)); - outer_tx.set_data(Data::new(vec![])); - let tx_bytes = outer_tx.to_bytes(); - let result = RPC - .shell() - .dry_run_tx(&client, Some(tx_bytes), None, false) - .await - .unwrap(); - assert!(result.data.is_accepted()); - - // Request storage value for a balance key ... - let token_addr = address::testing::established_address_1(); - let owner = address::testing::established_address_2(); - let balance_key = token::balance_key(&token_addr, &owner); - // ... there should be no value yet. - let read_balance = RPC - .shell() - .storage_value(&client, None, None, false, &balance_key) - .await - .unwrap(); - assert!(read_balance.data.is_empty()); - - // Request storage prefix iterator - let balance_prefix = token::balance_prefix(&token_addr); - let read_balances = RPC - .shell() - .storage_prefix(&client, None, None, false, &balance_prefix) - .await - .unwrap(); - assert!(read_balances.data.is_empty()); - - // Request storage has key - let has_balance_key = RPC - .shell() - .storage_has_key(&client, &balance_key) - .await - .unwrap(); - assert!(!has_balance_key); - - // Then write some balance ... - let balance = token::Amount::native_whole(1000); - StorageWrite::write(&mut client.wl_storage, &balance_key, balance)?; - // It has to be committed to be visible in a query - client.wl_storage.commit_tx(); - client.wl_storage.commit_block().unwrap(); - // ... 
there should be the same value now - let read_balance = RPC - .shell() - .storage_value(&client, None, None, false, &balance_key) - .await - .unwrap(); - assert_eq!( - balance, - token::Amount::try_from_slice(&read_balance.data).unwrap() - ); - - // Request storage prefix iterator - let balance_prefix = token::balance_prefix(&token_addr); - let read_balances = RPC - .shell() - .storage_prefix(&client, None, None, false, &balance_prefix) - .await - .unwrap(); - assert_eq!(read_balances.data.len(), 1); - - // Request storage has key - let has_balance_key = RPC - .shell() - .storage_has_key(&client, &balance_key) - .await - .unwrap(); - assert!(has_balance_key); - - Ok(()) - } } diff --git a/shared/src/ledger/queries/shell/eth_bridge.rs b/sdk/src/queries/shell/eth_bridge.rs similarity index 93% rename from shared/src/ledger/queries/shell/eth_bridge.rs rename to sdk/src/queries/shell/eth_bridge.rs index 0bbc0aa679..47234f0cd9 100644 --- a/shared/src/ledger/queries/shell/eth_bridge.rs +++ b/sdk/src/queries/shell/eth_bridge.rs @@ -5,6 +5,7 @@ use std::collections::HashMap; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use namada_core::ledger::eth_bridge::storage::bridge_pool::get_key_from_hash; use namada_core::ledger::storage::merkle_tree::StoreRef; use namada_core::ledger::storage::{DBIter, StorageHasher, StoreType, DB}; @@ -12,12 +13,17 @@ use namada_core::ledger::storage_api::{ self, CustomError, ResultExt, StorageRead, }; use namada_core::types::address::Address; -use namada_core::types::eth_bridge_pool::PendingTransferAppendix; +use namada_core::types::eth_abi::{Encode, EncodeCell}; +use namada_core::types::eth_bridge_pool::{ + PendingTransfer, PendingTransferAppendix, +}; use namada_core::types::ethereum_events::{ EthAddress, EthereumEvent, TransferToEthereum, }; use namada_core::types::ethereum_structs; -use namada_core::types::storage::{BlockHeight, DbKeySeg, Key}; +use 
namada_core::types::keccak::KeccakHash; +use namada_core::types::storage::MembershipProof::BridgePool; +use namada_core::types::storage::{BlockHeight, DbKeySeg, Epoch, Key}; use namada_core::types::token::Amount; use namada_core::types::vote_extensions::validator_set_update::{ ValidatorSetArgs, VotingPowersMap, @@ -36,12 +42,7 @@ use namada_ethereum_bridge::storage::{ use namada_proof_of_stake::pos_queries::PosQueries; use crate::eth_bridge::ethers::abi::AbiDecode; -use crate::ledger::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; -use crate::types::eth_abi::{Encode, EncodeCell}; -use crate::types::eth_bridge_pool::PendingTransfer; -use crate::types::keccak::KeccakHash; -use crate::types::storage::Epoch; -use crate::types::storage::MembershipProof::BridgePool; +use crate::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; /// Contains information about the flow control of some ERC20 /// wrapped asset. @@ -50,11 +51,20 @@ use crate::types::storage::MembershipProof::BridgePool; )] pub struct Erc20FlowControl { /// Whether the wrapped asset is whitelisted. - whitelisted: bool, + pub whitelisted: bool, /// Total minted supply of some wrapped asset. - supply: Amount, + pub supply: Amount, /// The token cap of some wrapped asset. - cap: Amount, + pub cap: Amount, +} + +impl Erc20FlowControl { + /// Check if the `transferred_amount` exceeds the token caps of some ERC20 + /// asset. + #[inline] + pub fn exceeds_token_caps(&self, transferred_amount: Amount) -> bool { + self.supply + transferred_amount > self.cap + } } /// Request data to pass to `generate_bridge_pool_proof`. @@ -167,8 +177,8 @@ router! {ETH_BRIDGE, /// Read the total supply and respective cap of some wrapped /// ERC20 token in Namada. -fn get_erc20_flow_control( - ctx: RequestCtx<'_, D, H>, +fn get_erc20_flow_control( + ctx: RequestCtx<'_, D, H, V, T>, asset: EthAddress, ) -> storage_api::Result where @@ -191,9 +201,9 @@ where } /// Helper function to read a smart contract from storage. 
-fn read_contract( +fn read_contract( key: &Key, - ctx: RequestCtx<'_, D, H>, + ctx: RequestCtx<'_, D, H, V, U>, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -212,8 +222,8 @@ where /// Read the address and version of the Ethereum bridge's Bridge /// smart contract. #[inline] -fn read_bridge_contract( - ctx: RequestCtx<'_, D, H>, +fn read_bridge_contract( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -225,8 +235,8 @@ where /// Read the address of the Ethereum bridge's native ERC20 /// smart contract. #[inline] -fn read_native_erc20_contract( - ctx: RequestCtx<'_, D, H>, +fn read_native_erc20_contract( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -237,8 +247,8 @@ where /// Read the current contents of the Ethereum bridge /// pool. -fn read_ethereum_bridge_pool( - ctx: RequestCtx<'_, D, H>, +fn read_ethereum_bridge_pool( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -252,8 +262,8 @@ where /// Read the contents of the Ethereum bridge /// pool covered by the latest signed root. -fn read_signed_ethereum_bridge_pool( - ctx: RequestCtx<'_, D, H>, +fn read_signed_ethereum_bridge_pool( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -272,9 +282,9 @@ where } /// Read the Ethereum bridge pool contents at a specified height. -fn read_ethereum_bridge_pool_at_height( +fn read_ethereum_bridge_pool_at_height( height: BlockHeight, - ctx: RequestCtx<'_, D, H>, + ctx: RequestCtx<'_, D, H, V, T>, ) -> Vec where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -311,8 +321,8 @@ where /// Generate a merkle proof for the inclusion of the /// requested transfers in the Ethereum bridge pool. 
-fn generate_bridge_pool_proof( - ctx: RequestCtx<'_, D, H>, +fn generate_bridge_pool_proof( + ctx: RequestCtx<'_, D, H, V, T>, request: &RequestQuery, ) -> storage_api::Result where @@ -425,7 +435,7 @@ where )), appendices: with_appendix.then_some(appendices), }; - let data = rsp.try_to_vec().into_storage_result()?; + let data = rsp.serialize_to_vec(); Ok(EncodedResponseQuery { data, ..Default::default() @@ -444,8 +454,8 @@ where /// Iterates over all ethereum events /// and returns the amount of voting power /// backing each `TransferToEthereum` event. -fn transfer_to_ethereum_progress( - ctx: RequestCtx<'_, D, H>, +fn transfer_to_ethereum_progress( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -518,8 +528,8 @@ where /// /// This method may fail if a complete proof (i.e. with more than /// 2/3 of the total voting power behind it) is not available yet. -fn read_valset_upd_proof( - ctx: RequestCtx<'_, D, H>, +fn read_valset_upd_proof( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Epoch, ) -> storage_api::Result>> where @@ -568,8 +578,8 @@ where /// /// This method may fail if no set of validators exists yet, /// at that [`Epoch`]. -fn read_bridge_valset( - ctx: RequestCtx<'_, D, H>, +fn read_bridge_valset( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Epoch, ) -> storage_api::Result where @@ -598,8 +608,8 @@ where /// /// This method may fail if no set of validators exists yet, /// at that [`Epoch`]. -fn read_governance_valset( - ctx: RequestCtx<'_, D, H>, +fn read_governance_valset( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Epoch, ) -> storage_api::Result where @@ -626,8 +636,8 @@ where /// Retrieve the consensus validator voting powers at the /// given [`BlockHeight`]. 
-fn voting_powers_at_height( - ctx: RequestCtx<'_, D, H>, +fn voting_powers_at_height( + ctx: RequestCtx<'_, D, H, V, T>, height: BlockHeight, ) -> storage_api::Result where @@ -645,8 +655,8 @@ where /// Retrieve the consensus validator voting powers at the /// given [`Epoch`]. -fn voting_powers_at_epoch( - ctx: RequestCtx<'_, D, H>, +fn voting_powers_at_epoch( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Epoch, ) -> storage_api::Result where @@ -671,14 +681,19 @@ mod test_ethbridge_router { use std::collections::BTreeMap; use assert_matches::assert_matches; - use borsh::BorshSerialize; use namada_core::ledger::eth_bridge::storage::bridge_pool::{ get_pending_key, get_signed_root_key, BridgePoolTree, }; use namada_core::ledger::eth_bridge::storage::whitelist; use namada_core::ledger::storage::mockdb::MockDBWriteBatch; use namada_core::ledger::storage_api::StorageWrite; + use namada_core::types::address::nam; use namada_core::types::address::testing::established_address_1; + use namada_core::types::eth_abi::Encode; + use namada_core::types::eth_bridge_pool::{ + GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, + }; + use namada_core::types::ethereum_events::EthAddress; use namada_core::types::storage::BlockHeight; use namada_core::types::vote_extensions::validator_set_update; use namada_core::types::vote_extensions::validator_set_update::{ @@ -693,14 +708,8 @@ mod test_ethbridge_router { use super::test_utils::bertha_address; use super::*; - use crate::ledger::queries::testing::TestClient; - use crate::ledger::queries::RPC; - use crate::types::address::nam; - use crate::types::eth_abi::Encode; - use crate::types::eth_bridge_pool::{ - GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, - }; - use crate::types::ethereum_events::EthAddress; + use crate::queries::testing::TestClient; + use crate::queries::RPC; /// Test that reading the bridge validator set works. 
#[tokio::test] @@ -915,7 +924,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -958,7 +967,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -977,7 +986,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1023,7 +1032,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -1044,7 +1053,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1053,9 +1062,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_signed_root_key(), - (signed_root.clone(), BlockHeight::from(0)) - .try_to_vec() - .unwrap(), + (signed_root.clone(), BlockHeight::from(0)).serialize_to_vec(), ) .expect("Test failed"); @@ -1074,8 +1081,7 @@ mod test_ethbridge_router { relayer: Cow::Owned(bertha_address()), with_appendix: false, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ), None, false, @@ -1141,7 +1147,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -1166,7 +1172,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1175,7 +1181,7 @@ mod test_ethbridge_router { 
.wl_storage .write_bytes( &get_signed_root_key(), - (signed_root, BlockHeight::from(0)).try_to_vec().unwrap(), + (signed_root, BlockHeight::from(0)).serialize_to_vec(), ) .expect("Test failed"); @@ -1199,8 +1205,7 @@ mod test_ethbridge_router { relayer: Cow::Owned(bertha_address()), with_appendix: false, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ), None, false, @@ -1237,7 +1242,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -1258,7 +1263,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1267,7 +1272,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_signed_root_key(), - (signed_root, BlockHeight::from(0)).try_to_vec().unwrap(), + (signed_root, BlockHeight::from(0)).serialize_to_vec(), ) .expect("Test failed"); @@ -1311,7 +1316,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -1326,10 +1331,7 @@ mod test_ethbridge_router { let voting_power = FractionalVotingPower::HALF; client .wl_storage - .write_bytes( - ð_msg_key.body(), - eth_event.try_to_vec().expect("Test failed"), - ) + .write_bytes(ð_msg_key.body(), eth_event.serialize_to_vec()) .expect("Test failed"); client .wl_storage @@ -1339,8 +1341,7 @@ mod test_ethbridge_router { 0.into(), voting_power * dummy_validator_stake, )]) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); client @@ -1362,7 +1363,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1414,7 
+1415,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer), - transfer.try_to_vec().expect("Test failed"), + transfer.serialize_to_vec(), ) .expect("Test failed"); @@ -1435,7 +1436,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_pending_key(&transfer2), - transfer2.try_to_vec().expect("Test failed"), + transfer2.serialize_to_vec(), ) .expect("Test failed"); @@ -1444,7 +1445,7 @@ mod test_ethbridge_router { .wl_storage .write_bytes( &get_signed_root_key(), - (signed_root, BlockHeight::from(0)).try_to_vec().unwrap(), + (signed_root, BlockHeight::from(0)).serialize_to_vec(), ) .expect("Test failed"); @@ -1463,8 +1464,7 @@ mod test_ethbridge_router { relayer: Cow::Owned(bertha_address()), with_appendix: false, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ), None, false, @@ -1490,8 +1490,7 @@ mod test_ethbridge_router { relayer: Cow::Owned(bertha_address()), with_appendix: false, } - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ), None, false, diff --git a/shared/src/ledger/queries/types.rs b/sdk/src/queries/types.rs similarity index 88% rename from shared/src/ledger/queries/types.rs rename to sdk/src/queries/types.rs index 235bf76e99..7283982099 100644 --- a/shared/src/ledger/queries/types.rs +++ b/sdk/src/queries/types.rs @@ -1,21 +1,16 @@ use std::fmt::Debug; -use namada_core::ledger::storage::WlStorage; +use namada_core::ledger::storage::{DBIter, StorageHasher, WlStorage, DB}; +use namada_core::ledger::storage_api; +use namada_core::types::storage::BlockHeight; use thiserror::Error; -use crate::ledger::events::log::EventLog; -use crate::ledger::storage::{DBIter, StorageHasher, DB}; -use crate::ledger::storage_api; +use crate::events::log::EventLog; use crate::tendermint::merkle::proof::Proof; -use crate::types::storage::BlockHeight; -#[cfg(feature = "wasm-runtime")] -use crate::vm::wasm::{TxCache, VpCache}; -#[cfg(feature = "wasm-runtime")] -use crate::vm::WasmCacheRoAccess; 
/// A request context provides read-only access to storage and WASM compilation /// caches to request handlers. #[derive(Debug, Clone)] -pub struct RequestCtx<'shell, D, H> +pub struct RequestCtx<'shell, D, H, VpCache, TxCache> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -25,11 +20,9 @@ where /// Log of events emitted by `FinalizeBlock` ABCI calls. pub event_log: &'shell EventLog, /// Cache of VP wasm compiled artifacts. - #[cfg(feature = "wasm-runtime")] - pub vp_wasm_cache: VpCache, + pub vp_wasm_cache: VpCache, /// Cache of transaction wasm compiled artifacts. - #[cfg(feature = "wasm-runtime")] - pub tx_wasm_cache: TxCache, + pub tx_wasm_cache: TxCache, /// Taken from config `storage_read_past_height_limit`. When set, will /// limit the how many block heights in the past can the storage be /// queried for reading values. @@ -41,9 +34,9 @@ where pub trait Router { /// Handle a given request using the provided context. This must be invoked /// on the root `Router` to be able to match the `request.path` fully. - fn handle( + fn handle( &self, - ctx: RequestCtx<'_, D, H>, + ctx: RequestCtx<'_, D, H, V, T>, request: &RequestQuery, ) -> storage_api::Result where @@ -59,9 +52,9 @@ pub trait Router { /// Handle a given request using the provided context, starting to /// try to match `request.path` against the `Router`'s patterns at the /// given `start` offset. 
- fn internal_handle( + fn internal_handle( &self, - ctx: RequestCtx<'_, D, H>, + ctx: RequestCtx<'_, D, H, V, T>, request: &RequestQuery, start: usize, ) -> storage_api::Result diff --git a/shared/src/ledger/queries/vp/governance.rs b/sdk/src/queries/vp/governance.rs similarity index 75% rename from shared/src/ledger/queries/vp/governance.rs rename to sdk/src/queries/vp/governance.rs index 92c3495f24..1e3a5a8ece 100644 --- a/shared/src/ledger/queries/vp/governance.rs +++ b/sdk/src/queries/vp/governance.rs @@ -1,12 +1,12 @@ // cd shared && cargo expand ledger::queries::vp::governance +use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_core::ledger::governance::storage::proposal::StorageProposal; use namada_core::ledger::governance::utils::Vote; +use namada_core::ledger::storage::{DBIter, StorageHasher, DB}; +use namada_core::ledger::storage_api; -use crate::core::ledger::governance::parameters::GovernanceParameters; -use crate::ledger::queries::types::RequestCtx; -use crate::ledger::storage::{DBIter, StorageHasher, DB}; -use crate::ledger::storage_api; +use crate::queries::types::RequestCtx; // Governance queries router! {GOV, @@ -16,8 +16,8 @@ router! {GOV, } /// Find if the given address belongs to a validator account. -fn proposal_id( - ctx: RequestCtx<'_, D, H>, +fn proposal_id( + ctx: RequestCtx<'_, D, H, V, T>, id: u64, ) -> storage_api::Result> where @@ -28,8 +28,8 @@ where } /// Find if the given address belongs to a validator account. 
-fn proposal_id_votes( - ctx: RequestCtx<'_, D, H>, +fn proposal_id_votes( + ctx: RequestCtx<'_, D, H, V, T>, id: u64, ) -> storage_api::Result> where @@ -40,8 +40,8 @@ where } /// Get the governane parameters -fn parameters( - ctx: RequestCtx<'_, D, H>, +fn parameters( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, diff --git a/shared/src/ledger/queries/vp/mod.rs b/sdk/src/queries/vp/mod.rs similarity index 100% rename from shared/src/ledger/queries/vp/mod.rs rename to sdk/src/queries/vp/mod.rs diff --git a/shared/src/ledger/queries/vp/pgf.rs b/sdk/src/queries/vp/pgf.rs similarity index 76% rename from shared/src/ledger/queries/vp/pgf.rs rename to sdk/src/queries/vp/pgf.rs index 8f5b14c91b..9e8ea2f5cc 100644 --- a/shared/src/ledger/queries/vp/pgf.rs +++ b/sdk/src/queries/vp/pgf.rs @@ -1,11 +1,11 @@ use namada_core::ledger::governance::storage::proposal::StoragePgfFunding; +use namada_core::ledger::pgf::parameters::PgfParameters; use namada_core::ledger::pgf::storage::steward::StewardDetail; +use namada_core::ledger::storage::{DBIter, StorageHasher, DB}; +use namada_core::ledger::storage_api; use namada_core::types::address::Address; -use crate::core::ledger::pgf::parameters::PgfParameters; -use crate::ledger::queries::types::RequestCtx; -use crate::ledger::storage::{DBIter, StorageHasher, DB}; -use crate::ledger::storage_api; +use crate::queries::types::RequestCtx; // PoS validity predicate queries router! {PGF, @@ -16,8 +16,8 @@ router! 
{PGF, } /// Query the currect pgf steward set -fn stewards( - ctx: RequestCtx<'_, D, H>, +fn stewards( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -27,8 +27,8 @@ where } /// Check if an address is a pgf steward -fn is_steward( - ctx: RequestCtx<'_, D, H>, +fn is_steward( + ctx: RequestCtx<'_, D, H, V, T>, address: Address, ) -> storage_api::Result where @@ -39,8 +39,8 @@ where } /// Query the continous pgf fundings -fn funding( - ctx: RequestCtx<'_, D, H>, +fn funding( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -50,8 +50,8 @@ where } /// Query the PGF parameters -fn parameters( - ctx: RequestCtx<'_, D, H>, +fn parameters( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, diff --git a/shared/src/ledger/queries/vp/pos.rs b/sdk/src/queries/vp/pos.rs similarity index 86% rename from shared/src/ledger/queries/vp/pos.rs rename to sdk/src/queries/vp/pos.rs index e78bff146b..454680f1f8 100644 --- a/shared/src/ledger/queries/vp/pos.rs +++ b/sdk/src/queries/vp/pos.rs @@ -3,8 +3,13 @@ use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::ledger::storage::{DBIter, StorageHasher, DB}; +use namada_core::ledger::storage_api; use namada_core::ledger::storage_api::collections::lazy_map; use namada_core::ledger::storage_api::OptionExt; +use namada_core::types::address::Address; +use namada_core::types::storage::Epoch; +use namada_core::types::token; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{ BondId, BondsAndUnbondsDetail, BondsAndUnbondsDetails, CommissionPair, @@ -18,17 +23,11 @@ use namada_proof_of_stake::{ read_consensus_validator_set_addresses_with_stake, read_pos_params, read_total_stake, read_validator_max_commission_rate_change, 
read_validator_stake, unbond_handle, validator_commission_rate_handle, - validator_slashes_handle, validator_state_handle, + validator_incoming_redelegations_handle, validator_slashes_handle, + validator_state_handle, }; -use crate::ledger::queries::types::RequestCtx; -use crate::ledger::storage::{DBIter, StorageHasher, DB}; -use crate::ledger::storage_api; -use crate::types::address::Address; -use crate::types::storage::Epoch; -use crate::types::token; - -type AmountPair = (token::Amount, token::Amount); +use crate::queries::types::RequestCtx; // PoS validity predicate queries router! {POS, @@ -49,6 +48,9 @@ router! {POS, ( "state" / [validator: Address] / [epoch: opt Epoch] ) -> Option = validator_state, + + ( "incoming_redelegation" / [src_validator: Address] / [delegator: Address] ) + -> Option = validator_incoming_redelegation, }, ( "validator_set" ) = { @@ -79,7 +81,7 @@ router! {POS, -> token::Amount = bond, ( "bond_with_slashing" / [source: Address] / [validator: Address] / [epoch: opt Epoch] ) - -> AmountPair = bond_with_slashing, + -> token::Amount = bond_with_slashing, ( "unbond" / [source: Address] / [validator: Address] ) -> HashMap<(Epoch, Epoch), token::Amount> = unbond, @@ -148,7 +150,9 @@ impl Enriched { // Handlers that implement the functions via `trait StorageRead`: /// Get the PoS parameters -fn pos_params(ctx: RequestCtx<'_, D, H>) -> storage_api::Result +fn pos_params( + ctx: RequestCtx<'_, D, H, V, T>, +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -157,8 +161,8 @@ where } /// Find if the given address belongs to a validator account. 
-fn is_validator( - ctx: RequestCtx<'_, D, H>, +fn is_validator( + ctx: RequestCtx<'_, D, H, V, T>, addr: Address, ) -> storage_api::Result where @@ -169,8 +173,8 @@ where } /// Find if the given address is a delegator -fn is_delegator( - ctx: RequestCtx<'_, D, H>, +fn is_delegator( + ctx: RequestCtx<'_, D, H, V, T>, addr: Address, epoch: Option, ) -> storage_api::Result @@ -183,8 +187,8 @@ where /// Get all the validator known addresses. These validators may be in any state, /// e.g. consensus, below-capacity, inactive or jailed. -fn validator_addresses( - ctx: RequestCtx<'_, D, H>, +fn validator_addresses( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Option, ) -> storage_api::Result> where @@ -196,8 +200,8 @@ where } /// Get the validator commission rate and max commission rate change per epoch -fn validator_commission( - ctx: RequestCtx<'_, D, H>, +fn validator_commission( + ctx: RequestCtx<'_, D, H, V, T>, validator: Address, epoch: Option, ) -> storage_api::Result> @@ -227,8 +231,8 @@ where } /// Get the validator state -fn validator_state( - ctx: RequestCtx<'_, D, H>, +fn validator_state( + ctx: RequestCtx<'_, D, H, V, T>, validator: Address, epoch: Option, ) -> storage_api::Result> @@ -251,8 +255,8 @@ where /// to their address. /// Returns `None` when the given address is not a validator address. For a /// validator with `0` stake, this returns `Ok(token::Amount::zero())`. -fn validator_stake( - ctx: RequestCtx<'_, D, H>, +fn validator_stake( + ctx: RequestCtx<'_, D, H, V, T>, validator: Address, epoch: Option, ) -> storage_api::Result> @@ -262,12 +266,33 @@ where { let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); let params = read_pos_params(ctx.wl_storage)?; - read_validator_stake(ctx.wl_storage, ¶ms, &validator, epoch) + if namada_proof_of_stake::is_validator(ctx.wl_storage, &validator)? 
{ + let stake = + read_validator_stake(ctx.wl_storage, ¶ms, &validator, epoch)?; + Ok(Some(stake)) + } else { + Ok(None) + } +} + +/// Get the incoming redelegation epoch for a source validator - delegator pair, +/// if there is any. +fn validator_incoming_redelegation( + ctx: RequestCtx<'_, D, H, V, T>, + src_validator: Address, + delegator: Address, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let handle = validator_incoming_redelegations_handle(&src_validator); + handle.get(ctx.wl_storage, &delegator) } /// Get all the validator in the consensus set with their bonded stake. -fn consensus_validator_set( - ctx: RequestCtx<'_, D, H>, +fn consensus_validator_set( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Option, ) -> storage_api::Result> where @@ -279,8 +304,8 @@ where } /// Get all the validator in the below-capacity set with their bonded stake. -fn below_capacity_validator_set( - ctx: RequestCtx<'_, D, H>, +fn below_capacity_validator_set( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Option, ) -> storage_api::Result> where @@ -295,8 +320,8 @@ where } /// Get the total stake in PoS system at the given epoch or current when `None`. 
-fn total_stake( - ctx: RequestCtx<'_, D, H>, +fn total_stake( + ctx: RequestCtx<'_, D, H, V, T>, epoch: Option, ) -> storage_api::Result where @@ -308,11 +333,11 @@ where read_total_stake(ctx.wl_storage, ¶ms, epoch) } -fn bond_deltas( - ctx: RequestCtx<'_, D, H>, +fn bond_deltas( + ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, -) -> storage_api::Result> +) -> storage_api::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -322,8 +347,8 @@ where /// Find the sum of bond amount up the given epoch when `Some`, or up to the /// pipeline length parameter offset otherwise -fn bond( - ctx: RequestCtx<'_, D, H>, +fn bond( + ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, epoch: Option, @@ -339,16 +364,15 @@ where let handle = bond_handle(&source, &validator); handle .get_sum(ctx.wl_storage, epoch, ¶ms)? - .map(token::Amount::from_change) .ok_or_err_msg("Cannot find bond") } -fn bond_with_slashing( - ctx: RequestCtx<'_, D, H>, +fn bond_with_slashing( + ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, epoch: Option, -) -> storage_api::Result +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -359,8 +383,8 @@ where bond_amount(ctx.wl_storage, &bond_id, epoch) } -fn unbond( - ctx: RequestCtx<'_, D, H>, +fn unbond( + ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, ) -> storage_api::Result> @@ -384,8 +408,8 @@ where .collect() } -fn unbond_with_slashing( - ctx: RequestCtx<'_, D, H>, +fn unbond_with_slashing( + ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, ) -> storage_api::Result> @@ -410,8 +434,8 @@ where .collect() } -fn withdrawable_tokens( - ctx: RequestCtx<'_, D, H>, +fn withdrawable_tokens( + ctx: RequestCtx<'_, D, H, V, T>, source: Address, validator: Address, epoch: Option, @@ -439,8 +463,8 @@ where Ok(total) } -fn bonds_and_unbonds( - 
ctx: RequestCtx<'_, D, H>, +fn bonds_and_unbonds( + ctx: RequestCtx<'_, D, H, V, T>, source: Option
, validator: Option
, ) -> storage_api::Result @@ -453,8 +477,8 @@ where /// Find all the validator addresses to whom the given `owner` address has /// some delegation in any epoch -fn delegation_validators( - ctx: RequestCtx<'_, D, H>, +fn delegation_validators( + ctx: RequestCtx<'_, D, H, V, T>, owner: Address, ) -> storage_api::Result> where @@ -466,8 +490,8 @@ where /// Find all the validator addresses to whom the given `owner` address has /// some delegation in any epoch -fn delegations( - ctx: RequestCtx<'_, D, H>, +fn delegations( + ctx: RequestCtx<'_, D, H, V, T>, owner: Address, epoch: Option, ) -> storage_api::Result> @@ -480,8 +504,8 @@ where } /// Validator slashes -fn validator_slashes( - ctx: RequestCtx<'_, D, H>, +fn validator_slashes( + ctx: RequestCtx<'_, D, H, V, T>, validator: Address, ) -> storage_api::Result> where @@ -493,8 +517,8 @@ where } /// All slashes -fn slashes( - ctx: RequestCtx<'_, D, H>, +fn slashes( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result>> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -504,8 +528,8 @@ where } /// Enqueued slashes -fn enqueued_slashes( - ctx: RequestCtx<'_, D, H>, +fn enqueued_slashes( + ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result>>> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -516,8 +540,8 @@ where } /// Native validator address by looking up the Tendermint address -fn validator_by_tm_addr( - ctx: RequestCtx<'_, D, H>, +fn validator_by_tm_addr( + ctx: RequestCtx<'_, D, H, V, T>, tm_addr: String, ) -> storage_api::Result> where @@ -531,8 +555,7 @@ where #[cfg(any(test, feature = "async-client"))] pub mod client_only_methods { use super::*; - use crate::ledger::queries::RPC; - use crate::sdk::queries::Client; + use crate::queries::{Client, RPC}; impl Pos { /// Get bonds and unbonds with all details (slashes and rewards, if any) diff --git a/shared/src/ledger/queries/vp/token.rs b/sdk/src/queries/vp/token.rs similarity index 85% rename from 
shared/src/ledger/queries/vp/token.rs rename to sdk/src/queries/vp/token.rs index 3b99cb0fda..5e2e5aa95b 100644 --- a/shared/src/ledger/queries/vp/token.rs +++ b/sdk/src/queries/vp/token.rs @@ -6,7 +6,7 @@ use namada_core::ledger::storage_api::token::read_denom; use namada_core::types::address::Address; use namada_core::types::token; -use crate::ledger::queries::RequestCtx; +use crate::queries::RequestCtx; router! {TOKEN, ( "denomination" / [addr: Address] ) -> Option = denomination, @@ -14,8 +14,8 @@ router! {TOKEN, /// Get the number of decimal places (in base 10) for a /// token specified by `addr`. -fn denomination( - ctx: RequestCtx<'_, D, H>, +fn denomination( + ctx: RequestCtx<'_, D, H, V, T>, addr: Address, ) -> storage_api::Result> where @@ -28,12 +28,11 @@ where #[cfg(any(test, feature = "async-client"))] pub mod client_only_methods { use borsh::BorshDeserialize; + use namada_core::types::address::Address; + use namada_core::types::token; use super::Token; - use crate::ledger::queries::RPC; - use crate::sdk::queries::Client; - use crate::types::address::Address; - use crate::types::token; + use crate::queries::{Client, RPC}; impl Token { /// Get the balance of the given `token` belonging to the given `owner`. 
@@ -53,7 +52,7 @@ pub mod client_only_methods { .await?; let balance = if response.data.is_empty() { - token::Amount::default() + token::Amount::zero() } else { token::Amount::try_from_slice(&response.data) .unwrap_or_default() diff --git a/shared/src/sdk/rpc.rs b/sdk/src/rpc.rs similarity index 77% rename from shared/src/sdk/rpc.rs rename to sdk/src/rpc.rs index 58609bed42..a5c0793b36 100644 --- a/shared/src/sdk/rpc.rs +++ b/sdk/src/rpc.rs @@ -11,51 +11,52 @@ use masp_primitives::sapling::Node; use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_core::ledger::governance::storage::proposal::StorageProposal; use namada_core::ledger::governance::utils::Vote; +use namada_core::ledger::ibc::storage::{ + ibc_denom_key, ibc_denom_key_prefix, is_ibc_denom_key, +}; use namada_core::ledger::storage::LastBlock; use namada_core::types::account::Account; -use namada_core::types::address::Address; -use namada_core::types::storage::Key; +use namada_core::types::address::{Address, InternalAddress}; +use namada_core::types::hash::Hash; +use namada_core::types::key::common; +use namada_core::types::storage::{ + BlockHeight, BlockResults, Epoch, Key, PrefixValue, +}; use namada_core::types::token::{ Amount, DenominatedAmount, Denomination, MaspDenom, }; +use namada_core::types::{storage, token}; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{ BondsAndUnbondsDetails, CommissionPair, ValidatorState, }; use serde::Serialize; -use crate::ledger::events::Event; -use crate::ledger::queries::vp::pos::EnrichedBondsAndUnbondsDetails; -use crate::ledger::queries::RPC; +use crate::args::InputAmount; +use crate::control_flow::time; +use crate::error::{EncodingError, Error, QueryError, TxError}; +use crate::events::Event; +use crate::internal_macros::echo_error; +use crate::io::Io; use crate::proto::Tx; -use crate::sdk::args::InputAmount; -use crate::sdk::error; -use crate::sdk::error::{EncodingError, Error, QueryError}; +use 
crate::queries::vp::pos::EnrichedBondsAndUnbondsDetails; +use crate::queries::{Client, RPC}; use crate::tendermint::block::Height; use crate::tendermint::merkle::proof::Proof; use crate::tendermint_rpc::error::Error as TError; use crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; -use crate::types::control_flow::{time, Halt, TryHalt}; -use crate::types::hash::Hash; -use crate::types::io::Io; -use crate::types::key::common; -use crate::types::storage::{BlockHeight, BlockResults, Epoch, PrefixValue}; -use crate::types::{storage, token}; -use crate::{display_line, edisplay_line}; +use crate::{display_line, edisplay_line, error, Namada}; /// Query the status of a given transaction. /// /// If a response is not delivered until `deadline`, we exit the cli with an /// error. -pub async fn query_tx_status( - client: &C, +pub async fn query_tx_status<'a>( + context: &impl Namada<'a>, status: TxEventQuery<'_>, deadline: time::Instant, -) -> Halt -where - C: crate::ledger::queries::Client + Sync, -{ +) -> Result { time::Sleep { strategy: time::LinearBackoff { delta: time::Duration::from_secs(1), @@ -63,7 +64,8 @@ where } .timeout(deadline, || async { tracing::debug!(query = ?status, "Querying tx status"); - let maybe_event = match query_tx_events(client, status).await { + let maybe_event = match query_tx_events(context.client(), status).await + { Ok(response) => response, Err(err) => { tracing::debug!( @@ -88,25 +90,36 @@ where } }) .await - .try_halt(|_| { + .map_err(|_| { edisplay_line!( - IO, + context.io(), "Transaction status query deadline of {deadline:?} exceeded" ); + match status { + TxEventQuery::Accepted(_) => Error::Tx(TxError::AcceptTimeout), + TxEventQuery::Applied(_) => Error::Tx(TxError::AppliedTimeout), + } }) } /// Query the epoch of the last committed block -pub async fn query_epoch( +pub async fn query_epoch( client: &C, ) -> Result { convert_response::(RPC.shell().epoch(client).await) } +/// Query the address of the native token +pub 
async fn query_native_token( + client: &C, +) -> Result { + convert_response::(RPC.shell().native_token(client).await) +} + /// Query the epoch of the given block height, if it exists. /// Will return none if the input block height is greater than /// the latest committed block height. -pub async fn query_epoch_at_height( +pub async fn query_epoch_at_height( client: &C, height: BlockHeight, ) -> Result, error::Error> { @@ -114,7 +127,7 @@ pub async fn query_epoch_at_height( } /// Query the last committed block, if any. -pub async fn query_block( +pub async fn query_block( client: &C, ) -> Result, error::Error> { // NOTE: We're not using `client.latest_block()` because it may return an @@ -123,7 +136,7 @@ pub async fn query_block( } /// A helper to unwrap client's response. Will shut down process on error. -fn unwrap_client_response( +fn unwrap_client_response( response: Result, ) -> T { response.unwrap_or_else(|err| { @@ -134,21 +147,21 @@ fn unwrap_client_response( /// A helper to turn client's response into an error type that can be used with /// ? The exact error type is a `QueryError::NoResponse`, and thus should be /// seen as getting no response back from a query. -fn convert_response( +fn convert_response( response: Result, ) -> Result { response.map_err(|err| Error::from(QueryError::NoResponse(err.to_string()))) } /// Query the results of the last committed block -pub async fn query_results( +pub async fn query_results( client: &C, ) -> Result, Error> { convert_response::(RPC.shell().read_results(client).await) } /// Query token amount of owner. -pub async fn get_token_balance( +pub async fn get_token_balance( client: &C, token: &Address, owner: &Address, @@ -159,7 +172,7 @@ pub async fn get_token_balance( } /// Check if the given address is a known validator. -pub async fn is_validator( +pub async fn is_validator( client: &C, address: &Address, ) -> Result { @@ -167,7 +180,7 @@ pub async fn is_validator( } /// Check if the given address is a pgf steward. 
-pub async fn is_steward( +pub async fn is_steward( client: &C, address: &Address, ) -> bool { @@ -177,7 +190,7 @@ pub async fn is_steward( } /// Check if a given address is a known delegator -pub async fn is_delegator( +pub async fn is_delegator( client: &C, address: &Address, ) -> Result { @@ -187,7 +200,7 @@ pub async fn is_delegator( } /// Check if a given address is a known delegator at the given epoch -pub async fn is_delegator_at( +pub async fn is_delegator_at( client: &C, address: &Address, epoch: Epoch, @@ -203,7 +216,7 @@ pub async fn is_delegator_at( /// Check if the address exists on chain. Established address exists if it has a /// stored validity predicate. Implicit and internal addresses always return /// true. -pub async fn known_address( +pub async fn known_address( client: &C, address: &Address, ) -> Result { @@ -221,14 +234,14 @@ pub async fn known_address( // often ignore the optional value and do not have any error type surrounding // it. /// Query a conversion. -pub async fn query_conversion( +pub async fn query_conversion( client: &C, asset_type: AssetType, ) -> Option<( Address, MaspDenom, Epoch, - masp_primitives::transaction::components::I32Sum, + masp_primitives::transaction::components::I128Sum, MerklePath, )> { Some(unwrap_client_response::( @@ -237,22 +250,19 @@ pub async fn query_conversion( } /// Query a wasm code hash -pub async fn query_wasm_code_hash< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_wasm_code_hash<'a>( + context: &impl Namada<'a>, code_path: impl AsRef, ) -> Result { let hash_key = Key::wasm_hash(code_path.as_ref()); - match query_storage_value_bytes(client, &hash_key, None, false) + match query_storage_value_bytes(context.client(), &hash_key, None, false) .await? 
.0 { Some(hash) => Ok(Hash::try_from(&hash[..]).expect("Invalid code hash")), None => { edisplay_line!( - IO, + context.io(), "The corresponding wasm code of the code path {} doesn't \ exist on chain.", code_path.as_ref(), @@ -271,7 +281,7 @@ pub async fn query_storage_value( ) -> Result where T: BorshDeserialize, - C: crate::ledger::queries::Client + Sync, + C: crate::queries::Client + Sync, { // In case `T` is a unit (only thing that encodes to 0 bytes), we have to // use `storage_has_key` instead of `storage_value`, because `storage_value` @@ -302,9 +312,7 @@ where } /// Query a storage value and the proof without decoding. -pub async fn query_storage_value_bytes< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn query_storage_value_bytes( client: &C, key: &storage::Key, height: Option, @@ -326,20 +334,16 @@ pub async fn query_storage_value_bytes< /// Query a range of storage values with a matching prefix and decode them with /// [`BorshDeserialize`]. Returns an iterator of the storage keys paired with /// their associated values. -pub async fn query_storage_prefix< - C: crate::ledger::queries::Client + Sync, - IO: Io, - T, ->( - client: &C, +pub async fn query_storage_prefix<'a, 'b, N: Namada<'a>, T>( + context: &'b N, key: &storage::Key, -) -> Result>, error::Error> +) -> Result>, error::Error> where T: BorshDeserialize, { - let values = convert_response::( + let values = convert_response::( RPC.shell() - .storage_prefix(client, None, None, false, key) + .storage_prefix(context.client(), None, None, false, key) .await, )?; let decode = @@ -348,7 +352,7 @@ where ) { Err(err) => { edisplay_line!( - IO, + context.io(), "Skipping a value for key {}. Error in decoding: {}", key, err @@ -365,7 +369,7 @@ where } /// Query to check if the given storage key exists. 
-pub async fn query_has_storage_key( +pub async fn query_has_storage_key( client: &C, key: &storage::Key, ) -> Result { @@ -415,13 +419,10 @@ impl<'a> From> for Query { /// Call the corresponding `tx_event_query` RPC method, to fetch /// the current status of a transation. -pub async fn query_tx_events( +pub async fn query_tx_events( client: &C, tx_event_query: TxEventQuery<'_>, -) -> std::result::Result< - Option, - ::Error, -> { +) -> std::result::Result, ::Error> { let tx_hash: Hash = tx_event_query.tx_hash().try_into().unwrap(); match tx_event_query { TxEventQuery::Accepted(_) => { @@ -437,16 +438,18 @@ pub async fn query_tx_events( } /// Dry run a transaction -pub async fn dry_run_tx( - client: &C, +pub async fn dry_run_tx<'a, N: Namada<'a>>( + context: &N, tx_bytes: Vec, ) -> Result { let (data, height, prove) = (Some(tx_bytes), None, false); - let result = convert_response::( - RPC.shell().dry_run_tx(client, data, height, prove).await, + let result = convert_response::( + RPC.shell() + .dry_run_tx(context.client(), data, height, prove) + .await, )? 
.data; - display_line!(IO, "Dry-run result: {}", result); + display_line!(context.io(), "Dry-run result: {}", result); Ok(result) } @@ -555,7 +558,7 @@ impl TxResponse { /// Lookup the full response accompanying the specified transaction event // TODO: maybe remove this in favor of `query_tx_status` -pub async fn query_tx_response( +pub async fn query_tx_response( client: &C, tx_query: TxEventQuery<'_>, ) -> Result { @@ -625,14 +628,14 @@ pub async fn query_tx_response( } /// Get the PoS parameters -pub async fn get_pos_params( +pub async fn get_pos_params( client: &C, ) -> Result { convert_response::(RPC.vp().pos().pos_params(client).await) } /// Get all validators in the given epoch -pub async fn get_all_validators( +pub async fn get_all_validators( client: &C, epoch: Epoch, ) -> Result, error::Error> { @@ -645,9 +648,7 @@ pub async fn get_all_validators( } /// Get the total staked tokens in the given epoch -pub async fn get_total_staked_tokens< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn get_total_staked_tokens( client: &C, epoch: Epoch, ) -> Result { @@ -657,7 +658,7 @@ pub async fn get_total_staked_tokens< } /// Get the given validator's stake at the given epoch -pub async fn get_validator_stake( +pub async fn get_validator_stake( client: &C, epoch: Epoch, validator: &Address, @@ -672,7 +673,7 @@ pub async fn get_validator_stake( } /// Query and return a validator's state -pub async fn get_validator_state( +pub async fn get_validator_state( client: &C, validator: &Address, epoch: Option, @@ -686,9 +687,7 @@ pub async fn get_validator_state( } /// Get the delegator's delegation -pub async fn get_delegators_delegation< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn get_delegators_delegation( client: &C, address: &Address, ) -> Result, error::Error> { @@ -698,9 +697,7 @@ pub async fn get_delegators_delegation< } /// Get the delegator's delegation at some epoh -pub async fn get_delegators_delegation_at< - C: 
crate::ledger::queries::Client + Sync, ->( +pub async fn get_delegators_delegation_at( client: &C, address: &Address, epoch: Epoch, @@ -714,7 +711,7 @@ pub async fn get_delegators_delegation_at< } /// Query proposal by Id -pub async fn query_proposal_by_id( +pub async fn query_proposal_by_id( client: &C, proposal_id: u64, ) -> Result, Error> { @@ -725,7 +722,7 @@ pub async fn query_proposal_by_id( /// Query and return validator's commission rate and max commission rate change /// per epoch -pub async fn query_commission_rate( +pub async fn query_commission_rate( client: &C, validator: &Address, epoch: Option, @@ -738,8 +735,23 @@ pub async fn query_commission_rate( ) } +/// Query and return the incoming redelegation epoch for a given pair of source +/// validator and delegator, if there is any. +pub async fn query_incoming_redelegations( + client: &C, + src_validator: &Address, + delegator: &Address, +) -> Result, Error> { + convert_response::>( + RPC.vp() + .pos() + .validator_incoming_redelegation(client, src_validator, delegator) + .await, + ) +} + /// Query a validator's bonds for a given epoch -pub async fn query_bond( +pub async fn query_bond( client: &C, source: &Address, validator: &Address, @@ -751,7 +763,7 @@ pub async fn query_bond( } /// Query the accunt substorage space of an address -pub async fn get_account_info( +pub async fn get_account_info( client: &C, owner: &Address, ) -> Result, error::Error> { @@ -761,9 +773,7 @@ pub async fn get_account_info( } /// Query if the public_key is revealed -pub async fn is_public_key_revealed< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn is_public_key_revealed( client: &C, owner: &Address, ) -> Result { @@ -771,7 +781,7 @@ pub async fn is_public_key_revealed< } /// Query an account substorage at a specific index -pub async fn get_public_key_at( +pub async fn get_public_key_at( client: &C, owner: &Address, index: u8, @@ -787,18 +797,16 @@ pub async fn get_public_key_at( } /// Query a validator's 
unbonds for a given epoch -pub async fn query_and_print_unbonds< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn query_and_print_unbonds<'a>( + context: &impl Namada<'a>, source: &Address, validator: &Address, ) -> Result<(), error::Error> { - let unbonds = query_unbond_with_slashing(client, source, validator).await?; - let current_epoch = query_epoch(client).await?; + let unbonds = + query_unbond_with_slashing(context.client(), source, validator).await?; + let current_epoch = query_epoch(context.client()).await?; - let mut total_withdrawable = token::Amount::default(); + let mut total_withdrawable = token::Amount::zero(); let mut not_yet_withdrawable = HashMap::::new(); for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { if withdraw_epoch <= current_epoch { @@ -809,19 +817,19 @@ pub async fn query_and_print_unbonds< *withdrawable_amount += amount; } } - if total_withdrawable != token::Amount::default() { + if !total_withdrawable.is_zero() { display_line!( - IO, + context.io(), "Total withdrawable now: {}.", total_withdrawable.to_string_native() ); } if !not_yet_withdrawable.is_empty() { - display_line!(IO, "Current epoch: {current_epoch}.") + display_line!(context.io(), "Current epoch: {current_epoch}.") } for (withdraw_epoch, amount) in not_yet_withdrawable { display_line!( - IO, + context.io(), "Amount {} withdrawable starting from epoch {withdraw_epoch}.", amount.to_string_native() ); @@ -830,9 +838,7 @@ pub async fn query_and_print_unbonds< } /// Query withdrawable tokens in a validator account for a given epoch -pub async fn query_withdrawable_tokens< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn query_withdrawable_tokens( client: &C, bond_source: &Address, validator: &Address, @@ -847,9 +853,7 @@ pub async fn query_withdrawable_tokens< } /// Query all unbonds for a validator, applying slashes -pub async fn query_unbond_with_slashing< - C: crate::ledger::queries::Client + Sync, ->( +pub 
async fn query_unbond_with_slashing( client: &C, source: &Address, validator: &Address, @@ -863,16 +867,14 @@ pub async fn query_unbond_with_slashing< } /// Get the givernance parameters -pub async fn query_governance_parameters< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn query_governance_parameters( client: &C, ) -> GovernanceParameters { unwrap_client_response::(RPC.vp().gov().parameters(client).await) } /// Get the givernance parameters -pub async fn query_proposal_votes( +pub async fn query_proposal_votes( client: &C, proposal_id: u64, ) -> Result, error::Error> { @@ -882,24 +884,24 @@ pub async fn query_proposal_votes( } /// Get the bond amount at the given epoch -pub async fn get_bond_amount_at( +pub async fn get_bond_amount_at( client: &C, delegator: &Address, validator: &Address, epoch: Epoch, -) -> Result, error::Error> { - let (_total, total_active) = convert_response::( +) -> Result { + let total_active = convert_response::( RPC.vp() .pos() .bond_with_slashing(client, delegator, validator, &Some(epoch)) .await, )?; - Ok(Some(total_active)) + Ok(total_active) } /// Get bonds and unbonds with all details (slashes and rewards, if any) /// grouped by their bond IDs. -pub async fn bonds_and_unbonds( +pub async fn bonds_and_unbonds( client: &C, source: &Option
, validator: &Option
, @@ -915,9 +917,7 @@ pub async fn bonds_and_unbonds( /// Get bonds and unbonds with all details (slashes and rewards, if any) /// grouped by their bond IDs, enriched with extra information calculated from /// the data. -pub async fn enriched_bonds_and_unbonds< - C: crate::ledger::queries::Client + Sync, ->( +pub async fn enriched_bonds_and_unbonds( client: &C, current_epoch: Epoch, source: &Option
, @@ -937,11 +937,8 @@ pub async fn enriched_bonds_and_unbonds< } /// Get the correct representation of the amount given the token type. -pub async fn validate_amount< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn validate_amount<'a, N: Namada<'a>>( + context: &N, amount: InputAmount, token: &Address, force: bool, @@ -951,21 +948,21 @@ pub async fn validate_amount< InputAmount::Unvalidated(amt) => amt.canonical(), InputAmount::Validated(amt) => return Ok(amt), }; - let denom = match convert_response::>( - RPC.vp().token().denomination(client, token).await, + let denom = match convert_response::>( + RPC.vp().token().denomination(context.client(), token).await, )? { Some(denom) => Ok(denom), None => { if force { display_line!( - IO, + context.io(), "No denomination found for token: {token}, but --force \ was passed. Defaulting to the provided denomination." ); Ok(input_amount.denom) } else { display_line!( - IO, + context.io(), "No denomination found for token: {token}, the input \ arguments could not be parsed." ); @@ -977,7 +974,7 @@ pub async fn validate_amount< }?; if denom < input_amount.denom && !force { display_line!( - IO, + context.io(), "The input amount contained a higher precision than allowed by \ {token}." ); @@ -988,7 +985,7 @@ pub async fn validate_amount< } else { input_amount.increase_precision(denom).map_err(|_err| { display_line!( - IO, + context.io(), "The amount provided requires more the 256 bits to represent." ); Error::from(QueryError::General( @@ -1001,10 +998,10 @@ pub async fn validate_amount< } /// Wait for a first block and node to be synced. 
-pub async fn wait_until_node_is_synched(client: &C) -> Halt<()> -where - C: crate::ledger::queries::Client + Sync, -{ +pub async fn wait_until_node_is_synched<'a>( + client: &(impl Client + Sync), + io: &impl Io, +) -> Result<(), Error> { let height_one = Height::try_from(1_u64).unwrap(); let try_count = Cell::new(1_u64); const MAX_TRIES: usize = 5; @@ -1026,7 +1023,7 @@ where return ControlFlow::Break(Ok(())); } display_line!( - IO, + io, " Waiting for {} ({}/{} tries)...", if is_at_least_height_one { "a first block" @@ -1039,52 +1036,98 @@ where try_count.set(try_count.get() + 1); ControlFlow::Continue(()) } - Err(e) => { - edisplay_line!( - IO, - "Failed to query node status with error: {}", - e - ); - ControlFlow::Break(Err(())) - } + Err(e) => ControlFlow::Break(Err(Error::Query( + QueryError::General(echo_error!( + io, + "Failed to query node status with error: {e}" + )), + ))), } }) .await // maybe time out - .try_halt(|_| { - display_line!( - IO, + .map_err(|_| { + edisplay_line!( + io, "Node is still catching up, wait for it to finish synching." ); + Error::Query(QueryError::CatchingUp) })? - // error querying rpc - .try_halt(|_| ()) } -/// Look up the denomination of a token in order to format it -/// correctly as a string. -pub async fn format_denominated_amount< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( +/// Look up the denomination of a token in order to make a correctly denominated +/// amount. 
+pub async fn denominate_amount( client: &C, + io: &impl Io, token: &Address, amount: token::Amount, -) -> String { +) -> DenominatedAmount { let denom = convert_response::>( RPC.vp().token().denomination(client, token).await, ) .unwrap_or_else(|t| { - display_line!(IO, "Error in querying for denomination: {t}"); + display_line!(io, "Error in querying for denomination: {t}"); None }) .unwrap_or_else(|| { display_line!( - IO, + io, "No denomination found for token: {token}, defaulting to zero \ decimal places" ); 0.into() }); - DenominatedAmount { amount, denom }.to_string() + DenominatedAmount { amount, denom } +} + +/// Look up the denomination of a token in order to format it +/// correctly as a string. +pub async fn format_denominated_amount( + client: &(impl Client + Sync), + io: &impl Io, + token: &Address, + amount: token::Amount, +) -> String { + denominate_amount(client, io, token, amount) + .await + .to_string() +} + +/// Look up the IBC denomination from a IbcToken. +pub async fn query_ibc_denom<'a, N: Namada<'a>>( + context: &N, + token: &Address, + owner: Option<&Address>, +) -> String { + let hash = match token { + Address::Internal(InternalAddress::IbcToken(hash)) => hash, + _ => return token.to_string(), + }; + + if let Some(owner) = owner { + let ibc_denom_key = ibc_denom_key(owner.to_string(), hash); + if let Ok(ibc_denom) = + query_storage_value::<_, String>(context.client(), &ibc_denom_key) + .await + { + return ibc_denom; + } + } + + // No owner is specified or the owner doesn't have the token + let ibc_denom_prefix = ibc_denom_key_prefix(None); + if let Ok(Some(ibc_denoms)) = + query_storage_prefix::<_, String>(context, &ibc_denom_prefix).await + { + for (key, ibc_denom) in ibc_denoms { + if let Some((_, token_hash)) = is_ibc_denom_key(&key) { + if token_hash == *hash { + return ibc_denom; + } + } + } + } + + token.to_string() } diff --git a/shared/src/sdk/signing.rs b/sdk/src/signing.rs similarity index 66% rename from 
shared/src/sdk/signing.rs rename to sdk/src/signing.rs index 042be03a63..381df34634 100644 --- a/shared/src/sdk/signing.rs +++ b/sdk/src/signing.rs @@ -1,60 +1,61 @@ //! Functions to sign transactions use std::collections::{BTreeMap, HashMap}; -use std::path::PathBuf; +use std::fmt::Display; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use itertools::Itertools; use masp_primitives::asset_type::AssetType; use masp_primitives::transaction::components::sapling::fees::{ InputView, OutputView, }; +use namada_core::ledger::parameters::storage as parameter_storage; use namada_core::proto::SignatureIndex; use namada_core::types::account::AccountPublicKeysMap; use namada_core::types::address::{ masp, masp_tx_key, Address, ImplicitAddress, }; +use namada_core::types::key::*; +use namada_core::types::masp::{ExtendedViewingKey, PaymentAddress}; +use namada_core::types::storage::Epoch; use namada_core::types::token; +use namada_core::types::token::Transfer; // use namada_core::types::storage::Key; use namada_core::types::token::{Amount, DenominatedAmount, MaspDenom}; -use namada_core::types::transaction::pos; +use namada_core::types::transaction::account::{InitAccount, UpdateAccount}; +use namada_core::types::transaction::governance::{ + InitProposalData, VoteProposalData, +}; +use namada_core::types::transaction::pos::InitValidator; +use namada_core::types::transaction::{pos, Fee}; use prost::Message; use serde::{Deserialize, Serialize}; use sha2::Digest; use zeroize::Zeroizing; -use crate::display_line; +use super::masp::{ShieldedContext, ShieldedTransfer}; +use crate::args::SdkTypes; +use crate::core::ledger::governance::storage::proposal::ProposalType; +use crate::core::ledger::governance::storage::vote::{ + StorageProposalVote, VoteType, +}; +use crate::error::{EncodingError, Error, TxError}; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use 
crate::ibc_proto::google::protobuf::Any; -use crate::ledger::parameters::storage as parameter_storage; +use crate::io::*; +use crate::masp::make_asset_type; use crate::proto::{MaspBuilder, Section, Tx}; -use crate::sdk::error::{EncodingError, Error, TxError}; -use crate::sdk::masp::{ - make_asset_type, ShieldedContext, ShieldedTransfer, ShieldedUtils, -}; -use crate::sdk::rpc::{ - format_denominated_amount, query_wasm_code_hash, validate_amount, -}; -use crate::sdk::tx::{ +use crate::rpc::{query_wasm_code_hash, validate_amount}; +use crate::tx::{ TX_BOND_WASM, TX_CHANGE_COMMISSION_WASM, TX_IBC_WASM, TX_INIT_ACCOUNT_WASM, TX_INIT_PROPOSAL, TX_INIT_VALIDATOR_WASM, TX_REVEAL_PK, TX_TRANSFER_WASM, - TX_UNBOND_WASM, TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, - VP_USER_WASM, + TX_UNBOND_WASM, TX_UNJAIL_VALIDATOR_WASM, TX_UPDATE_ACCOUNT_WASM, + TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, VP_USER_WASM, }; -pub use crate::sdk::wallet::store::AddressVpType; -use crate::sdk::wallet::{Wallet, WalletUtils}; -use crate::sdk::{args, rpc}; -use crate::types::io::*; -use crate::types::key::*; -use crate::types::masp::{ExtendedViewingKey, PaymentAddress}; -use crate::types::storage::Epoch; -use crate::types::token::Transfer; -use crate::types::transaction::account::{InitAccount, UpdateAccount}; -use crate::types::transaction::governance::{ - InitProposalData, VoteProposalData, -}; -use crate::types::transaction::pos::InitValidator; -use crate::types::transaction::Fee; +pub use crate::wallet::store::AddressVpType; +use crate::wallet::{Wallet, WalletIo}; +use crate::{args, display_line, rpc, Namada}; #[cfg(feature = "std")] /// Env. var specifying where to store signing test vectors @@ -82,31 +83,28 @@ pub struct SigningTxData { /// for it from the wallet. If the keypair is encrypted but a password is not /// supplied, then it is interactively prompted. Errors if the key cannot be /// found or loaded. 
-pub async fn find_pk< - C: crate::ledger::queries::Client + Sync, - U: WalletUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn find_pk<'a>( + context: &impl Namada<'a>, addr: &Address, password: Option>, ) -> Result { match addr { Address::Established(_) => { display_line!( - IO, + context.io(), "Looking-up public key of {} from the ledger...", addr.encode() ); - rpc::get_public_key_at(client, addr, 0) + rpc::get_public_key_at(context.client(), addr, 0) .await? .ok_or(Error::Other(format!( "No public key found for the address {}", addr.encode() ))) } - Address::Implicit(ImplicitAddress(pkh)) => Ok(wallet + Address::Implicit(ImplicitAddress(pkh)) => Ok(context + .wallet_mut() + .await .find_key_by_pkh(pkh, password) .map_err(|err| { Error::Other(format!( @@ -127,7 +125,7 @@ pub async fn find_pk< /// Load the secret key corresponding to the given public key from the wallet. /// If the keypair is encrypted but a password is not supplied, then it is /// interactively prompted. Errors if the key cannot be found or loaded. -pub fn find_key_by_pk( +pub fn find_key_by_pk( wallet: &mut Wallet, args: &args::Tx, public_key: &common::PublicKey, @@ -136,6 +134,21 @@ pub fn find_key_by_pk( // We already know the secret key corresponding to the MASP sentinal key Ok(masp_tx_key()) } else { + // Try to get the signer from the signing-keys argument + for signing_key in &args.signing_keys { + if signing_key.ref_to() == *public_key { + return Ok(signing_key.clone()); + } + } + // Try to get the signer from the wrapper-fee-payer argument + match &args.wrapper_fee_payer { + Some(wrapper_fee_payer) + if &wrapper_fee_payer.ref_to() == public_key => + { + return Ok(wrapper_fee_payer.clone()); + } + _ => {} + } // Otherwise we need to search the wallet for the secret key wallet .find_key_by_pk(public_key, args.password.clone()) @@ -153,14 +166,9 @@ pub fn find_key_by_pk( /// signer. Return the given signing key or public key of the given signer if /// possible. 
If no explicit signer given, use the `default`. If no `default` /// is given, an `Error` is returned. -pub async fn tx_signers< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - args: &args::Tx, +pub async fn tx_signers<'a>( + context: &impl Namada<'a>, + args: &args::Tx, default: Option
, ) -> Result, Error> { let signer = if !&args.signing_keys.is_empty() { @@ -179,8 +187,7 @@ pub async fn tx_signers< Some(signer) if signer == masp() => Ok(vec![masp_tx_key().ref_to()]), Some(signer) => Ok(vec![ - find_pk::(client, wallet, &signer, args.password.clone()) - .await?, + find_pk(context, &signer, args.password.clone()).await?, ]), None => other_err( "All transactions must be signed; please either specify the key \ @@ -201,7 +208,7 @@ pub async fn tx_signers< /// hashes needed for monitoring the tx on chain. /// /// If it is a dry run, it is not put in a wrapper, but returned as is. -pub fn sign_tx( +pub fn sign_tx( wallet: &mut Wallet, args: &args::Tx, tx: &mut Tx, @@ -242,27 +249,22 @@ pub fn sign_tx( /// Return the necessary data regarding an account to be able to generate a /// multisignature section -pub async fn aux_signing_data< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - args: &args::Tx, +pub async fn aux_signing_data<'a>( + context: &impl Namada<'a>, + args: &args::Tx, owner: Option
, default_signer: Option
, ) -> Result { let public_keys = if owner.is_some() || args.wrapper_fee_payer.is_none() { - tx_signers::(client, wallet, args, default_signer.clone()) - .await? + tx_signers(context, args, default_signer.clone()).await? } else { vec![] }; let (account_public_keys_map, threshold) = match &owner { Some(owner @ Address::Established(_)) => { - let account = rpc::get_account_info::(client, owner).await?; + let account = + rpc::get_account_info(context.client(), owner).await?; if let Some(account) = account { (Some(account.public_keys_map), account.threshold) } else { @@ -282,7 +284,11 @@ pub async fn aux_signing_data< }; let fee_payer = if args.disposable_signing_key { - wallet.generate_disposable_signing_key().to_public() + context + .wallet_mut() + .await + .generate_disposable_signing_key() + .to_public() } else { match &args.wrapper_fee_payer { Some(keypair) => keypair.to_public(), @@ -322,15 +328,10 @@ pub struct TxSourcePostBalance { /// wrapper and its payload which is needed for monitoring its /// progress on chain. 
#[allow(clippy::too_many_arguments)] -pub async fn wrap_tx< - C: crate::sdk::queries::Client + Sync, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - shielded: &mut ShieldedContext, +pub async fn wrap_tx<'a, N: Namada<'a>>( + context: &N, tx: &mut Tx, - args: &args::Tx, + args: &args::Tx, tx_source_balance: Option, epoch: Epoch, fee_payer: common::PublicKey, @@ -339,9 +340,9 @@ pub async fn wrap_tx< // Validate fee amount and token let gas_cost_key = parameter_storage::get_gas_cost_key(); let minimum_fee = match rpc::query_storage_value::< - C, + _, BTreeMap, - >(client, &gas_cost_key) + >(context.client(), &gas_cost_key) .await .and_then(|map| { map.get(&args.fee_token) @@ -358,20 +359,16 @@ pub async fn wrap_tx< if !args.force { return Err(e); } else { - token::Amount::default() + token::Amount::zero() } } }; let fee_amount = match args.fee_amount { Some(amount) => { - let validated_fee_amount = validate_amount::<_, IO>( - client, - amount, - &args.fee_token, - args.force, - ) - .await - .expect("Expected to be able to validate fee"); + let validated_fee_amount = + validate_amount(context, amount, &args.fee_token, args.force) + .await + .expect("Expected to be able to validate fee"); let amount = Amount::from_uint(validated_fee_amount.amount, 0).unwrap(); @@ -381,7 +378,7 @@ pub async fn wrap_tx< } else if !args.force { // Update the fee amount if it's not enough display_line!( - IO, + context.io(), "The provided gas price {} is less than the minimum \ amount required {}, changing it to match the minimum", amount.to_string_native(), @@ -405,9 +402,12 @@ pub async fn wrap_tx< let balance_key = token::balance_key(&args.fee_token, &fee_payer_address); - rpc::query_storage_value::(client, &balance_key) - .await - .unwrap_or_default() + rpc::query_storage_value::<_, token::Amount>( + context.client(), + &balance_key, + ) + .await + .unwrap_or_default() } }; @@ -419,30 +419,26 @@ pub async fn wrap_tx< Some(diff) if !diff.is_zero() => { if let Some(spending_key) = 
args.fee_unshield.clone() { // Unshield funds for fee payment - let transfer_args = args::TxTransfer { - tx: args.to_owned(), - source: spending_key, - target: namada_core::types::masp::TransferTarget::Address( - fee_payer_address.clone(), - ), - token: args.fee_token.clone(), - amount: args::InputAmount::Validated(DenominatedAmount { - // NOTE: must unshield the total fee amount, not the - // diff, because the ledger evaluates the transaction in - // reverse (wrapper first, inner second) and cannot know - // ahead of time if the inner will modify the balance of - // the gas payer - amount: total_fee, - denom: 0.into(), - }), - // These last two fields are not used in the function, mock - // them - native_token: args.fee_token.clone(), - tx_code_path: PathBuf::new(), + let target = namada_core::types::masp::TransferTarget::Address( + fee_payer_address.clone(), + ); + let fee_amount = DenominatedAmount { + // NOTE: must unshield the total fee amount, not the + // diff, because the ledger evaluates the transaction in + // reverse (wrapper first, inner second) and cannot know + // ahead of time if the inner will modify the balance of + // the gas payer + amount: total_fee, + denom: 0.into(), }; - match shielded - .gen_shielded_transfer::<_, IO>(client, transfer_args) + match ShieldedContext::::gen_shielded_transfer( + context, + &spending_key, + &target, + &args.fee_token, + fee_amount, + ) .await { Ok(Some(ShieldedTransfer { @@ -471,8 +467,8 @@ pub async fn wrap_tx< let descriptions_limit_key= parameter_storage::get_fee_unshielding_descriptions_limit_key(); let descriptions_limit = - rpc::query_storage_value::( - client, + rpc::query_storage_value::<_, u64>( + context.client(), &descriptions_limit_key, ) .await @@ -519,19 +515,12 @@ pub async fn wrap_tx< } else { let token_addr = args.fee_token.clone(); if !args.force { - let fee_amount = format_denominated_amount::<_, IO>( - client, - &token_addr, - total_fee, - ) - .await; + let fee_amount = + 
context.format_amount(&token_addr, total_fee).await; - let balance = format_denominated_amount::<_, IO>( - client, - &token_addr, - updated_balance, - ) - .await; + let balance = context + .format_amount(&token_addr, updated_balance) + .await; return Err(Error::from(TxError::BalanceTooLowForFees( fee_payer_address, token_addr, @@ -546,7 +535,7 @@ pub async fn wrap_tx< _ => { if args.fee_unshield.is_some() { display_line!( - IO, + context.io(), "Enough transparent balance to pay fees: the fee \ unshielding spending key will be ignored" ); @@ -603,22 +592,28 @@ fn make_ledger_amount_addr( prefix: &str, ) { if let Some(token) = tokens.get(token) { - output.push(format!("{}Amount {}: {}", prefix, token, amount)); + output.push(format!( + "{}Amount : {} {}", + prefix, + token.to_uppercase(), + to_ledger_decimal(&amount.to_string()), + )); } else { output.extend(vec![ - format!("{}Token: {}", prefix, token), - format!("{}Amount: {}", prefix, amount), + format!("{}Token : {}", prefix, token), + format!( + "{}Amount : {}", + prefix, + to_ledger_decimal(&amount.to_string()) + ), ]); } } /// Adds a Ledger output line describing a given transaction amount and asset /// type -async fn make_ledger_amount_asset< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +async fn make_ledger_amount_asset<'a>( + context: &impl Namada<'a>, tokens: &HashMap, output: &mut Vec, amount: u64, @@ -628,25 +623,33 @@ async fn make_ledger_amount_asset< ) { if let Some((token, _, _epoch)) = assets.get(token) { // If the AssetType can be decoded, then at least display Addressees - let formatted_amt = - format_denominated_amount::<_, IO>(client, token, amount.into()) - .await; + let formatted_amt = context.format_amount(token, amount.into()).await; if let Some(token) = tokens.get(token) { - output - .push( - format!("{}Amount: {} {}", prefix, token, formatted_amt,), - ); + output.push(format!( + "{}Amount : {} {}", + prefix, + token.to_uppercase(), + 
to_ledger_decimal(&formatted_amt), + )); } else { output.extend(vec![ - format!("{}Token: {}", prefix, token), - format!("{}Amount: {}", prefix, formatted_amt), + format!("{}Token : {}", prefix, token), + format!( + "{}Amount : {}", + prefix, + to_ledger_decimal(&formatted_amt) + ), ]); } } else { // Otherwise display the raw AssetTypes output.extend(vec![ - format!("{}Token: {}", prefix, token), - format!("{}Amount: {}", prefix, amount), + format!("{}Token : {}", prefix, token), + format!( + "{}Amount : {}", + prefix, + to_ledger_decimal(&amount.to_string()) + ), ]); } } @@ -654,54 +657,44 @@ async fn make_ledger_amount_asset< /// Split the lines in the vector that are longer than the Ledger device's /// character width fn format_outputs(output: &mut Vec) { - const LEDGER_WIDTH: usize = 60; + const MAX_KEY_LEN: usize = 39; + const MAX_VALUE_LEN: usize = 39; let mut i = 0; let mut pos = 0; // Break down each line that is too long one-by-one while pos < output.len() { - let prefix_len = i.to_string().len() + 3; let curr_line = output[pos].clone(); - if curr_line.len() + prefix_len < LEDGER_WIDTH { + let (key, mut value) = + curr_line.split_once(':').unwrap_or(("", &curr_line)); + // Truncate the key length to the declared maximum + let key = key.trim().chars().take(MAX_KEY_LEN - 1).collect::(); + // Trim value because we will insert spaces later + value = value.trim(); + if value.is_empty() { + value = "(none)" + } + if value.chars().count() < MAX_VALUE_LEN { // No need to split the line in this case - output[pos] = format!("{} | {}", i, curr_line); + output[pos] = format!("{} | {} : {}", i, key, value); pos += 1; } else { // Line is too long so split it up. 
Repeat the key on each line - let (mut key, mut value) = - curr_line.split_once(':').unwrap_or(("", &curr_line)); - key = key.trim(); - value = value.trim(); - if value.is_empty() { - value = "(none)" - } - - // First comput how many lines we will break the current one up into - let mut digits = 1; - let mut line_space; - let mut lines; - loop { - let prefix_len = prefix_len + 7 + 2 * digits + key.len(); - line_space = LEDGER_WIDTH - prefix_len; - lines = (value.len() + line_space - 1) / line_space; - if lines.to_string().len() <= digits { - break; - } else { - digits += 1; - } - } - - // Then break up this line according to the above plan output.remove(pos); - for (idx, part) in - value.chars().chunks(line_space).into_iter().enumerate() + let part_count = (value.chars().count() + MAX_VALUE_LEN - 2) + / (MAX_VALUE_LEN - 1); + for (idx, part) in value + .chars() + .chunks(MAX_VALUE_LEN - 1) + .into_iter() + .enumerate() { let line = format!( "{} | {} [{}/{}] : {}", i, key, idx + 1, - lines, + part_count, part.collect::(), ); output.insert(pos, line); @@ -714,11 +707,8 @@ fn format_outputs(output: &mut Vec) { /// Adds a Ledger output for the sender and destination for transparent and MASP /// transactions -pub async fn make_ledger_masp_endpoints< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +pub async fn make_ledger_masp_endpoints<'a>( + context: &impl Namada<'a>, tokens: &HashMap, output: &mut Vec, transfer: &Transfer, @@ -740,8 +730,8 @@ pub async fn make_ledger_masp_endpoints< for sapling_input in builder.builder.sapling_inputs() { let vk = ExtendedViewingKey::from(*sapling_input.key()); output.push(format!("Sender : {}", vk)); - make_ledger_amount_asset::<_, IO>( - client, + make_ledger_amount_asset( + context, tokens, output, sapling_input.value(), @@ -767,8 +757,8 @@ pub async fn make_ledger_masp_endpoints< for sapling_output in builder.builder.sapling_outputs() { let pa = PaymentAddress::from(sapling_output.address()); 
output.push(format!("Destination : {}", pa)); - make_ledger_amount_asset::<_, IO>( - client, + make_ledger_amount_asset( + context, tokens, output, sapling_output.value(), @@ -792,13 +782,8 @@ pub async fn make_ledger_masp_endpoints< /// Internal method used to generate transaction test vectors #[cfg(feature = "std")] -pub async fn generate_test_vector< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn generate_test_vector<'a>( + context: &impl Namada<'a>, tx: &Tx, ) -> Result<(), Error> { use std::env; @@ -810,8 +795,7 @@ pub async fn generate_test_vector< // Contract the large data blobs in the transaction tx.wallet_filter(); // Convert the transaction to Ledger format - let decoding = - to_ledger_vector::<_, _, IO>(client, wallet, &tx).await?; + let decoding = to_ledger_vector(context, &tx).await?; let output = serde_json::to_string(&decoding) .map_err(|e| Error::from(EncodingError::Serde(e.to_string())))?; // Record the transaction at the identified path @@ -847,62 +831,106 @@ pub async fn generate_test_vector< Ok(()) } +/// Convert decimal numbers into the format used by Ledger. Specifically remove +/// all insignificant zeros occuring after decimal point. +fn to_ledger_decimal(amount: &str) -> String { + if amount.contains('.') { + let mut amount = amount.trim_end_matches('0').to_string(); + if amount.ends_with('.') { + amount.push('0') + } + amount + } else { + amount.to_string() + ".0" + } +} + +/// A ProposalVote wrapper that prints the spending cap with Ledger decimal +/// formatting. 
+struct LedgerProposalVote<'a>(&'a StorageProposalVote); + +impl<'a> Display for LedgerProposalVote<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match &self.0 { + StorageProposalVote::Yay(vote_type) => match vote_type { + VoteType::Default => write!(f, "yay"), + VoteType::PGFSteward => write!(f, "yay for PGF steward"), + VoteType::PGFPayment => { + write!(f, "yay for PGF payment proposal") + } + }, + + StorageProposalVote::Nay => write!(f, "nay"), + } + } +} + +/// A ProposalType wrapper that prints the hash of the contained WASM code if it +/// is present. +struct LedgerProposalType<'a>(&'a ProposalType, &'a Tx); + +impl<'a> Display for LedgerProposalType<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self.0 { + ProposalType::Default(None) => write!(f, "Default"), + ProposalType::Default(Some(hash)) => { + let extra = self + .1 + .get_section(hash) + .and_then(|x| Section::extra_data_sec(x.as_ref())) + .expect("unable to load vp code") + .code + .hash(); + write!(f, "{}", HEXLOWER.encode(&extra.0)) + } + ProposalType::PGFSteward(_) => write!(f, "PGF Steward"), + ProposalType::PGFPayment(_) => write!(f, "PGF Payment"), + } + } +} + /// Converts the given transaction to the form that is displayed on the Ledger /// device -pub async fn to_ledger_vector< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn to_ledger_vector<'a>( + context: &impl Namada<'a>, tx: &Tx, ) -> Result { let init_account_hash = - query_wasm_code_hash::<_, IO>(client, TX_INIT_ACCOUNT_WASM).await?; + query_wasm_code_hash(context, TX_INIT_ACCOUNT_WASM).await?; let init_validator_hash = - query_wasm_code_hash::<_, IO>(client, TX_INIT_VALIDATOR_WASM).await?; + query_wasm_code_hash(context, TX_INIT_VALIDATOR_WASM).await?; let init_proposal_hash = - query_wasm_code_hash::<_, IO>(client, TX_INIT_PROPOSAL).await?; + query_wasm_code_hash(context, 
TX_INIT_PROPOSAL).await?; let vote_proposal_hash = - query_wasm_code_hash::<_, IO>(client, TX_VOTE_PROPOSAL).await?; - let reveal_pk_hash = - query_wasm_code_hash::<_, IO>(client, TX_REVEAL_PK).await?; + query_wasm_code_hash(context, TX_VOTE_PROPOSAL).await?; + let reveal_pk_hash = query_wasm_code_hash(context, TX_REVEAL_PK).await?; let update_account_hash = - query_wasm_code_hash::<_, IO>(client, TX_UPDATE_ACCOUNT_WASM).await?; - let transfer_hash = - query_wasm_code_hash::<_, IO>(client, TX_TRANSFER_WASM).await?; - let ibc_hash = query_wasm_code_hash::<_, IO>(client, TX_IBC_WASM).await?; - let bond_hash = query_wasm_code_hash::<_, IO>(client, TX_BOND_WASM).await?; - let unbond_hash = - query_wasm_code_hash::<_, IO>(client, TX_UNBOND_WASM).await?; - let withdraw_hash = - query_wasm_code_hash::<_, IO>(client, TX_WITHDRAW_WASM).await?; + query_wasm_code_hash(context, TX_UPDATE_ACCOUNT_WASM).await?; + let transfer_hash = query_wasm_code_hash(context, TX_TRANSFER_WASM).await?; + let ibc_hash = query_wasm_code_hash(context, TX_IBC_WASM).await?; + let bond_hash = query_wasm_code_hash(context, TX_BOND_WASM).await?; + let unbond_hash = query_wasm_code_hash(context, TX_UNBOND_WASM).await?; + let withdraw_hash = query_wasm_code_hash(context, TX_WITHDRAW_WASM).await?; let change_commission_hash = - query_wasm_code_hash::<_, IO>(client, TX_CHANGE_COMMISSION_WASM) - .await?; - let user_hash = query_wasm_code_hash::<_, IO>(client, VP_USER_WASM).await?; + query_wasm_code_hash(context, TX_CHANGE_COMMISSION_WASM).await?; + let user_hash = query_wasm_code_hash(context, VP_USER_WASM).await?; + let unjail_validator_hash = + query_wasm_code_hash(context, TX_UNJAIL_VALIDATOR_WASM).await?; // To facilitate lookups of human-readable token names - let tokens: HashMap = wallet - .get_addresses_with_vp_type(AddressVpType::Token) + let tokens: HashMap = context + .wallet() + .await + .get_addresses() .into_iter() - .map(|addr| { - let alias = match wallet.find_alias(&addr) { - Some(alias) => 
alias.to_string(), - None => addr.to_string(), - }; - (addr, alias) - }) + .map(|(alias, addr)| (addr, alias)) .collect(); let mut tv = LedgerVector { - blob: HEXLOWER.encode(&tx.try_to_vec().map_err(|_| { - Error::Other("unable to serialize transaction".to_string()) - })?), + blob: HEXLOWER.encode(&tx.serialize_to_vec()), index: 0, valid: true, - name: "Custom 0".to_string(), + name: "Custom_0".to_string(), ..Default::default() }; @@ -928,7 +956,7 @@ pub async fn to_ledger_vector< .map_err(|err| { Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Init Account 0".to_string(); + tv.name = "Init_Account_0".to_string(); let extra = tx .get_section(&init_account.vp_code_hash) @@ -941,15 +969,26 @@ pub async fn to_ledger_vector< } else { HEXLOWER.encode(&extra.0) }; - + tv.output.extend(vec![format!("Type : Init Account")]); + tv.output.extend( + init_account + .public_keys + .iter() + .map(|k| format!("Public key : {}", k)), + ); tv.output.extend(vec![ - format!("Type : Init Account"), - format!("Public key : {:?}", init_account.public_keys), + format!("Threshold : {}", init_account.threshold), format!("VP type : {}", vp_code), ]); + tv.output_expert.extend( + init_account + .public_keys + .iter() + .map(|k| format!("Public key : {}", k)), + ); tv.output_expert.extend(vec![ - format!("Public key : {:?}", init_account.public_keys), + format!("Threshold : {}", init_account.threshold), format!("VP type : {}", HEXLOWER.encode(&extra.0)), ]); } else if code_hash == init_validator_hash { @@ -961,7 +1000,7 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Init Validator 0".to_string(); + tv.name = "Init_Validator_0".to_string(); let extra = tx .get_section(&init_validator.validator_vp_code_hash) @@ -975,10 +1014,18 @@ pub async fn to_ledger_vector< HEXLOWER.encode(&extra.0) }; + tv.output.extend(vec!["Type : Init Validator".to_string()]); + tv.output.extend( + init_validator + .account_keys + 
.iter() + .map(|k| format!("Account key : {}", k)), + ); tv.output.extend(vec![ - format!("Type : Init Validator"), - format!("Account key : {:?}", init_validator.account_keys), + format!("Threshold : {}", init_validator.threshold), format!("Consensus key : {}", init_validator.consensus_key), + format!("Ethereum cold key : {}", init_validator.eth_cold_key), + format!("Ethereum hot key : {}", init_validator.eth_hot_key), format!("Protocol key : {}", init_validator.protocol_key), format!("DKG key : {}", init_validator.dkg_key), format!("Commission rate : {}", init_validator.commission_rate), @@ -989,9 +1036,17 @@ pub async fn to_ledger_vector< format!("Validator VP type : {}", vp_code,), ]); + tv.output_expert.extend( + init_validator + .account_keys + .iter() + .map(|k| format!("Account key : {}", k)), + ); tv.output_expert.extend(vec![ - format!("Account key : {:?}", init_validator.account_keys), + format!("Threshold : {}", init_validator.threshold), format!("Consensus key : {}", init_validator.consensus_key), + format!("Ethereum cold key : {}", init_validator.eth_cold_key), + format!("Ethereum hot key : {}", init_validator.eth_hot_key), format!("Protocol key : {}", init_validator.protocol_key), format!("DKG key : {}", init_validator.dkg_key), format!("Commission rate : {}", init_validator.commission_rate), @@ -1010,16 +1065,24 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Init Proposal 0".to_string(); + tv.name = "Init_Proposal_0".to_string(); - let init_proposal_data_id = init_proposal_data - .id - .as_ref() - .map(u64::to_string) - .unwrap_or_else(|| "(none)".to_string()); + let extra = tx + .get_section(&init_proposal_data.content) + .and_then(|x| Section::extra_data_sec(x.as_ref())) + .expect("unable to load vp code") + .code + .hash(); + + tv.output.push("Type : Init proposal".to_string()); + if let Some(id) = init_proposal_data.id.as_ref() { + tv.output.push(format!("ID : {}", id)); + } 
tv.output.extend(vec![ - format!("Type : Init proposal"), - format!("ID : {}", init_proposal_data_id), + format!( + "Proposal type : {}", + LedgerProposalType(&init_proposal_data.r#type, tx) + ), format!("Author : {}", init_proposal_data.author), format!( "Voting start epoch : {}", @@ -1030,12 +1093,17 @@ pub async fn to_ledger_vector< init_proposal_data.voting_end_epoch ), format!("Grace epoch : {}", init_proposal_data.grace_epoch), + format!("Content : {}", HEXLOWER.encode(&extra.0)), ]); - tv.output - .push(format!("Content: {}", init_proposal_data.content)); + if let Some(id) = init_proposal_data.id.as_ref() { + tv.output_expert.push(format!("ID : {}", id)); + } tv.output_expert.extend(vec![ - format!("ID : {}", init_proposal_data_id), + format!( + "Proposal type : {}", + LedgerProposalType(&init_proposal_data.r#type, tx) + ), format!("Author : {}", init_proposal_data.author), format!( "Voting start epoch : {}", @@ -1046,9 +1114,8 @@ pub async fn to_ledger_vector< init_proposal_data.voting_end_epoch ), format!("Grace epoch : {}", init_proposal_data.grace_epoch), + format!("Content : {}", HEXLOWER.encode(&extra.0)), ]); - tv.output - .push(format!("Content: {}", init_proposal_data.content)); } else if code_hash == vote_proposal_hash { let vote_proposal = VoteProposalData::try_from_slice( &tx.data() @@ -1058,26 +1125,26 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Vote Proposal 0".to_string(); + tv.name = "Vote_Proposal_0".to_string(); tv.output.extend(vec![ format!("Type : Vote Proposal"), format!("ID : {}", vote_proposal.id), - format!("Vote : {}", vote_proposal.vote), + format!("Vote : {}", LedgerProposalVote(&vote_proposal.vote)), format!("Voter : {}", vote_proposal.voter), ]); for delegation in &vote_proposal.delegations { - tv.output.push(format!("Delegations : {}", delegation)); + tv.output.push(format!("Delegation : {}", delegation)); } tv.output_expert.extend(vec![ format!("ID : {}", 
vote_proposal.id), - format!("Vote : {}", vote_proposal.vote), + format!("Vote : {}", LedgerProposalVote(&vote_proposal.vote)), format!("Voter : {}", vote_proposal.voter), ]); for delegation in vote_proposal.delegations { tv.output_expert - .push(format!("Delegations : {}", delegation)); + .push(format!("Delegation : {}", delegation)); } } else if code_hash == reveal_pk_hash { let public_key = common::PublicKey::try_from_slice( @@ -1088,17 +1155,17 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Init Account 0".to_string(); + tv.name = "Reveal_Pubkey_0".to_string(); tv.output.extend(vec![ - format!("Type : Reveal PK"), + format!("Type : Reveal Pubkey"), format!("Public key : {}", public_key), ]); tv.output_expert .extend(vec![format!("Public key : {}", public_key)]); } else if code_hash == update_account_hash { - let transfer = UpdateAccount::try_from_slice( + let update_account = UpdateAccount::try_from_slice( &tx.data() .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) @@ -1106,9 +1173,9 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Update VP 0".to_string(); + tv.name = "Update_VP_0".to_string(); - match &transfer.vp_code_hash { + match &update_account.vp_code_hash { Some(hash) => { let extra = tx .get_section(hash) @@ -1125,14 +1192,40 @@ pub async fn to_ledger_vector< }; tv.output.extend(vec![ format!("Type : Update VP"), - format!("Address : {}", transfer.addr), - format!("VP type : {}", vp_code), - ]); - - tv.output_expert.extend(vec![ - format!("Address : {}", transfer.addr), - format!("VP type : {}", HEXLOWER.encode(&extra.0)), + format!("Address : {}", update_account.addr), ]); + tv.output.extend( + update_account + .public_keys + .iter() + .map(|k| format!("Public key : {}", k)), + ); + if update_account.threshold.is_some() { + tv.output.extend(vec![format!( + "Threshold : {}", + update_account.threshold.unwrap() + )]) + } + 
tv.output.extend(vec![format!("VP type : {}", vp_code)]); + + tv.output_expert + .extend(vec![format!("Address : {}", update_account.addr)]); + tv.output_expert.extend( + update_account + .public_keys + .iter() + .map(|k| format!("Public key : {}", k)), + ); + if update_account.threshold.is_some() { + tv.output_expert.extend(vec![format!( + "Threshold : {}", + update_account.threshold.unwrap() + )]) + } + tv.output_expert.extend(vec![format!( + "VP type : {}", + HEXLOWER.encode(&extra.0) + )]); } None => (), }; @@ -1171,11 +1264,11 @@ pub async fn to_ledger_vector< None }; - tv.name = "Transfer 0".to_string(); + tv.name = "Transfer_0".to_string(); tv.output.push("Type : Transfer".to_string()); - make_ledger_masp_endpoints::<_, IO>( - client, + make_ledger_masp_endpoints( + context, &tokens, &mut tv.output, &transfer, @@ -1183,8 +1276,8 @@ pub async fn to_ledger_vector< &asset_types, ) .await; - make_ledger_masp_endpoints::<_, IO>( - client, + make_ledger_masp_endpoints( + context, &tokens, &mut tv.output_expert, &transfer, @@ -1200,7 +1293,7 @@ pub async fn to_ledger_vector< ) .map_err(|x| Error::from(EncodingError::Conversion(x.to_string())))?; - tv.name = "IBC 0".to_string(); + tv.name = "IBC_0".to_string(); tv.output.push("Type : IBC".to_string()); match MsgTransfer::try_from(any_msg.clone()) { @@ -1222,7 +1315,11 @@ pub async fn to_ledger_vector< ), format!( "Timeout timestamp : {}", - transfer.timeout_timestamp_on_b + transfer + .timeout_timestamp_on_b + .into_tm_time() + .map_or("(none)".to_string(), |time| time + .to_rfc3339()) ), ]); tv.output_expert.extend(vec![ @@ -1237,7 +1334,11 @@ pub async fn to_ledger_vector< ), format!( "Timeout timestamp : {}", - transfer.timeout_timestamp_on_b + transfer + .timeout_timestamp_on_b + .into_tm_time() + .map_or("(none)".to_string(), |time| time + .to_rfc3339()) ), ]); } @@ -1258,24 +1359,29 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Bond 0".to_string(); 
+ tv.name = "Bond_0".to_string(); - let bond_source = bond - .source - .as_ref() - .map(Address::to_string) - .unwrap_or_else(|| "(none)".to_string()); + tv.output.push("Type : Bond".to_string()); + if let Some(source) = bond.source.as_ref() { + tv.output.push(format!("Source : {}", source)); + } tv.output.extend(vec![ - format!("Type : Bond"), - format!("Source : {}", bond_source), format!("Validator : {}", bond.validator), - format!("Amount : {}", bond.amount.to_string_native()), + format!( + "Amount : NAM {}", + to_ledger_decimal(&bond.amount.to_string_native()) + ), ]); + if let Some(source) = bond.source.as_ref() { + tv.output_expert.push(format!("Source : {}", source)); + } tv.output_expert.extend(vec![ - format!("Source : {}", bond_source), format!("Validator : {}", bond.validator), - format!("Amount : {}", bond.amount.to_string_native()), + format!( + "Amount : NAM {}", + to_ledger_decimal(&bond.amount.to_string_native()) + ), ]); } else if code_hash == unbond_hash { let unbond = pos::Unbond::try_from_slice( @@ -1286,24 +1392,29 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Unbond 0".to_string(); + tv.name = "Unbond_0".to_string(); - let unbond_source = unbond - .source - .as_ref() - .map(Address::to_string) - .unwrap_or_else(|| "(none)".to_string()); + tv.output.push("Type : Unbond".to_string()); + if let Some(source) = unbond.source.as_ref() { + tv.output.push(format!("Source : {}", source)); + } tv.output.extend(vec![ - format!("Code : Unbond"), - format!("Source : {}", unbond_source), format!("Validator : {}", unbond.validator), - format!("Amount : {}", unbond.amount.to_string_native()), + format!( + "Amount : NAM {}", + to_ledger_decimal(&unbond.amount.to_string_native()) + ), ]); + if let Some(source) = unbond.source.as_ref() { + tv.output_expert.push(format!("Source : {}", source)); + } tv.output_expert.extend(vec![ - format!("Source : {}", unbond_source), format!("Validator : {}", 
unbond.validator), - format!("Amount : {}", unbond.amount.to_string_native()), + format!( + "Amount : NAM {}", + to_ledger_decimal(&unbond.amount.to_string_native()) + ), ]); } else if code_hash == withdraw_hash { let withdraw = pos::Withdraw::try_from_slice( @@ -1314,23 +1425,20 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Withdraw 0".to_string(); + tv.name = "Withdraw_0".to_string(); - let withdraw_source = withdraw - .source - .as_ref() - .map(Address::to_string) - .unwrap_or_else(|| "(none)".to_string()); - tv.output.extend(vec![ - format!("Type : Withdraw"), - format!("Source : {}", withdraw_source), - format!("Validator : {}", withdraw.validator), - ]); + tv.output.push("Type : Withdraw".to_string()); + if let Some(source) = withdraw.source.as_ref() { + tv.output.push(format!("Source : {}", source)); + } + tv.output + .push(format!("Validator : {}", withdraw.validator)); - tv.output_expert.extend(vec![ - format!("Source : {}", withdraw_source), - format!("Validator : {}", withdraw.validator), - ]); + if let Some(source) = withdraw.source.as_ref() { + tv.output_expert.push(format!("Source : {}", source)); + } + tv.output_expert + .push(format!("Validator : {}", withdraw.validator)); } else if code_hash == change_commission_hash { let commission_change = pos::CommissionChange::try_from_slice( &tx.data() @@ -1340,7 +1448,7 @@ pub async fn to_ledger_vector< Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Change Commission 0".to_string(); + tv.name = "Change_Commission_0".to_string(); tv.output.extend(vec![ format!("Type : Change commission"), @@ -1352,39 +1460,53 @@ pub async fn to_ledger_vector< format!("New rate : {}", commission_change.new_rate), format!("Validator : {}", commission_change.validator), ]); + } else if code_hash == unjail_validator_hash { + let address = Address::try_from_slice( + &tx.data() + .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, + ) + 
.map_err(|err| { + Error::from(EncodingError::Conversion(err.to_string())) + })?; + + tv.name = "Unjail_Validator_0".to_string(); + + tv.output.extend(vec![ + format!("Type : Unjail Validator"), + format!("Validator : {}", address), + ]); + + tv.output_expert.push(format!("Validator : {}", address)); + } else { + tv.name = "Custom_0".to_string(); + tv.output.push("Type : Custom".to_string()); } if let Some(wrapper) = tx.header.wrapper() { let gas_token = wrapper.fee.token.clone(); - let gas_limit = format_denominated_amount::<_, IO>( - client, - &gas_token, - Amount::from(wrapper.gas_limit), - ) - .await; - let fee_amount_per_gas_unit = format_denominated_amount::<_, IO>( - client, - &gas_token, - wrapper.fee.amount_per_gas_unit, - ) - .await; + let gas_limit = context + .format_amount(&gas_token, Amount::from(wrapper.gas_limit)) + .await; + let fee_amount_per_gas_unit = context + .format_amount(&gas_token, wrapper.fee.amount_per_gas_unit) + .await; tv.output_expert.extend(vec![ format!("Timestamp : {}", tx.header.timestamp.0), - format!("PK : {}", wrapper.pk), + format!("Pubkey : {}", wrapper.pk), format!("Epoch : {}", wrapper.epoch), format!("Gas limit : {}", gas_limit), - format!("Fee token : {}", gas_token), ]); if let Some(token) = tokens.get(&wrapper.fee.token) { tv.output_expert.push(format!( - "Fee amount per gas unit : {} {}", - token, fee_amount_per_gas_unit + "Fees/gas unit : {} {}", + token.to_uppercase(), + to_ledger_decimal(&fee_amount_per_gas_unit), )); } else { - tv.output_expert.push(format!( - "Fee amount per gas unit : {}", - fee_amount_per_gas_unit - )); + tv.output_expert.extend(vec![ + format!("Fee token : {}", gas_token), + format!("Fees/gas unit : {}", fee_amount_per_gas_unit), + ]); } } diff --git a/shared/src/sdk/tx.rs b/sdk/src/tx.rs similarity index 58% rename from shared/src/sdk/tx.rs rename to sdk/src/tx.rs index 9d7fe0cfe4..962c6964a3 100644 --- a/shared/src/sdk/tx.rs +++ b/sdk/src/tx.rs @@ -15,61 +15,65 @@ use 
masp_primitives::transaction::components::sapling::fees::{ use masp_primitives::transaction::components::transparent::fees::{ InputView as TransparentInputView, OutputView as TransparentOutputView, }; -use masp_primitives::transaction::components::I32Sum; +use masp_primitives::transaction::components::I128Sum; +use namada_core::ibc::applications::transfer::msgs::transfer::MsgTransfer; +use namada_core::ibc::applications::transfer::packet::PacketData; +use namada_core::ibc::applications::transfer::PrefixedCoin; +use namada_core::ibc::core::ics04_channel::timeout::TimeoutHeight; +use namada_core::ibc::core::timestamp::Timestamp as IbcTimestamp; +use namada_core::ibc::core::Msg; +use namada_core::ibc::Height as IbcHeight; use namada_core::ledger::governance::cli::onchain::{ DefaultProposal, OnChainProposal, PgfFundingProposal, PgfStewardProposal, ProposalVote, }; use namada_core::ledger::governance::storage::proposal::ProposalType; use namada_core::ledger::governance::storage::vote::StorageProposalVote; +use namada_core::ledger::ibc::storage::channel_key; use namada_core::ledger::pgf::cli::steward::Commission; use namada_core::types::address::{masp, Address, InternalAddress}; use namada_core::types::dec::Dec; use namada_core::types::hash::Hash; +use namada_core::types::ibc::IbcShieldedTransfer; +use namada_core::types::key::*; +use namada_core::types::masp::{TransferSource, TransferTarget}; +use namada_core::types::storage::Epoch; +use namada_core::types::time::DateTimeUtc; use namada_core::types::token::MaspDenom; +use namada_core::types::transaction::account::{InitAccount, UpdateAccount}; use namada_core::types::transaction::governance::{ InitProposalData, VoteProposalData, }; use namada_core::types::transaction::pgf::UpdateStewardCommission; +use namada_core::types::transaction::pos; +use namada_core::types::{storage, token}; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{CommissionPair, ValidatorState}; -use 
crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; -use crate::ibc::applications::transfer::packet::PacketData; -use crate::ibc::applications::transfer::PrefixedCoin; -use crate::ibc::core::ics04_channel::timeout::TimeoutHeight; -use crate::ibc::core::timestamp::Timestamp as IbcTimestamp; -use crate::ibc::core::Msg; -use crate::ibc::Height as IbcHeight; -use crate::ledger::ibc::storage::ibc_denom_key; +use crate::args::{self, InputAmount}; +use crate::control_flow::time; +use crate::error::{EncodingError, Error, QueryError, Result, TxError}; +use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::io::Io; +use crate::masp::TransferErr::Build; +use crate::masp::{make_asset_type, ShieldedContext, ShieldedTransfer}; use crate::proto::{MaspBuilder, Tx}; -use crate::sdk::args::{self, InputAmount}; -use crate::sdk::error::{EncodingError, Error, QueryError, Result, TxError}; -use crate::sdk::masp::TransferErr::Build; -use crate::sdk::masp::{ShieldedContext, ShieldedTransfer, ShieldedUtils}; -use crate::sdk::rpc::{ - self, format_denominated_amount, query_wasm_code_hash, validate_amount, - TxBroadcastData, TxResponse, +use crate::queries::Client; +use crate::rpc::{ + self, query_wasm_code_hash, validate_amount, TxBroadcastData, TxResponse, }; -use crate::sdk::signing::{self, TxSourcePostBalance}; -use crate::sdk::wallet::{Wallet, WalletUtils}; +use crate::signing::{self, SigningTxData, TxSourcePostBalance}; use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::tendermint_rpc::error::Error as RpcError; -use crate::types::control_flow::{time, ProceedOrElse}; -use crate::types::io::Io; -use crate::types::key::*; -use crate::types::masp::TransferTarget; -use crate::types::storage::Epoch; -use crate::types::time::DateTimeUtc; -use crate::types::transaction::account::{InitAccount, UpdateAccount}; -use crate::types::transaction::{pos, TxType}; -use crate::types::{storage, token}; -use crate::{display_line, 
edisplay_line, vm}; +use crate::wallet::WalletIo; +use crate::{display_line, edisplay_line, Namada}; /// Initialize account transaction WASM pub const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; /// Initialize validator transaction WASM path pub const TX_INIT_VALIDATOR_WASM: &str = "tx_init_validator.wasm"; +/// Unjail validator transaction WASM path +pub const TX_UNJAIL_VALIDATOR_WASM: &str = "tx_unjail_validator.wasm"; /// Initialize proposal transaction WASM path pub const TX_INIT_PROPOSAL: &str = "tx_init_proposal.wasm"; /// Vote transaction WASM path @@ -90,15 +94,23 @@ pub const TX_BOND_WASM: &str = "tx_bond.wasm"; pub const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; /// Withdraw WASM path pub const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; +/// Bridge pool WASM path +pub const TX_BRIDGE_POOL_WASM: &str = "tx_bridge_pool.wasm"; /// Change commission WASM path pub const TX_CHANGE_COMMISSION_WASM: &str = "tx_change_validator_commission.wasm"; +/// Resign steward WASM path +pub const TX_RESIGN_STEWARD: &str = "tx_resign_steward.wasm"; +/// Update steward commission WASM path +pub const TX_UPDATE_STEWARD_COMMISSION: &str = + "tx_update_steward_commission.wasm"; /// Default timeout in seconds for requests to the `/accepted` /// and `/applied` ABCI query endpoints. 
const DEFAULT_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS: u64 = 60; /// Capture the result of running a transaction +#[derive(Debug)] pub enum ProcessTxResponse { /// Result of submitting a transaction to the blockchain Applied(TxResponse), @@ -121,7 +133,7 @@ impl ProcessTxResponse { } /// Build and dump a transaction either to file or to screen -pub fn dump_tx(args: &args::Tx, tx: Tx) { +pub fn dump_tx(io: &IO, args: &args::Tx, tx: Tx) { let tx_id = tx.header_hash(); let serialized_tx = tx.serialize(); match args.output_folder.to_owned() { @@ -132,14 +144,14 @@ pub fn dump_tx(args: &args::Tx, tx: Tx) { serde_json::to_writer_pretty(out, &serialized_tx) .expect("Should be able to write to file."); display_line!( - IO, + io, "Transaction serialized to {}.", tx_path.to_string_lossy() ); } None => { - display_line!(IO, "Below the serialized transaction: \n"); - display_line!(IO, "{}", serialized_tx) + display_line!(io, "Below the serialized transaction: \n"); + display_line!(io, "{}", serialized_tx) } } } @@ -147,33 +159,18 @@ pub fn dump_tx(args: &args::Tx, tx: Tx) { /// Prepare a transaction for signing and submission by adding a wrapper header /// to it. #[allow(clippy::too_many_arguments)] -pub async fn prepare_tx< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - _wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn prepare_tx<'a>( + context: &impl Namada<'a>, args: &args::Tx, tx: &mut Tx, fee_payer: common::PublicKey, tx_source_balance: Option, ) -> Result> { if !args.dry_run { - let epoch = rpc::query_epoch(client).await?; + let epoch = rpc::query_epoch(context.client()).await?; - signing::wrap_tx::<_, _, IO>( - client, - shielded, - tx, - args, - tx_source_balance, - epoch, - fee_payer, - ) - .await + signing::wrap_tx(context, tx, args, tx_source_balance, epoch, fee_payer) + .await } else { Ok(None) } @@ -181,13 +178,8 @@ pub async fn prepare_tx< /// Submit transaction and wait for result. 
Returns a list of addresses /// initialized in the transaction if any. In dry run, this is always empty. -pub async fn process_tx< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, +pub async fn process_tx<'a>( + context: &impl Namada<'a>, args: &args::Tx, tx: Tx, ) -> Result { @@ -202,17 +194,13 @@ pub async fn process_tx< // println!("HTTP request body: {}", request_body); if args.dry_run || args.dry_run_wrapper { - expect_dry_broadcast::<_, IO>(TxBroadcastData::DryRun(tx), client).await + expect_dry_broadcast(TxBroadcastData::DryRun(tx), context).await } else { // We use this to determine when the wrapper tx makes it on-chain let wrapper_hash = tx.header_hash().to_string(); // We use this to determine when the decrypted inner tx makes it // on-chain - let decrypted_hash = tx - .clone() - .update_header(TxType::Raw) - .header_hash() - .to_string(); + let decrypted_hash = tx.raw_header_hash().to_string(); let to_broadcast = TxBroadcastData::Live { tx, wrapper_hash, @@ -222,14 +210,14 @@ pub async fn process_tx< // of masp epoch Either broadcast or submit transaction and // collect result into sum type if args.broadcast_only { - broadcast_tx::<_, IO>(client, &to_broadcast) + broadcast_tx(context, &to_broadcast) .await .map(ProcessTxResponse::Broadcast) } else { - match submit_tx::<_, IO>(client, to_broadcast).await { + match submit_tx(context, to_broadcast).await { Ok(x) => { - save_initialized_accounts::( - wallet, + save_initialized_accounts( + context, args, x.initialized_accounts.clone(), ) @@ -243,20 +231,20 @@ pub async fn process_tx< } /// Check if a reveal public key transaction is needed -pub async fn is_reveal_pk_needed( +pub async fn is_reveal_pk_needed( client: &C, address: &Address, force: bool, ) -> Result where - C: crate::sdk::queries::Client + Sync, + C: crate::queries::Client + Sync, { // Check if PK revealed Ok(force || !has_revealed_pk(client, address).await?) 
} /// Check if the public key for the given address has been revealed -pub async fn has_revealed_pk( +pub async fn has_revealed_pk( client: &C, address: &Address, ) -> Result { @@ -264,45 +252,33 @@ pub async fn has_revealed_pk( } /// Submit transaction to reveal the given public key -pub async fn build_reveal_pk< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_reveal_pk<'a>( + context: &impl Namada<'a>, args: &args::Tx, - address: &Address, public_key: &common::PublicKey, - fee_payer: &common::PublicKey, -) -> Result<(Tx, Option)> { - display_line!( - IO, - "Submitting a tx to reveal the public key for address {address}..." - ); +) -> Result<(Tx, SigningTxData, Option)> { + let signing_data = + signing::aux_signing_data(context, args, None, None).await?; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, + build( + context, args, args.tx_reveal_code_path.clone(), public_key, do_nothing, - fee_payer, + &signing_data.fee_payer, None, ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Broadcast a transaction to be included in the blockchain and checks that /// the tx has been successfully included into the mempool of a node /// /// In the case of errors in any of those stages, an error message is returned -pub async fn broadcast_tx( - rpc_cli: &C, +pub async fn broadcast_tx<'a>( + context: &impl Namada<'a>, to_broadcast: &TxBroadcastData, ) -> Result { let (tx, wrapper_tx_hash, decrypted_tx_hash) = match to_broadcast { @@ -322,21 +298,29 @@ pub async fn broadcast_tx( // TODO: configure an explicit timeout value? we need to hack away at // `tendermint-rs` for this, which is currently using a hard-coded 30s // timeout. 
- let response = - lift_rpc_error(rpc_cli.broadcast_tx_sync(tx.to_bytes().into()).await)?; + let response = lift_rpc_error( + context + .client() + .broadcast_tx_sync(tx.to_bytes().into()) + .await, + )?; if response.code == 0.into() { - display_line!(IO, "Transaction added to mempool: {:?}", response); + display_line!( + context.io(), + "Transaction added to mempool: {:?}", + response + ); // Print the transaction identifiers to enable the extraction of // acceptance/application results later { display_line!( - IO, + context.io(), "Wrapper transaction hash: {:?}", wrapper_tx_hash ); display_line!( - IO, + context.io(), "Inner transaction hash: {:?}", decrypted_tx_hash ); @@ -359,13 +343,10 @@ pub async fn broadcast_tx( /// 3. The decrypted payload of the tx has been included on the blockchain. /// /// In the case of errors in any of those stages, an error message is returned -pub async fn submit_tx( - client: &C, +pub async fn submit_tx<'a>( + context: &impl Namada<'a>, to_broadcast: TxBroadcastData, -) -> Result -where - C: crate::sdk::queries::Client + Sync, -{ +) -> Result { let (_, wrapper_hash, decrypted_hash) = match &to_broadcast { TxBroadcastData::Live { tx, @@ -376,7 +357,7 @@ where }?; // Broadcast the supplied transaction - broadcast_tx::<_, IO>(client, &to_broadcast).await?; + broadcast_tx(context, &to_broadcast).await?; let deadline = time::Instant::now() + time::Duration::from_secs( @@ -392,9 +373,7 @@ where let parsed = { let wrapper_query = rpc::TxEventQuery::Accepted(wrapper_hash.as_str()); let event = - rpc::query_tx_status::<_, IO>(client, wrapper_query, deadline) - .await - .proceed_or(TxError::AcceptTimeout)?; + rpc::query_tx_status(context, wrapper_query, deadline).await?; let parsed = TxResponse::from_event(event); let tx_to_str = |parsed| { serde_json::to_string_pretty(parsed).map_err(|err| { @@ -402,7 +381,7 @@ where }) }; display_line!( - IO, + context.io(), "Transaction accepted with result: {}", tx_to_str(&parsed)? 
); @@ -413,16 +392,12 @@ where // payload makes its way onto the blockchain let decrypted_query = rpc::TxEventQuery::Applied(decrypted_hash.as_str()); - let event = rpc::query_tx_status::<_, IO>( - client, - decrypted_query, - deadline, - ) - .await - .proceed_or(TxError::AppliedTimeout)?; + let event = + rpc::query_tx_status(context, decrypted_query, deadline) + .await?; let parsed = TxResponse::from_event(event); display_line!( - IO, + context.io(), "Transaction applied with result: {}", tx_to_str(&parsed)? ); @@ -459,8 +434,8 @@ pub fn decode_component( } /// Save accounts initialized from a tx into the wallet, if any. -pub async fn save_initialized_accounts( - wallet: &mut Wallet, +pub async fn save_initialized_accounts<'a, N: Namada<'a>>( + context: &N, args: &args::Tx, initialized_accounts: Vec
, ) { @@ -468,7 +443,7 @@ pub async fn save_initialized_accounts( if len != 0 { // Store newly initialized account addresses in the wallet display_line!( - IO, + context.io(), "The transaction initialized {} new account{}", len, if len == 1 { "" } else { "s" } @@ -489,10 +464,10 @@ pub async fn save_initialized_accounts( format!("{}{}", initialized_account_alias, ix).into() } } - None => U::read_alias(&encoded).into(), + None => N::WalletUtils::read_alias(&encoded).into(), }; let alias = alias.into_owned(); - let added = wallet.add_address( + let added = context.wallet_mut().await.add_address( alias.clone(), address.clone(), args.wallet_alias_force, @@ -500,14 +475,18 @@ pub async fn save_initialized_accounts( match added { Some(new_alias) if new_alias != encoded => { display_line!( - IO, + context.io(), "Added alias {} for address {}.", new_alias, encoded ); } _ => { - display_line!(IO, "No alias added for address {}.", encoded) + display_line!( + context.io(), + "No alias added for address {}.", + encoded + ) } }; } @@ -515,42 +494,43 @@ pub async fn save_initialized_accounts( } /// Submit validator comission rate change -pub async fn build_validator_commission_change< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_validator_commission_change<'a>( + context: &impl Namada<'a>, args::CommissionRateChange { tx: tx_args, validator, rate, tx_code_path, - }: args::CommissionRateChange, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { - let epoch = rpc::query_epoch(client).await?; + }: &args::CommissionRateChange, +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(validator.clone()); + let signing_data = signing::aux_signing_data( + context, + tx_args, + Some(validator.clone()), + default_signer, + ) + .await?; + + let epoch = rpc::query_epoch(context.client()).await?; - let params: PosParams = 
rpc::get_pos_params(client).await?; + let params: PosParams = rpc::get_pos_params(context.client()).await?; let validator = validator.clone(); - if rpc::is_validator(client, &validator).await? { - if rate < Dec::zero() || rate > Dec::one() { + if rpc::is_validator(context.client(), &validator).await? { + if *rate < Dec::zero() || *rate > Dec::one() { edisplay_line!( - IO, + context.io(), "Invalid new commission rate, received {}", rate ); - return Err(Error::from(TxError::InvalidCommissionRate(rate))); + return Err(Error::from(TxError::InvalidCommissionRate(*rate))); } let pipeline_epoch_minus_one = epoch + params.pipeline_len - 1; match rpc::query_commission_rate( - client, + context.client(), &validator, Some(pipeline_epoch_minus_one), ) @@ -560,31 +540,46 @@ pub async fn build_validator_commission_change< commission_rate, max_commission_change_per_epoch, }) => { + if rate.is_negative() || *rate > Dec::one() { + edisplay_line!( + context.io(), + "New rate is outside of the allowed range of values \ + between 0.0 and 1.0." + ); + if !tx_args.force { + return Err(Error::from( + TxError::InvalidCommissionRate(*rate), + )); + } + } if rate.abs_diff(&commission_rate) > max_commission_change_per_epoch { edisplay_line!( - IO, + context.io(), "New rate is too large of a change with respect to \ the predecessor epoch in which the rate will take \ effect." ); if !tx_args.force { return Err(Error::from( - TxError::InvalidCommissionRate(rate), + TxError::InvalidCommissionRate(*rate), )); } } } None => { - edisplay_line!(IO, "Error retrieving from storage"); + edisplay_line!(context.io(), "Error retrieving from storage"); if !tx_args.force { return Err(Error::from(TxError::Retrieval)); } } } } else { - edisplay_line!(IO, "The given address {validator} is not a validator."); + edisplay_line!( + context.io(), + "The given address {validator} is not a validator." 
+ ); if !tx_args.force { return Err(Error::from(TxError::InvalidValidatorAddress( validator, @@ -594,43 +589,47 @@ pub async fn build_validator_commission_change< let data = pos::CommissionChange { validator: validator.clone(), - new_rate: rate, + new_rate: *rate, }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, do_nothing, - &fee_payer, + &signing_data.fee_payer, None, ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Craft transaction to update a steward commission -pub async fn build_update_steward_commission< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_update_steward_commission<'a>( + context: &impl Namada<'a>, args::UpdateStewardCommission { tx: tx_args, steward, commission, tx_code_path, - }: args::UpdateStewardCommission, - gas_payer: &common::PublicKey, -) -> Result<(Tx, Option)> { - if !rpc::is_steward(client, &steward).await && !tx_args.force { - edisplay_line!(IO, "The given address {} is not a steward.", &steward); + }: &args::UpdateStewardCommission, +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(steward.clone()); + let signing_data = signing::aux_signing_data( + context, + tx_args, + Some(steward.clone()), + default_signer, + ) + .await?; + + if !rpc::is_steward(context.client(), steward).await && !tx_args.force { + edisplay_line!( + context.io(), + "The given address {} is not a steward.", + &steward + ); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); }; @@ -639,7 +638,7 @@ pub async fn build_update_steward_commission< if !commission.is_valid() && !tx_args.force { edisplay_line!( - IO, + context.io(), "The sum of all percentage must not be greater than 1." 
); return Err(Error::from(TxError::InvalidStewardCommission( @@ -652,76 +651,80 @@ pub async fn build_update_steward_commission< commission: commission.reward_distribution, }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, do_nothing, - gas_payer, + &signing_data.fee_payer, None, ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Craft transaction to resign as a steward -pub async fn build_resign_steward< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_resign_steward<'a>( + context: &impl Namada<'a>, args::ResignSteward { tx: tx_args, steward, tx_code_path, - }: args::ResignSteward, - gas_payer: &common::PublicKey, -) -> Result<(Tx, Option)> { - if !rpc::is_steward(client, &steward).await && !tx_args.force { - edisplay_line!(IO, "The given address {} is not a steward.", &steward); + }: &args::ResignSteward, +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(steward.clone()); + let signing_data = signing::aux_signing_data( + context, + tx_args, + Some(steward.clone()), + default_signer, + ) + .await?; + + if !rpc::is_steward(context.client(), steward).await && !tx_args.force { + edisplay_line!( + context.io(), + "The given address {} is not a steward.", + &steward + ); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, - steward, + build( + context, + tx_args, + tx_code_path.clone(), + steward.clone(), do_nothing, - gas_payer, + &signing_data.fee_payer, None, ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit transaction to unjail a jailed validator -pub async fn build_unjail_validator< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: 
Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_unjail_validator<'a>( + context: &impl Namada<'a>, args::TxUnjailValidator { tx: tx_args, validator, tx_code_path, - }: args::TxUnjailValidator, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { - if !rpc::is_validator(client, &validator).await? { + }: &args::TxUnjailValidator, +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(validator.clone()); + let signing_data = signing::aux_signing_data( + context, + tx_args, + Some(validator.clone()), + default_signer, + ) + .await?; + + if !rpc::is_validator(context.client(), validator).await? { edisplay_line!( - IO, + context.io(), "The given address {} is not a validator.", &validator ); @@ -732,21 +735,19 @@ pub async fn build_unjail_validator< } } - let params: PosParams = rpc::get_pos_params(client).await?; - let current_epoch = rpc::query_epoch(client).await?; + let params: PosParams = rpc::get_pos_params(context.client()).await?; + let current_epoch = rpc::query_epoch(context.client()).await?; let pipeline_epoch = current_epoch + params.pipeline_len; - let validator_state_at_pipeline = - rpc::get_validator_state(client, &validator, Some(pipeline_epoch)) - .await? 
- .ok_or_else(|| { - Error::from(TxError::Other( - "Validator state should be defined.".to_string(), - )) - })?; - if validator_state_at_pipeline != ValidatorState::Jailed { + let validator_state_at_pipeline = rpc::get_validator_state( + context.client(), + validator, + Some(pipeline_epoch), + ) + .await?; + if validator_state_at_pipeline != Some(ValidatorState::Jailed) { edisplay_line!( - IO, + context.io(), "The given validator address {} is not jailed at the pipeline \ epoch when it would be restored to one of the validator sets.", &validator @@ -759,17 +760,19 @@ pub async fn build_unjail_validator< } let last_slash_epoch_key = - crate::ledger::pos::validator_last_slash_key(&validator); - let last_slash_epoch = - rpc::query_storage_value::(client, &last_slash_epoch_key) - .await; + namada_proof_of_stake::storage::validator_last_slash_key(validator); + let last_slash_epoch = rpc::query_storage_value::<_, Epoch>( + context.client(), + &last_slash_epoch_key, + ) + .await; match last_slash_epoch { Ok(last_slash_epoch) => { let eligible_epoch = last_slash_epoch + params.slash_processing_epoch_offset(); if current_epoch < eligible_epoch { edisplay_line!( - IO, + context.io(), "The given validator address {} is currently frozen and \ not yet eligible to be unjailed.", &validator @@ -795,53 +798,205 @@ pub async fn build_unjail_validator< Err(err) => return Err(err), } - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, + build( + context, + tx_args, + tx_code_path.clone(), + validator.clone(), + do_nothing, + &signing_data.fee_payer, + None, + ) + .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) +} + +/// Redelegate bonded tokens from one validator to another +pub async fn build_redelegation<'a>( + context: &impl Namada<'a>, + args::Redelegate { + tx: tx_args, + src_validator, + dest_validator, + owner, + amount: redel_amount, tx_code_path, - validator, + }: &args::Redelegate, +) -> Result<(Tx, SigningTxData)> { + // Require a positive 
amount of tokens to be redelegated + if redel_amount.is_zero() { + edisplay_line!( + context.io(), + "The requested redelegation amount is 0. A positive amount must \ + be requested." + ); + if !tx_args.force { + return Err(Error::from(TxError::RedelegationIsZero)); + } + } + + // The src and dest validators must actually be validators + let src_validator = + known_validator_or_err(src_validator.clone(), tx_args.force, context) + .await?; + let dest_validator = + known_validator_or_err(dest_validator.clone(), tx_args.force, context) + .await?; + + // The delegator (owner) must exist on-chain and must not be a validator + let owner = + source_exists_or_err(owner.clone(), tx_args.force, context).await?; + if rpc::is_validator(context.client(), &owner).await? { + edisplay_line!( + context.io(), + "The given address {} is a validator. A validator is prohibited \ + from redelegating its own bonds.", + &owner + ); + if !tx_args.force { + return Err(Error::from(TxError::RedelegatorIsValidator( + owner.clone(), + ))); + } + } + + // Prohibit redelegation to the same validator + if src_validator == dest_validator { + edisplay_line!( + context.io(), + "The provided source and destination validators are the same. \ + Redelegation is not allowed to the same validator." 
+ ); + if !tx_args.force { + return Err(Error::from(TxError::RedelegationSrcEqDest)); + } + } + + // Prohibit chained redelegations + let params = rpc::get_pos_params(context.client()).await?; + let incoming_redel_epoch = rpc::query_incoming_redelegations( + context.client(), + &src_validator, + &owner, + ) + .await?; + let current_epoch = rpc::query_epoch(context.client()).await?; + let is_not_chained = if let Some(redel_end_epoch) = incoming_redel_epoch { + let last_contrib_epoch = redel_end_epoch.prev(); + last_contrib_epoch + params.slash_processing_epoch_offset() + <= current_epoch + } else { + true + }; + if !is_not_chained { + edisplay_line!( + context.io(), + "The source validator {} has an incoming redelegation from the \ + delegator {} that may still be subject to future slashing. \ + Redelegation is not allowed until this is no longer the case.", + &src_validator, + &owner + ); + if !tx_args.force { + return Err(Error::from(TxError::IncomingRedelIsStillSlashable( + src_validator.clone(), + owner.clone(), + ))); + } + } + + // There must be at least as many tokens in the bond as the requested + // redelegation amount + let bond_amount = + rpc::query_bond(context.client(), &owner, &src_validator, None).await?; + if *redel_amount > bond_amount { + edisplay_line!( + context.io(), + "There are not enough tokens available for the desired \ + redelegation at the current epoch {}. Requested to redelegate {} \ + tokens but only {} tokens are available.", + current_epoch, + redel_amount.to_string_native(), + bond_amount.to_string_native() + ); + if !tx_args.force { + return Err(Error::from(TxError::RedelegationAmountTooLarge( + redel_amount.to_string_native(), + bond_amount.to_string_native(), + ))); + } + } else { + display_line!( + context.io(), + "{} NAM tokens available for redelegation. 
Submitting \ + redelegation transaction for {} tokens...", + bond_amount.to_string_native(), + redel_amount.to_string_native() + ); + } + + let default_address = owner.clone(); + let default_signer = Some(default_address.clone()); + let signing_data = signing::aux_signing_data( + context, + tx_args, + Some(default_address), + default_signer, + ) + .await?; + + let data = pos::Redelegation { + src_validator, + dest_validator, + owner, + amount: *redel_amount, + }; + + build( + context, + tx_args, + tx_code_path.clone(), + data, do_nothing, - &fee_payer, + &signing_data.fee_payer, None, ) .await + .map(|(tx, _epoch)| (tx, signing_data)) } /// Submit transaction to withdraw an unbond -pub async fn build_withdraw< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_withdraw<'a>( + context: &impl Namada<'a>, args::Withdraw { tx: tx_args, validator, source, tx_code_path, - }: args::Withdraw, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { - let epoch = rpc::query_epoch(client).await?; - - let validator = known_validator_or_err::<_, IO>( - validator.clone(), - tx_args.force, - client, + }: &args::Withdraw, +) -> Result<(Tx, SigningTxData, Option)> { + let default_address = source.clone().unwrap_or(validator.clone()); + let default_signer = Some(default_address.clone()); + let signing_data = signing::aux_signing_data( + context, + tx_args, + Some(default_address), + default_signer, ) .await?; + let epoch = rpc::query_epoch(context.client()).await?; + + let validator = + known_validator_or_err(validator.clone(), tx_args.force, context) + .await?; + let source = source.clone(); // Check the source's current unbond amount let bond_source = source.clone().unwrap_or_else(|| validator.clone()); let tokens = rpc::query_withdrawable_tokens( - client, + context.client(), &bond_source, &validator, Some(epoch), @@ -850,104 +1005,123 @@ pub 
async fn build_withdraw< if tokens.is_zero() { edisplay_line!( - IO, + context.io(), "There are no unbonded bonds ready to withdraw in the current \ epoch {}.", epoch ); - rpc::query_and_print_unbonds::<_, IO>(client, &bond_source, &validator) - .await?; + rpc::query_and_print_unbonds(context, &bond_source, &validator).await?; if !tx_args.force { return Err(Error::from(TxError::NoUnbondReady(epoch))); } } else { display_line!( - IO, + context.io(), "Found {} tokens that can be withdrawn.", tokens.to_string_native() ); - display_line!(IO, "Submitting transaction to withdraw them..."); + display_line!( + context.io(), + "Submitting transaction to withdraw them..." + ); } let data = pos::Withdraw { validator, source }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, do_nothing, - &fee_payer, + &signing_data.fee_payer, None, ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit a transaction to unbond -pub async fn build_unbond< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_unbond<'a>( + context: &impl Namada<'a>, args::Unbond { tx: tx_args, validator, amount, source, tx_code_path, - }: args::Unbond, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option, Option<(Epoch, token::Amount)>)> { - let source = source.clone(); + }: &args::Unbond, +) -> Result<( + Tx, + SigningTxData, + Option, + Option<(Epoch, token::Amount)>, +)> { + // Require a positive amount of tokens to be bonded + if amount.is_zero() { + edisplay_line!( + context.io(), + "The requested bond amount is 0. A positive amount must be \ + requested." 
+ ); + if !tx_args.force { + return Err(Error::from(TxError::BondIsZero)); + } + } + + // The validator must actually be a validator + let validator = + known_validator_or_err(validator.clone(), tx_args.force, context) + .await?; + + // Check that the source address exists on chain + let source = match source.clone() { + Some(source) => source_exists_or_err(source, tx_args.force, context) + .await + .map(Some), + None => Ok(source.clone()), + }?; + + let default_address = source.clone().unwrap_or(validator.clone()); + let default_signer = Some(default_address.clone()); + let signing_data = signing::aux_signing_data( + context, + tx_args, + Some(default_address), + default_signer, + ) + .await?; + // Check the source's current bond amount let bond_source = source.clone().unwrap_or_else(|| validator.clone()); - if !tx_args.force { - known_validator_or_err::<_, IO>( - validator.clone(), - tx_args.force, - client, - ) - .await?; + let bond_amount = + rpc::query_bond(context.client(), &bond_source, &validator, None) + .await?; + display_line!( + context.io(), + "Bond amount available for unbonding: {} NAM", + bond_amount.to_string_native() + ); - let bond_amount = - rpc::query_bond(client, &bond_source, &validator, None).await?; - display_line!( - IO, - "Bond amount available for unbonding: {} NAM", - bond_amount.to_string_native() + if *amount > bond_amount { + edisplay_line!( + context.io(), + "The total bonds of the source {} is lower than the amount to be \ + unbonded. Amount to unbond is {} and the total bonds is {}.", + bond_source, + amount.to_string_native(), + bond_amount.to_string_native(), ); - - if amount > bond_amount { - edisplay_line!( - IO, - "The total bonds of the source {} is lower than the amount to \ - be unbonded. 
Amount to unbond is {} and the total bonds is \ - {}.", - bond_source, - amount.to_string_native(), - bond_amount.to_string_native() - ); - if !tx_args.force { - return Err(Error::from(TxError::LowerBondThanUnbond( - bond_source, - amount.to_string_native(), - bond_amount.to_string_native(), - ))); - } - } } // Query the unbonds before submitting the tx - let unbonds = - rpc::query_unbond_with_slashing(client, &bond_source, &validator) - .await?; + let unbonds = rpc::query_unbond_with_slashing( + context.client(), + &bond_source, + &validator, + ) + .await?; let mut withdrawable = BTreeMap::::new(); for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { let to_withdraw = withdrawable.entry(withdraw_epoch).or_default(); @@ -957,28 +1131,26 @@ pub async fn build_unbond< let data = pos::Unbond { validator: validator.clone(), - amount, + amount: *amount, source: source.clone(), }; - let (tx, epoch) = build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + let (tx, epoch) = build( + context, + tx_args, + tx_code_path.clone(), data, do_nothing, - &fee_payer, + &signing_data.fee_payer, None, ) .await?; - Ok((tx, epoch, latest_withdrawal_pre)) + Ok((tx, signing_data, epoch, latest_withdrawal_pre)) } /// Query the unbonds post-tx -pub async fn query_unbonds( - client: &C, +pub async fn query_unbonds<'a>( + context: &impl Namada<'a>, args: args::Unbond, latest_withdrawal_pre: Option<(Epoch, token::Amount)>, ) -> Result<()> { @@ -987,9 +1159,12 @@ pub async fn query_unbonds( let bond_source = source.clone().unwrap_or_else(|| args.validator.clone()); // Query the unbonds post-tx - let unbonds = - rpc::query_unbond_with_slashing(client, &bond_source, &args.validator) - .await?; + let unbonds = rpc::query_unbond_with_slashing( + context.client(), + &bond_source, + &args.validator, + ) + .await?; let mut withdrawable = BTreeMap::::new(); for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { let to_withdraw = 
withdrawable.entry(withdraw_epoch).or_default(); @@ -1007,7 +1182,7 @@ pub async fn query_unbonds( std::cmp::Ordering::Less => { if args.tx.force { edisplay_line!( - IO, + context.io(), "Unexpected behavior reading the unbonds data has \ occurred" ); @@ -1017,7 +1192,7 @@ pub async fn query_unbonds( } std::cmp::Ordering::Equal => { display_line!( - IO, + context.io(), "Amount {} withdrawable starting from epoch {}", (latest_withdraw_amount_post - latest_withdraw_amount_pre) .to_string_native(), @@ -1026,7 +1201,7 @@ pub async fn query_unbonds( } std::cmp::Ordering::Greater => { display_line!( - IO, + context.io(), "Amount {} withdrawable starting from epoch {}", latest_withdraw_amount_post.to_string_native(), latest_withdraw_epoch_post, @@ -1035,7 +1210,7 @@ pub async fn query_unbonds( } } else { display_line!( - IO, + context.io(), "Amount {} withdrawable starting from epoch {}", latest_withdraw_amount_post.to_string_native(), latest_withdraw_epoch_post, @@ -1045,15 +1220,8 @@ pub async fn query_unbonds( } /// Submit a transaction to bond -pub async fn build_bond< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_bond<'a>( + context: &impl Namada<'a>, args::Bond { tx: tx_args, validator, @@ -1061,76 +1229,86 @@ pub async fn build_bond< source, native_token, tx_code_path, - }: args::Bond, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { - let validator = known_validator_or_err::<_, IO>( - validator.clone(), - tx_args.force, - client, - ) - .await?; + }: &args::Bond, +) -> Result<(Tx, SigningTxData, Option)> { + // Require a positive amount of tokens to be bonded + if amount.is_zero() { + edisplay_line!( + context.io(), + "The requested bond amount is 0. A positive amount must be \ + requested." 
+ ); + if !tx_args.force { + return Err(Error::from(TxError::BondIsZero)); + } + } + + // The validator must actually be a validator + let validator = + known_validator_or_err(validator.clone(), tx_args.force, context) + .await?; // Check that the source address exists on chain let source = match source.clone() { - Some(source) => { - source_exists_or_err::<_, IO>(source, tx_args.force, client) - .await - .map(Some) - } + Some(source) => source_exists_or_err(source, tx_args.force, context) + .await + .map(Some), None => Ok(source.clone()), }?; + + let default_address = source.clone().unwrap_or(validator.clone()); + let default_signer = Some(default_address.clone()); + let signing_data = signing::aux_signing_data( + context, + tx_args, + Some(default_address.clone()), + default_signer, + ) + .await?; + // Check bond's source (source for delegation or validator for self-bonds) // balance let bond_source = source.as_ref().unwrap_or(&validator); - let balance_key = token::balance_key(&native_token, bond_source); + let balance_key = token::balance_key(native_token, bond_source); // TODO Should we state the same error message for the native token? 
- let post_balance = check_balance_too_low_err::<_, IO>( - &native_token, + let post_balance = check_balance_too_low_err( + native_token, bond_source, - amount, + *amount, balance_key, tx_args.force, - client, + context, ) .await?; let tx_source_balance = Some(TxSourcePostBalance { post_balance, source: bond_source.clone(), - token: native_token, + token: native_token.clone(), }); let data = pos::Bond { validator, - amount, + amount: *amount, source, }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, do_nothing, - &fee_payer, + &signing_data.fee_payer, tx_source_balance, ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a default proposal governance -pub async fn build_default_proposal< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_default_proposal<'a>( + context: &impl Namada<'a>, args::InitProposal { tx, proposal_data: _, @@ -1139,10 +1317,18 @@ pub async fn build_default_proposal< is_pgf_stewards: _, is_pgf_funding: _, tx_code_path, - }: args::InitProposal, + }: &args::InitProposal, proposal: DefaultProposal, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(proposal.proposal.author.clone()); + let signing_data = signing::aux_signing_data( + context, + tx, + Some(proposal.proposal.author.clone()), + default_signer, + ) + .await?; + let init_proposal_data = InitProposalData::try_from(proposal.clone()) .map_err(|e| TxError::InvalidProposal(e.to_string()))?; @@ -1160,30 +1346,22 @@ pub async fn build_default_proposal< }; Ok(()) }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx, - tx_code_path, + build( + context, + tx, + tx_code_path.clone(), init_proposal_data, push_data, - &fee_payer, + 
&signing_data.fee_payer, None, // TODO: need to pay the fee to submit a proposal ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a proposal vote -pub async fn build_vote_proposal< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_vote_proposal<'a>( + context: &impl Namada<'a>, args::VoteProposal { tx, proposal_id, @@ -1192,18 +1370,26 @@ pub async fn build_vote_proposal< is_offline: _, proposal_data: _, tx_code_path, - }: args::VoteProposal, + }: &args::VoteProposal, epoch: Epoch, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { - let proposal_vote = ProposalVote::try_from(vote) +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(voter.clone()); + let signing_data = signing::aux_signing_data( + context, + tx, + Some(voter.clone()), + default_signer.clone(), + ) + .await?; + + let proposal_vote = ProposalVote::try_from(vote.clone()) .map_err(|_| TxError::InvalidProposalVote)?; let proposal_id = proposal_id.ok_or_else(|| { Error::Other("Proposal id must be defined.".to_string()) })?; let proposal = if let Some(proposal) = - rpc::query_proposal_by_id(client, proposal_id).await? + rpc::query_proposal_by_id(context.client(), proposal_id).await? { proposal } else { @@ -1218,7 +1404,7 @@ pub async fn build_vote_proposal< )) })?; - let is_validator = rpc::is_validator(client, &voter).await?; + let is_validator = rpc::is_validator(context.client(), voter).await?; if !proposal.can_be_voted(epoch, is_validator) { if tx.force { @@ -1231,8 +1417,8 @@ pub async fn build_vote_proposal< } let delegations = rpc::get_delegators_delegation_at( - client, - &voter, + context.client(), + voter, proposal.voting_start_epoch, ) .await? 
@@ -1247,30 +1433,22 @@ pub async fn build_vote_proposal< delegations, }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx, - tx_code_path, + build( + context, + tx, + tx_code_path.clone(), data, do_nothing, - &fee_payer, + &signing_data.fee_payer, None, ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a pgf funding proposal governance -pub async fn build_pgf_funding_proposal< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_pgf_funding_proposal<'a>( + context: &impl Namada<'a>, args::InitProposal { tx, proposal_data: _, @@ -1279,10 +1457,18 @@ pub async fn build_pgf_funding_proposal< is_pgf_stewards: _, is_pgf_funding: _, tx_code_path, - }: args::InitProposal, + }: &args::InitProposal, proposal: PgfFundingProposal, - fee_payer: &common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(proposal.proposal.author.clone()); + let signing_data = signing::aux_signing_data( + context, + tx, + Some(proposal.proposal.author.clone()), + default_signer, + ) + .await?; + let init_proposal_data = InitProposalData::try_from(proposal.clone()) .map_err(|e| TxError::InvalidProposal(e.to_string()))?; @@ -1292,30 +1478,22 @@ pub async fn build_pgf_funding_proposal< data.content = extra_section_hash; Ok(()) }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx, - tx_code_path, + build( + context, + tx, + tx_code_path.clone(), init_proposal_data, add_section, - fee_payer, + &signing_data.fee_payer, None, // TODO: need to pay the fee to submit a proposal ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Build a pgf funding proposal governance -pub async fn build_pgf_stewards_proposal< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: 
&mut ShieldedContext, +pub async fn build_pgf_stewards_proposal<'a>( + context: &impl Namada<'a>, args::InitProposal { tx, proposal_data: _, @@ -1324,10 +1502,18 @@ pub async fn build_pgf_stewards_proposal< is_pgf_stewards: _, is_pgf_funding: _, tx_code_path, - }: args::InitProposal, + }: &args::InitProposal, proposal: PgfStewardProposal, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(proposal.proposal.author.clone()); + let signing_data = signing::aux_signing_data( + context, + tx, + Some(proposal.proposal.author.clone()), + default_signer, + ) + .await?; + let init_proposal_data = InitProposalData::try_from(proposal.clone()) .map_err(|e| TxError::InvalidProposal(e.to_string()))?; @@ -1338,50 +1524,43 @@ pub async fn build_pgf_stewards_proposal< Ok(()) }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx, - tx_code_path, + build( + context, + tx, + tx_code_path.clone(), init_proposal_data, add_section, - &fee_payer, + &signing_data.fee_payer, None, // TODO: need to pay the fee to submit a proposal ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit an IBC transfer -pub async fn build_ibc_transfer< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, - args: args::TxIbcTransfer, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { - // Check that the source address exists on chain - let source = source_exists_or_err::<_, IO>( - args.source.clone(), - args.tx.force, - client, +pub async fn build_ibc_transfer<'a>( + context: &impl Namada<'a>, + args: &args::TxIbcTransfer, +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(args.source.clone()); + let signing_data = signing::aux_signing_data( + context, + &args.tx, + Some(args.source.clone()), + default_signer, ) .await?; + // Check that the source address 
exists on chain + let source = + source_exists_or_err(args.source.clone(), args.tx.force, context) + .await?; // We cannot check the receiver // validate the amount given - let validated_amount = validate_amount::<_, IO>( - client, - args.amount, - &args.token, - args.tx.force, - ) - .await?; + let validated_amount = + validate_amount(context, args.amount, &args.token, args.tx.force) + .await + .expect("expected to validate amount"); if validated_amount.canonical().denom.0 != 0 { return Err(Error::Other(format!( "The amount for the IBC transfer should be an integer: {}", @@ -1392,13 +1571,13 @@ pub async fn build_ibc_transfer< // Check source balance let balance_key = token::balance_key(&args.token, &source); - let post_balance = check_balance_too_low_err::<_, IO>( + let post_balance = check_balance_too_low_err( &args.token, &source, validated_amount.amount, balance_key, args.tx.force, - client, + context, ) .await?; let tx_source_balance = Some(TxSourcePostBalance { @@ -1407,22 +1586,13 @@ pub async fn build_ibc_transfer< token: args.token.clone(), }); - let tx_code_hash = query_wasm_code_hash::<_, IO>( - client, - args.tx_code_path.to_str().unwrap(), - ) - .await - .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let tx_code_hash = + query_wasm_code_hash(context, args.tx_code_path.to_str().unwrap()) + .await + .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; - let ibc_denom = match &args.token { - Address::Internal(InternalAddress::IbcToken(hash)) => { - let ibc_denom_key = ibc_denom_key(hash); - rpc::query_storage_value::(client, &ibc_denom_key) - .await - .map_err(|_e| TxError::TokenDoesNotExist(args.token.clone()))? 
- } - _ => args.token.to_string(), - }; + let ibc_denom = + rpc::query_ibc_denom(context, &args.token, Some(&source)).await; let token = PrefixedCoin { denom: ibc_denom.parse().expect("Invalid IBC denom"), // Set the IBC amount as an integer @@ -1431,8 +1601,8 @@ pub async fn build_ibc_transfer< let packet_data = PacketData { token, sender: source.to_string().into(), - receiver: args.receiver.into(), - memo: args.memo.unwrap_or_default().into(), + receiver: args.receiver.clone().into(), + memo: args.memo.clone().unwrap_or_default().into(), }; // this height should be that of the destination chain, not this chain @@ -1463,8 +1633,8 @@ pub async fn build_ibc_transfer< }; let msg = MsgTransfer { - port_id_on_a: args.port_id, - chan_id_on_a: args.channel_id, + port_id_on_a: args.port_id.clone(), + chan_id_on_a: args.channel_id.clone(), packet_data, timeout_height_on_b: timeout_height, timeout_timestamp_on_b: timeout_timestamp, @@ -1480,27 +1650,23 @@ pub async fn build_ibc_transfer< tx.add_code_from_hash(tx_code_hash) .add_serialized_data(data); - let epoch = prepare_tx::( - client, - wallet, - shielded, + let epoch = prepare_tx( + context, &args.tx, &mut tx, - fee_payer, + signing_data.fee_payer.clone(), tx_source_balance, ) .await?; - Ok((tx, epoch)) + Ok((tx, signing_data, epoch)) } /// Abstraction for helping build transactions #[allow(clippy::too_many_arguments)] -pub async fn build( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, - tx_args: &crate::sdk::args::Tx, +pub async fn build<'a, F, D>( + context: &impl Namada<'a>, + tx_args: &crate::args::Tx, path: PathBuf, data: D, on_tx: F, @@ -1510,14 +1676,9 @@ pub async fn build( where F: FnOnce(&mut Tx, &mut D) -> Result<()>, D: BorshSerialize, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, { - build_pow_flag::<_, _, _, _, _, IO>( - client, - wallet, - shielded, + build_pow_flag( + context, tx_args, path, data, @@ -1529,18 +1690,9 @@ where } #[allow(clippy::too_many_arguments)] -async fn 
build_pow_flag< - C: crate::ledger::queries::Client + Sync, - U, - V, - F, - D, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, - tx_args: &crate::sdk::args::Tx, +async fn build_pow_flag<'a, F, D>( + context: &impl Namada<'a>, + tx_args: &crate::args::Tx, path: PathBuf, mut data: D, on_tx: F, @@ -1550,26 +1702,21 @@ async fn build_pow_flag< where F: FnOnce(&mut Tx, &mut D) -> Result<()>, D: BorshSerialize, - U: WalletUtils, - V: ShieldedUtils, { let chain_id = tx_args.chain_id.clone().unwrap(); let mut tx_builder = Tx::new(chain_id, tx_args.expiration); - let tx_code_hash = - query_wasm_code_hash::<_, IO>(client, path.to_string_lossy()) - .await - .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let tx_code_hash = query_wasm_code_hash(context, path.to_string_lossy()) + .await + .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; on_tx(&mut tx_builder, &mut data)?; tx_builder.add_code_from_hash(tx_code_hash).add_data(data); - let epoch = prepare_tx::( - client, - wallet, - shielded, + let epoch = prepare_tx( + context, tx_args, &mut tx_builder, gas_payer.clone(), @@ -1581,17 +1728,16 @@ where /// Try to decode the given asset type and add its decoding to the supplied set. /// Returns true only if a new decoding has been added to the given set. -async fn add_asset_type< - C: crate::sdk::queries::Client + Sync, - U: ShieldedUtils, ->( +async fn add_asset_type<'a>( asset_types: &mut HashSet<(Address, MaspDenom, Epoch)>, - shielded: &mut ShieldedContext, - client: &C, + context: &impl Namada<'a>, asset_type: AssetType, ) -> bool { - if let Some(asset_type) = - shielded.decode_asset_type(client, asset_type).await + if let Some(asset_type) = context + .shielded_mut() + .await + .decode_asset_type(context.client(), asset_type) + .await { asset_types.insert(asset_type) } else { @@ -1602,101 +1748,82 @@ async fn add_asset_type< /// Collect the asset types used in the given Builder and decode them. 
This /// function provides the data necessary for offline wallets to present asset /// type information. -async fn used_asset_types< - C: crate::sdk::queries::Client + Sync, - U: ShieldedUtils, - P, - R, - K, - N, ->( - shielded: &mut ShieldedContext, - client: &C, +async fn used_asset_types<'a, P, R, K, N>( + context: &impl Namada<'a>, builder: &Builder, ) -> std::result::Result, RpcError> { let mut asset_types = HashSet::new(); // Collect all the asset types used in the Sapling inputs for input in builder.sapling_inputs() { - add_asset_type(&mut asset_types, shielded, client, input.asset_type()) - .await; + add_asset_type(&mut asset_types, context, input.asset_type()).await; } // Collect all the asset types used in the transparent inputs for input in builder.transparent_inputs() { - add_asset_type( - &mut asset_types, - shielded, - client, - input.coin().asset_type(), - ) - .await; + add_asset_type(&mut asset_types, context, input.coin().asset_type()) + .await; } // Collect all the asset types used in the Sapling outputs for output in builder.sapling_outputs() { - add_asset_type(&mut asset_types, shielded, client, output.asset_type()) - .await; + add_asset_type(&mut asset_types, context, output.asset_type()).await; } // Collect all the asset types used in the transparent outputs for output in builder.transparent_outputs() { - add_asset_type(&mut asset_types, shielded, client, output.asset_type()) - .await; + add_asset_type(&mut asset_types, context, output.asset_type()).await; } // Collect all the asset types used in the Sapling converts for output in builder.sapling_converts() { for (asset_type, _) in - I32Sum::from(output.conversion().clone()).components() + I128Sum::from(output.conversion().clone()).components() { - add_asset_type(&mut asset_types, shielded, client, *asset_type) - .await; + add_asset_type(&mut asset_types, context, *asset_type).await; } } Ok(asset_types) } /// Submit an ordinary transfer -pub async fn build_transfer< - C: 
crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, - mut args: args::TxTransfer, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { +pub async fn build_transfer<'a, N: Namada<'a>>( + context: &N, + args: &mut args::TxTransfer, +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(args.source.effective_address()); + let signing_data = signing::aux_signing_data( + context, + &args.tx, + Some(args.source.effective_address()), + default_signer, + ) + .await?; + let source = args.source.effective_address(); let target = args.target.effective_address(); - let token = args.token.clone(); // Check that the source address exists on chain - source_exists_or_err::<_, IO>(source.clone(), args.tx.force, client) - .await?; + source_exists_or_err(source.clone(), args.tx.force, context).await?; // Check that the target address exists on chain - target_exists_or_err::<_, IO>(target.clone(), args.tx.force, client) - .await?; + target_exists_or_err(target.clone(), args.tx.force, context).await?; // Check source balance - let balance_key = token::balance_key(&token, &source); + let balance_key = token::balance_key(&args.token, &source); // validate the amount given let validated_amount = - validate_amount::<_, IO>(client, args.amount, &token, args.tx.force) + validate_amount(context, args.amount, &args.token, args.tx.force) .await?; args.amount = InputAmount::Validated(validated_amount); - let post_balance = check_balance_too_low_err::( - &token, + let post_balance = check_balance_too_low_err( + &args.token, &source, validated_amount.amount, balance_key, args.tx.force, - client, + context, ) .await?; let tx_source_balance = Some(TxSourcePostBalance { post_balance, source: source.clone(), - token: token.clone(), + token: args.token.clone(), }); let masp_addr = masp(); @@ -1707,9 +1834,9 @@ pub async fn build_transfer< // This has no side-effect because 
transaction is to self. let (_amount, token) = if source == masp_addr && target == masp_addr { // TODO Refactor me, we shouldn't rely on any specific token here. - (token::Amount::default(), args.native_token.clone()) + (token::Amount::zero(), args.native_token.clone()) } else { - (validated_amount.amount, token) + (validated_amount.amount, args.token.clone()) }; // Determine whether to pin this transaction to a storage key let key = match &args.target { @@ -1718,8 +1845,14 @@ pub async fn build_transfer< }; // Construct the shielded part of the transaction, if any - let stx_result = shielded - .gen_shielded_transfer::<_, IO>(client, args.clone()) + let stx_result = + ShieldedContext::::gen_shielded_transfer( + context, + &args.source, + &args.target, + &args.token, + validated_amount, + ) .await; let shielded_parts = match stx_result { @@ -1741,10 +1874,9 @@ pub async fn build_transfer< Some(transfer) => { // Get the decoded asset types used in the transaction to give // offline wallet users more information - let asset_types = - used_asset_types(shielded, client, &transfer.builder) - .await - .unwrap_or_default(); + let asset_types = used_asset_types(context, &transfer.builder) + .await + .unwrap_or_default(); Some(asset_types) } }; @@ -1788,15 +1920,13 @@ pub async fn build_transfer< }; Ok(()) }; - let (tx, unshielding_epoch) = build_pow_flag::<_, _, _, _, _, IO>( - client, - wallet, - shielded, + let (tx, unshielding_epoch) = build_pow_flag( + context, &args.tx, - args.tx_code_path, + args.tx_code_path.clone(), transfer, add_shielded, - &fee_payer, + &signing_data.fee_payer, tx_source_balance, ) .await?; @@ -1819,33 +1949,27 @@ pub async fn build_transfer< (None, Some(_transfer_unshield_epoch)) => shielded_tx_epoch, (None, None) => None, }; - Ok((tx, masp_epoch)) + Ok((tx, signing_data, masp_epoch)) } /// Submit a transaction to initialize an account -pub async fn build_init_account< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, 
- IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_init_account<'a>( + context: &impl Namada<'a>, args::TxInitAccount { tx: tx_args, vp_code_path, tx_code_path, public_keys, threshold, - }: args::TxInitAccount, - fee_payer: &common::PublicKey, -) -> Result<(Tx, Option)> { - let vp_code_hash = - query_wasm_code_hash_buf::<_, IO>(client, &vp_code_path).await?; + }: &args::TxInitAccount, +) -> Result<(Tx, SigningTxData, Option)> { + let signing_data = + signing::aux_signing_data(context, tx_args, None, None).await?; + + let vp_code_hash = query_wasm_code_hash_buf(context, vp_code_path).await?; let threshold = match threshold { - Some(threshold) => threshold, + Some(threshold) => *threshold, None => { if public_keys.len() == 1 { 1u8 @@ -1856,7 +1980,7 @@ pub async fn build_init_account< }; let data = InitAccount { - public_keys, + public_keys: public_keys.clone(), // We will add the hash inside the add_code_hash function vp_code_hash: Hash::zero(), threshold, @@ -1867,30 +1991,22 @@ pub async fn build_init_account< data.vp_code_hash = extra_section_hash; Ok(()) }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, add_code_hash, - fee_payer, + &signing_data.fee_payer, None, ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit a transaction to update a VP -pub async fn build_update_account< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_update_account<'a>( + context: &impl Namada<'a>, args::TxUpdateAccount { tx: tx_args, vp_code_path, @@ -1898,22 +2014,30 @@ pub async fn build_update_account< addr, public_keys, threshold, - }: args::TxUpdateAccount, - fee_payer: common::PublicKey, -) -> Result<(Tx, Option)> { - let addr = - if let Some(account) = 
rpc::get_account_info(client, &addr).await? { - account.address - } else if tx_args.force { - addr - } else { - return Err(Error::from(TxError::LocationDoesNotExist(addr))); - }; + }: &args::TxUpdateAccount, +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(addr.clone()); + let signing_data = signing::aux_signing_data( + context, + tx_args, + Some(addr.clone()), + default_signer, + ) + .await?; + + let addr = if let Some(account) = + rpc::get_account_info(context.client(), addr).await? + { + account.address + } else if tx_args.force { + addr.clone() + } else { + return Err(Error::from(TxError::LocationDoesNotExist(addr.clone()))); + }; let vp_code_hash = match vp_code_path { Some(code_path) => { - let vp_hash = - query_wasm_code_hash_buf::<_, IO>(client, &code_path).await?; + let vp_hash = query_wasm_code_hash_buf(context, code_path).await?; Some(vp_hash) } None => None, @@ -1927,8 +2051,8 @@ pub async fn build_update_account< let data = UpdateAccount { addr, vp_code_hash: extra_section_hash, - public_keys, - threshold, + public_keys: public_keys.clone(), + threshold: *threshold, }; let add_code_hash = |tx: &mut Tx, data: &mut UpdateAccount| { @@ -1937,81 +2061,193 @@ pub async fn build_update_account< data.vp_code_hash = extra_section_hash; Ok(()) }; - build::<_, _, _, _, _, IO>( - client, - wallet, - shielded, - &tx_args, - tx_code_path, + build( + context, + tx_args, + tx_code_path.clone(), data, add_code_hash, - &fee_payer, + &signing_data.fee_payer, None, ) .await + .map(|(tx, epoch)| (tx, signing_data, epoch)) } /// Submit a custom transaction -pub async fn build_custom< - C: crate::sdk::queries::Client + Sync, - U: WalletUtils, - V: ShieldedUtils, - IO: Io, ->( - client: &C, - wallet: &mut Wallet, - shielded: &mut ShieldedContext, +pub async fn build_custom<'a>( + context: &impl Namada<'a>, args::TxCustom { tx: tx_args, code_path, data_path, serialized_tx, - owner: _, - }: args::TxCustom, - fee_payer: &common::PublicKey, -) -> 
Result<(Tx, Option)> { + owner, + }: &args::TxCustom, +) -> Result<(Tx, SigningTxData, Option)> { + let default_signer = Some(owner.clone()); + let signing_data = signing::aux_signing_data( + context, + tx_args, + Some(owner.clone()), + default_signer, + ) + .await?; + let mut tx = if let Some(serialized_tx) = serialized_tx { Tx::deserialize(serialized_tx.as_ref()).map_err(|_| { Error::Other("Invalid tx deserialization.".to_string()) })? } else { - let tx_code_hash = query_wasm_code_hash_buf::<_, IO>( - client, - &code_path + let tx_code_hash = query_wasm_code_hash_buf( + context, + code_path + .as_ref() .ok_or(Error::Other("No code path supplied".to_string()))?, ) .await?; let chain_id = tx_args.chain_id.clone().unwrap(); let mut tx = Tx::new(chain_id, tx_args.expiration); tx.add_code_from_hash(tx_code_hash); - data_path.map(|data| tx.add_serialized_data(data)); + data_path.clone().map(|data| tx.add_serialized_data(data)); tx }; - let epoch = prepare_tx::( - client, - wallet, - shielded, - &tx_args, + let epoch = prepare_tx( + context, + tx_args, &mut tx, - fee_payer.clone(), + signing_data.fee_payer.clone(), None, ) .await?; - Ok((tx, epoch)) + Ok((tx, signing_data, epoch)) +} + +/// Generate IBC shielded transfer +pub async fn gen_ibc_shielded_transfer<'a, N: Namada<'a>>( + context: &N, + args: args::GenIbcShieldedTransafer, +) -> Result> { + let key = match args.target.payment_address() { + Some(pa) if pa.is_pinned() => Some(pa.hash()), + Some(_) => None, + None => return Ok(None), + }; + let source = Address::Internal(InternalAddress::Ibc); + let (src_port_id, src_channel_id) = + get_ibc_src_port_channel(context, &args.port_id, &args.channel_id) + .await?; + let ibc_denom = + rpc::query_ibc_denom(context, &args.token, Some(&source)).await; + let prefixed_denom = ibc_denom + .parse() + .map_err(|_| Error::Other(format!("Invalid IBC denom: {ibc_denom}")))?; + let token = namada_core::ledger::ibc::received_ibc_token( + &prefixed_denom, + &src_port_id, + 
&src_channel_id, + &args.port_id, + &args.channel_id, + ) + .map_err(|e| { + Error::Other(format!("Getting IBC Token failed: error {e}")) + })?; + let validated_amount = + validate_amount(context, args.amount, &token, false).await?; + + let shielded_transfer = + ShieldedContext::::gen_shielded_transfer( + context, + &TransferSource::Address(source.clone()), + &args.target, + &token, + validated_amount, + ) + .await + .map_err(|err| TxError::MaspError(err.to_string()))?; + + let transfer = token::Transfer { + source: source.clone(), + target: masp(), + token: token.clone(), + amount: validated_amount, + key, + shielded: None, + }; + if let Some(shielded_transfer) = shielded_transfer { + // TODO: Workaround for decoding the asset_type later + let mut shielded = context.shielded_mut().await; + let mut asset_types = Vec::new(); + for denom in MaspDenom::iter() { + let epoch = shielded_transfer.epoch; + let asset_type = make_asset_type(Some(epoch), &token, denom)?; + shielded + .asset_types + .insert(asset_type, (token.clone(), denom, epoch)); + asset_types.push(asset_type); + } + let _ = shielded.save().await; + + Ok(Some(IbcShieldedTransfer { + transfer, + masp_tx: shielded_transfer.masp_tx, + })) + } else { + Ok(None) + } +} + +async fn get_ibc_src_port_channel<'a>( + context: &impl Namada<'a>, + dest_port_id: &PortId, + dest_channel_id: &ChannelId, +) -> Result<(PortId, ChannelId)> { + use crate::ibc::core::ics04_channel::channel::ChannelEnd; + use crate::ibc_proto::protobuf::Protobuf; + + let channel_key = channel_key(dest_port_id, dest_channel_id); + let bytes = rpc::query_storage_value_bytes( + context.client(), + &channel_key, + None, + false, + ) + .await? 
+ .0 + .ok_or_else(|| { + Error::Other(format!( + "No channel end: port {dest_port_id}, channel {dest_channel_id}" + )) + })?; + let channel = ChannelEnd::decode_vec(&bytes).map_err(|_| { + Error::Other(format!( + "Decoding channel end failed: port {dest_port_id}, channel \ + {dest_channel_id}", + )) + })?; + channel + .remote + .channel_id() + .map(|src_channel| { + (channel.remote.port_id.clone(), src_channel.clone()) + }) + .ok_or_else(|| { + Error::Other(format!( + "The source channel doesn't exist: port {dest_port_id}, \ + channel {dest_channel_id}" + )) + }) } -async fn expect_dry_broadcast< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( +async fn expect_dry_broadcast<'a>( to_broadcast: TxBroadcastData, - client: &C, + context: &impl Namada<'a>, ) -> Result { match to_broadcast { TxBroadcastData::DryRun(tx) => { - rpc::dry_run_tx::<_, IO>(client, tx.to_bytes()).await?; + rpc::dry_run_tx(context, tx.to_bytes()).await?; Ok(ProcessTxResponse::DryRun) } TxBroadcastData::Live { @@ -2029,20 +2265,17 @@ fn lift_rpc_error(res: std::result::Result) -> Result { /// Returns the given validator if the given address is a validator, /// otherwise returns an error, force forces the address through even /// if it isn't a validator -async fn known_validator_or_err< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( +async fn known_validator_or_err<'a>( validator: Address, force: bool, - client: &C, + context: &impl Namada<'a>, ) -> Result
{ // Check that the validator address exists on chain - let is_validator = rpc::is_validator(client, &validator).await?; + let is_validator = rpc::is_validator(context.client(), &validator).await?; if !is_validator { if force { edisplay_line!( - IO, + context.io(), "The address {} doesn't belong to any known validator account.", validator ); @@ -2058,21 +2291,20 @@ async fn known_validator_or_err< /// general pattern for checking if an address exists on the chain, or /// throwing an error if it's not forced. Takes a generic error /// message and the error type. -async fn address_exists_or_err( +async fn address_exists_or_err<'a, F>( addr: Address, force: bool, - client: &C, + context: &impl Namada<'a>, message: String, err: F, ) -> Result
where - C: crate::sdk::queries::Client + Sync, F: FnOnce(Address) -> Error, { - let addr_exists = rpc::known_address::(client, &addr).await?; + let addr_exists = rpc::known_address(context.client(), &addr).await?; if !addr_exists { if force { - edisplay_line!(IO, "{}", message); + edisplay_line!(context.io(), "{}", message); Ok(addr) } else { Err(err(addr)) @@ -2085,17 +2317,14 @@ where /// Returns the given source address if the given address exists on chain /// otherwise returns an error, force forces the address through even /// if it isn't on chain -async fn source_exists_or_err< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( +async fn source_exists_or_err<'a>( token: Address, force: bool, - client: &C, + context: &impl Namada<'a>, ) -> Result
{ let message = format!("The source address {} doesn't exist on chain.", token); - address_exists_or_err::<_, _, IO>(token, force, client, message, |err| { + address_exists_or_err(token, force, context, message, |err| { Error::from(TxError::SourceDoesNotExist(err)) }) .await @@ -2104,17 +2333,14 @@ async fn source_exists_or_err< /// Returns the given target address if the given address exists on chain /// otherwise returns an error, force forces the address through even /// if it isn't on chain -async fn target_exists_or_err< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( +async fn target_exists_or_err<'a>( token: Address, force: bool, - client: &C, + context: &impl Namada<'a>, ) -> Result
{ let message = format!("The target address {} doesn't exist on chain.", token); - address_exists_or_err::<_, _, IO>(token, force, client, message, |err| { + address_exists_or_err(token, force, context, message, |err| { Error::from(TxError::TargetLocationDoesNotExist(err)) }) .await @@ -2123,41 +2349,35 @@ async fn target_exists_or_err< /// Checks the balance at the given address is enough to transfer the /// given amount, along with the balance even existing. Force /// overrides this. Returns the updated balance for fee check if necessary -async fn check_balance_too_low_err< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( +async fn check_balance_too_low_err<'a, N: Namada<'a>>( token: &Address, source: &Address, amount: token::Amount, balance_key: storage::Key, force: bool, - client: &C, + context: &N, ) -> Result { - match rpc::query_storage_value::(client, &balance_key) - .await + match rpc::query_storage_value::( + context.client(), + &balance_key, + ) + .await { Ok(balance) => match balance.checked_sub(amount) { Some(diff) => Ok(diff), None => { if force { edisplay_line!( - IO, + context.io(), "The balance of the source {} of token {} is lower \ than the amount to be transferred. 
Amount to \ transfer is {} and the balance is {}.", source, token, - format_denominated_amount::<_, IO>( - client, token, amount - ) - .await, - format_denominated_amount::<_, IO>( - client, token, balance - ) - .await, + context.format_amount(token, amount).await, + context.format_amount(token, balance).await, ); - Ok(token::Amount::default()) + Ok(token::Amount::zero()) } else { Err(Error::from(TxError::BalanceTooLow( source.clone(), @@ -2173,12 +2393,12 @@ async fn check_balance_too_low_err< )) => { if force { edisplay_line!( - IO, + context.io(), "No balance found for the source {} of token {}", source, token ); - Ok(token::Amount::default()) + Ok(token::Amount::zero()) } else { Err(Error::from(TxError::NoBalanceForToken( source.clone(), @@ -2192,34 +2412,11 @@ async fn check_balance_too_low_err< } } -#[allow(dead_code)] -fn validate_untrusted_code_err( - vp_code: &Vec, - force: bool, -) -> Result<()> { - if let Err(err) = vm::validate_untrusted_wasm(vp_code) { - if force { - edisplay_line!( - IO, - "Validity predicate code validation failed with {}", - err - ); - Ok(()) - } else { - Err(Error::from(TxError::WasmValidationFailure(err))) - } - } else { - Ok(()) - } -} -async fn query_wasm_code_hash_buf< - C: crate::ledger::queries::Client + Sync, - IO: Io, ->( - client: &C, +async fn query_wasm_code_hash_buf<'a>( + context: &impl Namada<'a>, path: &Path, ) -> Result { - query_wasm_code_hash::<_, IO>(client, path.to_string_lossy()).await + query_wasm_code_hash(context, path.to_string_lossy()).await } /// A helper for [`fn build`] that can be used for `on_tx` arg that does nothing @@ -2231,8 +2428,6 @@ where } fn proposal_to_vec(proposal: OnChainProposal) -> Result> { - proposal - .content - .try_to_vec() + borsh::to_vec(&proposal.content) .map_err(|e| Error::from(EncodingError::Conversion(e.to_string()))) } diff --git a/shared/src/sdk/wallet/alias.rs b/sdk/src/wallet/alias.rs similarity index 100% rename from shared/src/sdk/wallet/alias.rs rename to 
sdk/src/wallet/alias.rs diff --git a/shared/src/sdk/wallet/derivation_path.rs b/sdk/src/wallet/derivation_path.rs similarity index 98% rename from shared/src/sdk/wallet/derivation_path.rs rename to sdk/src/wallet/derivation_path.rs index 7f639161d2..7751e51701 100644 --- a/shared/src/sdk/wallet/derivation_path.rs +++ b/sdk/src/wallet/derivation_path.rs @@ -2,6 +2,7 @@ use core::fmt; use std::str::FromStr; use derivation_path::{ChildIndex, DerivationPath as DerivationPathInner}; +use namada_core::types::key::SchemeType; use thiserror::Error; use tiny_hderive::bip44::{ DerivationPath as HDeriveDerivationPath, @@ -9,8 +10,6 @@ use tiny_hderive::bip44::{ }; use tiny_hderive::Error as HDeriveError; -use crate::types::key::SchemeType; - const ETH_COIN_TYPE: u32 = 60; const NAMADA_COIN_TYPE: u32 = 877; @@ -114,8 +113,9 @@ impl IntoHDeriveDerivationPath for DerivationPath { #[cfg(test)] mod tests { + use namada_core::types::key::SchemeType; + use super::DerivationPath; - use crate::types::key::SchemeType; #[test] fn path_is_compatible() { diff --git a/shared/src/sdk/wallet/keys.rs b/sdk/src/wallet/keys.rs similarity index 96% rename from shared/src/sdk/wallet/keys.rs rename to sdk/src/wallet/keys.rs index 867a2b1ad0..079f165246 100644 --- a/shared/src/sdk/wallet/keys.rs +++ b/sdk/src/wallet/keys.rs @@ -5,13 +5,14 @@ use std::marker::PhantomData; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; use orion::{aead, kdf}; use serde::{Deserialize, Serialize}; use thiserror::Error; use zeroize::Zeroizing; -use crate::sdk::wallet::WalletUtils; +use crate::wallet::WalletIo; const ENCRYPTED_KEY_PREFIX: &str = "encrypted:"; const UNENCRYPTED_KEY_PREFIX: &str = "unencrypted:"; @@ -166,7 +167,7 @@ where /// Get a raw keypair from a stored keypair. If the keypair is encrypted and /// no password is provided in the argument, a password will be prompted /// from stdin. 
- pub fn get( + pub fn get( &self, decrypt: bool, password: Option>, @@ -174,8 +175,8 @@ where match self { StoredKeypair::Encrypted(encrypted_keypair) => { if decrypt { - let password = password - .unwrap_or_else(|| U::read_decryption_password()); + let password = + password.unwrap_or_else(|| U::read_password(false)); let key = encrypted_keypair.decrypt(password)?; Ok(key) } else { @@ -201,9 +202,7 @@ impl EncryptedKeypair { let salt = encryption_salt(); let encryption_key = encryption_key(&salt, &password); - let data = keypair - .try_to_vec() - .expect("Serializing keypair shouldn't fail"); + let data = keypair.serialize_to_vec(); let encrypted_keypair = aead::seal(&encryption_key, &data) .expect("Encryption of data shouldn't fail"); diff --git a/shared/src/sdk/wallet/mod.rs b/sdk/src/wallet/mod.rs similarity index 76% rename from shared/src/sdk/wallet/mod.rs rename to sdk/src/wallet/mod.rs index 371c97806b..67d5a11f86 100644 --- a/shared/src/sdk/wallet/mod.rs +++ b/sdk/src/wallet/mod.rs @@ -13,6 +13,11 @@ use alias::Alias; use bip39::{Language, Mnemonic, MnemonicType, Seed}; use borsh::{BorshDeserialize, BorshSerialize}; use masp_primitives::zip32::ExtendedFullViewingKey; +use namada_core::types::address::Address; +use namada_core::types::key::*; +use namada_core::types::masp::{ + ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, +}; pub use pre_genesis::gen_key_to_store; use rand_core::RngCore; pub use store::{gen_sk_rng, AddressVpType, Store}; @@ -22,11 +27,6 @@ use zeroize::Zeroizing; use self::derivation_path::{DerivationPath, DerivationPathError}; pub use self::keys::{DecryptionError, StoredKeypair}; pub use self::store::{ConfirmationResponse, ValidatorData, ValidatorKeys}; -use crate::types::address::Address; -use crate::types::key::*; -use crate::types::masp::{ - ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, -}; /// Errors of key generation / recovery #[derive(Error, Debug)] @@ -40,12 +40,13 @@ pub enum GenRestoreKeyError { /// Mnemonic 
input error #[error("Mnemonic input error")] MnemonicInputError, + /// Key storage error + #[error("Key storage error")] + KeyStorageError, } /// Captures the interactive parts of the wallet's functioning -pub trait WalletUtils { - /// The location where the wallet is stored - type Storage; +pub trait WalletIo: Sized + Clone { /// Secure random number generator type Rng: RngCore; @@ -67,29 +68,161 @@ pub trait WalletUtils { } /// Read the password for decryption from the file/env/stdin. - fn read_decryption_password() -> Zeroizing; - - /// Read the password for encryption from the file/env/stdin. - /// If the password is read from stdin, the implementation is expected - /// to ask for a confirmation. - fn read_encryption_password() -> Zeroizing; + fn read_password(_confirm: bool) -> Zeroizing { + panic!("attempted to prompt for password in non-interactive mode"); + } /// Read an alias from the file/env/stdin. - fn read_alias(prompt_msg: &str) -> String; + fn read_alias(_prompt_msg: &str) -> String { + panic!("attempted to prompt for alias in non-interactive mode"); + } /// Read mnemonic code from the file/env/stdin. - fn read_mnemonic_code() -> Result; + fn read_mnemonic_code() -> Result { + panic!("attempted to prompt for alias in non-interactive mode"); + } /// Read a mnemonic code from the file/env/stdin. - fn read_mnemonic_passphrase(confirm: bool) -> Zeroizing; + fn read_mnemonic_passphrase(_confirm: bool) -> Zeroizing { + panic!("attempted to prompt for alias in non-interactive mode"); + } /// The given alias has been selected but conflicts with another alias in /// the store. Offer the user to either replace existing mapping, alter the /// chosen alias to a name of their choice, or cancel the aliasing. 
fn show_overwrite_confirmation( - alias: &Alias, - alias_for: &str, - ) -> store::ConfirmationResponse; + _alias: &Alias, + _alias_for: &str, + ) -> store::ConfirmationResponse { + // Automatically replace aliases in non-interactive mode + store::ConfirmationResponse::Replace + } +} + +/// Errors of wallet loading and storing +#[derive(Error, Debug)] +pub enum LoadStoreError { + /// Wallet store decoding error + #[error("Failed decoding the wallet store: {0}")] + Decode(toml::de::Error), + /// Wallet store reading error + #[error("Failed to read the wallet store from {0}: {1}")] + ReadWallet(String, String), + /// Wallet store writing error + #[error("Failed to write the wallet store: {0}")] + StoreNewWallet(String), +} + +/// Captures the permanent storage parts of the wallet's functioning +pub trait WalletStorage: Sized + Clone { + /// Save the wallet store to a file. + fn save(&self, wallet: &Wallet) -> Result<(), LoadStoreError>; + + /// Load a wallet from the store file. + fn load(&self, wallet: &mut Wallet) -> Result<(), LoadStoreError>; +} + +#[cfg(feature = "std")] +/// Implementation of wallet functionality depending on a standard filesystem +pub mod fs { + use std::fs; + use std::io::{Read, Write}; + use std::path::PathBuf; + + use fd_lock::RwLock; + use rand_core::OsRng; + + use super::*; + + /// A trait for deriving WalletStorage for standard filesystems + pub trait FsWalletStorage: Clone { + /// The directory in which the wallet is supposed to be stored + fn store_dir(&self) -> &PathBuf; + } + + /// Wallet file name + const FILE_NAME: &str = "wallet.toml"; + + impl WalletStorage for F { + fn save(&self, wallet: &Wallet) -> Result<(), LoadStoreError> { + let data = wallet.store.encode(); + let wallet_path = self.store_dir().join(FILE_NAME); + // Make sure the dir exists + let wallet_dir = wallet_path.parent().unwrap(); + fs::create_dir_all(wallet_dir).map_err(|err| { + LoadStoreError::StoreNewWallet(err.to_string()) + })?; + // Write the file + let mut 
options = fs::OpenOptions::new(); + options.create(true).write(true).truncate(true); + let mut lock = + RwLock::new(options.open(wallet_path).map_err(|err| { + LoadStoreError::StoreNewWallet(err.to_string()) + })?); + let mut guard = lock.write().map_err(|err| { + LoadStoreError::StoreNewWallet(err.to_string()) + })?; + guard + .write_all(&data) + .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string())) + } + + fn load( + &self, + wallet: &mut Wallet, + ) -> Result<(), LoadStoreError> { + let wallet_file = self.store_dir().join(FILE_NAME); + let mut options = fs::OpenOptions::new(); + options.read(true).write(false); + let lock = + RwLock::new(options.open(&wallet_file).map_err(|err| { + LoadStoreError::ReadWallet( + wallet_file.to_string_lossy().into_owned(), + err.to_string(), + ) + })?); + let guard = lock.read().map_err(|err| { + LoadStoreError::ReadWallet( + wallet_file.to_string_lossy().into_owned(), + err.to_string(), + ) + })?; + let mut store = Vec::::new(); + (&*guard).read_to_end(&mut store).map_err(|err| { + LoadStoreError::ReadWallet( + self.store_dir().to_str().unwrap().parse().unwrap(), + err.to_string(), + ) + })?; + wallet.store = + Store::decode(store).map_err(LoadStoreError::Decode)?; + Ok(()) + } + } + + /// For a non-interactive filesystem based wallet + #[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] + pub struct FsWalletUtils { + #[borsh(skip)] + store_dir: PathBuf, + } + + impl FsWalletUtils { + /// Initialize a wallet at the given directory + pub fn new(store_dir: PathBuf) -> Wallet { + Wallet::new(Self { store_dir }, Store::default()) + } + } + + impl WalletIo for FsWalletUtils { + type Rng = OsRng; + } + + impl FsWalletStorage for FsWalletUtils { + fn store_dir(&self) -> &PathBuf { + &self.store_dir + } + } } /// The error that is produced when a given key cannot be obtained @@ -105,24 +238,217 @@ pub enum FindKeyError { /// Represents a collection of keys and addresses while caching key decryptions #[derive(Debug)] -pub 
struct Wallet { - store_dir: U::Storage, +pub struct Wallet { + /// Location where this shielded context is saved + utils: U, store: Store, decrypted_key_cache: HashMap, decrypted_spendkey_cache: HashMap, } -impl Wallet { +impl From> for Store { + fn from(wallet: Wallet) -> Self { + wallet.store + } +} + +impl Wallet { /// Create a new wallet from the given backing store and storage location - pub fn new(store_dir: U::Storage, store: Store) -> Self { + pub fn new(utils: U, store: Store) -> Self { Self { - store_dir, + utils, store, decrypted_key_cache: HashMap::default(), decrypted_spendkey_cache: HashMap::default(), } } + /// Add validator data to the store + pub fn add_validator_data( + &mut self, + address: Address, + keys: ValidatorKeys, + ) { + self.store.add_validator_data(address, keys); + } + + /// Returns a reference to the validator data, if it exists. + pub fn get_validator_data(&self) -> Option<&ValidatorData> { + self.store.get_validator_data() + } + + /// Returns a mut reference to the validator data, if it exists. + pub fn get_validator_data_mut(&mut self) -> Option<&mut ValidatorData> { + self.store.get_validator_data_mut() + } + + /// Take the validator data, if it exists. + pub fn take_validator_data(&mut self) -> Option { + self.store.take_validator_data() + } + + /// Returns the validator data, if it exists. + pub fn into_validator_data(self) -> Option { + self.store.into_validator_data() + } + + /// Provide immutable access to the backing store + pub fn store(&self) -> &Store { + &self.store + } + + /// Provide mutable access to the backing store + pub fn store_mut(&mut self) -> &mut Store { + &mut self.store + } + + /// Extend this wallet from pre-genesis validator wallet. 
+ pub fn extend_from_pre_genesis_validator( + &mut self, + validator_address: Address, + validator_alias: Alias, + other: pre_genesis::ValidatorWallet, + ) { + self.store.extend_from_pre_genesis_validator( + validator_address, + validator_alias, + other, + ) + } + + /// Gets all addresses given a vp_type + pub fn get_addresses_with_vp_type( + &self, + vp_type: AddressVpType, + ) -> HashSet
{ + self.store.get_addresses_with_vp_type(vp_type) + } + + /// Add a vp_type to a given address + pub fn add_vp_type_to_address( + &mut self, + vp_type: AddressVpType, + address: Address, + ) { + // defaults to an empty set + self.store.add_vp_type_to_address(vp_type, address) + } + + /// Get addresses with tokens VP type keyed and ordered by their aliases. + pub fn tokens_with_aliases(&self) -> BTreeMap { + self.get_addresses_with_vp_type(AddressVpType::Token) + .into_iter() + .map(|addr| { + let alias = self.lookup_alias(&addr); + (alias, addr) + }) + .collect() + } + + /// Find the stored address by an alias. + pub fn find_address(&self, alias: impl AsRef) -> Option<&Address> { + self.store.find_address(alias) + } + + /// Find an alias by the address if it's in the wallet. + pub fn find_alias(&self, address: &Address) -> Option<&Alias> { + self.store.find_alias(address) + } + + /// Try to find an alias for a given address from the wallet. If not found, + /// formats the address into a string. + pub fn lookup_alias(&self, addr: &Address) -> String { + match self.find_alias(addr) { + Some(alias) => format!("{}", alias), + None => format!("{}", addr), + } + } + + /// Find the viewing key with the given alias in the wallet and return it + pub fn find_viewing_key( + &mut self, + alias: impl AsRef, + ) -> Result<&ExtendedViewingKey, FindKeyError> { + self.store + .find_viewing_key(alias.as_ref()) + .ok_or(FindKeyError::KeyNotFound) + } + + /// Find the payment address with the given alias in the wallet and return + /// it + pub fn find_payment_addr( + &self, + alias: impl AsRef, + ) -> Option<&PaymentAddress> { + self.store.find_payment_addr(alias.as_ref()) + } + + /// Get all known keys by their alias, paired with PKH, if known. 
+ pub fn get_keys( + &self, + ) -> HashMap< + String, + (&StoredKeypair, Option<&PublicKeyHash>), + > { + self.store + .get_keys() + .into_iter() + .map(|(alias, value)| (alias.into(), value)) + .collect() + } + + /// Get all known addresses by their alias, paired with PKH, if known. + pub fn get_addresses(&self) -> HashMap { + self.store + .get_addresses() + .iter() + .map(|(alias, value)| (alias.into(), value.clone())) + .collect() + } + + /// Get all known payment addresses by their alias + pub fn get_payment_addrs(&self) -> HashMap { + self.store + .get_payment_addrs() + .iter() + .map(|(alias, value)| (alias.into(), *value)) + .collect() + } + + /// Get all known viewing keys by their alias + pub fn get_viewing_keys(&self) -> HashMap { + self.store + .get_viewing_keys() + .iter() + .map(|(alias, value)| (alias.into(), *value)) + .collect() + } + + /// Get all known viewing keys by their alias + pub fn get_spending_keys( + &self, + ) -> HashMap> { + self.store + .get_spending_keys() + .iter() + .map(|(alias, value)| (alias.into(), value)) + .collect() + } +} + +impl Wallet { + /// Load a wallet from the store file. + pub fn load(&mut self) -> Result<(), LoadStoreError> { + self.utils.clone().load(self) + } + + /// Save the wallet store to a file. 
+ pub fn save(&self) -> Result<(), LoadStoreError> { + self.utils.save(self) + } +} + +impl Wallet { fn gen_and_store_key( &mut self, scheme: SchemeType, @@ -161,6 +487,7 @@ impl Wallet { alias: Option, alias_force: bool, derivation_path: Option, + mnemonic_passphrase: Option<(Mnemonic, Zeroizing)>, password: Option>, ) -> Result, GenRestoreKeyError> { let parsed_derivation_path = derivation_path @@ -182,8 +509,12 @@ impl Wallet { ) } println!("Using HD derivation path {}", parsed_derivation_path); - let mnemonic = U::read_mnemonic_code()?; - let passphrase = U::read_mnemonic_passphrase(false); + let (mnemonic, passphrase) = + if let Some(mnemonic_passphrase) = mnemonic_passphrase { + mnemonic_passphrase + } else { + (U::read_mnemonic_code()?, U::read_mnemonic_passphrase(false)) + }; let seed = Seed::new(&mnemonic, &passphrase); Ok(self.gen_and_store_key( @@ -212,9 +543,11 @@ impl Wallet { scheme: SchemeType, alias: Option, alias_force: bool, + passphrase: Option>, password: Option>, derivation_path_and_mnemonic_rng: Option<(String, &mut U::Rng)>, - ) -> Result, GenRestoreKeyError> { + ) -> Result<(String, common::SecretKey, Option), GenRestoreKeyError> + { let parsed_path_and_rng = derivation_path_and_mnemonic_rng .map(|(raw_derivation_path, rng)| { let is_default = @@ -242,27 +575,33 @@ impl Wallet { println!("Using HD derivation path {}", parsed_derivation_path); } + let mut mnemonic_opt = None; let seed_and_derivation_path //: Option> = parsed_path_and_rng.map(|(path, rng)| { const MNEMONIC_TYPE: MnemonicType = MnemonicType::Words24; - let mnemonic = U::generate_mnemonic_code(MNEMONIC_TYPE, rng)?; + let mnemonic = mnemonic_opt + .insert(U::generate_mnemonic_code(MNEMONIC_TYPE, rng)?); println!( "Safely store your {} words mnemonic.", MNEMONIC_TYPE.word_count() ); println!("{}", mnemonic.clone().into_phrase()); - let passphrase = U::read_mnemonic_passphrase(true); - Ok((Seed::new(&mnemonic, &passphrase), path)) + let passphrase = passphrase + .unwrap_or_else(|| 
U::read_mnemonic_passphrase(true)); + Ok((Seed::new(mnemonic, &passphrase), path)) }).transpose()?; - Ok(self.gen_and_store_key( - scheme, - alias, - alias_force, - seed_and_derivation_path, - password, - )) + let (alias, key) = self + .gen_and_store_key( + scheme, + alias, + alias_force, + seed_and_derivation_path, + password, + ) + .ok_or(GenRestoreKeyError::KeyStorageError)?; + Ok((alias, key, mnemonic_opt)) } /// Generate a disposable signing key for fee payment and store it under the @@ -280,10 +619,9 @@ impl Wallet { // Generate a disposable keypair to sign the wrapper if requested // TODO: once the wrapper transaction has been accepted, this key can be // deleted from wallet - let (alias, disposable_keypair) = self - .gen_key(SchemeType::Ed25519, Some(alias), false, None, None) - .expect("Failed to initialize disposable keypair") - .expect("Missing alias and secret key"); + let (alias, disposable_keypair, _mnemonic) = self + .gen_key(SchemeType::Ed25519, Some(alias), false, None, None, None) + .expect("Failed to initialize disposable keypair"); println!("Created disposable keypair with alias {alias}"); disposable_keypair @@ -304,35 +642,6 @@ impl Wallet { (alias.into(), key) } - /// Add validator data to the store - pub fn add_validator_data( - &mut self, - address: Address, - keys: ValidatorKeys, - ) { - self.store.add_validator_data(address, keys); - } - - /// Returns a reference to the validator data, if it exists. - pub fn get_validator_data(&self) -> Option<&ValidatorData> { - self.store.get_validator_data() - } - - /// Returns a mut reference to the validator data, if it exists. - pub fn get_validator_data_mut(&mut self) -> Option<&mut ValidatorData> { - self.store.get_validator_data_mut() - } - - /// Take the validator data, if it exists. - pub fn take_validator_data(&mut self) -> Option { - self.store.take_validator_data() - } - - /// Returns the validator data, if it exists. 
- pub fn into_validator_data(self) -> Option { - self.store.into_validator_data() - } - /// Find the stored key by an alias, a public key hash or a public key. /// If the key is encrypted and password not supplied, then password will be /// interactively prompted. Any keys that are decrypted are stored in and @@ -389,25 +698,6 @@ impl Wallet { ) } - /// Find the viewing key with the given alias in the wallet and return it - pub fn find_viewing_key( - &mut self, - alias: impl AsRef, - ) -> Result<&ExtendedViewingKey, FindKeyError> { - self.store - .find_viewing_key(alias.as_ref()) - .ok_or(FindKeyError::KeyNotFound) - } - - /// Find the payment address with the given alias in the wallet and return - /// it - pub fn find_payment_addr( - &self, - alias: impl AsRef, - ) -> Option<&PaymentAddress> { - self.store.find_payment_addr(alias.as_ref()) - } - /// Find the stored key by a public key. /// If the key is encrypted and password not supplied, then password will be /// interactively prompted for. Any keys that are decrypted are stored in @@ -463,7 +753,7 @@ impl Wallet { .store .find_key_by_pkh(pkh) .ok_or(FindKeyError::KeyNotFound)?; - Self::decrypt_stored_key::<_>( + Self::decrypt_stored_key( &mut self.decrypted_key_cache, stored_key, alias, @@ -489,7 +779,7 @@ impl Wallet { match stored_key { StoredKeypair::Encrypted(encrypted) => { let password = - password.unwrap_or_else(U::read_decryption_password); + password.unwrap_or_else(|| U::read_password(false)); let key = encrypted .decrypt(password) .map_err(FindKeyError::KeyDecryptionError)?; @@ -503,77 +793,6 @@ impl Wallet { } } - /// Get all known keys by their alias, paired with PKH, if known. - pub fn get_keys( - &self, - ) -> HashMap< - String, - (&StoredKeypair, Option<&PublicKeyHash>), - > { - self.store - .get_keys() - .into_iter() - .map(|(alias, value)| (alias.into(), value)) - .collect() - } - - /// Find the stored address by an alias. 
- pub fn find_address(&self, alias: impl AsRef) -> Option<&Address> { - self.store.find_address(alias) - } - - /// Find an alias by the address if it's in the wallet. - pub fn find_alias(&self, address: &Address) -> Option<&Alias> { - self.store.find_alias(address) - } - - /// Try to find an alias for a given address from the wallet. If not found, - /// formats the address into a string. - pub fn lookup_alias(&self, addr: &Address) -> String { - match self.find_alias(addr) { - Some(alias) => format!("{}", alias), - None => format!("{}", addr), - } - } - - /// Get all known addresses by their alias, paired with PKH, if known. - pub fn get_addresses(&self) -> HashMap { - self.store - .get_addresses() - .iter() - .map(|(alias, value)| (alias.into(), value.clone())) - .collect() - } - - /// Get all known payment addresses by their alias - pub fn get_payment_addrs(&self) -> HashMap { - self.store - .get_payment_addrs() - .iter() - .map(|(alias, value)| (alias.into(), *value)) - .collect() - } - - /// Get all known viewing keys by their alias - pub fn get_viewing_keys(&self) -> HashMap { - self.store - .get_viewing_keys() - .iter() - .map(|(alias, value)| (alias.into(), *value)) - .collect() - } - - /// Get all known viewing keys by their alias - pub fn get_spending_keys( - &self, - ) -> HashMap> { - self.store - .get_spending_keys() - .iter() - .map(|(alias, value)| (alias.into(), value)) - .collect() - } - /// Add a new address with the given alias. If the alias is already used, /// will ask whether the existing alias should be replaced, a different /// alias is desired, or the alias creation should be cancelled. Return @@ -664,62 +883,4 @@ impl Wallet { .insert_payment_addr::(alias.into(), payment_addr, force_alias) .map(Into::into) } - - /// Extend this wallet from pre-genesis validator wallet. 
- pub fn extend_from_pre_genesis_validator( - &mut self, - validator_address: Address, - validator_alias: Alias, - other: pre_genesis::ValidatorWallet, - ) { - self.store.extend_from_pre_genesis_validator( - validator_address, - validator_alias, - other, - ) - } - - /// Gets all addresses given a vp_type - pub fn get_addresses_with_vp_type( - &self, - vp_type: AddressVpType, - ) -> HashSet
{ - self.store.get_addresses_with_vp_type(vp_type) - } - - /// Add a vp_type to a given address - pub fn add_vp_type_to_address( - &mut self, - vp_type: AddressVpType, - address: Address, - ) { - // defaults to an empty set - self.store.add_vp_type_to_address(vp_type, address) - } - - /// Provide immutable access to the backing store - pub fn store(&self) -> &Store { - &self.store - } - - /// Provide mutable access to the backing store - pub fn store_mut(&mut self) -> &mut Store { - &mut self.store - } - - /// Access storage location data - pub fn store_dir(&self) -> &U::Storage { - &self.store_dir - } - - /// Get addresses with tokens VP type keyed and ordered by their aliases. - pub fn tokens_with_aliases(&self) -> BTreeMap { - self.get_addresses_with_vp_type(AddressVpType::Token) - .into_iter() - .map(|addr| { - let alias = self.lookup_alias(&addr); - (alias, addr) - }) - .collect() - } } diff --git a/shared/src/sdk/wallet/pre_genesis.rs b/sdk/src/wallet/pre_genesis.rs similarity index 96% rename from shared/src/sdk/wallet/pre_genesis.rs rename to sdk/src/wallet/pre_genesis.rs index fd66dedbfe..916ec43781 100644 --- a/shared/src/sdk/wallet/pre_genesis.rs +++ b/sdk/src/wallet/pre_genesis.rs @@ -1,11 +1,11 @@ //! 
Provides functionality for managing validator keys +use namada_core::types::key::{common, SchemeType}; use serde::{Deserialize, Serialize}; use thiserror::Error; use zeroize::Zeroizing; -use crate::sdk::wallet; -use crate::sdk::wallet::{store, StoredKeypair}; -use crate::types::key::{common, SchemeType}; +use crate::wallet; +use crate::wallet::{store, StoredKeypair}; /// Ways in which wallet store operations can fail #[derive(Error, Debug)] diff --git a/shared/src/sdk/wallet/store.rs b/sdk/src/wallet/store.rs similarity index 98% rename from shared/src/sdk/wallet/store.rs rename to sdk/src/wallet/store.rs index 509ff5afe6..201cc885a4 100644 --- a/shared/src/sdk/wallet/store.rs +++ b/sdk/src/wallet/store.rs @@ -8,6 +8,12 @@ use bimap::BiHashMap; use bip39::Seed; use itertools::Itertools; use masp_primitives::zip32::ExtendedFullViewingKey; +use namada_core::types::address::{Address, ImplicitAddress}; +use namada_core::types::key::dkg_session_keys::DkgKeypair; +use namada_core::types::key::*; +use namada_core::types::masp::{ + ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, +}; #[cfg(feature = "masp-tx-gen")] use rand_core::RngCore; use serde::{Deserialize, Serialize}; @@ -17,13 +23,7 @@ use zeroize::Zeroizing; use super::alias::{self, Alias}; use super::derivation_path::DerivationPath; use super::pre_genesis; -use crate::sdk::wallet::{StoredKeypair, WalletUtils}; -use crate::types::address::{Address, ImplicitAddress}; -use crate::types::key::dkg_session_keys::DkgKeypair; -use crate::types::key::*; -use crate::types::masp::{ - ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, -}; +use crate::wallet::{StoredKeypair, WalletIo}; /// Actions that can be taken when there is an alias conflict pub enum ConfirmationResponse { @@ -239,7 +239,7 @@ impl Store { /// key. /// Returns None if the alias already exists and the user decides to skip /// it. No changes in the wallet store are made. 
- pub fn gen_key( + pub fn gen_key( &mut self, scheme: SchemeType, alias: Option, @@ -277,7 +277,7 @@ impl Store { } /// Generate a spending key similarly to how it's done for keypairs - pub fn gen_spending_key( + pub fn gen_spending_key( &mut self, alias: String, password: Option>, @@ -335,7 +335,7 @@ impl Store { /// will prompt for overwrite/reselection confirmation. If declined, then /// keypair is not inserted and nothing is returned, otherwise selected /// alias is returned. - pub fn insert_keypair( + pub fn insert_keypair( &mut self, alias: Alias, keypair: StoredKeypair, @@ -388,7 +388,7 @@ impl Store { } /// Insert spending keys similarly to how it's done for keypairs - pub fn insert_spending_key( + pub fn insert_spending_key( &mut self, alias: Alias, spendkey: StoredKeypair, @@ -418,7 +418,7 @@ impl Store { } /// Insert viewing keys similarly to how it's done for keypairs - pub fn insert_viewing_key( + pub fn insert_viewing_key( &mut self, alias: Alias, viewkey: ExtendedViewingKey, @@ -463,7 +463,7 @@ impl Store { } /// Insert payment addresses similarly to how it's done for keypairs - pub fn insert_payment_addr( + pub fn insert_payment_addr( &mut self, alias: Alias, payment_addr: PaymentAddress, @@ -507,7 +507,7 @@ impl Store { /// will prompt for overwrite/reselection confirmation, which when declined, /// the address won't be added. Return the selected alias if the address has /// been added. 
- pub fn insert_address( + pub fn insert_address( &mut self, alias: Alias, address: Address, diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 3259afd2c7..648f7e0b39 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -19,11 +19,12 @@ default = ["abciplus", "namada-sdk", "wasm-runtime"] mainnet = [ "namada_core/mainnet", ] -std = [] +std = ["fd-lock"] # NOTE "dev" features that shouldn't be used in live networks are enabled by default for now dev = [] ferveo-tpke = [ "namada_core/ferveo-tpke", + "namada_sdk/ferveo-tpke", ] wasm-runtime = [ "namada_core/wasm-runtime", @@ -41,6 +42,7 @@ wasm-runtime = [ # Enable queries support for an async client async-client = [ "async-trait", + "namada_sdk/async-client" ] # Requires async traits to be safe to send across threads @@ -50,6 +52,7 @@ async-send = [] tendermint-rpc = [ "async-client", "dep:tendermint-rpc", + "namada_sdk/tendermint-rpc", ] # tendermint-rpc HttpClient http-client = [ @@ -60,15 +63,18 @@ abciplus = [ "namada_core/abciplus", "namada_proof_of_stake/abciplus", "namada_ethereum_bridge/abciplus", + "namada_sdk/abciplus", ] ibc-mocks = [ "namada_core/ibc-mocks", + "namada_sdk/ibc-mocks", ] masp-tx-gen = [ "rand", "rand_core", + "namada_sdk/masp-tx-gen", ] # for integration tests and test utilies @@ -76,6 +82,7 @@ testing = [ "namada_core/testing", "namada_ethereum_bridge/testing", "namada_proof_of_stake/testing", + "namada_sdk/testing", "async-client", "proptest", "rand_core", @@ -87,18 +94,24 @@ namada-sdk = [ "tendermint-rpc", "masp-tx-gen", "ferveo-tpke", - "masp_primitives/transparent-inputs" + "masp_primitives/transparent-inputs", + "namada_sdk/namada-sdk", ] -multicore = ["masp_proofs/multicore"] +multicore = [ + "masp_proofs/multicore", + "namada_sdk/multicore", +] [dependencies] -namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign"]} +namada_core = {path = "../core", default-features = false} +namada_sdk = {path = "../sdk", default-features = false} 
namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} namada_ethereum_bridge = {path = "../ethereum_bridge", default-features = false} async-trait = {version = "0.1.51", optional = true} bimap.workspace = true borsh.workspace = true +borsh-ext.workspace = true circular-queue.workspace = true clru.workspace = true data-encoding.workspace = true @@ -107,6 +120,7 @@ derivative.workspace = true ethbridge-bridge-contract.workspace = true ethers.workspace = true eyre.workspace = true +fd-lock = { workspace = true, optional = true } futures.workspace = true itertools.workspace = true loupe = {version = "0.1.3", optional = true} @@ -149,17 +163,18 @@ zeroize.workspace = true tokio = {workspace = true, features = ["full"]} [target.'cfg(target_family = "wasm")'.dependencies] +tokio = {workspace = true, default-features = false, features = ["sync"]} wasmtimer = "0.2.0" [dev-dependencies] -namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign", "testing", "ibc-mocks"]} +namada_core = {path = "../core", default-features = false, features = ["testing", "ibc-mocks"]} namada_ethereum_bridge = {path = "../ethereum_bridge", default-features = false, features = ["testing"]} namada_test_utils = {path = "../test_utils"} assert_matches.workspace = true async-trait.workspace = true base58.workspace = true byte-unit.workspace = true -libsecp256k1.workspace = true +k256.workspace = true pretty_assertions.workspace = true proptest.workspace = true tempfile.workspace = true diff --git a/shared/src/ledger/governance/utils.rs b/shared/src/ledger/governance/utils.rs index d4b4d1316c..a254556cce 100644 --- a/shared/src/ledger/governance/utils.rs +++ b/shared/src/ledger/governance/utils.rs @@ -3,6 +3,7 @@ use std::collections::HashMap; use namada_core::ledger::governance::utils::TallyResult; +use namada_sdk::events::{Event, EventLevel}; use thiserror::Error; use crate::ledger::events::EventType; @@ -34,6 +35,16 @@ pub struct ProposalEvent 
{ pub attributes: HashMap, } +impl From for Event { + fn from(proposal_event: ProposalEvent) -> Self { + Self { + event_type: EventType::Proposal, + level: EventLevel::Block, + attributes: proposal_event.attributes, + } + } +} + impl ProposalEvent { /// Create a proposal event pub fn new( diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 04b5809bc2..62e460f099 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -1,19 +1,376 @@ //! The ledger modules -pub mod eth_bridge; -pub mod events; +pub use namada_sdk::{eth_bridge, events}; pub mod governance; pub mod ibc; -pub mod inflation; pub mod native_vp; pub mod pgf; pub mod pos; #[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] pub mod protocol; -pub mod queries; +pub use namada_sdk::queries; pub mod storage; pub mod vp_host_fns; +use namada_core::ledger::storage::{DBIter, StorageHasher, DB}; +use namada_core::ledger::storage_api::ResultExt; pub use namada_core::ledger::{ gas, parameters, replay_protection, storage_api, tx_env, vp_env, }; +use namada_sdk::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; + +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +use crate::vm::wasm::{TxCache, VpCache}; +use crate::vm::WasmCacheAccess; + +/// Dry run a transaction +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +pub fn dry_run_tx( + mut ctx: RequestCtx<'_, D, H, VpCache, TxCache>, + request: &RequestQuery, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, +{ + use borsh_ext::BorshSerializeExt; + use namada_core::ledger::gas::{Gas, GasMetering, TxGasMeter}; + use namada_core::ledger::storage::TempWlStorage; + use namada_core::proto::Tx; + use namada_core::types::transaction::wrapper::wrapper_tx::PairingEngine; + use namada_core::types::transaction::{ + AffineCurve, DecryptedTx, EllipticCurve, + }; + + use 
crate::ledger::protocol::ShellParams; + use crate::types::storage::TxIndex; + use crate::types::transaction::TxType; + + let mut tx = Tx::try_from(&request.data[..]).into_storage_result()?; + tx.validate_tx().into_storage_result()?; + + let mut temp_wl_storage = TempWlStorage::new(&ctx.wl_storage.storage); + let mut cumulated_gas = Gas::default(); + + // Wrapper dry run to allow estimating the gas cost of a transaction + let mut tx_gas_meter = match tx.header().tx_type { + TxType::Wrapper(wrapper) => { + let mut tx_gas_meter = + TxGasMeter::new(wrapper.gas_limit.to_owned()); + protocol::apply_wrapper_tx( + tx.clone(), + &wrapper, + None, + &request.data, + ShellParams::new( + &mut tx_gas_meter, + &mut temp_wl_storage, + &mut ctx.vp_wasm_cache, + &mut ctx.tx_wasm_cache, + ), + None, + ) + .into_storage_result()?; + + temp_wl_storage.write_log.commit_tx(); + cumulated_gas = tx_gas_meter.get_tx_consumed_gas(); + + // NOTE: the encryption key for a dry-run should always be an + // hardcoded, dummy one + let _privkey = + ::G2Affine::prime_subgroup_generator(); + tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); + TxGasMeter::new_from_sub_limit(tx_gas_meter.get_available_gas()) + } + TxType::Protocol(_) | TxType::Decrypted(_) => { + // If dry run only the inner tx, use the max block gas as the gas + // limit + TxGasMeter::new( + namada_core::ledger::gas::get_max_block_gas(ctx.wl_storage) + .unwrap() + .into(), + ) + } + TxType::Raw => { + // Cast tx to a decrypted for execution + tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); + + // If dry run only the inner tx, use the max block gas as the gas + // limit + TxGasMeter::new( + namada_core::ledger::gas::get_max_block_gas(ctx.wl_storage) + .unwrap() + .into(), + ) + } + }; + + let mut data = protocol::apply_wasm_tx( + tx, + &TxIndex(0), + ShellParams::new( + &mut tx_gas_meter, + &mut temp_wl_storage, + &mut ctx.vp_wasm_cache, + &mut ctx.tx_wasm_cache, + ), + ) + .into_storage_result()?; + 
cumulated_gas = cumulated_gas + .checked_add(tx_gas_meter.get_tx_consumed_gas()) + .ok_or(namada_core::ledger::storage_api::Error::SimpleMessage( + "Overflow in gas", + ))?; + // Account gas for both inner and wrapper (if available) + data.gas_used = cumulated_gas; + // NOTE: the keys changed by the wrapper transaction (if any) are not + // returned from this function + let data = data.serialize_to_vec(); + Ok(EncodedResponseQuery { + data, + proof: None, + info: Default::default(), + }) +} + +#[cfg(test)] +mod test { + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; + use namada_core::ledger::storage::testing::TestWlStorage; + use namada_core::ledger::storage_api::{self, StorageWrite}; + use namada_core::types::hash::Hash; + use namada_core::types::storage::{BlockHeight, Key}; + use namada_core::types::transaction::decrypted::DecryptedTx; + use namada_core::types::transaction::TxType; + use namada_core::types::{address, token}; + use namada_sdk::queries::{Router, RPC}; + use namada_test_utils::TestWasms; + use tempfile::TempDir; + use tendermint_rpc::{Error as RpcError, Response}; + + use crate::ledger::events::log::EventLog; + use crate::ledger::queries::Client; + use crate::ledger::{EncodedResponseQuery, RequestCtx, RequestQuery}; + use crate::proto::{Code, Data, Tx}; + use crate::vm::wasm::{TxCache, VpCache}; + use crate::vm::{wasm, WasmCacheRoAccess}; + + /// A test client that has direct access to the storage + pub struct TestClient + where + RPC: Router, + { + /// RPC router + pub rpc: RPC, + /// storage + pub wl_storage: TestWlStorage, + /// event log + pub event_log: EventLog, + /// VP wasm compilation cache + pub vp_wasm_cache: VpCache, + /// tx wasm compilation cache + pub tx_wasm_cache: TxCache, + /// VP wasm compilation cache directory + pub vp_cache_dir: TempDir, + /// tx wasm compilation cache directory + pub tx_cache_dir: TempDir, + } + + impl TestClient + where + RPC: Router, + { + #[allow(dead_code)] + /// Initialize a test client 
for the given root RPC router + pub fn new(rpc: RPC) -> Self { + // Initialize the `TestClient` + let mut wl_storage = TestWlStorage::default(); + + // Initialize mock gas limit + let max_block_gas_key = + namada_core::ledger::parameters::storage::get_max_block_gas_key( + ); + wl_storage + .storage + .write( + &max_block_gas_key, + namada_core::ledger::storage::types::encode( + &20_000_000_u64, + ), + ) + .expect( + "Max block gas parameter must be initialized in storage", + ); + let event_log = EventLog::default(); + let (vp_wasm_cache, vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + let (tx_wasm_cache, tx_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + Self { + rpc, + wl_storage, + event_log, + vp_wasm_cache: vp_wasm_cache.read_only(), + tx_wasm_cache: tx_wasm_cache.read_only(), + vp_cache_dir, + tx_cache_dir, + } + } + } + + #[cfg_attr(feature = "async-send", async_trait::async_trait)] + #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] + impl Client for TestClient + where + RPC: Router + Sync, + { + type Error = std::io::Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height.unwrap_or_default(); + // Handle a path by invoking the `RPC.handle` directly with the + // borrowed storage + let request = RequestQuery { + data, + path, + height, + prove, + }; + let ctx = RequestCtx { + wl_storage: &self.wl_storage, + event_log: &self.event_log, + vp_wasm_cache: self.vp_wasm_cache.clone(), + tx_wasm_cache: self.tx_wasm_cache.clone(), + storage_read_past_height_limit: None, + }; + // TODO: this is a hack to propagate errors to the caller, we should + // really permit error types other than [`std::io::Error`] + if request.path == "/shell/dry_run_tx" { + super::dry_run_tx(ctx, &request) + } else { + self.rpc.handle(ctx, &request) + } + .map_err(|err| { + 
std::io::Error::new(std::io::ErrorKind::Other, err.to_string()) + }) + } + + async fn perform(&self, _request: R) -> Result + where + R: tendermint_rpc::SimpleRequest, + { + Response::from_string("TODO") + } + } + + #[tokio::test] + async fn test_shell_queries_router_with_client() -> storage_api::Result<()> + { + // Initialize the `TestClient` + let mut client = TestClient::new(RPC); + // store the wasm code + let tx_no_op = TestWasms::TxNoOp.read_bytes(); + let tx_hash = Hash::sha256(&tx_no_op); + let key = Key::wasm_code(&tx_hash); + let len_key = Key::wasm_code_len(&tx_hash); + client.wl_storage.storage.write(&key, &tx_no_op).unwrap(); + client + .wl_storage + .storage + .write(&len_key, (tx_no_op.len() as u64).serialize_to_vec()) + .unwrap(); + + // Request last committed epoch + let read_epoch = RPC.shell().epoch(&client).await.unwrap(); + let current_epoch = client.wl_storage.storage.last_epoch; + assert_eq!(current_epoch, read_epoch); + + // Request dry run tx + let mut outer_tx = + Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); + outer_tx.header.chain_id = client.wl_storage.storage.chain_id.clone(); + outer_tx.set_code(Code::from_hash(tx_hash)); + outer_tx.set_data(Data::new(vec![])); + let tx_bytes = outer_tx.to_bytes(); + let result = RPC + .shell() + .dry_run_tx(&client, Some(tx_bytes), None, false) + .await + .unwrap(); + assert!(result.data.is_accepted()); + + // Request storage value for a balance key ... + let token_addr = address::testing::established_address_1(); + let owner = address::testing::established_address_2(); + let balance_key = token::balance_key(&token_addr, &owner); + // ... there should be no value yet. 
+ let read_balance = RPC + .shell() + .storage_value(&client, None, None, false, &balance_key) + .await + .unwrap(); + assert!(read_balance.data.is_empty()); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = RPC + .shell() + .storage_prefix(&client, None, None, false, &balance_prefix) + .await + .unwrap(); + assert!(read_balances.data.is_empty()); + + // Request storage has key + let has_balance_key = RPC + .shell() + .storage_has_key(&client, &balance_key) + .await + .unwrap(); + assert!(!has_balance_key); + + // Then write some balance ... + let balance = token::Amount::native_whole(1000); + StorageWrite::write(&mut client.wl_storage, &balance_key, balance)?; + // It has to be committed to be visible in a query + client.wl_storage.commit_tx(); + client.wl_storage.commit_block().unwrap(); + // ... there should be the same value now + let read_balance = RPC + .shell() + .storage_value(&client, None, None, false, &balance_key) + .await + .unwrap(); + assert_eq!( + balance, + token::Amount::try_from_slice(&read_balance.data).unwrap() + ); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = RPC + .shell() + .storage_prefix(&client, None, None, false, &balance_prefix) + .await + .unwrap(); + assert_eq!(read_balances.data.len(), 1); + + // Request storage has key + let has_balance_key = RPC + .shell() + .storage_has_key(&client, &balance_key) + .await + .unwrap(); + assert!(has_balance_key); + + Ok(()) + } +} diff --git a/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs index 5efea15ac5..6d07f5a613 100644 --- a/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs +++ b/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -641,7 +641,8 @@ where mod test_bridge_pool_vp { use std::env::temp_dir; - use borsh::BorshSerialize; + use 
borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::eth_bridge::storage::bridge_pool::get_signed_root_key; use namada_core::ledger::gas::TxGasMeter; use namada_core::types::address; @@ -756,11 +757,11 @@ mod test_bridge_pool_vp { let mut writelog = WriteLog::default(); // setup the initial bridge pool storage writelog - .write(&get_signed_root_key(), Hash([0; 32]).try_to_vec().unwrap()) + .write(&get_signed_root_key(), Hash([0; 32]).serialize_to_vec()) .expect("Test failed"); let transfer = initial_pool(); writelog - .write(&get_pending_key(&transfer), transfer.try_to_vec().unwrap()) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .expect("Test failed"); // whitelist wnam let key = whitelist::Key { @@ -769,7 +770,7 @@ mod test_bridge_pool_vp { } .into(); writelog - .write(&key, true.try_to_vec().unwrap()) + .write(&key, true.serialize_to_vec()) .expect("Test failed"); let key = whitelist::Key { asset: wnam(), @@ -777,7 +778,7 @@ mod test_bridge_pool_vp { } .into(); writelog - .write(&key, Amount::max().try_to_vec().unwrap()) + .write(&key, Amount::max().serialize_to_vec()) .expect("Test failed"); // set up users with ERC20 and NUT balances update_balances( @@ -843,10 +844,7 @@ mod test_bridge_pool_vp { // write the changes to the log let account_key = balance_key(&nam(), &balance.owner); write_log - .write( - &account_key, - updated_balance.try_to_vec().expect("Test failed"), - ) + .write(&account_key, updated_balance.serialize_to_vec()) .expect("Test failed"); // changed keys @@ -889,10 +887,10 @@ mod test_bridge_pool_vp { // write the changes to the log write_log - .write(&account_key, new_gas_balance.try_to_vec().unwrap()) + .write(&account_key, new_gas_balance.serialize_to_vec()) .expect("Test failed"); write_log - .write(&token_key, new_token_balance.try_to_vec().unwrap()) + .write(&token_key, new_token_balance.serialize_to_vec()) .expect("Test failed"); // return the keys changed @@ -1059,7 +1057,7 @@ mod 
test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1080,7 +1078,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1101,7 +1099,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1122,7 +1120,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1144,7 +1142,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1165,7 +1163,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1186,7 +1184,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1207,7 +1205,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ -1254,7 +1252,7 @@ mod test_bridge_pool_vp { payer: bertha_address(), }, }; - log.write(&get_pending_key(transfer), t.try_to_vec().unwrap()) + log.write(&get_pending_key(transfer), t.serialize_to_vec()) 
.unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, @@ -1286,7 +1284,7 @@ mod test_bridge_pool_vp { payer: bertha_address(), }, }; - log.write(&get_pending_key(&t), transfer.try_to_vec().unwrap()) + log.write(&get_pending_key(&t), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, @@ -1306,7 +1304,7 @@ mod test_bridge_pool_vp { |transfer, log| { log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([ @@ -1333,10 +1331,7 @@ mod test_bridge_pool_vp { let mut keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1418,10 +1413,7 @@ mod test_bridge_pool_vp { let mut keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1486,10 +1478,7 @@ mod test_bridge_pool_vp { let mut keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1500,9 +1489,7 @@ mod test_bridge_pool_vp { .write_log .write( &account_key, - Amount::from(BERTHA_WEALTH - 200) - .try_to_vec() - .expect("Test failed"), + Amount::from(BERTHA_WEALTH - 200).serialize_to_vec(), ) .expect("Test failed"); assert!(keys_changed.insert(account_key)); @@ -1511,9 +1498,7 @@ mod test_bridge_pool_vp { .write_log .write( &bp_account_key, - Amount::from(ESCROWED_AMOUNT + 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); 
assert!(keys_changed.insert(bp_account_key)); @@ -1521,9 +1506,7 @@ mod test_bridge_pool_vp { .write_log .write( &eb_account_key, - Amount::from(ESCROWED_AMOUNT + 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); assert!(keys_changed.insert(eb_account_key)); @@ -1580,10 +1563,7 @@ mod test_bridge_pool_vp { let keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1594,9 +1574,7 @@ mod test_bridge_pool_vp { .write_log .write( &account_key, - Amount::from(BERTHA_WEALTH - 200) - .try_to_vec() - .expect("Test failed"), + Amount::from(BERTHA_WEALTH - 200).serialize_to_vec(), ) .expect("Test failed"); let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); @@ -1604,17 +1582,12 @@ mod test_bridge_pool_vp { .write_log .write( &bp_account_key, - Amount::from(ESCROWED_AMOUNT + 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); wl_storage .write_log - .write( - &eb_account_key, - Amount::from(10).try_to_vec().expect("Test failed"), - ) + .write(&eb_account_key, Amount::from(10).serialize_to_vec()) .expect("Test failed"); let verifiers = BTreeSet::default(); @@ -1649,10 +1622,7 @@ mod test_bridge_pool_vp { let eb_account_key = balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); wl_storage - .write_bytes( - &eb_account_key, - Amount::default().try_to_vec().expect("Test failed"), - ) + .write_bytes(&eb_account_key, Amount::default().serialize_to_vec()) .expect("Test failed"); // initialize the gas payers account let gas_payer_balance_key = @@ -1660,9 +1630,7 @@ mod test_bridge_pool_vp { wl_storage .write_bytes( &gas_payer_balance_key, - Amount::from(BERTHA_WEALTH) - .try_to_vec() - .expect("Test failed"), 
+ Amount::from(BERTHA_WEALTH).serialize_to_vec(), ) .expect("Test failed"); wl_storage.write_log.commit_tx(); @@ -1688,10 +1656,7 @@ mod test_bridge_pool_vp { let keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1702,18 +1667,14 @@ mod test_bridge_pool_vp { .write_log .write( &account_key, - Amount::from(BERTHA_WEALTH - 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(BERTHA_WEALTH - 100).serialize_to_vec(), ) .expect("Test failed"); wl_storage .write_log .write( &gas_payer_balance_key, - Amount::from(BERTHA_WEALTH - 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(BERTHA_WEALTH - 100).serialize_to_vec(), ) .expect("Test failed"); let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); @@ -1721,17 +1682,12 @@ mod test_bridge_pool_vp { .write_log .write( &bp_account_key, - Amount::from(ESCROWED_AMOUNT + 100) - .try_to_vec() - .expect("Test failed"), + Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); wl_storage .write_log - .write( - &eb_account_key, - Amount::from(10).try_to_vec().expect("Test failed"), - ) + .write(&eb_account_key, Amount::from(10).serialize_to_vec()) .expect("Test failed"); let verifiers = BTreeSet::default(); // create the data to be given to the vp @@ -1780,10 +1736,7 @@ mod test_bridge_pool_vp { let mut keys_changed = { wl_storage .write_log - .write( - &get_pending_key(&transfer), - transfer.try_to_vec().unwrap(), - ) + .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) }; @@ -1868,7 +1821,7 @@ mod test_bridge_pool_vp { transfer.transfer.asset = wnam(); log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) @@ 
-1890,7 +1843,7 @@ mod test_bridge_pool_vp { transfer.transfer.asset = wnam(); log.write( &get_pending_key(transfer), - transfer.try_to_vec().unwrap(), + transfer.serialize_to_vec(), ) .unwrap(); BTreeSet::from([get_pending_key(transfer)]) diff --git a/shared/src/ledger/native_vp/ethereum_bridge/nut.rs b/shared/src/ledger/native_vp/ethereum_bridge/nut.rs index 1f7f313521..6dbb79d788 100644 --- a/shared/src/ledger/native_vp/ethereum_bridge/nut.rs +++ b/shared/src/ledger/native_vp/ethereum_bridge/nut.rs @@ -121,7 +121,7 @@ mod test_nuts { use std::env::temp_dir; use assert_matches::assert_matches; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::storage::testing::TestWlStorage; use namada_core::ledger::storage_api::StorageWrite; use namada_core::types::address::testing::arb_non_internal_address; @@ -157,13 +157,13 @@ mod test_nuts { wl.write_log .write( &src_balance_key, - Amount::from(100_u64).try_to_vec().expect("Test failed"), + Amount::from(100_u64).serialize_to_vec(), ) .expect("Test failed"); wl.write_log .write( &dst_balance_key, - Amount::from(200_u64).try_to_vec().expect("Test failed"), + Amount::from(200_u64).serialize_to_vec(), ) .expect("Test failed"); diff --git a/shared/src/ledger/native_vp/ethereum_bridge/vp.rs b/shared/src/ledger/native_vp/ethereum_bridge/vp.rs index 4d006229a1..9f5f6dd19c 100644 --- a/shared/src/ledger/native_vp/ethereum_bridge/vp.rs +++ b/shared/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -164,7 +164,7 @@ mod tests { use std::default::Default; use std::env::temp_dir; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::eth_bridge; use namada_core::ledger::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; @@ -220,8 +220,7 @@ mod tests { .write_bytes( &balance_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) 
.expect("Test failed"); @@ -365,8 +364,7 @@ mod tests { .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); @@ -379,8 +377,7 @@ mod tests { Amount::from( BRIDGE_POOL_ESCROW_INITIAL_BALANCE + ESCROW_AMOUNT, ) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); @@ -417,8 +414,7 @@ mod tests { .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); @@ -429,8 +425,7 @@ mod tests { .write( &escrow_key, Amount::from(BRIDGE_POOL_ESCROW_INITIAL_BALANCE) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); @@ -468,8 +463,7 @@ mod tests { .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); @@ -482,8 +476,7 @@ mod tests { Amount::from( BRIDGE_POOL_ESCROW_INITIAL_BALANCE + ESCROW_AMOUNT, ) - .try_to_vec() - .expect("Test failed"), + .serialize_to_vec(), ) .expect("Test failed"); diff --git a/shared/src/ledger/native_vp/ibc/context.rs b/shared/src/ledger/native_vp/ibc/context.rs index 5af2afebf7..7926f3d838 100644 --- a/shared/src/ledger/native_vp/ibc/context.rs +++ b/shared/src/ledger/native_vp/ibc/context.rs @@ -2,16 +2,22 @@ use std::collections::{BTreeSet, HashMap, HashSet}; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; +use masp_primitives::transaction::Transaction; use namada_core::ledger::ibc::storage::is_ibc_key; use namada_core::ledger::ibc::{IbcCommonContext, IbcStorageContext}; use namada_core::ledger::storage::write_log::StorageModification; use namada_core::ledger::storage::{self as ledger_storage, StorageHasher}; use namada_core::ledger::storage_api::StorageRead; -use namada_core::types::address::{Address, 
InternalAddress}; -use namada_core::types::ibc::IbcEvent; -use namada_core::types::storage::{BlockHeight, Header, Key}; -use namada_core::types::token::{self, Amount, DenominatedAmount}; +use namada_core::types::address::{self, Address, InternalAddress}; +use namada_core::types::ibc::{IbcEvent, IbcShieldedTransfer}; +use namada_core::types::storage::{ + BlockHeight, Epoch, Header, Key, KeySeg, TxIndex, +}; +use namada_core::types::token::{ + self, Amount, DenominatedAmount, Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, + TX_KEY_PREFIX, +}; use super::Error; use crate::ledger::native_vp::CtxPreStorageRead; @@ -118,16 +124,16 @@ where Ok(()) } - fn get_ibc_event( + fn get_ibc_events( &self, event_type: impl AsRef, - ) -> Result, Self::Error> { - for event in &self.event { - if event.event_type == *event_type.as_ref() { - return Ok(Some(event.clone())); - } - } - Ok(None) + ) -> Result, Self::Error> { + Ok(self + .event + .iter() + .filter(|event| event.event_type == *event_type.as_ref()) + .cloned() + .collect()) } fn transfer_token( @@ -150,14 +156,43 @@ where .unwrap_or_default(); dest_bal.receive(&amount.amount); - self.write( - &src_key, - src_bal.try_to_vec().expect("encoding shouldn't failed"), - )?; - self.write( - &dest_key, - dest_bal.try_to_vec().expect("encoding shouldn't failed"), - ) + self.write(&src_key, src_bal.serialize_to_vec())?; + self.write(&dest_key, dest_bal.serialize_to_vec()) + } + + fn handle_masp_tx( + &mut self, + shielded: &IbcShieldedTransfer, + ) -> Result<(), Self::Error> { + let masp_addr = address::masp(); + let head_tx_key = Key::from(masp_addr.to_db_key()) + .push(&HEAD_TX_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let current_tx_idx: u64 = + self.ctx.read(&head_tx_key).unwrap_or(None).unwrap_or(0); + let current_tx_key = Key::from(masp_addr.to_db_key()) + .push(&(TX_KEY_PREFIX.to_owned() + ¤t_tx_idx.to_string())) + .expect("Cannot obtain a storage key"); + // Save the Transfer object and its location within the 
blockchain + // so that clients do not have to separately look these + // up + let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( + self.ctx.get_block_epoch().map_err(Error::NativeVpError)?, + self.ctx.get_block_height().map_err(Error::NativeVpError)?, + self.ctx.get_tx_index().map_err(Error::NativeVpError)?, + shielded.transfer.clone(), + shielded.masp_tx.clone(), + ); + self.write(¤t_tx_key, record.serialize_to_vec())?; + self.write(&head_tx_key, (current_tx_idx + 1).serialize_to_vec())?; + // If storage key has been supplied, then pin this transaction to it + if let Some(key) = &shielded.transfer.key { + let pin_key = Key::from(masp_addr.to_db_key()) + .push(&(PIN_KEY_PREFIX.to_owned() + key)) + .expect("Cannot obtain a storage key"); + self.write(&pin_key, current_tx_idx.serialize_to_vec())?; + } + Ok(()) } fn mint_token( @@ -182,21 +217,13 @@ where .unwrap_or_default(); minted_bal.receive(&amount.amount); - self.write( - &target_key, - target_bal.try_to_vec().expect("encoding shouldn't failed"), - )?; - self.write( - &minted_key, - minted_bal.try_to_vec().expect("encoding shouldn't failed"), - )?; + self.write(&target_key, target_bal.serialize_to_vec())?; + self.write(&minted_key, minted_bal.serialize_to_vec())?; let minter_key = token::minter_key(token); self.write( &minter_key, - Address::Internal(InternalAddress::Ibc) - .try_to_vec() - .expect("encoding shouldn't failed"), + Address::Internal(InternalAddress::Ibc).serialize_to_vec(), ) } @@ -222,14 +249,8 @@ where .unwrap_or_default(); minted_bal.spend(&amount.amount); - self.write( - &target_key, - target_bal.try_to_vec().expect("encoding shouldn't failed"), - )?; - self.write( - &minted_key, - minted_bal.try_to_vec().expect("encoding shouldn't failed"), - ) + self.write(&target_key, target_bal.serialize_to_vec())?; + self.write(&minted_key, minted_bal.serialize_to_vec()) } /// Get the current height of this chain @@ -327,10 +348,10 @@ where unimplemented!("Validation doesn't emit an 
event") } - fn get_ibc_event( + fn get_ibc_events( &self, _event_type: impl AsRef, - ) -> Result, Self::Error> { + ) -> Result, Self::Error> { unimplemented!("Validation doesn't get an event") } @@ -344,6 +365,13 @@ where unimplemented!("Validation doesn't transfer") } + fn handle_masp_tx( + &mut self, + _shielded: &IbcShieldedTransfer, + ) -> Result<(), Self::Error> { + unimplemented!("Validation doesn't handle a masp tx") + } + fn mint_token( &mut self, _target: &Address, diff --git a/shared/src/ledger/native_vp/ibc/mod.rs b/shared/src/ledger/native_vp/ibc/mod.rs index 3b6521905b..b0901f6fec 100644 --- a/shared/src/ledger/native_vp/ibc/mod.rs +++ b/shared/src/ledger/native_vp/ibc/mod.rs @@ -169,7 +169,7 @@ where fn validate_denom(&self, keys_changed: &BTreeSet) -> VpResult<()> { for key in keys_changed { - if let Some(hash) = is_ibc_denom_key(key) { + if let Some((_, hash)) = is_ibc_denom_key(key) { match self.ctx.read_post::(key).map_err(|e| { Error::Denom(format!( "Getting the denom failed: Key {}, Error {}", @@ -257,6 +257,9 @@ pub fn get_dummy_genesis_validator() let consensus_sk = common_sk_from_simple_seed(0); let consensus_key = consensus_sk.to_public(); + let protocol_sk = common_sk_from_simple_seed(1); + let protocol_key = protocol_sk.to_public(); + let commission_rate = Dec::new(1, 1).expect("expected 0.1 to be a valid decimal"); let max_commission_rate_change = @@ -278,6 +281,7 @@ pub fn get_dummy_genesis_validator() address, tokens, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, @@ -291,8 +295,9 @@ mod tests { use std::convert::TryFrom; use std::str::FromStr; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::gas::TxGasMeter; + use namada_core::ledger::governance::parameters::GovernanceParameters; use prost::Message; use sha2::Digest; @@ -409,6 +414,8 @@ mod tests { // initialize the storage ibc::init_genesis_storage(&mut wl_storage); + let gov_params = GovernanceParameters::default(); 
+ gov_params.init_storage(&mut wl_storage).unwrap(); pos::init_genesis_storage( &mut wl_storage, &PosParams::default(), @@ -423,7 +430,7 @@ mod tests { }; wl_storage .write_log - .write(&epoch_duration_key, epoch_duration.try_to_vec().unwrap()) + .write(&epoch_duration_key, epoch_duration.serialize_to_vec()) .expect("write failed"); // max_expected_time_per_block let time = DurationSecs::from(Duration::new(60, 0)); @@ -724,7 +731,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1004,7 +1011,7 @@ mod tests { // client connection list let client_conn_key = client_connections_key(&msg.client_id_on_a); let conn_list = conn_id.to_string(); - let bytes = conn_list.try_to_vec().expect("encoding failed"); + let bytes = conn_list.serialize_to_vec(); wl_storage .write_log .write(&client_conn_key, bytes) @@ -1037,7 +1044,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1115,7 +1122,7 @@ mod tests { // client connection list let client_conn_key = client_connections_key(&msg.client_id_on_a); let conn_list = conn_id.to_string(); - let bytes = conn_list.try_to_vec().expect("encoding failed"); + let bytes = conn_list.serialize_to_vec(); wl_storage .write_log .write(&client_conn_key, bytes) @@ -1230,7 +1237,7 @@ mod tests { // client connection list let client_conn_key = client_connections_key(&msg.client_id_on_b); let conn_list = conn_id.to_string(); - let bytes = conn_list.try_to_vec().expect("encoding failed"); + let bytes = conn_list.serialize_to_vec(); wl_storage .write_log 
.write(&client_conn_key, bytes) @@ -1371,7 +1378,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1459,7 +1466,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1584,7 +1591,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1708,7 +1715,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1817,7 +1824,7 @@ mod tests { outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( - vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], + vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); @@ -1981,7 +1988,7 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&balance_key, amount.try_to_vec().unwrap()) + .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); wl_storage.write_log.commit_tx(); wl_storage.commit_block().expect("commit failed"); @@ -2191,8 +2198,15 @@ mod tests { packet.chan_id_on_b.clone(), )); let trace_hash = 
calc_hash(coin.denom.to_string()); - let denom_key = ibc_denom_key(&trace_hash); - let bytes = coin.denom.to_string().try_to_vec().unwrap(); + let denom_key = ibc_denom_key(receiver.to_string(), &trace_hash); + let bytes = coin.denom.to_string().serialize_to_vec(); + wl_storage + .write_log + .write(&denom_key, bytes) + .expect("write failed"); + keys_changed.insert(denom_key); + let denom_key = ibc_denom_key(nam().to_string(), &trace_hash); + let bytes = coin.denom.to_string().serialize_to_vec(); wl_storage .write_log .write(&denom_key, bytes) @@ -2455,7 +2469,7 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&balance_key, amount.try_to_vec().unwrap()) + .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); // commitment let transfer_msg = MsgTransfer { @@ -2606,7 +2620,7 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&balance_key, amount.try_to_vec().unwrap()) + .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); // commitment let sender = established_address_1(); diff --git a/shared/src/ledger/native_vp/mod.rs b/shared/src/ledger/native_vp/mod.rs index 31148a1568..39a6b65bb5 100644 --- a/shared/src/ledger/native_vp/mod.rs +++ b/shared/src/ledger/native_vp/mod.rs @@ -5,7 +5,6 @@ pub mod ethereum_bridge; pub mod ibc; pub mod multitoken; pub mod parameters; -pub mod replay_protection; use std::cell::RefCell; use std::collections::BTreeSet; @@ -23,6 +22,7 @@ use crate::ledger::storage::{Storage, StorageHasher}; use crate::proto::Tx; use crate::types::address::Address; use crate::types::hash::Hash; +use crate::types::ibc::IbcEvent; use crate::types::storage::{ BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, }; @@ -449,6 +449,18 @@ where .into_storage_result() } + fn get_ibc_events( + &self, + event_type: String, + ) -> Result, storage_api::Error> { + vp_host_fns::get_ibc_events( + &mut self.gas_meter.borrow_mut(), + self.write_log, + event_type, + 
) + .into_storage_result() + } + fn iter_prefix<'iter>( &'iter self, prefix: &Key, diff --git a/shared/src/ledger/native_vp/multitoken.rs b/shared/src/ledger/native_vp/multitoken.rs index 564024fd8f..d3782c9548 100644 --- a/shared/src/ledger/native_vp/multitoken.rs +++ b/shared/src/ledger/native_vp/multitoken.rs @@ -139,7 +139,7 @@ where mod tests { use std::collections::BTreeSet; - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use namada_core::ledger::gas::TxGasMeter; use super::*; @@ -187,14 +187,14 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .storage - .write(&sender_key, amount.try_to_vec().unwrap()) + .write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); // transfer 10 let amount = Amount::native_whole(90); wl_storage .write_log - .write(&sender_key, amount.try_to_vec().unwrap()) + .write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(sender_key); let receiver = established_address_2(); @@ -202,7 +202,7 @@ mod tests { let amount = Amount::native_whole(10); wl_storage .write_log - .write(&receiver_key, amount.try_to_vec().unwrap()) + .write(&receiver_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(receiver_key); @@ -243,14 +243,14 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .storage - .write(&sender_key, amount.try_to_vec().unwrap()) + .write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); // transfer 10 let amount = Amount::native_whole(90); wl_storage .write_log - .write(&sender_key, amount.try_to_vec().unwrap()) + .write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(sender_key); let receiver = established_address_2(); @@ -259,7 +259,7 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&receiver_key, amount.try_to_vec().unwrap()) + .write(&receiver_key, amount.serialize_to_vec()) .expect("write failed"); 
keys_changed.insert(receiver_key); @@ -303,14 +303,14 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&target_key, amount.try_to_vec().unwrap()) + .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); wl_storage .write_log - .write(&minted_key, amount.try_to_vec().unwrap()) + .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -319,7 +319,7 @@ mod tests { let minter_key = minter_key(&token); wl_storage .write_log - .write(&minter_key, minter.try_to_vec().unwrap()) + .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); @@ -363,14 +363,14 @@ mod tests { let amount = Amount::native_whole(1000); wl_storage .write_log - .write(&target_key, amount.try_to_vec().unwrap()) + .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&nam()); let amount = Amount::native_whole(100); wl_storage .write_log - .write(&minted_key, amount.try_to_vec().unwrap()) + .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -379,7 +379,7 @@ mod tests { let minter_key = minter_key(&nam()); wl_storage .write_log - .write(&minter_key, minter.try_to_vec().unwrap()) + .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); @@ -425,14 +425,14 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&target_key, amount.try_to_vec().unwrap()) + .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); wl_storage .write_log - .write(&minted_key, amount.try_to_vec().unwrap()) + 
.write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -478,14 +478,14 @@ mod tests { let amount = Amount::native_whole(100); wl_storage .write_log - .write(&target_key, amount.try_to_vec().unwrap()) + .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); wl_storage .write_log - .write(&minted_key, amount.try_to_vec().unwrap()) + .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -494,7 +494,7 @@ mod tests { let minter_key = minter_key(&token); wl_storage .write_log - .write(&minter_key, minter.try_to_vec().unwrap()) + .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); @@ -535,7 +535,7 @@ mod tests { let minter = established_address_1(); wl_storage .write_log - .write(&minter_key, minter.try_to_vec().unwrap()) + .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); @@ -580,7 +580,7 @@ mod tests { .unwrap(); wl_storage .write_log - .write(&key, 0.try_to_vec().unwrap()) + .write(&key, 0.serialize_to_vec()) .expect("write failed"); keys_changed.insert(key); diff --git a/shared/src/ledger/native_vp/replay_protection.rs b/shared/src/ledger/native_vp/replay_protection.rs deleted file mode 100644 index a2a2a66f36..0000000000 --- a/shared/src/ledger/native_vp/replay_protection.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! 
Native VP for replay protection - -use std::collections::BTreeSet; - -use namada_core::ledger::storage; -use namada_core::types::address::Address; -use namada_core::types::storage::Key; -use thiserror::Error; - -use crate::ledger::native_vp::{self, Ctx, NativeVp}; -use crate::proto::Tx; -use crate::vm::WasmCacheAccess; - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("Native VP error: {0}")] - NativeVpError(#[from] native_vp::Error), -} - -/// ReplayProtection functions result -pub type Result = std::result::Result; - -/// Replay Protection VP -pub struct ReplayProtectionVp<'a, DB, H, CA> -where - DB: storage::DB + for<'iter> storage::DBIter<'iter>, - H: storage::StorageHasher, - CA: WasmCacheAccess, -{ - /// Context to interact with the host structures. - pub ctx: Ctx<'a, DB, H, CA>, -} - -impl<'a, DB, H, CA> NativeVp for ReplayProtectionVp<'a, DB, H, CA> -where - DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, - H: 'static + storage::StorageHasher, - CA: 'static + WasmCacheAccess, -{ - type Error = Error; - - fn validate_tx( - &self, - _tx_data: &Tx, - _keys_changed: &BTreeSet, - _verifiers: &BTreeSet
, - ) -> Result { - // VP should prevent any modification of the subspace. - // Changes are only allowed from protocol - Ok(false) - } -} diff --git a/shared/src/ledger/pos/mod.rs b/shared/src/ledger/pos/mod.rs index a61a721e4f..d47e5cc884 100644 --- a/shared/src/ledger/pos/mod.rs +++ b/shared/src/ledger/pos/mod.rs @@ -11,7 +11,7 @@ pub use namada_core::types::dec::Dec; pub use namada_core::types::key::common; pub use namada_core::types::token; pub use namada_proof_of_stake; -pub use namada_proof_of_stake::parameters::PosParams; +pub use namada_proof_of_stake::parameters::{OwnedPosParams, PosParams}; pub use namada_proof_of_stake::pos_queries::*; pub use namada_proof_of_stake::storage::*; pub use namada_proof_of_stake::{staking_token_address, types}; @@ -42,7 +42,7 @@ pub fn into_tm_voting_power( /// Initialize storage in the genesis block. pub fn init_genesis_storage( storage: &mut S, - params: &PosParams, + params: &OwnedPosParams, validators: impl Iterator + Clone, current_epoch: Epoch, ) where diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index a23b026eea..b8a9902ac5 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -2,12 +2,12 @@ use std::collections::BTreeSet; use std::panic; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use eyre::{eyre, WrapErr}; use masp_primitives::transaction::Transaction; use namada_core::ledger::gas::TxGasMeter; use namada_core::ledger::storage::wl_storage::WriteLogAndStorage; -use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; +use namada_core::ledger::storage_api::StorageRead; use namada_core::proto::Section; use namada_core::types::hash::Hash; use namada_core::types::storage::Key; @@ -24,7 +24,6 @@ use crate::ledger::native_vp::ethereum_bridge::vp::EthBridge; use crate::ledger::native_vp::ibc::Ibc; use crate::ledger::native_vp::multitoken::MultitokenVp; use crate::ledger::native_vp::parameters::{self, ParametersVp}; -use 
crate::ledger::native_vp::replay_protection::ReplayProtectionVp; use crate::ledger::native_vp::{self, NativeVp}; use crate::ledger::pgf::PgfVp; use crate::ledger::pos::{self, PosVP}; @@ -33,10 +32,10 @@ use crate::ledger::storage::{DBIter, Storage, StorageHasher, WlStorage, DB}; use crate::ledger::{replay_protection, storage_api}; use crate::proto::{self, Tx}; use crate::types::address::{Address, InternalAddress}; +use crate::types::storage; use crate::types::storage::TxIndex; use crate::types::transaction::protocol::{EthereumTxData, ProtocolTxType}; use crate::types::transaction::{DecryptedTx, TxResult, TxType, VpsResult}; -use crate::types::{hash, storage}; use crate::vm::wasm::{TxCache, VpCache}; use crate::vm::{self, wasm, WasmCacheAccess}; @@ -83,10 +82,6 @@ pub enum Error { EthBridgeNativeVpError(native_vp::ethereum_bridge::vp::Error), #[error("Ethereum bridge pool native VP error: {0}")] BridgePoolNativeVpError(native_vp::ethereum_bridge::bridge_pool_vp::Error), - #[error("Replay protection native VP error: {0}")] - ReplayProtectionNativeVpError( - crate::ledger::native_vp::replay_protection::Error, - ), #[error("Non usable tokens native VP error: {0}")] NutNativeVpError(native_vp::ethereum_bridge::nut::Error), #[error("Access to an internal address {0} is forbidden")] @@ -169,9 +164,12 @@ where apply_protocol_tx(protocol_tx.tx, tx.data(), wl_storage) } TxType::Wrapper(ref wrapper) => { + let fee_unshielding_transaction = + get_fee_unshielding_transaction(&tx, wrapper); let changed_keys = apply_wrapper_tx( + tx, wrapper, - get_fee_unshielding_transaction(&tx, wrapper), + fee_unshielding_transaction, tx_bytes, ShellParams { tx_gas_meter, @@ -212,12 +210,14 @@ where } /// Performs the required operation on a wrapper transaction: -/// - replay protection /// - fee payment /// - gas accounting +/// - replay protection /// -/// Returns the set of changed storage keys. +/// Returns the set of changed storage keys. 
The caller should write the hash of +/// the wrapper header to storage in case of failure. pub(crate) fn apply_wrapper_tx<'a, D, H, CA, WLS>( + tx: Tx, wrapper: &WrapperTx, fee_unshield_transaction: Option, tx_bytes: &[u8], @@ -231,18 +231,6 @@ where WLS: WriteLogAndStorage, { let mut changed_keys = BTreeSet::default(); - let mut tx: Tx = tx_bytes.try_into().unwrap(); - - // Writes wrapper tx hash to block write log (changes must be persisted even - // in case of failure) - let wrapper_hash_key = replay_protection::get_replay_protection_key( - &hash::Hash(tx.header_hash().0), - ); - shell_params - .wl_storage - .write(&wrapper_hash_key, ()) - .expect("Error while writing tx hash to storage"); - changed_keys.insert(wrapper_hash_key); // Charge fee before performing any fallible operations charge_fee( @@ -257,14 +245,13 @@ where shell_params.tx_gas_meter.add_tx_size_gas(tx_bytes)?; // If wrapper was succesful, write inner tx hash to storage - let inner_hash_key = replay_protection::get_replay_protection_key( - &hash::Hash(tx.update_header(TxType::Raw).header_hash().0), - ); shell_params .wl_storage - .write(&inner_hash_key, ()) + .write_tx_hash(tx.raw_header_hash()) .expect("Error while writing tx hash to storage"); - changed_keys.insert(inner_hash_key); + changed_keys.insert(replay_protection::get_replay_protection_last_key( + &tx.raw_header_hash(), + )); Ok(changed_keys) } @@ -510,12 +497,12 @@ where Some(new_dest_balance) => { wl_storage .write_log_mut() - .write(&src_key, new_src_balance.try_to_vec().unwrap()) + .write(&src_key, new_src_balance.serialize_to_vec()) .map_err(|e| Error::FeeError(e.to_string()))?; - match wl_storage.write_log_mut().write( - &dest_key, - new_dest_balance.try_to_vec().unwrap(), - ) { + match wl_storage + .write_log_mut() + .write(&dest_key, new_dest_balance.serialize_to_vec()) + { Ok(_) => Ok(()), Err(e) => Err(Error::FeeError(e.to_string())), } @@ -934,16 +921,6 @@ where gas_meter = bridge_pool.ctx.gas_meter.into_inner(); result } - 
InternalAddress::ReplayProtection => { - let replay_protection_vp = - ReplayProtectionVp { ctx }; - let result = replay_protection_vp - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::ReplayProtectionNativeVpError); - gas_meter = - replay_protection_vp.ctx.gas_meter.into_inner(); - result - } InternalAddress::Pgf => { let pgf_vp = PgfVp { ctx }; let result = pgf_vp diff --git a/shared/src/ledger/queries/mod.rs b/shared/src/ledger/queries/mod.rs deleted file mode 100644 index e78313d804..0000000000 --- a/shared/src/ledger/queries/mod.rs +++ /dev/null @@ -1,222 +0,0 @@ -//! Ledger read-only queries can be handled and dispatched via the [`RPC`] -//! defined via `router!` macro. - -// Re-export to show in rustdoc! -pub use shell::Shell; -use shell::SHELL; -pub use types::{ - EncodedResponseQuery, Error, RequestCtx, RequestQuery, ResponseQuery, - Router, -}; -use vp::{Vp, VP}; - -pub use self::shell::eth_bridge::{ - Erc20FlowControl, GenBridgePoolProofReq, GenBridgePoolProofRsp, - TransferToErcArgs, -}; -use super::storage::traits::StorageHasher; -use super::storage::{DBIter, DB}; -use super::storage_api; -#[cfg(any(test, feature = "async-client"))] -pub use crate::sdk::queries::Client; -use crate::types::storage::BlockHeight; - -#[macro_use] -mod router; -mod shell; -mod types; -pub mod vp; - -// Most commonly expected patterns should be declared first -router! {RPC, - // Shell provides storage read access, block metadata and can dry-run a tx - ( "shell" ) = (sub SHELL), - - // Validity-predicate's specific storage queries - ( "vp" ) = (sub VP), -} - -/// Handle RPC query request in the ledger. On success, returns response with -/// borsh-encoded data. 
-pub fn handle_path( - ctx: RequestCtx<'_, D, H>, - request: &RequestQuery, -) -> storage_api::Result -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - RPC.handle(ctx, request) -} - -// Handler helpers: - -/// For queries that only support latest height, check that the given height is -/// not different from latest height, otherwise return an error. -pub fn require_latest_height( - ctx: &RequestCtx<'_, D, H>, - request: &RequestQuery, -) -> storage_api::Result<()> -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - if request.height != BlockHeight(0) - && request.height != ctx.wl_storage.storage.get_last_block_height() - { - return Err(storage_api::Error::new_const( - "This query doesn't support arbitrary block heights, only the \ - latest committed block height ('0' can be used as a special \ - value that means the latest block height)", - )); - } - Ok(()) -} - -/// For queries that do not support proofs, check that proof is not requested, -/// otherwise return an error. -pub fn require_no_proof(request: &RequestQuery) -> storage_api::Result<()> { - if request.prove { - return Err(storage_api::Error::new_const( - "This query doesn't support proofs", - )); - } - Ok(()) -} - -/// For queries that don't use request data, require that there are no data -/// attached. 
-pub fn require_no_data(request: &RequestQuery) -> storage_api::Result<()> { - if !request.data.is_empty() { - return Err(storage_api::Error::new_const( - "This query doesn't accept request data", - )); - } - Ok(()) -} - -/// Queries testing helpers -#[cfg(any(test, feature = "testing"))] -mod testing { - - use tempfile::TempDir; - use tendermint_rpc::Response; - - use super::*; - use crate::ledger::events::log::EventLog; - use crate::ledger::storage::testing::TestWlStorage; - use crate::tendermint_rpc::error::Error as RpcError; - use crate::types::storage::BlockHeight; - use crate::vm::wasm::{self, TxCache, VpCache}; - use crate::vm::WasmCacheRoAccess; - - /// A test client that has direct access to the storage - pub struct TestClient - where - RPC: Router, - { - /// RPC router - pub rpc: RPC, - /// storage - pub wl_storage: TestWlStorage, - /// event log - pub event_log: EventLog, - /// VP wasm compilation cache - pub vp_wasm_cache: VpCache, - /// tx wasm compilation cache - pub tx_wasm_cache: TxCache, - /// VP wasm compilation cache directory - pub vp_cache_dir: TempDir, - /// tx wasm compilation cache directory - pub tx_cache_dir: TempDir, - } - - impl TestClient - where - RPC: Router, - { - #[allow(dead_code)] - /// Initialize a test client for the given root RPC router - pub fn new(rpc: RPC) -> Self { - // Initialize the `TestClient` - let mut wl_storage = TestWlStorage::default(); - - // Initialize mock gas limit - let max_block_gas_key = - namada_core::ledger::parameters::storage::get_max_block_gas_key( - ); - wl_storage - .storage - .write( - &max_block_gas_key, - namada_core::ledger::storage::types::encode( - &20_000_000_u64, - ), - ) - .expect( - "Max block gas parameter must be initialized in storage", - ); - let event_log = EventLog::default(); - let (vp_wasm_cache, vp_cache_dir) = - wasm::compilation_cache::common::testing::cache(); - let (tx_wasm_cache, tx_cache_dir) = - wasm::compilation_cache::common::testing::cache(); - Self { - rpc, - wl_storage, 
- event_log, - vp_wasm_cache: vp_wasm_cache.read_only(), - tx_wasm_cache: tx_wasm_cache.read_only(), - vp_cache_dir, - tx_cache_dir, - } - } - } - - #[cfg_attr(feature = "async-send", async_trait::async_trait)] - #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] - impl Client for TestClient - where - RPC: Router + Sync, - { - type Error = std::io::Error; - - async fn request( - &self, - path: String, - data: Option>, - height: Option, - prove: bool, - ) -> Result { - let data = data.unwrap_or_default(); - let height = height.unwrap_or_default(); - // Handle a path by invoking the `RPC.handle` directly with the - // borrowed storage - let request = RequestQuery { - data, - path, - height, - prove, - }; - let ctx = RequestCtx { - wl_storage: &self.wl_storage, - event_log: &self.event_log, - vp_wasm_cache: self.vp_wasm_cache.clone(), - tx_wasm_cache: self.tx_wasm_cache.clone(), - storage_read_past_height_limit: None, - }; - // TODO: this is a hack to propagate errors to the caller, we should - // really permit error types other than [`std::io::Error`] - self.rpc.handle(ctx, &request).map_err(|err| { - std::io::Error::new(std::io::ErrorKind::Other, err.to_string()) - }) - } - - async fn perform(&self, _request: R) -> Result - where - R: tendermint_rpc::SimpleRequest, - { - Response::from_string("TODO") - } - } -} diff --git a/shared/src/ledger/vp_host_fns.rs b/shared/src/ledger/vp_host_fns.rs index a9aaa7eb16..ebe32d4f72 100644 --- a/shared/src/ledger/vp_host_fns.rs +++ b/shared/src/ledger/vp_host_fns.rs @@ -15,6 +15,7 @@ use crate::ledger::gas::{GasMetering, VpGasMeter}; use crate::ledger::storage::write_log::WriteLog; use crate::ledger::storage::{self, write_log, Storage, StorageHasher}; use crate::proto::{Section, Tx}; +use crate::types::ibc::IbcEvent; /// These runtime errors will abort VP execution immediately #[allow(missing_docs)] @@ -333,6 +334,20 @@ where Ok(storage.native_token.clone()) } +/// Getting the IBC event. 
+pub fn get_ibc_events( + _gas_meter: &mut VpGasMeter, + write_log: &WriteLog, + event_type: String, +) -> EnvResult> { + Ok(write_log + .get_ibc_events() + .iter() + .filter(|event| event.event_type == event_type) + .cloned() + .collect()) +} + /// Storage prefix iterator for prior state (before tx execution), ordered by /// storage keys. It will try to get an iterator from the storage. pub fn iter_prefix_pre<'a, DB, H>( diff --git a/shared/src/lib.rs b/shared/src/lib.rs index 3036c4cb47..d0d1ea8b2b 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -24,7 +24,7 @@ pub use { }; pub mod ledger; pub use namada_core::proto; -pub mod sdk; +pub use namada_sdk; pub mod types; pub mod vm; diff --git a/shared/src/sdk/args.rs b/shared/src/sdk/args.rs index b765dece5a..e69de29bb2 100644 --- a/shared/src/sdk/args.rs +++ b/shared/src/sdk/args.rs @@ -1,887 +0,0 @@ -//! Structures encapsulating SDK arguments - -use std::collections::HashMap; -use std::path::PathBuf; -use std::time::Duration as StdDuration; - -use namada_core::types::chain::ChainId; -use namada_core::types::dec::Dec; -use namada_core::types::ethereum_events::EthAddress; -use namada_core::types::time::DateTimeUtc; -use serde::{Deserialize, Serialize}; -use zeroize::Zeroizing; - -use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::types::address::Address; -use crate::types::keccak::KeccakHash; -use crate::types::key::{common, SchemeType}; -use crate::types::masp::MaspValue; -use crate::types::storage::Epoch; -use crate::types::transaction::GasLimit; -use crate::types::{storage, token}; - -/// [`Duration`](StdDuration) wrapper that provides a -/// method to parse a value from a string. 
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] -#[repr(transparent)] -pub struct Duration(pub StdDuration); - -impl ::std::str::FromStr for Duration { - type Err = ::parse_duration::parse::Error; - - #[inline] - fn from_str(s: &str) -> Result { - ::parse_duration::parse(s).map(Duration) - } -} - -/// Abstraction of types being used in Namada -pub trait NamadaTypes: Clone + std::fmt::Debug { - /// Represents an address on the ledger - type Address: Clone + std::fmt::Debug; - /// Represents the address of a native token - type NativeAddress: Clone + std::fmt::Debug; - /// Represents a key pair - type Keypair: Clone + std::fmt::Debug; - /// Represents the address of a Tendermint endpoint - type TendermintAddress: Clone + std::fmt::Debug; - /// Represents the address of an Ethereum endpoint - type EthereumAddress: Clone + std::fmt::Debug; - /// Represents a viewing key - type ViewingKey: Clone + std::fmt::Debug; - /// Represents the owner of a balance - type BalanceOwner: Clone + std::fmt::Debug; - /// Represents a public key - type PublicKey: Clone + std::fmt::Debug; - /// Represents the source of a Transfer - type TransferSource: Clone + std::fmt::Debug; - /// Represents the target of a Transfer - type TransferTarget: Clone + std::fmt::Debug; - /// Represents some data that is used in a transaction - type Data: Clone + std::fmt::Debug; - /// Bridge pool recommendations conversion rates table. - type BpConversionTable: Clone + std::fmt::Debug; -} - -/// The concrete types being used in Namada SDK -#[derive(Clone, Debug)] -pub struct SdkTypes; - -/// An entry in the Bridge pool recommendations conversion -/// rates table. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct BpConversionTableEntry { - /// An alias for the token, or the string representation - /// of its address if none is available. - pub alias: String, - /// Conversion rate from the given token to gwei. 
- pub conversion_rate: f64, -} - -impl NamadaTypes for SdkTypes { - type Address = Address; - type BalanceOwner = namada_core::types::masp::BalanceOwner; - type BpConversionTable = HashMap; - type Data = Vec; - type EthereumAddress = (); - type Keypair = namada_core::types::key::common::SecretKey; - type NativeAddress = Address; - type PublicKey = namada_core::types::key::common::PublicKey; - type TendermintAddress = (); - type TransferSource = namada_core::types::masp::TransferSource; - type TransferTarget = namada_core::types::masp::TransferTarget; - type ViewingKey = namada_core::types::masp::ExtendedViewingKey; -} - -/// Common query arguments -#[derive(Clone, Debug)] -pub struct Query { - /// The address of the ledger node as host:port - pub ledger_address: C::TendermintAddress, -} - -/// Transaction associated results arguments -#[derive(Clone, Debug)] -pub struct QueryResult { - /// Common query args - pub query: Query, - /// Hash of transaction to lookup - pub tx_hash: String, -} - -/// Custom transaction arguments -#[derive(Clone, Debug)] -pub struct TxCustom { - /// Common tx arguments - pub tx: Tx, - /// Path to the tx WASM code file - pub code_path: Option, - /// Path to the data file - pub data_path: Option, - /// Path to the serialized transaction - pub serialized_tx: Option, - /// The address that correspond to the signatures/signing-keys - pub owner: C::Address, -} - -/// Transfer transaction arguments -#[derive(Clone, Debug)] -pub struct TxTransfer { - /// Common tx arguments - pub tx: Tx, - /// Transfer source address - pub source: C::TransferSource, - /// Transfer target address - pub target: C::TransferTarget, - /// Transferred token address - pub token: C::Address, - /// Transferred token amount - pub amount: InputAmount, - /// Native token address - pub native_token: C::NativeAddress, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, -} -/// An amount read in by the cli -#[derive(Copy, Clone, Debug)] -pub enum InputAmount { - 
/// An amount whose representation has been validated - /// against the allowed representation in storage - Validated(token::DenominatedAmount), - /// The parsed amount read in from the cli. It has - /// not yet been validated against the allowed - /// representation in storage. - Unvalidated(token::DenominatedAmount), -} - -/// IBC transfer transaction arguments -#[derive(Clone, Debug)] -pub struct TxIbcTransfer { - /// Common tx arguments - pub tx: Tx, - /// Transfer source address - pub source: C::Address, - /// Transfer target address - pub receiver: String, - /// Transferred token addres s - pub token: C::Address, - /// Transferred token amount - pub amount: InputAmount, - /// Port ID - pub port_id: PortId, - /// Channel ID - pub channel_id: ChannelId, - /// Timeout height of the destination chain - pub timeout_height: Option, - /// Timeout timestamp offset - pub timeout_sec_offset: Option, - /// Memo - pub memo: Option, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, -} - -/// Transaction to initialize create a new proposal -#[derive(Clone, Debug)] -pub struct InitProposal { - /// Common tx arguments - pub tx: Tx, - /// The proposal data - pub proposal_data: C::Data, - /// Native token address - pub native_token: C::NativeAddress, - /// Flag if proposal should be run offline - pub is_offline: bool, - /// Flag if proposal is of type Pgf stewards - pub is_pgf_stewards: bool, - /// Flag if proposal is of type Pgf funding - pub is_pgf_funding: bool, - /// Path to the tx WASM file - pub tx_code_path: PathBuf, -} - -/// Transaction to vote on a proposal -#[derive(Clone, Debug)] -pub struct VoteProposal { - /// Common tx arguments - pub tx: Tx, - /// Proposal id - pub proposal_id: Option, - /// The vote - pub vote: String, - /// The address of the voter - pub voter: C::Address, - /// Flag if proposal vote should be run offline - pub is_offline: bool, - /// The proposal file path - pub proposal_data: Option, - /// Path to the TX WASM code file - pub 
tx_code_path: PathBuf, -} - -/// Transaction to initialize a new account -#[derive(Clone, Debug)] -pub struct TxInitAccount { - /// Common tx arguments - pub tx: Tx, - /// Path to the VP WASM code file for the new account - pub vp_code_path: PathBuf, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, - /// Public key for the new account - pub public_keys: Vec, - /// The account multisignature threshold - pub threshold: Option, -} - -/// Transaction to initialize a new account -#[derive(Clone, Debug)] -pub struct TxInitValidator { - /// Common tx arguments - pub tx: Tx, - /// Signature scheme - pub scheme: SchemeType, - /// Account keys - pub account_keys: Vec, - /// The account multisignature threshold - pub threshold: Option, - /// Consensus key - pub consensus_key: Option, - /// Ethereum cold key - pub eth_cold_key: Option, - /// Ethereum hot key - pub eth_hot_key: Option, - /// Protocol key - pub protocol_key: Option, - /// Commission rate - pub commission_rate: Dec, - /// Maximum commission rate change - pub max_commission_rate_change: Dec, - /// Path to the VP WASM code file - pub validator_vp_code_path: PathBuf, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, - /// Don't encrypt the keypair - pub unsafe_dont_encrypt: bool, -} - -/// Transaction to update a VP arguments -#[derive(Clone, Debug)] -pub struct TxUpdateAccount { - /// Common tx arguments - pub tx: Tx, - /// Path to the VP WASM code file - pub vp_code_path: Option, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, - /// Address of the account whose VP is to be updated - pub addr: C::Address, - /// Public keys - pub public_keys: Vec, - /// The account threshold - pub threshold: Option, -} - -/// Bond arguments -#[derive(Clone, Debug)] -pub struct Bond { - /// Common tx arguments - pub tx: Tx, - /// Validator address - pub validator: C::Address, - /// Amount of tokens to stake in a bond - pub amount: token::Amount, - /// Source address for delegations. 
For self-bonds, the validator is - /// also the source. - pub source: Option, - /// Native token address - pub native_token: C::NativeAddress, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, -} - -/// Unbond arguments -#[derive(Clone, Debug)] -pub struct Unbond { - /// Common tx arguments - pub tx: Tx, - /// Validator address - pub validator: C::Address, - /// Amount of tokens to unbond from a bond - pub amount: token::Amount, - /// Source address for unbonding from delegations. For unbonding from - /// self-bonds, the validator is also the source - pub source: Option, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, -} - -/// Reveal public key -#[derive(Clone, Debug)] -pub struct RevealPk { - /// Common tx arguments - pub tx: Tx, - /// A public key to be revealed on-chain - pub public_key: C::PublicKey, -} - -/// Query proposal -#[derive(Clone, Debug)] -pub struct QueryProposal { - /// Common query args - pub query: Query, - /// Proposal id - pub proposal_id: Option, -} - -/// Query protocol parameters -#[derive(Clone, Debug)] -pub struct QueryProtocolParameters { - /// Common query args - pub query: Query, -} - -/// Query pgf data -#[derive(Clone, Debug)] -pub struct QueryPgf { - /// Common query args - pub query: Query, -} - -/// Withdraw arguments -#[derive(Clone, Debug)] -pub struct Withdraw { - /// Common tx arguments - pub tx: Tx, - /// Validator address - pub validator: C::Address, - /// Source address for withdrawing from delegations. 
For withdrawing - /// from self-bonds, the validator is also the source - pub source: Option, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, -} - -/// Query asset conversions -#[derive(Clone, Debug)] -pub struct QueryConversions { - /// Common query args - pub query: Query, - /// Address of a token - pub token: Option, - /// Epoch of the asset - pub epoch: Option, -} - -/// Query token balance(s) -#[derive(Clone, Debug)] -pub struct QueryAccount { - /// Common query args - pub query: Query, - /// Address of an owner - pub owner: C::Address, -} - -/// Query token balance(s) -#[derive(Clone, Debug)] -pub struct QueryBalance { - /// Common query args - pub query: Query, - /// Address of an owner - pub owner: Option, - /// Address of a token - pub token: Option, - /// Whether not to convert balances - pub no_conversions: bool, -} - -/// Query historical transfer(s) -#[derive(Clone, Debug)] -pub struct QueryTransfers { - /// Common query args - pub query: Query, - /// Address of an owner - pub owner: Option, - /// Address of a token - pub token: Option, -} - -/// Query PoS bond(s) -#[derive(Clone, Debug)] -pub struct QueryBonds { - /// Common query args - pub query: Query, - /// Address of an owner - pub owner: Option, - /// Address of a validator - pub validator: Option, -} - -/// Query PoS bonded stake -#[derive(Clone, Debug)] -pub struct QueryBondedStake { - /// Common query args - pub query: Query, - /// Address of a validator - pub validator: Option, - /// Epoch in which to find bonded stake - pub epoch: Option, -} - -/// Query the state of a validator (its validator set or if it is jailed) -#[derive(Clone, Debug)] -pub struct QueryValidatorState { - /// Common query args - pub query: Query, - /// Address of a validator - pub validator: C::Address, - /// Epoch in which to find the validator state - pub epoch: Option, -} - -#[derive(Clone, Debug)] -/// Commission rate change args -pub struct CommissionRateChange { - /// Common tx arguments - pub 
tx: Tx, - /// Validator address (should be self) - pub validator: C::Address, - /// Value to which the tx changes the commission rate - pub rate: Dec, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, -} - -#[derive(Clone, Debug)] -/// Commission rate change args -pub struct UpdateStewardCommission { - /// Common tx arguments - pub tx: Tx, - /// Steward address - pub steward: C::Address, - /// Value to which the tx changes the commission rate - pub commission: C::Data, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, -} - -#[derive(Clone, Debug)] -/// Commission rate change args -pub struct ResignSteward { - /// Common tx arguments - pub tx: Tx, - /// Validator address - pub steward: C::Address, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, -} - -#[derive(Clone, Debug)] -/// Re-activate a jailed validator args -pub struct TxUnjailValidator { - /// Common tx arguments - pub tx: Tx, - /// Validator address (should be self) - pub validator: C::Address, - /// Path to the TX WASM code file - pub tx_code_path: PathBuf, -} - -#[derive(Clone, Debug)] -/// Sign a transaction offline -pub struct SignTx { - /// Common tx arguments - pub tx: Tx, - /// Transaction data - pub tx_data: C::Data, - /// The account address - pub owner: C::Address, -} - -/// Query PoS commission rate -#[derive(Clone, Debug)] -pub struct QueryCommissionRate { - /// Common query args - pub query: Query, - /// Address of a validator - pub validator: C::Address, - /// Epoch in which to find commission rate - pub epoch: Option, -} - -/// Query PoS slashes -#[derive(Clone, Debug)] -pub struct QuerySlashes { - /// Common query args - pub query: Query, - /// Address of a validator - pub validator: Option, -} - -/// Query PoS delegations -#[derive(Clone, Debug)] -pub struct QueryDelegations { - /// Common query args - pub query: Query, - /// Address of an owner - pub owner: C::Address, -} - -/// Query PoS to find a validator -#[derive(Clone, Debug)] -pub 
struct QueryFindValidator { - /// Common query args - pub query: Query, - /// Tendermint address - pub tm_addr: String, -} - -/// Query the raw bytes of given storage key -#[derive(Clone, Debug)] -pub struct QueryRawBytes { - /// The storage key to query - pub storage_key: storage::Key, - /// Common query args - pub query: Query, -} - -/// Common transaction arguments -#[derive(Clone, Debug)] -pub struct Tx { - /// Simulate applying the transaction - pub dry_run: bool, - /// Simulate applying both the wrapper and inner transactions - pub dry_run_wrapper: bool, - /// Dump the transaction bytes to file - pub dump_tx: bool, - /// The output directory path to where serialize the data - pub output_folder: Option, - /// Submit the transaction even if it doesn't pass client checks - pub force: bool, - /// Do not wait for the transaction to be added to the blockchain - pub broadcast_only: bool, - /// The address of the ledger node as host:port - pub ledger_address: C::TendermintAddress, - /// If any new account is initialized by the tx, use the given alias to - /// save it in the wallet. - pub initialized_account_alias: Option, - /// Whether to force overwrite the above alias, if it is provided, in the - /// wallet. 
- pub wallet_alias_force: bool, - /// The amount being payed (for gas unit) to include the transaction - pub fee_amount: Option, - /// The fee payer signing key - pub wrapper_fee_payer: Option, - /// The token in which the fee is being paid - pub fee_token: C::Address, - /// The optional spending key for fee unshielding - pub fee_unshield: Option, - /// The max amount of gas used to process tx - pub gas_limit: GasLimit, - /// The optional expiration of the transaction - pub expiration: Option, - /// Generate an ephimeral signing key to be used only once to sign a - /// wrapper tx - pub disposable_signing_key: bool, - /// The chain id for which the transaction is intended - pub chain_id: Option, - /// Sign the tx with the key for the given alias from your wallet - pub signing_keys: Vec, - /// List of signatures to attach to the transaction - pub signatures: Vec, - /// Path to the TX WASM code file to reveal PK - pub tx_reveal_code_path: PathBuf, - /// Sign the tx with the public key for the given alias from your wallet - pub verification_key: Option, - /// Password to decrypt key - pub password: Option>, -} - -/// MASP add key or address arguments -#[derive(Clone, Debug)] -pub struct MaspAddrKeyAdd { - /// Key alias - pub alias: String, - /// Whether to force overwrite the alias - pub alias_force: bool, - /// Any MASP value - pub value: MaspValue, - /// Don't encrypt the keypair - pub unsafe_dont_encrypt: bool, -} - -/// MASP generate spending key arguments -#[derive(Clone, Debug)] -pub struct MaspSpendKeyGen { - /// Key alias - pub alias: String, - /// Whether to force overwrite the alias - pub alias_force: bool, - /// Don't encrypt the keypair - pub unsafe_dont_encrypt: bool, -} - -/// MASP generate payment address arguments -#[derive(Clone, Debug)] -pub struct MaspPayAddrGen { - /// Key alias - pub alias: String, - /// Whether to force overwrite the alias - pub alias_force: bool, - /// Viewing key - pub viewing_key: C::ViewingKey, - /// Pin - pub pin: bool, -} - 
-/// Wallet generate key and implicit address arguments -#[derive(Clone, Debug)] -pub struct KeyAndAddressGen { - /// Scheme type - pub scheme: SchemeType, - /// Key alias - pub alias: Option, - /// Whether to force overwrite the alias, if provided - pub alias_force: bool, - /// Don't encrypt the keypair - pub unsafe_dont_encrypt: bool, - /// BIP44 derivation path - pub derivation_path: Option, -} - -/// Wallet restore key and implicit address arguments -#[derive(Clone, Debug)] -pub struct KeyAndAddressRestore { - /// Scheme type - pub scheme: SchemeType, - /// Key alias - pub alias: Option, - /// Whether to force overwrite the alias, if provided - pub alias_force: bool, - /// Don't encrypt the keypair - pub unsafe_dont_encrypt: bool, - /// BIP44 derivation path - pub derivation_path: Option, -} - -/// Wallet key lookup arguments -#[derive(Clone, Debug)] -pub struct KeyFind { - /// Public key to lookup keypair with - pub public_key: Option, - /// Key alias to lookup keypair with - pub alias: Option, - /// Public key hash to lookup keypair with - pub value: Option, - /// Show secret keys to user - pub unsafe_show_secret: bool, -} - -/// Wallet find shielded address or key arguments -#[derive(Clone, Debug)] -pub struct AddrKeyFind { - /// Address/key alias - pub alias: String, - /// Show secret keys to user - pub unsafe_show_secret: bool, -} - -/// Wallet list shielded keys arguments -#[derive(Clone, Debug)] -pub struct MaspKeysList { - /// Don't decrypt spending keys - pub decrypt: bool, - /// Show secret keys to user - pub unsafe_show_secret: bool, -} - -/// Wallet list keys arguments -#[derive(Clone, Debug)] -pub struct KeyList { - /// Don't decrypt keypairs - pub decrypt: bool, - /// Show secret keys to user - pub unsafe_show_secret: bool, -} - -/// Wallet key export arguments -#[derive(Clone, Debug)] -pub struct KeyExport { - /// Key alias - pub alias: String, -} - -/// Wallet address lookup arguments -#[derive(Clone, Debug)] -pub struct AddressOrAliasFind { - 
/// Alias to find - pub alias: Option, - /// Address to find - pub address: Option
, -} - -/// Wallet address add arguments -#[derive(Clone, Debug)] -pub struct AddressAdd { - /// Address alias - pub alias: String, - /// Whether to force overwrite the alias - pub alias_force: bool, - /// Address to add - pub address: Address, -} - -/// Bridge pool batch recommendation. -#[derive(Clone, Debug)] -pub struct RecommendBatch { - /// The query parameters. - pub query: Query, - /// The maximum amount of gas to spend. - pub max_gas: Option, - /// An optional parameter indicating how much net - /// gas the relayer is willing to pay. - pub gas: Option, - /// Bridge pool recommendations conversion rates table. - pub conversion_table: C::BpConversionTable, -} - -/// A transfer to be added to the Ethereum bridge pool. -#[derive(Clone, Debug)] -pub struct EthereumBridgePool { - /// Whether the transfer is for a NUT. - /// - /// By default, we add wrapped ERC20s onto the - /// Bridge pool. - pub nut: bool, - /// The args for building a tx to the bridge pool - pub tx: Tx, - /// The type of token - pub asset: EthAddress, - /// The recipient address - pub recipient: EthAddress, - /// The sender of the transfer - pub sender: C::Address, - /// The amount to be transferred - pub amount: InputAmount, - /// The amount of gas fees - pub fee_amount: InputAmount, - /// The account of fee payer. - /// - /// If unset, it is the same as the sender. - pub fee_payer: Option, - /// The token in which the gas is being paid - pub fee_token: C::Address, - /// Path to the tx WASM code file - pub code_path: PathBuf, -} - -/// Bridge pool proof arguments. -#[derive(Debug, Clone)] -pub struct BridgePoolProof { - /// The query parameters. - pub query: Query, - /// The keccak hashes of transfers to - /// acquire a proof of. - pub transfers: Vec, - /// The address of the node responsible for relaying - /// the transfers. - /// - /// This node will receive the gas fees escrowed in - /// the Bridge pool, to compensate the Ethereum relay - /// procedure. 
- pub relayer: Address, -} - -/// Arguments to an Ethereum Bridge pool relay operation. -#[derive(Debug, Clone)] -pub struct RelayBridgePoolProof { - /// The query parameters. - pub query: Query, - /// The hashes of the transfers to be relayed - pub transfers: Vec, - /// The Namada address for receiving fees for relaying - pub relayer: Address, - /// The number of confirmations to wait for on Ethereum - pub confirmations: u64, - /// The Ethereum RPC endpoint. - pub eth_rpc_endpoint: C::EthereumAddress, - /// The Ethereum gas that can be spent during - /// the relay call. - pub gas: Option, - /// The price of Ethereum gas, during the - /// relay call. - pub gas_price: Option, - /// The address of the Ethereum wallet to pay the gas fees. - /// If unset, the default wallet is used. - pub eth_addr: Option, - /// Synchronize with the network, or exit immediately, - /// if the Ethereum node has fallen behind. - pub sync: bool, - /// Safe mode overrides keyboard interrupt signals, to ensure - /// Ethereum transfers aren't canceled midway through. - pub safe_mode: bool, -} - -/// Bridge validator set arguments. -#[derive(Debug, Clone)] -pub struct BridgeValidatorSet { - /// The query parameters. - pub query: Query, - /// The epoch to query. - pub epoch: Option, -} - -/// Governance validator set arguments. -#[derive(Debug, Clone)] -pub struct GovernanceValidatorSet { - /// The query parameters. - pub query: Query, - /// The epoch to query. - pub epoch: Option, -} - -/// Validator set proof arguments. -#[derive(Debug, Clone)] -pub struct ValidatorSetProof { - /// The query parameters. - pub query: Query, - /// The epoch to query. - pub epoch: Option, -} - -/// Validator set update relayer arguments. -#[derive(Debug, Clone)] -pub struct ValidatorSetUpdateRelay { - /// Run in daemon mode, which will continuously - /// perform validator set updates. - pub daemon: bool, - /// The query parameters. - pub query: Query, - /// The number of block confirmations on Ethereum. 
- pub confirmations: u64, - /// The Ethereum RPC endpoint. - pub eth_rpc_endpoint: C::EthereumAddress, - /// The epoch of the validator set to relay. - pub epoch: Option, - /// The Ethereum gas that can be spent during - /// the relay call. - pub gas: Option, - /// The price of Ethereum gas, during the - /// relay call. - pub gas_price: Option, - /// The address of the Ethereum wallet to pay the gas fees. - /// If unset, the default wallet is used. - pub eth_addr: Option, - /// Synchronize with the network, or exit immediately, - /// if the Ethereum node has fallen behind. - pub sync: bool, - /// The amount of time to sleep between failed - /// daemon mode relays. - pub retry_dur: Option, - /// The amount of time to sleep between successful - /// daemon mode relays. - pub success_dur: Option, - /// Safe mode overrides keyboard interrupt signals, to ensure - /// Ethereum transfers aren't canceled midway through. - pub safe_mode: bool, -} diff --git a/shared/src/sdk/mod.rs b/shared/src/sdk/mod.rs deleted file mode 100644 index 381bac03d1..0000000000 --- a/shared/src/sdk/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! Namada's SDK API -pub mod rpc; - -pub mod args; -pub mod masp; -pub mod signing; -#[allow(clippy::result_large_err)] -pub mod tx; - -pub mod error; -pub mod queries; -pub mod wallet; diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index 83e58f0fa8..04801cf6c5 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -1,8 +1,8 @@ //! Types definitions. 
-pub mod control_flow; +pub use namada_sdk::control_flow; pub mod ibc; -pub mod io; +pub use namada_sdk::io; pub mod key; pub use namada_core::types::{ diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index 7806d1abef..5f701b7e1f 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -5,6 +5,8 @@ use std::convert::TryInto; use std::num::TryFromIntError; use borsh::{BorshDeserialize, BorshSerialize}; +use borsh_ext::BorshSerializeExt; +use masp_primitives::transaction::Transaction; use namada_core::ledger::gas::{GasMetering, TxGasMeter}; use namada_core::types::internal::KeyVal; use thiserror::Error; @@ -21,11 +23,12 @@ use crate::ledger::vp_host_fns; use crate::proto::Tx; use crate::types::address::{self, Address}; use crate::types::hash::Hash; -use crate::types::ibc::IbcEvent; +use crate::types::ibc::{IbcEvent, IbcShieldedTransfer}; use crate::types::internal::HostEnvResult; -use crate::types::storage::{BlockHeight, Key, TxIndex}; +use crate::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; use crate::types::token::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, + Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, }; use crate::vm::memory::VmMemory; use crate::vm::prefix_iter::{PrefixIteratorId, PrefixIterators}; @@ -727,11 +730,10 @@ where tx_charge_gas(env, iter_gas + log_gas)?; match log_val { Some(write_log::StorageModification::Write { ref value }) => { - let key_val = KeyVal { + let key_val = borsh::to_vec(&KeyVal { key, val: value.clone(), - } - .try_to_vec() + }) .map_err(TxRuntimeError::EncodingError)?; let len: i64 = key_val .len() @@ -750,11 +752,10 @@ where continue; } Some(write_log::StorageModification::Temp { ref value }) => { - let key_val = KeyVal { + let key_val = borsh::to_vec(&KeyVal { key, val: value.clone(), - } - .try_to_vec() + }) .map_err(TxRuntimeError::EncodingError)?; let len: i64 = key_val .len() @@ -765,8 +766,7 @@ where return Ok(len); } None => { - let 
key_val = KeyVal { key, val } - .try_to_vec() + let key_val = borsh::to_vec(&KeyVal { key, val }) .map_err(TxRuntimeError::EncodingError)?; let len: i64 = key_val .len() @@ -975,7 +975,7 @@ where } /// Getting an IBC event function exposed to the wasm VM Tx environment. -pub fn tx_get_ibc_event( +pub fn tx_get_ibc_events( env: &TxVmEnv, event_type_ptr: u64, event_type_len: u64, @@ -992,20 +992,20 @@ where .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; tx_charge_gas(env, gas)?; let write_log = unsafe { env.ctx.write_log.get() }; - for event in write_log.get_ibc_events() { - if event.event_type == event_type { - let value = - event.try_to_vec().map_err(TxRuntimeError::EncodingError)?; - let len: i64 = value - .len() - .try_into() - .map_err(TxRuntimeError::NumConversionError)?; - let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(value); - return Ok(len); - } - } - Ok(HostEnvResult::Fail.to_i64()) + let events: Vec = write_log + .get_ibc_events() + .iter() + .filter(|event| event.event_type == event_type) + .cloned() + .collect(); + let value = events.serialize_to_vec(); + let len: i64 = value + .len() + .try_into() + .map_err(TxRuntimeError::NumConversionError)?; + let result_buffer = unsafe { env.ctx.result_buffer.get() }; + result_buffer.replace(value); + Ok(len) } /// Storage read prior state (before tx execution) function exposed to the wasm @@ -1341,8 +1341,7 @@ where if let Some(iter) = iterators.get_mut(iter_id) { let gas_meter = unsafe { env.ctx.gas_meter.get() }; if let Some((key, val)) = vp_host_fns::iter_next(gas_meter, iter)? 
{ - let key_val = KeyVal { key, val } - .try_to_vec() + let key_val = borsh::to_vec(&KeyVal { key, val }) .map_err(vp_host_fns::RuntimeError::EncodingError)?; let len: i64 = key_val .len() @@ -1452,8 +1451,7 @@ where let code_hash = Hash::try_from(&code_hash[..]) .map_err(|e| TxRuntimeError::InvalidVpCodeHash(e.to_string()))?; let (addr, gas) = write_log.init_account(&storage.address_gen, code_hash); - let addr_bytes = - addr.try_to_vec().map_err(TxRuntimeError::EncodingError)?; + let addr_bytes = addr.serialize_to_vec(); tx_charge_gas(env, gas)?; let gas = env .memory @@ -1616,8 +1614,7 @@ where .map_err(TxRuntimeError::StorageError)?; Ok(match header { Some(h) => { - let value = - h.try_to_vec().map_err(TxRuntimeError::EncodingError)?; + let value = h.serialize_to_vec(); let len: i64 = value .len() .try_into() @@ -1692,9 +1689,7 @@ where vp_host_fns::add_gas(gas_meter, gas)?; Ok(match header { Some(h) => { - let value = h - .try_to_vec() - .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let value = h.serialize_to_vec(); let len: i64 = value .len() .try_into() @@ -1778,6 +1773,38 @@ where Ok(epoch.0) } +/// Getting the IBC event function exposed to the wasm VM VP environment. 
+pub fn vp_get_ibc_events( + env: &VpVmEnv, + event_type_ptr: u64, + event_type_len: u64, +) -> vp_host_fns::EnvResult +where + MEM: VmMemory, + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + EVAL: VpEvaluator, + CA: WasmCacheAccess, +{ + let (event_type, gas) = env + .memory + .read_string(event_type_ptr, event_type_len as _) + .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + let gas_meter = unsafe { env.ctx.gas_meter.get() }; + vp_host_fns::add_gas(gas_meter, gas)?; + + let write_log = unsafe { env.ctx.write_log.get() }; + let events = vp_host_fns::get_ibc_events(gas_meter, write_log, event_type)?; + let value = events.serialize_to_vec(); + let len: i64 = value + .len() + .try_into() + .map_err(vp_host_fns::RuntimeError::NumConversionError)?; + let result_buffer = unsafe { env.ctx.result_buffer.get() }; + result_buffer.replace(value); + Ok(len) +} + /// Verify a transaction signature /// TODO: this is just a warkaround to track gas for multiple singature /// verifications. When the runtime gas meter is implemented, this funcion can @@ -1809,7 +1836,7 @@ where let gas_meter = unsafe { env.ctx.gas_meter.get() }; vp_host_fns::add_gas(gas_meter, gas)?; - let hashes = <[Hash; 2]>::try_from_slice(&hash_list) + let hashes = <[Hash; 1]>::try_from_slice(&hash_list) .map_err(vp_host_fns::RuntimeError::EncodingError)?; let (public_keys_map, gas) = env @@ -1848,7 +1875,7 @@ where &Some(signer), threshold, max_signatures, - Some(gas_meter), + &mut Some(gas_meter), ) .is_ok(), ) @@ -1868,8 +1895,6 @@ where EVAL: VpEvaluator, CA: WasmCacheAccess, { - use masp_primitives::transaction::Transaction; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; let (tx_bytes, gas) = env .memory @@ -1885,7 +1910,7 @@ where // TODO: once the runtime gas meter is implemented we need to benchmark // this funcion and charge the gas here. 
For the moment, the cost of // this is included in the benchmark of the masp vp - HostEnvResult::from(crate::sdk::masp::verify_shielded_tx(&shielded)) + HostEnvResult::from(namada_sdk::masp::verify_shielded_tx(&shielded)) .to_i64(), ) } @@ -2207,16 +2232,17 @@ where ibc_tx_charge_gas(self, gas) } - fn get_ibc_event( + fn get_ibc_events( &self, event_type: impl AsRef, - ) -> Result, Self::Error> { + ) -> Result, Self::Error> { let write_log = unsafe { self.write_log.get() }; Ok(write_log .get_ibc_events() .iter() - .find(|event| event.event_type == event_type.as_ref()) - .cloned()) + .filter(|event| event.event_type == event_type.as_ref()) + .cloned() + .collect()) } fn transfer_token( @@ -2247,6 +2273,42 @@ where Ok(()) } + fn handle_masp_tx( + &mut self, + shielded: &IbcShieldedTransfer, + ) -> Result<(), Self::Error> { + let masp_addr = address::masp(); + let head_tx_key = Key::from(masp_addr.to_db_key()) + .push(&HEAD_TX_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let current_tx_idx: u64 = ibc_read_borsh(self, &head_tx_key) + .unwrap_or(None) + .unwrap_or(0); + let current_tx_key = Key::from(masp_addr.to_db_key()) + .push(&(TX_KEY_PREFIX.to_owned() + ¤t_tx_idx.to_string())) + .expect("Cannot obtain a storage key"); + // Save the Transfer object and its location within the blockchain + // so that clients do not have to separately look these + // up + let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( + ibc_get_block_epoch(self)?, + self.get_height()?, + ibc_get_tx_index(self)?, + shielded.transfer.clone(), + shielded.masp_tx.clone(), + ); + ibc_write_borsh(self, ¤t_tx_key, &record)?; + ibc_write_borsh(self, &head_tx_key, &(current_tx_idx + 1))?; + // If storage key has been supplied, then pin this transaction to it + if let Some(key) = &shielded.transfer.key { + let pin_key = Key::from(masp_addr.to_db_key()) + .push(&(PIN_KEY_PREFIX.to_owned() + key)) + .expect("Cannot obtain a storage key"); + ibc_write_borsh(self, &pin_key, 
¤t_tx_idx)?; + } + Ok(()) + } + fn mint_token( &mut self, target: &Address, @@ -2391,11 +2453,45 @@ where H: StorageHasher, CA: WasmCacheAccess, { - let bytes = val.try_to_vec().map_err(TxRuntimeError::EncodingError)?; + let bytes = borsh::to_vec(val).map_err(TxRuntimeError::EncodingError)?; namada_core::ledger::ibc::IbcStorageContext::write(ctx, key, bytes)?; Ok(()) } +/// Get the current epoch. +// Temp helper for ibc tx workaround. +fn ibc_get_block_epoch<'a, DB, H, CA>( + ctx: &TxCtx<'a, DB, H, CA>, +) -> TxResult +where + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + CA: WasmCacheAccess, +{ + let storage = unsafe { ctx.storage.get() }; + let (epoch, gas) = storage.get_current_epoch(); + ibc_tx_charge_gas(ctx, gas)?; + Ok(epoch) +} + +/// Get the tx index. +// Temp helper for ibc tx workaround. +fn ibc_get_tx_index<'a, DB, H, CA>( + ctx: &TxCtx<'a, DB, H, CA>, +) -> TxResult +where + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + CA: WasmCacheAccess, +{ + let tx_index = unsafe { ctx.tx_index.get() }; + ibc_tx_charge_gas( + ctx, + crate::vm::host_env::gas::STORAGE_ACCESS_GAS_PER_BYTE, + )?; + Ok(TxIndex(tx_index.0)) +} + // Temp. 
workaround for impl<'a, DB, H, CA> namada_core::ledger::ibc::IbcCommonContext for TxCtx<'a, DB, H, CA> diff --git a/shared/src/vm/wasm/host_env.rs b/shared/src/vm/wasm/host_env.rs index 57a8bc6986..e9a63e631e 100644 --- a/shared/src/vm/wasm/host_env.rs +++ b/shared/src/vm/wasm/host_env.rs @@ -76,7 +76,7 @@ where "namada_tx_update_validity_predicate" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_update_validity_predicate), "namada_tx_init_account" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_init_account), "namada_tx_emit_ibc_event" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_emit_ibc_event), - "namada_tx_get_ibc_event" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_ibc_event), + "namada_tx_get_ibc_events" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_ibc_events), "namada_tx_get_chain_id" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_chain_id), "namada_tx_get_tx_index" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_tx_index), "namada_tx_get_block_height" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_block_height), @@ -127,6 +127,7 @@ where "namada_vp_get_block_hash" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_hash), "namada_vp_get_tx_code_hash" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_tx_code_hash), "namada_vp_get_block_epoch" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_epoch), + "namada_vp_get_ibc_events" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_ibc_events), "namada_vp_verify_tx_section_signature" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_verify_tx_section_signature), "namada_vp_verify_masp" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_verify_masp), 
"namada_vp_eval" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_eval), diff --git a/shared/src/vm/wasm/memory.rs b/shared/src/vm/wasm/memory.rs index 3e0c7975c9..9b04160556 100644 --- a/shared/src/vm/wasm/memory.rs +++ b/shared/src/vm/wasm/memory.rs @@ -5,7 +5,7 @@ use std::ptr::NonNull; use std::str::Utf8Error; use std::sync::Arc; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use namada_core::ledger::gas::VM_MEMORY_ACCESS_GAS_PER_BYTE; use thiserror::Error; use wasmer::{ @@ -86,7 +86,7 @@ pub fn write_tx_inputs( tx_data: &Tx, ) -> Result { let tx_data_ptr = 0; - let tx_data_bytes = tx_data.try_to_vec().map_err(Error::EncodingError)?; + let tx_data_bytes = tx_data.serialize_to_vec(); let tx_data_len = tx_data_bytes.len() as _; write_memory_bytes(memory, tx_data_ptr, tx_data_bytes)?; @@ -129,20 +129,18 @@ pub fn write_vp_inputs( }: VpInput, ) -> Result { let addr_ptr = 0; - let addr_bytes = addr.try_to_vec().map_err(Error::EncodingError)?; + let addr_bytes = addr.serialize_to_vec(); let addr_len = addr_bytes.len() as _; - let data_bytes = data.try_to_vec().map_err(Error::EncodingError)?; + let data_bytes = data.serialize_to_vec(); let data_ptr = addr_ptr + addr_len; let data_len = data_bytes.len() as _; - let keys_changed_bytes = - keys_changed.try_to_vec().map_err(Error::EncodingError)?; + let keys_changed_bytes = keys_changed.serialize_to_vec(); let keys_changed_ptr = data_ptr + data_len; let keys_changed_len = keys_changed_bytes.len() as _; - let verifiers_bytes = - verifiers.try_to_vec().map_err(Error::EncodingError)?; + let verifiers_bytes = verifiers.serialize_to_vec(); let verifiers_ptr = keys_changed_ptr + keys_changed_len; let verifiers_len = verifiers_bytes.len() as _; diff --git a/shared/src/vm/wasm/run.rs b/shared/src/vm/wasm/run.rs index 7678ceb434..9740469b95 100644 --- a/shared/src/vm/wasm/run.rs +++ b/shared/src/vm/wasm/run.rs @@ -552,7 +552,7 @@ fn get_gas_rules() -> 
wasm_instrument::gas_metering::ConstantCostRules { #[cfg(test)] mod tests { - use borsh::BorshSerialize; + use borsh_ext::BorshSerializeExt; use itertools::Either; use namada_test_utils::TestWasms; use test_log::test; @@ -631,7 +631,7 @@ mod tests { let code_hash = Hash::sha256(&tx_code); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - let code_len = (tx_code.len() as u64).try_to_vec().unwrap(); + let code_len = (tx_code.len() as u64).serialize_to_vec(); write_log.write(&key, tx_code.clone()).unwrap(); write_log.write(&len_key, code_len).unwrap(); @@ -640,7 +640,7 @@ mod tests { // Allocating `2^23` (8 MiB) should be below the memory limit and // shouldn't fail - let tx_data = 2_usize.pow(23).try_to_vec().unwrap(); + let tx_data = 2_usize.pow(23).serialize_to_vec(); let (mut vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let (mut tx_cache, _) = @@ -661,7 +661,7 @@ mod tests { // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail - let tx_data = 2_usize.pow(24).try_to_vec().unwrap(); + let tx_data = 2_usize.pow(24).serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_code)); outer_tx.set_data(Data::new(tx_data)); @@ -700,7 +700,7 @@ mod tests { let code_hash = Hash::sha256(&vp_eval); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - let code_len = (vp_eval.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_eval.len() as u64).serialize_to_vec(); storage.write(&key, vp_eval).unwrap(); storage.write(&len_key, code_len).unwrap(); // This code will allocate memory of the given size @@ -709,7 +709,7 @@ mod tests { let limit_code_hash = Hash::sha256(&vp_memory_limit); let key = Key::wasm_code(&limit_code_hash); let len_key = Key::wasm_code_len(&limit_code_hash); - let code_len = (vp_memory_limit.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_memory_limit.len() as u64).serialize_to_vec(); 
storage.write(&key, vp_memory_limit).unwrap(); storage.write(&len_key, code_len).unwrap(); @@ -718,7 +718,7 @@ mod tests { // Allocating `2^23` (8 MiB) should be below the memory limit and // shouldn't fail - let input = 2_usize.pow(23).try_to_vec().unwrap(); + let input = 2_usize.pow(23).serialize_to_vec(); let mut tx = Tx::new(storage.chain_id.clone(), None); tx.add_code(vec![]).add_serialized_data(input); @@ -751,7 +751,7 @@ mod tests { // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail - let input = 2_usize.pow(24).try_to_vec().unwrap(); + let input = 2_usize.pow(24).serialize_to_vec(); let mut tx = Tx::new(storage.chain_id.clone(), None); tx.add_code(vec![]).add_data(input); @@ -801,7 +801,7 @@ mod tests { let vp_code = TestWasms::VpMemoryLimit.read_bytes(); // store the wasm code let code_hash = Hash::sha256(&vp_code); - let code_len = (vp_code.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); storage.write(&key, vp_code).unwrap(); @@ -812,7 +812,7 @@ mod tests { // Allocating `2^23` (8 MiB) should be below the memory limit and // shouldn't fail - let tx_data = 2_usize.pow(23).try_to_vec().unwrap(); + let tx_data = 2_usize.pow(23).serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.header.chain_id = storage.chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); @@ -834,7 +834,7 @@ mod tests { // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail - let tx_data = 2_usize.pow(24).try_to_vec().unwrap(); + let tx_data = 2_usize.pow(24).serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.header.chain_id = storage.chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); @@ -869,7 +869,7 @@ mod tests { let code_hash = Hash::sha256(&tx_no_op); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - let 
code_len = (tx_no_op.len() as u64).try_to_vec().unwrap(); + let code_len = (tx_no_op.len() as u64).serialize_to_vec(); write_log.write(&key, tx_no_op.clone()).unwrap(); write_log.write(&len_key, code_len).unwrap(); @@ -934,7 +934,7 @@ mod tests { let code_hash = Hash::sha256(&vp_code); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - let code_len = (vp_code.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_code.len() as u64).serialize_to_vec(); storage.write(&key, vp_code).unwrap(); storage.write(&len_key, code_len).unwrap(); @@ -996,7 +996,7 @@ mod tests { let tx_read_key = TestWasms::TxReadStorageKey.read_bytes(); // store the wasm code let code_hash = Hash::sha256(&tx_read_key); - let code_len = (tx_read_key.len() as u64).try_to_vec().unwrap(); + let code_len = (tx_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); write_log.write(&key, tx_read_key.clone()).unwrap(); @@ -1012,8 +1012,8 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. 
- storage.write(&key, value.try_to_vec().unwrap()).unwrap(); - let tx_data = key.try_to_vec().unwrap(); + storage.write(&key, value.serialize_to_vec()).unwrap(); + let tx_data = key.serialize_to_vec(); let (mut vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let (mut tx_cache, _) = @@ -1053,7 +1053,7 @@ mod tests { let vp_read_key = TestWasms::VpReadStorageKey.read_bytes(); // store the wasm code let code_hash = Hash::sha256(&vp_read_key); - let code_len = (vp_read_key.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); storage.write(&key, vp_read_key).unwrap(); @@ -1069,8 +1069,8 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. - storage.write(&key, value.try_to_vec().unwrap()).unwrap(); - let tx_data = key.try_to_vec().unwrap(); + storage.write(&key, value.serialize_to_vec()).unwrap(); + let tx_data = key.serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.header.chain_id = storage.chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); @@ -1113,7 +1113,7 @@ mod tests { let vp_eval = TestWasms::VpEval.read_bytes(); // store the wasm code let code_hash = Hash::sha256(&vp_eval); - let code_len = (vp_eval.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_eval.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); storage.write(&key, vp_eval).unwrap(); @@ -1122,7 +1122,7 @@ mod tests { let vp_read_key = TestWasms::VpReadStorageKey.read_bytes(); // store the wasm code let read_code_hash = Hash::sha256(&vp_read_key); - let code_len = (vp_read_key.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&read_code_hash); let len_key = 
Key::wasm_code_len(&read_code_hash); storage.write(&key, vp_read_key).unwrap(); @@ -1138,8 +1138,8 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. - storage.write(&key, value.try_to_vec().unwrap()).unwrap(); - let input = 2_usize.pow(23).try_to_vec().unwrap(); + storage.write(&key, value.serialize_to_vec()).unwrap(); + let input = 2_usize.pow(23).serialize_to_vec(); let mut tx = Tx::new(storage.chain_id.clone(), None); tx.add_code(vec![]).add_serialized_data(input); @@ -1216,7 +1216,7 @@ mod tests { // store the tx code let code_hash = Hash::sha256(&tx_code); - let code_len = (tx_code.len() as u64).try_to_vec().unwrap(); + let code_len = (tx_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); write_log.write(&key, tx_code).unwrap(); @@ -1279,7 +1279,7 @@ mod tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); // store the vp code let code_hash = Hash::sha256(&vp_code); - let code_len = (vp_code.len() as u64).try_to_vec().unwrap(); + let code_len = (vp_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); storage.write(&key, vp_code).unwrap(); diff --git a/test_fixtures/masp_proofs/0B436B9FB574776E45EDA537DCF334701D250179C310C7076DAEE5367CB3D74A.bin b/test_fixtures/masp_proofs/0B436B9FB574776E45EDA537DCF334701D250179C310C7076DAEE5367CB3D74A.bin deleted file mode 100644 index 283608af3c..0000000000 Binary files a/test_fixtures/masp_proofs/0B436B9FB574776E45EDA537DCF334701D250179C310C7076DAEE5367CB3D74A.bin and /dev/null differ diff --git a/test_fixtures/masp_proofs/1362F1CF9B836CF8B05D8189EA9CB1712CCA85B0E96A3330A63BE7CD9E5ECD22.bin b/test_fixtures/masp_proofs/1362F1CF9B836CF8B05D8189EA9CB1712CCA85B0E96A3330A63BE7CD9E5ECD22.bin index 13110a81e5..741f4d5106 100644 Binary files 
a/test_fixtures/masp_proofs/1362F1CF9B836CF8B05D8189EA9CB1712CCA85B0E96A3330A63BE7CD9E5ECD22.bin and b/test_fixtures/masp_proofs/1362F1CF9B836CF8B05D8189EA9CB1712CCA85B0E96A3330A63BE7CD9E5ECD22.bin differ diff --git a/test_fixtures/masp_proofs/574D00A0B71BE528A2923F6B68934EAA4FA91FFF4AFDF3B08047E7DC6BFCED36.bin b/test_fixtures/masp_proofs/29AC8DE3B07495BEABEAF50FE8FF8BF07596031CD460FEBFAEA4F75AF65D5402.bin similarity index 58% rename from test_fixtures/masp_proofs/574D00A0B71BE528A2923F6B68934EAA4FA91FFF4AFDF3B08047E7DC6BFCED36.bin rename to test_fixtures/masp_proofs/29AC8DE3B07495BEABEAF50FE8FF8BF07596031CD460FEBFAEA4F75AF65D5402.bin index 83e5789827..b748454de8 100644 Binary files a/test_fixtures/masp_proofs/574D00A0B71BE528A2923F6B68934EAA4FA91FFF4AFDF3B08047E7DC6BFCED36.bin and b/test_fixtures/masp_proofs/29AC8DE3B07495BEABEAF50FE8FF8BF07596031CD460FEBFAEA4F75AF65D5402.bin differ diff --git a/test_fixtures/masp_proofs/37332141CB34FC30FF51F4BEE8D76149D3088F539CF8372D404609B89B095EF7.bin b/test_fixtures/masp_proofs/37332141CB34FC30FF51F4BEE8D76149D3088F539CF8372D404609B89B095EF7.bin index 0e74c8f67b..18f83d0543 100644 Binary files a/test_fixtures/masp_proofs/37332141CB34FC30FF51F4BEE8D76149D3088F539CF8372D404609B89B095EF7.bin and b/test_fixtures/masp_proofs/37332141CB34FC30FF51F4BEE8D76149D3088F539CF8372D404609B89B095EF7.bin differ diff --git a/test_fixtures/masp_proofs/DDD66A8E673E8E8A1401967F6FCDD5724C594E275B353F45FF749CB76D3CFF52.bin b/test_fixtures/masp_proofs/52984E26D4A044A259B441C1DAEB66F886B3A3B6C71D33F456B859D01DA47ADD.bin similarity index 52% rename from test_fixtures/masp_proofs/DDD66A8E673E8E8A1401967F6FCDD5724C594E275B353F45FF749CB76D3CFF52.bin rename to test_fixtures/masp_proofs/52984E26D4A044A259B441C1DAEB66F886B3A3B6C71D33F456B859D01DA47ADD.bin index 0a8cd50818..c51b5ed0d9 100644 Binary files a/test_fixtures/masp_proofs/DDD66A8E673E8E8A1401967F6FCDD5724C594E275B353F45FF749CB76D3CFF52.bin and 
b/test_fixtures/masp_proofs/52984E26D4A044A259B441C1DAEB66F886B3A3B6C71D33F456B859D01DA47ADD.bin differ diff --git a/test_fixtures/masp_proofs/8B29BC2E1A96DF331C7C3A2B227C98D1E5AAAA9988F26B1A47090ACCE693572F.bin b/test_fixtures/masp_proofs/8B29BC2E1A96DF331C7C3A2B227C98D1E5AAAA9988F26B1A47090ACCE693572F.bin index 76b40e6552..afcad7c654 100644 Binary files a/test_fixtures/masp_proofs/8B29BC2E1A96DF331C7C3A2B227C98D1E5AAAA9988F26B1A47090ACCE693572F.bin and b/test_fixtures/masp_proofs/8B29BC2E1A96DF331C7C3A2B227C98D1E5AAAA9988F26B1A47090ACCE693572F.bin differ diff --git a/test_fixtures/masp_proofs/9883C2EF7971504BB1CF651BAFFC3DA2C57E4FD8FF0811D9CB129887F0F9F706.bin b/test_fixtures/masp_proofs/9883C2EF7971504BB1CF651BAFFC3DA2C57E4FD8FF0811D9CB129887F0F9F706.bin new file mode 100644 index 0000000000..094103100a Binary files /dev/null and b/test_fixtures/masp_proofs/9883C2EF7971504BB1CF651BAFFC3DA2C57E4FD8FF0811D9CB129887F0F9F706.bin differ diff --git a/test_fixtures/masp_proofs/B9D0EC03A64BC8F9536A05F587B35316EE670A242606A81AF0139B3F21CDEDD8.bin b/test_fixtures/masp_proofs/99393E3AC8046F86ABA05519568B6780B6F18A312AE3909BEA19D16FCFE837DC.bin similarity index 61% rename from test_fixtures/masp_proofs/B9D0EC03A64BC8F9536A05F587B35316EE670A242606A81AF0139B3F21CDEDD8.bin rename to test_fixtures/masp_proofs/99393E3AC8046F86ABA05519568B6780B6F18A312AE3909BEA19D16FCFE837DC.bin index 6519f4fbc3..f456d94d7d 100644 Binary files a/test_fixtures/masp_proofs/B9D0EC03A64BC8F9536A05F587B35316EE670A242606A81AF0139B3F21CDEDD8.bin and b/test_fixtures/masp_proofs/99393E3AC8046F86ABA05519568B6780B6F18A312AE3909BEA19D16FCFE837DC.bin differ diff --git a/test_fixtures/masp_proofs/B94B8EDBFA5038FFB9D439D316EBD09A62AAF19015EF8149D6502B6C0FA871C4.bin b/test_fixtures/masp_proofs/B94B8EDBFA5038FFB9D439D316EBD09A62AAF19015EF8149D6502B6C0FA871C4.bin deleted file mode 100644 index 0cabab2505..0000000000 Binary files 
a/test_fixtures/masp_proofs/B94B8EDBFA5038FFB9D439D316EBD09A62AAF19015EF8149D6502B6C0FA871C4.bin and /dev/null differ diff --git a/test_fixtures/masp_proofs/C82CD3AD0DACE8091365CB0D91AE73F7B5BC1D64F787FA8A7985D301776103DD.bin b/test_fixtures/masp_proofs/BA4FED83467B6FEE522748C6F7E72A01F0B169F946835583DC2C71B550315603.bin similarity index 55% rename from test_fixtures/masp_proofs/C82CD3AD0DACE8091365CB0D91AE73F7B5BC1D64F787FA8A7985D301776103DD.bin rename to test_fixtures/masp_proofs/BA4FED83467B6FEE522748C6F7E72A01F0B169F946835583DC2C71B550315603.bin index 863fb078ac..565d189c0c 100644 Binary files a/test_fixtures/masp_proofs/C82CD3AD0DACE8091365CB0D91AE73F7B5BC1D64F787FA8A7985D301776103DD.bin and b/test_fixtures/masp_proofs/BA4FED83467B6FEE522748C6F7E72A01F0B169F946835583DC2C71B550315603.bin differ diff --git a/test_fixtures/masp_proofs/C7ECE8C02C2E764EFD5B6A0756CFE8EEC08E2D8512695A667D294AE1A4A8D4E6.bin b/test_fixtures/masp_proofs/C7ECE8C02C2E764EFD5B6A0756CFE8EEC08E2D8512695A667D294AE1A4A8D4E6.bin new file mode 100644 index 0000000000..d7fe00a74f Binary files /dev/null and b/test_fixtures/masp_proofs/C7ECE8C02C2E764EFD5B6A0756CFE8EEC08E2D8512695A667D294AE1A4A8D4E6.bin differ diff --git a/test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin b/test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin index c0a788ee99..fee4361a2b 100644 Binary files a/test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin and b/test_fixtures/masp_proofs/EE7C912B7E21F07494D58AA6668DC6BBB31619C7E93A1A5A2E64B694DBE1BD6E.bin differ diff --git a/test_fixtures/masp_proofs/72CAB503E1E0568CC0CAFA892125DB879A97D990F8395D0D8C34BC7EDD203DA9.bin b/test_fixtures/masp_proofs/EEB91EB873807EC77BBCA95D4CFA3F379DB351AB4AE081207ABFDFC429C9FA48.bin similarity index 59% rename from test_fixtures/masp_proofs/72CAB503E1E0568CC0CAFA892125DB879A97D990F8395D0D8C34BC7EDD203DA9.bin rename to 
test_fixtures/masp_proofs/EEB91EB873807EC77BBCA95D4CFA3F379DB351AB4AE081207ABFDFC429C9FA48.bin index fda6b2648e..5064fd5593 100644 Binary files a/test_fixtures/masp_proofs/72CAB503E1E0568CC0CAFA892125DB879A97D990F8395D0D8C34BC7EDD203DA9.bin and b/test_fixtures/masp_proofs/EEB91EB873807EC77BBCA95D4CFA3F379DB351AB4AE081207ABFDFC429C9FA48.bin differ diff --git a/test_fixtures/masp_proofs/F068FDF05B8F25DD923E667215344FFFAA6CA273027CD480AEA68DDED57D88CA.bin b/test_fixtures/masp_proofs/F068FDF05B8F25DD923E667215344FFFAA6CA273027CD480AEA68DDED57D88CA.bin index 1b18c1a6b3..3b05c546b0 100644 Binary files a/test_fixtures/masp_proofs/F068FDF05B8F25DD923E667215344FFFAA6CA273027CD480AEA68DDED57D88CA.bin and b/test_fixtures/masp_proofs/F068FDF05B8F25DD923E667215344FFFAA6CA273027CD480AEA68DDED57D88CA.bin differ diff --git a/test_fixtures/masp_proofs/434F17129C496E5DE034C4F2553E767C0E74D457A68F8606AFBF41E6F2F56D6E.bin b/test_fixtures/masp_proofs/F36A8353F15FD6D8158DBC67DDB827EEEDA858AB983D16024AAA415579A68953.bin similarity index 55% rename from test_fixtures/masp_proofs/434F17129C496E5DE034C4F2553E767C0E74D457A68F8606AFBF41E6F2F56D6E.bin rename to test_fixtures/masp_proofs/F36A8353F15FD6D8158DBC67DDB827EEEDA858AB983D16024AAA415579A68953.bin index e86721b9f7..755c32011e 100644 Binary files a/test_fixtures/masp_proofs/434F17129C496E5DE034C4F2553E767C0E74D457A68F8606AFBF41E6F2F56D6E.bin and b/test_fixtures/masp_proofs/F36A8353F15FD6D8158DBC67DDB827EEEDA858AB983D16024AAA415579A68953.bin differ diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 8e0a9c6719..3b81ee176e 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -14,5 +14,5 @@ version.workspace = true [dependencies] namada_core = { path = "../core", default-features = false, features = ["abciplus"] } -borsh = "0.9.0" +borsh.workspace = true strum = {version = "0.24", features = ["derive"]} diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 674bce9538..2c21ea9d97 100644 --- a/tests/Cargo.toml +++ 
b/tests/Cargo.toml @@ -31,6 +31,7 @@ wasm-runtime = ["namada/wasm-runtime"] [dependencies] namada = {path = "../shared", features = ["testing"]} namada_core = {path = "../core", features = ["testing"]} +namada_sdk = {path = "../sdk"} namada_test_utils = {path = "../test_utils"} namada_vp_prelude = {path = "../vp_prelude"} namada_tx_prelude = {path = "../tx_prelude"} @@ -58,6 +59,7 @@ tracing.workspace = true namada_apps = {path = "../apps", features = ["testing"]} assert_cmd.workspace = true borsh.workspace = true +borsh-ext.workspace = true color-eyre.workspace = true data-encoding.workspace = true # NOTE: enable "print" feature to see output from builds ran by e2e tests diff --git a/tests/src/e2e/eth_bridge_tests/helpers.rs b/tests/src/e2e/eth_bridge_tests/helpers.rs index a52273366c..04047a1722 100644 --- a/tests/src/e2e/eth_bridge_tests/helpers.rs +++ b/tests/src/e2e/eth_bridge_tests/helpers.rs @@ -55,7 +55,7 @@ impl EventsEndpointClient { /// Sends an Ethereum event to the Namada node. Returns `Ok` iff the event /// was successfully sent. 
pub async fn send(&mut self, event: &EthereumEvent) -> Result<()> { - let event = event.try_to_vec()?; + let event = event.serialize_to_vec()?; let req = Request::builder() .method(Method::POST) diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index c294367f1c..9e8e8d4eba 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -13,6 +13,7 @@ use core::convert::TryFrom; use core::str::FromStr; use core::time::Duration; use std::collections::HashMap; +use std::path::PathBuf; use color_eyre::eyre::Result; use eyre::eyre; @@ -53,7 +54,6 @@ use namada::ibc_proto::google::protobuf::Any; use namada::ledger::events::EventType; use namada::ledger::ibc::storage::*; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; -use namada::ledger::pos::{self, PosParams}; use namada::ledger::queries::RPC; use namada::ledger::storage::ics23_specs::ibc_proof_specs; use namada::ledger::storage::traits::Sha256Hasher; @@ -64,7 +64,7 @@ use namada::types::key::PublicKey; use namada::types::storage::{BlockHeight, Key}; use namada::types::token::Amount; use namada_apps::client::rpc::{ - query_storage_value, query_storage_value_bytes, + query_pos_parameters, query_storage_value, query_storage_value_bytes, }; use namada_apps::client::utils::id_from_pk; use namada_apps::config::ethereum_bridge; @@ -73,13 +73,15 @@ use namada_apps::facade::tendermint::block::Header as TmHeader; use namada_apps::facade::tendermint::merkle::proof::Proof as TmProof; use namada_apps::facade::tendermint_config::net::Address as TendermintAddress; use namada_apps::facade::tendermint_rpc::{Client, HttpClient, Url}; +use namada_sdk::masp::fs::FsShieldedUtils; use prost::Message; use setup::constants::*; use tendermint_light_client::components::io::{Io, ProdIo as TmLightClientIo}; -use super::helpers::wait_for_wasm_pre_compile; use super::setup::set_ethereum_bridge_mode; -use crate::e2e::helpers::{find_address, get_actor_rpc, get_validator_pk}; +use 
crate::e2e::helpers::{ + find_address, get_actor_rpc, get_validator_pk, wait_for_wasm_pre_compile, +}; use crate::e2e::setup::{self, sleep, Bin, NamadaCmd, Test, Who}; use crate::{run, run_as}; @@ -125,10 +127,6 @@ fn run_ledger_ibc() -> Result<()> { wait_for_wasm_pre_compile(&mut ledger_a)?; wait_for_wasm_pre_compile(&mut ledger_b)?; - // Wait for a first block - ledger_a.exp_string("Committed block hash")?; - ledger_b.exp_string("Committed block hash")?; - let _bg_ledger_a = ledger_a.background(); let _bg_ledger_b = ledger_b.background(); @@ -190,6 +188,18 @@ fn run_ledger_ibc() -> Result<()> { // The balance should not be changed check_balances_after_back(&port_id_b, &channel_id_b, &test_a, &test_b)?; + shielded_transfer( + &test_a, + &test_b, + &client_id_a, + &client_id_b, + &port_id_a, + &channel_id_a, + &port_id_b, + &channel_id_b, + )?; + check_shielded_balances(&port_id_b, &channel_id_b, &test_b)?; + // Skip tests for closing a channel and timeout_on_close since the transfer // channel cannot be closed @@ -197,13 +207,18 @@ fn run_ledger_ibc() -> Result<()> { } fn setup_two_single_node_nets() -> Result<(Test, Test)> { + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + // epoch per 100 seconds let update_genesis = |mut genesis: GenesisConfig| { - genesis.parameters.epochs_per_year = 315_360; + genesis.parameters.epochs_per_year = 31536; + genesis.parameters.min_num_of_blocks = 1; genesis }; let update_genesis_b = |mut genesis: GenesisConfig| { - genesis.parameters.epochs_per_year = 315_360; + genesis.parameters.epochs_per_year = 31536; + genesis.parameters.min_num_of_blocks = 1; setup::set_validators(1, genesis, |_| setup::ANOTHER_CHAIN_PORT_OFFSET) }; Ok(( @@ -249,11 +264,8 @@ fn make_client_state(test: &Test, height: Height) -> TmClientState { let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); let client = HttpClient::new(ledger_address).unwrap(); - let key = pos::params_key(); - 
let pos_params = test - .async_runtime() - .block_on(query_storage_value::(&client, &key)) - .unwrap(); + let pos_params = + test.async_runtime().block_on(query_pos_parameters(&client)); let pipeline_len = pos_params.pipeline_len; let key = param_storage::get_epoch_duration_storage_key(); @@ -635,7 +647,7 @@ fn transfer_token( let height = transfer( test_a, ALBERT, - &receiver, + receiver.to_string(), NAM, "100000", ALBERT_KEY, @@ -643,6 +655,7 @@ fn transfer_token( channel_id_a, None, None, + None, false, )?; let events = get_events(test_a, height)?; @@ -704,13 +717,14 @@ fn try_invalid_transfers( transfer( test_a, ALBERT, - &receiver, + receiver.to_string(), NAM, "10.1", ALBERT_KEY, port_id_a, channel_id_a, None, + None, Some("The amount for the IBC transfer should be an integer"), false, )?; @@ -719,13 +733,14 @@ fn try_invalid_transfers( transfer( test_a, ALBERT, - &receiver, + receiver.to_string(), NAM, "10", ALBERT_KEY, &"port".parse().unwrap(), channel_id_a, None, + None, Some("Error trying to apply a transaction"), false, )?; @@ -734,13 +749,14 @@ fn try_invalid_transfers( transfer( test_a, ALBERT, - &receiver, + receiver.to_string(), NAM, "10", ALBERT_KEY, port_id_a, &"channel-42".parse().unwrap(), None, + None, Some("Error trying to apply a transaction"), false, )?; @@ -753,12 +769,8 @@ fn transfer_received_token( channel_id: &ChannelId, test: &Test, ) -> Result<()> { - let nam = find_address(test, NAM)?; - // token received via the port and channel - let denom = format!("{port_id}/{channel_id}/{nam}"); - let ibc_token = ibc_token(denom).to_string(); - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let ibc_denom = format!("{port_id}/{channel_id}/nam"); let amount = Amount::native_whole(50000).to_string_native(); let tx_args = [ "transfer", @@ -767,7 +779,7 @@ fn transfer_received_token( "--target", ALBERT, "--token", - &ibc_token, + &ibc_denom, "--amount", &amount, "--gas-token", @@ -791,24 +803,23 @@ fn transfer_back( port_id_b: &PortId, 
channel_id_b: &ChannelId, ) -> Result<()> { - let token = find_address(test_b, NAM)?.to_string(); let receiver = find_address(test_a, ALBERT)?; // Chain A was the source for the sent token - let denom_raw = format!("{}/{}/{}", port_id_b, channel_id_b, token); - let ibc_token = ibc_token(denom_raw).to_string(); + let ibc_denom = format!("{port_id_b}/{channel_id_b}/nam"); // Send a token from Chain B let height = transfer( test_b, BERTHA, - &receiver, - ibc_token, + receiver.to_string(), + ibc_denom, "50000", BERTHA_KEY, port_id_b, channel_id_b, None, None, + None, false, )?; let events = get_events(test_b, height)?; @@ -864,12 +875,13 @@ fn transfer_timeout( let height = transfer( test_a, ALBERT, - &receiver, + receiver.to_string(), NAM, "100000", ALBERT_KEY, port_id_a, channel_id_a, + None, Some(Duration::new(5, 0)), None, false, @@ -899,6 +911,113 @@ fn transfer_timeout( Ok(()) } +#[allow(clippy::too_many_arguments)] +fn shielded_transfer( + test_a: &Test, + test_b: &Test, + client_id_a: &ClientId, + client_id_b: &ClientId, + port_id_a: &PortId, + channel_id_a: &ChannelId, + port_id_b: &PortId, + channel_id_b: &ChannelId, +) -> Result<()> { + // Get masp proof for the following IBC transfer from the destination chain + // It will send 10 BTC from Chain A to PA(B) on Chain B + let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); + let output_folder = test_b.test_dir.path().to_string_lossy(); + let amount = Amount::native_whole(10).to_string_native(); + let args = [ + "ibc-gen-shielded", + "--output-folder-path", + &output_folder, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + BTC, + "--amount", + &amount, + "--port-id", + port_id_b.as_ref(), + "--channel-id", + channel_id_b.as_ref(), + "--node", + &rpc_b, + ]; + let mut client = run!(test_b, Bin::Client, args, Some(120))?; + let file_path = get_shielded_transfer_path(&mut client)?; + client.assert_success(); + + // Send a token from Chain A to PA(B) on Chain B + let amount = 
Amount::native_whole(10).to_string_native(); + let height = transfer( + test_a, + ALBERT, + AB_PAYMENT_ADDRESS, + BTC, + amount, + ALBERT_KEY, + port_id_a, + channel_id_a, + Some(&file_path.to_string_lossy()), + None, + None, + false, + )?; + let events = get_events(test_a, height)?; + let packet = + get_packet_from_events(&events).ok_or(eyre!("Transaction failed"))?; + check_ibc_packet_query(test_a, &"send_packet".parse().unwrap(), &packet)?; + + let height_a = query_height(test_a)?; + let proof_commitment_on_a = + get_commitment_proof(test_a, &packet, height_a)?; + let msg = MsgRecvPacket { + packet, + proof_commitment_on_a, + proof_height_on_a: height_a, + signer: signer(), + }; + // Update the client state of Chain A on Chain B + update_client_with_height(test_a, test_b, client_id_b, height_a)?; + // Receive the token on Chain B + let height = submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; + let events = get_events(test_b, height)?; + let packet = + get_packet_from_events(&events).ok_or(eyre!("Transaction failed"))?; + let ack = + get_ack_from_events(&events).ok_or(eyre!("Transaction failed"))?; + check_ibc_packet_query( + test_b, + &"write_acknowledgement".parse().unwrap(), + &packet, + )?; + + // get the proof on Chain B + let height_b = query_height(test_b)?; + let proof_acked_on_b = get_ack_proof(test_b, &packet, height_b)?; + let msg = MsgAcknowledgement { + packet, + acknowledgement: ack.try_into().expect("invalid ack"), + proof_acked_on_b, + proof_height_on_b: height_b, + signer: signer(), + }; + // Update the client state of Chain B on Chain A + update_client_with_height(test_b, test_a, client_id_a, height_b)?; + // Acknowledge on Chain A + submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; + + Ok(()) +} + +fn get_shielded_transfer_path(client: &mut NamadaCmd) -> Result { + let (_unread, matched) = + client.exp_regex("Output IBC shielded transfer .*")?; + let file_path = matched.trim().split(' ').last().expect("invalid output"); + 
Ok(PathBuf::from_str(file_path).expect("invalid file path")) +} + fn get_commitment_proof( test: &Test, packet: &Packet, @@ -993,19 +1112,19 @@ fn submit_ibc_tx( fn transfer( test: &Test, sender: impl AsRef, - receiver: &Address, + receiver: impl AsRef, token: impl AsRef, amount: impl AsRef, signer: impl AsRef, port_id: &PortId, channel_id: &ChannelId, + memo: Option<&str>, timeout_sec: Option, expected_err: Option<&str>, wait_reveal_pk: bool, ) -> Result { let rpc = get_actor_rpc(test, &Who::Validator(0)); - let receiver = receiver.to_string(); let channel_id = channel_id.to_string(); let port_id = port_id.to_string(); let mut tx_args = vec![ @@ -1013,7 +1132,7 @@ fn transfer( "--source", sender.as_ref(), "--receiver", - &receiver, + receiver.as_ref(), "--signing-keys", signer.as_ref(), "--token", @@ -1028,13 +1147,19 @@ fn transfer( &rpc, ]; + let memo_path = memo.unwrap_or_default(); + if memo.is_some() { + tx_args.push("--memo-path"); + tx_args.push(memo_path); + } + let timeout = timeout_sec.unwrap_or_default().as_secs().to_string(); if timeout_sec.is_some() { tx_args.push("--timeout-sec-offset"); tx_args.push(&timeout); } - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + let mut client = run!(test, Bin::Client, tx_args, Some(300))?; match expected_err { Some(err) => { client.exp_string(err)?; @@ -1130,10 +1255,7 @@ fn check_ibc_update_query( client_id, &consensus_height, )) { - Ok(Some(event)) => { - println!("Found the update event: {:?}", event); - Ok(()) - } + Ok(Some(_)) => Ok(()), Ok(None) => Err(eyre!("No update event for the client {}", client_id)), Err(e) => Err(eyre!("IBC update event query failed: {}", e)), } @@ -1156,10 +1278,7 @@ fn check_ibc_packet_query( &packet.chan_id_on_b, &packet.seq_on_a, )) { - Ok(Some(event)) => { - println!("Found the packet event: {:?}", event); - Ok(()) - } + Ok(Some(_)) => Ok(()), Ok(None) => Err(eyre!("No packet event for the packet {}", packet)), Err(e) => Err(eyre!("IBC packet event query failed: 
{}", e)), } @@ -1211,8 +1330,6 @@ fn check_balances( test_a: &Test, test_b: &Test, ) -> Result<()> { - let token = find_address(test_a, NAM)?; - // Check the balances on Chain A let rpc_a = get_actor_rpc(test_a, &Who::Validator(0)); let query_args = vec!["balance", "--token", NAM, "--node", &rpc_a]; @@ -1229,13 +1346,12 @@ fn check_balances( client.assert_success(); // Check the balance on Chain B - let denom = format!("{}/{}/{}", &dest_port_id, &dest_channel_id, &token,); - let ibc_token = ibc_token(denom).to_string(); + let trace_path = format!("{}/{}", &dest_port_id, &dest_channel_id); let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); let query_args = vec![ - "balance", "--owner", BERTHA, "--token", &ibc_token, "--node", &rpc_b, + "balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc_b, ]; - let expected = format!("{}: 100000", ibc_token); + let expected = format!("{}/nam: 100000", trace_path); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); @@ -1249,25 +1365,21 @@ fn check_balances_after_non_ibc( test: &Test, ) -> Result<()> { // Check the balance on Chain B - let token = find_address(test, NAM)?; - let denom = format!("{}/{}/{}", port_id, channel_id, token); - let ibc_token = ibc_token(denom).to_string(); + let trace_path = format!("{}/{}", port_id, channel_id); // Check the source let rpc = get_actor_rpc(test, &Who::Validator(0)); - let query_args = vec![ - "balance", "--owner", BERTHA, "--token", &ibc_token, "--node", &rpc, - ]; - let expected = format!("{}: 50000", ibc_token); + let query_args = + vec!["balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc]; + let expected = format!("{}/nam: 50000", trace_path); let mut client = run!(test, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); // Check the traget - let query_args = vec![ - "balance", "--owner", ALBERT, "--token", &ibc_token, "--node", &rpc, - ]; - let expected = 
format!("{}: 50000", ibc_token); + let query_args = + vec!["balance", "--owner", ALBERT, "--token", NAM, "--node", &rpc]; + let expected = format!("{}/nam: 50000", trace_path); let mut client = run!(test, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); @@ -1282,8 +1394,6 @@ fn check_balances_after_back( test_a: &Test, test_b: &Test, ) -> Result<()> { - let token = find_address(test_b, NAM)?; - // Check the balances on Chain A let rpc_a = get_actor_rpc(test_a, &Who::Validator(0)); let query_args = vec!["balance", "--token", NAM, "--node", &rpc_a]; @@ -1300,13 +1410,37 @@ fn check_balances_after_back( client.assert_success(); // Check the balance on Chain B - let denom = format!("{}/{}/{}", dest_port_id, dest_channel_id, &token,); - let ibc_token = ibc_token(denom).to_string(); + let trace_path = format!("{}/{}", dest_port_id, dest_channel_id); let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); let query_args = vec![ - "balance", "--owner", BERTHA, "--token", &ibc_token, "--node", &rpc_b, + "balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc_b, + ]; + let expected = format!("{}/nam: 0", trace_path); + let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; + client.exp_string(&expected)?; + client.assert_success(); + Ok(()) +} + +/// Check balances after IBC shielded transfer +fn check_shielded_balances( + dest_port_id: &PortId, + dest_channel_id: &ChannelId, + test_b: &Test, +) -> Result<()> { + // Check the balance on Chain B + let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); + let query_args = vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + BTC, + "--no-conversions", + "--node", + &rpc_b, ]; - let expected = format!("{}: 0", ibc_token); + let expected = format!("{}/{}/btc: 10", dest_port_id, dest_channel_id); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); diff --git a/tests/src/e2e/ledger_tests.rs 
b/tests/src/e2e/ledger_tests.rs index 2f5bbe4ea7..2ee44c62ae 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -16,14 +16,12 @@ use std::str::FromStr; use std::sync::Arc; use std::time::{Duration, Instant}; -use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use color_eyre::eyre::Result; use data_encoding::HEXLOWER; use namada::types::address::Address; -use namada::types::io::DefaultIo; use namada::types::storage::Epoch; use namada::types::token; -use namada_apps::client::tx::CLIShieldedUtils; use namada_apps::config::ethereum_bridge; use namada_apps::config::genesis::genesis_config::{ GenesisConfig, ParametersConfig, PgfParametersConfig, PosParamsConfig, @@ -33,6 +31,7 @@ use namada_apps::facade::tendermint_config::net::Address as TendermintAddress; use namada_core::ledger::governance::cli::onchain::{ PgfFunding, PgfFundingTarget, StewardsUpdate, }; +use namada_sdk::masp::fs::FsShieldedUtils; use namada_test_utils::TestWasms; use namada_vp_prelude::BTreeSet; use serde_json::json; @@ -437,8 +436,7 @@ fn ledger_txs_and_queries() -> Result<()> { key: None, shielded: None, } - .try_to_vec() - .unwrap(); + .serialize_to_vec(); let tx_data_path = test.test_dir.path().join("tx.data"); std::fs::write(&tx_data_path, transfer).unwrap(); let tx_data_path = tx_data_path.to_string_lossy(); @@ -665,7 +663,7 @@ fn ledger_txs_and_queries() -> Result<()> { &validator_one_rpc, ], // expect hex encoded of borsh encoded bytes - HEXLOWER.encode(&christel_balance.try_to_vec().unwrap()), + HEXLOWER.encode(&christel_balance.serialize_to_vec()), ), ]; for (query_args, expected) in &query_args_and_expected_response { @@ -688,7 +686,7 @@ fn ledger_txs_and_queries() -> Result<()> { #[test] fn masp_txs_and_queries() -> Result<()> { // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be 
constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. @@ -836,7 +834,7 @@ fn masp_txs_and_queries() -> Result<()> { #[test] fn wrapper_disposable_signer() -> Result<()> { // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. @@ -986,7 +984,7 @@ fn invalid_transactions() -> Result<()> { client.exp_string("Transaction accepted")?; client.exp_string("Transaction applied")?; client.exp_string("Transaction is invalid")?; - client.exp_string(r#""code": "5"#)?; + client.exp_string(r#""code": "2"#)?; client.assert_success(); let mut ledger = bg_ledger.foreground(); @@ -1042,7 +1040,7 @@ fn invalid_transactions() -> Result<()> { client.exp_string("Error trying to apply a transaction")?; - client.exp_string(r#""code": "4"#)?; + client.exp_string(r#""code": "1"#)?; client.assert_success(); Ok(()) @@ -1051,13 +1049,16 @@ fn invalid_transactions() -> Result<()> { /// PoS bonding, unbonding and withdrawal tests. In this test we: /// /// 1. Run the ledger node with shorter epochs for faster progression -/// 2. Submit a self-bond for the genesis validator -/// 3. Submit a delegation to the genesis validator -/// 4. Submit an unbond of the self-bond -/// 5. Submit an unbond of the delegation -/// 6. Wait for the unbonding epoch -/// 7. Submit a withdrawal of the self-bond -/// 8. Submit a withdrawal of the delegation +/// 2. Submit a self-bond for the first genesis validator +/// 3. Submit a delegation to the first genesis validator +/// 4. Submit a re-delegation from the first to the second genesis validator +/// 5. Submit an unbond of the self-bond +/// 6. Submit an unbond of the delegation from the first validator +/// 7. 
Submit an unbond of the re-delegation from the second validator +/// 8. Wait for the unbonding epoch +/// 9. Submit a withdrawal of the self-bond +/// 10. Submit a withdrawal of the delegation +/// 11. Submit an withdrawal of the re-delegation #[test] fn pos_bonds() -> Result<()> { let pipeline_len = 2; @@ -1075,11 +1076,17 @@ fn pos_bonds() -> Result<()> { unbonding_len, ..genesis.pos_params }; - GenesisConfig { + let genesis = GenesisConfig { parameters, pos_params, ..genesis - } + }; + let mut genesis = + setup::set_validators(2, genesis, default_port_offset); + // Remove stake from the 2nd validator so chain can run with a + // single node + genesis.validator.get_mut("validator-1").unwrap().tokens = None; + genesis }, None, )?; @@ -1093,13 +1100,13 @@ fn pos_bonds() -> Result<()> { ); // 1. Run the ledger node - let _bg_ledger = + let _bg_validator_0 = start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? .background(); - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); - // 2. Submit a self-bond for the genesis validator + // 2. Submit a self-bond for the first genesis validator let tx_args = vec![ "bond", "--validator", @@ -1109,7 +1116,7 @@ fn pos_bonds() -> Result<()> { "--signing-keys", "validator-0-account-key", "--node", - &validator_one_rpc, + &validator_0_rpc, ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; @@ -1117,7 +1124,7 @@ fn pos_bonds() -> Result<()> { client.exp_string("Transaction is valid.")?; client.assert_success(); - // 3. Submit a delegation to the genesis validator + // 3. 
Submit a delegation to the first genesis validator let tx_args = vec![ "bond", "--validator", @@ -1129,14 +1136,35 @@ fn pos_bonds() -> Result<()> { "--signing-keys", BERTHA_KEY, "--node", - &validator_one_rpc, + &validator_0_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); - // 4. Submit an unbond of the self-bond + // 4. Submit a re-delegation from the first to the second genesis validator + let tx_args = vec![ + "redelegate", + "--source-validator", + "validator-0", + "--destination-validator", + "validator-1", + "--owner", + BERTHA, + "--amount", + "2500.0", + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_0_rpc, + ]; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // 5. Submit an unbond of the self-bond let tx_args = vec![ "unbond", "--validator", @@ -1146,7 +1174,7 @@ fn pos_bonds() -> Result<()> { "--signing-keys", "validator-0-account-key", "--node", - &validator_one_rpc, + &validator_0_rpc, ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; @@ -1154,7 +1182,7 @@ fn pos_bonds() -> Result<()> { .exp_string("Amount 5100.000000 withdrawable starting from epoch ")?; client.assert_success(); - // 5. Submit an unbond of the delegation + // 6. 
Submit an unbond of the delegation from the first validator let tx_args = vec![ "unbond", "--validator", @@ -1162,22 +1190,41 @@ fn pos_bonds() -> Result<()> { "--source", BERTHA, "--amount", - "3200.", + "1600.", "--signing-keys", BERTHA_KEY, "--node", - &validator_one_rpc, + &validator_0_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - let expected = "Amount 3200.000000 withdrawable starting from epoch "; + let expected = "Amount 1600.000000 withdrawable starting from epoch "; + let _ = client.exp_regex(&format!("{expected}.*\n"))?; + client.assert_success(); + + // 7. Submit an unbond of the re-delegation from the second validator + let tx_args = vec![ + "unbond", + "--validator", + "validator-1", + "--source", + BERTHA, + "--amount", + "1600.", + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_0_rpc, + ]; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + let expected = "Amount 1600.000000 withdrawable starting from epoch "; let (_unread, matched) = client.exp_regex(&format!("{expected}.*\n"))?; let epoch_raw = matched.trim().split_once(expected).unwrap().1; let delegation_withdrawable_epoch = Epoch::from_str(epoch_raw).unwrap(); client.assert_success(); - // 6. Wait for the delegation withdrawable epoch (the self-bond was unbonded + // 8. Wait for the delegation withdrawable epoch (the self-bond was unbonded // before it) - let epoch = get_epoch(&test, &validator_one_rpc)?; + let epoch = get_epoch(&test, &validator_0_rpc)?; println!( "Current epoch: {}, earliest epoch for withdrawal: {}", @@ -1192,13 +1239,13 @@ fn pos_bonds() -> Result<()> { delegation_withdrawable_epoch ); } - let epoch = epoch_sleep(&test, &validator_one_rpc, 40)?; + let epoch = epoch_sleep(&test, &validator_0_rpc, 40)?; if epoch >= delegation_withdrawable_epoch { break; } } - // 7. Submit a withdrawal of the self-bond + // 9. 
Submit a withdrawal of the self-bond let tx_args = vec![ "withdraw", "--validator", @@ -1206,7 +1253,7 @@ fn pos_bonds() -> Result<()> { "--signing-keys", "validator-0-account-key", "--node", - &validator_one_rpc, + &validator_0_rpc, ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; @@ -1214,7 +1261,7 @@ fn pos_bonds() -> Result<()> { client.exp_string("Transaction is valid.")?; client.assert_success(); - // 8. Submit a withdrawal of the delegation + // 10. Submit a withdrawal of the delegation let tx_args = vec![ "withdraw", "--validator", @@ -1224,12 +1271,30 @@ fn pos_bonds() -> Result<()> { "--signing-keys", BERTHA_KEY, "--node", - &validator_one_rpc, + &validator_0_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; client.exp_string("Transaction applied with result:")?; client.exp_string("Transaction is valid.")?; client.assert_success(); + + // 11. Submit an withdrawal of the re-delegation + let tx_args = vec![ + "withdraw", + "--validator", + "validator-1", + "--source", + BERTHA, + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_0_rpc, + ]; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction applied with result:")?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + Ok(()) } @@ -3248,6 +3313,8 @@ fn double_signing_gets_slashed() -> Result<()> { .exp_regex(r"Slashing [a-z0-9]+ for Duplicate vote in epoch [0-9]+") .unwrap(); println!("\n{res}\n"); + // Wait to commit a block + validator_1.exp_regex(r"Committed block hash.*, height: [0-9]+")?; let bg_validator_1 = validator_1.background(); let exp_processing_epoch = Epoch::from_str(res.split(' ').last().unwrap()) @@ -3257,9 +3324,6 @@ fn double_signing_gets_slashed() -> Result<()> { + 1u64; // Query slashes - // let tx_args = ["slashes", "--node", &validator_one_rpc]; - // let client = run!(test, Bin::Client, tx_args, Some(40))?; - let mut client = run!( test, Bin::Client, diff 
--git a/tests/src/e2e/multitoken_tests/helpers.rs b/tests/src/e2e/multitoken_tests/helpers.rs index 7ce90bcc87..0856691dd5 100644 --- a/tests/src/e2e/multitoken_tests/helpers.rs +++ b/tests/src/e2e/multitoken_tests/helpers.rs @@ -102,9 +102,9 @@ pub fn mint_red_tokens( test, TxWriteData { key: red_balance_key, - value: amount.try_to_vec()?, + value: amount.serialize_to_vec()?, } - .try_to_vec()?, + .serialize_to_vec()?, )?; let tx_data_path = tx_data_path.to_string_lossy().to_string(); diff --git a/tests/src/integration/masp.rs b/tests/src/integration/masp.rs index ecd1b34465..f01b97c01f 100644 --- a/tests/src/integration/masp.rs +++ b/tests/src/integration/masp.rs @@ -2,13 +2,9 @@ use std::path::PathBuf; use color_eyre::eyre::Result; use color_eyre::owo_colors::OwoColorize; -use namada::types::io::DefaultIo; -use namada_apps::client::tx::CLIShieldedUtils; use namada_apps::node::ledger::shell::testing::client::run; use namada_apps::node::ledger::shell::testing::utils::{Bin, CapturedOutput}; -use namada_core::types::address::{btc, eth, masp_rewards}; -use namada_core::types::token; -use namada_core::types::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; +use namada_sdk::masp::fs::FsShieldedUtils; use test_log::test; use super::setup; @@ -22,20 +18,16 @@ use crate::e2e::setup::constants::{ /// for leaving their assets in the pool for varying periods of time. #[test] fn masp_incentives() -> Result<()> { - // The number of decimal places used by BTC amounts. - const BTC_DENOMINATION: u8 = 8; - // The number of decimal places used by ETH amounts. - const ETH_DENOMINATION: u8 = 18; // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. 
Necessary to ensure that conversion is // not invalidated. - let mut node = setup::setup()?; + let (mut node, _services) = setup::setup()?; // Wait till epoch boundary - let ep0 = node.next_epoch(); + node.next_epoch(); // Send 20 BTC from Albert to PA run( &node, @@ -94,10 +86,8 @@ fn masp_incentives() -> Result<()> { assert!(captured.result.is_ok()); assert!(captured.contains("No shielded nam balance found")); - let masp_rewards = masp_rewards(); - // Wait till epoch boundary - let ep1 = node.next_epoch(); + node.next_epoch(); // Assert BTC balance at VK(A) is 20 let captured = CapturedOutput::of(|| { @@ -118,9 +108,6 @@ fn masp_incentives() -> Result<()> { assert!(captured.result.is_ok()); assert!(captured.contains("btc: 20")); - let amt20 = token::Amount::from_uint(20, BTC_DENOMINATION).unwrap(); - let amt10 = token::Amount::from_uint(10, ETH_DENOMINATION).unwrap(); - // Assert NAM balance at VK(A) is 20*BTC_reward*(epoch_1-epoch_0) let captured = CapturedOutput::of(|| { run( @@ -138,13 +125,8 @@ fn masp_incentives() -> Result<()> { ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep1.0 - ep0.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 22.74")); // Assert NAM balance at MASP pool is 20*BTC_reward*(epoch_1-epoch_0) let captured = CapturedOutput::of(|| { @@ -162,16 +144,11 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep1.0 - ep0.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 22.74")); // Wait till epoch boundary - let ep2 = node.next_epoch(); + node.next_epoch(); // Assert BTC balance at VK(A) is 20 let captured = 
CapturedOutput::of(|| { @@ -208,13 +185,8 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep2.0 - ep0.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 90.96")); // Assert NAM balance at MASP pool is 20*BTC_reward*(epoch_2-epoch_0) let captured = CapturedOutput::of(|| { @@ -232,16 +204,11 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep2.0 - ep0.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 90.96")); // Wait till epoch boundary - let ep3 = node.next_epoch(); + node.next_epoch(); // Send 10 ETH from Albert to PA(B) run( @@ -302,7 +269,7 @@ fn masp_incentives() -> Result<()> { assert!(captured.contains("No shielded nam balance found")); // Wait till epoch boundary - let ep4 = node.next_epoch(); + node.next_epoch(); // Assert ETH balance at VK(B) is 10 let captured = CapturedOutput::of(|| { @@ -339,13 +306,8 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt10 * masp_rewards[ð()]).0 * (ep4.0 - ep3.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 22.71432")); // Assert NAM balance at MASP pool is // 20*BTC_reward*(epoch_4-epoch_0)+10*ETH_reward*(epoch_4-epoch_3) @@ -364,17 +326,11 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = ((amt20 * masp_rewards[&btc()]).0 * (ep4.0 - ep0.0)) - + ((amt10 * masp_rewards[ð()]).0 * (ep4.0 - ep3.0)); - let denominated = DenominatedAmount { - amount: 
amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 386.46336")); // Wait till epoch boundary - let ep5 = node.next_epoch(); + node.next_epoch(); // Send 10 ETH from SK(B) to Christel run( @@ -417,7 +373,7 @@ fn masp_incentives() -> Result<()> { assert!(captured.result.is_ok()); assert!(captured.contains("No shielded eth balance found")); - let _ep = node.next_epoch(); + node.next_epoch(); // Assert NAM balance at VK(B) is 10*ETH_reward*(ep-epoch_3) let captured = CapturedOutput::of(|| { @@ -435,15 +391,10 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt10 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 86.60024")); - let ep = node.next_epoch(); + node.next_epoch(); // Assert NAM balance at MASP pool is // 20*BTC_reward*(epoch_5-epoch_0)+10*ETH_reward*(epoch_5-epoch_3) let captured = CapturedOutput::of(|| { @@ -461,17 +412,11 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = ((amt20 * masp_rewards[&btc()]).0 * (ep.0 - ep0.0)) - + ((amt10 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0)); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 1180.41525")); // Wait till epoch boundary - let ep6 = node.next_epoch(); + node.next_epoch(); // Send 20 BTC from SK(A) to Christel run( @@ -530,13 +475,8 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; 
assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated,))); + assert!(captured.contains("nam: 1407.16324")); // Assert NAM balance at MASP pool is // 20*BTC_reward*(epoch_6-epoch_0)+20*ETH_reward*(epoch_5-epoch_3) @@ -555,17 +495,11 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = ((amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)) - + ((amt10 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0)); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated,))); + assert!(captured.contains("nam: 1520.37191")); // Wait till epoch boundary - let _ep7 = node.next_epoch(); + node.next_epoch(); // Assert NAM balance at VK(A) is 20*BTC_reward*(epoch_6-epoch_0) let captured = CapturedOutput::of(|| { @@ -583,13 +517,8 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated))); + assert!(captured.contains("nam: 1573.18")); // Assert NAM balance at VK(B) is 10*ETH_reward*(epoch_5-epoch_3) let captured = CapturedOutput::of(|| { @@ -607,13 +536,8 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = (amt10 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated,))); + assert!(captured.contains("nam: 126.565")); // Assert NAM balance at MASP pool is // 20*BTC_reward*(epoch_6-epoch_0)+10*ETH_reward*(epoch_5-epoch_3) @@ -632,18 +556,12 @@ fn masp_incentives() -> Result<()> { ], ) }); - let amt = ((amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)) - + ((amt10 * 
masp_rewards[ð()]).0 * (ep5.0 - ep3.0)); - let denominated = DenominatedAmount { - amount: amt, - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; assert!(captured.result.is_ok()); - assert!(captured.contains(&format!("nam: {}", denominated,))); + assert!(captured.contains("nam: 1699.745")); // Wait till epoch boundary to prevent conversion expiry during transaction // construction - let _ep8 = node.next_epoch(); + node.next_epoch(); // Send 10*ETH_reward*(epoch_5-epoch_3) NAM from SK(B) to Christel run( @@ -658,8 +576,7 @@ fn masp_incentives() -> Result<()> { "--token", NAM, "--amount", - &((amt10 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0)) - .to_string_native(), + "141.49967", "--signing-keys", BERTHA, "--node", @@ -669,7 +586,7 @@ fn masp_incentives() -> Result<()> { node.assert_success(); // Wait till epoch boundary - let _ep9 = node.next_epoch(); + node.next_epoch(); // Send 20*BTC_reward*(epoch_6-epoch_0) NAM from SK(A) to Bertha run( @@ -684,8 +601,7 @@ fn masp_incentives() -> Result<()> { "--token", NAM, "--amount", - &((amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)) - .to_string_native(), + "1980.356", "--signing-keys", ALBERT, "--node", @@ -765,9 +681,9 @@ fn masp_pinned_txs() -> Result<()> { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); - let mut node = setup::setup()?; + let (mut node, _services) = setup::setup()?; // Wait till epoch boundary let _ep0 = node.next_epoch(); @@ -928,14 +844,14 @@ fn masp_txs_and_queries() -> Result<()> { // This address doesn't matter for tests. But an argument is required. 
let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); enum Response { Ok(&'static str), Err(&'static str), } - let mut node = setup::setup()?; + let (mut node, _services) = setup::setup()?; _ = node.next_epoch(); let txs_args = vec![ // 0. Attempt to spend 10 BTC at SK(A) to PA(B) @@ -1230,15 +1146,15 @@ fn masp_txs_and_queries() -> Result<()> { /// 3. Submit a new wrapper with an invalid unshielding tx and assert the /// failure #[test] -fn wrapper_fee_unshielding() { +fn wrapper_fee_unshielding() -> Result<()> { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new::(PathBuf::new()); + let _ = FsShieldedUtils::new(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. - let mut node = setup::setup().unwrap(); + let (mut node, _services) = setup::setup()?; _ = node.next_epoch(); // 1. Shield some tokens @@ -1262,8 +1178,7 @@ fn wrapper_fee_unshielding() { "--ledger-address", validator_one_rpc, ], - ) - .unwrap(); + )?; node.assert_success(); _ = node.next_epoch(); @@ -1288,8 +1203,7 @@ fn wrapper_fee_unshielding() { "--ledger-address", validator_one_rpc, ], - ) - .unwrap(); + )?; node.assert_success(); // 3. 
Invalid unshielding @@ -1320,4 +1234,5 @@ fn wrapper_fee_unshielding() { .is_err(); assert!(tx_run); + Ok(()) } diff --git a/tests/src/integration/setup.rs b/tests/src/integration/setup.rs index df74c5f6f1..9bbd00eef9 100644 --- a/tests/src/integration/setup.rs +++ b/tests/src/integration/setup.rs @@ -11,10 +11,13 @@ use namada_apps::config::genesis::genesis_config::GenesisConfig; use namada_apps::config::TendermintMode; use namada_apps::facade::tendermint::Timeout; use namada_apps::facade::tendermint_proto::google::protobuf::Timestamp; -use namada_apps::node::ledger::shell::testing::node::MockNode; +use namada_apps::node::ledger::shell::testing::node::{ + mock_services, MockNode, MockServicesCfg, MockServicesController, + MockServicesPackage, +}; use namada_apps::node::ledger::shell::testing::utils::TestDir; use namada_apps::node::ledger::shell::Shell; -use namada_core::types::address::Address; +use namada_core::types::address::nam; use namada_core::types::chain::{ChainId, ChainIdPrefix}; use toml::value::Table; @@ -26,14 +29,14 @@ use crate::e2e::setup::{ const ENV_VAR_KEEP_TEMP: &str = "NAMADA_INT_KEEP_TEMP"; /// Setup a network with a single genesis validator node. -pub fn setup() -> Result { +pub fn setup() -> Result<(MockNode, MockServicesController)> { initialize_genesis(|genesis| genesis) } /// Setup folders with genesis, configs, wasm, etc. 
pub fn initialize_genesis( mut update_genesis: impl FnMut(GenesisConfig) -> GenesisConfig, -) -> Result { +) -> Result<(MockNode, MockServicesController)> { let working_dir = std::fs::canonicalize("..").unwrap(); let keep_temp = match std::env::var(ENV_VAR_KEEP_TEMP) { Ok(val) => val.to_ascii_lowercase() != "false", @@ -81,7 +84,23 @@ pub fn initialize_genesis( }, ); - create_node(test_dir, &genesis, keep_temp) + let auto_drive_services = { + // NB: for now, the only condition that + // dictates whether mock services should + // be enabled is if the Ethereum bridge + // is enabled at genesis + genesis.ethereum_bridge_params.is_some() + }; + let enable_eth_oracle = { + // NB: we only enable the oracle if the + // Ethereum bridge is enabled at genesis + genesis.ethereum_bridge_params.is_some() + }; + let services_cfg = MockServicesCfg { + auto_drive_services, + enable_eth_oracle, + }; + create_node(test_dir, &genesis, keep_temp, services_cfg) } /// Create a mock ledger node. @@ -89,7 +108,8 @@ fn create_node( base_dir: TestDir, genesis: &GenesisConfig, keep_temp: bool, -) -> Result { + services_cfg: MockServicesCfg, +) -> Result<(MockNode, MockServicesController)> { // look up the chain id from the global file. let chain_id = if let toml::Value::String(chain_id) = toml::from_str::
( @@ -119,26 +139,32 @@ fn create_node( ); // instantiate and initialize the ledger node. - let (sender, recv) = tokio::sync::mpsc::unbounded_channel(); + let MockServicesPackage { + auto_drive_services, + services, + shell_handlers, + controller, + } = mock_services(services_cfg); let node = MockNode { shell: Arc::new(Mutex::new(Shell::new( config::Ledger::new( base_dir.path(), chain_id.clone(), - TendermintMode::Validator + TendermintMode::Validator, ), wasm_dir, - sender, - None, + shell_handlers.tx_broadcaster, + shell_handlers.eth_oracle_channels, None, 50 * 1024 * 1024, // 50 kiB 50 * 1024 * 1024, // 50 kiB - Address::from_str("atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5").unwrap(), + nam(), ))), test_dir: ManuallyDrop::new(base_dir), keep_temp, - _broadcast_recv: recv, + services: Arc::new(services), results: Arc::new(Mutex::new(vec![])), + auto_drive_services, }; let init_req = namada_apps::facade::tower_abci::request::InitChain { time: Some(Timestamp { @@ -156,8 +182,10 @@ fn create_node( locked .init_chain(init_req, 1) .map_err(|e| eyre!("Failed to initialize ledger: {:?}", e))?; + // set the height of the first block (should be 1) + locked.wl_storage.storage.block.height = 1.into(); locked.commit(); } - Ok(node) + Ok((node, controller)) } diff --git a/tests/src/native_vp/eth_bridge_pool.rs b/tests/src/native_vp/eth_bridge_pool.rs index 364dcd074c..3de46fc84b 100644 --- a/tests/src/native_vp/eth_bridge_pool.rs +++ b/tests/src/native_vp/eth_bridge_pool.rs @@ -2,12 +2,9 @@ mod test_bridge_pool_vp { use std::path::PathBuf; - use borsh::{BorshDeserialize, BorshSerialize}; + use borsh::BorshDeserialize; + use borsh_ext::BorshSerializeExt; use namada::core::ledger::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; - use namada::ledger::eth_bridge::{ - wrapped_erc20s, Contracts, Erc20WhitelistEntry, EthereumBridgeConfig, - UpgradeableContract, - }; use 
namada::ledger::native_vp::ethereum_bridge::bridge_pool_vp::BridgePoolVp; use namada::proto::Tx; use namada::types::address::{nam, wnam}; @@ -20,6 +17,10 @@ mod test_bridge_pool_vp { use namada::types::token::Amount; use namada_apps::wallet::defaults::{albert_address, bertha_address}; use namada_apps::wasm_loader; + use namada_sdk::eth_bridge::{ + wrapped_erc20s, Contracts, Erc20WhitelistEntry, EthereumBridgeConfig, + UpgradeableContract, + }; use crate::native_vp::TestNativeVpEnv; use crate::tx::{tx_host_env, TestTxEnv}; @@ -124,7 +125,7 @@ mod test_bridge_pool_vp { } fn create_tx(transfer: PendingTransfer, keypair: &common::SecretKey) -> Tx { - let data = transfer.try_to_vec().expect("Test failed"); + let data = transfer.serialize_to_vec(); let wasm_code = wasm_loader::read_wasm_or_exit(wasm_dir(), ADD_TRANSFER_WASM); diff --git a/tests/src/native_vp/pos.rs b/tests/src/native_vp/pos.rs index 344a75d4e3..233d999861 100644 --- a/tests/src/native_vp/pos.rs +++ b/tests/src/native_vp/pos.rs @@ -95,9 +95,9 @@ //! - add slashes //! - add rewards -use namada::ledger::pos::namada_proof_of_stake::init_genesis; -use namada::proof_of_stake::parameters::PosParams; -use namada::proof_of_stake::storage::GenesisValidator; +use namada::proof_of_stake::parameters::{OwnedPosParams, PosParams}; +use namada::proof_of_stake::test_init_genesis as init_genesis; +use namada::proof_of_stake::types::GenesisValidator; use namada::types::storage::Epoch; use crate::tx::tx_host_env; @@ -106,9 +106,9 @@ use crate::tx::tx_host_env; /// parameters. 
pub fn init_pos( genesis_validators: &[GenesisValidator], - params: &PosParams, + params: &OwnedPosParams, start_epoch: Epoch, -) { +) -> PosParams { tx_host_env::init(); tx_host_env::with(|tx_env| { @@ -130,9 +130,9 @@ pub fn init_pos( // .storage // .init_genesis(params, genesis_validators.iter(), start_epoch) // .unwrap(); - init_genesis( + let params = init_genesis( &mut tx_env.wl_storage, - params, + params.clone(), genesis_validators.iter().cloned(), start_epoch, ) @@ -140,7 +140,8 @@ pub fn init_pos( // Commit changes in WL to genesis state tx_env.commit_genesis(); - }); + params + }) } #[cfg(test)] @@ -334,6 +335,7 @@ mod tests { // We're starting from an empty state let state = vec![]; let epoch = Epoch(epoch); + let params = params.with_default_gov_params(); arb_valid_pos_action(&state).prop_map(move |valid_action| { Self { epoch, @@ -572,8 +574,7 @@ pub mod testing { use namada::proof_of_stake::epoched::DynEpochOffset; use namada::proof_of_stake::parameters::testing::arb_rate; use namada::proof_of_stake::parameters::PosParams; - use namada::proof_of_stake::storage::BondId; - use namada::proof_of_stake::types::ValidatorState; + use namada::proof_of_stake::types::{BondId, ValidatorState}; use namada::proof_of_stake::{ get_num_consensus_validators, read_pos_params, unbond_handle, ADDRESS as POS_ADDRESS, @@ -1033,7 +1034,7 @@ pub mod testing { // .sum() // }) // .unwrap_or_default(); - let token_delta = token::Change::default(); + let token_delta = token::Change::zero(); vec![ PosStorageChange::WithdrawUnbond { owner, validator }, @@ -1150,7 +1151,7 @@ pub mod testing { // last // update, until we unbond the full // amount let mut bond_epoch = // u64::from(bonds.last_update()) + params.unbonding_len; - // 'outer: while to_unbond != token::Amount::default() + // 'outer: while to_unbond != token::Amount::zero() // && bond_epoch >= bonds.last_update().into() // { // if let Some(bond) = bonds.get_delta_at_epoch(bond_epoch) diff --git 
a/tests/src/vm_host_env/ibc.rs b/tests/src/vm_host_env/ibc.rs index b8d88961ea..583c58ada3 100644 --- a/tests/src/vm_host_env/ibc.rs +++ b/tests/src/vm_host_env/ibc.rs @@ -84,8 +84,9 @@ use namada::types::time::DurationSecs; use namada::types::token::{self, Amount, DenominatedAmount}; use namada::vm::{wasm, WasmCacheRwAccess}; use namada_core::ledger::gas::TxGasMeter; +use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_test_utils::TestWasms; -use namada_tx_prelude::BorshSerialize; +use namada_tx_prelude::borsh_ext::BorshSerializeExt; use crate::tx::*; @@ -211,6 +212,8 @@ pub fn init_storage() -> (Address, Address) { tx_host_env::with(|env| { ibc::init_genesis_storage(&mut env.wl_storage); + let gov_params = GovernanceParameters::default(); + gov_params.init_storage(&mut env.wl_storage).unwrap(); pos::init_genesis_storage( &mut env.wl_storage, &PosParams::default(), @@ -243,11 +246,11 @@ pub fn init_storage() -> (Address, Address) { tx_host_env::with(|env| { env.wl_storage .storage - .write(&denom_key, &token_denom.try_to_vec().unwrap()) + .write(&denom_key, &token_denom.serialize_to_vec()) .unwrap(); env.wl_storage .storage - .write(&key, &init_bal.try_to_vec().unwrap()) + .write(&key, &init_bal.serialize_to_vec()) .unwrap(); }); @@ -257,7 +260,7 @@ pub fn init_storage() -> (Address, Address) { min_num_of_blocks: 10, min_duration: DurationSecs(100), }; - let bytes = epoch_duration.try_to_vec().unwrap(); + let bytes = epoch_duration.serialize_to_vec(); tx_host_env::with(|env| { env.wl_storage.storage.write(&key, &bytes).unwrap(); }); diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index 68ebd76dff..948692fa70 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs @@ -21,6 +21,7 @@ mod tests { use std::collections::BTreeSet; use std::panic; + use borsh_ext::BorshSerializeExt; use itertools::Itertools; use namada::ibc::core::Msg; use namada::ledger::ibc::storage as ibc_storage; @@ -41,9 
+42,7 @@ mod tests { use namada_test_utils::TestWasms; use namada_tx_prelude::address::InternalAddress; use namada_tx_prelude::chain::ChainId; - use namada_tx_prelude::{ - Address, BorshSerialize, StorageRead, StorageWrite, - }; + use namada_tx_prelude::{Address, StorageRead, StorageWrite}; use namada_vp_prelude::account::AccountPublicKeysMap; use namada_vp_prelude::VpEnv; use prost::Message; @@ -475,15 +474,12 @@ mod tests { assert!( signed_tx_data .verify_signatures( - &[ - *signed_tx_data.data_sechash(), - *signed_tx_data.code_sechash(), - ], + &[signed_tx_data.header_hash(),], pks_map, &None, 1, None, - Some(&mut VpGasMeter::new_from_tx_meter( + &mut Some(&mut VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()) )) ) @@ -494,17 +490,14 @@ mod tests { assert!( signed_tx_data .verify_signatures( - &[ - *signed_tx_data.data_sechash(), - *signed_tx_data.code_sechash(), - ], + &[signed_tx_data.header_hash(),], AccountPublicKeysMap::from_iter([ other_keypair.ref_to() ]), &None, 1, None, - Some(&mut VpGasMeter::new_from_tx_meter( + &mut Some(&mut VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()) )) ) @@ -577,7 +570,7 @@ mod tests { // evaluating the VP template which always returns `true` should pass let code = TestWasms::VpAlwaysTrue.read_bytes(); let code_hash = Hash::sha256(&code); - let code_len = (code.len() as u64).try_to_vec().unwrap(); + let code_len = (code.len() as u64).serialize_to_vec(); vp_host_env::with(|env| { // store wasm codes let key = Key::wasm_code(&code_hash); @@ -600,7 +593,7 @@ mod tests { // pass let code = TestWasms::VpAlwaysFalse.read_bytes(); let code_hash = Hash::sha256(&code); - let code_len = (code.len() as u64).try_to_vec().unwrap(); + let code_len = (code.len() as u64).serialize_to_vec(); vp_host_env::with(|env| { // store wasm codes let key = Key::wasm_code(&code_hash); @@ -1250,20 +1243,14 @@ mod tests { let ibc_token = ibc_storage::ibc_token(&denom); let balance_key = 
token::balance_key(&ibc_token, &sender); let init_bal = Amount::from_u64(100); - writes.insert(balance_key.clone(), init_bal.try_to_vec().unwrap()); + writes.insert(balance_key.clone(), init_bal.serialize_to_vec()); let minted_key = token::minted_balance_key(&ibc_token); - writes.insert(minted_key.clone(), init_bal.try_to_vec().unwrap()); + writes.insert(minted_key.clone(), init_bal.serialize_to_vec()); let minter_key = token::minter_key(&ibc_token); writes.insert( minter_key, - Address::Internal(InternalAddress::Ibc) - .try_to_vec() - .unwrap(), + Address::Internal(InternalAddress::Ibc).serialize_to_vec(), ); - // original denom - let hash = ibc_storage::calc_hash(&denom); - let denom_key = ibc_storage::ibc_denom_key(hash); - writes.insert(denom_key, denom.try_to_vec().unwrap()); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { env.wl_storage @@ -1505,8 +1492,7 @@ mod tests { ); let val = Amount::from_uint(100, ibc::ANY_DENOMINATION) .unwrap() - .try_to_vec() - .unwrap(); + .serialize_to_vec(); tx_host_env::with(|env| { env.wl_storage .storage @@ -1608,7 +1594,7 @@ mod tests { denom, &address::Address::Internal(address::InternalAddress::Ibc), ); - let val = Amount::from_u64(100).try_to_vec().unwrap(); + let val = Amount::from_u64(100).serialize_to_vec(); tx_host_env::with(|env| { env.wl_storage .storage diff --git a/tests/src/vm_host_env/tx.rs b/tests/src/vm_host_env/tx.rs index 582b668b79..fdb5bd959e 100644 --- a/tests/src/vm_host_env/tx.rs +++ b/tests/src/vm_host_env/tx.rs @@ -18,7 +18,8 @@ use namada::vm::prefix_iter::PrefixIterators; use namada::vm::wasm::run::Error; use namada::vm::wasm::{self, TxCache, VpCache}; use namada::vm::{self, WasmCacheRwAccess}; -use namada_tx_prelude::{storage_api, BorshSerialize, Ctx}; +use namada_tx_prelude::borsh_ext::BorshSerializeExt; +use namada_tx_prelude::{storage_api, Ctx}; use namada_vp_prelude::key::common; use tempfile::TempDir; @@ -186,7 +187,7 @@ impl TestTxEnv { let storage_key = 
key::threshold_key(address); self.wl_storage .storage - .write(&storage_key, threshold.try_to_vec().unwrap()) + .write(&storage_key, threshold.serialize_to_vec()) .unwrap(); } @@ -216,7 +217,7 @@ impl TestTxEnv { let storage_key = token::balance_key(token, target); self.wl_storage .storage - .write(&storage_key, amount.try_to_vec().unwrap()) + .write(&storage_key, amount.serialize_to_vec()) .unwrap(); } @@ -447,7 +448,7 @@ mod native_tx_host_env { result_ptr: u64 )); native_host_fn!(tx_emit_ibc_event(event_ptr: u64, event_len: u64)); - native_host_fn!(tx_get_ibc_event(event_type_ptr: u64, event_type_len: u64) -> i64); + native_host_fn!(tx_get_ibc_events(event_type_ptr: u64, event_type_len: u64) -> i64); native_host_fn!(tx_get_chain_id(result_ptr: u64)); native_host_fn!(tx_get_block_height() -> u64); native_host_fn!(tx_get_tx_index() -> u32); diff --git a/tx_prelude/Cargo.toml b/tx_prelude/Cargo.toml index 5fa5d24d68..981b9c0c79 100644 --- a/tx_prelude/Cargo.toml +++ b/tx_prelude/Cargo.toml @@ -26,6 +26,7 @@ namada_macros = {path = "../macros"} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} namada_vm_env = {path = "../vm_env", default-features = false} borsh.workspace = true +borsh-ext.workspace = true masp_primitives.workspace = true sha2.workspace = true thiserror.workspace = true diff --git a/tx_prelude/src/ibc.rs b/tx_prelude/src/ibc.rs index f7ee8230b1..2f19c24a82 100644 --- a/tx_prelude/src/ibc.rs +++ b/tx_prelude/src/ibc.rs @@ -10,11 +10,11 @@ pub use namada_core::ledger::ibc::{ use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::ledger::tx_env::TxEnv; use namada_core::types::address::{Address, InternalAddress}; -pub use namada_core::types::ibc::IbcEvent; +pub use namada_core::types::ibc::{IbcEvent, IbcShieldedTransfer}; use namada_core::types::storage::{BlockHeight, Header, Key}; use namada_core::types::token::DenominatedAmount; -use crate::token::{burn, mint, transfer}; +use 
crate::token::{burn, handle_masp_tx, mint, transfer}; use crate::{Ctx, KeyValIterator}; /// IBC actions to handle an IBC message @@ -75,11 +75,11 @@ impl IbcStorageContext for Ctx { ::emit_ibc_event(self, &event) } - fn get_ibc_event( + fn get_ibc_events( &self, event_type: impl AsRef, - ) -> Result, Self::Error> { - ::get_ibc_event(self, &event_type) + ) -> Result, Self::Error> { + ::get_ibc_events(self, &event_type) } fn transfer_token( @@ -89,7 +89,14 @@ impl IbcStorageContext for Ctx { token: &Address, amount: DenominatedAmount, ) -> std::result::Result<(), Self::Error> { - transfer(self, src, dest, token, amount, &None, &None, &None) + transfer(self, src, dest, token, amount) + } + + fn handle_masp_tx( + &mut self, + shielded: &IbcShieldedTransfer, + ) -> Result<(), Self::Error> { + handle_masp_tx(self, &shielded.transfer, &shielded.masp_tx) } fn mint_token( diff --git a/tx_prelude/src/lib.rs b/tx_prelude/src/lib.rs index 87d91e07a3..d2a286d542 100644 --- a/tx_prelude/src/lib.rs +++ b/tx_prelude/src/lib.rs @@ -17,6 +17,8 @@ use core::slice; use std::marker::PhantomData; pub use borsh::{BorshDeserialize, BorshSerialize}; +pub use borsh_ext; +use borsh_ext::BorshSerializeExt; pub use namada_core::ledger::eth_bridge; pub use namada_core::ledger::governance::storage as gov_storage; pub use namada_core::ledger::parameters::storage as parameters_storage; @@ -248,7 +250,7 @@ impl TxEnv for Ctx { key: &storage::Key, val: T, ) -> Result<(), Error> { - let buf = val.try_to_vec().unwrap(); + let buf = val.serialize_to_vec(); self.write_bytes_temp(key, buf) } @@ -319,7 +321,7 @@ impl TxEnv for Ctx { } fn emit_ibc_event(&mut self, event: &ibc::IbcEvent) -> Result<(), Error> { - let event = BorshSerialize::try_to_vec(event).unwrap(); + let event = borsh::to_vec(event).unwrap(); unsafe { namada_tx_emit_ibc_event(event.as_ptr() as _, event.len() as _) }; @@ -331,23 +333,21 @@ impl TxEnv for Ctx { Ok(()) } - fn get_ibc_event( + fn get_ibc_events( &self, event_type: impl AsRef, 
- ) -> Result, Error> { + ) -> Result, Error> { let event_type = event_type.as_ref().to_string(); let read_result = unsafe { - namada_tx_get_ibc_event( + namada_tx_get_ibc_events( event_type.as_ptr() as _, event_type.len() as _, ) }; match read_from_buffer(read_result, namada_tx_result_buffer) { - Some(value) => Ok(Some( - ibc::IbcEvent::try_from_slice(&value[..]) - .expect("The conversion shouldn't fail"), - )), - None => Ok(None), + Some(value) => Ok(Vec::::try_from_slice(&value[..]) + .expect("The conversion shouldn't fail")), + None => Ok(Vec::new()), } } } diff --git a/tx_prelude/src/proof_of_stake.rs b/tx_prelude/src/proof_of_stake.rs index cc8bcb7b63..1f365f1b9c 100644 --- a/tx_prelude/src/proof_of_stake.rs +++ b/tx_prelude/src/proof_of_stake.rs @@ -7,15 +7,15 @@ use namada_core::types::{key, token}; pub use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::{ become_validator, bond_tokens, change_validator_commission_rate, - read_pos_params, unbond_tokens, unjail_validator, withdraw_tokens, - BecomeValidator, + read_pos_params, redelegate_tokens, unbond_tokens, unjail_validator, + withdraw_tokens, BecomeValidator, }; -pub use namada_proof_of_stake::{parameters, types}; +pub use namada_proof_of_stake::{parameters, types, ResultSlashing}; use super::*; impl Ctx { - /// NEW: Self-bond tokens to a validator when `source` is `None` or equal to + /// Self-bond tokens to a validator when `source` is `None` or equal to /// the `validator` address, or delegate tokens from the `source` to the /// `validator`. pub fn bond_tokens( @@ -28,7 +28,7 @@ impl Ctx { bond_tokens(self, source, validator, amount, current_epoch) } - /// NEW: Unbond self-bonded tokens from a validator when `source` is `None` + /// Unbond self-bonded tokens from a validator when `source` is `None` /// or equal to the `validator` address, or unbond delegated tokens from /// the `source` to the `validator`. 
pub fn unbond_tokens( @@ -36,12 +36,12 @@ impl Ctx { source: Option<&Address>, validator: &Address, amount: token::Amount, - ) -> TxResult { + ) -> EnvResult { let current_epoch = self.get_block_epoch()?; - unbond_tokens(self, source, validator, amount, current_epoch) + unbond_tokens(self, source, validator, amount, current_epoch, false) } - /// NEW: Withdraw unbonded tokens from a self-bond to a validator when + /// Withdraw unbonded tokens from a self-bond to a validator when /// `source` is `None` or equal to the `validator` address, or withdraw /// unbonded tokens delegated to the `validator` to the `source`. pub fn withdraw_tokens( @@ -53,7 +53,7 @@ impl Ctx { withdraw_tokens(self, source, validator, current_epoch) } - /// NEW: Change validator commission rate. + /// Change validator commission rate. pub fn change_validator_commission_rate( &mut self, validator: &Address, @@ -69,7 +69,26 @@ impl Ctx { unjail_validator(self, validator, current_epoch) } - /// NEW: Attempt to initialize a validator account. On success, returns the + /// Redelegate bonded tokens from one validator to another one. + pub fn redelegate_tokens( + &mut self, + owner: &Address, + src_validator: &Address, + dest_validator: &Address, + amount: token::Amount, + ) -> TxResult { + let current_epoch = self.get_block_epoch()?; + redelegate_tokens( + self, + owner, + src_validator, + dest_validator, + current_epoch, + amount, + ) + } + + /// Attempt to initialize a validator account. On success, returns the /// initialized validator account's address. 
pub fn init_validator( &mut self, @@ -96,8 +115,6 @@ impl Ctx { &account_keys, threshold, )?; - let protocol_pk_key = key::protocol_pk_key(&validator_address); - self.write(&protocol_pk_key, &protocol_key)?; let dkg_pk_key = key::dkg_session_keys::dkg_pk_key(&validator_address); self.write(&dkg_pk_key, &dkg_key)?; let eth_cold_key = key::common::PublicKey::Secp256k1(eth_cold_key); @@ -109,6 +126,7 @@ impl Ctx { params: ¶ms, address: &validator_address, consensus_key: &consensus_key, + protocol_key: &protocol_key, eth_cold_key: ð_cold_key, eth_hot_key: ð_hot_key, current_epoch, diff --git a/tx_prelude/src/token.rs b/tx_prelude/src/token.rs index e950668d2b..6067c82a46 100644 --- a/tx_prelude/src/token.rs +++ b/tx_prelude/src/token.rs @@ -1,6 +1,5 @@ use masp_primitives::transaction::Transaction; use namada_core::types::address::Address; -use namada_core::types::hash::Hash; use namada_core::types::storage::KeySeg; use namada_core::types::token; pub use namada_core::types::token::*; @@ -15,9 +14,6 @@ pub fn transfer( dest: &Address, token: &Address, amount: DenominatedAmount, - key: &Option, - shielded_hash: &Option, - shielded: &Option, ) -> TxResult { if amount.amount != Amount::default() && src != dest { let src_key = token::balance_key(token, src); @@ -33,47 +29,43 @@ pub fn transfer( ctx.write(&src_key, src_bal)?; ctx.write(&dest_key, dest_bal)?; } + Ok(()) +} - // If this transaction has a shielded component, then handle it - // separately - if let Some(shielded) = shielded { - let masp_addr = address::masp(); - ctx.insert_verifier(&masp_addr)?; - let head_tx_key = storage::Key::from(masp_addr.to_db_key()) - .push(&HEAD_TX_KEY.to_owned()) - .expect("Cannot obtain a storage key"); - let current_tx_idx: u64 = - ctx.read(&head_tx_key).unwrap_or(None).unwrap_or(0); - let current_tx_key = storage::Key::from(masp_addr.to_db_key()) - .push(&(TX_KEY_PREFIX.to_owned() + ¤t_tx_idx.to_string())) +/// Handle a MASP transaction. 
+pub fn handle_masp_tx( + ctx: &mut Ctx, + transfer: &Transfer, + shielded: &Transaction, +) -> TxResult { + let masp_addr = address::masp(); + ctx.insert_verifier(&masp_addr)?; + let head_tx_key = storage::Key::from(masp_addr.to_db_key()) + .push(&HEAD_TX_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let current_tx_idx: u64 = + ctx.read(&head_tx_key).unwrap_or(None).unwrap_or(0); + let current_tx_key = storage::Key::from(masp_addr.to_db_key()) + .push(&(TX_KEY_PREFIX.to_owned() + ¤t_tx_idx.to_string())) + .expect("Cannot obtain a storage key"); + // Save the Transfer object and its location within the blockchain + // so that clients do not have to separately look these + // up + let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( + ctx.get_block_epoch()?, + ctx.get_block_height()?, + ctx.get_tx_index()?, + transfer.clone(), + shielded.clone(), + ); + ctx.write(¤t_tx_key, record)?; + ctx.write(&head_tx_key, current_tx_idx + 1)?; + // If storage key has been supplied, then pin this transaction to it + if let Some(key) = &transfer.key { + let pin_key = storage::Key::from(masp_addr.to_db_key()) + .push(&(PIN_KEY_PREFIX.to_owned() + key)) .expect("Cannot obtain a storage key"); - // Save the Transfer object and its location within the blockchain - // so that clients do not have to separately look these - // up - let transfer = Transfer { - source: src.clone(), - target: dest.clone(), - token: token.clone(), - amount, - key: key.clone(), - shielded: *shielded_hash, - }; - let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( - ctx.get_block_epoch()?, - ctx.get_block_height()?, - ctx.get_tx_index()?, - transfer, - shielded.clone(), - ); - ctx.write(¤t_tx_key, record)?; - ctx.write(&head_tx_key, current_tx_idx + 1)?; - // If storage key has been supplied, then pin this transaction to it - if let Some(key) = key { - let pin_key = storage::Key::from(masp_addr.to_db_key()) - .push(&(PIN_KEY_PREFIX.to_owned() + key)) - 
.expect("Cannot obtain a storage key"); - ctx.write(&pin_key, current_tx_idx)?; - } + ctx.write(&pin_key, current_tx_idx)?; } Ok(()) } diff --git a/vm_env/src/lib.rs b/vm_env/src/lib.rs index 6a630f9349..baadd56182 100644 --- a/vm_env/src/lib.rs +++ b/vm_env/src/lib.rs @@ -78,8 +78,8 @@ pub mod tx { // Emit an IBC event pub fn namada_tx_emit_ibc_event(event_ptr: u64, event_len: u64); - // Get an IBC event - pub fn namada_tx_get_ibc_event( + // Get IBC events + pub fn namada_tx_get_ibc_events( event_type_ptr: u64, event_type_len: u64, ) -> i64; @@ -197,6 +197,12 @@ pub mod vp { // Get the native token address pub fn namada_vp_get_native_token(result_ptr: u64); + // Get the IBC event + pub fn namada_vp_get_ibc_events( + event_type_ptr: u64, + event_type_len: u64, + ) -> i64; + // Requires a node running with "Info" log level pub fn namada_vp_log_string(str_ptr: u64, str_len: u64); diff --git a/vp_prelude/Cargo.toml b/vp_prelude/Cargo.toml index ecffd8eaa4..5384395b22 100644 --- a/vp_prelude/Cargo.toml +++ b/vp_prelude/Cargo.toml @@ -26,5 +26,6 @@ namada_macros = {path = "../macros"} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} namada_vm_env = {path = "../vm_env", default-features = false} borsh.workspace = true +borsh-ext.workspace = true sha2.workspace = true thiserror.workspace = true diff --git a/vp_prelude/src/lib.rs b/vp_prelude/src/lib.rs index 0962628363..3c2d4f7b88 100644 --- a/vp_prelude/src/lib.rs +++ b/vp_prelude/src/lib.rs @@ -14,6 +14,8 @@ use std::convert::TryFrom; use std::marker::PhantomData; pub use borsh::{BorshDeserialize, BorshSerialize}; +pub use borsh_ext; +use borsh_ext::BorshSerializeExt; pub use namada_core::ledger::governance::storage as gov_storage; pub use namada_core::ledger::parameters; pub use namada_core::ledger::pgf::storage as pgf_storage; @@ -88,13 +90,11 @@ pub fn verify_signatures(ctx: &Ctx, tx: &Tx, owner: &Address) -> VpResult { let threshold = storage_api::account::threshold(&ctx.pre(), 
owner)?.unwrap_or(1); - let targets = [*tx.data_sechash(), *tx.code_sechash()]; - // Serialize parameters - let max_signatures = max_signatures_per_transaction.try_to_vec().unwrap(); - let public_keys_map = public_keys_index_map.try_to_vec().unwrap(); - let targets = targets.try_to_vec().unwrap(); - let signer = owner.try_to_vec().unwrap(); + let max_signatures = max_signatures_per_transaction.serialize_to_vec(); + let public_keys_map = public_keys_index_map.serialize_to_vec(); + let targets = [tx.raw_header_hash()].serialize_to_vec(); + let signer = owner.serialize_to_vec(); let valid = unsafe { namada_vp_verify_tx_section_signature( @@ -297,6 +297,23 @@ impl<'view> VpEnv<'view> for Ctx { get_native_token() } + fn get_ibc_events( + &self, + event_type: String, + ) -> Result, Error> { + let read_result = unsafe { + namada_vp_get_ibc_events( + event_type.as_ptr() as _, + event_type.len() as _, + ) + }; + match read_from_buffer(read_result, namada_vp_result_buffer) { + Some(value) => Ok(Vec::::try_from_slice(&value[..]) + .expect("The conversion shouldn't fail")), + None => Ok(Vec::new()), + } + } + fn iter_prefix<'iter>( &'iter self, prefix: &storage::Key, @@ -305,7 +322,7 @@ impl<'view> VpEnv<'view> for Ctx { } fn eval(&self, vp_code_hash: Hash, input_data: Tx) -> Result { - let input_data_bytes = BorshSerialize::try_to_vec(&input_data).unwrap(); + let input_data_bytes = borsh::to_vec(&input_data).unwrap(); let result = unsafe { namada_vp_eval( vp_code_hash.0.as_ptr() as _, diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 2007725ec6..5ca9ccd5a1 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -37,18 +37,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if 1.0.0", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.0", -] - [[package]] name = 
"aes" version = "0.8.3" @@ -377,15 +365,15 @@ checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" [[package]] name = "bellman" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4dd656ef4fdf7debb6d87d4dd92642fcbcdb78cbf6600c13e25c87e4d1a3807" +checksum = "9afceed28bac7f9f5a508bca8aeeff51cdfa4770c0b967ac55c621e2ddfd6171" dependencies = [ "bitvec 1.0.1", "blake2s_simd", "byteorder", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "pairing", "rand_core 0.6.4", "subtle 2.4.1", @@ -411,14 +399,14 @@ dependencies = [ [[package]] name = "bip0039" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0830ae4cc96b0617cc912970c2b17e89456fecbf55e8eed53a956f37ab50c41" +checksum = "bef0f0152ec5cf17f49a5866afaa3439816207fd4f0a224c0211ffaf5e278426" dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.9.0", + "hmac 0.12.1", + "pbkdf2 0.10.1", "rand 0.8.5", - "sha2 0.9.9", + "sha2 0.10.6", "unicode-normalization", "zeroize", ] @@ -517,7 +505,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -541,16 +529,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-modes" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb03d1bed155d89dce0f845b7899b18a9a163e148fd004e1c28421a783e2d8e" -dependencies = [ - "block-padding 0.2.1", - "cipher 0.3.0", -] - [[package]] name = "block-padding" version = "0.1.5" @@ -560,20 +538,14 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "bls12_381" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c196a77437e7cc2fb515ce413a6401291578b5afc8ecb29a3c7ab957f05941" +checksum = "d7bc6d6292be3a19e6379786dac800f551e5865a5bb51ebbe3064ab80433f403" dependencies = [ - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "pairing", "rand_core 0.6.4", "subtle 2.4.1", @@ -581,10 +553,11 @@ dependencies = [ [[package]] name = "borsh" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" dependencies = [ - "borsh-derive 0.9.4", + "borsh-derive 0.9.3", "hashbrown 0.11.2", ] @@ -598,13 +571,24 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "borsh" +version = "1.0.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41898277107b0d3f027593697912977397eba6ac39a55bdd2eb02c1d5d5013b5" +dependencies = [ + "borsh-derive 1.0.0-alpha.4", + "cfg_aliases", +] + [[package]] name = "borsh-derive" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" dependencies = [ - "borsh-derive-internal 0.9.4", - "borsh-schema-derive-internal 0.9.4", + "borsh-derive-internal 0.9.3", + "borsh-schema-derive-internal 0.9.3", "proc-macro-crate 0.1.5", "proc-macro2", "syn 1.0.109", @@ -623,10 +607,25 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-derive" +version = "1.0.0-alpha.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "413cb435569fe499e89235f758304e0e7198016baa351d8f5827ea0f40526ce0" +dependencies = [ + "once_cell", + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 2.0.16", + "syn_derive", +] + [[package]] name = "borsh-derive-internal" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ "proc-macro2", "quote", @@ -644,10 +643,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-ext" +version = "1.0.0-alpha.4" +source = "git+https://github.com/heliaxdev/borsh-ext?tag=v1.0.0-alpha.4#6bebf357002f96574ac37a28f547b6c88e91b799" +dependencies = [ + "borsh 1.0.0-alpha.4", +] + [[package]] name = "borsh-schema-derive-internal" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ "proc-macro2", "quote", @@ -761,6 +769,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher 0.4.4", +] + [[package]] name = "cc" version = "1.0.79" @@ -779,6 +796,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chacha20" version = "0.8.2" @@ -1200,16 +1223,6 @@ dependencies = [ "subtle 2.4.1", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.4.1", -] - [[package]] name = "ct-codecs" version = "1.1.1" @@ -1447,6 +1460,7 @@ dependencies = [ "digest 0.10.6", "elliptic-curve", "rfc6979", + "serdect", "signature 2.1.0", "spki", ] @@ -1507,12 +1521,13 @@ dependencies = [ "base16ct", "crypto-bigint", "digest 0.10.6", - "ff 0.13.0", + "ff", "generic-array 0.14.7", - "group 0.13.0", + "group", "pkcs8", "rand_core 0.6.4", "sec1", + "serdect", "subtle 2.4.1", "zeroize", ] @@ -1627,7 +1642,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" dependencies = [ - "aes 0.8.3", + "aes", "ctr", "digest 0.10.6", "hex", @@ -1985,14 +2000,14 @@ dependencies = [ "bincode", "blake2", "blake2b_simd", - "borsh 0.9.4", + "borsh 0.9.3", "digest 0.10.6", "ed25519-dalek", "either", "ferveo-common", "group-threshold-cryptography", "hex", - "itertools", + "itertools 0.10.5", "measure_time", "miracl_core", "num 0.4.0", @@ -2019,23 +2034,13 @@ dependencies = [ "serde_bytes", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "bitvec 1.0.1", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "ff" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ + "bitvec 1.0.1", "rand_core 0.6.4", "subtle 
2.4.1", ] @@ -2085,12 +2090,12 @@ dependencies = [ [[package]] name = "fpe" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd910db5f9ca4dc3116f8c46367825807aa2b942f72565f16b4be0b208a00a9e" +checksum = "26c4b37de5ae15812a764c958297cfc50f5c010438f60c6ce75d11b802abd404" dependencies = [ - "block-modes", - "cipher 0.3.0", + "cbc", + "cipher 0.4.4", "libm", "num-bigint 0.4.3", "num-integer", @@ -2296,25 +2301,14 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "memuse", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.0", + "ff", + "memuse", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2334,7 +2328,7 @@ dependencies = [ "blake2b_simd", "chacha20 0.8.2", "hex", - "itertools", + "itertools 0.10.5", "miracl_core", "rand 0.8.5", "rand_core 0.6.4", @@ -2493,16 +2487,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.1", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.12.1" @@ -2523,17 +2507,6 @@ dependencies = [ "hmac 0.7.1", ] -[[package]] -name = "hmac-drbg" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" -dependencies = [ - "digest 0.9.0", - "generic-array 0.14.7", - "hmac 0.8.1", -] - [[package]] name = "hmac-sha512" version = "0.1.9" @@ 
-2661,7 +2634,7 @@ dependencies = [ [[package]] name = "ibc" version = "0.41.0" -source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=38a827d3901e590b2935ee5b6b81b4d67c399560#38a827d3901e590b2935ee5b6b81b4d67c399560" +source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=206cb5fa74a7ca38038b937d202ae39fbbd63c19#206cb5fa74a7ca38038b937d202ae39fbbd63c19" dependencies = [ "bytes", "cfg-if 1.0.0", @@ -2804,10 +2777,10 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "index-set" -version = "0.7.1" -source = "git+https://github.com/heliaxdev/index-set?tag=v0.7.1#dc24cdbbe3664514d59f1a4c4031863fc565f1c2" +version = "0.8.0" +source = "git+https://github.com/heliaxdev/index-set?tag=v0.8.0#0c218cc300c1bb7a1acf34f21b6e9d489df5fda8" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "serde", ] @@ -2900,6 +2873,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.6" @@ -2917,14 +2899,14 @@ dependencies = [ [[package]] name = "jubjub" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a575df5f985fe1cd5b2b05664ff6accfc46559032b954529fd225a2168d27b0f" +checksum = "8499f7a74008aafbecb2a2e608a3e13e4dd3e84df198b604451efe93f2de6e61" dependencies = [ "bitvec 1.0.1", "bls12_381", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2939,6 +2921,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell", + "serdect", "sha2 0.10.6", "signature 2.1.0", ] @@ -2995,57 +2978,13 @@ dependencies = [ "arrayref", "crunchy", "digest 0.8.1", - "hmac-drbg 0.2.0", + "hmac-drbg", "rand 0.7.3", "sha2 0.8.2", "subtle 2.4.1", "typenum", ] -[[package]] -name = 
"libsecp256k1" -version = "0.7.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "arrayref", - "base64 0.13.1", - "digest 0.9.0", - "hmac-drbg 0.3.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.5", - "serde", - "sha2 0.9.9", - "typenum", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle 2.4.1", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - [[package]] name = "linux-raw-sys" version = "0.3.7" @@ -3101,9 +3040,9 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "0.2.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=77e009626f3f52fe83c81ec6ee38fc2547d38da3#77e009626f3f52fe83c81ec6ee38fc2547d38da3" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "chacha20 0.9.1", "chacha20poly1305", "cipher 0.4.4", @@ -3114,19 +3053,19 @@ dependencies = [ [[package]] name = "masp_primitives" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = 
"git+https://github.com/anoma/masp?rev=77e009626f3f52fe83c81ec6ee38fc2547d38da3#77e009626f3f52fe83c81ec6ee38fc2547d38da3" dependencies = [ - "aes 0.7.5", + "aes", "bip0039", "bitvec 1.0.1", "blake2b_simd", "blake2s_simd", "bls12_381", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "byteorder", - "ff 0.12.1", + "ff", "fpe", - "group 0.12.1", + "group", "hex", "incrementalmerkletree", "jubjub", @@ -3137,7 +3076,7 @@ dependencies = [ "num-traits", "rand 0.8.5", "rand_core 0.6.4", - "sha2 0.9.9", + "sha2 0.10.6", "subtle 2.4.1", "zcash_encoding", ] @@ -3145,15 +3084,15 @@ dependencies = [ [[package]] name = "masp_proofs" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=77e009626f3f52fe83c81ec6ee38fc2547d38da3#77e009626f3f52fe83c81ec6ee38fc2547d38da3" dependencies = [ "bellman", "blake2b_simd", "bls12_381", "directories", "getrandom 0.2.9", - "group 0.12.1", - "itertools", + "group", + "itertools 0.11.0", "jubjub", "lazy_static", "masp_primitives", @@ -3315,7 +3254,8 @@ version = "0.23.1" dependencies = [ "async-trait", "bimap", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "circular-queue", "clru", "data-encoding", @@ -3325,13 +3265,14 @@ dependencies = [ "ethers", "eyre", "futures", - "itertools", + "itertools 0.10.5", "loupe", "masp_primitives", "masp_proofs", "namada_core", "namada_ethereum_bridge", "namada_proof_of_stake", + "namada_sdk", "num256", "orion", "owo-colors", @@ -3376,7 +3317,8 @@ dependencies = [ "ark-ec", "ark-serialize", "bech32 0.8.1", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "chrono", "data-encoding", "derivative", @@ -3392,8 +3334,8 @@ dependencies = [ "ics23", "impl-num-traits", "index-set", - "itertools", - "libsecp256k1 0.7.0", + "itertools 0.10.5", + "k256", "masp_primitives", "namada_macros", "num-integer", @@ -3425,10 +3367,11 @@ dependencies = [ name = 
"namada_ethereum_bridge" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "ethers", "eyre", - "itertools", + "itertools 0.10.5", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -3454,7 +3397,7 @@ dependencies = [ name = "namada_proof_of_stake" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "data-encoding", "derivative", "namada_core", @@ -3464,11 +3407,55 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_sdk" +version = "0.23.1" +dependencies = [ + "async-trait", + "bimap", + "borsh 1.0.0-alpha.4", + "borsh-ext", + "circular-queue", + "data-encoding", + "derivation-path", + "ethbridge-bridge-contract", + "ethers", + "futures", + "itertools 0.10.5", + "masp_primitives", + "masp_proofs", + "namada_core", + "namada_ethereum_bridge", + "namada_proof_of_stake", + "num256", + "orion", + "owo-colors", + "parse_duration", + "paste", + "prost", + "rand 0.8.5", + "rand_core 0.6.4", + "ripemd", + "serde", + "serde_json", + "sha2 0.9.9", + "slip10_ed25519", + "tendermint-rpc", + "thiserror", + "tiny-bip39", + "tiny-hderive", + "tokio", + "toml 0.5.11", + "tracing", + "wasmtimer", + "zeroize", +] + [[package]] name = "namada_test_utils" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "namada_core", "strum", ] @@ -3486,6 +3473,7 @@ dependencies = [ "lazy_static", "namada", "namada_core", + "namada_sdk", "namada_test_utils", "namada_tx_prelude", "namada_vp_prelude", @@ -3507,7 +3495,8 @@ dependencies = [ name = "namada_tx_prelude" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "masp_primitives", "namada_core", "namada_macros", @@ -3521,7 +3510,7 @@ dependencies = [ name = "namada_vm_env" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "masp_primitives", "namada_core", ] @@ -3530,7 +3519,8 @@ dependencies = [ name = "namada_vp_prelude" version = "0.23.1" dependencies = [ - "borsh 
0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -3543,7 +3533,7 @@ dependencies = [ name = "namada_wasm" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "getrandom 0.2.9", "masp_primitives", "namada", @@ -3768,9 +3758,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -3835,11 +3825,11 @@ checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "pairing" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" dependencies = [ - "group 0.12.1", + "group", ] [[package]] @@ -3919,6 +3909,19 @@ dependencies = [ "subtle 2.4.1", ] +[[package]] +name = "pasta_curves" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" +dependencies = [ + "ff", + "group", + "rand 0.8.5", + "static_assertions", + "subtle 2.4.1", +] + [[package]] name = "paste" version = "1.0.12" @@ -3936,11 +3939,11 @@ dependencies = [ [[package]] name = "pbkdf2" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" +checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" dependencies = [ - "crypto-mac 0.11.1", + "digest 0.10.6", "password-hash", ] @@ -4209,7 +4212,7 @@ checksum = 
"119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -4230,7 +4233,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", @@ -4397,16 +4400,31 @@ dependencies = [ ] [[package]] -name = "redjubjub" -version = "0.5.0" +name = "reddsa" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6039ff156887caf92df308cbaccdc058c9d3155a913da046add6e48c4cdbd91d" +checksum = "78a5191930e84973293aa5f532b513404460cd2216c1cfb76d08748c15b40b02" dependencies = [ "blake2b_simd", "byteorder", - "digest 0.9.0", + "group", + "hex", "jubjub", + "pasta_curves", + "rand_core 0.6.4", + "serde", + "thiserror", + "zeroize", +] + +[[package]] +name = "redjubjub" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a60db2c3bc9c6fd1e8631fee75abc008841d27144be744951d6b9b75f9b569c" +dependencies = [ "rand_core 0.6.4", + "reddsa", "serde", "thiserror", "zeroize", @@ -4883,6 +4901,7 @@ dependencies = [ "der", "generic-array 0.14.7", "pkcs8", + "serdect", "subtle 2.4.1", "zeroize", ] @@ -5031,6 +5050,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + [[package]] name = "sha1" version = "0.10.5" @@ -5171,9 +5200,9 @@ dependencies = [ [[package]] name = "sparse-merkle-tree" version = "0.3.1-pre" -source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=e086b235ed6e68929bf73f617dd61cd17b000a56#e086b235ed6e68929bf73f617dd61cd17b000a56" +source = 
"git+https://github.com/heliaxdev/sparse-merkle-tree?rev=df7ec062e7c40d5e76b136064e9aaf8bd2490750#df7ec062e7c40d5e76b136064e9aaf8bd2490750" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "cfg-if 1.0.0", "ics23", "sha2 0.9.9", @@ -5297,6 +5326,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae6eef0000c4a12ecdfd7873ea84a8b5aab5e44db72e38e07b028a25386f29a5" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.16", +] + [[package]] name = "tap" version = "1.0.1" @@ -5553,7 +5594,7 @@ checksum = "01b874a4992538d4b2f4fbbac11b9419d685f4b39bdc3fed95b04e07bfd76040" dependencies = [ "base58", "hmac 0.7.1", - "libsecp256k1 0.3.5", + "libsecp256k1", "memzero", "sha2 0.8.2", ] @@ -5767,7 +5808,7 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" name = "tx_template" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "getrandom 0.2.9", "namada_tests", "namada_tx_prelude", @@ -5890,7 +5931,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" name = "vp_template" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "getrandom 0.2.9", "namada_tests", "namada_vp_prelude", @@ -6670,8 +6711,8 @@ dependencies = [ [[package]] name = "zcash_encoding" -version = "0.0.0" -source = "git+https://github.com/zcash/librustzcash?rev=43c18d0#43c18d000fcbe45363b2d53585d5102841eff99e" +version = "0.2.0" +source = "git+https://github.com/zcash/librustzcash?rev=bd7f9d7#bd7f9d7c3ce5cfd14af169ffe0e1c5c903162f46" dependencies = [ "byteorder", "nonempty", diff --git a/wasm/Cargo.toml b/wasm/Cargo.toml index 8453ea48ca..c4cb182c1a 100644 --- a/wasm/Cargo.toml +++ b/wasm/Cargo.toml @@ -7,13 +7,6 @@ members = [ "vp_template", ] -[patch.crates-io] -# TODO temp patch for , and more tba. 
-borsh = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-schema-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} - [profile.release] # smaller and faster wasm (https://rustwasm.github.io/book/reference/code-size.html#compiling-with-link-time-optimizations-lto) lto = true diff --git a/wasm/checksums.json b/wasm/checksums.json index 78ae5d3268..ab2d7ecf71 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,22 +1,23 @@ { - "tx_bond.wasm": "tx_bond.ad42b536c7238f79a3e641b66741584266cb332162819b8c2319e2f92cbe6d9a.wasm", - "tx_bridge_pool.wasm": "tx_bridge_pool.5c60c0f9fd2605b86929f635f2e280a48153e86d97c7b4780a2638cf407a9515.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.2ec2e982bf93e37e865c3558e3ccbc525aa0c5b7dac67102da79be1c7da29417.wasm", - "tx_ibc.wasm": "tx_ibc.a5bee8cded06f83ac06d2f5add87524018c45db0745a7cee350c8c0e38f4e246.wasm", - "tx_init_account.wasm": "tx_init_account.50cb32298b6f6bbd661e09d36bbe7ba7ce2c9db75392a9879ae7381e87c9fc3a.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.d1baa37345ffdb850c6b092f5b314ff04125dfb530f11eadb3a866da9078b36f.wasm", - "tx_init_validator.wasm": "tx_init_validator.9446a4fc34b67b48c15bbb3ebde3a52500306b6999ccb043a0fe130eec63f1f1.wasm", - "tx_resign_steward.wasm": "tx_resign_steward.e01a5e6d35e18f86588b88724106007131f5d896eb628701edf7e455303c3368.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.b2ef04e09ba5a450359b5668377d802f67352bda72b31e9795ee4c6bd3ba7bfd.wasm", - "tx_transfer.wasm": "tx_transfer.f0e4e34246a04b19ba8f29a41e3d2123a14b95c18ba1643f94f09544432d3b12.wasm", - "tx_unbond.wasm": 
"tx_unbond.6f8720127ccf68c7d5d155529f62cea7336c7cbcdb73aa8ebd40e4c3cd46d128.wasm", - "tx_unjail_validator.wasm": "tx_unjail_validator.2e050ff9c8c35e715c838e116c9e4a69bc0735f1a589a8651b4f5523efe78dac.wasm", - "tx_update_account.wasm": "tx_update_account.d744059f698888fb5f4b96f531fe5e2f1eb307ed52621ccc189bbf022ae36ad5.wasm", - "tx_update_steward_commission.wasm": "tx_update_steward_commission.23d33c6d1782736b8caea39475a9ea090f4c8f1b56d41befb383a1270fbb24fa.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.b9e6f6aa0edde8b472b08eca84b34a73b646ba650d5884ea2ea073fef69166f1.wasm", - "tx_withdraw.wasm": "tx_withdraw.6898f2fb12586d116c52ad6775ed1011da9d81d6a58a434fea638625da607e13.wasm", - "vp_implicit.wasm": "vp_implicit.2200b625db480112f4dad4801863986f0210a06e1abb5a7d5148c94fdf8e5923.wasm", - "vp_masp.wasm": "vp_masp.44a2183f751b72d20466dacbbc13f21508dfff7f3adfaed891fbb4ad65dd21b5.wasm", - "vp_user.wasm": "vp_user.72d2c2b9fc247ce6cd6ac00bb5bccc3569d0b94b01af746fb3b4ce48761060d3.wasm", - "vp_validator.wasm": "vp_validator.9de3e969820b0fdb5d6baad7a0f3a9ad31e0df363cd87035883f2c97f65479e3.wasm" -} \ No newline at end of file + "tx_bond.wasm": "tx_bond.bd9f6fb78273d234c0be9aa99f58a7cb637a97dc2c3424c0a30cc9b395f3b165.wasm", + "tx_bridge_pool.wasm": "tx_bridge_pool.f7057f5ba2d219d96307304af20946a92e8d2a9984c6569288d66573c83f9e09.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.f40a962fba8b4b8d610fdbf05a30b2904322684e8afbebb6e155060953952d0d.wasm", + "tx_ibc.wasm": "tx_ibc.0d2cffa511ab313741735313307f90872319beafd1da04e33d9eac58d1eaa9b2.wasm", + "tx_init_account.wasm": "tx_init_account.6b2d874c80371f843fc4d971de0b589a25db307d80fa74b2c7075114cd6ef654.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.3b031b24c1dc203a3b14885f90c060f2da414c0584dbe495afad0ccf29efae11.wasm", + "tx_init_validator.wasm": "tx_init_validator.0399c9c64e4f77773fd4b5f1413bbc515460559cf9dbe851ca3c38501f688137.wasm", + "tx_redelegate.wasm": 
"tx_redelegate.6b0b58dba41b6cc1be5db89363893d5c2a31340d94ded7ea1076239a04405ee6.wasm", + "tx_resign_steward.wasm": "tx_resign_steward.955eee167ca6b206a02dc61595fe32449003eebb20621cd2343fce7844c885b5.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.a4109383a11ecc06bc9098d82c0d9ee23d60068b76e88e70b1adadb4e75d8f9b.wasm", + "tx_transfer.wasm": "tx_transfer.a5881e411cc02dffe0db97a336cc617cd624ac771aeeb901f9d91b0ede385d0b.wasm", + "tx_unbond.wasm": "tx_unbond.3a78a23ebfd6e0ed38c0be827ba07f9057d7f60f6fc8f3a033cddd3736ec70de.wasm", + "tx_unjail_validator.wasm": "tx_unjail_validator.76986a378ff219dadef369f4b459bb7542af2486cda4e3fb07e9ee4768d31366.wasm", + "tx_update_account.wasm": "tx_update_account.45ed453aebe68c8c64553918eaf3be8dcc914f32fcb02082c1766bf90b5609a4.wasm", + "tx_update_steward_commission.wasm": "tx_update_steward_commission.88b35049b102f6d6ae867a5d400fbd44e021eddd426eb5b710489a0f8381c09a.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.a9c0a47f1d5afd0338c8f9cfd67c9e781441d7a1f558d3e0cdc305df5fd8993a.wasm", + "tx_withdraw.wasm": "tx_withdraw.7385bd400ee32db3d6d9504593116faa5412ca5ba7088c11fed9aad74e750632.wasm", + "vp_implicit.wasm": "vp_implicit.cc1435713c12ee6de7c1caed76fac09f99ad93c77e78bbed3d1e2eadd324b0fc.wasm", + "vp_masp.wasm": "vp_masp.ac713f88be7d977c37b68c7b76a8f10ee689cda22e79d24a458f30c317163457.wasm", + "vp_user.wasm": "vp_user.1944e4ef7512c2d07c124316952e642eb257daa351dc0af9ea9bfed3d1534ddd.wasm", + "vp_validator.wasm": "vp_validator.345491f65e2ca3553ba2033a4afdacf99c0d6cd8729108362f2e66b642399cbf.wasm" +} diff --git a/wasm/tx_template/Cargo.toml b/wasm/tx_template/Cargo.toml index 5d58371cc7..1a8b27053c 100644 --- a/wasm/tx_template/Cargo.toml +++ b/wasm/tx_template/Cargo.toml @@ -11,7 +11,7 @@ crate-type = ["cdylib"] [dependencies] namada_tx_prelude = {path = "../../tx_prelude"} -borsh = "0.9.0" +borsh = "1.0.0-alpha.4" wee_alloc = "0.4.5" getrandom = { version = "0.2", features = ["custom"] } diff --git a/wasm/vp_template/Cargo.toml 
b/wasm/vp_template/Cargo.toml index e98f3ffd18..5d287de054 100644 --- a/wasm/vp_template/Cargo.toml +++ b/wasm/vp_template/Cargo.toml @@ -11,7 +11,7 @@ crate-type = ["cdylib"] [dependencies] namada_vp_prelude = {path = "../../vp_prelude"} -borsh = "0.9.0" +borsh = "1.0.0-alpha.4" wee_alloc = "0.4.5" getrandom = { version = "0.2", features = ["custom"] } diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml index dde2a2878e..fa00c06445 100644 --- a/wasm/wasm_source/Cargo.toml +++ b/wasm/wasm_source/Cargo.toml @@ -20,6 +20,7 @@ tx_ibc = ["namada_tx_prelude"] tx_init_account = ["namada_tx_prelude"] tx_init_proposal = ["namada_tx_prelude"] tx_init_validator = ["namada_tx_prelude"] +tx_redelegate = ["namada_tx_prelude"] tx_reveal_pk = ["namada_tx_prelude"] tx_transfer = ["namada_tx_prelude"] tx_unbond = ["namada_tx_prelude"] @@ -38,12 +39,12 @@ vp_validator = ["namada_vp_prelude", "once_cell"] [dependencies] namada_tx_prelude = {path = "../../tx_prelude", optional = true} namada_vp_prelude = {path = "../../vp_prelude", optional = true} -borsh = "0.9.0" +borsh = "1.0.0-alpha.4" once_cell = {version = "1.8.0", optional = true} wee_alloc = "0.4.5" getrandom = { version = "0.2", features = ["custom"] } # branch = "murisi/namada-integration" -masp_primitives = { git = "https://github.com/anoma/masp", rev = "50acc5028fbcd52a05970fe7991c7850ab04358e", optional = true } +masp_primitives = { git = "https://github.com/anoma/masp", rev = "77e009626f3f52fe83c81ec6ee38fc2547d38da3", optional = true } ripemd = "0.1" [dev-dependencies] diff --git a/wasm/wasm_source/Makefile b/wasm/wasm_source/Makefile index 7b00424baf..e78237c89d 100644 --- a/wasm/wasm_source/Makefile +++ b/wasm/wasm_source/Makefile @@ -12,6 +12,7 @@ wasms += tx_ibc wasms += tx_init_account wasms += tx_init_proposal wasms += tx_init_validator +wasms += tx_redelegate wasms += tx_reveal_pk wasms += tx_transfer wasms += tx_unbond diff --git a/wasm/wasm_source/src/lib.rs b/wasm/wasm_source/src/lib.rs 
index d376f8ca70..139835fe9f 100644 --- a/wasm/wasm_source/src/lib.rs +++ b/wasm/wasm_source/src/lib.rs @@ -12,6 +12,8 @@ pub mod tx_init_account; pub mod tx_init_proposal; #[cfg(feature = "tx_init_validator")] pub mod tx_init_validator; +#[cfg(feature = "tx_redelegate")] +pub mod tx_redelegate; #[cfg(feature = "tx_resign_steward")] pub mod tx_resign_steward; #[cfg(feature = "tx_reveal_pk")] diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index 3453747161..6b7263ce9b 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -17,7 +17,8 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { mod tests { use std::collections::BTreeSet; - use namada::ledger::pos::{GenesisValidator, PosParams, PosVP}; + use namada::ledger::pos::{OwnedPosParams, PosVP}; + use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; use namada::proof_of_stake::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, @@ -32,12 +33,12 @@ mod tests { arb_established_address, arb_non_internal_address, }; use namada_tx_prelude::address::InternalAddress; + use namada_tx_prelude::borsh_ext::BorshSerializeExt; use namada_tx_prelude::chain::ChainId; use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; use namada_tx_prelude::token; - use namada_vp_prelude::proof_of_stake::WeightedValidator; use proptest::prelude::*; use super::*; @@ -64,11 +65,11 @@ mod tests { initial_stake: token::Amount, bond: transaction::pos::Bond, key: key::common::SecretKey, - pos_params: PosParams, + pos_params: OwnedPosParams, ) -> TxResult { // Remove the validator stake threshold for simplicity - let pos_params = PosParams { - validator_stake_threshold: token::Amount::default(), + let pos_params = OwnedPosParams { + validator_stake_threshold: token::Amount::zero(), ..pos_params }; @@ 
-76,6 +77,7 @@ mod tests { let is_delegation = matches!(&bond.source, Some(source) if *source != bond.validator); let consensus_key = key::testing::keypair_1().ref_to(); + let protocol_key = key::testing::keypair_2().ref_to(); let commission_rate = Dec::new(5, 2).expect("Cannot fail"); let max_commission_rate_change = Dec::new(1, 2).expect("Cannot fail"); let eth_cold_key = key::testing::keypair_3().ref_to(); @@ -85,13 +87,15 @@ mod tests { address: bond.validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, max_commission_rate_change, }]; - init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + let pos_params = + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { if let Some(source) = &bond.source { @@ -106,7 +110,7 @@ mod tests { }); let tx_code = vec![]; - let tx_data = bond.try_to_vec().unwrap(); + let tx_data = bond.serialize_to_vec(); let mut tx = Tx::new(ChainId::default(), None); tx.add_code(tx_code) .add_serialized_data(tx_data) @@ -138,15 +142,12 @@ mod tests { &pos_params, Epoch(epoch), )?); - epoched_validator_stake_pre.push( - read_validator_stake( - ctx(), - &pos_params, - &bond.validator, - Epoch(epoch), - )? - .unwrap(), - ); + epoched_validator_stake_pre.push(read_validator_stake( + ctx(), + &pos_params, + &bond.validator, + Epoch(epoch), + )?); epoched_validator_set_pre.push( read_consensus_validator_set_addresses_with_stake( ctx(), @@ -171,15 +172,12 @@ mod tests { &pos_params, Epoch(epoch), )?); - epoched_validator_stake_post.push( - read_validator_stake( - ctx(), - &pos_params, - &bond.validator, - Epoch(epoch), - )? 
- .unwrap(), - ); + epoched_validator_stake_post.push(read_validator_stake( + ctx(), + &pos_params, + &bond.validator, + Epoch(epoch), + )?); epoched_validator_set_post.push( read_consensus_validator_set_addresses_with_stake( ctx(), @@ -269,13 +267,6 @@ mod tests { let bonds_post = bond_handle(&bond_src, &bond.validator); // let bonds_post = ctx().read_bond(&bond_id)?.unwrap(); - for epoch in 0..pos_params.unbonding_len { - dbg!( - epoch, - bonds_post.get_delta_val(ctx(), Epoch(epoch), &pos_params)? - ); - } - if is_delegation { // A delegation is applied at pipeline offset // Check that bond is empty before pipeline offset @@ -290,7 +281,7 @@ mod tests { } // Check that bond is updated after the pipeline length for epoch in pos_params.pipeline_len..=pos_params.unbonding_len { - let expected_bond_amount = bond.amount.change(); + let expected_bond_amount = bond.amount; let bond = bonds_post.get_sum(ctx(), Epoch(epoch), &pos_params)?; assert_eq!( @@ -305,7 +296,7 @@ mod tests { // Check that a bond already exists from genesis with initial stake // for the validator for epoch in 0..pos_params.pipeline_len { - let expected_bond_amount = initial_stake.change(); + let expected_bond_amount = initial_stake; let bond = bonds_post .get_sum(ctx(), Epoch(epoch), &pos_params) .expect("Genesis validator should already have self-bond"); @@ -323,7 +314,7 @@ mod tests { bonds_post.get_sum(ctx(), Epoch(epoch), &pos_params)?; assert_eq!( bond, - Some(expected_bond_amount.change()), + Some(expected_bond_amount), "Self-bond at and after pipeline offset should contain \ genesis stake and the bonded amount - checking epoch \ {epoch}" diff --git a/wasm/wasm_source/src/tx_bridge_pool.rs b/wasm/wasm_source/src/tx_bridge_pool.rs index 88d757998f..b287f84a6a 100644 --- a/wasm/wasm_source/src/tx_bridge_pool.rs +++ b/wasm/wasm_source/src/tx_bridge_pool.rs @@ -1,8 +1,9 @@ //! A tx for adding a transfer request across the Ethereum bridge //! into the bridge pool. 
-use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; use eth_bridge::storage::{bridge_pool, native_erc20_key}; use eth_bridge_pool::{GasFee, PendingTransfer, TransferToEthereum}; +use namada_tx_prelude::borsh_ext::BorshSerializeExt; use namada_tx_prelude::*; #[transaction(gas = 100000)] @@ -23,9 +24,6 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { &bridge_pool::BRIDGE_POOL_ADDRESS, fee_token_addr, amount.native_denominated(), - &None, - &None, - &None, )?; log_string("Token transfer succeeded."); let TransferToEthereum { @@ -43,9 +41,6 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { ð_bridge::ADDRESS, &nam_addr, amount.native_denominated(), - &None, - &None, - &None, )?; } else { // Otherwise we escrow ERC20 tokens. @@ -56,15 +51,12 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { &bridge_pool::BRIDGE_POOL_ADDRESS, &token, amount.native_denominated(), - &None, - &None, - &None, )?; } log_string("Escrow succeeded"); // add transfer into the pool let pending_key = bridge_pool::get_pending_key(&transfer); - ctx.write_bytes(&pending_key, transfer.try_to_vec().unwrap()) + ctx.write_bytes(&pending_key, transfer.serialize_to_vec()) .wrap_err("Could not write transfer to bridge pool")?; Ok(()) } diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs index c1e1b35226..0923797e36 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/wasm_source/src/tx_change_validator_commission.rs @@ -19,7 +19,8 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { mod tests { use std::cmp; - use namada::ledger::pos::{PosParams, PosVP}; + use namada::ledger::pos::{OwnedPosParams, PosVP}; + use namada::proof_of_stake::types::GenesisValidator; use namada::proof_of_stake::validator_commission_rate_handle; use namada::types::dec::{Dec, POS_DECIMAL_PRECISION}; use namada::types::storage::Epoch; @@ -28,12 +29,12 @@ mod tests { use 
namada_tests::native_vp::TestNativeVpEnv; use namada_tests::tx::*; use namada_tx_prelude::address::testing::arb_established_address; + use namada_tx_prelude::borsh_ext::BorshSerializeExt; use namada_tx_prelude::chain::ChainId; use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; use namada_tx_prelude::token; - use namada_vp_prelude::proof_of_stake::GenesisValidator; use proptest::prelude::*; use super::*; @@ -63,9 +64,11 @@ mod tests { max_change: Dec, commission_change: transaction::pos::CommissionChange, key: key::common::SecretKey, - pos_params: PosParams, + pos_params: OwnedPosParams, ) -> TxResult { let consensus_key = key::testing::keypair_1().ref_to(); + let protocol_key = key::testing::keypair_2().ref_to(); + let eth_hot_key = key::common::PublicKey::Secp256k1( key::testing::gen_keypair::().ref_to(), ); @@ -76,16 +79,18 @@ mod tests { address: commission_change.validator.clone(), tokens: token::Amount::from_uint(1_000_000, 0).unwrap(), consensus_key, + protocol_key, commission_rate: initial_rate, max_commission_rate_change: max_change, eth_hot_key, eth_cold_key, }]; - init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + let pos_params = + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let tx_code = vec![]; - let tx_data = commission_change.try_to_vec().unwrap(); + let tx_data = commission_change.serialize_to_vec(); let mut tx = Tx::new(ChainId::default(), None); tx.add_code(tx_code) .add_serialized_data(tx_data) diff --git a/wasm/wasm_source/src/tx_redelegate.rs b/wasm/wasm_source/src/tx_redelegate.rs new file mode 100644 index 0000000000..adae605a81 --- /dev/null +++ b/wasm/wasm_source/src/tx_redelegate.rs @@ -0,0 +1,414 @@ +//! A tx for a delegator (non-validator bond owner) to redelegate bonded tokens +//! from one validator to another. 
+ +use namada_tx_prelude::*; + +#[transaction(gas = 460000)] +fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { + let signed = tx_data; + let data = signed.data().ok_or_err_msg("Missing data")?; + let transaction::pos::Redelegation { + src_validator, + dest_validator, + owner, + amount, + } = transaction::pos::Redelegation::try_from_slice(&data[..]) + .wrap_err("failed to decode a Redelegation")?; + ctx.redelegate_tokens(&owner, &src_validator, &dest_validator, amount) +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + + use namada::ledger::pos::{OwnedPosParams, PosVP}; + use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; + use namada::proof_of_stake::{ + bond_handle, read_consensus_validator_set_addresses_with_stake, + read_total_stake, read_validator_stake, unbond_handle, + }; + use namada::types::dec::Dec; + use namada::types::storage::Epoch; + use namada_tests::log::test; + use namada_tests::native_vp::pos::init_pos; + use namada_tests::native_vp::TestNativeVpEnv; + use namada_tests::tx::*; + use namada_tx_prelude::address::InternalAddress; + use namada_tx_prelude::borsh_ext::BorshSerializeExt; + use namada_tx_prelude::chain::ChainId; + use namada_tx_prelude::key::testing::arb_common_keypair; + use namada_tx_prelude::key::RefTo; + use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; + use namada_tx_prelude::token; + use proptest::prelude::*; + + use super::*; + + proptest! { + /// In this test we setup the ledger and PoS system with an arbitrary + /// initial state with 1 genesis validator, a delegation bond if the + /// unbond is for a delegation, arbitrary PoS parameters, and + /// we generate an arbitrary unbond that we'd like to apply. + /// + /// After we apply the unbond, we check that all the storage values + /// in PoS system have been updated as expected and then we also check + /// that this transaction is accepted by the PoS validity predicate. 
+ #[test] + fn test_tx_redelegate( + (initial_stake, redelegation) in arb_initial_stake_and_redelegation(), + // A key to sign the transaction + key in arb_common_keypair(), + pos_params in arb_pos_params(None)) { + test_tx_redelegate_aux(initial_stake, redelegation, key, pos_params).unwrap() + } + } + + // TODO: more assertions needed!! + fn test_tx_redelegate_aux( + initial_stake: token::Amount, + redelegation: transaction::pos::Redelegation, + key: key::common::SecretKey, + pos_params: OwnedPosParams, + ) -> TxResult { + // Remove the validator stake threshold for simplicity + let pos_params = OwnedPosParams { + validator_stake_threshold: token::Amount::zero(), + ..pos_params + }; + dbg!(&initial_stake, &redelegation); + + let consensus_key_1 = key::testing::keypair_1().ref_to(); + let consensus_key_2 = key::testing::keypair_2().ref_to(); + let protocol_key = key::testing::keypair_2().ref_to(); + let eth_cold_key = key::testing::keypair_3().ref_to(); + let eth_hot_key = key::testing::keypair_4().ref_to(); + let commission_rate = Dec::new(5, 2).expect("Cannot fail"); + let max_commission_rate_change = Dec::new(1, 2).expect("Cannot fail"); + + let genesis_validators = [ + GenesisValidator { + address: redelegation.src_validator.clone(), + tokens: token::Amount::zero(), + consensus_key: consensus_key_1, + protocol_key: protocol_key.clone(), + eth_cold_key: eth_cold_key.clone(), + eth_hot_key: eth_hot_key.clone(), + commission_rate, + max_commission_rate_change, + }, + GenesisValidator { + address: redelegation.dest_validator.clone(), + tokens: token::Amount::zero(), + consensus_key: consensus_key_2, + protocol_key, + eth_cold_key, + eth_hot_key, + commission_rate, + max_commission_rate_change, + }, + ]; + + let pos_params = + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + + let native_token = tx_host_env::with(|tx_env| { + let native_token = tx_env.wl_storage.storage.native_token.clone(); + let owner = &redelegation.owner; + 
tx_env.spawn_accounts([owner]); + + // First, credit the delegator with the initial stake, + // before we initialize the bond below + tx_env.credit_tokens(owner, &native_token, initial_stake); + native_token + }); + + // Create the initial bond. + ctx().bond_tokens( + Some(&redelegation.owner), + &redelegation.src_validator, + initial_stake, + )?; + tx_host_env::commit_tx_and_block(); + + let tx_code = vec![]; + let tx_data = redelegation.serialize_to_vec(); + let mut tx = Tx::new(ChainId::default(), None); + tx.add_code(tx_code) + .add_serialized_data(tx_data) + .sign_wrapper(key); + let signed_tx = tx; + + // Check that PoS balance is the same as the initial validator stake + let pos_balance_key = token::balance_key( + &native_token, + &Address::Internal(InternalAddress::PoS), + ); + let pos_balance_pre: token::Amount = ctx() + .read(&pos_balance_key)? + .expect("PoS must have balance"); + assert_eq!(pos_balance_pre, initial_stake); + + let mut epoched_total_stake_pre: Vec = Vec::new(); + let mut epoched_src_validator_stake_pre: Vec = + Vec::new(); + let mut epoched_dest_validator_stake_pre: Vec = + Vec::new(); + let mut epoched_src_bonds_pre: Vec> = Vec::new(); + let mut epoched_dest_bonds_pre: Vec> = Vec::new(); + let mut epoched_validator_set_pre: Vec> = + Vec::new(); + + for epoch in 0..=pos_params.withdrawable_epoch_offset() { + epoched_total_stake_pre.push(read_total_stake( + ctx(), + &pos_params, + Epoch(epoch), + )?); + epoched_src_validator_stake_pre.push(read_validator_stake( + ctx(), + &pos_params, + &redelegation.src_validator, + Epoch(epoch), + )?); + epoched_dest_validator_stake_pre.push(read_validator_stake( + ctx(), + &pos_params, + &redelegation.dest_validator, + Epoch(epoch), + )?); + epoched_src_bonds_pre.push( + bond_handle(&redelegation.owner, &redelegation.src_validator) + .get_delta_val(ctx(), Epoch(epoch))?, + ); + epoched_dest_bonds_pre.push( + bond_handle(&redelegation.owner, &redelegation.src_validator) + .get_delta_val(ctx(), 
Epoch(epoch))?, + ); + epoched_validator_set_pre.push( + read_consensus_validator_set_addresses_with_stake( + ctx(), + Epoch(epoch), + )?, + ); + } + + // Apply the redelegation tx + apply_tx(ctx(), signed_tx)?; + + // Read the data after the redelegation tx is executed. + // The following storage keys should be updated: + // - `#{PoS}/validator/#{validator}/deltas` + // - `#{PoS}/total_deltas` + // - `#{PoS}/validator_set` + + let mut epoched_src_bonds_post: Vec> = Vec::new(); + let mut epoched_dest_bonds_post: Vec> = + Vec::new(); + for epoch in 0..=pos_params.unbonding_len { + epoched_src_bonds_post.push( + bond_handle(&redelegation.owner, &redelegation.src_validator) + .get_delta_val(ctx(), Epoch(epoch))?, + ); + epoched_dest_bonds_post.push( + bond_handle(&redelegation.owner, &redelegation.dest_validator) + .get_delta_val(ctx(), Epoch(epoch))?, + ); + } + + // Before pipeline offset, there can only be self-bond for genesis + // validator. In case of a delegation the state is setup so that there + // is no bond until pipeline offset. 
+ for epoch in 0..pos_params.pipeline_len { + assert_eq!( + read_validator_stake( + ctx(), + &pos_params, + &redelegation.src_validator, + Epoch(epoch) + )?, + token::Amount::zero(), + "The validator stake before the pipeline offset must be 0 - \ + checking in epoch: {epoch}" + ); + assert_eq!( + read_validator_stake( + ctx(), + &pos_params, + &redelegation.dest_validator, + Epoch(epoch) + )?, + token::Amount::zero(), + "The validator stake before the pipeline offset must be 0 - \ + checking in epoch: {epoch}" + ); + assert_eq!( + read_total_stake(ctx(), &pos_params, Epoch(epoch))?, + token::Amount::zero(), + "The total stake before the pipeline offset must be 0 - \ + checking in epoch: {epoch}" + ); + assert_eq!( + epoched_validator_set_pre[epoch as usize], + read_consensus_validator_set_addresses_with_stake( + ctx(), + Epoch(epoch), + )?, + "Validator set before pipeline offset must not change - \ + checking epoch {epoch}" + ); + } + + // Check stakes after the pipeline length + for epoch in + pos_params.pipeline_len..=pos_params.withdrawable_epoch_offset() + { + assert_eq!( + read_validator_stake( + ctx(), + &pos_params, + &redelegation.src_validator, + Epoch(epoch) + )?, + initial_stake - redelegation.amount, + "The validator stake at and after the pipeline offset must \ + have changed - checking in epoch: {epoch}" + ); + assert_eq!( + read_validator_stake( + ctx(), + &pos_params, + &redelegation.dest_validator, + Epoch(epoch) + )?, + redelegation.amount, + "The validator stake at and after the pipeline offset must \ + have changed - checking in epoch: {epoch}" + ); + assert_eq!( + read_total_stake(ctx(), &pos_params, Epoch(epoch))?, + initial_stake, + "The total stake at and after the pipeline offset must have \ + changed - checking in epoch: {epoch}" + ); + } + // Check validator sets + assert_eq!( + BTreeSet::from_iter([ + WeightedValidator { + bonded_stake: initial_stake - redelegation.amount, + address: redelegation.src_validator.clone() + }, + 
WeightedValidator { + bonded_stake: redelegation.amount, + address: redelegation.dest_validator.clone() + } + ]), + read_consensus_validator_set_addresses_with_stake( + ctx(), + Epoch(pos_params.pipeline_len), + )?, + "The validator set at pipeline offset should have changed" + ); + + // Check that PoS account balance is unchanged by the redelegation + let pos_balance_post: token::Amount = + ctx().read(&pos_balance_key)?.unwrap(); + assert_eq!( + pos_balance_pre, pos_balance_post, + "Unbonding should not affect PoS system balance" + ); + + // Check that no unbonds exist + assert!( + unbond_handle(&redelegation.owner, &redelegation.src_validator) + .is_empty(ctx())? + ); + assert!( + unbond_handle(&redelegation.owner, &redelegation.dest_validator) + .is_empty(ctx())? + ); + + // Check bonds + for epoch in 0..pos_params.withdrawable_epoch_offset() { + let (exp_src_bond, exp_dest_bond) = + if epoch == pos_params.pipeline_len { + ( + Some(initial_stake - redelegation.amount), + Some(redelegation.amount), + ) + } else { + (None, None) + }; + + assert_eq!( + bond_handle(&redelegation.owner, &redelegation.src_validator) + .get_delta_val(ctx(), Epoch(epoch))?, + exp_src_bond, + "After the tx is applied, the bond should be changed in \ + place, checking epoch {epoch}" + ); + assert_eq!( + bond_handle(&redelegation.owner, &redelegation.dest_validator) + .get_delta_val(ctx(), Epoch(epoch))?, + exp_dest_bond, + "After the tx is applied, the bond should be changed in \ + place, checking epoch {epoch}" + ); + } + + // Use the tx_env to run PoS VP + let tx_env = tx_host_env::take(); + let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); + let result = vp_env.validate_tx(PosVP::new); + let result = + result.expect("Validation of valid changes must not fail!"); + assert!( + result, + "PoS Validity predicate must accept this transaction" + ); + Ok(()) + } + + /// Generates an initial validator stake and a redelegation, while making + /// sure that the `initial_stake >= 
redelegation.amount`. + fn arb_initial_stake_and_redelegation() + -> impl Strategy + { + // Generate initial stake + token::testing::arb_amount_ceiled((i64::MAX / 8) as u64).prop_flat_map( + |initial_stake| { + // Use the initial stake to limit the bond amount + let redelegation = arb_redelegation( + u128::try_from(initial_stake).unwrap() as u64, + ); + // Use the generated initial stake too too + (Just(initial_stake), redelegation) + }, + ) + } + + /// Generates an arbitrary redelegation, with the amount constrained from + /// above. + fn arb_redelegation( + max_amount: u64, + ) -> impl Strategy { + ( + address::testing::arb_established_address(), + address::testing::arb_established_address(), + address::testing::arb_non_internal_address(), + token::testing::arb_amount_non_zero_ceiled(max_amount), + ) + .prop_map( + |(src_validator, dest_validator, owner, amount)| { + let src_validator = Address::Established(src_validator); + let dest_validator = Address::Established(dest_validator); + transaction::pos::Redelegation { + src_validator, + dest_validator, + owner, + amount, + } + }, + ) + } +} diff --git a/wasm/wasm_source/src/tx_transfer.rs b/wasm/wasm_source/src/tx_transfer.rs index bdc683c339..f36f52c74d 100644 --- a/wasm/wasm_source/src/tx_transfer.rs +++ b/wasm/wasm_source/src/tx_transfer.rs @@ -11,15 +11,17 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let transfer = token::Transfer::try_from_slice(&data[..]) .wrap_err("failed to decode token::Transfer")?; debug_log!("apply_tx called with transfer: {:#?}", transfer); - let token::Transfer { - source, - target, - token, - amount, - key, - shielded: shielded_hash, - } = transfer; - let shielded = shielded_hash + + token::transfer( + ctx, + &transfer.source, + &transfer.target, + &transfer.token, + transfer.amount, + )?; + + let shielded = transfer + .shielded .as_ref() .map(|hash| { signed @@ -28,14 +30,8 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { .ok_or_err_msg("unable to find shielded 
section") }) .transpose()?; - token::transfer( - ctx, - &source, - &target, - &token, - amount, - &key, - &shielded_hash, - &shielded, - ) + if let Some(shielded) = shielded { + token::handle_masp_tx(ctx, &transfer, &shielded)?; + } + Ok(()) } diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs index 7e08c0dcda..7f66b7a338 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/wasm_source/src/tx_unbond.rs @@ -10,15 +10,22 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let unbond = transaction::pos::Unbond::try_from_slice(&data[..]) .wrap_err("failed to decode Unbond")?; - ctx.unbond_tokens(unbond.source.as_ref(), &unbond.validator, unbond.amount) + ctx.unbond_tokens( + unbond.source.as_ref(), + &unbond.validator, + unbond.amount, + )?; + // TODO: would using debug_log! be useful? + + Ok(()) } #[cfg(test)] mod tests { use std::collections::BTreeSet; - use namada::ledger::pos::{GenesisValidator, PosParams, PosVP}; - use namada::proof_of_stake::types::WeightedValidator; + use namada::ledger::pos::{OwnedPosParams, PosVP}; + use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; use namada::proof_of_stake::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, unbond_handle, @@ -30,6 +37,7 @@ mod tests { use namada_tests::native_vp::TestNativeVpEnv; use namada_tests::tx::*; use namada_tx_prelude::address::InternalAddress; + use namada_tx_prelude::borsh_ext::BorshSerializeExt; use namada_tx_prelude::chain::ChainId; use namada_tx_prelude::key::testing::arb_common_keypair; use namada_tx_prelude::key::RefTo; @@ -62,11 +70,11 @@ mod tests { initial_stake: token::Amount, unbond: transaction::pos::Unbond, key: key::common::SecretKey, - pos_params: PosParams, + pos_params: OwnedPosParams, ) -> TxResult { // Remove the validator stake threshold for simplicity - let pos_params = PosParams { - validator_stake_threshold: token::Amount::default(), + let pos_params = 
OwnedPosParams { + validator_stake_threshold: token::Amount::zero(), ..pos_params }; @@ -75,6 +83,8 @@ mod tests { &unbond.source, Some(source) if *source != unbond.validator); let consensus_key = key::testing::keypair_1().ref_to(); + let protocol_key = key::testing::keypair_2().ref_to(); + let eth_cold_key = key::testing::keypair_3().ref_to(); let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = Dec::new(5, 2).expect("Cannot fail"); @@ -85,18 +95,20 @@ mod tests { tokens: if is_delegation { // If we're unbonding a delegation, we'll give the initial stake // to the delegation instead of the validator - token::Amount::default() + token::Amount::zero() } else { initial_stake }, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, max_commission_rate_change, }]; - init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + let pos_params = + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { let native_token = tx_env.wl_storage.storage.native_token.clone(); @@ -113,8 +125,9 @@ mod tests { native_token }); - // Initialize the delegation if it is the case - unlike genesis - // validator's self-bond, this happens at pipeline offset + // If delegation, initialize the bond with a delegation from the unbond + // source, which will become active at pipeline offset. If a self-bond, + // the bond is already active from genesis. 
if is_delegation { ctx().bond_tokens( unbond.source.as_ref(), @@ -125,7 +138,7 @@ mod tests { tx_host_env::commit_tx_and_block(); let tx_code = vec![]; - let tx_data = unbond.try_to_vec().unwrap(); + let tx_data = unbond.serialize_to_vec(); let mut tx = Tx::new(ChainId::default(), None); tx.add_code(tx_code) .add_serialized_data(tx_data) @@ -136,11 +149,8 @@ mod tests { .source .clone() .unwrap_or_else(|| unbond.validator.clone()); - // let unbond_id = BondId { - // validator: unbond.validator.clone(), - // source: unbond_src.clone(), - // }; + // Check that PoS balance is the same as the initial validator stake let pos_balance_key = token::balance_key( &native_token, &Address::Internal(InternalAddress::PoS), @@ -158,26 +168,20 @@ mod tests { let mut epoched_validator_set_pre: Vec> = Vec::new(); - for epoch in 0..=pos_params.unbonding_len { + for epoch in 0..=pos_params.withdrawable_epoch_offset() { epoched_total_stake_pre.push(read_total_stake( ctx(), &pos_params, Epoch(epoch), )?); - epoched_validator_stake_pre.push( - read_validator_stake( - ctx(), - &pos_params, - &unbond.validator, - Epoch(epoch), - )? - .unwrap(), - ); - epoched_bonds_pre.push( - bond_handle - .get_delta_val(ctx(), Epoch(epoch), &pos_params)? - .map(token::Amount::from_change), - ); + epoched_validator_stake_pre.push(read_validator_stake( + ctx(), + &pos_params, + &unbond.validator, + Epoch(epoch), + )?); + epoched_bonds_pre + .push(bond_handle.get_delta_val(ctx(), Epoch(epoch))?); epoched_validator_set_pre.push( read_consensus_validator_set_addresses_with_stake( ctx(), @@ -185,31 +189,25 @@ mod tests { )?, ); } - // dbg!(&epoched_bonds_pre); // Apply the unbond tx apply_tx(ctx(), signed_tx)?; - // Read the data after the tx is executed. + // Read the data after the unbond tx is executed. 
// The following storage keys should be updated: - // - `#{PoS}/validator/#{validator}/deltas` // - `#{PoS}/total_deltas` // - `#{PoS}/validator_set` - let mut epoched_bonds_post: Vec> = Vec::new(); + let mut epoched_bonds_post: Vec> = Vec::new(); for epoch in 0..=pos_params.unbonding_len { - epoched_bonds_post.push( - bond_handle - .get_delta_val(ctx(), Epoch(epoch), &pos_params)? - .map(token::Amount::from_change), - ); + epoched_bonds_post + .push(bond_handle.get_delta_val(ctx(), Epoch(epoch))?); } - // dbg!(&epoched_bonds_post); let expected_amount_before_pipeline = if is_delegation { // When this is a delegation, there will be no bond until pipeline - token::Amount::default() + token::Amount::zero() } else { // Before pipeline offset, there can only be self-bond initial_stake @@ -226,7 +224,7 @@ mod tests { &unbond.validator, Epoch(epoch) )?, - Some(expected_amount_before_pipeline), + expected_amount_before_pipeline, "The validator deltas before the pipeline offset must not \ change - checking in epoch: {epoch}" ); @@ -249,7 +247,9 @@ mod tests { // At and after pipeline offset, there can be either delegation or // self-bond, both of which are initialized to the same `initial_stake` - for epoch in pos_params.pipeline_len..pos_params.unbonding_len { + for epoch in + pos_params.pipeline_len..=pos_params.withdrawable_epoch_offset() + { assert_eq!( read_validator_stake( ctx(), @@ -257,16 +257,17 @@ mod tests { &unbond.validator, Epoch(epoch) )?, - Some(initial_stake - unbond.amount), - "The validator deltas at and after the pipeline offset must \ + initial_stake - unbond.amount, + "The validator stake at and after the pipeline offset must \ have changed - checking in epoch: {epoch}" ); assert_eq!( read_total_stake(ctx(), &pos_params, Epoch(epoch))?, (initial_stake - unbond.amount), - "The total deltas at and after the pipeline offset must have \ + "The total stake at and after the pipeline offset must have \ changed - checking in epoch: {epoch}" ); + // Only at 
pipeline because the read won't return anything after if epoch == pos_params.pipeline_len { assert_ne!( epoched_validator_set_pre[epoch as usize], @@ -280,59 +281,16 @@ mod tests { } } - { - let epoch = pos_params.unbonding_len + 1; - let expected_stake = - initial_stake.change() - unbond.amount.change(); - assert_eq!( - read_validator_stake( - ctx(), - &pos_params, - &unbond.validator, - Epoch(epoch) - )? - .map(|v| v.change()), - Some(expected_stake), - "The total deltas at after the unbonding offset epoch must be \ - decremented by the unbonded amount - checking in epoch: \ - {epoch}" - ); - assert_eq!( - read_total_stake(ctx(), &pos_params, Epoch(epoch))?.change(), - expected_stake, - "The total deltas at after the unbonding offset epoch must be \ - decremented by the unbonded amount - checking in epoch: \ - {epoch}" - ); - } - - // - `#{staking_token}/balance/#{PoS}` // Check that PoS account balance is unchanged by unbond let pos_balance_post: token::Amount = ctx().read(&pos_balance_key)?.unwrap(); assert_eq!( pos_balance_pre, pos_balance_post, - "Unbonding doesn't affect PoS system balance" + "Unbonding should not affect PoS system balance" ); - // - `#{PoS}/unbond/#{owner}/#{validator}` // Check that the unbond doesn't exist until unbonding offset - - // Outer epoch is end (withdrawable), inner epoch is beginning of let unbond_handle = unbond_handle(&unbond_src, &unbond.validator); - - // let unbonds_post = ctx().read_unbond(&unbond_id)?.unwrap(); - // let bonds_post = ctx().read_bond(&unbond_id)?.unwrap(); - - for epoch in 0..(pos_params.pipeline_len + pos_params.unbonding_len) { - let unbond = unbond_handle.at(&Epoch(epoch)); - - assert!( - unbond.is_empty(ctx())?, - "There should be no unbond until unbonding offset - checking \ - epoch {epoch}" - ); - } let start_epoch = if is_delegation { // This bond was a delegation Epoch::from(pos_params.pipeline_len) @@ -340,62 +298,42 @@ mod tests { // This bond was a genesis validator self-bond Epoch::default() }; 
- // let end_epoch = Epoch::from(pos_params.unbonding_len - 1); - - // let expected_unbond = if unbond.amount == token::Amount::default() { - // HashMap::new() - // } else { - // HashMap::from_iter([((start_epoch, end_epoch), unbond.amount)]) - // }; + let withdrawable_epoch = pos_params.withdrawable_epoch_offset(); + for epoch in 0..withdrawable_epoch { + assert!( + unbond_handle + .at(&start_epoch) + .get(ctx(), &Epoch(epoch))? + .is_none(), + "There should be no unbond until the withdrawable offset - \ + checking epoch {epoch}" + ); + } // Ensure that the unbond is structured as expected, withdrawable at // pipeline + unbonding + cubic_slash_window offsets let actual_unbond_amount = unbond_handle - .at(&Epoch::from( - pos_params.pipeline_len - + pos_params.unbonding_len - + pos_params.cubic_slashing_window_length, - )) - .get(ctx(), &start_epoch)?; + .at(&start_epoch) + .get(ctx(), &Epoch(withdrawable_epoch))?; assert_eq!( actual_unbond_amount, Some(unbond.amount), - "Delegation at pipeline + unbonding offset should be equal to the \ - unbonded amount" + "Delegation at pipeline + unbonding + cubic window offset should \ + be equal to the unbonded amount" ); - for epoch in start_epoch.0 - ..(pos_params.pipeline_len - + pos_params.unbonding_len - + pos_params.cubic_slashing_window_length) - { + for epoch in start_epoch.0..pos_params.withdrawable_epoch_offset() { let bond_amount = bond_handle.get_sum(ctx(), Epoch(epoch), &pos_params)?; let expected_amount = initial_stake - unbond.amount; assert_eq!( bond_amount, - Some(expected_amount.change()), + Some(expected_amount), "After the tx is applied, the bond should be changed in \ place, checking epoch {epoch}" ); } - // { - // let epoch = pos_params.unbonding_len + 1; - // let bond: Bond = bonds_post.get(epoch).unwrap(); - // let expected_bond = - // HashMap::from_iter([(start_epoch, initial_stake)]); - // assert_eq!( - // bond.pos_deltas, expected_bond, - // "At unbonding offset, the pos deltas should not change, \ - 
// checking epoch {epoch}" - // ); - // assert_eq!( - // bond.neg_deltas, unbond.amount, - // "At unbonding offset, the unbonded amount should have been \ - // deducted, checking epoch {epoch}" - // ) - // } // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); @@ -410,6 +348,8 @@ mod tests { Ok(()) } + /// Generates an initial validator stake and a unbond, while making sure + /// that the `initial_stake >= unbond.amount`. fn arb_initial_stake_and_unbond() -> impl Strategy { // Generate initial stake @@ -424,8 +364,7 @@ mod tests { ) } - /// Generates an initial validator stake and a unbond, while making sure - /// that the `initial_stake >= unbond.amount`. + /// Generates an arbitrary unbond, with the amount constrained from above. fn arb_unbond( max_amount: u64, ) -> impl Strategy { diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index c8fa649c43..5bedf44c42 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -12,7 +12,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let slashed = ctx.withdraw_tokens(withdraw.source.as_ref(), &withdraw.validator)?; - if slashed != token::Amount::default() { + if !slashed.is_zero() { debug_log!("New withdrawal slashed for {}", slashed.to_string_native()); } Ok(()) @@ -20,7 +20,8 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { - use namada::ledger::pos::{GenesisValidator, PosParams, PosVP}; + use namada::ledger::pos::{OwnedPosParams, PosVP}; + use namada::proof_of_stake::types::GenesisValidator; use namada::proof_of_stake::unbond_handle; use namada::types::dec::Dec; use namada::types::storage::Epoch; @@ -32,6 +33,7 @@ mod tests { arb_established_address, arb_non_internal_address, }; use namada_tx_prelude::address::InternalAddress; + use namada_tx_prelude::borsh_ext::BorshSerializeExt; use namada_tx_prelude::chain::ChainId; use namada_tx_prelude::key::testing::arb_common_keypair; use 
namada_tx_prelude::key::RefTo; @@ -67,17 +69,19 @@ mod tests { unbonded_amount: token::Amount, withdraw: transaction::pos::Withdraw, key: key::common::SecretKey, - pos_params: PosParams, + pos_params: OwnedPosParams, ) -> TxResult { // Remove the validator stake threshold for simplicity - let pos_params = PosParams { - validator_stake_threshold: token::Amount::default(), + let pos_params = OwnedPosParams { + validator_stake_threshold: token::Amount::zero(), ..pos_params }; let is_delegation = matches!( &withdraw.source, Some(source) if *source != withdraw.validator); let consensus_key = key::testing::keypair_1().ref_to(); + let protocol_key = key::testing::keypair_2().ref_to(); + let eth_cold_key = key::testing::keypair_3().ref_to(); let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = Dec::new(5, 2).expect("Cannot fail"); @@ -89,18 +93,20 @@ mod tests { // If we're withdrawing a delegation, we'll give the initial // stake to the delegation instead of the // validator - token::Amount::default() + token::Amount::zero() } else { initial_stake }, consensus_key, + protocol_key, eth_cold_key, eth_hot_key, commission_rate, max_commission_rate_change, }]; - init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + let pos_params = + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { let native_token = tx_env.wl_storage.storage.native_token.clone(); @@ -169,7 +175,7 @@ mod tests { ); let tx_code = vec![]; - let tx_data = withdraw.try_to_vec().unwrap(); + let tx_data = withdraw.serialize_to_vec(); let mut tx = Tx::new(ChainId::default(), None); tx.add_code(tx_code) .add_serialized_data(tx_data) @@ -193,7 +199,7 @@ mod tests { let handle = unbond_handle(&unbond_src, &withdraw.validator); let unbond_pre = - handle.at(&withdraw_epoch).get(ctx(), &bond_epoch).unwrap(); + handle.at(&bond_epoch).get(ctx(), &withdraw_epoch).unwrap(); assert_eq!(unbond_pre, Some(unbonded_amount)); diff --git 
a/wasm/wasm_source/src/vp_implicit.rs b/wasm/wasm_source/src/vp_implicit.rs index 215dccf421..c6c41fa363 100644 --- a/wasm/wasm_source/src/vp_implicit.rs +++ b/wasm/wasm_source/src/vp_implicit.rs @@ -362,9 +362,6 @@ mod tests { address, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -391,6 +388,7 @@ mod tests { let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = key::testing::keypair_1().ref_to(); let eth_cold_key = key::testing::keypair_3().ref_to(); let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); @@ -400,6 +398,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key, @@ -470,6 +469,7 @@ mod tests { let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = key::testing::keypair_1().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); let max_commission_rate_change = Dec::new(1, 2).unwrap(); @@ -477,6 +477,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key: key::common::PublicKey::Secp256k1( @@ -536,7 +537,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -595,9 +596,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -658,9 +656,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -672,7 +667,7 @@ mod tests { 
tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -734,9 +729,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -840,7 +832,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -933,7 +925,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -988,7 +980,7 @@ mod tests { tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); diff --git a/wasm/wasm_source/src/vp_masp.rs b/wasm/wasm_source/src/vp_masp.rs index cb66211118..b22fceafff 100644 --- a/wasm/wasm_source/src/vp_masp.rs +++ b/wasm/wasm_source/src/vp_masp.rs @@ -4,6 +4,7 @@ use masp_primitives::asset_type::AssetType; use masp_primitives::transaction::components::I128Sum; /// Multi-asset shielded pool VP. 
use namada_vp_prelude::address::masp; +use namada_vp_prelude::borsh_ext::BorshSerializeExt; use namada_vp_prelude::storage::Epoch; use namada_vp_prelude::*; use ripemd::{Digest, Ripemd160}; @@ -16,9 +17,7 @@ fn asset_type_from_epoched_address( denom: token::MaspDenom, ) -> AssetType { // Timestamp the chosen token with the current epoch - let token_bytes = (token, denom, epoch.0) - .try_to_vec() - .expect("token should serialize"); + let token_bytes = (token, denom, epoch.0).serialize_to_vec(); // Generate the unique asset identifier from the unique token address AssetType::new(token_bytes.as_ref()).expect("unable to create asset type") } @@ -90,185 +89,164 @@ fn validate_tx( verifiers, ); - let signed = tx_data; - let transfer = - token::Transfer::try_from_slice(&signed.data().unwrap()[..]).unwrap(); - - let shielded = transfer - .shielded - .as_ref() - .map(|hash| { - signed - .get_section(hash) - .and_then(|x| x.as_ref().masp_tx()) - .ok_or_err_msg("unable to find shielded section") - }) - .transpose()?; - if let Some(shielded_tx) = shielded { - let mut transparent_tx_pool = I128Sum::zero(); - // The Sapling value balance adds to the transparent tx pool - transparent_tx_pool += shielded_tx.sapling_value_balance(); - - if transfer.source != masp() { - // Handle transparent input - // Note that the asset type is timestamped so shields - // where the shielded value has an incorrect timestamp - // are automatically rejected - for denom in token::MaspDenom::iter() { - let (_transp_asset, transp_amt) = convert_amount( - ctx.get_block_epoch().unwrap(), - &transfer.token, - transfer.amount.into(), - denom, - ); - - // Non-masp sources add to transparent tx pool - transparent_tx_pool += transp_amt; - } - } else { - // Handle shielded input - // The following boundary conditions must be satisfied - // 1. Zero transparent input - // 2. the transparent transaction value pool's amount must equal the - // containing wrapper transaction's fee amount - // Satisfies 1. 
- if let Some(transp_bundle) = shielded_tx.transparent_bundle() { - if !transp_bundle.vin.is_empty() { - debug_log!( - "Transparent input to a transaction from the masp \ - must be 0 but is {}", - transp_bundle.vin.len() - ); - return reject(); - } - } + let (transfer, shielded_tx) = ctx.get_shielded_action(tx_data)?; + let mut transparent_tx_pool = I128Sum::zero(); + // The Sapling value balance adds to the transparent tx pool + transparent_tx_pool += shielded_tx.sapling_value_balance(); + + if transfer.source != masp() { + // Handle transparent input + // Note that the asset type is timestamped so shields + // where the shielded value has an incorrect timestamp + // are automatically rejected + for denom in token::MaspDenom::iter() { + let (_transp_asset, transp_amt) = convert_amount( + ctx.get_block_epoch().unwrap(), + &transfer.token, + transfer.amount.into(), + denom, + ); + + // Non-masp sources add to transparent tx pool + transparent_tx_pool += transp_amt; } - - if transfer.target != masp() { - // Handle transparent output - // The following boundary conditions must be satisfied - // 1. One to 4 transparent outputs - // 2. Asset type must be properly derived - // 3. Value from the output must be the same as the containing - // transfer - // 4. Public key must be the hash of the target - - // Satisfies 1. - let transp_bundle = - shielded_tx.transparent_bundle().ok_or_err_msg( - "Expected transparent outputs in unshielding transaction", - )?; - - let out_length = transp_bundle.vout.len(); - if !(1..=4).contains(&out_length) { + } else { + // Handle shielded input + // The following boundary conditions must be satisfied + // 1. Zero transparent input + // 2. the transparent transaction value pool's amount must equal the + // containing wrapper transaction's fee amount + // Satisfies 1. 
+ if let Some(transp_bundle) = shielded_tx.transparent_bundle() { + if !transp_bundle.vin.is_empty() { debug_log!( - "Transparent output to a transaction to the masp must be \ - beteween 1 and 4 but is {}", - transp_bundle.vout.len() + "Transparent input to a transaction from the masp must be \ + 0 but is {}", + transp_bundle.vin.len() ); - return reject(); } - let mut outs = transp_bundle.vout.iter(); - let mut valid_count = 0; - for denom in token::MaspDenom::iter() { - let out = match outs.next() { - Some(out) => out, - None => continue, - }; - - let expected_asset_type: AssetType = - asset_type_from_epoched_address( - ctx.get_block_epoch().unwrap(), - &transfer.token, - denom, - ); - - // Satisfies 2. and 3. - if !valid_asset_type(&expected_asset_type, &out.asset_type) { - // we don't know which masp denoms are necessary apriori. - // This is encoded via the asset types. - continue; - } - if !valid_transfer_amount( - out.value, - denom.denominate(&transfer.amount.amount), - ) { - return reject(); - } + } + } - let (_transp_asset, transp_amt) = convert_amount( + if transfer.target != masp() { + // Handle transparent output + // The following boundary conditions must be satisfied + // 1. One to 4 transparent outputs + // 2. Asset type must be properly derived + // 3. Value from the output must be the same as the containing + // transfer + // 4. Public key must be the hash of the target + + // Satisfies 1. 
+ let transp_bundle = shielded_tx.transparent_bundle().ok_or_err_msg( + "Expected transparent outputs in unshielding transaction", + )?; + + let out_length = transp_bundle.vout.len(); + if !(1..=4).contains(&out_length) { + debug_log!( + "Transparent output to a transaction to the masp must be \ + beteween 1 and 4 but is {}", + transp_bundle.vout.len() + ); + + return reject(); + } + let mut outs = transp_bundle.vout.iter(); + let mut valid_count = 0; + for denom in token::MaspDenom::iter() { + let out = match outs.next() { + Some(out) => out, + None => continue, + }; + + let expected_asset_type: AssetType = + asset_type_from_epoched_address( ctx.get_block_epoch().unwrap(), &transfer.token, - transfer.amount.amount, denom, ); - // Non-masp destinations subtract from transparent tx pool - transparent_tx_pool -= transp_amt; - - // Satisfies 4. - let target_enc = transfer - .target - .try_to_vec() - .expect("target address encoding"); - - let hash = Ripemd160::digest(sha256(&target_enc).0.as_slice()); - - if <[u8; 20]>::from(hash) != out.address.0 { - debug_log!( - "the public key of the output account does not match \ - the transfer target" - ); - return reject(); - } - valid_count += 1; + // Satisfies 2. and 3. + if !valid_asset_type(&expected_asset_type, &out.asset_type) { + // we don't know which masp denoms are necessary apriori. + // This is encoded via the asset types. + continue; } - // one or more of the denoms in the batch failed to verify - // the asset derivation. - if valid_count != out_length { + if !valid_transfer_amount( + out.value, + denom.denominate(&transfer.amount.amount), + ) { return reject(); } - } else { - // Handle shielded output - // The following boundary conditions must be satisfied - // 1. Zero transparent output - // Satisfies 1. 
- if let Some(transp_bundle) = shielded_tx.transparent_bundle() { - if !transp_bundle.vout.is_empty() { - debug_log!( - "Transparent output to a transaction from the masp \ - must be 0 but is {}", - transp_bundle.vout.len() - ); - return reject(); - } - } - } + let (_transp_asset, transp_amt) = convert_amount( + ctx.get_block_epoch().unwrap(), + &transfer.token, + transfer.amount.amount, + denom, + ); + + // Non-masp destinations subtract from transparent tx pool + transparent_tx_pool -= transp_amt; + + // Satisfies 4. + let target_enc = transfer.target.serialize_to_vec(); - match transparent_tx_pool.partial_cmp(&I128Sum::zero()) { - None | Some(Ordering::Less) => { + let hash = Ripemd160::digest(sha256(&target_enc).0.as_slice()); + + if <[u8; 20]>::from(hash) != out.address.0 { debug_log!( - "Transparent transaction value pool must be nonnegative. \ - Violation may be caused by transaction being constructed \ - in previous epoch. Maybe try again." + "the public key of the output account does not match the \ + transfer target" ); - // Section 3.4: The remaining value in the transparent - // transaction value pool MUST be nonnegative. return reject(); } - Some(Ordering::Greater) => { + valid_count += 1; + } + // one or more of the denoms in the batch failed to verify + // the asset derivation. + if valid_count != out_length { + return reject(); + } + } else { + // Handle shielded output + // The following boundary conditions must be satisfied + // 1. Zero transparent output + + // Satisfies 1. + if let Some(transp_bundle) = shielded_tx.transparent_bundle() { + if !transp_bundle.vout.is_empty() { debug_log!( - "Transaction fees cannot be paid inside MASP transaction." + "Transparent output to a transaction from the masp must \ + be 0 but is {}", + transp_bundle.vout.len() ); return reject(); } - _ => {} } - // Do the expensive proof verification in the VM at the end. 
- ctx.verify_masp(shielded_tx.try_to_vec().unwrap()) - } else { - reject() } + + match transparent_tx_pool.partial_cmp(&I128Sum::zero()) { + None | Some(Ordering::Less) => { + debug_log!( + "Transparent transaction value pool must be nonnegative. \ + Violation may be caused by transaction being constructed in \ + previous epoch. Maybe try again." + ); + // Section 3.4: The remaining value in the transparent + // transaction value pool MUST be nonnegative. + return reject(); + } + Some(Ordering::Greater) => { + debug_log!( + "Transaction fees cannot be paid inside MASP transaction." + ); + return reject(); + } + _ => {} + } + // Do the expensive proof verification in the VM at the end. + ctx.verify_masp(shielded_tx.serialize_to_vec()) } diff --git a/wasm/wasm_source/src/vp_testnet_faucet.rs b/wasm/wasm_source/src/vp_testnet_faucet.rs index 7298c0b126..6950be2111 100644 --- a/wasm/wasm_source/src/vp_testnet_faucet.rs +++ b/wasm/wasm_source/src/vp_testnet_faucet.rs @@ -118,6 +118,7 @@ mod tests { use namada_tests::vp::*; use namada_tx_prelude::{StorageWrite, TxEnv}; use namada_vp_prelude::account::AccountPublicKeysMap; + use namada_vp_prelude::borsh_ext::BorshSerializeExt; use namada_vp_prelude::key::RefTo; use proptest::prelude::*; use storage::testing::arb_account_storage_key_no_vp; @@ -176,9 +177,6 @@ mod tests { address, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -267,7 +265,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -277,10 +275,14 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); + assert!(validate_tx( + &CTX, + signed_tx, + vp_owner, + keys_changed, + verifiers + ) + .unwrap()); } prop_compose! { @@ -331,7 +333,7 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction - tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount, &None, &None, &None).unwrap(); + tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount).unwrap(); }); let vp_env = vp_host_env::take(); @@ -376,7 +378,7 @@ mod tests { // Construct a PoW solution like a client would let challenge = testnet_pow::Challenge::new(&mut tx_env.wl_storage, &vp_owner, target.clone()).unwrap(); let solution = challenge.solve(); - let solution_bytes = solution.try_to_vec().unwrap(); + let solution_bytes = solution.serialize_to_vec(); let amount = token::DenominatedAmount { amount, @@ -390,7 +392,7 @@ mod tests { let valid = solution.validate(tx::ctx(), address, target.clone()).unwrap(); assert!(valid); // Apply transfer in a transaction - tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount, &None, &None, &None).unwrap(); + tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount).unwrap(); }); let mut vp_env = vp_host_env::take(); @@ -400,7 +402,7 @@ mod tests { tx_data.set_data(Data::new(solution_bytes)); tx_data.set_code(Code::new(vec![])); tx_data.add_section(Section::Signature(Signature::new( - vec![*tx_data.data_sechash(), *tx_data.code_sechash()], + vec![tx_data.raw_header_hash()], [(0, target_key)].into_iter().collect(), None, ))); @@ -454,7 +456,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], 
pks_map.index_secret_keys(vec![keypair]), None, ))); diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index a334576b53..b7c120532f 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -259,9 +259,6 @@ mod tests { address, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -317,9 +314,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -379,9 +373,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -393,7 +384,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -418,6 +409,7 @@ mod tests { let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = key::testing::keypair_1().ref_to(); let eth_cold_key = key::testing::keypair_3().ref_to(); let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); @@ -427,6 +419,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key, @@ -495,6 +488,7 @@ mod tests { let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = key::testing::keypair_1().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); let max_commission_rate_change = Dec::new(1, 2).unwrap(); @@ -502,6 +496,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key: 
key::common::PublicKey::Secp256k1( @@ -562,7 +557,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -612,9 +607,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -687,56 +679,56 @@ mod tests { } proptest! { - /// Test that a signed tx that performs arbitrary storage writes or - /// deletes to the account is accepted. - #[test] - fn test_signed_arb_storage_write( - (vp_owner, storage_key) in arb_account_storage_subspace_key(), - // Generate bytes to write. If `None`, delete from the key instead - storage_value in any::>>(), - ) { - // Initialize a tx environment - let mut tx_env = TestTxEnv::default(); - - let keypair = key::testing::keypair_1(); - let public_key = keypair.ref_to(); - - // Spawn all the accounts in the storage key to be able to modify - // their storage - let storage_key_addresses = storage_key.find_addresses(); - tx_env.spawn_accounts(storage_key_addresses); - tx_env.init_account_storage(&vp_owner, vec![public_key.clone()], 1); - - // Initialize VP environment from a transaction - vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |_address| { - // Write or delete some data in the transaction - if let Some(value) = &storage_value { - tx::ctx().write(&storage_key, value).unwrap(); - } else { - tx::ctx().delete(&storage_key).unwrap(); - } - }); - - let pks_map = AccountPublicKeysMap::from_iter(vec![public_key]); - - let mut vp_env = vp_host_env::take(); - let mut tx = vp_env.tx.clone(); - tx.set_code(Code::new(vec![])); - tx.set_data(Data::new(vec![])); - tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], - pks_map.index_secret_keys(vec![keypair]), - None, - ))); - let signed_tx = tx.clone(); - vp_env.tx = signed_tx.clone(); - let keys_changed: 
BTreeSet = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet
= BTreeSet::default(); - vp_host_env::set(vp_env); - assert!(validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers).unwrap()); + /// Test that a signed tx that performs arbitrary storage writes or + /// deletes to the account is accepted. + #[test] + fn test_signed_arb_storage_write( + (vp_owner, storage_key) in arb_account_storage_subspace_key(), + // Generate bytes to write. If `None`, delete from the key instead + storage_value in any::>>(), + ) { + // Initialize a tx environment + let mut tx_env = TestTxEnv::default(); + + let keypair = key::testing::keypair_1(); + let public_key = keypair.ref_to(); + + // Spawn all the accounts in the storage key to be able to modify + // their storage + let storage_key_addresses = storage_key.find_addresses(); + tx_env.spawn_accounts(storage_key_addresses); + tx_env.init_account_storage(&vp_owner, vec![public_key.clone()], 1); + + // Initialize VP environment from a transaction + vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |_address| { + // Write or delete some data in the transaction + if let Some(value) = &storage_value { + tx::ctx().write(&storage_key, value).unwrap(); + } else { + tx::ctx().delete(&storage_key).unwrap(); + } + }); + + let pks_map = AccountPublicKeysMap::from_iter(vec![public_key]); + + let mut vp_env = vp_host_env::take(); + let mut tx = vp_env.tx.clone(); + tx.set_code(Code::new(vec![])); + tx.set_data(Data::new(vec![])); + tx.add_section(Section::Signature(Signature::new( + vec![ tx.raw_header_hash()], + pks_map.index_secret_keys(vec![keypair]), + None, + ))); + let signed_tx = tx.clone(); + vp_env.tx = signed_tx.clone(); + let keys_changed: BTreeSet = + vp_env.all_touched_storage_keys(); + let verifiers: BTreeSet
= BTreeSet::default(); + vp_host_env::set(vp_env); + assert!(validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers).unwrap()); + } } - } /// Test that a validity predicate update without a valid signature is /// rejected. @@ -811,7 +803,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -866,7 +858,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -922,7 +914,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -978,7 +970,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -1034,7 +1026,7 @@ mod tests { tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); diff --git a/wasm/wasm_source/src/vp_validator.rs b/wasm/wasm_source/src/vp_validator.rs index f929a8a0d1..88383803d0 100644 --- a/wasm/wasm_source/src/vp_validator.rs +++ b/wasm/wasm_source/src/vp_validator.rs @@ -191,7 +191,7 @@ fn validate_tx( #[cfg(test)] mod tests { use address::testing::arb_non_internal_address; - use namada::ledger::pos::{GenesisValidator, PosParams}; + 
use namada::ledger::pos::{GenesisValidator, OwnedPosParams}; use namada::proto::{Code, Data, Signature}; use namada::types::dec::Dec; use namada::types::storage::Epoch; @@ -266,9 +266,6 @@ mod tests { address, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -324,9 +321,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -386,9 +380,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -400,7 +391,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -421,10 +412,11 @@ mod tests { #[test] fn test_unsigned_pos_action_rejected() { // Init PoS genesis - let pos_params = PosParams::default(); + let pos_params = OwnedPosParams::default(); let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = key::testing::keypair_1().ref_to(); let eth_cold_key = key::testing::keypair_3().ref_to(); let eth_hot_key = key::testing::keypair_4().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); @@ -434,6 +426,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key, @@ -504,10 +497,11 @@ mod tests { #[test] fn test_signed_pos_action_accepted() { // Init PoS genesis - let pos_params = PosParams::default(); + let pos_params = OwnedPosParams::default(); let validator = address::testing::established_address_3(); let initial_stake = token::Amount::from_uint(10_098_123, 0).unwrap(); let consensus_key = key::testing::keypair_2().ref_to(); + let protocol_key = key::testing::keypair_1().ref_to(); let commission_rate = Dec::new(5, 2).unwrap(); let 
max_commission_rate_change = Dec::new(1, 2).unwrap(); @@ -515,6 +509,7 @@ mod tests { address: validator.clone(), tokens: initial_stake, consensus_key, + protocol_key, commission_rate, max_commission_rate_change, eth_hot_key: key::common::PublicKey::Secp256k1( @@ -580,7 +575,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, ))); @@ -629,9 +624,6 @@ mod tests { &target, &token, amount, - &None, - &None, - &None, ) .unwrap(); }); @@ -742,7 +734,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -828,7 +820,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -883,7 +875,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -939,7 +931,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -995,7 +987,7 @@ mod tests { tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], 
pks_map.index_secret_keys(vec![keypair]), None, ))); @@ -1051,7 +1043,7 @@ mod tests { tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); tx.add_section(Section::Signature(Signature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], + vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, ))); diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 2ff1ca3703..e91bd5c4d5 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -37,18 +37,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if 1.0.0", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.0", -] - [[package]] name = "aes" version = "0.8.3" @@ -377,15 +365,15 @@ checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" [[package]] name = "bellman" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4dd656ef4fdf7debb6d87d4dd92642fcbcdb78cbf6600c13e25c87e4d1a3807" +checksum = "9afceed28bac7f9f5a508bca8aeeff51cdfa4770c0b967ac55c621e2ddfd6171" dependencies = [ "bitvec 1.0.1", "blake2s_simd", "byteorder", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "pairing", "rand_core 0.6.4", "subtle 2.4.1", @@ -411,14 +399,14 @@ dependencies = [ [[package]] name = "bip0039" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0830ae4cc96b0617cc912970c2b17e89456fecbf55e8eed53a956f37ab50c41" +checksum = "bef0f0152ec5cf17f49a5866afaa3439816207fd4f0a224c0211ffaf5e278426" dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.9.0", + "hmac 0.12.1", + "pbkdf2 0.10.1", "rand 0.8.5", - "sha2 0.9.9", + "sha2 0.10.6", "unicode-normalization", "zeroize", ] 
@@ -517,7 +505,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -541,16 +529,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-modes" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb03d1bed155d89dce0f845b7899b18a9a163e148fd004e1c28421a783e2d8e" -dependencies = [ - "block-padding 0.2.1", - "cipher 0.3.0", -] - [[package]] name = "block-padding" version = "0.1.5" @@ -560,20 +538,14 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "bls12_381" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c196a77437e7cc2fb515ce413a6401291578b5afc8ecb29a3c7ab957f05941" +checksum = "d7bc6d6292be3a19e6379786dac800f551e5865a5bb51ebbe3064ab80433f403" dependencies = [ - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "pairing", "rand_core 0.6.4", "subtle 2.4.1", @@ -581,10 +553,11 @@ dependencies = [ [[package]] name = "borsh" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" dependencies = [ - "borsh-derive 0.9.4", + "borsh-derive 0.9.3", "hashbrown 0.11.2", ] @@ -598,13 +571,24 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "borsh" +version = "1.0.0-alpha.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "41898277107b0d3f027593697912977397eba6ac39a55bdd2eb02c1d5d5013b5" +dependencies = [ + "borsh-derive 1.0.0-alpha.4", + "cfg_aliases", +] + [[package]] name = "borsh-derive" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" dependencies = [ - "borsh-derive-internal 0.9.4", - "borsh-schema-derive-internal 0.9.4", + "borsh-derive-internal 0.9.3", + "borsh-schema-derive-internal 0.9.3", "proc-macro-crate 0.1.5", "proc-macro2", "syn 1.0.109", @@ -623,10 +607,25 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-derive" +version = "1.0.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413cb435569fe499e89235f758304e0e7198016baa351d8f5827ea0f40526ce0" +dependencies = [ + "once_cell", + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 2.0.16", + "syn_derive", +] + [[package]] name = "borsh-derive-internal" -version = "0.9.4" -source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ "proc-macro2", "quote", @@ -644,10 +643,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-ext" +version = "1.0.0-alpha.4" +source = "git+https://github.com/heliaxdev/borsh-ext?tag=v1.0.0-alpha.4#6bebf357002f96574ac37a28f547b6c88e91b799" +dependencies = [ + "borsh 1.0.0-alpha.4", +] + [[package]] name = "borsh-schema-derive-internal" -version = "0.9.4" -source = 
"git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ "proc-macro2", "quote", @@ -761,6 +769,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher 0.4.4", +] + [[package]] name = "cc" version = "1.0.79" @@ -779,6 +796,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chacha20" version = "0.8.2" @@ -1200,16 +1223,6 @@ dependencies = [ "subtle 2.4.1", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.4.1", -] - [[package]] name = "ct-codecs" version = "1.1.1" @@ -1447,6 +1460,7 @@ dependencies = [ "digest 0.10.6", "elliptic-curve", "rfc6979", + "serdect", "signature 2.1.0", "spki", ] @@ -1507,12 +1521,13 @@ dependencies = [ "base16ct", "crypto-bigint", "digest 0.10.6", - "ff 0.13.0", + "ff", "generic-array 0.14.7", - "group 0.13.0", + "group", "pkcs8", "rand_core 0.6.4", "sec1", + "serdect", "subtle 2.4.1", "zeroize", ] @@ -1627,7 +1642,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" dependencies = [ - "aes 0.8.3", + "aes", "ctr", "digest 0.10.6", "hex", @@ -1985,14 +2000,14 @@ dependencies = [ "bincode", "blake2", "blake2b_simd", - "borsh 0.9.4", + "borsh 0.9.3", "digest 0.10.6", "ed25519-dalek", "either", "ferveo-common", "group-threshold-cryptography", "hex", - "itertools", + "itertools 0.10.5", "measure_time", "miracl_core", "num 0.4.0", @@ -2019,23 +2034,13 @@ dependencies = [ "serde_bytes", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "bitvec 1.0.1", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "ff" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ + "bitvec 1.0.1", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2085,12 +2090,12 @@ dependencies = [ [[package]] name = "fpe" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd910db5f9ca4dc3116f8c46367825807aa2b942f72565f16b4be0b208a00a9e" +checksum = "26c4b37de5ae15812a764c958297cfc50f5c010438f60c6ce75d11b802abd404" dependencies = [ - "block-modes", - "cipher 0.3.0", + "cbc", + "cipher 0.4.4", "libm", "num-bigint 0.4.3", "num-integer", @@ -2296,25 +2301,14 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "memuse", - "rand_core 0.6.4", - "subtle 2.4.1", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" 
dependencies = [ - "ff 0.13.0", + "ff", + "memuse", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2334,7 +2328,7 @@ dependencies = [ "blake2b_simd", "chacha20 0.8.2", "hex", - "itertools", + "itertools 0.10.5", "miracl_core", "rand 0.8.5", "rand_core 0.6.4", @@ -2493,16 +2487,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.1", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.12.1" @@ -2523,17 +2507,6 @@ dependencies = [ "hmac 0.7.1", ] -[[package]] -name = "hmac-drbg" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" -dependencies = [ - "digest 0.9.0", - "generic-array 0.14.7", - "hmac 0.8.1", -] - [[package]] name = "hmac-sha512" version = "0.1.9" @@ -2661,7 +2634,7 @@ dependencies = [ [[package]] name = "ibc" version = "0.41.0" -source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=38a827d3901e590b2935ee5b6b81b4d67c399560#38a827d3901e590b2935ee5b6b81b4d67c399560" +source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=206cb5fa74a7ca38038b937d202ae39fbbd63c19#206cb5fa74a7ca38038b937d202ae39fbbd63c19" dependencies = [ "bytes", "cfg-if 1.0.0", @@ -2804,10 +2777,10 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "index-set" -version = "0.7.1" -source = "git+https://github.com/heliaxdev/index-set?tag=v0.7.1#dc24cdbbe3664514d59f1a4c4031863fc565f1c2" +version = "0.8.0" +source = "git+https://github.com/heliaxdev/index-set?tag=v0.8.0#0c218cc300c1bb7a1acf34f21b6e9d489df5fda8" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "serde", ] @@ -2900,6 +2873,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.6" @@ -2917,14 +2899,14 @@ dependencies = [ [[package]] name = "jubjub" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a575df5f985fe1cd5b2b05664ff6accfc46559032b954529fd225a2168d27b0f" +checksum = "8499f7a74008aafbecb2a2e608a3e13e4dd3e84df198b604451efe93f2de6e61" dependencies = [ "bitvec 1.0.1", "bls12_381", - "ff 0.12.1", - "group 0.12.1", + "ff", + "group", "rand_core 0.6.4", "subtle 2.4.1", ] @@ -2939,6 +2921,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell", + "serdect", "sha2 0.10.6", "signature 2.1.0", ] @@ -2995,57 +2978,13 @@ dependencies = [ "arrayref", "crunchy", "digest 0.8.1", - "hmac-drbg 0.2.0", + "hmac-drbg", "rand 0.7.3", "sha2 0.8.2", "subtle 2.4.1", "typenum", ] -[[package]] -name = "libsecp256k1" -version = "0.7.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "arrayref", - "base64 0.13.1", - "digest 0.9.0", - "hmac-drbg 0.3.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.5", - "serde", - "sha2 0.9.9", - "typenum", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle 2.4.1", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" 
-version = "0.3.0" -source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" -dependencies = [ - "libsecp256k1-core", -] - [[package]] name = "linux-raw-sys" version = "0.3.7" @@ -3101,9 +3040,9 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "0.2.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=77e009626f3f52fe83c81ec6ee38fc2547d38da3#77e009626f3f52fe83c81ec6ee38fc2547d38da3" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "chacha20 0.9.1", "chacha20poly1305", "cipher 0.4.4", @@ -3114,19 +3053,19 @@ dependencies = [ [[package]] name = "masp_primitives" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=77e009626f3f52fe83c81ec6ee38fc2547d38da3#77e009626f3f52fe83c81ec6ee38fc2547d38da3" dependencies = [ - "aes 0.7.5", + "aes", "bip0039", "bitvec 1.0.1", "blake2b_simd", "blake2s_simd", "bls12_381", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "byteorder", - "ff 0.12.1", + "ff", "fpe", - "group 0.12.1", + "group", "hex", "incrementalmerkletree", "jubjub", @@ -3137,7 +3076,7 @@ dependencies = [ "num-traits", "rand 0.8.5", "rand_core 0.6.4", - "sha2 0.9.9", + "sha2 0.10.6", "subtle 2.4.1", "zcash_encoding", ] @@ -3145,15 +3084,15 @@ dependencies = [ [[package]] name = "masp_proofs" version = "0.9.0" -source = "git+https://github.com/anoma/masp?rev=50acc5028fbcd52a05970fe7991c7850ab04358e#50acc5028fbcd52a05970fe7991c7850ab04358e" +source = "git+https://github.com/anoma/masp?rev=77e009626f3f52fe83c81ec6ee38fc2547d38da3#77e009626f3f52fe83c81ec6ee38fc2547d38da3" dependencies = [ "bellman", "blake2b_simd", "bls12_381", "directories", "getrandom 0.2.9", - "group 0.12.1", - 
"itertools", + "group", + "itertools 0.11.0", "jubjub", "lazy_static", "masp_primitives", @@ -3315,7 +3254,8 @@ version = "0.23.1" dependencies = [ "async-trait", "bimap", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "circular-queue", "clru", "data-encoding", @@ -3325,13 +3265,14 @@ dependencies = [ "ethers", "eyre", "futures", - "itertools", + "itertools 0.10.5", "loupe", "masp_primitives", "masp_proofs", "namada_core", "namada_ethereum_bridge", "namada_proof_of_stake", + "namada_sdk", "num256", "orion", "owo-colors", @@ -3376,7 +3317,8 @@ dependencies = [ "ark-ec", "ark-serialize", "bech32 0.8.1", - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "chrono", "data-encoding", "derivative", @@ -3392,8 +3334,8 @@ dependencies = [ "ics23", "impl-num-traits", "index-set", - "itertools", - "libsecp256k1 0.7.0", + "itertools 0.10.5", + "k256", "masp_primitives", "namada_macros", "num-integer", @@ -3425,10 +3367,11 @@ dependencies = [ name = "namada_ethereum_bridge" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "ethers", "eyre", - "itertools", + "itertools 0.10.5", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -3454,7 +3397,7 @@ dependencies = [ name = "namada_proof_of_stake" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "data-encoding", "derivative", "namada_core", @@ -3464,11 +3407,55 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_sdk" +version = "0.23.1" +dependencies = [ + "async-trait", + "bimap", + "borsh 1.0.0-alpha.4", + "borsh-ext", + "circular-queue", + "data-encoding", + "derivation-path", + "ethbridge-bridge-contract", + "ethers", + "futures", + "itertools 0.10.5", + "masp_primitives", + "masp_proofs", + "namada_core", + "namada_ethereum_bridge", + "namada_proof_of_stake", + "num256", + "orion", + "owo-colors", + "parse_duration", + "paste", + "prost", + "rand 0.8.5", + "rand_core 0.6.4", + "ripemd", + "serde", + "serde_json", + "sha2 
0.9.9", + "slip10_ed25519", + "tendermint-rpc", + "thiserror", + "tiny-bip39", + "tiny-hderive", + "tokio", + "toml 0.5.11", + "tracing", + "wasmtimer", + "zeroize", +] + [[package]] name = "namada_test_utils" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "namada_core", "strum", ] @@ -3486,6 +3473,7 @@ dependencies = [ "lazy_static", "namada", "namada_core", + "namada_sdk", "namada_test_utils", "namada_tx_prelude", "namada_vp_prelude", @@ -3507,7 +3495,8 @@ dependencies = [ name = "namada_tx_prelude" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "masp_primitives", "namada_core", "namada_macros", @@ -3521,7 +3510,7 @@ dependencies = [ name = "namada_vm_env" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "masp_primitives", "namada_core", ] @@ -3530,7 +3519,8 @@ dependencies = [ name = "namada_vp_prelude" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", + "borsh-ext", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -3543,7 +3533,7 @@ dependencies = [ name = "namada_wasm_for_tests" version = "0.23.1" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "getrandom 0.2.9", "namada_test_utils", "namada_tests", @@ -3761,9 +3751,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -3828,11 +3818,11 @@ checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "pairing" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b" +checksum = 
"81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" dependencies = [ - "group 0.12.1", + "group", ] [[package]] @@ -3912,6 +3902,19 @@ dependencies = [ "subtle 2.4.1", ] +[[package]] +name = "pasta_curves" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" +dependencies = [ + "ff", + "group", + "rand 0.8.5", + "static_assertions", + "subtle 2.4.1", +] + [[package]] name = "paste" version = "1.0.12" @@ -3929,11 +3932,11 @@ dependencies = [ [[package]] name = "pbkdf2" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" +checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" dependencies = [ - "crypto-mac 0.11.1", + "digest 0.10.6", "password-hash", ] @@ -4202,7 +4205,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -4223,7 +4226,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", @@ -4390,16 +4393,31 @@ dependencies = [ ] [[package]] -name = "redjubjub" -version = "0.5.0" +name = "reddsa" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6039ff156887caf92df308cbaccdc058c9d3155a913da046add6e48c4cdbd91d" +checksum = "78a5191930e84973293aa5f532b513404460cd2216c1cfb76d08748c15b40b02" dependencies = [ "blake2b_simd", "byteorder", - "digest 0.9.0", + "group", + "hex", "jubjub", + "pasta_curves", + "rand_core 0.6.4", + "serde", + "thiserror", + "zeroize", +] + +[[package]] +name = "redjubjub" +version = "0.7.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a60db2c3bc9c6fd1e8631fee75abc008841d27144be744951d6b9b75f9b569c" +dependencies = [ "rand_core 0.6.4", + "reddsa", "serde", "thiserror", "zeroize", @@ -4876,6 +4894,7 @@ dependencies = [ "der", "generic-array 0.14.7", "pkcs8", + "serdect", "subtle 2.4.1", "zeroize", ] @@ -5024,6 +5043,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + [[package]] name = "sha1" version = "0.10.5" @@ -5164,9 +5193,9 @@ dependencies = [ [[package]] name = "sparse-merkle-tree" version = "0.3.1-pre" -source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=e086b235ed6e68929bf73f617dd61cd17b000a56#e086b235ed6e68929bf73f617dd61cd17b000a56" +source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=df7ec062e7c40d5e76b136064e9aaf8bd2490750#df7ec062e7c40d5e76b136064e9aaf8bd2490750" dependencies = [ - "borsh 0.9.4", + "borsh 1.0.0-alpha.4", "cfg-if 1.0.0", "ics23", "sha2 0.9.9", @@ -5290,6 +5319,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae6eef0000c4a12ecdfd7873ea84a8b5aab5e44db72e38e07b028a25386f29a5" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.16", +] + [[package]] name = "tap" version = "1.0.1" @@ -5546,7 +5587,7 @@ checksum = "01b874a4992538d4b2f4fbbac11b9419d685f4b39bdc3fed95b04e07bfd76040" dependencies = [ "base58", "hmac 0.7.1", - "libsecp256k1 0.3.5", + "libsecp256k1", "memzero", "sha2 0.8.2", ] @@ -6641,8 +6682,8 @@ dependencies = [ [[package]] name = "zcash_encoding" -version = "0.0.0" -source = "git+https://github.com/zcash/librustzcash?rev=43c18d0#43c18d000fcbe45363b2d53585d5102841eff99e" +version = 
"0.2.0" +source = "git+https://github.com/zcash/librustzcash?rev=bd7f9d7#bd7f9d7c3ce5cfd14af169ffe0e1c5c903162f46" dependencies = [ "byteorder", "nonempty", diff --git a/wasm_for_tests/wasm_source/Cargo.toml b/wasm_for_tests/wasm_source/Cargo.toml index 9bab1aff5d..10a8fc84a2 100644 --- a/wasm_for_tests/wasm_source/Cargo.toml +++ b/wasm_for_tests/wasm_source/Cargo.toml @@ -27,17 +27,10 @@ tx_proposal_code = [] namada_test_utils = {path = "../../test_utils"} namada_tx_prelude = {path = "../../tx_prelude"} namada_vp_prelude = {path = "../../vp_prelude"} -borsh = "0.9.1" +borsh = "1.0.0-alpha.4" wee_alloc = "0.4.5" getrandom = { version = "0.2", features = ["custom"] } -[patch.crates-io] -# TODO temp patch for , and more tba. -borsh = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} -borsh-schema-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} - [dev-dependencies] namada_tests = {path = "../../tests"}