diff --git a/.changelog/v0.14.0/bug/354-ibc-token-vp-for-transfer.md b/.changelog/v0.14.0/bug-fixes/354-ibc-token-vp-for-transfer.md
similarity index 100%
rename from .changelog/v0.14.0/bug/354-ibc-token-vp-for-transfer.md
rename to .changelog/v0.14.0/bug-fixes/354-ibc-token-vp-for-transfer.md
diff --git a/.changelog/v0.14.1/bug-fixes/1146-fix-inactive-set-update.md b/.changelog/v0.14.1/bug-fixes/1146-fix-inactive-set-update.md
new file mode 100644
index 00000000000..119de7663f1
--- /dev/null
+++ b/.changelog/v0.14.1/bug-fixes/1146-fix-inactive-set-update.md
@@ -0,0 +1,2 @@
+- Fix Tendermint validator set update to properly skip validators with no voting
+  power. ([#1146](https://github.com/anoma/namada/pull/1146))
\ No newline at end of file
diff --git a/.changelog/v0.14.1/summary.md b/.changelog/v0.14.1/summary.md
new file mode 100644
index 00000000000..efd5a23a8f5
--- /dev/null
+++ b/.changelog/v0.14.1/summary.md
@@ -0,0 +1,2 @@
+Namada 0.14.1 is a bugfix release addressing issues with inactive
+validator set updates in proof of stake.
diff --git a/.changelog/unreleased/bug-fixes/1141-fix-wl-storage-prefix-iter-ordering.md b/.changelog/v0.14.2/bug-fixes/1141-fix-wl-storage-prefix-iter-ordering.md
similarity index 100%
rename from .changelog/unreleased/bug-fixes/1141-fix-wl-storage-prefix-iter-ordering.md
rename to .changelog/v0.14.2/bug-fixes/1141-fix-wl-storage-prefix-iter-ordering.md
diff --git a/.changelog/v0.14.2/bug-fixes/1182-dont-persist-genesis-on-init-chain.md b/.changelog/v0.14.2/bug-fixes/1182-dont-persist-genesis-on-init-chain.md
new file mode 100644
index 00000000000..179f39c7ca5
--- /dev/null
+++ b/.changelog/v0.14.2/bug-fixes/1182-dont-persist-genesis-on-init-chain.md
@@ -0,0 +1,4 @@
+- Fixed the init-chain handler to stop committing state to the DB
+  as it may be re-applied when the node is shut down before the
+  first block is committed, leading to an invalid genesis state.
+  ([#1182](https://github.com/anoma/namada/pull/1182))
\ No newline at end of file
diff --git a/.changelog/v0.14.2/bug-fixes/1191-pos-sm-test.md b/.changelog/v0.14.2/bug-fixes/1191-pos-sm-test.md
new file mode 100644
index 00000000000..f5a854834b1
--- /dev/null
+++ b/.changelog/v0.14.2/bug-fixes/1191-pos-sm-test.md
@@ -0,0 +1,4 @@
+- Fixed an issue in which a validator's stake and validator sets
+  data get into an invalid state (duplicate records with incorrect
+  values) due to a logic error in clearing of historical epoch data.
+  ([#1191](https://github.com/anoma/namada/pull/1191))
\ No newline at end of file
diff --git a/.changelog/v0.14.2/features/1196-lazy-set.md b/.changelog/v0.14.2/features/1196-lazy-set.md
new file mode 100644
index 00000000000..d2f2333563a
--- /dev/null
+++ b/.changelog/v0.14.2/features/1196-lazy-set.md
@@ -0,0 +1,2 @@
+- Added a lazy set collection.
+  ([#1196](https://github.com/anoma/namada/pull/1196))
diff --git a/.changelog/v0.14.2/improvements/1197-pos-unique-consensus-keys.md b/.changelog/v0.14.2/improvements/1197-pos-unique-consensus-keys.md
new file mode 100644
index 00000000000..c748b013ded
--- /dev/null
+++ b/.changelog/v0.14.2/improvements/1197-pos-unique-consensus-keys.md
@@ -0,0 +1,2 @@
+- Ensure that PoS validator consensus keys are unique.
+  ([#1197](https://github.com/anoma/namada/pull/1197))
\ No newline at end of file
diff --git a/.changelog/v0.14.2/summary.md b/.changelog/v0.14.2/summary.md
new file mode 100644
index 00000000000..126d08249ff
--- /dev/null
+++ b/.changelog/v0.14.2/summary.md
@@ -0,0 +1,2 @@
+Namada 0.14.2 is a maintenance release addressing issues with
+proof-of-stake validator logic.
diff --git a/.changelog/v0.14.3/bug-fixes/1140-check-pre-genesis-pk.md b/.changelog/v0.14.3/bug-fixes/1140-check-pre-genesis-pk.md
new file mode 100644
index 00000000000..5c377c1b8f5
--- /dev/null
+++ b/.changelog/v0.14.3/bug-fixes/1140-check-pre-genesis-pk.md
@@ -0,0 +1,2 @@
+- Check that validators are valid in the pre-genesis setup.
+  ([#1140](https://github.com/anoma/namada/pull/1140))
\ No newline at end of file
diff --git a/.changelog/v0.14.3/bug-fixes/1244-conversion-loading-fix.md b/.changelog/v0.14.3/bug-fixes/1244-conversion-loading-fix.md
new file mode 100644
index 00000000000..16594a471b5
--- /dev/null
+++ b/.changelog/v0.14.3/bug-fixes/1244-conversion-loading-fix.md
@@ -0,0 +1,2 @@
+- Now load conversions from storage even for epoch 1.
+  ([\#1244](https://github.com/anoma/namada/pull/1244))
\ No newline at end of file
diff --git a/.changelog/v0.14.3/improvements/1113-write-tree-stores.md b/.changelog/v0.14.3/improvements/1113-write-tree-stores.md
new file mode 100644
index 00000000000..573166cb8a1
--- /dev/null
+++ b/.changelog/v0.14.3/improvements/1113-write-tree-stores.md
@@ -0,0 +1,2 @@
+- Write Merkle tree stores only when a new epoch starts.
+  ([#1113](https://github.com/anoma/namada/issues/1113))
\ No newline at end of file
diff --git a/.changelog/v0.14.3/improvements/1237-prune_tree_stores.md b/.changelog/v0.14.3/improvements/1237-prune_tree_stores.md
new file mode 100644
index 00000000000..edabf26c4bb
--- /dev/null
+++ b/.changelog/v0.14.3/improvements/1237-prune_tree_stores.md
@@ -0,0 +1,2 @@
+- Prune old Merkle tree stores.
+  ([#1237](https://github.com/anoma/namada/pull/1237))
\ No newline at end of file
diff --git a/.changelog/v0.14.3/summary.md b/.changelog/v0.14.3/summary.md
new file mode 100644
index 00000000000..3d898c4dc55
--- /dev/null
+++ b/.changelog/v0.14.3/summary.md
@@ -0,0 +1,2 @@
+Namada 0.14.3 is a bugfix release addressing mainly disk usage
+inefficiencies.
diff --git a/.changelog/v0.14.3/testing/1131-fix-e2e-ledger-reset-in-dbg.md b/.changelog/v0.14.3/testing/1131-fix-e2e-ledger-reset-in-dbg.md
new file mode 100644
index 00000000000..a99c187da27
--- /dev/null
+++ b/.changelog/v0.14.3/testing/1131-fix-e2e-ledger-reset-in-dbg.md
@@ -0,0 +1,2 @@
+- Fixed the run_ledger_load_state_and_reset test in the debug build.
+  ([#1131](https://github.com/anoma/namada/pull/1131))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/bug-fixes/1116-fix-batch-delete.md b/.changelog/v0.15.0/bug-fixes/1116-fix-batch-delete.md
new file mode 100644
index 00000000000..cd9c1641ed2
--- /dev/null
+++ b/.changelog/v0.15.0/bug-fixes/1116-fix-batch-delete.md
@@ -0,0 +1,2 @@
+- Fixed batch delete to read the previous value
+  ([#1116](https://github.com/anoma/namada/issues/1116))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/bug-fixes/1154-fix-proof-query.md b/.changelog/v0.15.0/bug-fixes/1154-fix-proof-query.md
new file mode 100644
index 00000000000..1cd60f941d7
--- /dev/null
+++ b/.changelog/v0.15.0/bug-fixes/1154-fix-proof-query.md
@@ -0,0 +1,2 @@
+- Return an error when getting a proof of a non-committed block
+  ([#1154](https://github.com/anoma/namada/issues/1154))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/bug-fixes/1184-rocksdb-dump.md b/.changelog/v0.15.0/bug-fixes/1184-rocksdb-dump.md
new file mode 100644
index 00000000000..19ad1dd0d0b
--- /dev/null
+++ b/.changelog/v0.15.0/bug-fixes/1184-rocksdb-dump.md
@@ -0,0 +1,3 @@
+- Fixed the dump-db node utility, which was not iterating on DB keys correctly,
+  leading to duplicates in the dump. Added a historic flag to also dump the
+  diff keys. ([#1184](https://github.com/anoma/namada/pull/1184))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/bug-fixes/1212-lazy-collection-sub-key.md b/.changelog/v0.15.0/bug-fixes/1212-lazy-collection-sub-key.md
new file mode 100644
index 00000000000..49d1c5dd578
--- /dev/null
+++ b/.changelog/v0.15.0/bug-fixes/1212-lazy-collection-sub-key.md
@@ -0,0 +1,3 @@
+- Fixed an issue with lazy collections sub-key validation with the `Address`
+  type. This issue was also affecting the iterator of nested `LazyMap`.
+  ([#1212](https://github.com/anoma/namada/pull/1212))
diff --git a/.changelog/v0.15.0/bug-fixes/1239-fix-bonding-query-logging.md b/.changelog/v0.15.0/bug-fixes/1239-fix-bonding-query-logging.md
new file mode 100644
index 00000000000..fdd59285754
--- /dev/null
+++ b/.changelog/v0.15.0/bug-fixes/1239-fix-bonding-query-logging.md
@@ -0,0 +1,2 @@
+- Fixed various aspects of the CLI output for querying bonds and performing an
+  unbond action. ([#1239](https://github.com/anoma/namada/pull/1239))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/bug-fixes/1246-fix-pos-slashing.md b/.changelog/v0.15.0/bug-fixes/1246-fix-pos-slashing.md
new file mode 100644
index 00000000000..797a75230a5
--- /dev/null
+++ b/.changelog/v0.15.0/bug-fixes/1246-fix-pos-slashing.md
@@ -0,0 +1,3 @@
+- PoS: Fixed an issue with slashable evidence processed
+  and applied at a new epoch causing the ledger to crash.
+  ([#1246](https://github.com/anoma/namada/pull/1246))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/bug-fixes/1256-fix-addr-storage-key-ord.md b/.changelog/v0.15.0/bug-fixes/1256-fix-addr-storage-key-ord.md
new file mode 100644
index 00000000000..64271bba362
--- /dev/null
+++ b/.changelog/v0.15.0/bug-fixes/1256-fix-addr-storage-key-ord.md
@@ -0,0 +1,3 @@
+- Addresses are now ordered by their string format (bech32m)
+  to ensure that their order is preserved inside raw storage keys.
+  ([#1256](https://github.com/anoma/namada/pull/1256))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/bug-fixes/1263-client-check-bond-from-validator.md b/.changelog/v0.15.0/bug-fixes/1263-client-check-bond-from-validator.md
new file mode 100644
index 00000000000..1b4751eadf4
--- /dev/null
+++ b/.changelog/v0.15.0/bug-fixes/1263-client-check-bond-from-validator.md
@@ -0,0 +1,2 @@
+- Prevent clients from delegating from a validator account to another validator
+  account. ([#1263](https://github.com/anoma/namada/pull/1263))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/features/1056-governance-custom-proposals.md b/.changelog/v0.15.0/features/1056-governance-custom-proposals.md
new file mode 100644
index 00000000000..d8395c16ff8
--- /dev/null
+++ b/.changelog/v0.15.0/features/1056-governance-custom-proposals.md
@@ -0,0 +1,2 @@
+- Implements custom governance proposals
+  ([#1056](https://github.com/anoma/namada/pull/1056))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/features/1123-tx-lifetime.md b/.changelog/v0.15.0/features/1123-tx-lifetime.md
new file mode 100644
index 00000000000..44b51be3f08
--- /dev/null
+++ b/.changelog/v0.15.0/features/1123-tx-lifetime.md
@@ -0,0 +1,2 @@
+- Adds an expiration field to transactions
+  ([#1123](https://github.com/anoma/namada/pull/1123))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/features/1187-rollback.md b/.changelog/v0.15.0/features/1187-rollback.md
new file mode 100644
index 00000000000..6a08eacfff0
--- /dev/null
+++ b/.changelog/v0.15.0/features/1187-rollback.md
@@ -0,0 +1,2 @@
+- Added a rollback command to revert the Namada state to that of the previous
+  block. ([#1187](https://github.com/anoma/namada/pull/1187))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/features/1189-stop-at-height.md b/.changelog/v0.15.0/features/1189-stop-at-height.md
new file mode 100644
index 00000000000..d8df5a6ede3
--- /dev/null
+++ b/.changelog/v0.15.0/features/1189-stop-at-height.md
@@ -0,0 +1,5 @@
+- Introduced a new ledger sub-command: `run-until`. At the provided block
+  height, the node will either halt or suspend. If the chain is suspended, only
+  the consensus connection is suspended. This means that the node can still be
+  queried. This is useful for debugging purposes.
+  ([#1189](https://github.com/anoma/namada/pull/1189))
diff --git a/.changelog/v0.15.0/features/714-pos-inflation-rewards.md b/.changelog/v0.15.0/features/714-pos-inflation-rewards.md
new file mode 100644
index 00000000000..e6e4c6b5773
--- /dev/null
+++ b/.changelog/v0.15.0/features/714-pos-inflation-rewards.md
@@ -0,0 +1,6 @@
+- Infrastructure for PoS inflation and rewards. Includes inflation
+  using the PD controller mechanism and rewards based on validator block voting
+  behavior. Rewards are tracked and effectively distributed using the F1 fee
+  mechanism. In this PR, rewards are calculated and stored, but they are not
+  yet applied to voting powers or considered when unbonding and withdrawing.
+  ([#714](https://github.com/anoma/namada/pull/714))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/improvements/1017-replay-protection-impl.md b/.changelog/v0.15.0/improvements/1017-replay-protection-impl.md
new file mode 100644
index 00000000000..1783a892517
--- /dev/null
+++ b/.changelog/v0.15.0/improvements/1017-replay-protection-impl.md
@@ -0,0 +1,2 @@
+- Adds hash-based replay protection
+  ([#1017](https://github.com/anoma/namada/pull/1017))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/improvements/1031-rename-ledger-address-to-node.md b/.changelog/v0.15.0/improvements/1031-rename-ledger-address-to-node.md
new file mode 100644
index 00000000000..6173f9e5e7d
--- /dev/null
+++ b/.changelog/v0.15.0/improvements/1031-rename-ledger-address-to-node.md
@@ -0,0 +1,2 @@
+- Renamed the "ledger-address" CLI argument to "node".
+  ([#1031](https://github.com/anoma/namada/pull/1031))
diff --git a/.changelog/v0.15.0/improvements/1051-temp-wl-storage.md b/.changelog/v0.15.0/improvements/1051-temp-wl-storage.md
new file mode 100644
index 00000000000..5be4294bd67
--- /dev/null
+++ b/.changelog/v0.15.0/improvements/1051-temp-wl-storage.md
@@ -0,0 +1,3 @@
+- Added a TempWlStorage for storage_api::StorageRead/Write
+  in the ABCI++ prepare/process proposal handler.
+  ([#1051](https://github.com/anoma/namada/pull/1051))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/improvements/1081-wallet-tokens.md b/.changelog/v0.15.0/improvements/1081-wallet-tokens.md
new file mode 100644
index 00000000000..0a74331d3f6
--- /dev/null
+++ b/.changelog/v0.15.0/improvements/1081-wallet-tokens.md
@@ -0,0 +1,3 @@
+- Added a wallet section for token addresses to replace hard-
+  coded values with addresses loaded from the genesis configuration.
+  ([#1081](https://github.com/anoma/namada/pull/1081))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/improvements/1087-time-docs.md b/.changelog/v0.15.0/improvements/1087-time-docs.md
new file mode 100644
index 00000000000..d1e598e4736
--- /dev/null
+++ b/.changelog/v0.15.0/improvements/1087-time-docs.md
@@ -0,0 +1,2 @@
+- Improved the CLI description of the start-time node argument.
+  ([#1087](https://github.com/anoma/namada/pull/1087))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/improvements/1106-tx-chain-id.md b/.changelog/v0.15.0/improvements/1106-tx-chain-id.md
new file mode 100644
index 00000000000..187ec93ca74
--- /dev/null
+++ b/.changelog/v0.15.0/improvements/1106-tx-chain-id.md
@@ -0,0 +1,2 @@
+- Adds a chain ID field to transactions
+  ([#1106](https://github.com/anoma/namada/pull/1106))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/improvements/1109-help-text-fix.md b/.changelog/v0.15.0/improvements/1109-help-text-fix.md
new file mode 100644
index 00000000000..cb94ba7ec34
--- /dev/null
+++ b/.changelog/v0.15.0/improvements/1109-help-text-fix.md
@@ -0,0 +1,3 @@
+- Update the help text of namadac utils join-network so that the URL
+  displays cleanly on a single line, instead of being cut halfway.
+  ([#1109](https://github.com/anoma/namada/pull/1109))
diff --git a/.changelog/v0.15.0/improvements/1258-improve-cli-check.md b/.changelog/v0.15.0/improvements/1258-improve-cli-check.md
new file mode 100644
index 00000000000..c8c9f3a1655
--- /dev/null
+++ b/.changelog/v0.15.0/improvements/1258-improve-cli-check.md
@@ -0,0 +1,3 @@
+- Check in the client that the ledger node has at least one
+  block and is synced before submitting transactions and queries.
+  ([#1258](https://github.com/anoma/namada/pull/1258))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/improvements/856-amount-is-zero.md b/.changelog/v0.15.0/improvements/856-amount-is-zero.md
new file mode 100644
index 00000000000..a70f0194262
--- /dev/null
+++ b/.changelog/v0.15.0/improvements/856-amount-is-zero.md
@@ -0,0 +1,2 @@
+- Return early in PosBase::transfer if an attempt is made to transfer zero
+  tokens ([#856](https://github.com/anoma/namada/pull/856))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/miscellaneous/1163-update-rocksdb-0.20.1.md b/.changelog/v0.15.0/miscellaneous/1163-update-rocksdb-0.20.1.md
new file mode 100644
index 00000000000..75c517360f4
--- /dev/null
+++ b/.changelog/v0.15.0/miscellaneous/1163-update-rocksdb-0.20.1.md
@@ -0,0 +1,2 @@
+- Updated RocksDB to v0.20.1.
+  ([#1163](https://github.com/anoma/namada/pull/1163))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/miscellaneous/796-ethbridge-e2e-cleanup.md b/.changelog/v0.15.0/miscellaneous/796-ethbridge-e2e-cleanup.md
new file mode 100644
index 00000000000..738678102cc
--- /dev/null
+++ b/.changelog/v0.15.0/miscellaneous/796-ethbridge-e2e-cleanup.md
@@ -0,0 +1,2 @@
+- Clean up some code relating to the Ethereum bridge
+  ([#796](https://github.com/anoma/namada/pull/796))
\ No newline at end of file
diff --git a/.changelog/v0.15.0/summary.md b/.changelog/v0.15.0/summary.md
new file mode 100644
index 00000000000..259f3843102
--- /dev/null
+++ b/.changelog/v0.15.0/summary.md
@@ -0,0 +1,2 @@
+Namada 0.15.0 is a regular minor release featuring various
+implementation improvements.
diff --git a/.changelog/v0.15.0/testing/893-namada-test-utils-wasms.md b/.changelog/v0.15.0/testing/893-namada-test-utils-wasms.md
new file mode 100644
index 00000000000..a345f0b8e5b
--- /dev/null
+++ b/.changelog/v0.15.0/testing/893-namada-test-utils-wasms.md
@@ -0,0 +1,2 @@
+- Add utility code for working with test wasms
+  ([#893](https://github.com/anoma/namada/pull/893))
\ No newline at end of file
diff --git a/.changelog/v0.15.1/improvements/1278-opt_tx_index.md b/.changelog/v0.15.1/improvements/1278-opt_tx_index.md
new file mode 100644
index 00000000000..efbb2905ab1
--- /dev/null
+++ b/.changelog/v0.15.1/improvements/1278-opt_tx_index.md
@@ -0,0 +1,2 @@
+- Disable Tendermint tx_index by default
+  ([#1278](https://github.com/anoma/namada/issues/1278))
\ No newline at end of file
diff --git a/.changelog/v0.15.1/improvements/1297-tx-wasm-hash.md b/.changelog/v0.15.1/improvements/1297-tx-wasm-hash.md
new file mode 100644
index 00000000000..db875efaaf5
--- /dev/null
+++ b/.changelog/v0.15.1/improvements/1297-tx-wasm-hash.md
@@ -0,0 +1 @@
+- Remove wasm code from tx ([#1297](https://github.com/anoma/namada/issues/1297))
\ No newline at end of file
diff --git a/.changelog/v0.15.1/summary.md b/.changelog/v0.15.1/summary.md
new file mode 100644
index 00000000000..ae4fd2d55ad
--- /dev/null
+++ b/.changelog/v0.15.1/summary.md
@@ -0,0 +1,2 @@
+Namada 0.15.1 is a patch release addressing issues with high storage
+usage due to duplicative storage of wasm code.
diff --git a/.changelog/v0.15.2/bug-fixes/1218-nested-lazy-vec-iter.md b/.changelog/v0.15.2/bug-fixes/1218-nested-lazy-vec-iter.md
new file mode 100644
index 00000000000..248712d18e1
--- /dev/null
+++ b/.changelog/v0.15.2/bug-fixes/1218-nested-lazy-vec-iter.md
@@ -0,0 +1,3 @@
+- Fixed an issue with the iterator of LazyMap with a nested LazyVec collection
+  that would match non-data keys and fail to decode those with the data decoder.
+  ([#1218](https://github.com/anoma/namada/pull/1218))
diff --git a/.changelog/v0.15.2/bug-fixes/1325-fix-update-data-epoched.md b/.changelog/v0.15.2/bug-fixes/1325-fix-update-data-epoched.md
new file mode 100644
index 00000000000..b36100c9eb5
--- /dev/null
+++ b/.changelog/v0.15.2/bug-fixes/1325-fix-update-data-epoched.md
@@ -0,0 +1,2 @@
+- PoS: fixed a function for clearing historical epoched data
+  ([\#1325](https://github.com/anoma/namada/pull/1325))
\ No newline at end of file
diff --git a/.changelog/v0.15.2/features/1152-pk-to-tm.md b/.changelog/v0.15.2/features/1152-pk-to-tm.md
new file mode 100644
index 00000000000..98139615bed
--- /dev/null
+++ b/.changelog/v0.15.2/features/1152-pk-to-tm.md
@@ -0,0 +1,2 @@
+- Added a utility command to the CLI to compute a Tendermint address from a
+  Namada public key. ([#1152](https://github.com/anoma/namada/pull/1152))
\ No newline at end of file
diff --git a/.changelog/v0.15.2/improvements/1138-base-directory.md b/.changelog/v0.15.2/improvements/1138-base-directory.md
new file mode 100644
index 00000000000..0231900b8c1
--- /dev/null
+++ b/.changelog/v0.15.2/improvements/1138-base-directory.md
@@ -0,0 +1,2 @@
+- Changed the default base directory. On Linux, the default path will be `$XDG_DATA_HOME/namada`; on macOS it will be `$HOME/Library/Application Support/com.heliax.namada`.
+  ([#1138](https://github.com/anoma/namada/pull/1138))
diff --git a/.changelog/v0.15.2/improvements/1333-rocksdb_optimization.md b/.changelog/v0.15.2/improvements/1333-rocksdb_optimization.md
new file mode 100644
index 00000000000..cd64883db34
--- /dev/null
+++ b/.changelog/v0.15.2/improvements/1333-rocksdb_optimization.md
@@ -0,0 +1,2 @@
+- RocksDB optimization to reduce storage usage
+  ([#1333](https://github.com/anoma/namada/issues/1333))
\ No newline at end of file
diff --git a/.changelog/v0.15.2/miscellaneous/1295-overflow-check-in-release.md b/.changelog/v0.15.2/miscellaneous/1295-overflow-check-in-release.md
new file mode 100644
index 00000000000..b0afa992680
--- /dev/null
+++ b/.changelog/v0.15.2/miscellaneous/1295-overflow-check-in-release.md
@@ -0,0 +1,2 @@
+- Enabled integer overflow checks in the release build.
+  ([#1295](https://github.com/anoma/namada/pull/1295))
\ No newline at end of file
diff --git a/.changelog/v0.15.2/summary.md b/.changelog/v0.15.2/summary.md
new file mode 100644
index 00000000000..5a39973f8cc
--- /dev/null
+++ b/.changelog/v0.15.2/summary.md
@@ -0,0 +1,2 @@
+Namada 0.15.2 is a bugfix release containing various fixes, including
+a major improvement to storage usage.
diff --git a/.changelog/v0.15.3/bug-fixes/1368-base-directory-windows-local.md b/.changelog/v0.15.3/bug-fixes/1368-base-directory-windows-local.md
new file mode 100644
index 00000000000..05e48b5ba96
--- /dev/null
+++ b/.changelog/v0.15.3/bug-fixes/1368-base-directory-windows-local.md
@@ -0,0 +1,2 @@
+- Place the default data directory in the local rather than the roaming profile
+  on Windows. ([#1368](https://github.com/anoma/namada/pull/1368))
\ No newline at end of file
diff --git a/.changelog/v0.15.3/bug-fixes/1369-base-directory-organizations.md b/.changelog/v0.15.3/bug-fixes/1369-base-directory-organizations.md
new file mode 100644
index 00000000000..20953230475
--- /dev/null
+++ b/.changelog/v0.15.3/bug-fixes/1369-base-directory-organizations.md
@@ -0,0 +1,3 @@
+- Use a blank qualifier and organization, and an upcased Namada, to
+  construct default base directories on Mac and Windows platforms.
+  ([#1369](https://github.com/anoma/namada/pull/1369))
\ No newline at end of file
diff --git a/.changelog/v0.15.3/summary.md b/.changelog/v0.15.3/summary.md
new file mode 100644
index 00000000000..3e0215b170a
--- /dev/null
+++ b/.changelog/v0.15.3/summary.md
@@ -0,0 +1,2 @@
+Namada 0.15.3 is a maintenance release addressing the creation of
+incorrect data directories on Mac and Windows platforms.
diff --git a/.changelog/v0.16.0/docs/1275-dev-docs-pagetoc.md b/.changelog/v0.16.0/docs/1275-dev-docs-pagetoc.md
new file mode 100644
index 00000000000..2cbf2d7fdf8
--- /dev/null
+++ b/.changelog/v0.16.0/docs/1275-dev-docs-pagetoc.md
@@ -0,0 +1,2 @@
+- Added a page table of contents via the mdbook-pagetoc plugin for the developer
+  documentation. ([#1275](https://github.com/anoma/namada/pull/1275))
\ No newline at end of file
diff --git a/.changelog/v0.16.0/improvements/1366-bump-rocksdb.md b/.changelog/v0.16.0/improvements/1366-bump-rocksdb.md
new file mode 100644
index 00000000000..a3cc84a77e5
--- /dev/null
+++ b/.changelog/v0.16.0/improvements/1366-bump-rocksdb.md
@@ -0,0 +1,2 @@
+- Bump the RocksDB crate to 0.21.0 to address compilation errors on certain C++
+  toolchains. ([#1366](https://github.com/anoma/namada/pull/1366))
\ No newline at end of file
diff --git a/.changelog/v0.16.0/summary.md b/.changelog/v0.16.0/summary.md
new file mode 100644
index 00000000000..663b58bbef6
--- /dev/null
+++ b/.changelog/v0.16.0/summary.md
@@ -0,0 +1,2 @@
+Namada 0.16.0 is a regular release focused on providing the Namada SDK
+to developers.
diff --git a/.github/workflows/build-and-test-bridge.yml b/.github/workflows/build-and-test-bridge.yml
index 1e0827f724a..6520413e925 100644
--- a/.github/workflows/build-and-test-bridge.yml
+++ b/.github/workflows/build-and-test-bridge.yml
@@ -30,7 +30,7 @@ jobs:
     timeout-minutes: 30
     runs-on: ${{ matrix.os }}
     container:
-      image: ghcr.io/anoma/namada:wasm-0.11.0
+      image: ghcr.io/anoma/namada:wasm-main
     strategy:
       fail-fast: false
       matrix:
@@ -72,7 +72,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     needs: [build-wasm]
     container:
-      image: ghcr.io/anoma/namada:wasm-0.8.0
+      image: ghcr.io/anoma/namada:wasm-main
     strategy:
       fail-fast: false
      matrix:
@@ -175,6 +175,10 @@ jobs:
        with:
          role-to-assume: arn:aws:iam::375643557360:role/anoma-github-action-ci-master
          aws-region: eu-west-1
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install sccache (ubuntu-20.04)
        if: matrix.os == 'ubuntu-20.04'
        env:
@@ -276,6 +280,10 @@ jobs:
        with:
          role-to-assume: arn:aws:iam::375643557360:role/anoma-github-action-ci-master
          aws-region: eu-west-1
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install sccache (ubuntu-20.04)
        if: matrix.os == 'ubuntu-20.04'
        env:
@@ -349,7 +357,7 @@ jobs:
      fail-fast: false
      matrix:
        os: [ubuntu-20.04]
-        nightly_version: [nightly-2022-05-20]
+        nightly_version: [nightly-2022-11-03]
        mold_version: [1.7.0]
        make:
          - name: e2e
@@ -389,6 +397,10 @@ jobs:
        with:
          role-to-assume: arn:aws:iam::375643557360:role/anoma-github-action-ci-master
          aws-region: eu-west-1
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install sccache (ubuntu-20.04)
        if: matrix.os == 'ubuntu-20.04'
        env:
diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
index ae3be0fb0b1..b07e2212e9b 100644
--- a/.github/workflows/build-and-test.yml
+++ b/.github/workflows/build-and-test.yml
@@ -31,7 +31,7 @@ jobs:
    timeout-minutes: 30
    runs-on: ${{ matrix.os }}
    container:
-      image: ghcr.io/anoma/namada:wasm-0.11.0
+      image: ghcr.io/anoma/namada:wasm-main
    strategy:
      fail-fast: false
      matrix:
@@ -73,7 +73,7 @@ jobs:
    runs-on: ${{ matrix.os }}
    needs: [build-wasm]
    container:
-      image: ghcr.io/anoma/namada:wasm-0.8.0
+      image: ghcr.io/anoma/namada:wasm-main
    strategy:
      fail-fast: false
      matrix:
@@ -172,11 +172,21 @@
        # See comment in build-and-test.yml
        with:
          ref: ${{ github.event.pull_request.head.sha }}
+      - name: Remove some unused data in github runners
+        run: |
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /opt/ghc
+          sudo rm -rf "/usr/local/share/boost"
+          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          role-to-assume: arn:aws:iam::375643557360:role/anoma-github-action-ci-master
          aws-region: eu-west-1
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install sccache (ubuntu-20.04)
        if: matrix.os == 'ubuntu-20.04'
        env:
@@ -273,11 +283,21 @@
        # See comment in build-and-test.yml
        with:
          ref: ${{ github.event.pull_request.head.sha }}
+      - name: Remove some unused data in github runners
+        run: |
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /opt/ghc
+          sudo rm -rf "/usr/local/share/boost"
+          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          role-to-assume: arn:aws:iam::375643557360:role/anoma-github-action-ci-master
          aws-region: eu-west-1
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install sccache (ubuntu-20.04)
        if: matrix.os == 'ubuntu-20.04'
        env:
@@ -351,7 +371,7 @@ jobs:
      fail-fast: false
      matrix:
        os: [ubuntu-20.04]
-        nightly_version: [nightly-2022-05-20]
+        nightly_version: [nightly-2022-11-03]
        mold_version: [1.7.0]
        make:
          - name: e2e
@@ -386,11 +406,21 @@
        # See comment in build-and-test.yml
        with:
          ref: ${{ github.event.pull_request.head.sha }}
+      - name: Remove some unused data in github runners
+        run: |
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /opt/ghc
+          sudo rm -rf "/usr/local/share/boost"
+          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          role-to-assume: arn:aws:iam::375643557360:role/anoma-github-action-ci-master
          aws-region: eu-west-1
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install sccache (ubuntu-20.04)
        if: matrix.os == 'ubuntu-20.04'
        env:
diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index 094485da416..681cf1b475c 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -55,6 +55,10 @@ jobs:
        # See comment in build-and-test.yml
        with:
          ref: ${{ github.event.pull_request.head.sha }}
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Setup rust toolchain
        uses: oxidecomputer/actions-rs_toolchain@ad3f86084a8a5acf2c09cb691421b31cf8af7a36
        with:
diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml
index aa6b7798974..c3fcec07dd6 100644
--- a/.github/workflows/cron.yml
+++ b/.github/workflows/cron.yml
@@ -60,6 +60,10 @@ jobs:
          restore-keys: ${{ runner.os }}-${{ matrix.make.cache_subkey }}-${{ matrix.make.cache_version }}-cargo-
      - name: Install cargo ${{ matrix.make.command }}
        run: curl https://i.jpillora.com/${{ matrix.make.version }}! | bash
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: ${{ matrix.make.name }}
        working-directory: ./.github/workflows/scripts
        run: |
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 6bd95c514ed..731bc9cc01f 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -33,7 +33,8 @@ jobs:
        mdbook_linkcheck: [Michael-F-Bryan/mdbook-linkcheck@v0.7.6]
        mdbook_open_on_gh: [badboy/mdbook-open-on-gh@v2.2.0]
        mdbook_admonish: [tommilligan/mdbook-admonish@v1.7.0]
-        mdbook_katex: [lzanini/mdbook-katex@v0.2.10]
+        mdbook_katex: [lzanini/mdbook-katex@v0.4.0]
+        mdbook_pagetoc: [slowsage/mdbook-pagetoc@v0.1.7]
        make:
          - name: Build specs
            folder: documentation/specs
@@ -78,6 +79,10 @@ jobs:
        with:
          role-to-assume: arn:aws:iam::375643557360:role/anoma-github-action-ci-master
          aws-region: eu-west-1
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install sccache (ubuntu-20.04)
        if: matrix.os == 'ubuntu-20.04'
        env:
@@ -117,6 +122,7 @@ jobs:
          curl https://i.jpillora.com/${{ matrix.mdbook_open_on_gh }}! | bash
          curl https://i.jpillora.com/${{ matrix.mdbook_admonish }}! | bash
          curl https://i.jpillora.com/${{ matrix.mdbook_katex }}! | bash
+          curl https://i.jpillora.com/${{ matrix.mdbook_pagetoc }}! | bash
          cd ${{ matrix.make.folder }} && mdbook-admonish install
      - name: ${{ matrix.make.name }}
        run: ${{ matrix.make.command }}
@@ -168,6 +174,10 @@ jobs:
        with:
          role-to-assume: arn:aws:iam::375643557360:role/anoma-github-action-ci-master
          aws-region: eu-west-1
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install sccache (ubuntu-20.04)
        if: matrix.os == 'ubuntu-20.04'
        env:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 69b02256ccb..7256b810b1b 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -42,6 +42,10 @@ jobs:
        with:
          role-to-assume: arn:aws:iam::375643557360:role/anoma-github-action-ci-master
          aws-region: eu-west-1
+      - name: Install Protoc
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install sccache (ubuntu-20.04)
        if: matrix.os == 'ubuntu-20.04'
        env:
diff --git a/.github/workflows/scripts/e2e.json b/.github/workflows/scripts/e2e.json
index 8ee62bb2364..a6900183dd0 100644
--- a/.github/workflows/scripts/e2e.json
+++ b/.github/workflows/scripts/e2e.json
@@ -11,6 +11,8 @@
    "e2e::ledger_tests::pos_bonds": 19,
    "e2e::ledger_tests::pos_init_validator": 15,
    "e2e::ledger_tests::proposal_offline": 15,
+    "e2e::ledger_tests::pgf_governance_proposal": 35,
+    "e2e::ledger_tests::eth_governance_proposal": 35,
    "e2e::ledger_tests::proposal_submission": 35,
    "e2e::ledger_tests::run_ledger": 5,
    "e2e::ledger_tests::run_ledger_load_state_and_reset": 5,
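The timings in e2e.json above are per-test runtime budgets in seconds; the scheduler in the next file uses them to spread the e2e tests across CI machines. A minimal sketch of that greedy balancing, assuming the JSON shape above and the `MACHINES`/`N_OF_MACHINES` structures visible in the diff below (the full script also tracks which machine the current runner is):

```python
# Sketch only: greedy load-balancing of e2e tests across CI machines,
# mirroring the structures visible in schedule-e2e.py below.
# e2e.json maps each test name to its expected runtime in seconds.
import json

N_OF_MACHINES = 2
machines = [{"tasks": [], "total_time": 0} for _ in range(N_OF_MACHINES)]

with open(".github/workflows/scripts/e2e.json") as f:
    timings = json.load(f)

# Always hand the next test to the machine with the least accumulated time,
# so the machines finish at roughly the same wall-clock time.
for test, seconds in timings.items():
    freest = min(machines, key=lambda m: m["total_time"])
    freest["tasks"].append(test)
    freest["total_time"] += seconds
```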
diff --git a/.github/workflows/scripts/schedule-e2e.py b/.github/workflows/scripts/schedule-e2e.py
index 4118a65d7d0..e32a76bf5f4 100644
--- a/.github/workflows/scripts/schedule-e2e.py
+++ b/.github/workflows/scripts/schedule-e2e.py
@@ -5,8 +5,10 @@
 N_OF_MACHINES = 2
 
+NIGHTLY_VERSION = open("rust-nightly-version", "r").read().strip()
+
 E2E_FILE = ".github/workflows/scripts/e2e.json"
-CARGO_TEST_COMMAND = "cargo test {} -- --test-threads=1 --nocapture"
+CARGO_TEST_COMMAND = "cargo +{} test {} -Z unstable-options -- --test-threads=1 -Z unstable-options --nocapture"
 
 MACHINES = [{'tasks': [], 'total_time': 0} for _ in range(N_OF_MACHINES)]
 
@@ -32,7 +34,7 @@ def find_freer_machine():
 for index, machine in enumerate(MACHINES):
     print("Machine {}: {} tasks for a total of {}s".format(index, len(machine['tasks']), machine['total_time']))
     for test in machine['tasks']:
-        cargo = CARGO_TEST_COMMAND.format(test)
+        cargo = CARGO_TEST_COMMAND.format(NIGHTLY_VERSION, test)
 
 tasks = MACHINES[CURRENT_MACHINE_INDEX]['tasks']
 
@@ -41,7 +43,7 @@ def find_freer_machine():
 
 for test_name in tasks:
     try:
-        command = CARGO_TEST_COMMAND.format(test_name)
+        command = CARGO_TEST_COMMAND.format(NIGHTLY_VERSION, test_name)
        subprocess.check_call(command, shell=True, stdout=sys.stdout, stderr=subprocess.STDOUT)
        test_results[test_name] = {
            'status': 'ok',
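With the change above, every scheduled test is pinned to the nightly toolchain read from the repository's rust-nightly-version file. A sketch of how the updated template expands (the file name and format string are taken from the diff; the test name is only an example):

```python
# Sketch of the updated command construction in schedule-e2e.py.
NIGHTLY_VERSION = open("rust-nightly-version", "r").read().strip()
CARGO_TEST_COMMAND = "cargo +{} test {} -Z unstable-options -- --test-threads=1 -Z unstable-options --nocapture"

# With rust-nightly-version containing "nightly-2022-11-03", this yields e.g.:
# cargo +nightly-2022-11-03 test e2e::ledger_tests::run_ledger -Z unstable-options ...
command = CARGO_TEST_COMMAND.format(NIGHTLY_VERSION, "e2e::ledger_tests::run_ledger")
print(command)
```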
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bc16e671f43..139f1b50604 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,17 +1,231 @@
 # CHANGELOG
 
+## v0.16.0
+
+Namada 0.16.0 is a regular release focused on providing the Namada SDK
+to developers.
+
+### DOCS
+
+- Added a page table of contents via the mdbook-pagetoc plugin for the developer
+  documentation. ([#1275](https://github.com/anoma/namada/pull/1275))
+
+### IMPROVEMENTS
+
+- Bump the RocksDB crate to 0.21.0 to address compilation errors on certain C++
+  toolchains. ([#1366](https://github.com/anoma/namada/pull/1366))
+
+## v0.15.3
+
+Namada 0.15.3 is a maintenance release addressing the creation of
+incorrect data directories on Mac and Windows platforms.
+
+### BUG FIXES
+
+- Place the default data directory in the local rather than the roaming profile
+  on Windows. ([#1368](https://github.com/anoma/namada/pull/1368))
+- Use a blank qualifier and organization, and an upcased Namada, to
+  construct default base directories on Mac and Windows platforms.
+  ([#1369](https://github.com/anoma/namada/pull/1369))
+
+## v0.15.2
+
+Namada 0.15.2 is a bugfix release containing various fixes, including
+a major improvement to storage usage.
+
+### BUG FIXES
+
+- Fixed an issue with the iterator of LazyMap with a nested LazyVec collection
+  that would match non-data keys and fail to decode those with the data decoder.
+  ([#1218](https://github.com/anoma/namada/pull/1218))
+- PoS: fixed a function for clearing historical epoched data
+  ([\#1325](https://github.com/anoma/namada/pull/1325))
+
+### FEATURES
+
+- Added a utility command to the CLI to compute a Tendermint address from a
+  Namada public key. ([#1152](https://github.com/anoma/namada/pull/1152))
+
+### IMPROVEMENTS
+
+- Changed the default base directory. On Linux, the default path will be `$XDG_DATA_HOME/namada`; on macOS it will be `$HOME/Library/Application Support/com.heliax.namada`.
+  ([#1138](https://github.com/anoma/namada/pull/1138))
+- RocksDB optimization to reduce storage usage
+  ([#1333](https://github.com/anoma/namada/issues/1333))
+
+### MISCELLANEOUS
+
+- Enabled integer overflow checks in the release build.
+  ([#1295](https://github.com/anoma/namada/pull/1295))
+
+## v0.15.1
+
+Namada 0.15.1 is a patch release addressing issues with high storage
+usage due to duplicative storage of wasm code.
+
+### IMPROVEMENTS
+
+- Disable Tendermint tx_index by default
+  ([#1278](https://github.com/anoma/namada/issues/1278))
+- Remove wasm code from tx ([#1297](https://github.com/anoma/namada/issues/1297))
+
+## v0.15.0
+
+Namada 0.15.0 is a regular minor release featuring various
+implementation improvements.
+
+### BUG FIXES
+
+- Fixed batch delete to read the previous value
+  ([#1116](https://github.com/anoma/namada/issues/1116))
+- Return an error when getting a proof of a non-committed block
+  ([#1154](https://github.com/anoma/namada/issues/1154))
+- Fixed the dump-db node utility, which was not iterating on DB keys correctly,
+  leading to duplicates in the dump. Added a historic flag to also dump the
+  diff keys. ([#1184](https://github.com/anoma/namada/pull/1184))
+- Fixed an issue with lazy collections sub-key validation with the `Address`
+  type. This issue was also affecting the iterator of nested `LazyMap`.
+  ([#1212](https://github.com/anoma/namada/pull/1212))
+- Fixed various aspects of the CLI output for querying bonds and performing an
+  unbond action. ([#1239](https://github.com/anoma/namada/pull/1239))
+- PoS: Fixed an issue with slashable evidence processed
+  and applied at a new epoch causing the ledger to crash.
+  ([#1246](https://github.com/anoma/namada/pull/1246))
+- Addresses are now ordered by their string format (bech32m)
+  to ensure that their order is preserved inside raw storage keys.
+  ([#1256](https://github.com/anoma/namada/pull/1256))
+- Prevent clients from delegating from a validator account to another validator
+  account. ([#1263](https://github.com/anoma/namada/pull/1263))
+
+### FEATURES
+
+- Infrastructure for PoS inflation and rewards. Includes inflation
+  using the PD controller mechanism and rewards based on validator block voting
+  behavior. Rewards are tracked and effectively distributed using the F1 fee
+  mechanism. In this PR, rewards are calculated and stored, but they are not
+  yet applied to voting powers or considered when unbonding and withdrawing.
+  ([#714](https://github.com/anoma/namada/pull/714))
+- Implements custom governance proposals
+  ([#1056](https://github.com/anoma/namada/pull/1056))
+- Adds an expiration field to transactions
+  ([#1123](https://github.com/anoma/namada/pull/1123))
+- Added a rollback command to revert the Namada state to that of the previous
+  block. ([#1187](https://github.com/anoma/namada/pull/1187))
+- Introduced a new ledger sub-command: `run-until`. At the provided block
+  height, the node will either halt or suspend. If the chain is suspended, only
+  the consensus connection is suspended. This means that the node can still be
+  queried. This is useful for debugging purposes.
+  ([#1189](https://github.com/anoma/namada/pull/1189))
+
+### IMPROVEMENTS
+
+- Return early in PosBase::transfer if an attempt is made to transfer zero
+  tokens ([#856](https://github.com/anoma/namada/pull/856))
+- Adds hash-based replay protection
+  ([#1017](https://github.com/anoma/namada/pull/1017))
+- Renamed the "ledger-address" CLI argument to "node".
+  ([#1031](https://github.com/anoma/namada/pull/1031))
+- Added a TempWlStorage for storage_api::StorageRead/Write
+  in the ABCI++ prepare/process proposal handler.
+  ([#1051](https://github.com/anoma/namada/pull/1051))
+- Added a wallet section for token addresses to replace hard-
+  coded values with addresses loaded from the genesis configuration.
+  ([#1081](https://github.com/anoma/namada/pull/1081))
+- Improved the CLI description of the start-time node argument.
+  ([#1087](https://github.com/anoma/namada/pull/1087))
+- Adds a chain ID field to transactions
+  ([#1106](https://github.com/anoma/namada/pull/1106))
+- Update the help text of namadac utils join-network so that the URL
+  displays cleanly on a single line, instead of being cut halfway.
+  ([#1109](https://github.com/anoma/namada/pull/1109))
+- Check in the client that the ledger node has at least one
+  block and is synced before submitting transactions and queries.
+  ([#1258](https://github.com/anoma/namada/pull/1258))
+
+### MISCELLANEOUS
+
+- Clean up some code relating to the Ethereum bridge
+  ([#796](https://github.com/anoma/namada/pull/796))
+- Updated RocksDB to v0.20.1.
+  ([#1163](https://github.com/anoma/namada/pull/1163))
+
+### TESTING
+
+- Add utility code for working with test wasms
+  ([#893](https://github.com/anoma/namada/pull/893))
+
+## v0.14.3
+
+Namada 0.14.3 is a bugfix release addressing mainly disk usage
+inefficiencies.
+
+### BUG FIXES
+
+- Check that validators are valid in the pre-genesis setup.
+  ([#1140](https://github.com/anoma/namada/pull/1140))
+- Now load conversions from storage even for epoch 1.
+  ([\#1244](https://github.com/anoma/namada/pull/1244))
+
+### IMPROVEMENTS
+
+- Write Merkle tree stores only when a new epoch starts.
+  ([#1113](https://github.com/anoma/namada/issues/1113))
+- Prune old Merkle tree stores.
+  ([#1237](https://github.com/anoma/namada/pull/1237))
+
+### TESTING
+
+- Fixed the run_ledger_load_state_and_reset test in the debug build.
+  ([#1131](https://github.com/anoma/namada/pull/1131))
+
+## v0.14.2
+
+Namada 0.14.2 is a maintenance release addressing issues with
+proof-of-stake validator logic.
+
+### BUG FIXES
+
+- Fixed the PrefixIter order of iteration in the write-
+  log to always match the iteration order in the storage.
+  ([#1141](https://github.com/anoma/namada/pull/1141))
+- Fixed the init-chain handler to stop committing state to the DB
+  as it may be re-applied when the node is shut down before the
+  first block is committed, leading to an invalid genesis state.
+  ([#1182](https://github.com/anoma/namada/pull/1182))
+- Fixed an issue in which a validator's stake and validator sets
+  data get into an invalid state (duplicate records with incorrect
+  values) due to a logic error in clearing of historical epoch data.
+  ([#1191](https://github.com/anoma/namada/pull/1191))
+
+### FEATURES
+
+- Added a lazy set collection.
+  ([#1196](https://github.com/anoma/namada/pull/1196))
+
+### IMPROVEMENTS
+
+- Ensure that PoS validator consensus keys are unique.
+  ([#1197](https://github.com/anoma/namada/pull/1197))
+
+## v0.14.1
+
+Namada 0.14.1 is a bugfix release addressing issues with inactive
+validator set updates in proof of stake.
+
+### BUG FIXES
+
+- Fix Tendermint validator set update to properly skip validators with no voting
+  power. ([#1146](https://github.com/anoma/namada/pull/1146))
+
 ## v0.14.0
 
 Namada 0.14.0 is a scheduled minor release with various protocol
 stability improvements.
 
-### BUG
+### BUG FIXES
 
 - Add validation for balances with IBC sub prefix
   ([#354](https://github.com/anoma/namada/issues/354))
-
-### BUG FIXES
-
 - Fixed the prefix iterator method to respect modifications in the write log.
([#913](https://github.com/anoma/namada/pull/913)) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 027c8dc4da6..5798b2c71c7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -39,10 +39,12 @@ unclog add \ The message should be a high-level description of the changes that should explain the scope of the change and affected components to Namada's users (while git commit messages should target developers). +Aim to make the changelog description readable and understandable for people using Namada in plain English, assuming no familiarity with the code, dependencies and other low-level details, and explain not just *what* has changed, but also *why* it's changed. + If none of the sections fit, new sections may be added. To find the existing section names, you can use e.g.: ```shell -for i in $(ls -d .changelog/*/*/); do basename "$i"; done +for i in $(ls -d .changelog/*/*/); do basename "$i"; done | sort | uniq ``` ## Development priorities diff --git a/Cargo.lock b/Cargo.lock index 734c39b2703..68075542f3a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,11 +4,11 @@ version = 3 [[package]] name = "addr2line" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ - "gimli 0.26.2", + "gimli 0.27.2", ] [[package]] @@ -23,7 +23,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -44,16 +44,27 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.9", + "once_cell", + "version_check 0.9.4", +] + +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if 1.0.0", "once_cell", "version_check 0.9.4", ] [[package]] name = "aho-corasick" -version = "0.7.19" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" +checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" dependencies = [ "memchr", ] @@ -78,9 +89,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.65" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "ark-bls12-381" @@ -144,7 +155,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -156,7 +167,7 @@ dependencies = [ "num-bigint", "num-traits 0.2.15", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -191,7 +202,7 @@ checksum = "8dd4e5f0bf8285d5ed538d27fab7411f3e297908fd93c62195de8bee3f199e82" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -206,9 +217,9 @@ dependencies = [ [[package]] name = "arrayref" 
-version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -250,34 +261,34 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-channel" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14485364214912d3b19cc3435dde4df66065127f05fa0d75c712f36f12c2f28" +checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" dependencies = [ - "concurrent-queue", + "concurrent-queue 2.2.0", "event-listener", "futures-core", ] [[package]] name = "async-executor" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" +checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" dependencies = [ + "async-lock", "async-task", - "concurrent-queue", + "concurrent-queue 2.2.0", "fastrand", "futures-lite", - "once_cell", "slab", ] [[package]] name = "async-global-executor" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da5b41ee986eed3f524c380e6d64965aea573882a8907682ad100f7859305ca" +checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" dependencies = [ "async-channel", "async-executor", @@ -294,7 +305,7 @@ version = "1.9.0" source = "git+https://github.com/heliaxdev/async-io.git?rev=9285dad39c9a37ecd0dbd498c5ce5b0e65b02489#9285dad39c9a37ecd0dbd498c5ce5b0e65b02489" dependencies = [ "autocfg 1.1.0", - "concurrent-queue", + "concurrent-queue 1.2.4", "futures-lite", "libc", "log 0.4.17", @@ -310,9 +321,9 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.5.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", ] @@ -346,7 +357,7 @@ dependencies = [ "async-io", "async-lock", "async-process", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.15", "futures-channel", "futures-core", "futures-io", @@ -365,40 +376,41 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", + "pin-project-lite", ] [[package]] name = "async-stream-impl" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "async-task" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" +checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" 
-version = "0.1.58" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -412,16 +424,16 @@ dependencies = [ "log 0.4.17", "pin-project-lite", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "tungstenite", "webpki-roots 0.21.1", ] [[package]] name = "atomic-waker" -version = "1.0.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" +checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" [[package]] name = "atty" @@ -429,7 +441,7 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", "winapi 0.3.9", ] @@ -449,18 +461,63 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "axum" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes 1.4.0", + "futures-util", + "http", + "http-body", + "hyper 0.14.26", + "itoa", + "matchit", + "memchr", + "mime 0.3.17", + "percent-encoding 2.2.0", + "pin-project-lite", + "rustversion", + "serde 1.0.163", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes 1.4.0", + "futures-util", + "http", + "http-body", + "mime 0.3.17", + "rustversion", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" -version = "0.3.66" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ "addr2line", "cc", "cfg-if 1.0.0", "libc", - "miniz_oxide", - "object 0.29.0", + "miniz_oxide 0.6.2", + "object 0.30.3", "rustc-demangle", ] @@ -491,9 +548,15 @@ dependencies = [ [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64ct" @@ -507,16 +570,22 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" +[[package]] +name = "bech32" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" + [[package]] name = "bellman" version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43473b34abc4b0b405efa0a250bac87eea888182b21687ee5c8115d279b0fda5" dependencies = [ - "bitvec", + "bitvec 0.22.3", "blake2s_simd 0.5.11", "byteorder", - "crossbeam-channel 0.5.6", + "crossbeam-channel 0.5.8", "ff", "group", "lazy_static", @@ -540,11 +609,11 @@ dependencies = [ [[package]] name = "bimap" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc0455254eb5c6964c4545d8bac815e1a1be4f3afe0ae695ea539c12d728d44b" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" dependencies = [ - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -553,14 +622,14 @@ version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" dependencies = [ - "serde 1.0.145", + "serde 1.0.163", ] [[package]] name = "bindgen" -version = "0.60.1" +version = "0.65.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" dependencies = [ "bitflags", "cexpr", @@ -568,11 +637,13 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", + "prettyplease 0.2.4", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", + "syn 2.0.15", ] [[package]] @@ -606,23 +677,23 @@ checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" [[package]] name = "bitcoin" -version = "0.28.0" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42b2a9a8e3c7544f5ce2b475f2f56580a3102b37e0ee001558ad4faedcf56cf4" +checksum = "0694ea59225b0c5f3cb405ff3f670e4828358ed26aec49dc352f730f0cb1a8a3" dependencies = [ - "bech32", + "bech32 0.9.1", "bitcoin_hashes", - "secp256k1 0.22.2", - "serde 1.0.145", + "secp256k1 0.24.3", + "serde 1.0.163", ] [[package]] name = "bitcoin_hashes" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006cc91e1a1d99819bc5b8214be3555c1f0611b169f527a1fdc54ed1f2b745b0" +checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" dependencies = [ - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -637,19 +708,31 @@ version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5237f00a8c86130a0cc317830e558b966dd7850d48a953d998c813f01a41b527" dependencies = [ - "funty", - "radium", + "funty 1.2.0", + "radium 0.6.2", "tap", - "wyz", + "wyz 0.4.0", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty 2.0.0", + "radium 0.7.0", + "tap", + "wyz 0.5.1", ] [[package]] name = "blake2" -version = "0.10.4" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -670,18 +753,18 @@ checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ 
"arrayref", "arrayvec 0.5.2", - "constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] name = "blake2b_simd" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127" +checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" dependencies = [ "arrayref", "arrayvec 0.7.2", - "constant_time_eq", + "constant_time_eq 0.2.5", ] [[package]] @@ -692,32 +775,32 @@ checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" dependencies = [ "arrayref", "arrayvec 0.5.2", - "constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] name = "blake2s_simd" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db539cc2b5f6003621f1cd9ef92d7ded8ea5232c7de0f9faa2de251cd98730d4" +checksum = "6637f448b9e61dfadbdcbae9a885fadee1f3eaffb1f8d3c1965d3ade8bdfd44f" dependencies = [ "arrayref", "arrayvec 0.7.2", - "constant_time_eq", + "constant_time_eq 0.2.5", ] [[package]] name = "blake3" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08e53fc5a564bb15bfe6fae56bd71522205f1f91893f9c0116edad6496c183f" +checksum = "42ae2468a89544a466886840aa467a25b766499f4f04bf7d9fcd10ecee9fccef" dependencies = [ "arrayref", "arrayvec 0.7.2", "cc", "cfg-if 1.0.0", - "constant_time_eq", - "digest 0.10.5", + "constant_time_eq 0.2.5", + "digest 0.10.6", ] [[package]] @@ -738,17 +821,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding 0.2.1", - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -778,16 +860,17 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "blocking" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6ccb65d468978a086b69884437ded69a90faab3bbe6e67f242173ea728acccc" +checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" dependencies = [ "async-channel", + "async-lock", "async-task", "atomic-waker", "fastrand", "futures-lite", - "once_cell", + "log 0.4.17", ] [[package]] @@ -808,20 +891,43 @@ name = "borsh" version = "0.9.4" source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" dependencies = [ - "borsh-derive", + "borsh-derive 0.9.4", "hashbrown 0.11.2", ] +[[package]] +name = "borsh" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" +dependencies = [ + "borsh-derive 0.10.3", + "hashbrown 0.13.2", +] + [[package]] name = "borsh-derive" version = "0.9.4" source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c54cf8259b7ec5ec4073a#cd5223e5103c4f139e0c54cf8259b7ec5ec4073a" dependencies = [ - "borsh-derive-internal", - 
"borsh-schema-derive-internal", - "proc-macro-crate", + "borsh-derive-internal 0.9.4", + "borsh-schema-derive-internal 0.9.4", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "borsh-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" +dependencies = [ + "borsh-derive-internal 0.10.3", + "borsh-schema-derive-internal 0.10.3", + "proc-macro-crate 0.1.5", "proc-macro2", - "syn", + "syn 1.0.109", ] [[package]] @@ -831,7 +937,18 @@ source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "borsh-derive-internal" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -841,9 +958,26 @@ source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", ] +[[package]] +name = "bs58" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" + [[package]] name = "bstr" version = "0.2.17" @@ -857,9 +991,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" + +[[package]] +name = "byte-slice-cast" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "byte-tools" @@ -869,32 +1009,34 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "byte-unit" -version = "4.0.14" +version = "4.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ebf10dda65f19ff0f42ea15572a359ed60d7fc74fdc984d90310937be0014b" +checksum = "da78b32057b8fdfc352504708feeba7216dcd65a2c9ab02978cbd288d1279b6c" dependencies = [ + "serde 1.0.163", "utf8-width", ] [[package]] name = "bytecheck" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f" +checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627" dependencies = [ "bytecheck_derive", "ptr_meta", + "simdutf8", ] [[package]] name = "bytecheck_derive" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" +checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -921,9 +1063,9 @@ dependencies = [ [[package]] name = "bytes" -version 
= "1.2.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "bzip2-sys" @@ -938,17 +1080,17 @@ dependencies = [ [[package]] name = "cache-padded" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" +checksum = "981520c98f422fcc584dc1a95c334e6953900b9106bc47a9839b81790009eb21" [[package]] name = "camino" -version = "1.1.1" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad0e1e3e88dd237a156ab9f571021b8a158caa0ae44b1968a241efb5144c1e" +checksum = "c530edf18f37068ac2d977409ed5cd50d53d73bc653c7647b48eb78976ac9ae2" dependencies = [ - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -957,7 +1099,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" dependencies = [ - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -968,16 +1110,16 @@ checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" dependencies = [ "camino", "cargo-platform", - "semver 1.0.14", - "serde 1.0.145", + "semver 1.0.17", + "serde 1.0.163", "serde_json", ] [[package]] name = "cc" -version = "1.0.73" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" dependencies = [ "jobserver", ] @@ -988,7 +1130,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 7.1.1", + "nom 7.1.3", ] [[package]] @@ -1030,9 +1172,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.22" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "num-integer", @@ -1042,9 +1184,9 @@ dependencies = [ [[package]] name = "chunked_transfer" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff857943da45f546682664a79488be82e69e43c1a7a2307679ab9afb3a66d2e" +checksum = "cca491388666e04d7248af3f60f0c40cfb0991c72205595d7c396e3510207d1a" [[package]] name = "cipher" @@ -1052,7 +1194,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -1066,9 +1208,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.4.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -1106,16 +1248,6 @@ name = "clru" version = "0.5.0" source = 
"git+https://github.com/marmeladema/clru-rs.git?rev=71ca566#71ca566915f21f3c308091ca7756a91b0f8b5afc" -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] - [[package]] name = "color-eyre" version = "0.5.11" @@ -1139,18 +1271,18 @@ checksum = "b6eee477a4a8a72f4addd4de416eb56d54bc307b284d6601bafdee1f4ea462d1" dependencies = [ "once_cell", "owo-colors", - "tracing-core 0.1.30", + "tracing-core 0.1.31", "tracing-error", ] [[package]] name = "concat-idents" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b6f90860248d75014b7b103db8fee4f291c07bfb41306cdf77a0a5ab7a10d2f" +checksum = "0fe0e1d9f7de897d18e590a7496b5facbe87813f746cf4b8db596ba77e07e832" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1162,6 +1294,15 @@ dependencies = [ "cache-padded", ] +[[package]] +name = "concurrent-queue" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +dependencies = [ + "crossbeam-utils 0.8.15", +] + [[package]] name = "config" version = "0.11.0" @@ -1169,9 +1310,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b1b9d958c2b1368a663f05538fc1b5975adce1e19f435acceae987aceeeb369" dependencies = [ "lazy_static", - "nom 5.1.2", + "nom 5.1.3", "rust-ini", - "serde 1.0.145", + "serde 1.0.163", "serde-hjson", "serde_json", "toml", @@ -1184,7 +1325,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "977baae4026273d7f9bb69a0a8eb4aed7ab9dac98799f742dce09173a9734754" dependencies = [ - "windows", + "windows 0.29.0", ] [[package]] @@ -1199,6 +1340,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "constant_time_eq" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b" + [[package]] name = "contracts" version = "0.6.3" @@ -1207,7 +1354,7 @@ checksum = "f1d1429e3bd78171c65aa010eabcdf8f863ba3254728dbfb0ad4b1545beac15c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1222,15 +1369,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" dependencies = [ "libc", ] @@ -1316,50 +1463,35 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = 
[ "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.15", ] [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.11", - "crossbeam-utils 0.8.12", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg 1.1.0", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", + "crossbeam-epoch", + "crossbeam-utils 0.8.15", ] [[package]] name = "crossbeam-epoch" -version = "0.9.11" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", - "memoffset 0.6.5", + "crossbeam-utils 0.8.15", + "memoffset 0.8.0", "scopeguard", ] @@ -1376,9 +1508,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.12" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if 1.0.0", ] @@ -1401,7 +1533,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "rand_core 0.6.4", "subtle", "zeroize", @@ -1413,7 +1545,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "typenum", ] @@ -1423,7 +1555,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] @@ -1433,7 +1565,7 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] @@ -1474,7 +1606,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1509,55 +1641,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "cxx" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f83d0ebf42c6eafb8d7c52f7e5f2d3003b89c7aa4fd2b79229209459a849af8" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"07d050484b55975889284352b0ffc2ecbda25c0c55978017c132b29ba0818a86" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2", - "quote", - "scratch", - "syn", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d2199b00553eda8012dfec8d3b1c75fce747cf27c169a270b3b99e3448ab78" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb67a6de1f602736dd7eaead0080cf3435df806c61b24b13328db128c58868f" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "darling" -version = "0.14.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4529658bdda7fd6769b8614be250cdcfc3aeb0ee72fe66f9e41e5e5eb73eac02" +checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" dependencies = [ "darling_core", "darling_macro", @@ -1565,33 +1653,33 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "649c91bc01e8b1eac09fb91e8dbc7d517684ca6be8ebc75bb9cafc894f9fdb6f" +checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "darling_macro" -version = "0.14.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc69c5bfcbd2fc09a0f38451d2daf0e372e367986a83906d1b0dbc88134fb5" +checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "data-encoding" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" [[package]] name = "der" @@ -1602,6 +1690,12 @@ dependencies = [ "const-oid", ] +[[package]] +name = "derivation-path" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e5c37193a1db1d8ed868c03ec7b152175f26160a5b740e5e484143877e0adf0" + [[package]] name = "derivative" version = "2.2.0" @@ -1610,7 +1704,7 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1621,7 +1715,7 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1651,16 +1745,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] name = "digest" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -1706,6 +1800,17 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "displaydoc" +version = "0.2.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.15", +] + [[package]] name = "doc-comment" version = "0.3.3" @@ -1718,6 +1823,12 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" +[[package]] +name = "dyn-clone" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" + [[package]] name = "dynasm" version = "1.2.3" @@ -1730,7 +1841,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1758,11 +1869,11 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ - "serde 1.0.145", + "serde 1.0.163", "signature", ] @@ -1775,7 +1886,7 @@ dependencies = [ "curve25519-dalek-ng", "hex", "rand_core 0.6.4", - "serde 1.0.145", + "serde 1.0.163", "sha2 0.9.9", "thiserror", "zeroize", @@ -1791,17 +1902,29 @@ dependencies = [ "ed25519", "merlin", "rand 0.7.3", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", "sha2 0.9.9", "zeroize", ] +[[package]] +name = "ed25519-dalek-bip32" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d2be62a4061b872c8c0873ee4fc6f101ce7b889d039f019c5fa2af471a59908" +dependencies = [ + "derivation-path", + "ed25519-dalek", + "hmac 0.12.1", + "sha2 0.10.6", +] + [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "elliptic-curve" @@ -1813,7 +1936,7 @@ dependencies = [ "crypto-bigint", "der", "ff", - "generic-array 0.14.6", + "generic-array 0.14.7", "group", "rand_core 0.6.4", "sec1", @@ -1823,9 +1946,9 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.31" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if 1.0.0", ] @@ -1847,39 +1970,69 @@ checksum = "c134c37760b27a871ba422106eedbb8247da973a09e82558bf26d619c882b159" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "enumset" -version = "1.0.12" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19be8061a06ab6f3a6cf21106c873578bf01bd42ad15e0311a9c76161cb1c753" +checksum = "e875f1719c16de097dee81ed675e2d9bb63096823ed3f0ca827b7dea3028bbbb" dependencies = [ "enumset_derive", ] [[package]] name = "enumset_derive" -version = "0.6.1" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e7b551eba279bf0fa88b83a46330168c1560a52a94f5126f892f0b364ab3e0" +checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af" dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 
2.0.15", ] [[package]] name = "equihash" version = "0.1.0" -source = "git+https://github.com/zcash/librustzcash?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" +source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" dependencies = [ - "blake2b_simd 1.0.0", + "blake2b_simd 1.0.1", "byteorder", ] +[[package]] +name = "erased-serde" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f2b0c2380453a92ea8b6c8e5f64ecaafccddde8ceab55ff7a8ac1029f894569" +dependencies = [ + "serde 1.0.163", +] + +[[package]] +name = "errno" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "error-chain" version = "0.12.4" @@ -1897,7 +2050,7 @@ checksum = "f5584ba17d7ab26a8a7284f13e5bd196294dd2f2d79773cff29b9e9edef601a6" dependencies = [ "log 0.4.17", "once_cell", - "serde 1.0.145", + "serde 1.0.163", "serde_json", ] @@ -1914,7 +2067,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2795e11f4ee3124984d454f25ac899515a5fa6d956562ef2b147fef6050b02f8" dependencies = [ "conpty", - "nix 0.23.1", + "nix 0.23.2", "ptyprocess", "regex", ] @@ -1943,9 +2096,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -1953,7 +2106,7 @@ dependencies = [ [[package]] name = "ferveo" version = "0.1.1" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" dependencies = [ "anyhow", "ark-bls12-381", @@ -1965,9 +2118,9 @@ dependencies = [ "ark-std", "bincode", "blake2", - "blake2b_simd 1.0.0", - "borsh", - "digest 0.10.5", + "blake2b_simd 1.0.1", + "borsh 0.9.4", + "digest 0.10.6", "ed25519-dalek", "either", "ferveo-common", @@ -1979,7 +2132,7 @@ dependencies = [ "num", "rand 0.7.3", "rand 0.8.5", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", "serde_json", "subproductdomain", @@ -1990,13 +2143,13 @@ dependencies = [ [[package]] name = "ferveo-common" version = "0.1.0" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" dependencies = [ "anyhow", "ark-ec", "ark-serialize", "ark-std", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", ] @@ -2006,28 +2159,26 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" dependencies = [ - "bitvec", + "bitvec 0.22.3", "rand_core 0.6.4", "subtle", ] [[package]] name = "file-lock" -version = "2.1.6" +version = "2.1.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0815fc2a1924e651b71ae6d13df07b356a671a09ecaf857dbd344a2ba937a496" +checksum = "f59be9010c5418713a48aac4c1b897d85dafd958055683dc31bdae553536647b" dependencies = [ "cc", "libc", - "mktemp", - "nix 0.24.2", ] [[package]] name = "file-serve" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e43addbb09a5dcb5609cb44a01a79e67716fe40b50c109f50112ef201a8c7c59" +checksum = "547ebf393d987692a02b5d2be1c0b398b16a5b185c23a047c1d3fc3050d6d803" dependencies = [ "log 0.4.17", "mime_guess", @@ -2036,14 +2187,26 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.17" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94a7bbaa59354bc20dd75b67f23e2797b4490e9d6928203fb105c79e448c86c" +checksum = "5cbc844cecaee9d4443931972e1289c8ff485cb4cc2767cb03ca139ed6885153" dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall 0.2.16", - "windows-sys 0.36.1", + "windows-sys 0.48.0", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", ] [[package]] @@ -2054,12 +2217,12 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.24" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.7.1", ] [[package]] @@ -2118,9 +2281,9 @@ dependencies = [ [[package]] name = "fs_extra" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" [[package]] name = "fuchsia-cprng" @@ -2149,6 +2312,12 @@ name = "funty" version = "1.2.0" source = "git+https://github.com/bitvecto-rs/funty/?rev=7ef0d890fbcd8b3def1635ac1a877fc298488446#7ef0d890fbcd8b3def1635ac1a877fc298488446" +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + [[package]] name = "futures" version = "0.1.31" @@ -2157,9 +2326,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -2172,9 +2341,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -2182,15 +2351,15 @@ dependencies = [ [[package]] name = "futures-core" 
-version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -2199,15 +2368,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ "fastrand", "futures-core", @@ -2220,32 +2389,32 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -2270,9 +2439,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check 0.9.4", @@ -2285,17 +2454,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ "cfg-if 1.0.0", - "js-sys", "libc", "wasi 0.9.0+wasi-snapshot-preview1", - "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +checksum = 
"c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if 1.0.0", "libc", @@ -2315,9 +2482,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.2" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "git2" @@ -2336,15 +2503,15 @@ dependencies = [ [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-timers" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb7d06c1c8cc2a29bee7ec961009a0b2caa0793ee4900c2ffb348734ba1c8f9" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" dependencies = [ "futures-channel", "futures-core", @@ -2367,7 +2534,7 @@ dependencies = [ [[package]] name = "group-threshold-cryptography" version = "0.1.0" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" dependencies = [ "anyhow", "ark-bls12-381", @@ -2376,7 +2543,7 @@ dependencies = [ "ark-poly", "ark-serialize", "ark-std", - "blake2b_simd 1.0.0", + "blake2b_simd 1.0.1", "chacha20", "hex", "itertools", @@ -2405,16 +2572,16 @@ checksum = "729f9bd3449d77e7831a18abfb7ba2f99ee813dfd15b8c2167c9a54ba20aa99d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "h2" -version = "0.3.14" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", "fnv", "futures-core", "futures-sink", @@ -2423,7 +2590,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.8", "tracing 0.1.37", ] @@ -2453,7 +2620,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] @@ -2462,14 +2629,23 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash 0.8.3", ] [[package]] name = "hdpath" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dafb09e5d85df264339ad786a147d9de1da13687a3697c52244297e5e7c32d9c" +checksum = "09ae1615f843ce3981b47468f3f7c435ac17deb33c2261e64d7f1e87f5c11acc" dependencies = [ "byteorder", ] @@ -2490,13 +2666,13 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "bitflags", - "bytes 1.2.1", + "bytes 1.4.0", "headers-core", "http", "httpdate", - "mime 0.3.16", + "mime 0.3.17", "sha1", ] @@ -2511,12 +2687,9 @@ dependencies = [ [[package]] name = "heck" -version = "0.3.3" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" @@ -2527,6 +2700,21 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" +dependencies = [ + "libc", +] + +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" @@ -2553,6 +2741,15 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.6", +] + [[package]] name = "hmac-drbg" version = "0.3.0" @@ -2560,17 +2757,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ "digest 0.9.0", - "generic-array 0.14.6", + "generic-array 0.14.7", "hmac 0.8.1", ] [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", "fnv", "itoa", ] @@ -2581,7 +2778,7 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", "http", "pin-project-lite", ] @@ -2611,7 +2808,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" dependencies = [ "humantime", - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -2626,7 +2823,7 @@ dependencies = [ "log 0.3.9", "mime 0.2.6", "num_cpus", - "time 0.1.44", + "time 0.1.43", "traitobject", "typeable", "unicase 1.4.2", @@ -2635,11 +2832,11 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.20" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", "futures-channel", "futures-core", "futures-util", @@ -2663,15 +2860,15 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ - "bytes 1.2.1", - "futures 0.3.25", + "bytes 1.4.0", + "futures 0.3.28", "headers", "http", - "hyper 
0.14.20", + "hyper 0.14.26", "hyper-rustls", - "rustls-native-certs", + "rustls-native-certs 0.5.0", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "tower-service", "webpki 0.21.4", ] @@ -2684,12 +2881,12 @@ checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" dependencies = [ "ct-logs", "futures-util", - "hyper 0.14.20", + "hyper 0.14.26", "log 0.4.17", "rustls 0.19.1", - "rustls-native-certs", + "rustls-native-certs 0.5.0", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "webpki 0.21.4", "webpki-roots 0.21.1", ] @@ -2700,7 +2897,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper 0.14.20", + "hyper 0.14.26", "pin-project-lite", "tokio", "tokio-io-timeout", @@ -2712,8 +2909,8 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 1.2.1", - "hyper 0.14.20", + "bytes 1.4.0", + "hyper 0.14.26", "native-tls", "tokio", "tokio-native-tls", @@ -2721,147 +2918,166 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.51" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5a6ef98976b22b3b7f2f3a806f858cb862044cfa66805aa3ad84cb3d3b785ed" +checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi 0.3.9", + "windows 0.48.0", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] name = "ibc" -version = "0.14.0" -source = "git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d#9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d" +version = "0.36.0" +source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=2d7edc16412b60cabf78163fe24a6264e11f77a9#2d7edc16412b60cabf78163fe24a6264e11f77a9" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", + "cfg-if 1.0.0", "derive_more", - "flex-error", - "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", + "displaydoc", + "dyn-clone", + "erased-serde", + "ibc-proto 0.26.0 (git+https://github.com/heliaxdev/ibc-proto-rs.git?rev=7e527b5b8c95d83351e93ceafc14ac853224283f)", "ics23", "num-traits 0.2.15", + "parking_lot 0.12.1", + "primitive-types", "prost", - "prost-types", "safe-regex", - "serde 1.0.145", + "serde 1.0.163", "serde_derive", "serde_json", "sha2 0.10.6", "subtle-encoding", - "tendermint 0.23.5", - "tendermint-light-client-verifier 0.23.5", - "tendermint-proto 0.23.5", - "tendermint-testgen 0.23.5", - "time 0.3.15", + "tendermint 0.23.6", + "tendermint-light-client-verifier 0.23.6", + "tendermint-proto 0.23.6", + "tendermint-testgen 0.23.6", + "time 0.3.17", "tracing 0.1.37", + "uint", ] [[package]] name = "ibc" -version = "0.14.0" -source = "git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2#f4703dfe2c1f25cc431279ab74f10f3e0f6827e2" +version = "0.36.0" +source = 
"git+https://github.com/heliaxdev/cosmos-ibc-rs?rev=db14744bfba6239cc5f58345ff90f8b7d42637d6#db14744bfba6239cc5f58345ff90f8b7d42637d6" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", + "cfg-if 1.0.0", "derive_more", - "flex-error", - "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", + "displaydoc", + "dyn-clone", + "erased-serde", + "ibc-proto 0.26.0 (git+https://github.com/heliaxdev/ibc-proto-rs?rev=dd8ba23110a144ffe2074a0b889676468266435a)", "ics23", "num-traits 0.2.15", + "parking_lot 0.12.1", + "primitive-types", "prost", - "prost-types", "safe-regex", - "serde 1.0.145", + "serde 1.0.163", "serde_derive", "serde_json", "sha2 0.10.6", "subtle-encoding", - "tendermint 0.23.6", - "tendermint-light-client-verifier 0.23.6", - "tendermint-proto 0.23.6", - "tendermint-testgen 0.23.6", - "time 0.3.15", + "tendermint 0.23.5", + "tendermint-light-client-verifier 0.23.5", + "tendermint-proto 0.23.5", + "tendermint-testgen 0.23.5", + "time 0.3.17", "tracing 0.1.37", + "uint", ] [[package]] name = "ibc-proto" -version = "0.17.1" -source = "git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d#9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d" +version = "0.26.0" +source = "git+https://github.com/heliaxdev/ibc-proto-rs.git?rev=7e527b5b8c95d83351e93ceafc14ac853224283f#7e527b5b8c95d83351e93ceafc14ac853224283f" dependencies = [ - "base64 0.13.0", - "bytes 1.2.1", + "base64 0.13.1", + "bytes 1.4.0", + "flex-error", "prost", - "prost-types", - "serde 1.0.145", - "tendermint-proto 0.23.5", + "serde 1.0.163", + "subtle-encoding", + "tendermint-proto 0.23.6", + "tonic", ] [[package]] name = "ibc-proto" -version = "0.17.1" -source = "git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2#f4703dfe2c1f25cc431279ab74f10f3e0f6827e2" +version = "0.26.0" +source = "git+https://github.com/heliaxdev/ibc-proto-rs?rev=dd8ba23110a144ffe2074a0b889676468266435a#dd8ba23110a144ffe2074a0b889676468266435a" dependencies = [ - "base64 0.13.0", - "bytes 1.2.1", + "base64 0.13.1", + "borsh 0.10.3", + "bytes 1.4.0", + "flex-error", + "parity-scale-codec", "prost", - "prost-types", - "serde 1.0.145", - "tendermint-proto 0.23.6", - "tonic", + "scale-info", + "serde 1.0.163", + "subtle-encoding", + "tendermint-proto 0.23.5", ] [[package]] name = "ibc-relayer" -version = "0.14.0" -source = "git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2#f4703dfe2c1f25cc431279ab74f10f3e0f6827e2" +version = "0.22.0" +source = "git+https://github.com/heliaxdev/hermes.git?rev=8e2ff3479edc0653f34b22df450d451eedd2c2ab#8e2ff3479edc0653f34b22df450d451eedd2c2ab" dependencies = [ "anyhow", "async-stream", - "bech32", + "bech32 0.9.1", "bitcoin", - "bytes 1.2.1", - "crossbeam-channel 0.5.6", + "bs58", + "bytes 1.4.0", + "crossbeam-channel 0.5.8", + "digest 0.10.6", "dirs-next", + "ed25519", + "ed25519-dalek", + "ed25519-dalek-bip32", "flex-error", - "futures 0.3.25", + "futures 0.3.28", + "generic-array 0.14.7", "hdpath", "hex", "http", "humantime", "humantime-serde", - "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", - "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", + "ibc-proto 0.26.0 (git+https://github.com/heliaxdev/ibc-proto-rs.git?rev=7e527b5b8c95d83351e93ceafc14ac853224283f)", + "ibc-relayer-types", "itertools", - "k256", "moka", - "nanoid", "num-bigint", "num-rational", 
"prost", - "prost-types", "regex", "retry", - "ripemd160", - "semver 1.0.14", - "serde 1.0.145", + "ripemd", + "secp256k1 0.24.3", + "semver 1.0.17", + "serde 1.0.163", "serde_derive", "serde_json", "sha2 0.10.6", "signature", + "strum", "subtle-encoding", "tendermint 0.23.6", "tendermint-light-client", @@ -2875,23 +3091,52 @@ dependencies = [ "toml", "tonic", "tracing 0.1.37", + "uuid 1.3.2", +] + +[[package]] +name = "ibc-relayer-types" +version = "0.22.0" +source = "git+https://github.com/heliaxdev/hermes.git?rev=8e2ff3479edc0653f34b22df450d451eedd2c2ab#8e2ff3479edc0653f34b22df450d451eedd2c2ab" +dependencies = [ + "bytes 1.4.0", + "derive_more", + "dyn-clone", + "erased-serde", + "flex-error", + "ibc-proto 0.26.0 (git+https://github.com/heliaxdev/ibc-proto-rs.git?rev=7e527b5b8c95d83351e93ceafc14ac853224283f)", + "ics23", + "itertools", + "num-rational", + "primitive-types", + "prost", + "safe-regex", + "serde 1.0.163", + "serde_derive", + "serde_json", + "subtle-encoding", + "tendermint 0.23.6", + "tendermint-light-client-verifier 0.23.6", + "tendermint-proto 0.23.6", + "tendermint-rpc 0.23.6", + "tendermint-testgen 0.23.6", + "time 0.3.17", "uint", ] [[package]] name = "ics23" -version = "0.7.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d454cc0a22bd556cc3d3c69f9d75a392a36244634840697a4b9eb81bc5c8ae0" +checksum = "ca44b684ce1859cff746ff46f5765ab72e12e3c06f76a8356db8f9a2ecf43f17" dependencies = [ "anyhow", - "bytes 1.2.1", + "bytes 1.4.0", "hex", "prost", - "ripemd160", - "sha2 0.9.9", + "ripemd", + "sha2 0.10.6", "sha3", - "sp-std", ] [[package]] @@ -2921,13 +3166,42 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-serde" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" +dependencies = [ + "serde 1.0.163", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "incrementalmerkletree" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "186fd3ab92aeac865d4b80b410de9a7b341d31ba8281373caed0b6d17b2b5e96" dependencies = [ - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -2941,19 +3215,19 @@ name = "index-set" version = "0.7.1" source = "git+https://github.com/heliaxdev/index-set?tag=v0.7.1#dc24cdbbe3664514d59f1a4c4031863fc565f1c2" dependencies = [ - "borsh", - "serde 1.0.145", + "borsh 0.9.4", + "serde 1.0.163", ] [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -2962,7 +3236,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", ] [[package]] @@ -2977,6 +3251,17 @@ dependencies = [ "web-sys", ] +[[package]] +name = "io-lifetimes" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +dependencies = [ + "hermit-abi 0.3.1", + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "iovec" version = "0.1.4" @@ -2988,9 +3273,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.5.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "itertools" @@ -3003,24 +3288,24 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jobserver" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "68c16e1bfd491478ab155fd8b4896b86f9ede344949b641e61501e07c2b8b4d5" dependencies = [ "wasm-bindgen", ] @@ -3031,7 +3316,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7baec19d4e83f9145d4891178101a604565edff9645770fc979804138b04c" dependencies = [ - "bitvec", + "bitvec 0.22.3", "bls12_381", "ff", "group", @@ -3054,9 +3339,12 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures", +] [[package]] name = "kernel32-sys" @@ -3116,9 +3404,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.135" +version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c" +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "libgit2-sys" @@ -3136,9 +3424,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if 1.0.0", "winapi 0.3.9", @@ -3152,9 +3440,9 @@ checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" [[package]] name = "librocksdb-sys" -version = "0.8.0+7.4.4" +version = "0.11.0+8.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"611804e4666a25136fcc5f8cf425ab4d26c7f74ea245ffe92ea23b85b6420b5d" +checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" dependencies = [ "bindgen", "bzip2-sys", @@ -3172,14 +3460,14 @@ version = "0.7.0" source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" dependencies = [ "arrayref", - "base64 0.13.0", + "base64 0.13.1", "digest 0.9.0", "hmac-drbg", "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", "rand 0.8.5", - "serde 1.0.145", + "serde 1.0.163", "sha2 0.9.9", "typenum", ] @@ -3226,9 +3514,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" dependencies = [ "cc", "libc", @@ -3236,24 +3524,21 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "link-cplusplus" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" -dependencies = [ - "cc", -] - [[package]] name = "linked-hash-map" version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" dependencies = [ - "serde 1.0.145", + "serde 1.0.163", ] +[[package]] +name = "linux-raw-sys" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f" + [[package]] name = "lock_api" version = "0.3.4" @@ -3310,7 +3595,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0fbfc88337168279f2e9ae06e157cfed4efd3316e14dc96ed074d4f2e6c5952" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3331,7 +3616,7 @@ dependencies = [ "indexmap", "linked-hash-map", "regex", - "serde 1.0.145", + "serde 1.0.163", "serde_derive", "serde_yaml", ] @@ -3343,11 +3628,11 @@ source = "git+https://github.com/anoma/masp?rev=bee40fc465f6afbd10558d12fe96eb17 dependencies = [ "aes", "bip0039", - "bitvec", - "blake2b_simd 1.0.0", - "blake2s_simd 1.0.0", + "bitvec 0.22.3", + "blake2b_simd 1.0.1", + "blake2s_simd 1.0.1", "bls12_381", - "borsh", + "borsh 0.9.4", "byteorder", "chacha20poly1305", "crypto_api_chachapoly", @@ -3362,7 +3647,7 @@ dependencies = [ "rand_core 0.6.4", "ripemd160", "secp256k1 0.20.3", - "serde 1.0.145", + "serde 1.0.163", "sha2 0.9.9", "subtle", "zcash_encoding", @@ -3375,7 +3660,7 @@ version = "0.5.0" source = "git+https://github.com/anoma/masp?rev=bee40fc465f6afbd10558d12fe96eb1742eee45c#bee40fc465f6afbd10558d12fe96eb1742eee45c" dependencies = [ "bellman", - "blake2b_simd 1.0.0", + "blake2b_simd 1.0.1", "bls12_381", "byteorder", "directories", @@ -3403,9 +3688,15 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" + +[[package]] +name = "matchit" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" [[package]] name = "maybe-uninit" @@ -3431,27 
+3722,27 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memmap2" -version = "0.5.7" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95af15f345b17af2efc8ead6080fb8bc376f8cec1b35277b935637595fe77498" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" dependencies = [ "libc", ] [[package]] name = "memoffset" -version = "0.5.6" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg 1.1.0", ] [[package]] name = "memoffset" -version = "0.6.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg 1.1.0", ] @@ -3488,9 +3779,9 @@ dependencies = [ [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" @@ -3498,7 +3789,7 @@ version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ - "mime 0.3.16", + "mime 0.3.17", "unicase 2.6.0", ] @@ -3510,22 +3801,31 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.5.4" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + +[[package]] +name = "miniz_oxide" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "minreq" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c785bc6027fd359756e538541c8624012ba3776d3d3fe123885643092ed4132" +checksum = "cb6c6973f78ef55d0e5fc04fdb8f9ad67c87c9e86bca0ff77b6a3102b0eb36b7" dependencies = [ - "lazy_static", "log 0.4.17", - "rustls 0.20.7", + "once_cell", + "rustls 0.20.8", "webpki 0.22.0", "webpki-roots 0.22.6", ] @@ -3551,14 +3851,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log 0.4.17", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.36.1", + "windows-sys 0.45.0", ] [[package]] @@ -3579,35 +3879,27 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94c7128ba23c81f6471141b90f17654f89ef44a56e14b8a4dd0fddfccd655277" -[[package]] -name = "mktemp" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"975de676448231fcde04b9149d2543077e166b78fc29eae5aa219e7928410da2" -dependencies = [ - "uuid 0.8.2", -] - [[package]] name = "moka" -version = "0.8.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "975fa04238144061e7f8df9746b2e9cd93ef85881da5548d842a7c6a4b614415" +checksum = "19b9268097a2cf211ac9955b1cc95e80fa84fff5c2d13ba292916445dc8a311f" dependencies = [ - "crossbeam-channel 0.5.6", - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.8.12", + "crossbeam-channel 0.5.8", + "crossbeam-epoch", + "crossbeam-utils 0.8.15", "num_cpus", "once_cell", "parking_lot 0.12.1", "quanta", + "rustc_version 0.4.0", "scheduled-thread-pool", "skeptic", "smallvec 1.10.0", "tagptr", "thiserror", "triomphe", - "uuid 1.2.2", + "uuid 1.3.2", ] [[package]] @@ -3624,22 +3916,24 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.14.0" +version = "0.16.0" dependencies = [ "assert_matches", + "async-std", "async-trait", "bellman", + "bimap", "bls12_381", - "borsh", + "borsh 0.9.4", "byte-unit", "circular-queue", "clru", "data-encoding", "derivative", - "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", - "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", - "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", - "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", + "ibc 0.36.0 (git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=2d7edc16412b60cabf78163fe24a6264e11f77a9)", + "ibc 0.36.0 (git+https://github.com/heliaxdev/cosmos-ibc-rs?rev=db14744bfba6239cc5f58345ff90f8b7d42637d6)", + "ibc-proto 0.26.0 (git+https://github.com/heliaxdev/ibc-proto-rs.git?rev=7e527b5b8c95d83351e93ceafc14ac853224283f)", + "ibc-proto 0.26.0 (git+https://github.com/heliaxdev/ibc-proto-rs?rev=dd8ba23110a144ffe2074a0b889676468266435a)", "itertools", "libsecp256k1", "loupe", @@ -3647,29 +3941,32 @@ dependencies = [ "masp_proofs", "namada_core", "namada_proof_of_stake", + "namada_test_utils", + "orion", "parity-wasm", "paste", "pretty_assertions", "proptest", "prost", "pwasm-utils", + "rand 0.8.5", + "rand_core 0.6.4", "rayon", "rust_decimal", "rust_decimal_macros", + "serde 1.0.163", "serde_json", "sha2 0.9.9", "tempfile", - "tendermint 0.23.5", "tendermint 0.23.6", - "tendermint-proto 0.23.5", "tendermint-proto 0.23.6", - "tendermint-rpc 0.23.5", "tendermint-rpc 0.23.6", "test-log", "thiserror", "tokio", + "toml", "tracing 0.1.37", - "tracing-subscriber 0.3.16", + "tracing-subscriber 0.3.17", "wasmer", "wasmer-cache", "wasmer-compiler-singlepass", @@ -3682,18 +3979,19 @@ dependencies = [ [[package]] name = "namada_apps" -version = "0.14.0" +version = "0.16.0" dependencies = [ "ark-serialize", "ark-std", + "assert_matches", "async-std", "async-trait", - "base64 0.13.0", - "bech32", + "base64 0.13.1", + "bech32 0.8.1", "bimap", "bit-set", "blake2b-rs", - "borsh", + "borsh 0.9.4", "byte-unit", "byteorder", "clap", @@ -3701,13 +3999,14 @@ dependencies = [ "config", "data-encoding", "derivative", + "directories", "ed25519-consensus", "eyre", "ferveo", "ferveo-common", "file-lock", "flate2", - "futures 0.3.25", + "futures 0.3.28", "git2", "itertools", "libc", @@ -3715,7 +4014,9 @@ dependencies = [ "masp_primitives", "masp_proofs", "namada", + "namada_test_utils", "num-derive", + "num-rational", "num-traits 0.2.15", 
"num_cpus", "once_cell", @@ -3733,7 +4034,7 @@ dependencies = [ "rpassword", "rust_decimal", "rust_decimal_macros", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", "serde_json", "sha2 0.9.9", @@ -3742,13 +4043,9 @@ dependencies = [ "sysinfo", "tar", "tempfile", - "tendermint 0.23.5", "tendermint 0.23.6", - "tendermint-config 0.23.5", "tendermint-config 0.23.6", - "tendermint-proto 0.23.5", "tendermint-proto 0.23.6", - "tendermint-rpc 0.23.5", "tendermint-rpc 0.23.6", "test-log", "thiserror", @@ -3757,26 +4054,26 @@ dependencies = [ "toml", "tonic", "tower", - "tower-abci 0.1.0 (git+https://github.com/heliaxdev/tower-abci?rev=f6463388fc319b6e210503b43b3aecf6faf6b200)", - "tower-abci 0.1.0 (git+https://github.com/heliaxdev/tower-abci.git?rev=fcc0014d0bda707109901abfa1b2f782d242f082)", + "tower-abci 0.1.0 (git+https://github.com/heliaxdev/tower-abci.git?rev=79069a441cee7d9955a3d826d29656a0fb16115c)", + "tower-abci 0.1.0 (git+https://github.com/heliaxdev/tower-abci?rev=a31ce06533f5fbd943508676059d44de27395792)", "tracing 0.1.37", "tracing-log", - "tracing-subscriber 0.3.16", + "tracing-subscriber 0.3.17", "websocket", "winapi 0.3.9", ] [[package]] name = "namada_core" -version = "0.14.0" +version = "0.16.0" dependencies = [ "ark-bls12-381", "ark-ec", "ark-serialize", "assert_matches", - "bech32", + "bech32 0.8.1", "bellman", - "borsh", + "borsh 0.9.4", "chrono", "data-encoding", "derivative", @@ -3784,17 +4081,16 @@ dependencies = [ "ferveo", "ferveo-common", "group-threshold-cryptography", - "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", - "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", - "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", - "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", + "ibc 0.36.0 (git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=2d7edc16412b60cabf78163fe24a6264e11f77a9)", + "ibc 0.36.0 (git+https://github.com/heliaxdev/cosmos-ibc-rs?rev=db14744bfba6239cc5f58345ff90f8b7d42637d6)", + "ibc-proto 0.26.0 (git+https://github.com/heliaxdev/ibc-proto-rs.git?rev=7e527b5b8c95d83351e93ceafc14ac853224283f)", + "ibc-proto 0.26.0 (git+https://github.com/heliaxdev/ibc-proto-rs?rev=dd8ba23110a144ffe2074a0b889676468266435a)", "ics23", "index-set", "itertools", "libsecp256k1", "masp_primitives", "namada_macros", - "namada_tests", "pretty_assertions", "proptest", "prost", @@ -3804,27 +4100,25 @@ dependencies = [ "rayon", "rust_decimal", "rust_decimal_macros", - "serde 1.0.145", + "serde 1.0.163", "serde_json", "sha2 0.9.9", "sparse-merkle-tree", - "tendermint 0.23.5", "tendermint 0.23.6", - "tendermint-proto 0.23.5", "tendermint-proto 0.23.6", "test-log", "thiserror", "tonic-build", "tracing 0.1.37", - "tracing-subscriber 0.3.16", + "tracing-subscriber 0.3.17", "zeroize", ] [[package]] name = "namada_encoding_spec" -version = "0.14.0" +version = "0.16.0" dependencies = [ - "borsh", + "borsh 0.9.4", "itertools", "lazy_static", "madato", @@ -3833,47 +4127,48 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.14.0" +version = "0.16.0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "namada_proof_of_stake" -version = "0.14.0" +version = "0.16.0" dependencies = [ - "borsh", + "borsh 0.9.4", "data-encoding", "derivative", "hex", + "itertools", "namada_core", "once_cell", "proptest", "rust_decimal", 
"rust_decimal_macros", - "tendermint-proto 0.23.5", "test-log", "thiserror", "tracing 0.1.37", - "tracing-subscriber 0.3.16", + "tracing-subscriber 0.3.17", ] [[package]] name = "namada_test_utils" -version = "0.14.0" +version = "0.16.0" dependencies = [ - "borsh", + "borsh 0.9.4", "namada_core", + "strum", ] [[package]] name = "namada_tests" -version = "0.14.0" +version = "0.16.0" dependencies = [ "assert_cmd", - "borsh", + "borsh 0.9.4", "chrono", "color-eyre", "concat-idents", @@ -3884,9 +4179,8 @@ dependencies = [ "eyre", "file-serve", "fs_extra", - "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", - "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", "ibc-relayer", + "ibc-relayer-types", "itertools", "namada", "namada_apps", @@ -3912,14 +4206,14 @@ dependencies = [ "tokio", "toml", "tracing 0.1.37", - "tracing-subscriber 0.3.16", + "tracing-subscriber 0.3.17", ] [[package]] name = "namada_tx_prelude" -version = "0.14.0" +version = "0.16.0" dependencies = [ - "borsh", + "borsh 0.9.4", "masp_primitives", "namada_core", "namada_macros", @@ -3932,9 +4226,9 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.14.0" +version = "0.16.0" dependencies = [ - "borsh", + "borsh 0.9.4", "hex", "masp_primitives", "masp_proofs", @@ -3943,9 +4237,9 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.14.0" +version = "0.16.0" dependencies = [ - "borsh", + "borsh 0.9.4", "namada_core", "namada_macros", "namada_proof_of_stake", @@ -3954,20 +4248,11 @@ dependencies = [ "thiserror", ] -[[package]] -name = "nanoid" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" -dependencies = [ - "rand 0.8.5", -] - [[package]] name = "native-tls" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd7e2f3618557f980e0b17e8856252eee3c97fa12c54dff0ca290fb6266ca4a9" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", @@ -4007,9 +4292,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" +checksum = "8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" dependencies = [ "bitflags", "cc", @@ -4018,22 +4303,11 @@ dependencies = [ "memoffset 0.6.5", ] -[[package]] -name = "nix" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" -dependencies = [ - "bitflags", - "cfg-if 1.0.0", - "libc", -] - [[package]] name = "nom" -version = "5.1.2" +version = "5.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" +checksum = "08959a387a676302eebf4ddbcbc611da04285579f76f88ee0506c63b1a61dd4b" dependencies = [ "lexical-core", "memchr", @@ -4042,9 +4316,9 @@ dependencies = [ [[package]] name = "nom" -version = "7.1.1" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" 
dependencies = [ "memchr", "minimal-lexical", @@ -4098,14 +4372,14 @@ dependencies = [ "autocfg 1.1.0", "num-integer", "num-traits 0.2.15", - "serde 1.0.145", + "serde 1.0.163", ] [[package]] name = "num-complex" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" +checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" dependencies = [ "num-traits 0.2.15", ] @@ -4118,7 +4392,7 @@ checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4152,7 +4426,7 @@ dependencies = [ "num-bigint", "num-integer", "num-traits 0.2.15", - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -4171,24 +4445,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg 1.1.0", + "libm", ] [[package]] name = "num_cpus" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "num_threads" -version = "0.1.6" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ + "hermit-abi 0.2.6", "libc", ] @@ -4206,18 +4472,18 @@ dependencies = [ [[package]] name = "object" -version = "0.29.0" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.15.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "opaque-debug" @@ -4233,9 +4499,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.42" +version = "0.10.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" +checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -4248,13 +4514,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -4265,11 +4531,10 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.76" +version = "0.9.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5230151e44c0f05157effb743e8d517472843121cf9243e8b81393edb5acd9ce" +checksum = 
"8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e" dependencies = [ - "autocfg 1.1.0", "cc", "libc", "pkg-config", @@ -4285,8 +4550,8 @@ dependencies = [ "aes", "arrayvec 0.7.2", "bigint", - "bitvec", - "blake2b_simd 1.0.0", + "bitvec 0.22.3", + "blake2b_simd 1.0.1", "ff", "fpe", "group", @@ -4298,7 +4563,7 @@ dependencies = [ "pasta_curves", "rand 0.8.5", "reddsa", - "serde 1.0.145", + "serde 1.0.163", "subtle", "zcash_note_encryption 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -4310,7 +4575,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6624905ddd92e460ff0685567539ed1ac985b2dee4c92c7edcd64fce905b00c" dependencies = [ "ct-codecs", - "getrandom 0.2.7", + "getrandom 0.2.9", "subtle", "zeroize", ] @@ -4351,6 +4616,32 @@ dependencies = [ "group", ] +[[package]] +name = "parity-scale-codec" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" +dependencies = [ + "arrayvec 0.7.2", + "bitvec 1.0.1", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde 1.0.163", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "parity-wasm" version = "0.45.0" @@ -4359,9 +4650,9 @@ checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" [[package]] name = "parking" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" [[package]] name = "parking_lot" @@ -4370,7 +4661,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" dependencies = [ "lock_api 0.3.4", - "parking_lot_core 0.6.2", + "parking_lot_core 0.6.3", "rustc_version 0.2.3", ] @@ -4381,14 +4672,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api 0.4.9", - "parking_lot_core 0.9.4", + "parking_lot_core 0.9.7", ] [[package]] name = "parking_lot_core" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" +checksum = "bda66b810a62be75176a80873726630147a5ca780cd33921e0b5709033e66b0a" dependencies = [ "cfg-if 0.1.10", "cloudabi", @@ -4401,15 +4692,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall 0.2.16", "smallvec 1.10.0", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -4440,27 +4731,27 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.9" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pbkdf2" -version = "0.4.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +checksum = "f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" dependencies = [ - "crypto-mac 0.8.0", + "crypto-mac 0.11.1", + "password-hash", ] [[package]] name = "pbkdf2" -version = "0.9.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "crypto-mac 0.11.1", - "password-hash", + "digest 0.10.6", ] [[package]] @@ -4510,18 +4801,19 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.1.3" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" dependencies = [ + "thiserror", "ucd-trie", ] [[package]] name = "petgraph" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", "indexmap", @@ -4529,22 +4821,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -4559,22 +4851,11 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der", - "spki", - "zeroize", -] - [[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "polling" @@ -4603,15 +4884,15 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = 
"predicates" -version = "2.1.1" +version = "2.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5aab5be6e4732b473071984b3164dbbfb7a3674d30ea5ff44410b6bcd960c3c" +checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "itertools", @@ -4620,15 +4901,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da1c2388b1513e1b605fcec39a95e0a9e8ef088f71443ef37099fa9ae6673fcb" +checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" [[package]] name = "predicates-tree" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d86de6de25020a36c6d3643a86d9a6a9f552107c0559c60ea03551b5e16c032" +checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" dependencies = [ "predicates-core", "termtree", @@ -4646,6 +4927,38 @@ dependencies = [ "output_vt100", ] +[[package]] +name = "prettyplease" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +dependencies = [ + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "prettyplease" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" +dependencies = [ + "proc-macro2", + "syn 2.0.15", +] + +[[package]] +name = "primitive-types" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-serde", + "uint", +] + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -4655,6 +4968,16 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -4664,7 +4987,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check 0.9.4", ] @@ -4681,82 +5004,83 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.47" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] [[package]] name = "proptest" -version = "1.0.0" -source = "git+https://github.com/heliaxdev/proptest?branch=tomas/sm#b9517a726c032897a8b41c215147f44588b33dcc" +version = "1.1.0" +source = "git+https://github.com/heliaxdev/proptest?rev=8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1#8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1" dependencies = [ "bit-set", "bitflags", "byteorder", "lazy_static", "num-traits 0.2.15", - "quick-error 2.0.1", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift 0.3.0", - "regex-syntax", + "regex-syntax 0.6.29", "rusty-fork", "tempfile", + "unarray", ] [[package]] name = "prost" -version = "0.9.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", "prost-derive", ] [[package]] name = "prost-build" -version = "0.9.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", "heck", "itertools", "lazy_static", "log 0.4.17", "multimap", "petgraph", + "prettyplease 0.1.25", "prost", "prost-types", "regex", + "syn 1.0.109", "tempfile", "which", ] [[package]] name = "prost-derive" -version = "0.9.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "prost-types" -version = "0.9.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ - "bytes 1.2.1", "prost", ] @@ -4777,7 +5101,7 @@ checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4816,12 +5140,12 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" dependencies = [ - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.15", "libc", "mach", "once_cell", "raw-cpuid", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", "web-sys", "winapi 0.3.9", ] @@ -4832,17 +5156,11 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quote" -version = "1.0.21" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ "proc-macro2", ] @@ -4853,6 +5171,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.6.5" @@ -4956,7 +5280,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.9", ] [[package]] @@ -5041,9 +5365,9 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.6.0" +version = "10.7.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6823ea29436221176fe662da99998ad3b4db2c7f31e7b6f5fe43adccd6320bb" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ "bitflags", ] @@ -5062,13 +5386,13 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.3" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ - "crossbeam-channel 0.5.6", + "crossbeam-channel 0.5.8", "crossbeam-deque", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.15", "num_cpus", ] @@ -5094,7 +5418,7 @@ dependencies = [ "jubjub", "pasta_curves", "rand_core 0.6.4", - "serde 1.0.145", + "serde 1.0.163", "thiserror", "zeroize", ] @@ -5114,13 +5438,22 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.9", "redox_syscall 0.2.16", "thiserror", ] @@ -5138,13 +5471,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.1", ] [[package]] @@ -5153,14 +5486,20 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" [[package]] name = "region" @@ -5174,49 +5513,40 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "rend" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95" +checksum = "581008d2099240d37fb08d77ad713bcaec2c4d89d50b5b21a8bb1996bbab68ab" dependencies = [ "bytecheck", ] [[package]] name = "reqwest" -version = "0.11.12" +version = "0.11.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "431949c384f4e2ae07605ccaa56d1d9d2ecdb5cadd4f9577ccfab29f2e5149fc" +checksum = 
"13293b639a097af28fc8a90f22add145a9c954e49d77da06263d58cf44d5fb91" dependencies = [ - "base64 0.13.0", - "bytes 1.2.1", + "base64 0.21.0", + "bytes 1.4.0", "encoding_rs", "futures-core", "futures-util", "h2", "http", "http-body", - "hyper 0.14.20", + "hyper 0.14.26", "hyper-tls", "ipnet", "js-sys", "log 0.4.17", - "mime 0.3.16", + "mime 0.3.17", "native-tls", "once_cell", "percent-encoding 2.2.0", "pin-project-lite", - "serde 1.0.145", + "serde 1.0.163", "serde_json", "serde_urlencoded", "tokio", @@ -5231,9 +5561,9 @@ dependencies = [ [[package]] name = "retry" -version = "1.3.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac95c60a949a63fd2822f4964939662d8f2c16c4fa0624fd954bc6e703b9a3f6" +checksum = "9166d72162de3575f950507683fac47e30f6f2c3836b71b7fbc61aa517c9c5f4" [[package]] name = "rfc6979" @@ -5261,6 +5591,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.6", +] + [[package]] name = "ripemd160" version = "0.9.1" @@ -5274,27 +5613,30 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.39" +version = "0.7.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" +checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58" dependencies = [ + "bitvec 1.0.1", "bytecheck", "hashbrown 0.12.3", "ptr_meta", "rend", "rkyv_derive", "seahash", + "tinyvec", + "uuid 1.3.2", ] [[package]] name = "rkyv_derive" -version = "0.7.39" +version = "0.7.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" +checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5308,9 +5650,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.19.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9562ea1d70c0cc63a34a22d977753b50cca91cc6b6527750463bd5dd8697bc" +checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" dependencies = [ "libc", "librocksdb-sys", @@ -5339,9 +5681,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" dependencies = [ "arrayvec 0.7.2", - "borsh", + "borsh 0.9.4", "num-traits 0.2.15", - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -5356,9 +5698,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -5366,6 +5708,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + [[package]] name = "rustc_version" version = "0.2.3" @@ -5379,9 +5727,32 
@@ dependencies = [ name = "rustc_version" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.17", +] + +[[package]] +name = "rustix" +version = "0.37.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f79bef90eb6d984c72722595b5b1348ab39275a5e5123faca6863bf07d75a4e0" dependencies = [ - "semver 0.11.0", + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.48.0", ] [[package]] @@ -5390,7 +5761,7 @@ version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "log 0.4.17", "ring", "sct 0.6.1", @@ -5399,9 +5770,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.7" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log 0.4.17", "ring", @@ -5421,11 +5792,32 @@ dependencies = [ "security-framework", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +dependencies = [ + "base64 0.21.0", +] + [[package]] name = "rustversion" -version = "1.0.9" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "rusty-fork" @@ -5434,16 +5826,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error 1.2.3", + "quick-error", "tempfile", "wait-timeout", ] [[package]] name = "ryu" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safe-proc-macro2" @@ -5507,21 +5899,44 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scale-info" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfdef77228a4c05dc94211441595746732131ad7f6530c6c18f045da7b7ab937" +dependencies = [ + "cfg-if 1.0.0", + "derive_more", + "parity-scale-codec", + "scale-info-derive", +] + +[[package]] +name = "scale-info-derive" +version = "2.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "53012eae69e5aa5c14671942a5dd47de59d4cdcff8532a6dd0e081faf1119482" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "schannel" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "lazy_static", - "windows-sys 0.36.1", + "windows-sys 0.42.0", ] [[package]] name = "scheduled-thread-pool" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ "parking_lot 0.12.1", ] @@ -5532,12 +5947,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" - [[package]] name = "sct" version = "0.6.1" @@ -5571,8 +5980,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" dependencies = [ "der", - "generic-array 0.14.6", - "pkcs8", + "generic-array 0.14.7", "subtle", "zeroize", ] @@ -5588,12 +5996,14 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.22.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "295642060261c80709ac034f52fca8e5a9fa2c7d341ded5cdb164b7c33768b2a" +checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" dependencies = [ - "secp256k1-sys 0.5.2", - "serde 1.0.145", + "bitcoin_hashes", + "rand 0.8.5", + "secp256k1-sys 0.6.1", + "serde 1.0.163", ] [[package]] @@ -5607,9 +6017,9 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "152e20a0fd0519390fc43ab404663af8a0b794273d2a91d60ad4a39f13ffe110" +checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" dependencies = [ "cc", ] @@ -5629,9 +6039,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -5657,11 +6067,11 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" dependencies = [ - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -5687,9 +6097,9 @@ checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" [[package]] name = "serde" -version = "1.0.145" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] @@ -5708,11 +6118,11 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.7" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" +checksum = "416bda436f9aab92e02c8e10d49a15ddd339cea90b6e340fe51ed97abb548294" dependencies = [ - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -5722,40 +6132,40 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" dependencies = [ "half", - "serde 1.0.145", + "serde 1.0.163", ] [[package]] name = "serde_derive" -version = "1.0.145" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "serde_json" -version = "1.0.87" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", - "serde 1.0.145", + "serde 1.0.163", ] [[package]] name = "serde_repr" -version = "0.1.9" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" +checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -5767,7 +6177,7 @@ dependencies = [ "form_urlencoded", "itoa", "ryu", - "serde 1.0.145", + "serde 1.0.163", ] [[package]] @@ -5778,7 +6188,7 @@ checksum = "ef8099d3df28273c99a1728190c7a9f19d444c941044f64adf986bee7ec53051" dependencies = [ "dtoa", "linked-hash-map", - "serde 1.0.145", + "serde 1.0.163", "yaml-rust", ] @@ -5815,7 +6225,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -5839,19 +6249,17 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] name = "sha3" -version = "0.9.1" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", + "digest 0.10.6", "keccak", - "opaque-debug 0.3.0", ] [[package]] @@ -5871,9 +6279,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" +checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" dependencies = [ "libc", "signal-hook-registry", @@ -5881,9 +6289,9 @@ dependencies = [ [[package]] name 
= "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -5898,6 +6306,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "simdutf8" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + [[package]] name = "simple-error" version = "0.2.3" @@ -5921,9 +6335,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg 1.1.0", ] @@ -5945,27 +6359,21 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi 0.3.9", ] -[[package]] -name = "sp-std" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" - [[package]] name = "sparse-merkle-tree" version = "0.3.1-pre" -source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=04ad1eeb28901b57a7599bbe433b3822965dabe8#04ad1eeb28901b57a7599bbe433b3822965dabe8" +source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=e086b235ed6e68929bf73f617dd61cd17b000a56#e086b235ed6e68929bf73f617dd61cd17b000a56" dependencies = [ "blake2b-rs", - "borsh", + "borsh 0.9.4", "cfg-if 1.0.0", "ics23", "sha2 0.9.9", @@ -5977,16 +6385,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" -dependencies = [ - "base64ct", - "der", -] - [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -6005,10 +6403,32 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strum" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 1.0.109", +] + [[package]] name = "subproductdomain" version = "0.1.0" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" 
dependencies = [ "anyhow", "ark-ec", @@ -6041,9 +6461,9 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "syn" -version = "1.0.102" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -6051,17 +6471,22 @@ dependencies = [ ] [[package]] -name = "synstructure" -version = "0.12.6" +name = "syn" +version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", - "syn", - "unicode-xid", + "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "sysinfo" version = "0.21.1" @@ -6101,40 +6526,39 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.12.4" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1" +checksum = "fd1ba337640d60c3e96bc6f0638a939b9c9a7f2c316a1598c279828b3d1dc8c5" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if 1.0.0", "fastrand", - "libc", - "redox_syscall 0.2.16", - "remove_dir_all", - "winapi 0.3.9", + "redox_syscall 0.3.5", + "rustix", + "windows-sys 0.45.0", ] [[package]] name = "tendermint" version = "0.23.5" -source = "git+https://github.com/heliaxdev/tendermint-rs?rev=95c52476bc37927218374f94ac8e2a19bd35bec9#95c52476bc37927218374f94ac8e2a19bd35bec9" +source = "git+https://github.com/heliaxdev/tendermint-rs?rev=a3a0ad5f07d380976bbd5321239aec9cc3a8f916#a3a0ad5f07d380976bbd5321239aec9cc3a8f916" dependencies = [ "async-trait", - "bytes 1.2.1", + "bytes 1.4.0", "ed25519", "ed25519-dalek", "flex-error", - "futures 0.3.25", + "futures 0.3.28", "num-traits 0.2.15", "once_cell", "prost", "prost-types", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", "serde_json", "serde_repr", @@ -6143,28 +6567,28 @@ dependencies = [ "subtle", "subtle-encoding", "tendermint-proto 0.23.5", - "time 0.3.15", + "time 0.3.17", "zeroize", ] [[package]] name = "tendermint" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "async-trait", - "bytes 1.2.1", + "bytes 1.4.0", "ed25519", "ed25519-dalek", "flex-error", - "futures 0.3.25", + "futures 0.3.28", "k256", "num-traits 0.2.15", "once_cell", "prost", "prost-types", "ripemd160", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", "serde_json", "serde_repr", @@ -6173,17 +6597,17 @@ dependencies = [ "subtle", "subtle-encoding", "tendermint-proto 0.23.6", - "time 0.3.15", + "time 0.3.17", "zeroize", ] 
[[package]] name = "tendermint-config" version = "0.23.5" -source = "git+https://github.com/heliaxdev/tendermint-rs?rev=95c52476bc37927218374f94ac8e2a19bd35bec9#95c52476bc37927218374f94ac8e2a19bd35bec9" +source = "git+https://github.com/heliaxdev/tendermint-rs?rev=a3a0ad5f07d380976bbd5321239aec9cc3a8f916#a3a0ad5f07d380976bbd5321239aec9cc3a8f916" dependencies = [ "flex-error", - "serde 1.0.145", + "serde 1.0.163", "serde_json", "tendermint 0.23.5", "toml", @@ -6193,10 +6617,10 @@ dependencies = [ [[package]] name = "tendermint-config" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "flex-error", - "serde 1.0.145", + "serde 1.0.163", "serde_json", "tendermint 0.23.6", "toml", @@ -6206,101 +6630,94 @@ dependencies = [ [[package]] name = "tendermint-light-client" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "contracts", "crossbeam-channel 0.4.4", "derive_more", "flex-error", - "futures 0.3.25", - "serde 1.0.145", + "futures 0.3.28", + "serde 1.0.163", "serde_cbor", "serde_derive", "static_assertions", "tendermint 0.23.6", "tendermint-light-client-verifier 0.23.6", "tendermint-rpc 0.23.6", - "time 0.3.15", + "time 0.3.17", "tokio", ] [[package]] name = "tendermint-light-client-verifier" version = "0.23.5" -source = "git+https://github.com/heliaxdev/tendermint-rs?rev=95c52476bc37927218374f94ac8e2a19bd35bec9#95c52476bc37927218374f94ac8e2a19bd35bec9" +source = "git+https://github.com/heliaxdev/tendermint-rs?rev=a3a0ad5f07d380976bbd5321239aec9cc3a8f916#a3a0ad5f07d380976bbd5321239aec9cc3a8f916" dependencies = [ "derive_more", "flex-error", - "serde 1.0.145", + "serde 1.0.163", "tendermint 0.23.5", "tendermint-rpc 0.23.5", - "time 0.3.15", + "time 0.3.17", ] [[package]] name = "tendermint-light-client-verifier" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "derive_more", "flex-error", - "serde 1.0.145", + "serde 1.0.163", "tendermint 0.23.6", - "time 0.3.15", + "time 0.3.17", ] [[package]] name = "tendermint-proto" version = "0.23.5" -source = "git+https://github.com/heliaxdev/tendermint-rs?rev=95c52476bc37927218374f94ac8e2a19bd35bec9#95c52476bc37927218374f94ac8e2a19bd35bec9" +source = "git+https://github.com/heliaxdev/tendermint-rs?rev=a3a0ad5f07d380976bbd5321239aec9cc3a8f916#a3a0ad5f07d380976bbd5321239aec9cc3a8f916" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", "flex-error", "num-derive", "num-traits 0.2.15", "prost", "prost-types", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", "subtle-encoding", - "time 0.3.15", + "time 0.3.17", ] [[package]] name = "tendermint-proto" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" 
+source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", "flex-error", "num-derive", "num-traits 0.2.15", "prost", "prost-types", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", "subtle-encoding", - "time 0.3.15", + "time 0.3.17", ] [[package]] name = "tendermint-rpc" version = "0.23.5" -source = "git+https://github.com/heliaxdev/tendermint-rs?rev=95c52476bc37927218374f94ac8e2a19bd35bec9#95c52476bc37927218374f94ac8e2a19bd35bec9" +source = "git+https://github.com/heliaxdev/tendermint-rs?rev=a3a0ad5f07d380976bbd5321239aec9cc3a8f916#a3a0ad5f07d380976bbd5321239aec9cc3a8f916" dependencies = [ - "async-trait", - "async-tungstenite", - "bytes 1.2.1", + "bytes 1.4.0", "flex-error", - "futures 0.3.25", - "getrandom 0.2.7", - "http", - "hyper 0.14.20", - "hyper-proxy", - "hyper-rustls", + "getrandom 0.2.9", "peg", "pin-project", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", "serde_json", "subtle-encoding", @@ -6308,9 +6725,7 @@ dependencies = [ "tendermint-config 0.23.5", "tendermint-proto 0.23.5", "thiserror", - "time 0.3.15", - "tokio", - "tracing 0.1.37", + "time 0.3.17", "url 2.3.1", "uuid 0.8.2", "walkdir", @@ -6319,21 +6734,21 @@ dependencies = [ [[package]] name = "tendermint-rpc" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "async-trait", "async-tungstenite", - "bytes 1.2.1", + "bytes 1.4.0", "flex-error", - "futures 0.3.25", - "getrandom 0.2.7", + "futures 0.3.28", + "getrandom 0.2.9", "http", - "hyper 0.14.20", + "hyper 0.14.26", "hyper-proxy", "hyper-rustls", "peg", "pin-project", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", "serde_json", "subtle-encoding", @@ -6341,7 +6756,7 @@ dependencies = [ "tendermint-config 0.23.6", "tendermint-proto 0.23.6", "thiserror", - "time 0.3.15", + "time 0.3.17", "tokio", "tracing 0.1.37", "url 2.3.1", @@ -6352,47 +6767,47 @@ dependencies = [ [[package]] name = "tendermint-testgen" version = "0.23.5" -source = "git+https://github.com/heliaxdev/tendermint-rs?rev=95c52476bc37927218374f94ac8e2a19bd35bec9#95c52476bc37927218374f94ac8e2a19bd35bec9" +source = "git+https://github.com/heliaxdev/tendermint-rs?rev=a3a0ad5f07d380976bbd5321239aec9cc3a8f916#a3a0ad5f07d380976bbd5321239aec9cc3a8f916" dependencies = [ "ed25519-dalek", "gumdrop", - "serde 1.0.145", + "serde 1.0.163", "serde_json", "simple-error", "tempfile", "tendermint 0.23.5", - "time 0.3.15", + "time 0.3.17", ] [[package]] name = "tendermint-testgen" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "ed25519-dalek", "gumdrop", - "serde 1.0.145", + "serde 1.0.163", "serde_json", "simple-error", "tempfile", "tendermint 0.23.6", - "time 0.3.15", + "time 0.3.17", ] [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = 
"be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] [[package]] name = "termtree" -version = "0.2.4" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507e9898683b6c43a9aa55b64259b721b52ba226e0f3779137e50ad114a4c90b" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-log" @@ -6402,7 +6817,7 @@ checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6416,86 +6831,93 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.30" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if 1.0.0", "once_cell", ] [[package]] name = "tikv-jemalloc-sys" -version = "0.5.2+5.3.0-patched" +version = "0.5.3+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec45c14da997d0925c7835883e4d5c181f196fa142f8c19d7643d1e9af2592c3" +checksum = "a678df20055b43e57ef8cddde41cdfda9a3c1a060b67f4c5836dfb1d78543ba8" dependencies = [ "cc", - "fs_extra", "libc", ] [[package]] name = "time" -version = "0.1.44" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "time" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d634a985c4d4238ec39cacaed2e7ae552fbd3c476b552c1deac3021b7d7eaf0c" +checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ - "itoa", - "libc", - "num_threads", + "serde 1.0.163", + "time-core", "time-macros", ] +[[package]] +name = "time-core" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + [[package]] name = "time-macros" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +dependencies = [ + "time-core", +] [[package]] name = "tiny-bip39" -version = "0.8.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" +checksum = "62cc94d358b5a1e84a5cb9109f559aa3c4d634d2b1b4de3d0fa4adc7c78e2861" dependencies = [ "anyhow", - "hmac 0.8.1", + "hmac 0.12.1", "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", + "pbkdf2 0.11.0", + "rand 0.8.5", "rustc-hash", - "sha2 0.9.9", + "sha2 0.10.6", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -6513,15 +6935,14 @@ dependencies = [ [[package]] name = "tiny_http" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d6ef4e10d23c1efb862eecad25c5054429a71958b4eeef85eb5e7170b477ca" +checksum = "389915df6413a2e74fb181895f933386023c71110878cd0825588928e64cdc82" dependencies = [ "ascii", "chunked_transfer", + "httpdate", "log 0.4.17", - "time 0.3.15", - "url 2.3.1", ] [[package]] @@ -6535,28 +6956,27 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.21.2" +version = "1.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" dependencies = [ "autocfg 1.1.0", - "bytes 1.2.1", + "bytes 1.4.0", "libc", - "memchr", - "mio 0.8.4", + "mio 0.8.6", "num_cpus", "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "winapi 0.3.9", + "windows-sys 0.48.0", ] [[package]] @@ -6603,20 +7023,20 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "tokio-native-tls" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", @@ -6652,11 +7072,22 @@ dependencies = [ "webpki 0.21.4", ] +[[package]] +name = "tokio-rustls" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +dependencies = [ + "rustls 0.20.8", + "tokio", + "webpki 0.22.0", +] + [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", @@ -6694,7 +7125,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53474327ae5e166530d17f2d956afcb4f8a004de581b3cae10f12006bc8163e3" dependencies = [ "async-stream", - "bytes 1.2.1", + "bytes 1.4.0", "futures-core", "tokio", "tokio-stream", @@ -6717,7 +7148,7 @@ version = "0.6.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", "futures-core", "futures-sink", "log 0.4.17", @@ -6727,11 +7158,11 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ - "bytes 1.2.1", + "bytes 1.4.0", "futures-core", "futures-sink", "pin-project-lite", @@ -6741,39 +7172,58 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.9" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +dependencies = [ + "serde 1.0.163", +] + +[[package]] +name = "toml_datetime" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" + +[[package]] +name = "toml_edit" +version = "0.19.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" dependencies = [ - "serde 1.0.145", + "indexmap", + "toml_datetime", + "winnow", ] [[package]] name = "tonic" -version = "0.6.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08f4649d10a70ffa3522ca559031285d8e421d727ac85c60825761818f5d0a" +checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" dependencies = [ "async-stream", "async-trait", - "base64 0.13.0", - "bytes 1.2.1", + "axum", + "base64 0.13.1", + "bytes 1.4.0", "futures-core", "futures-util", "h2", "http", "http-body", - "hyper 0.14.20", + "hyper 0.14.26", "hyper-timeout", "percent-encoding 2.2.0", "pin-project", "prost", "prost-derive", - "rustls-native-certs", + "rustls-native-certs 0.6.2", + "rustls-pemfile", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tokio-stream", - "tokio-util 0.6.10", + "tokio-util 0.7.8", "tower", "tower-layer", "tower-service", @@ -6783,14 +7233,15 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.6.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" +checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" dependencies = [ + "prettyplease 0.1.25", "proc-macro2", "prost-build", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6808,7 +7259,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.8", "tower-layer", "tower-service", "tracing 0.1.37", @@ -6817,13 +7268,13 @@ dependencies = [ [[package]] name = "tower-abci" version = "0.1.0" -source = "git+https://github.com/heliaxdev/tower-abci?rev=f6463388fc319b6e210503b43b3aecf6faf6b200#f6463388fc319b6e210503b43b3aecf6faf6b200" +source = "git+https://github.com/heliaxdev/tower-abci.git?rev=79069a441cee7d9955a3d826d29656a0fb16115c#79069a441cee7d9955a3d826d29656a0fb16115c" dependencies = [ - "bytes 1.2.1", - "futures 0.3.25", + "bytes 1.4.0", + "futures 0.3.28", "pin-project", "prost", - "tendermint-proto 0.23.5", + "tendermint-proto 0.23.6", "tokio", "tokio-stream", 
"tokio-util 0.6.10", @@ -6835,13 +7286,13 @@ dependencies = [ [[package]] name = "tower-abci" version = "0.1.0" -source = "git+https://github.com/heliaxdev/tower-abci.git?rev=fcc0014d0bda707109901abfa1b2f782d242f082#fcc0014d0bda707109901abfa1b2f782d242f082" +source = "git+https://github.com/heliaxdev/tower-abci?rev=a31ce06533f5fbd943508676059d44de27395792#a31ce06533f5fbd943508676059d44de27395792" dependencies = [ - "bytes 1.2.1", - "futures 0.3.25", + "bytes 1.4.0", + "futures 0.3.28", "pin-project", "prost", - "tendermint-proto 0.23.6", + "tendermint-proto 0.23.5", "tokio", "tokio-stream", "tokio-util 0.6.10", @@ -6891,8 +7342,8 @@ dependencies = [ "cfg-if 1.0.0", "log 0.4.17", "pin-project-lite", - "tracing-attributes 0.1.23", - "tracing-core 0.1.30", + "tracing-attributes 0.1.24", + "tracing-core 0.1.31", ] [[package]] @@ -6902,18 +7353,18 @@ source = "git+https://github.com/tokio-rs/tracing/?tag=tracing-0.1.30#df4ba17d85 dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -6926,9 +7377,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -6971,7 +7422,7 @@ checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ "lazy_static", "log 0.4.17", - "tracing-core 0.1.30", + "tracing-core 0.1.31", ] [[package]] @@ -6980,8 +7431,8 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ - "serde 1.0.145", - "tracing-core 0.1.30", + "serde 1.0.163", + "tracing-core 0.1.31", ] [[package]] @@ -6992,26 +7443,26 @@ checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" dependencies = [ "sharded-slab", "thread_local", - "tracing-core 0.1.30", + "tracing-core 0.1.31", ] [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", - "serde 1.0.145", + "serde 1.0.163", "serde_json", "sharded-slab", "smallvec 1.10.0", "thread_local", "tracing 0.1.37", - "tracing-core 0.1.30", + "tracing-core 0.1.31", "tracing-log", "tracing-serde", ] @@ -7021,7 +7472,7 @@ name = "tracing-tower" version = "0.1.0" source = "git+https://github.com/tokio-rs/tracing/?tag=tracing-0.1.30#df4ba17d857db8ba1b553f7b293ac8ba967a42f8" dependencies = [ - "futures 0.3.25", + "futures 0.3.28", "pin-project-lite", "tower-layer", "tower-make", @@ -7044,9 +7495,9 @@ checksum = "f1ee9bd9239c339d714d657fac840c6d2a4f9c45f4f9ec7b0975113458be78db" [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" @@ -7054,9 +7505,9 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "byteorder", - "bytes 1.2.1", + "bytes 1.4.0", "http", "httparse", "input_buffer", @@ -7075,9 +7526,9 @@ checksum = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" @@ -7097,6 +7548,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicase" version = "1.4.2" @@ -7117,15 +7574,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.5" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -7136,12 +7593,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" - [[package]] name = "unicode-width" version = "0.1.10" @@ -7160,7 +7611,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] @@ -7209,17 +7660,14 @@ name = "uuid" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" -dependencies = [ - "getrandom 0.2.7", -] [[package]] name = "uuid" -version = "1.2.2" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" +checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.9", ] [[package]] @@ -7329,12 +7777,11 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" 
dependencies = [ "same-file", - "winapi 0.3.9", "winapi-util", ] @@ -7356,9 +7803,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasi" @@ -7368,9 +7815,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "5b6cb788c4e39112fbe1822277ef6fb3c55cd86b95cb3d3c4c1c9597e4ac74b4" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -7378,24 +7825,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "35e522ed4105a9d626d885b35d62501b30d9666283a5c8be12c14a8bdafe7822" dependencies = [ "bumpalo", "log 0.4.17", "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.15", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.33" +version = "0.4.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +checksum = "083abe15c5d88556b77bdf7aef403625be9e327ad37c62c4e4129af740168163" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -7405,9 +7852,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "358a79a0cb89d21db8120cbfb91392335913e4890665b1a7981d9e956903b434" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7415,28 +7862,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "4783ce29f09b9d93134d41297aded3a712b7b979e9c6f28c32cb88c973a94869" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "a901d592cafaa4d711bc324edfaff879ac700b19c3dfd60058d2b445be2691eb" [[package]] name = "wasm-encoder" -version = "0.18.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c64ac98d5d61192cc45c701b7e4bd0b9aff91e2edfc7a088406cfe2288581e2c" +checksum = "d05d0b6fcd0aeb98adf16e7975331b3c17222aa815148f5b976370ce589d80ef" dependencies = [ "leb128", ] @@ -7488,7 +7935,7 @@ dependencies = [ "enumset", "loupe", "rkyv", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", "smallvec 1.10.0", "target-lexicon", @@ -7547,7 +7994,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 
1.0.109", ] [[package]] @@ -7563,7 +8010,7 @@ dependencies = [ "memmap2", "more-asserts", "rustc-demangle", - "serde 1.0.145", + "serde 1.0.163", "serde_bytes", "target-lexicon", "thiserror", @@ -7586,7 +8033,7 @@ dependencies = [ "loupe", "object 0.28.4", "rkyv", - "serde 1.0.145", + "serde 1.0.163", "tempfile", "tracing 0.1.37", "wasmer-compiler", @@ -7638,7 +8085,7 @@ dependencies = [ "indexmap", "loupe", "rkyv", - "serde 1.0.145", + "serde 1.0.163", "thiserror", ] @@ -7659,7 +8106,7 @@ dependencies = [ "more-asserts", "region", "rkyv", - "serde 1.0.145", + "serde 1.0.163", "thiserror", "wasmer-types", "winapi 0.3.9", @@ -7679,9 +8126,9 @@ checksum = "718ed7c55c2add6548cca3ddd6383d738cd73b892df400e96b9aa876f0141d7a" [[package]] name = "wast" -version = "47.0.1" +version = "57.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b98502f3978adea49551e801a6687678e6015317d7d9470a67fe813393f2a8" +checksum = "6eb0f5ed17ac4421193c7477da05892c2edafd67f9639e3c11a82086416662dc" dependencies = [ "leb128", "memchr", @@ -7691,18 +8138,18 @@ dependencies = [ [[package]] name = "wat" -version = "1.0.49" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aab4e20c60429fbba9670a6cae0fff9520046ba0aa3e6d0b1cd2653bea14898" +checksum = "ab9ab0d87337c3be2bb6fc5cd331c4ba9fd6bcb4ee85048a0dd59ed9ecf92e53" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "16b5f940c7edfdc6d12126d98c9ef4d1b3d470011c47c76a6581df47ad9ba721" dependencies = [ "js-sys", "wasm-bindgen", @@ -7798,9 +8245,9 @@ dependencies = [ [[package]] name = "which" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", "libc", @@ -7864,16 +8311,12 @@ dependencies = [ ] [[package]] -name = "windows-sys" -version = "0.36.1" +name = "windows" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", + "windows-targets 0.48.0", ] [[package]] @@ -7882,20 +8325,74 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + 
"windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" @@ -7905,15 +8402,15 @@ checksum = "c3d027175d00b01e0cbeb97d6ab6ebe03b12330a35786cbaca5252b1c4bf5d9b" [[package]] name = "windows_aarch64_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" @@ -7923,15 +8420,15 @@ checksum = "8793f59f7b8e8b01eda1a652b2697d87b93097198ae85f823b969ca5b89bba58" [[package]] name = "windows_i686_gnu" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" @@ -7941,15 +8438,15 @@ checksum = "8602f6c418b67024be2996c512f5f995de3ba417f4c75af68401ab8756796ae4" [[package]] name = "windows_i686_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" @@ -7959,21 +8456,27 @@ checksum = "f3d615f419543e0bd7d2b3323af0d86ff19cbc4f816e6453f36a2c2ce889c354" [[package]] name = "windows_x86_64_gnu" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" @@ -7983,15 +8486,24 @@ checksum = "11d95421d9ed3672c280884da53201a5c46b7b2765ca6faf34b0d71cf34a3561" [[package]] name = "windows_x86_64_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + +[[package]] +name = "winnow" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +dependencies = [ + "memchr", +] [[package]] name = "winreg" @@ -8021,6 +8533,15 @@ dependencies = [ "tap", ] +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "xattr" version = "0.2.3" @@ -8042,7 +8563,7 @@ dependencies = [ [[package]] name = "zcash_encoding" version = "0.0.0" -source = "git+https://github.com/zcash/librustzcash?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" +source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" dependencies = [ "byteorder", "nonempty", @@ -8063,7 +8584,7 @@ dependencies = [ [[package]] name = "zcash_note_encryption" version = "0.1.0" -source = 
"git+https://github.com/zcash/librustzcash?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" +source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" dependencies = [ "chacha20", "chacha20poly1305", @@ -8074,13 +8595,13 @@ dependencies = [ [[package]] name = "zcash_primitives" version = "0.5.0" -source = "git+https://github.com/zcash/librustzcash?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" +source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" dependencies = [ "aes", "bip0039", - "bitvec", - "blake2b_simd 1.0.0", - "blake2s_simd 1.0.0", + "bitvec 0.22.3", + "blake2b_simd 1.0.1", + "blake2s_simd 1.0.1", "bls12_381", "byteorder", "chacha20poly1305", @@ -8100,16 +8621,16 @@ dependencies = [ "sha2 0.9.9", "subtle", "zcash_encoding", - "zcash_note_encryption 0.1.0 (git+https://github.com/zcash/librustzcash?rev=2425a08)", + "zcash_note_encryption 0.1.0 (git+https://github.com/zcash/librustzcash/?rev=2425a08)", ] [[package]] name = "zcash_proofs" version = "0.5.0" -source = "git+https://github.com/zcash/librustzcash?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" +source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" dependencies = [ "bellman", - "blake2b_simd 1.0.0", + "blake2b_simd 1.0.1", "bls12_381", "byteorder", "directories", @@ -8123,31 +8644,31 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.15", ] [[package]] name = "zstd-sys" -version = "2.0.1+zstd.1.5.2" +version = "2.0.8+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" dependencies = [ "cc", "libc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 84820607142..6c9c630d823 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,21 +37,21 @@ async-process = {git = "https://github.com/heliaxdev/async-process.git", rev = " # borsh-schema-derive-internal = {path = "../borsh-rs/borsh-schema-derive-internal"} # patched to a commit on the `eth-bridge-integration+consensus-timeout` branch of our fork -tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-config = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-rpc = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} 
-tendermint-light-client = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"}
-tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"}
+tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"}
+tendermint-config = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"}
+tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"}
+tendermint-rpc = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"}
+tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"}
+tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"}

 # patched to a commit on the `eth-bridge-integration` branch of our fork
-ibc = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"}
-ibc-proto = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"}
-ibc-relayer = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"}
+ibc = {git = "https://github.com/heliaxdev/cosmos-ibc-rs.git", rev = "2d7edc16412b60cabf78163fe24a6264e11f77a9"}
+ibc-proto = {git = "https://github.com/heliaxdev/ibc-proto-rs.git", rev = "7e527b5b8c95d83351e93ceafc14ac853224283f"}
+ibc-relayer = {git = "https://github.com/heliaxdev/hermes.git", rev = "8e2ff3479edc0653f34b22df450d451eedd2c2ab"}
+ibc-relayer-types = {git = "https://github.com/heliaxdev/hermes.git", rev = "8e2ff3479edc0653f34b22df450d451eedd2c2ab"}

 # patched to a commit on the `eth-bridge-integration` branch of our fork
-tower-abci = {git = "https://github.com/heliaxdev/tower-abci.git", rev = "fcc0014d0bda707109901abfa1b2f782d242f082"}
+tower-abci = {git = "https://github.com/heliaxdev/tower-abci.git", rev = "79069a441cee7d9955a3d826d29656a0fb16115c"}

 # patched to the yanked 1.2.0 until masp updates bitvec
 funty = { git = "https://github.com/bitvecto-rs/funty/", rev = "7ef0d890fbcd8b3def1635ac1a877fc298488446" }
@@ -60,3 +60,4 @@ funty = { git = "https://github.com/bitvecto-rs/funty/", rev = "7ef0d890fbcd8b3d
 lto = true
 opt-level = 3
 panic = "unwind"
+overflow-checks = true
diff --git a/Makefile b/Makefile
index 6ba375aac7c..d42b3fe3ece 100644
--- a/Makefile
+++ b/Makefile
@@ -20,7 +20,7 @@ build:
 	$(cargo) build

 build-test:
-	$(cargo) build --tests
+	$(cargo) +$(nightly) build --tests -Z unstable-options

 build-release:
 	NAMADA_DEV=false $(cargo) build --release --package namada_apps --manifest-path Cargo.toml

@@ -42,12 +42,13 @@ check:
 	$(foreach wasm,$(wasm_templates),$(check-wasm) && ) true

 check-abcipp:
-	$(cargo) check \
+	$(cargo) +$(nightly) check \
 		--workspace \
 		--exclude namada_tests \
 		--all-targets \
 		--no-default-features \
-		--features "abcipp ibc-mocks-abcipp testing"
+		--features "abcipp ibc-mocks-abcipp testing" \
+		-Z unstable-options

 check-mainnet:
 	$(cargo) check --workspace --features "mainnet"

@@ -108,11 +109,18 @@ audit:

 test: test-unit test-e2e test-wasm

+# NOTE: `unstable-options` is used twice for all unit tests - 1st to allow
+# compiling tests with unstable features, 2nd to run with `report-time`
test-unit-coverage: - $(cargo) llvm-cov --output-dir target --features namada/testing --html -- --skip e2e -Z unstable-options --report-time + $(cargo) +$(nightly) llvm-cov --output-dir target \ + --features namada/testing \ + --html \ + -Z unstable-options \ + -- --skip e2e -Z unstable-options --report-time test-e2e: RUST_BACKTRACE=1 $(cargo) test e2e \ + -Z unstable-options \ -- \ --test-threads=1 \ -Z unstable-options --report-time @@ -122,46 +130,53 @@ test-unit-abcipp: --manifest-path ./apps/Cargo.toml \ --no-default-features \ --features "testing std abcipp" \ - $(TEST_FILTER) -- \ - -Z unstable-options --report-time && \ + -Z unstable-options \ + $(TEST_FILTER) -- \ + -Z unstable-options --report-time && \ $(cargo) test \ --manifest-path \ ./proof_of_stake/Cargo.toml \ --features "testing" \ - $(TEST_FILTER) -- \ - -Z unstable-options --report-time && \ + -Z unstable-options \ + $(TEST_FILTER) -- \ + -Z unstable-options --report-time && \ $(cargo) test \ --manifest-path ./shared/Cargo.toml \ --no-default-features \ --features "testing wasm-runtime abcipp ibc-mocks-abcipp" \ - $(TEST_FILTER) -- \ - -Z unstable-options --report-time && \ + -Z unstable-options \ + $(TEST_FILTER) -- \ + -Z unstable-options --report-time && \ $(cargo) test \ --manifest-path ./vm_env/Cargo.toml \ --no-default-features \ --features "abcipp" \ - $(TEST_FILTER) -- \ - -Z unstable-options --report-time + -Z unstable-options \ + $(TEST_FILTER) -- \ + -Z unstable-options --report-time test-unit: - $(cargo) test \ - $(TEST_FILTER) -- \ - --skip e2e \ - -Z unstable-options --report-time + $(cargo) +$(nightly) test \ + $(TEST_FILTER) \ + -Z unstable-options \ + -- --skip e2e \ + -Z unstable-options --report-time test-unit-mainnet: - $(cargo) test \ + $(cargo) +$(nightly) test \ --features "mainnet" \ - $(TEST_FILTER) -- \ - --skip e2e \ + $(TEST_FILTER) \ + -Z unstable-options \ + -- --skip e2e \ -Z unstable-options --report-time test-unit-debug: - $(debug-cargo) test \ - $(TEST_FILTER) -- \ - --skip e2e \ - --nocapture \ - -Z unstable-options --report-time + $(debug-cargo) +$(nightly) test \ + $(TEST_FILTER) -- \ + -Z unstable-options \ + -- --skip e2e \ + --nocapture \ + -Z unstable-options --report-time test-wasm: make -C $(wasms) test @@ -175,6 +190,7 @@ test-wasm-templates: test-debug: $(debug-cargo) test \ + -Z unstable-options \ -- \ --nocapture \ -Z unstable-options --report-time diff --git a/README.md b/README.md index 85ba88d6e38..a3c45f712e1 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ the form of native protocol tokens. A multi-asset shielded transfer wallet is provided in order to facilitate safe and private user interaction with the protocol. -* Blogpost: [Introducing Namada: Shielded transfers with any assets](https://medium.com/namadanetwork/introducing-namada-shielded-transfers-with-any-assets-dce2e579384c) +* Blogpost: [Introducing Namada: Interchain Asset-agnostic Privacy](https://blog.namada.net/introducing-namada-interchain-asset-agnostic-privacy/) ## 📓 Docs @@ -87,4 +87,3 @@ Please see the [contributing page](./CONTRIBUTING.md). ### Dependencies The ledger currently requires that the Heliax fork of tendermint[v0.1.4-abciplus] is installed and available on path. 
This can be achieved through following [these instructions](https://docs.namada.net/user-guide/install/installing-tendermint.html) - diff --git a/apps/Cargo.toml b/apps/Cargo.toml index f1bf95a05d1..a6439fbb350 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -6,7 +6,7 @@ license = "GPL-3.0" name = "namada_apps" readme = "../README.md" resolver = "2" -version = "0.14.0" +version = "0.16.0" default-run = "namada" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -70,11 +70,12 @@ abciplus = [ ] [dependencies] -namada = {path = "../shared", default-features = false, features = ["wasm-runtime", "ferveo-tpke"]} +namada = {path = "../shared", default-features = false, features = ["wasm-runtime", "ferveo-tpke", "masp-tx-gen"]} ark-serialize = "0.3.0" ark-std = "0.3.0" # branch = "bat/arse-merkle-tree" -arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "04ad1eeb28901b57a7599bbe433b3822965dabe8", features = ["std", "borsh"]} +arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "e086b235ed6e68929bf73f617dd61cd17b000a56", features = ["std", "borsh"]} +assert_matches = "1.5.0" async-std = {version = "=1.11.0", features = ["unstable"]} async-trait = "0.1.51" base64 = "0.13.0" @@ -90,8 +91,8 @@ config = "0.11.0" data-encoding = "2.3.2" derivative = "2.2.0" ed25519-consensus = "1.2.0" -ferveo = {git = "https://github.com/anoma/ferveo"} -ferveo-common = {git = "https://github.com/anoma/ferveo"} +ferveo = {git = "https://github.com/anoma/ferveo", rev = "e5abd0acc938da90140351a65a26472eb495ce4d"} +ferveo-common = {git = "https://github.com/anoma/ferveo", rev = "e5abd0acc938da90140351a65a26472eb495ce4d"} eyre = "0.6.5" flate2 = "1.0.22" file-lock = "2.0.2" @@ -100,19 +101,20 @@ itertools = "0.10.1" libc = "0.2.97" libloading = "0.7.2" num-derive = "0.3.3" +num-rational = "0.4.1" num-traits = "0.2.14" num_cpus = "1.13.0" once_cell = "1.8.0" orion = "0.16.0" -prost = "0.9.0" -prost-types = "0.9.0" +prost = "0.11.6" +prost-types = "0.11.6" rand = {version = "0.8", default-features = false} rand_core = {version = "0.6", default-features = false} rayon = "=1.5.3" regex = "1.4.5" reqwest = "0.11.4" rlimit = "0.5.4" -rocksdb = {version = "0.19.0", features = ['zstd', 'jemalloc'], default-features = false} +rocksdb = {version = "0.21.0", features = ['zstd', 'jemalloc'], default-features = false} rpassword = "5.0.1" serde = {version = "1.0.125", features = ["derive"]} serde_bytes = "0.11.5" @@ -123,22 +125,22 @@ signal-hook = "0.3.9" sysinfo = {version = "=0.21.1", default-features = false} tar = "0.4.37" # temporarily using fork work-around -tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} -tendermint-config-abcipp = {package = "tendermint-config", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} -tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} -tendermint-rpc-abcipp = {package = "tendermint-rpc", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", features = ["http-client", "websocket-client"], optional = true} +tendermint-abcipp = {package = "tendermint", git = 
"https://github.com/heliaxdev/tendermint-rs", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35", optional = true} +tendermint-config-abcipp = {package = "tendermint-config", git = "https://github.com/heliaxdev/tendermint-rs", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35", optional = true} +tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35", optional = true} +tendermint-rpc-abcipp = {package = "tendermint-rpc", git = "https://github.com/heliaxdev/tendermint-rs", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35", features = ["http-client", "websocket-client"], optional = true} tendermint = {version = "0.23.6", optional = true} tendermint-config = {version = "0.23.6", optional = true} tendermint-proto = {version = "0.23.6", optional = true} tendermint-rpc = {version = "0.23.6", features = ["http-client", "websocket-client"], optional = true} -thiserror = "1.0.30" +thiserror = "1.0.38" tokio = {version = "1.8.2", features = ["full"]} toml = "0.5.8" -tonic = "0.6.1" +tonic = "0.8.3" tower = "0.4" # Also, using the same version of tendermint-rs as we do here. # with a patch for https://github.com/penumbra-zone/tower-abci/issues/7. -tower-abci-abcipp = {package = "tower-abci", git = "https://github.com/heliaxdev/tower-abci", rev = "f6463388fc319b6e210503b43b3aecf6faf6b200", optional = true} +tower-abci-abcipp = {package = "tower-abci", git = "https://github.com/heliaxdev/tower-abci", rev = "a31ce06533f5fbd943508676059d44de27395792", optional = true} tower-abci = {version = "0.1.0", optional = true} tracing = "0.1.30" tracing-log = "0.1.2" @@ -149,14 +151,16 @@ winapi = "0.3.9" masp_primitives = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c", features = ["transparent-inputs"] } masp_proofs = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c", features = ["bundled-prover", "download-params"] } bimap = {version = "0.6.2", features = ["serde"]} -rust_decimal = "1.26.1" -rust_decimal_macros = "1.26.1" +rust_decimal = "=1.26.1" +rust_decimal_macros = "=1.26.1" +directories = "4.0.1" [dev-dependencies] namada = {path = "../shared", default-features = false, features = ["testing", "wasm-runtime"]} +namada_test_utils = {path = "../test_utils"} bit-set = "0.5.2" # A fork with state machime testing -proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} +proptest = {git = "https://github.com/heliaxdev/proptest", rev = "8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1"} tempfile = "3.2.0" test-log = {version = "0.2.7", default-features = false, features = ["trace"]} tokio-test = "0.4.2" diff --git a/apps/src/bin/namada-client/cli.rs b/apps/src/bin/namada-client/cli.rs index 14a07bb6c10..10316cb6bf0 100644 --- a/apps/src/bin/namada-client/cli.rs +++ b/apps/src/bin/namada-client/cli.rs @@ -1,99 +1,304 @@ //! Namada client CLI. +use std::time::Duration; + use color_eyre::eyre::Result; -use namada_apps::cli; +use namada_apps::cli::args::CliToSdk; use namada_apps::cli::cmds::*; +use namada_apps::cli::{self, safe_exit}; use namada_apps::client::{rpc, tx, utils}; +use namada_apps::facade::tendermint::block::Height; +use namada_apps::facade::tendermint_config::net::Address as TendermintAddress; +use namada_apps::facade::tendermint_rpc::{Client, HttpClient}; +use tokio::time::sleep; pub async fn main() -> Result<()> { match cli::namada_client_cli()? 
{
        cli::NamadaClient::WithContext(cmd_box) => {
-            let (cmd, ctx) = *cmd_box;
+            let (cmd, mut ctx) = *cmd_box;
             use NamadaClientWithContext as Sub;
             match cmd {
                 // Ledger cmds
                 Sub::TxCustom(TxCustom(args)) => {
-                    tx::submit_custom(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    let dry_run = args.tx.dry_run;
+                    tx::submit_custom::<HttpClient>(&client, &mut ctx, args)
+                        .await?;
+                    if !dry_run {
+                        namada_apps::wallet::save(&ctx.wallet)
+                            .unwrap_or_else(|err| eprintln!("{}", err));
+                    } else {
+                        println!(
+                            "Transaction dry run. No addresses have been \
+                             saved."
+                        )
+                    }
                 }
                 Sub::TxTransfer(TxTransfer(args)) => {
-                    tx::submit_transfer(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    tx::submit_transfer(&client, ctx, args).await?;
                 }
                 Sub::TxIbcTransfer(TxIbcTransfer(args)) => {
-                    tx::submit_ibc_transfer(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    tx::submit_ibc_transfer::<HttpClient>(&client, ctx, args)
+                        .await?;
                 }
                 Sub::TxUpdateVp(TxUpdateVp(args)) => {
-                    tx::submit_update_vp(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    tx::submit_update_vp::<HttpClient>(&client, &mut ctx, args)
+                        .await?;
                 }
                 Sub::TxInitAccount(TxInitAccount(args)) => {
-                    tx::submit_init_account(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    let dry_run = args.tx.dry_run;
+                    tx::submit_init_account::<HttpClient>(
+                        &client, &mut ctx, args,
+                    )
+                    .await?;
+                    if !dry_run {
+                        namada_apps::wallet::save(&ctx.wallet)
+                            .unwrap_or_else(|err| eprintln!("{}", err));
+                    } else {
+                        println!(
+                            "Transaction dry run. No addresses have been \
+                             saved."
+                        )
+                    }
                 }
                 Sub::TxInitValidator(TxInitValidator(args)) => {
-                    tx::submit_init_validator(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    tx::submit_init_validator::<HttpClient>(&client, ctx, args)
+                        .await;
                 }
                 Sub::TxInitProposal(TxInitProposal(args)) => {
-                    tx::submit_init_proposal(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    tx::submit_init_proposal::<HttpClient>(&client, ctx, args)
+                        .await?;
                 }
                 Sub::TxVoteProposal(TxVoteProposal(args)) => {
-                    tx::submit_vote_proposal(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    tx::submit_vote_proposal::<HttpClient>(&client, ctx, args)
+                        .await?;
                 }
                 Sub::TxRevealPk(TxRevealPk(args)) => {
-                    tx::submit_reveal_pk(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    tx::submit_reveal_pk::<HttpClient>(&client, &mut ctx, args)
+                        .await?;
                 }
                 Sub::Bond(Bond(args)) => {
-                    tx::submit_bond(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    tx::submit_bond::<HttpClient>(&client, &mut ctx, args)
+                        .await?;
                 }
                 Sub::Unbond(Unbond(args)) => {
-                    tx::submit_unbond(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    tx::submit_unbond::<HttpClient>(&client, &mut ctx, args)
+                        .await?;
                 }
                 Sub::Withdraw(Withdraw(args)) => {
-                    tx::submit_withdraw(ctx, args).await;
+                    wait_until_node_is_synched(&args.tx.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.tx.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    tx::submit_withdraw::<HttpClient>(&client, ctx, args)
+                        .await?;
                 }
                 // Ledger queries
                 Sub::QueryEpoch(QueryEpoch(args)) => {
-                    rpc::query_and_print_epoch(args).await;
+                    wait_until_node_is_synched(&args.ledger_address).await;
+                    let client = HttpClient::new(args.ledger_address).unwrap();
+                    rpc::query_and_print_epoch(&client).await;
                 }
                 Sub::QueryTransfers(QueryTransfers(args)) => {
-                    rpc::query_transfers(ctx, args).await;
+                    wait_until_node_is_synched(&args.query.ledger_address)
+                        .await;
+                    let client =
+                        HttpClient::new(args.query.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    rpc::query_transfers(
+                        &client,
+                        &mut ctx.wallet,
+                        &mut ctx.shielded,
+                        args,
+                    )
+                    .await;
                 }
                 Sub::QueryConversions(QueryConversions(args)) => {
-                    rpc::query_conversions(ctx, args).await;
+                    wait_until_node_is_synched(&args.query.ledger_address)
+                        .await;
+                    let client =
+                        HttpClient::new(args.query.ledger_address.clone())
+                            .unwrap();
+                    let args = args.to_sdk(&mut ctx);
+                    rpc::query_conversions(&client, &mut ctx.wallet, args)
+                        .await;
                 }
                 Sub::QueryBlock(QueryBlock(args)) => {
-                    rpc::query_block(args).await;
+                    wait_until_node_is_synched(&args.ledger_address).await;
+                    let client =
+                        HttpClient::new(args.ledger_address.clone()).unwrap();
+                    rpc::query_block(&client).await;
                 }
                 Sub::QueryBalance(QueryBalance(args)) => {
-                    rpc::query_balance(ctx, args).await;
+
wait_until_node_is_synched(&args.query.ledger_address) + .await; + let client = + HttpClient::new(args.query.ledger_address.clone()) + .unwrap(); + let args = args.to_sdk(&mut ctx); + rpc::query_balance( + &client, + &mut ctx.wallet, + &mut ctx.shielded, + args, + ) + .await; } Sub::QueryBonds(QueryBonds(args)) => { - rpc::query_bonds(ctx, args).await; + wait_until_node_is_synched(&args.query.ledger_address) + .await; + let client = + HttpClient::new(args.query.ledger_address.clone()) + .unwrap(); + let args = args.to_sdk(&mut ctx); + rpc::query_bonds(&client, &mut ctx.wallet, args) + .await + .expect("expected successful query of bonds"); } Sub::QueryBondedStake(QueryBondedStake(args)) => { - rpc::query_bonded_stake(ctx, args).await; + wait_until_node_is_synched(&args.query.ledger_address) + .await; + let client = + HttpClient::new(args.query.ledger_address.clone()) + .unwrap(); + let args = args.to_sdk(&mut ctx); + rpc::query_bonded_stake(&client, args).await; } Sub::QueryCommissionRate(QueryCommissionRate(args)) => { - rpc::query_and_print_commission_rate(ctx, args).await; + wait_until_node_is_synched(&args.query.ledger_address) + .await; + let client = + HttpClient::new(args.query.ledger_address.clone()) + .unwrap(); + let args = args.to_sdk(&mut ctx); + rpc::query_and_print_commission_rate( + &client, + &mut ctx.wallet, + args, + ) + .await; } Sub::QuerySlashes(QuerySlashes(args)) => { - rpc::query_slashes(ctx, args).await; + wait_until_node_is_synched(&args.query.ledger_address) + .await; + let client = + HttpClient::new(args.query.ledger_address.clone()) + .unwrap(); + let args = args.to_sdk(&mut ctx); + rpc::query_slashes(&client, &mut ctx.wallet, args).await; } Sub::QueryDelegations(QueryDelegations(args)) => { - rpc::query_delegations(ctx, args).await; + wait_until_node_is_synched(&args.query.ledger_address) + .await; + let client = + HttpClient::new(args.query.ledger_address.clone()) + .unwrap(); + let args = args.to_sdk(&mut ctx); + rpc::query_delegations(&client, &mut ctx.wallet, args) + .await; } Sub::QueryResult(QueryResult(args)) => { - rpc::query_result(ctx, args).await; + wait_until_node_is_synched(&args.query.ledger_address) + .await; + // Connect to the Tendermint server holding the transactions + let client = + HttpClient::new(args.query.ledger_address.clone()) + .unwrap(); + let args = args.to_sdk(&mut ctx); + rpc::query_result(&client, args).await; } Sub::QueryRawBytes(QueryRawBytes(args)) => { - rpc::query_raw_bytes(ctx, args).await; + wait_until_node_is_synched(&args.query.ledger_address) + .await; + let client = + HttpClient::new(args.query.ledger_address.clone()) + .unwrap(); + let args = args.to_sdk(&mut ctx); + rpc::query_raw_bytes(&client, args).await; } Sub::QueryProposal(QueryProposal(args)) => { - rpc::query_proposal(ctx, args).await; + wait_until_node_is_synched(&args.query.ledger_address) + .await; + let client = + HttpClient::new(args.query.ledger_address.clone()) + .unwrap(); + let args = args.to_sdk(&mut ctx); + rpc::query_proposal(&client, args).await; } Sub::QueryProposalResult(QueryProposalResult(args)) => { - rpc::query_proposal_result(ctx, args).await; + wait_until_node_is_synched(&args.query.ledger_address) + .await; + let client = + HttpClient::new(args.query.ledger_address.clone()) + .unwrap(); + let args = args.to_sdk(&mut ctx); + rpc::query_proposal_result(&client, args).await; } Sub::QueryProtocolParameters(QueryProtocolParameters(args)) => { - rpc::query_protocol_parameters(ctx, args).await; + 
wait_until_node_is_synched(&args.query.ledger_address) + .await; + let client = + HttpClient::new(args.query.ledger_address.clone()) + .unwrap(); + let args = args.to_sdk(&mut ctx); + rpc::query_protocol_parameters(&client, args).await; } } } @@ -111,7 +316,58 @@ pub async fn main() -> Result<()> { Utils::InitGenesisValidator(InitGenesisValidator(args)) => { utils::init_genesis_validator(global_args, args) } + Utils::PkToTmAddress(PkToTmAddress(args)) => { + utils::pk_to_tm_address(global_args, args) + } }, } Ok(()) } + +/// Wait for a first block and for the node to be synced. Polls the node's +/// status a bounded number of times, sleeping with an increasing delay +/// between attempts, and exits if the node is still catching up. +async fn wait_until_node_is_synched(ledger_address: &TendermintAddress) { + let client = HttpClient::new(ledger_address.clone()).unwrap(); + let height_one = Height::try_from(1_u64).unwrap(); + let mut try_count = 0_u64; + const MAX_TRIES: u64 = 5; + + loop { + let node_status = client.status().await; + match node_status { + Ok(status) => { + let latest_block_height = status.sync_info.latest_block_height; + let is_catching_up = status.sync_info.catching_up; + let is_at_least_height_one = latest_block_height >= height_one; + if is_at_least_height_one && !is_catching_up { + return; + } else { + if try_count > MAX_TRIES { + println!( + "Node is still catching up, wait for it to finish \ syncing." + ); + safe_exit(1) + } else { + println!( + " Waiting for {} ({}/{} tries)...", + if is_at_least_height_one { + "a first block" + } else { + "node to sync" + }, + try_count + 1, + MAX_TRIES + ); + sleep(Duration::from_secs((try_count + 1).pow(2))) + .await; + } + try_count += 1; + } + } + Err(e) => { + eprintln!("Failed to query node status with error: {}", e); + safe_exit(1) + } + } + } +}
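// Illustrative sketch only, not part of this patch (`backoff_schedule` is a
// hypothetical helper): the loop above sleeps (try_count + 1)^2 seconds
// between status polls. Note that the `try_count > MAX_TRIES` guard only
// trips once try_count exceeds MAX_TRIES, so with MAX_TRIES = 5 the loop can
// sleep six times: 1 + 4 + 9 + 16 + 25 + 36 = 91 seconds in the worst case.
fn backoff_schedule(max_tries: u64) -> Vec<std::time::Duration> {
    // One entry per sleep the loop above can actually perform.
    (0..=max_tries)
        .map(|try_count| std::time::Duration::from_secs((try_count + 1).pow(2)))
        .collect()
}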
diff --git a/apps/src/bin/namada-node/cli.rs b/apps/src/bin/namada-node/cli.rs index 0f5305e4d4c..240e81f90cc 100644 --- a/apps/src/bin/namada-node/cli.rs +++ b/apps/src/bin/namada-node/cli.rs @@ -1,7 +1,7 @@ //! Namada node CLI. use eyre::{Context, Result}; -use namada::types::time::Utc; +use namada::types::time::{DateTimeUtc, Utc}; use namada_apps::cli::{self, cmds}; use namada_apps::node::ledger; @@ -14,23 +14,15 @@ pub fn main() -> Result<()> { cmds::NamadaNode::Ledger(sub) => match sub { cmds::Ledger::Run(cmds::LedgerRun(args)) => { let wasm_dir = ctx.wasm_dir(); - - // Sleep until start time if needed - if let Some(time) = args.0 { - if let Ok(sleep_time) = - time.0.signed_duration_since(Utc::now()).to_std() - { - if !sleep_time.is_zero() { - tracing::info!( - "Waiting ledger start time: {:?}, time left: \ {:?}", - time, - sleep_time - ); - std::thread::sleep(sleep_time) - } - } - } + sleep_until(args.start_time); + ctx.config.ledger.tendermint.tx_index = args.tx_index; + ledger::run(ctx.config.ledger, wasm_dir); + } + cmds::Ledger::RunUntil(cmds::LedgerRunUntil(args)) => { + let wasm_dir = ctx.wasm_dir(); + sleep_until(args.time); + ctx.config.ledger.shell.action_at_height = + Some(args.action_at_height); ledger::run(ctx.config.ledger, wasm_dir); } cmds::Ledger::Reset(_) => { @@ -40,6 +32,10 @@ cmds::Ledger::DumpDb(cmds::LedgerDumpDb(args)) => { ledger::dump_db(ctx.config.ledger, args); } + cmds::Ledger::RollBack(_) => { + ledger::rollback(ctx.config.ledger) + .wrap_err("Failed to roll back the Namada node")?; + } }, cmds::NamadaNode::Config(sub) => match sub { cmds::Config::Gen(cmds::ConfigGen) => { @@ -64,3 +60,22 @@ } Ok(()) } + +/// Sleep until the given start time if necessary. +fn sleep_until(time: Option<DateTimeUtc>) { + // Sleep until start time if needed + if let Some(time) = time { + if let Ok(sleep_time) = + time.0.signed_duration_since(Utc::now()).to_std() + { + if !sleep_time.is_zero() { + tracing::info!( + "Waiting ledger start time: {:?}, time left: {:?}", + time, + sleep_time + ); + std::thread::sleep(sleep_time) + } + } + } +} diff --git a/apps/src/bin/namada-wallet/cli.rs b/apps/src/bin/namada-wallet/cli.rs index 82a994b0aca..150e2af4c7d 100644 --- a/apps/src/bin/namada-wallet/cli.rs +++ b/apps/src/bin/namada-wallet/cli.rs @@ -7,16 +7,20 @@ use borsh::BorshSerialize; use color_eyre::eyre::Result; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; +use namada::ledger::masp::find_valid_diversifier; +use namada::ledger::wallet::FindKeyError; use namada::types::key::*; use namada::types::masp::{MaspValue, PaymentAddress}; use namada_apps::cli; +use namada_apps::cli::args::CliToSdk; use namada_apps::cli::{args, cmds, Context}; -use namada_apps::client::tx::find_valid_diversifier; -use namada_apps::wallet::{DecryptionError, FindKeyError}; +use namada_apps::wallet::{ + read_and_confirm_pwd, CliWalletUtils, DecryptionError, +}; use rand_core::OsRng; pub fn main() -> Result<()> { - let (cmd, ctx) = cli::namada_wallet_cli()?; + let (cmd, mut ctx) = cli::namada_wallet_cli()?; match cmd { cmds::NamadaWallet::Key(sub) => match sub { cmds::WalletKey::Gen(cmds::KeyGen(args)) => { @@ -45,6 +49,7 @@ pub fn main() -> Result<()> { spending_key_gen(ctx, args) } cmds::WalletMasp::GenPayAddr(cmds::MaspGenPayAddr(args)) => { + let args = args.to_sdk(&mut ctx); payment_address_gen(ctx, args) } cmds::WalletMasp::AddAddrKey(cmds::MaspAddAddrKey(args)) => { @@ -79,7 +84,7 @@ fn address_key_find( println!("Viewing key: {}", viewing_key); if unsafe_show_secret { // Check if alias is also a spending key - match wallet.find_spending_key(&alias) { + match wallet.find_spending_key(&alias, None) { Ok(spending_key) => println!("Spending key: {}", spending_key), Err(FindKeyError::KeyNotFound) => {} Err(err) => eprintln!("{}", err), @@ -141,7 +146,7 @@ fn spending_keys_list( // Print those too if they are available and requested. if unsafe_show_secret { if let Some(spending_key) = spending_key_opt { - match spending_key.get(decrypt, None) { + match spending_key.get::<CliWalletUtils>(decrypt, None) { // Here the spending key is unencrypted or successfully // decrypted Ok(spending_key) => { @@ -194,13 +199,16 @@ fn spending_key_gen( ctx: Context, args::MaspSpendKeyGen { alias, + alias_force, unsafe_dont_encrypt, }: args::MaspSpendKeyGen, ) { let mut wallet = ctx.wallet; let alias = alias.to_lowercase(); - let (alias, _key) = wallet.gen_spending_key(alias, unsafe_dont_encrypt); - wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); + let password = read_and_confirm_pwd(unsafe_dont_encrypt); + let (alias, _key) = wallet.gen_spending_key(alias, password, alias_force); + namada_apps::wallet::save(&wallet) + .unwrap_or_else(|err| eprintln!("{}", err)); println!( "Successfully added a spending key with alias: \"{}\"", alias ); } @@ -209,18 +217,16 @@ /// Generate a shielded payment address from the given key.
fn payment_address_gen( - mut ctx: Context, + ctx: Context, args::MaspPayAddrGen { alias, + alias_force, viewing_key, pin, }: args::MaspPayAddrGen, ) { let alias = alias.to_lowercase(); - let viewing_key = - ExtendedFullViewingKey::from(ctx.get_cached(&viewing_key)) - .fvk - .vk; + let viewing_key = ExtendedFullViewingKey::from(viewing_key).fvk.vk; let (div, _g_d) = find_valid_diversifier(&mut OsRng); let payment_addr = viewing_key .to_payment_address(div) @@ -230,12 +236,14 @@ fn payment_address_gen( .insert_payment_addr( alias, PaymentAddress::from(payment_addr).pinned(pin), + alias_force, ) .unwrap_or_else(|| { eprintln!("Payment address not added"); cli::safe_exit(1); }); - wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); + namada_apps::wallet::save(&wallet) + .unwrap_or_else(|err| eprintln!("{}", err)); println!( "Successfully generated a payment address with the following alias: {}", alias, @@ -247,6 +255,7 @@ fn address_key_add( mut ctx: Context, args::MaspAddrKeyAdd { alias, + alias_force, value, unsafe_dont_encrypt, }: args::MaspAddrKeyAdd, @@ -256,7 +265,7 @@ fn address_key_add( MaspValue::FullViewingKey(viewing_key) => { let alias = ctx .wallet - .insert_viewing_key(alias, viewing_key) + .insert_viewing_key(alias, viewing_key, alias_force) .unwrap_or_else(|| { eprintln!("Viewing key not added"); cli::safe_exit(1); @@ -264,13 +273,10 @@ fn address_key_add( (alias, "viewing key") } MaspValue::ExtendedSpendingKey(spending_key) => { + let password = read_and_confirm_pwd(unsafe_dont_encrypt); let alias = ctx .wallet - .encrypt_insert_spending_key( - alias, - spending_key, - unsafe_dont_encrypt, - ) + .encrypt_insert_spending_key(alias, spending_key, password, alias_force) .unwrap_or_else(|| { eprintln!("Spending key not added"); cli::safe_exit(1); @@ -280,7 +286,7 @@ fn address_key_add( MaspValue::PaymentAddress(payment_addr) => { let alias = ctx .wallet - .insert_payment_addr(alias, payment_addr) + .insert_payment_addr(alias, payment_addr, alias_force) .unwrap_or_else(|| { eprintln!("Payment address not added"); cli::safe_exit(1); @@ -288,7 +294,8 @@ fn address_key_add( (alias, "payment address") } }; - ctx.wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); + namada_apps::wallet::save(&ctx.wallet) + .unwrap_or_else(|err| eprintln!("{}", err)); println!( "Successfully added a {} with the following alias to wallet: {}", typ, alias, @@ -302,12 +309,15 @@ fn key_and_address_gen( args::KeyAndAddressGen { scheme, alias, + alias_force, unsafe_dont_encrypt, }: args::KeyAndAddressGen, ) { let mut wallet = ctx.wallet; - let (alias, _key) = wallet.gen_key(scheme, alias, unsafe_dont_encrypt); - wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); + let password = read_and_confirm_pwd(unsafe_dont_encrypt); + let (alias, _key) = wallet.gen_key(scheme, alias, password, alias_force); + namada_apps::wallet::save(&wallet) + .unwrap_or_else(|err| eprintln!("{}", err)); println!( "Successfully added a key and an address with alias: \"{}\"", alias @@ -326,7 +336,7 @@ fn key_find( ) { let mut wallet = ctx.wallet; let found_keypair = match public_key { - Some(pk) => wallet.find_key_by_pk(&pk), + Some(pk) => wallet.find_key_by_pk(&pk, None), None => { let alias = alias.or(value); match alias { @@ -337,7 +347,7 @@ fn key_find( ); cli::safe_exit(1) } - Some(alias) => wallet.find_key(alias.to_lowercase()), + Some(alias) => wallet.find_key(alias.to_lowercase(), None), } } }; @@ -385,7 +395,7 @@ fn key_list( if let Some(pkh) = pkh { writeln!(w, " Public key hash: {}", pkh).unwrap(); } - 
match stored_keypair.get(decrypt, None) { + match stored_keypair.get::(decrypt, None) { Ok(keypair) => { writeln!(w, " Public key: {}", keypair.ref_to()) .unwrap(); @@ -409,7 +419,7 @@ fn key_list( fn key_export(ctx: Context, args::KeyExport { alias }: args::KeyExport) { let mut wallet = ctx.wallet; wallet - .find_key(alias.to_lowercase()) + .find_key(alias.to_lowercase(), None) .map(|keypair| { let file_data = keypair .try_to_vec() @@ -482,13 +492,14 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { fn address_add(ctx: Context, args: args::AddressAdd) { let mut wallet = ctx.wallet; if wallet - .add_address(args.alias.clone().to_lowercase(), args.address) + .add_address(args.alias.clone().to_lowercase(), args.address, args.alias_force) .is_none() { eprintln!("Address not added"); cli::safe_exit(1); } - wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); + namada_apps::wallet::save(&wallet) + .unwrap_or_else(|err| eprintln!("{}", err)); println!( "Successfully added a key and an address with alias: \"{}\"", args.alias.to_lowercase() diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 9c23cadb468..8e938ea7833 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -617,7 +617,7 @@ pub mod cmds { /// Generate a payment address from a viewing key or payment address #[derive(Clone, Debug)] - pub struct MaspGenPayAddr(pub args::MaspPayAddrGen); + pub struct MaspGenPayAddr(pub args::MaspPayAddrGen); impl SubCmd for MaspGenPayAddr { const CMD: &'static str = "gen-addr"; @@ -633,7 +633,7 @@ pub mod cmds { .about( "Generates a payment address from the given spending key", ) - .add_args::() + .add_args::>() } } @@ -759,8 +759,10 @@ pub mod cmds { #[derive(Clone, Debug)] pub enum Ledger { Run(LedgerRun), + RunUntil(LedgerRunUntil), Reset(LedgerReset), DumpDb(LedgerDumpDb), + RollBack(LedgerRollBack), } impl SubCmd for Ledger { @@ -771,10 +773,17 @@ pub mod cmds { let run = SubCmd::parse(matches).map(Self::Run); let reset = SubCmd::parse(matches).map(Self::Reset); let dump_db = SubCmd::parse(matches).map(Self::DumpDb); + let rollback = SubCmd::parse(matches).map(Self::RollBack); + let run_until = SubCmd::parse(matches).map(Self::RunUntil); run.or(reset) .or(dump_db) + .or(rollback) + .or(run_until) // The `run` command is the default if no sub-command given - .or(Some(Self::Run(LedgerRun(args::LedgerRun(None))))) + .or(Some(Self::Run(LedgerRun(args::LedgerRun { + start_time: None, + tx_index: false, + })))) }) } @@ -785,8 +794,10 @@ pub mod cmds { defaults to run the node.", ) .subcommand(LedgerRun::def()) + .subcommand(LedgerRunUntil::def()) .subcommand(LedgerReset::def()) .subcommand(LedgerDumpDb::def()) + .subcommand(LedgerRollBack::def()) } } @@ -809,6 +820,28 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct LedgerRunUntil(pub args::LedgerRunUntil); + + impl SubCmd for LedgerRunUntil { + const CMD: &'static str = "run-until"; + + fn parse(matches: &ArgMatches) -> Option { + matches + .subcommand_matches(Self::CMD) + .map(|matches| Self(args::LedgerRunUntil::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about( + "Run Namada ledger node until a given height. 
Then halt \ or suspend.", ) .add_args::<args::LedgerRunUntil>() } } #[derive(Clone, Debug)] pub struct LedgerReset; @@ -846,6 +879,26 @@ } } + #[derive(Clone, Debug)] + pub struct LedgerRollBack; + + impl SubCmd for LedgerRollBack { + const CMD: &'static str = "rollback"; + + fn parse(matches: &ArgMatches) -> Option { + matches.subcommand_matches(Self::CMD).map(|_matches| Self) + } + + fn def() -> App { + App::new(Self::CMD).about( + "Roll Namada state back to the previous height. This command \ + backs up neither the Namada nor the Tendermint state before \ + execution; for extra safety, it is recommended to make a \ + backup in advance.", + ) + } + } + #[derive(Clone, Debug)] pub enum Config { Gen(ConfigGen), @@ -885,7 +938,7 @@ } #[derive(Clone, Debug)] - pub struct QueryResult(pub args::QueryResult); + pub struct QueryResult(pub args::QueryResult<args::CliTypes>); impl SubCmd for QueryResult { const CMD: &'static str = "tx-result"; @@ -899,12 +952,12 @@ fn def() -> App { App::new(Self::CMD) .about("Query the result of a transaction.") - .add_args::<args::QueryResult>() + .add_args::<args::QueryResult<args::CliTypes>>() } } #[derive(Clone, Debug)] - pub struct QueryProposal(pub args::QueryProposal); + pub struct QueryProposal(pub args::QueryProposal<args::CliTypes>); impl SubCmd for QueryProposal { const CMD: &'static str = "query-proposal"; @@ -921,12 +974,14 @@ fn def() -> App { App::new(Self::CMD) .about("Query proposals.") - .add_args::<args::QueryProposal>() + .add_args::<args::QueryProposal<args::CliTypes>>() } } #[derive(Clone, Debug)] - pub struct QueryProposalResult(pub args::QueryProposalResult); + pub struct QueryProposalResult( + pub args::QueryProposalResult<args::CliTypes>, + ); impl SubCmd for QueryProposalResult { const CMD: &'static str = "query-proposal-result"; @@ -943,12 +998,14 @@ fn def() -> App { App::new(Self::CMD) .about("Query a proposal's result.") - .add_args::<args::QueryProposalResult>() + .add_args::<args::QueryProposalResult<args::CliTypes>>() } } #[derive(Clone, Debug)] - pub struct QueryProtocolParameters(pub args::QueryProtocolParameters); + pub struct QueryProtocolParameters( + pub args::QueryProtocolParameters<args::CliTypes>, + ); impl SubCmd for QueryProtocolParameters { const CMD: &'static str = "query-protocol-parameters"; @@ -967,12 +1024,12 @@ fn def() -> App { App::new(Self::CMD) .about("Query protocol parameters.") - .add_args::<args::QueryProtocolParameters>() + .add_args::<args::QueryProtocolParameters<args::CliTypes>>() } } #[derive(Clone, Debug)] - pub struct TxCustom(pub args::TxCustom); + pub struct TxCustom(pub args::TxCustom<args::CliTypes>); impl SubCmd for TxCustom { const CMD: &'static str = "tx"; @@ -986,12 +1043,12 @@ fn def() -> App { App::new(Self::CMD) .about("Send a transaction with custom WASM code.") - .add_args::<args::TxCustom>() + .add_args::<args::TxCustom<args::CliTypes>>() } } #[derive(Clone, Debug)] - pub struct TxTransfer(pub args::TxTransfer); + pub struct TxTransfer(pub args::TxTransfer<args::CliTypes>); impl SubCmd for TxTransfer { const CMD: &'static str = "transfer"; @@ -1005,12 +1062,12 @@ fn def() -> App { App::new(Self::CMD) .about("Send a signed transfer transaction.") - .add_args::<args::TxTransfer>() + .add_args::<args::TxTransfer<args::CliTypes>>() } } #[derive(Clone, Debug)] - pub struct TxIbcTransfer(pub args::TxIbcTransfer); + pub struct TxIbcTransfer(pub args::TxIbcTransfer<args::CliTypes>); impl SubCmd for TxIbcTransfer { const CMD: &'static str = "ibc-transfer"; @@ -1024,12 +1081,12 @@ fn def() -> App { App::new(Self::CMD) .about("Send a signed IBC transfer transaction.") - .add_args::<args::TxIbcTransfer>() + .add_args::<args::TxIbcTransfer<args::CliTypes>>() } } #[derive(Clone, Debug)] - pub struct TxUpdateVp(pub args::TxUpdateVp); + pub struct TxUpdateVp(pub args::TxUpdateVp<args::CliTypes>); impl SubCmd for TxUpdateVp { const CMD: &'static str = "update"; @@ -1046,12 +1103,12 @@ pub 
mod cmds { "Send a signed transaction to update account's validity \ predicate.", ) - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct TxInitAccount(pub args::TxInitAccount); + pub struct TxInitAccount(pub args::TxInitAccount); impl SubCmd for TxInitAccount { const CMD: &'static str = "init-account"; @@ -1068,12 +1125,12 @@ pub mod cmds { "Send a signed transaction to create a new established \ account.", ) - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct TxInitValidator(pub args::TxInitValidator); + pub struct TxInitValidator(pub args::TxInitValidator); impl SubCmd for TxInitValidator { const CMD: &'static str = "init-validator"; @@ -1090,12 +1147,12 @@ pub mod cmds { "Send a signed transaction to create a new validator \ account.", ) - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct Bond(pub args::Bond); + pub struct Bond(pub args::Bond); impl SubCmd for Bond { const CMD: &'static str = "bond"; @@ -1109,12 +1166,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Bond tokens in PoS system.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct Unbond(pub args::Unbond); + pub struct Unbond(pub args::Unbond); impl SubCmd for Unbond { const CMD: &'static str = "unbond"; @@ -1128,12 +1185,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Unbond tokens from a PoS bond.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct Withdraw(pub args::Withdraw); + pub struct Withdraw(pub args::Withdraw); impl SubCmd for Withdraw { const CMD: &'static str = "withdraw"; @@ -1147,12 +1204,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Withdraw tokens from previously unbonded PoS bond.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct QueryEpoch(pub args::Query); + pub struct QueryEpoch(pub args::Query); impl SubCmd for QueryEpoch { const CMD: &'static str = "epoch"; @@ -1166,12 +1223,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Query the epoch of the last committed block.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct QueryConversions(pub args::QueryConversions); + pub struct QueryConversions(pub args::QueryConversions); impl SubCmd for QueryConversions { const CMD: &'static str = "conversions"; @@ -1185,12 +1242,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Query currently applicable conversions.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct QueryBlock(pub args::Query); + pub struct QueryBlock(pub args::Query); impl SubCmd for QueryBlock { const CMD: &'static str = "block"; @@ -1204,12 +1261,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Query the last committed block.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct QueryBalance(pub args::QueryBalance); + pub struct QueryBalance(pub args::QueryBalance); impl SubCmd for QueryBalance { const CMD: &'static str = "balance"; @@ -1223,12 +1280,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Query balance(s) of tokens.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct QueryBonds(pub args::QueryBonds); + pub struct QueryBonds(pub args::QueryBonds); impl SubCmd for QueryBonds { const CMD: &'static str = "bonds"; @@ -1242,12 +1299,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Query PoS bond(s).") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub 
struct QueryBondedStake(pub args::QueryBondedStake); + pub struct QueryBondedStake(pub args::QueryBondedStake); impl SubCmd for QueryBondedStake { const CMD: &'static str = "bonded-stake"; @@ -1261,12 +1318,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Query PoS bonded stake.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct QueryTransfers(pub args::QueryTransfers); + pub struct QueryTransfers(pub args::QueryTransfers); impl SubCmd for QueryTransfers { const CMD: &'static str = "show-transfers"; @@ -1280,12 +1337,14 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Query the accepted transfers to date.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct QueryCommissionRate(pub args::QueryCommissionRate); + pub struct QueryCommissionRate( + pub args::QueryCommissionRate, + ); impl SubCmd for QueryCommissionRate { const CMD: &'static str = "commission-rate"; @@ -1299,12 +1358,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Query commission rate.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct QuerySlashes(pub args::QuerySlashes); + pub struct QuerySlashes(pub args::QuerySlashes); impl SubCmd for QuerySlashes { const CMD: &'static str = "slashes"; @@ -1321,12 +1380,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Query PoS applied slashes.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct QueryDelegations(pub args::QueryDelegations); + pub struct QueryDelegations(pub args::QueryDelegations); impl SubCmd for QueryDelegations { const CMD: &'static str = "delegations"; @@ -1343,12 +1402,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Find PoS delegations from the given owner address.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct QueryRawBytes(pub args::QueryRawBytes); + pub struct QueryRawBytes(pub args::QueryRawBytes); impl SubCmd for QueryRawBytes { const CMD: &'static str = "query-bytes"; @@ -1362,12 +1421,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Query the raw bytes of a given storage key") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct TxInitProposal(pub args::InitProposal); + pub struct TxInitProposal(pub args::InitProposal); impl SubCmd for TxInitProposal { const CMD: &'static str = "init-proposal"; @@ -1384,12 +1443,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Create a new proposal.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct TxVoteProposal(pub args::VoteProposal); + pub struct TxVoteProposal(pub args::VoteProposal); impl SubCmd for TxVoteProposal { const CMD: &'static str = "vote-proposal"; @@ -1406,12 +1465,12 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about("Vote a proposal.") - .add_args::() + .add_args::>() } } #[derive(Clone, Debug)] - pub struct TxRevealPk(pub args::RevealPk); + pub struct TxRevealPk(pub args::RevealPk); impl SubCmd for TxRevealPk { const CMD: &'static str = "reveal-pk"; @@ -1436,7 +1495,7 @@ pub mod cmds { signature verification on transactions authorized by \ this account.", ) - .add_args::() + .add_args::>() } } @@ -1446,6 +1505,7 @@ pub mod cmds { FetchWasms(FetchWasms), InitNetwork(InitNetwork), InitGenesisValidator(InitGenesisValidator), + PkToTmAddress(PkToTmAddress), } impl SubCmd for Utils { @@ -1460,10 +1520,13 @@ pub mod cmds { SubCmd::parse(matches).map(Self::InitNetwork); let init_genesis = 
SubCmd::parse(matches).map(Self::InitGenesisValidator); + let pk_to_tm_address = + SubCmd::parse(matches).map(Self::PkToTmAddress); join_network .or(fetch_wasms) .or(init_network) .or(init_genesis) + .or(pk_to_tm_address) }) } @@ -1474,6 +1537,7 @@ pub mod cmds { .subcommand(FetchWasms::def()) .subcommand(InitNetwork::def()) .subcommand(InitGenesisValidator::def()) + .subcommand(PkToTmAddress::def()) .setting(AppSettings::SubcommandRequiredElseHelp) } } @@ -1557,6 +1621,28 @@ pub mod cmds { .add_args::() } } + + #[derive(Clone, Debug)] + pub struct PkToTmAddress(pub args::PkToTmAddress); + + impl SubCmd for PkToTmAddress { + const CMD: &'static str = "pk-to-tm"; + + fn parse(matches: &ArgMatches) -> Option { + matches + .subcommand_matches(Self::CMD) + .map(|matches| Self(args::PkToTmAddress::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about( + "Convert a validator's consensus public key to a \ + Tendermint address.", + ) + .add_args::() + } + } } pub mod args { @@ -1567,136 +1653,162 @@ pub mod args { use std::str::FromStr; use namada::ibc::core::ics24_host::identifier::{ChannelId, PortId}; + pub use namada::ledger::args::*; use namada::types::address::Address; use namada::types::chain::{ChainId, ChainIdPrefix}; - use namada::types::governance::ProposalVote; use namada::types::key::*; use namada::types::masp::MaspValue; - use namada::types::storage::{self, Epoch}; + use namada::types::storage::{self, BlockHeight, Epoch}; use namada::types::time::DateTimeUtc; use namada::types::token; - use namada::types::transaction::GasLimit; use rust_decimal::Decimal; use super::context::*; use super::utils::*; use super::{ArgGroup, ArgMatches}; - use crate::client::types::{ParsedTxArgs, ParsedTxTransferArgs}; - use crate::config; - use crate::config::TendermintMode; + use crate::config::{self, Action, ActionAtHeight, TendermintMode}; use crate::facade::tendermint::Timeout; use crate::facade::tendermint_config::net::Address as TendermintAddress; - const ADDRESS: Arg = arg("address"); - const ALIAS_OPT: ArgOpt = ALIAS.opt(); - const ALIAS: Arg = arg("alias"); - const ALLOW_DUPLICATE_IP: ArgFlag = flag("allow-duplicate-ip"); - const AMOUNT: Arg = arg("amount"); - const ARCHIVE_DIR: ArgOpt = arg_opt("archive-dir"); - const BALANCE_OWNER: ArgOpt = arg_opt("owner"); - const BASE_DIR: ArgDefault = arg_default( + pub const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; + pub const TX_INIT_VALIDATOR_WASM: &str = "tx_init_validator.wasm"; + pub const TX_INIT_PROPOSAL: &str = "tx_init_proposal.wasm"; + pub const TX_VOTE_PROPOSAL: &str = "tx_vote_proposal.wasm"; + pub const TX_REVEAL_PK: &str = "tx_reveal_pk.wasm"; + pub const TX_UPDATE_VP_WASM: &str = "tx_update_vp.wasm"; + pub const TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; + pub const TX_IBC_WASM: &str = "tx_ibc.wasm"; + pub const VP_USER_WASM: &str = "vp_user.wasm"; + pub const TX_BOND_WASM: &str = "tx_bond.wasm"; + pub const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; + pub const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; + pub const TX_CHANGE_COMMISSION_WASM: &str = + "tx_change_validator_commission.wasm"; + + pub const ADDRESS: Arg = arg("address"); + pub const ALIAS_OPT: ArgOpt = ALIAS.opt(); + pub const ALIAS: Arg = arg("alias"); + pub const ALIAS_FORCE: ArgFlag = flag("alias-force"); + pub const ALLOW_DUPLICATE_IP: ArgFlag = flag("allow-duplicate-ip"); + pub const AMOUNT: Arg = arg("amount"); + pub const ARCHIVE_DIR: ArgOpt = arg_opt("archive-dir"); + pub const BALANCE_OWNER: ArgOpt = arg_opt("owner"); + pub const 
BASE_DIR: ArgDefault = arg_default( "base-dir", DefaultFn(|| match env::var("NAMADA_BASE_DIR") { - Ok(dir) => dir.into(), - Err(_) => config::DEFAULT_BASE_DIR.into(), + Ok(dir) => PathBuf::from(dir), + Err(_) => config::get_default_namada_folder(), }), ); - // const BLOCK_HEIGHT_OPT: ArgOpt = arg_opt("height"); - const BROADCAST_ONLY: ArgFlag = flag("broadcast-only"); - const CHAIN_ID: Arg = arg("chain-id"); - const CHAIN_ID_OPT: ArgOpt = CHAIN_ID.opt(); - const CHAIN_ID_PREFIX: Arg = arg("chain-prefix"); - const CHANNEL_ID: Arg = arg("channel-id"); - const CODE_PATH: Arg = arg("code-path"); - const CODE_PATH_OPT: ArgOpt = CODE_PATH.opt(); - const COMMISSION_RATE: Arg = arg("commission-rate"); - const CONSENSUS_TIMEOUT_COMMIT: ArgDefault = arg_default( + pub const BLOCK_HEIGHT: Arg = arg("block-height"); + // pub const BLOCK_HEIGHT_OPT: ArgOpt = arg_opt("height"); + pub const BROADCAST_ONLY: ArgFlag = flag("broadcast-only"); + pub const CHAIN_ID: Arg = arg("chain-id"); + pub const CHAIN_ID_OPT: ArgOpt = CHAIN_ID.opt(); + pub const CHAIN_ID_PREFIX: Arg = arg("chain-prefix"); + pub const CHANNEL_ID: Arg = arg("channel-id"); + pub const CODE_PATH: Arg = arg("code-path"); + pub const CODE_PATH_OPT: ArgOpt = CODE_PATH.opt(); + pub const COMMISSION_RATE: Arg = arg("commission-rate"); + pub const CONSENSUS_TIMEOUT_COMMIT: ArgDefault = arg_default( "consensus-timeout-commit", DefaultFn(|| Timeout::from_str("1s").unwrap()), ); - const DATA_PATH_OPT: ArgOpt = arg_opt("data-path"); - const DATA_PATH: Arg = arg("data-path"); - const DECRYPT: ArgFlag = flag("decrypt"); - const DONT_ARCHIVE: ArgFlag = flag("dont-archive"); - const DRY_RUN_TX: ArgFlag = flag("dry-run"); - const DUMP_TX: ArgFlag = flag("dump-tx"); - const EPOCH: ArgOpt = arg_opt("epoch"); - const FORCE: ArgFlag = flag("force"); - const DONT_PREFETCH_WASM: ArgFlag = flag("dont-prefetch-wasm"); - const GAS_AMOUNT: ArgDefault = + pub const DATA_PATH_OPT: ArgOpt = arg_opt("data-path"); + pub const DATA_PATH: Arg = arg("data-path"); + pub const DECRYPT: ArgFlag = flag("decrypt"); + pub const DONT_ARCHIVE: ArgFlag = flag("dont-archive"); + pub const DRY_RUN_TX: ArgFlag = flag("dry-run"); + pub const DUMP_TX: ArgFlag = flag("dump-tx"); + pub const EPOCH: ArgOpt = arg_opt("epoch"); + pub const EXPIRATION_OPT: ArgOpt = arg_opt("expiration"); + pub const FORCE: ArgFlag = flag("force"); + pub const DONT_PREFETCH_WASM: ArgFlag = flag("dont-prefetch-wasm"); + pub const GAS_AMOUNT: ArgDefault = arg_default("gas-amount", DefaultFn(|| token::Amount::from(0))); - const GAS_LIMIT: ArgDefault = + pub const GAS_LIMIT: ArgDefault = arg_default("gas-limit", DefaultFn(|| token::Amount::from(0))); - const GAS_TOKEN: ArgDefaultFromCtx = - arg_default_from_ctx("gas-token", DefaultFn(|| "NAM".into())); - const GENESIS_PATH: Arg = arg("genesis-path"); - const GENESIS_VALIDATOR: ArgOpt = arg("genesis-validator").opt(); - const LEDGER_ADDRESS_ABOUT: &str = + pub const GAS_TOKEN: ArgDefaultFromCtx = + arg_default_from_ctx("gas-token", DefaultFn(|| "NAM".parse().unwrap())); + pub const GENESIS_PATH: Arg = arg("genesis-path"); + pub const GENESIS_VALIDATOR: ArgOpt = + arg("genesis-validator").opt(); + pub const HALT_ACTION: ArgFlag = flag("halt"); + pub const HISTORIC: ArgFlag = flag("historic"); + pub const LEDGER_ADDRESS_ABOUT: &str = "Address of a ledger node as \"{scheme}://{host}:{port}\". 
If the \ scheme is not supplied, it is assumed to be TCP."; - const LEDGER_ADDRESS_DEFAULT: ArgDefault = + pub const LEDGER_ADDRESS_DEFAULT: ArgDefault = LEDGER_ADDRESS.default(DefaultFn(|| { let raw = "127.0.0.1:26657"; TendermintAddress::from_str(raw).unwrap() })); - const LEDGER_ADDRESS: Arg = arg("ledger-address"); - const LOCALHOST: ArgFlag = flag("localhost"); - const MASP_VALUE: Arg = arg("value"); - const MAX_COMMISSION_RATE_CHANGE: Arg = + pub const LEDGER_ADDRESS: Arg = arg("node"); + pub const LOCALHOST: ArgFlag = flag("localhost"); + pub const MASP_VALUE: Arg = arg("value"); + pub const MAX_COMMISSION_RATE_CHANGE: Arg = arg("max-commission-rate-change"); - const MODE: ArgOpt = arg_opt("mode"); - const NET_ADDRESS: Arg = arg("net-address"); - const NAMADA_START_TIME: ArgOpt = arg_opt("time"); - const NO_CONVERSIONS: ArgFlag = flag("no-conversions"); - const OUT_FILE_PATH_OPT: ArgOpt = arg_opt("out-file-path"); - const OWNER: Arg = arg("owner"); - const OWNER_OPT: ArgOpt = OWNER.opt(); - const PIN: ArgFlag = flag("pin"); - const PORT_ID: ArgDefault = arg_default( + pub const MODE: ArgOpt = arg_opt("mode"); + pub const NET_ADDRESS: Arg = arg("net-address"); + pub const NAMADA_START_TIME: ArgOpt = arg_opt("time"); + pub const NO_CONVERSIONS: ArgFlag = flag("no-conversions"); + pub const OUT_FILE_PATH_OPT: ArgOpt = arg_opt("out-file-path"); + pub const OWNER: Arg = arg("owner"); + pub const OWNER_OPT: ArgOpt = OWNER.opt(); + pub const PIN: ArgFlag = flag("pin"); + pub const PORT_ID: ArgDefault = arg_default( "port-id", DefaultFn(|| PortId::from_str("transfer").unwrap()), ); - const PROPOSAL_OFFLINE: ArgFlag = flag("offline"); - const PROTOCOL_KEY: ArgOpt = arg_opt("protocol-key"); - const PRE_GENESIS_PATH: ArgOpt = arg_opt("pre-genesis-path"); - const PUBLIC_KEY: Arg = arg("public-key"); - const PROPOSAL_ID: Arg = arg("proposal-id"); - const PROPOSAL_ID_OPT: ArgOpt = arg_opt("proposal-id"); - const PROPOSAL_VOTE: Arg = arg("vote"); - const RAW_ADDRESS: Arg
= arg("address"); - const RAW_ADDRESS_OPT: ArgOpt
= RAW_ADDRESS.opt(); - const RAW_PUBLIC_KEY_OPT: ArgOpt = arg_opt("public-key"); - const RECEIVER: Arg = arg("receiver"); - const SCHEME: ArgDefault = + pub const PROPOSAL_OFFLINE: ArgFlag = flag("offline"); + pub const PROTOCOL_KEY: ArgOpt = arg_opt("protocol-key"); + pub const PRE_GENESIS_PATH: ArgOpt = arg_opt("pre-genesis-path"); + pub const PUBLIC_KEY: Arg = arg("public-key"); + pub const PROPOSAL_ID: Arg = arg("proposal-id"); + pub const PROPOSAL_ID_OPT: ArgOpt = arg_opt("proposal-id"); + pub const PROPOSAL_VOTE_PGF_OPT: ArgOpt = arg_opt("pgf"); + pub const PROPOSAL_VOTE_ETH_OPT: ArgOpt = arg_opt("eth"); + pub const PROPOSAL_VOTE: Arg = arg("vote"); + pub const RAW_ADDRESS: Arg
= arg("address"); + pub const RAW_ADDRESS_OPT: ArgOpt
= RAW_ADDRESS.opt(); + pub const RAW_PUBLIC_KEY: Arg = arg("public-key"); + pub const RAW_PUBLIC_KEY_OPT: ArgOpt = + arg_opt("public-key"); + pub const RECEIVER: Arg = arg("receiver"); + pub const SCHEME: ArgDefault = arg_default("scheme", DefaultFn(|| SchemeType::Ed25519)); - const SIGNER: ArgOpt = arg_opt("signer"); - const SIGNING_KEY_OPT: ArgOpt = SIGNING_KEY.opt(); - const SIGNING_KEY: Arg = arg("signing-key"); - const SOURCE: Arg = arg("source"); - const SOURCE_OPT: ArgOpt = SOURCE.opt(); - const STORAGE_KEY: Arg = arg("storage-key"); - const SUB_PREFIX: ArgOpt = arg_opt("sub-prefix"); - const TIMEOUT_HEIGHT: ArgOpt = arg_opt("timeout-height"); - const TIMEOUT_SEC_OFFSET: ArgOpt = arg_opt("timeout-sec-offset"); - const TOKEN_OPT: ArgOpt = TOKEN.opt(); - const TOKEN: Arg = arg("token"); - const TRANSFER_SOURCE: Arg = arg("source"); - const TRANSFER_TARGET: Arg = arg("target"); - const TX_HASH: Arg = arg("tx-hash"); - const UNSAFE_DONT_ENCRYPT: ArgFlag = flag("unsafe-dont-encrypt"); - const UNSAFE_SHOW_SECRET: ArgFlag = flag("unsafe-show-secret"); - const VALIDATOR: Arg = arg("validator"); - const VALIDATOR_OPT: ArgOpt = VALIDATOR.opt(); - const VALIDATOR_ACCOUNT_KEY: ArgOpt = + pub const SIGNER: ArgOpt = arg_opt("signer"); + pub const SIGNING_KEY_OPT: ArgOpt = SIGNING_KEY.opt(); + pub const SIGNING_KEY: Arg = arg("signing-key"); + pub const SOURCE: Arg = arg("source"); + pub const SOURCE_OPT: ArgOpt = SOURCE.opt(); + pub const STORAGE_KEY: Arg = arg("storage-key"); + pub const SUB_PREFIX: ArgOpt = arg_opt("sub-prefix"); + pub const SUSPEND_ACTION: ArgFlag = flag("suspend"); + pub const TENDERMINT_TX_INDEX: ArgFlag = flag("tx-index"); + pub const TIMEOUT_HEIGHT: ArgOpt = arg_opt("timeout-height"); + pub const TIMEOUT_SEC_OFFSET: ArgOpt = arg_opt("timeout-sec-offset"); + pub const TOKEN_OPT: ArgOpt = TOKEN.opt(); + pub const TOKEN: Arg = arg("token"); + pub const TRANSFER_SOURCE: Arg = arg("source"); + pub const TRANSFER_TARGET: Arg = arg("target"); + pub const TX_HASH: Arg = arg("tx-hash"); + pub const UNSAFE_DONT_ENCRYPT: ArgFlag = flag("unsafe-dont-encrypt"); + pub const UNSAFE_SHOW_SECRET: ArgFlag = flag("unsafe-show-secret"); + pub const VALIDATOR: Arg = arg("validator"); + pub const VALIDATOR_OPT: ArgOpt = VALIDATOR.opt(); + pub const VALIDATOR_ACCOUNT_KEY: ArgOpt = arg_opt("account-key"); - const VALIDATOR_CONSENSUS_KEY: ArgOpt = + pub const VALIDATOR_CONSENSUS_KEY: ArgOpt = arg_opt("consensus-key"); - const VALIDATOR_CODE_PATH: ArgOpt = arg_opt("validator-code-path"); - const VALUE: ArgOpt = arg_opt("value"); - const VIEWING_KEY: Arg = arg("key"); - const WASM_CHECKSUMS_PATH: Arg = arg("wasm-checksums-path"); - const WASM_DIR: ArgOpt = arg_opt("wasm-dir"); + pub const VALIDATOR_CODE_PATH: ArgOpt = + arg_opt("validator-code-path"); + pub const VALUE: ArgOpt = arg_opt("value"); + pub const VIEWING_KEY: Arg = arg("key"); + pub const WALLET_ALIAS_FORCE: ArgFlag = flag("wallet-alias-force"); + pub const WASM_CHECKSUMS_PATH: Arg = arg("wasm-checksums-path"); + pub const WASM_DIR: ArgOpt = arg_opt("wasm-dir"); /// Global command arguments #[derive(Clone, Debug)] @@ -1731,7 +1843,10 @@ pub mod args { configuration and state is stored. This value can also \ be set via `NAMADA_BASE_DIR` environment variable, but \ the argument takes precedence, if specified. 
Defaults to \ - `.namada`.", + `$XDG_DATA_HOME/namada` (`$HOME/.local/share/namada` \ + where `XDG_DATA_HOME` is unset) on \ + Unix,`$HOME/Library/Application Support/Namada` on \ + Mac,and `%AppData%\\Namada` on Windows.", )) .arg(WASM_DIR.def().about( "Directory with built WASM validity predicates, \ @@ -1747,12 +1862,57 @@ pub mod args { } #[derive(Clone, Debug)] - pub struct LedgerRun(pub Option); + pub struct LedgerRun { + pub start_time: Option, + pub tx_index: bool, + } impl Args for LedgerRun { fn parse(matches: &ArgMatches) -> Self { - let time = NAMADA_START_TIME.parse(matches); - Self(time) + let start_time = NAMADA_START_TIME.parse(matches); + let tx_index = TENDERMINT_TX_INDEX.parse(matches); + Self { + start_time, + tx_index, + } + } + + fn def(app: App) -> App { + app.arg(NAMADA_START_TIME.def().about( + "The start time of the ledger. Accepts a relaxed form of \ + RFC3339. A space or a 'T' are accepted as the separator \ + between the date and time components. Additional spaces are \ + allowed between each component.\nAll of these examples are \ + equivalent:\n2023-01-20T12:12:12Z\n2023-01-20 \ + 12:12:12Z\n2023- 01-20T12: 12:12Z", + )) + .arg( + TENDERMINT_TX_INDEX + .def() + .about("Enable Tendermint tx indexing."), + ) + } + } + + #[derive(Clone, Debug)] + pub struct LedgerRunUntil { + pub time: Option, + pub action_at_height: ActionAtHeight, + } + + impl Args for LedgerRunUntil { + fn parse(matches: &ArgMatches) -> Self { + Self { + time: NAMADA_START_TIME.parse(matches), + action_at_height: ActionAtHeight { + height: BLOCK_HEIGHT.parse(matches), + action: if HALT_ACTION.parse(matches) { + Action::Halt + } else { + Action::Suspend + }, + }, + } } fn def(app: App) -> App { @@ -1761,6 +1921,18 @@ pub mod args { .def() .about("The start time of the ledger."), ) + .arg(BLOCK_HEIGHT.def().about("The block height to run until.")) + .arg(HALT_ACTION.def().about("Halt at the given block height")) + .arg( + SUSPEND_ACTION + .def() + .about("Suspend consensus at the given block height"), + ) + .group( + ArgGroup::new("find_flags") + .args(&[HALT_ACTION.name, SUSPEND_ACTION.name]) + .required(true), + ) } } @@ -1769,6 +1941,7 @@ pub mod args { // TODO: allow to specify height // pub block_height: Option, pub out_file_path: PathBuf, + pub historic: bool, } impl Args for LedgerDumpDb { @@ -1777,9 +1950,12 @@ pub mod args { let out_file_path = OUT_FILE_PATH_OPT .parse(matches) .unwrap_or_else(|| PathBuf::from("db_dump".to_string())); + let historic = HISTORIC.parse(matches); + Self { // block_height, out_file_path, + historic, } } @@ -1793,19 +1969,26 @@ pub mod args { Defaults to \"db_dump.{block_height}.toml\" in the \ current working directory.", )) + .arg(HISTORIC.def().about( + "If provided, dump also the diff of the last height", + )) } } - /// Transaction associated results arguments - #[derive(Clone, Debug)] - pub struct QueryResult { - /// Common query args - pub query: Query, - /// Hash of transaction to lookup - pub tx_hash: String, + pub trait CliToSdk: Args { + fn to_sdk(self, ctx: &mut Context) -> X; + } + + impl CliToSdk> for QueryResult { + fn to_sdk(self, ctx: &mut Context) -> QueryResult { + QueryResult:: { + query: self.query.to_sdk(ctx), + tx_hash: self.tx_hash, + } + } } - impl Args for QueryResult { + impl Args for QueryResult { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let tx_hash = TX_HASH.parse(matches); @@ -1813,7 +1996,7 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::().arg( + app.add_args::>().arg( TX_HASH 
.def() .about("The hash of the transaction being looked up."), @@ -1821,18 +2004,20 @@ pub mod args { } } - /// Custom transaction arguments - #[derive(Clone, Debug)] - pub struct TxCustom { - /// Common tx arguments - pub tx: Tx, - /// Path to the tx WASM code file - pub code_path: PathBuf, - /// Path to the data file - pub data_path: Option, + impl CliToSdk> for TxCustom { + fn to_sdk(self, ctx: &mut Context) -> TxCustom { + TxCustom:: { + tx: self.tx.to_sdk(ctx), + code_path: ctx.read_wasm(self.code_path), + data_path: self.data_path.map(|data_path| { + std::fs::read(data_path) + .expect("Expected a file at given data path") + }), + } + } } - impl Args for TxCustom { + impl Args for TxCustom { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let code_path = CODE_PATH.parse(matches); @@ -1845,7 +2030,7 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg( CODE_PATH .def() @@ -1859,39 +2044,22 @@ pub mod args { } } - /// Transfer transaction arguments - #[derive(Clone, Debug)] - pub struct TxTransfer { - /// Common tx arguments - pub tx: Tx, - /// Transfer source address - pub source: WalletTransferSource, - /// Transfer target address - pub target: WalletTransferTarget, - /// Transferred token address - pub token: WalletAddress, - /// Transferred token address - pub sub_prefix: Option, - /// Transferred token amount - pub amount: token::Amount, - } - - impl TxTransfer { - pub fn parse_from_context( - &self, - ctx: &mut Context, - ) -> ParsedTxTransferArgs { - ParsedTxTransferArgs { - tx: self.tx.parse_from_context(ctx), + impl CliToSdk> for TxTransfer { + fn to_sdk(self, ctx: &mut Context) -> TxTransfer { + TxTransfer:: { + tx: self.tx.to_sdk(ctx), source: ctx.get_cached(&self.source), target: ctx.get(&self.target), token: ctx.get(&self.token), + sub_prefix: self.sub_prefix, amount: self.amount, + native_token: ctx.native_token.clone(), + tx_code_path: ctx.read_wasm(self.tx_code_path), } } } - impl Args for TxTransfer { + impl Args for TxTransfer { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let source = TRANSFER_SOURCE.parse(matches); @@ -1899,6 +2067,7 @@ pub mod args { let token = TOKEN.parse(matches); let sub_prefix = SUB_PREFIX.parse(matches); let amount = AMOUNT.parse(matches); + let tx_code_path = PathBuf::from(TX_TRANSFER_WASM); Self { tx, source, @@ -1906,11 +2075,13 @@ pub mod args { token, sub_prefix, amount, + native_token: (), + tx_code_path, } } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(TRANSFER_SOURCE.def().about( "The source account address. 
The source's key may be used \ to produce the signature.", @@ -1925,32 +2096,25 @@ pub mod args { } } - /// IBC transfer transaction arguments - #[derive(Clone, Debug)] - pub struct TxIbcTransfer { - /// Common tx arguments - pub tx: Tx, - /// Transfer source address - pub source: WalletAddress, - /// Transfer target address - pub receiver: String, - /// Transferred token address - pub token: WalletAddress, - /// Transferred token address - pub sub_prefix: Option, - /// Transferred token amount - pub amount: token::Amount, - /// Port ID - pub port_id: PortId, - /// Channel ID - pub channel_id: ChannelId, - /// Timeout height of the destination chain - pub timeout_height: Option, - /// Timeout timestamp offset - pub timeout_sec_offset: Option, - } - - impl Args for TxIbcTransfer { + impl CliToSdk> for TxIbcTransfer { + fn to_sdk(self, ctx: &mut Context) -> TxIbcTransfer { + TxIbcTransfer:: { + tx: self.tx.to_sdk(ctx), + source: ctx.get(&self.source), + receiver: self.receiver, + token: ctx.get(&self.token), + sub_prefix: self.sub_prefix, + amount: self.amount, + port_id: self.port_id, + channel_id: self.channel_id, + timeout_height: self.timeout_height, + timeout_sec_offset: self.timeout_sec_offset, + tx_code_path: ctx.read_wasm(self.tx_code_path), + } + } + } + + impl Args for TxIbcTransfer { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let source = SOURCE.parse(matches); @@ -1962,6 +2126,7 @@ pub mod args { let channel_id = CHANNEL_ID.parse(matches); let timeout_height = TIMEOUT_HEIGHT.parse(matches); let timeout_sec_offset = TIMEOUT_SEC_OFFSET.parse(matches); + let tx_code_path = PathBuf::from(TX_IBC_WASM); Self { tx, source, @@ -1973,11 +2138,12 @@ pub mod args { channel_id, timeout_height, timeout_sec_offset, + tx_code_path, } } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(SOURCE.def().about( "The source account address. 
The source's key is used to \ produce the signature.", @@ -1999,35 +2165,47 @@ pub mod args { } } - /// Transaction to initialize a new account - #[derive(Clone, Debug)] - pub struct TxInitAccount { - /// Common tx arguments - pub tx: Tx, - /// Address of the source account - pub source: WalletAddress, - /// Path to the VP WASM code file for the new account - pub vp_code_path: Option, - /// Public key for the new account - pub public_key: WalletPublicKey, + impl CliToSdk> for TxInitAccount { + fn to_sdk(self, ctx: &mut Context) -> TxInitAccount { + TxInitAccount:: { + tx: self.tx.to_sdk(ctx), + source: ctx.get(&self.source), + vp_code: ctx.read_wasm(self.vp_code), + vp_code_path: self + .vp_code_path + .as_path() + .to_str() + .unwrap() + .to_string() + .into_bytes(), + tx_code_path: ctx.read_wasm(self.tx_code_path), + public_key: ctx.get_cached(&self.public_key), + } + } } - impl Args for TxInitAccount { + impl Args for TxInitAccount { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let source = SOURCE.parse(matches); - let vp_code_path = CODE_PATH_OPT.parse(matches); + let vp_code_path = CODE_PATH_OPT + .parse(matches) + .unwrap_or_else(|| PathBuf::from(VP_USER_WASM)); + let vp_code = vp_code_path.clone(); + let tx_code_path = PathBuf::from(TX_INIT_ACCOUNT_WASM); let public_key = PUBLIC_KEY.parse(matches); Self { tx, source, + vp_code, vp_code_path, public_key, + tx_code_path, } } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(SOURCE.def().about( "The source account's address that signs the transaction.", )) @@ -2043,22 +2221,31 @@ pub mod args { } } - /// Transaction to initialize a new account - #[derive(Clone, Debug)] - pub struct TxInitValidator { - pub tx: Tx, - pub source: WalletAddress, - pub scheme: SchemeType, - pub account_key: Option, - pub consensus_key: Option, - pub protocol_key: Option, - pub commission_rate: Decimal, - pub max_commission_rate_change: Decimal, - pub validator_vp_code_path: Option, - pub unsafe_dont_encrypt: bool, + impl CliToSdk> for TxInitValidator { + fn to_sdk(self, ctx: &mut Context) -> TxInitValidator { + TxInitValidator:: { + tx: self.tx.to_sdk(ctx), + source: ctx.get(&self.source), + scheme: self.scheme, + account_key: self.account_key.map(|x| ctx.get_cached(&x)), + consensus_key: self.consensus_key.map(|x| ctx.get_cached(&x)), + protocol_key: self.protocol_key.map(|x| ctx.get_cached(&x)), + commission_rate: self.commission_rate, + max_commission_rate_change: self.max_commission_rate_change, + validator_vp_code_path: self + .validator_vp_code_path + .as_path() + .to_str() + .unwrap() + .to_string() + .into_bytes(), + unsafe_dont_encrypt: self.unsafe_dont_encrypt, + tx_code_path: ctx.read_wasm(self.tx_code_path), + } + } } - impl Args for TxInitValidator { + impl Args for TxInitValidator { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let source = SOURCE.parse(matches); @@ -2069,8 +2256,11 @@ pub mod args { let commission_rate = COMMISSION_RATE.parse(matches); let max_commission_rate_change = MAX_COMMISSION_RATE_CHANGE.parse(matches); - let validator_vp_code_path = VALIDATOR_CODE_PATH.parse(matches); + let validator_vp_code_path = VALIDATOR_CODE_PATH + .parse(matches) + .unwrap_or_else(|| PathBuf::from(VP_USER_WASM)); let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); + let tx_code_path = PathBuf::from(TX_INIT_VALIDATOR_WASM); Self { tx, source, @@ -2082,11 +2272,12 @@ pub mod args { max_commission_rate_change, validator_vp_code_path, unsafe_dont_encrypt, + tx_code_path, 
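            // `validator_vp_code_path` above falls back to VP_USER_WASM when
            // no --validator-code-path argument is given, so a new validator
            // account uses the default user validity predicate unless one is
            // supplied explicitly.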
} } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(SOURCE.def().about( "The source account's address that signs the transaction.", )) @@ -2129,31 +2320,45 @@ pub mod args { } } - /// Transaction to update a VP arguments - #[derive(Clone, Debug)] - pub struct TxUpdateVp { - /// Common tx arguments - pub tx: Tx, - /// Path to the VP WASM code file - pub vp_code_path: PathBuf, - /// Address of the account whose VP is to be updated - pub addr: WalletAddress, + impl CliToSdk> for TxUpdateVp { + fn to_sdk(self, ctx: &mut Context) -> TxUpdateVp { + TxUpdateVp:: { + tx: self.tx.to_sdk(ctx), + vp_code_path: self + .vp_code_path + .as_path() + .to_str() + .unwrap() + .to_string() + .into_bytes(), + tx_code_path: self + .tx_code_path + .as_path() + .to_str() + .unwrap() + .to_string() + .into_bytes(), + addr: ctx.get(&self.addr), + } + } } - impl Args for TxUpdateVp { + impl Args for TxUpdateVp { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let vp_code_path = CODE_PATH.parse(matches); let addr = ADDRESS.parse(matches); + let tx_code_path = PathBuf::from(TX_UPDATE_VP_WASM); Self { tx, vp_code_path, addr, + tx_code_path, } } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg( CODE_PATH.def().about( "The path to the new validity predicate WASM code.", @@ -2166,36 +2371,38 @@ pub mod args { } } - /// Bond arguments - #[derive(Clone, Debug)] - pub struct Bond { - /// Common tx arguments - pub tx: Tx, - /// Validator address - pub validator: WalletAddress, - /// Amount of tokens to stake in a bond - pub amount: token::Amount, - /// Source address for delegations. For self-bonds, the validator is - /// also the source. - pub source: Option, + impl CliToSdk> for Bond { + fn to_sdk(self, ctx: &mut Context) -> Bond { + Bond:: { + tx: self.tx.to_sdk(ctx), + validator: ctx.get(&self.validator), + amount: self.amount, + source: self.source.map(|x| ctx.get(&x)), + native_token: ctx.native_token.clone(), + tx_code_path: ctx.read_wasm(self.tx_code_path), + } + } } - impl Args for Bond { + impl Args for Bond { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let validator = VALIDATOR.parse(matches); let amount = AMOUNT.parse(matches); let source = SOURCE_OPT.parse(matches); + let tx_code_path = PathBuf::from(TX_BOND_WASM); Self { tx, validator, amount, source, + native_token: (), + tx_code_path, } } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(VALIDATOR.def().about("Validator address.")) .arg(AMOUNT.def().about("Amount of tokens to stake in a bond.")) .arg(SOURCE_OPT.def().about( @@ -2205,36 +2412,36 @@ pub mod args { } } - /// Unbond arguments - #[derive(Clone, Debug)] - pub struct Unbond { - /// Common tx arguments - pub tx: Tx, - /// Validator address - pub validator: WalletAddress, - /// Amount of tokens to unbond from a bond - pub amount: token::Amount, - /// Source address for unbonding from delegations. 
For unbonding from - /// self-bonds, the validator is also the source - pub source: Option, + impl CliToSdk> for Unbond { + fn to_sdk(self, ctx: &mut Context) -> Unbond { + Unbond:: { + tx: self.tx.to_sdk(ctx), + validator: ctx.get(&self.validator), + amount: self.amount, + source: self.source.map(|x| ctx.get(&x)), + tx_code_path: ctx.read_wasm(self.tx_code_path), + } + } } - impl Args for Unbond { + impl Args for Unbond { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let validator = VALIDATOR.parse(matches); let amount = AMOUNT.parse(matches); let source = SOURCE_OPT.parse(matches); + let tx_code_path = PathBuf::from(TX_UNBOND_WASM); Self { tx, validator, amount, source, + tx_code_path, } } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(VALIDATOR.def().about("Validator address.")) .arg( AMOUNT @@ -2248,32 +2455,50 @@ pub mod args { )) } } - #[derive(Clone, Debug)] - pub struct InitProposal { + pub struct InitProposal { /// Common tx arguments - pub tx: Tx, + pub tx: Tx, /// The proposal file path pub proposal_data: PathBuf, /// Flag if proposal should be run offline pub offline: bool, + /// Native token address + pub native_token: C::NativeAddress, + /// Path to the TX WASM code file + pub tx_code_path: C::Data, + } + + impl CliToSdk> for InitProposal { + fn to_sdk(self, ctx: &mut Context) -> InitProposal { + InitProposal:: { + tx: self.tx.to_sdk(ctx), + proposal_data: self.proposal_data, + offline: self.offline, + native_token: ctx.native_token.clone(), + tx_code_path: ctx.read_wasm(self.tx_code_path), + } + } } - impl Args for InitProposal { + impl Args for InitProposal { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let proposal_data = DATA_PATH.parse(matches); let offline = PROPOSAL_OFFLINE.parse(matches); + let tx_code_path = PathBuf::from(TX_INIT_PROPOSAL); Self { tx, proposal_data, offline, + native_token: (), + tx_code_path, } } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(DATA_PATH.def().about( "The data path file (json) that describes the proposal.", )) @@ -2286,38 +2511,65 @@ pub mod args { } #[derive(Clone, Debug)] - pub struct VoteProposal { + pub struct VoteProposal { /// Common tx arguments - pub tx: Tx, + pub tx: Tx, /// Proposal id pub proposal_id: Option, /// The vote - pub vote: ProposalVote, + pub vote: String, + /// PGF proposal + pub proposal_pgf: Option, + /// ETH proposal + pub proposal_eth: Option, /// Flag if proposal vote should be run offline pub offline: bool, /// The proposal file path pub proposal_data: Option, + /// Path to the TX WASM code file + pub tx_code_path: C::Data, + } + + impl CliToSdk> for VoteProposal { + fn to_sdk(self, ctx: &mut Context) -> VoteProposal { + VoteProposal:: { + tx: self.tx.to_sdk(ctx), + proposal_id: self.proposal_id, + vote: self.vote, + offline: self.offline, + proposal_data: self.proposal_data, + tx_code_path: ctx.read_wasm(self.tx_code_path), + proposal_pgf: self.proposal_pgf, + proposal_eth: self.proposal_eth, + } + } } - impl Args for VoteProposal { + impl Args for VoteProposal { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let proposal_id = PROPOSAL_ID_OPT.parse(matches); + let proposal_pgf = PROPOSAL_VOTE_PGF_OPT.parse(matches); + let proposal_eth = PROPOSAL_VOTE_ETH_OPT.parse(matches); let vote = PROPOSAL_VOTE.parse(matches); let offline = PROPOSAL_OFFLINE.parse(matches); let proposal_data = DATA_PATH_OPT.parse(matches); + let tx_code_path = PathBuf::from(TX_VOTE_PROPOSAL); Self { tx, proposal_id, 
vote, + proposal_pgf, + proposal_eth, offline, proposal_data, + tx_code_path, } } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg( PROPOSAL_ID_OPT .def() @@ -2330,7 +2582,29 @@ pub mod args { .arg( PROPOSAL_VOTE .def() - .about("The vote for the proposal. Either yay or nay."), + .about("The vote for the proposal. Either yay or nay"), + ) + .arg( + PROPOSAL_VOTE_PGF_OPT + .def() + .about( + "The list of proposed councils and spending \ + caps:\n$council1 $cap1 $council2 $cap2 ... \ + (council is bech32m encoded address, cap is \ + expressed in microNAM", + ) + .requires(PROPOSAL_ID.name) + .conflicts_with(PROPOSAL_VOTE_ETH_OPT.name), + ) + .arg( + PROPOSAL_VOTE_ETH_OPT + .def() + .about( + "The signing key and message bytes (hex encoded) \ + to be signed: $signing_key $message", + ) + .requires(PROPOSAL_ID.name) + .conflicts_with(PROPOSAL_VOTE_PGF_OPT.name), ) .arg( PROPOSAL_OFFLINE @@ -2350,15 +2624,16 @@ pub mod args { } } - #[derive(Clone, Debug)] - pub struct RevealPk { - /// Common tx arguments - pub tx: Tx, - /// A public key to be revealed on-chain - pub public_key: WalletPublicKey, + impl CliToSdk> for RevealPk { + fn to_sdk(self, ctx: &mut Context) -> RevealPk { + RevealPk:: { + tx: self.tx.to_sdk(ctx), + public_key: ctx.get_cached(&self.public_key), + } + } } - impl Args for RevealPk { + impl Args for RevealPk { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let public_key = PUBLIC_KEY.parse(matches); @@ -2367,20 +2642,21 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(PUBLIC_KEY.def().about("A public key to reveal.")) } } - #[derive(Clone, Debug)] - pub struct QueryProposal { - /// Common query args - pub query: Query, - /// Proposal id - pub proposal_id: Option, + impl CliToSdk> for QueryProposal { + fn to_sdk(self, ctx: &mut Context) -> QueryProposal { + QueryProposal:: { + query: self.query.to_sdk(ctx), + proposal_id: self.proposal_id, + } + } } - impl Args for QueryProposal { + impl Args for QueryProposal { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let proposal_id = PROPOSAL_ID_OPT.parse(matches); @@ -2389,15 +2665,15 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(PROPOSAL_ID_OPT.def().about("The proposal identifier.")) } } #[derive(Clone, Debug)] - pub struct QueryProposalResult { + pub struct QueryProposalResult { /// Common query args - pub query: Query, + pub query: Query, /// Proposal id pub proposal_id: Option, /// Flag if proposal result should be run on offline data @@ -2406,7 +2682,18 @@ pub mod args { pub proposal_folder: Option, } - impl Args for QueryProposalResult { + impl CliToSdk> for QueryProposalResult { + fn to_sdk(self, ctx: &mut Context) -> QueryProposalResult { + QueryProposalResult:: { + query: self.query.to_sdk(ctx), + proposal_id: self.proposal_id, + offline: self.offline, + proposal_folder: self.proposal_folder, + } + } + } + + impl Args for QueryProposalResult { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let proposal_id = PROPOSAL_ID_OPT.parse(matches); @@ -2422,7 +2709,7 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(PROPOSAL_ID_OPT.def().about("The proposal identifier.")) .arg( PROPOSAL_OFFLINE @@ -2445,13 +2732,20 @@ pub mod args { } } - #[derive(Clone, Debug)] - pub struct QueryProtocolParameters { - /// Common query args - pub query: Query, + impl CliToSdk> + for QueryProtocolParameters + { + fn to_sdk( + 
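
// A sketch of the mutually exclusive vote arguments, assuming clap 3.x's
// builder API directly (the real code goes through Namada's own arg
// definitions): `--pgf` and `--eth` each require a proposal id and exclude
// one another, exactly as the `requires`/`conflicts_with` calls declare.

use clap::{App, Arg};

fn vote_proposal_app() -> App<'static> {
    App::new("vote-proposal")
        .arg(Arg::new("vote").required(true))
        .arg(Arg::new("proposal-id").long("proposal-id").takes_value(true))
        .arg(
            Arg::new("pgf")
                .long("pgf")
                .takes_value(true)
                .requires("proposal-id")
                .conflicts_with("eth"),
        )
        .arg(
            Arg::new("eth")
                .long("eth")
                .takes_value(true)
                .requires("proposal-id")
                .conflicts_with("pgf"),
        )
}

// e.g. `vote-proposal yay --proposal-id 0 --pgf "<council> <cap>"` parses,
// while passing both --pgf and --eth is rejected by clap itself.
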
self, + ctx: &mut Context, + ) -> QueryProtocolParameters { + QueryProtocolParameters:: { + query: self.query.to_sdk(ctx), + } + } } - impl Args for QueryProtocolParameters { + impl Args for QueryProtocolParameters { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); @@ -2459,36 +2753,37 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() } } - /// Withdraw arguments - #[derive(Clone, Debug)] - pub struct Withdraw { - /// Common tx arguments - pub tx: Tx, - /// Validator address - pub validator: WalletAddress, - /// Source address for withdrawing from delegations. For withdrawing - /// from self-bonds, the validator is also the source - pub source: Option, + impl CliToSdk> for Withdraw { + fn to_sdk(self, ctx: &mut Context) -> Withdraw { + Withdraw:: { + tx: self.tx.to_sdk(ctx), + validator: ctx.get(&self.validator), + source: self.source.map(|x| ctx.get(&x)), + tx_code_path: ctx.read_wasm(self.tx_code_path), + } + } } - impl Args for Withdraw { + impl Args for Withdraw { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let validator = VALIDATOR.parse(matches); let source = SOURCE_OPT.parse(matches); + let tx_code_path = PathBuf::from(TX_WITHDRAW_WASM); Self { tx, validator, source, + tx_code_path, } } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(VALIDATOR.def().about("Validator address.")) .arg(SOURCE_OPT.def().about( "Source address for withdrawing from delegations. For \ @@ -2498,18 +2793,17 @@ pub mod args { } } - /// Query asset conversions - #[derive(Clone, Debug)] - pub struct QueryConversions { - /// Common query args - pub query: Query, - /// Address of a token - pub token: Option, - /// Epoch of the asset - pub epoch: Option, + impl CliToSdk> for QueryConversions { + fn to_sdk(self, ctx: &mut Context) -> QueryConversions { + QueryConversions:: { + query: self.query.to_sdk(ctx), + token: self.token.map(|x| ctx.get(&x)), + epoch: self.epoch, + } + } } - impl Args for QueryConversions { + impl Args for QueryConversions { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let token = TOKEN_OPT.parse(matches); @@ -2522,7 +2816,7 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg( EPOCH .def() @@ -2536,22 +2830,19 @@ pub mod args { } } - /// Query token balance(s) - #[derive(Clone, Debug)] - pub struct QueryBalance { - /// Common query args - pub query: Query, - /// Address of an owner - pub owner: Option, - /// Address of a token - pub token: Option, - /// Whether not to convert balances - pub no_conversions: bool, - /// Sub prefix of an account - pub sub_prefix: Option, - } - - impl Args for QueryBalance { + impl CliToSdk> for QueryBalance { + fn to_sdk(self, ctx: &mut Context) -> QueryBalance { + QueryBalance:: { + query: self.query.to_sdk(ctx), + owner: self.owner.map(|x| ctx.get_cached(&x)), + token: self.token.map(|x| ctx.get(&x)), + no_conversions: self.no_conversions, + sub_prefix: self.sub_prefix, + } + } + } + + impl Args for QueryBalance { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let owner = BALANCE_OWNER.parse(matches); @@ -2568,7 +2859,7 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg( BALANCE_OWNER .def() @@ -2592,18 +2883,17 @@ pub mod args { } } - /// Query historical transfer(s) - #[derive(Clone, Debug)] - pub struct QueryTransfers { - /// Common query args - pub query: Query, - /// Address of an owner - pub owner: Option, - /// 
Address of a token - pub token: Option, + impl CliToSdk> for QueryTransfers { + fn to_sdk(self, ctx: &mut Context) -> QueryTransfers { + QueryTransfers:: { + query: self.query.to_sdk(ctx), + owner: self.owner.map(|x| ctx.get_cached(&x)), + token: self.token.map(|x| ctx.get(&x)), + } + } } - impl Args for QueryTransfers { + impl Args for QueryTransfers { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let owner = BALANCE_OWNER.parse(matches); @@ -2616,7 +2906,7 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(BALANCE_OWNER.def().about( "The account address that queried transfers must involve.", )) @@ -2626,18 +2916,17 @@ pub mod args { } } - /// Query PoS bond(s) - #[derive(Clone, Debug)] - pub struct QueryBonds { - /// Common query args - pub query: Query, - /// Address of an owner - pub owner: Option, - /// Address of a validator - pub validator: Option, + impl CliToSdk> for QueryBonds { + fn to_sdk(self, ctx: &mut Context) -> QueryBonds { + QueryBonds:: { + query: self.query.to_sdk(ctx), + owner: self.owner.map(|x| ctx.get(&x)), + validator: self.validator.map(|x| ctx.get(&x)), + } + } } - impl Args for QueryBonds { + impl Args for QueryBonds { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let owner = OWNER_OPT.parse(matches); @@ -2650,7 +2939,7 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg( OWNER_OPT.def().about( "The owner account address whose bonds to query.", @@ -2664,18 +2953,17 @@ pub mod args { } } - /// Query PoS bonded stake - #[derive(Clone, Debug)] - pub struct QueryBondedStake { - /// Common query args - pub query: Query, - /// Address of a validator - pub validator: Option, - /// Epoch in which to find bonded stake - pub epoch: Option, + impl CliToSdk> for QueryBondedStake { + fn to_sdk(self, ctx: &mut Context) -> QueryBondedStake { + QueryBondedStake:: { + query: self.query.to_sdk(ctx), + validator: self.validator.map(|x| ctx.get(&x)), + epoch: self.epoch, + } + } } - impl Args for QueryBondedStake { + impl Args for QueryBondedStake { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let validator = VALIDATOR_OPT.parse(matches); @@ -2688,7 +2976,7 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(VALIDATOR_OPT.def().about( "The validator's address whose bonded stake to query.", )) @@ -2699,31 +2987,35 @@ pub mod args { } } - #[derive(Clone, Debug)] - /// Commission rate change args - pub struct TxCommissionRateChange { - /// Common tx arguments - pub tx: Tx, - /// Validator address (should be self) - pub validator: WalletAddress, - /// Value to which the tx changes the commission rate - pub rate: Decimal, + impl CliToSdk> + for TxCommissionRateChange + { + fn to_sdk(self, ctx: &mut Context) -> TxCommissionRateChange { + TxCommissionRateChange:: { + tx: self.tx.to_sdk(ctx), + validator: ctx.get(&self.validator), + rate: self.rate, + tx_code_path: ctx.read_wasm(self.tx_code_path), + } + } } - impl Args for TxCommissionRateChange { + impl Args for TxCommissionRateChange { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let validator = VALIDATOR.parse(matches); let rate = COMMISSION_RATE.parse(matches); + let tx_code_path = PathBuf::from(TX_CHANGE_COMMISSION_WASM); Self { tx, validator, rate, + tx_code_path, } } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(VALIDATOR.def().about( "The validator's address whose commission rate 
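
// A sketch of the `Args` pairing that all of these query types follow:
// `def` declares the flags and `parse` reads them back, so each argument
// struct stays in sync with its own CLI definition. Simplified, assuming
// clap 3.x; the real trait threads Namada's App wrappers instead.

use clap::{App, Arg, ArgMatches};

trait Args: Sized {
    fn def(app: App<'static>) -> App<'static>;
    fn parse(matches: &ArgMatches) -> Self;
}

struct QueryBonds {
    owner: Option<String>,
    validator: Option<String>,
}

impl Args for QueryBonds {
    fn def(app: App<'static>) -> App<'static> {
        app.arg(Arg::new("owner").long("owner").takes_value(true))
            .arg(Arg::new("validator").long("validator").takes_value(true))
    }

    fn parse(matches: &ArgMatches) -> Self {
        Self {
            owner: matches.value_of("owner").map(str::to_string),
            validator: matches.value_of("validator").map(str::to_string),
        }
    }
}
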
to change.", )) @@ -2735,18 +3027,17 @@ pub mod args { } } - /// Query PoS commission rate - #[derive(Clone, Debug)] - pub struct QueryCommissionRate { - /// Common query args - pub query: Query, - /// Address of a validator - pub validator: WalletAddress, - /// Epoch in which to find commission rate - pub epoch: Option, + impl CliToSdk> for QueryCommissionRate { + fn to_sdk(self, ctx: &mut Context) -> QueryCommissionRate { + QueryCommissionRate:: { + query: self.query.to_sdk(ctx), + validator: ctx.get(&self.validator), + epoch: self.epoch, + } + } } - impl Args for QueryCommissionRate { + impl Args for QueryCommissionRate { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let validator = VALIDATOR.parse(matches); @@ -2759,7 +3050,7 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(VALIDATOR.def().about( "The validator's address whose commission rate to query.", )) @@ -2770,16 +3061,16 @@ pub mod args { } } - /// Query PoS slashes - #[derive(Clone, Debug)] - pub struct QuerySlashes { - /// Common query args - pub query: Query, - /// Address of a validator - pub validator: Option, + impl CliToSdk> for QuerySlashes { + fn to_sdk(self, ctx: &mut Context) -> QuerySlashes { + QuerySlashes:: { + query: self.query.to_sdk(ctx), + validator: self.validator.map(|x| ctx.get(&x)), + } + } } - impl Args for QuerySlashes { + impl Args for QuerySlashes { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let validator = VALIDATOR_OPT.parse(matches); @@ -2787,7 +3078,7 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::().arg( + app.add_args::>().arg( VALIDATOR_OPT .def() .about("The validator's address whose slashes to query."), @@ -2795,16 +3086,7 @@ pub mod args { } } - /// Query PoS delegations - #[derive(Clone, Debug)] - pub struct QueryDelegations { - /// Common query args - pub query: Query, - /// Address of an owner - pub owner: WalletAddress, - } - - impl Args for QueryDelegations { + impl Args for QueryDelegations { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let owner = OWNER.parse(matches); @@ -2812,7 +3094,7 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::().arg( + app.add_args::>().arg( OWNER.def().about( "The address of the owner of the delegations to find.", ), @@ -2820,16 +3102,25 @@ pub mod args { } } - /// Query the raw bytes of given storage key - #[derive(Clone, Debug)] - pub struct QueryRawBytes { - /// The storage key to query - pub storage_key: storage::Key, - /// Common query args - pub query: Query, + impl CliToSdk> for QueryDelegations { + fn to_sdk(self, ctx: &mut Context) -> QueryDelegations { + QueryDelegations:: { + query: self.query.to_sdk(ctx), + owner: ctx.get(&self.owner), + } + } + } + + impl CliToSdk> for QueryRawBytes { + fn to_sdk(self, ctx: &mut Context) -> QueryRawBytes { + QueryRawBytes:: { + query: self.query.to_sdk(ctx), + storage_key: self.storage_key, + } + } } - impl Args for QueryRawBytes { + impl Args for QueryRawBytes { fn parse(matches: &ArgMatches) -> Self { let storage_key = STORAGE_KEY.parse(matches); let query = Query::parse(matches); @@ -2837,62 +3128,52 @@ pub mod args { } fn def(app: App) -> App { - app.add_args::() + app.add_args::>() .arg(STORAGE_KEY.def().about("Storage key")) } } - /// Common transaction arguments - #[derive(Clone, Debug)] - pub struct Tx { - /// Simulate applying the transaction - pub dry_run: bool, - /// Dump the transaction bytes - pub dump_tx: bool, - /// Submit the transaction 
even if it doesn't pass client checks - pub force: bool, - /// Do not wait for the transaction to be added to the blockchain - pub broadcast_only: bool, - /// The address of the ledger node as host:port - pub ledger_address: TendermintAddress, - /// If any new account is initialized by the tx, use the given alias to - /// save it in the wallet. - pub initialized_account_alias: Option, - /// The amount being payed to include the transaction - pub fee_amount: token::Amount, - /// The token in which the fee is being paid - pub fee_token: WalletAddress, - /// The max amount of gas used to process tx - pub gas_limit: GasLimit, - /// Sign the tx with the key for the given alias from your wallet - pub signing_key: Option, - /// Sign the tx with the keypair of the public key of the given address - pub signer: Option, - } - - impl Tx { - pub fn parse_from_context(&self, ctx: &mut Context) -> ParsedTxArgs { - ParsedTxArgs { + + /// The concrete types being used in the CLI + #[derive(Clone, Debug)] + pub struct CliTypes; + + impl NamadaTypes for CliTypes { + type Address = WalletAddress; + type BalanceOwner = WalletBalanceOwner; + type Data = PathBuf; + type Keypair = WalletKeypair; + type NativeAddress = (); + type PublicKey = WalletPublicKey; + type TendermintAddress = TendermintAddress; + type TransferSource = WalletTransferSource; + type TransferTarget = WalletTransferTarget; + type ViewingKey = WalletViewingKey; + } + + impl CliToSdk> for Tx { + fn to_sdk(self, ctx: &mut Context) -> Tx { + Tx:: { dry_run: self.dry_run, dump_tx: self.dump_tx, force: self.force, broadcast_only: self.broadcast_only, - ledger_address: self.ledger_address.clone(), - initialized_account_alias: self - .initialized_account_alias - .clone(), + ledger_address: (), + initialized_account_alias: self.initialized_account_alias, + wallet_alias_force: self.wallet_alias_force, fee_amount: self.fee_amount, fee_token: ctx.get(&self.fee_token), - gas_limit: self.gas_limit.clone(), - signing_key: self - .signing_key - .as_ref() - .map(|sk| ctx.get_cached(sk)), - signer: self.signer.as_ref().map(|signer| ctx.get(signer)), + gas_limit: self.gas_limit, + signing_key: self.signing_key.map(|x| ctx.get_cached(&x)), + signer: self.signer.map(|x| ctx.get(&x)), + tx_code_path: ctx.read_wasm(self.tx_code_path), + password: self.password, + expiration: self.expiration, + chain_id: self.chain_id, } } } - impl Args for Tx { + impl Args for Tx { fn def(app: App) -> App { app.arg( DRY_RUN_TX @@ -2907,7 +3188,13 @@ pub mod args { "Do not wait for the transaction to be applied. This will \ return once the transaction is added to the mempool.", )) - .arg(LEDGER_ADDRESS_DEFAULT.def().about(LEDGER_ADDRESS_ABOUT)) + .arg( + LEDGER_ADDRESS_DEFAULT + .def() + .about(LEDGER_ADDRESS_ABOUT) + // This used to be "ledger-address", alias for compatibility + .alias("ledger-address"), + ) .arg(ALIAS_OPT.def().about( "If any new account is initialized by the tx, use the given \ alias to save it in the wallet. If multiple accounts are \ @@ -2923,6 +3210,12 @@ pub mod args { "The maximum amount of gas needed to run transaction", ), ) + .arg(EXPIRATION_OPT.def().about( + "The expiration datetime of the transaction, after which the \ + tx won't be accepted anymore. 
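
// A sketch of the backward-compatible flag rename above, assuming clap
// 3.x: `Arg::alias` registers a hidden second spelling, so scripts that
// still pass --ledger-address keep working. The primary name used here
// (--node) is illustrative; the diff only shows that the old
// "ledger-address" spelling is kept as an alias.

use clap::{App, Arg};

fn with_ledger_address_arg(app: App<'static>) -> App<'static> {
    app.arg(
        Arg::new("node")
            .long("node")
            .alias("ledger-address") // the old name, kept for compatibility
            .takes_value(true),
    )
}
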
All of these examples are \ + equivalent:\n2012-12-12T12:12:12Z\n2012-12-12 \ + 12:12:12Z\n2012- 12-12T12: 12:12Z", + )) .arg( SIGNING_KEY_OPT .def() @@ -2951,12 +3244,16 @@ pub mod args { let broadcast_only = BROADCAST_ONLY.parse(matches); let ledger_address = LEDGER_ADDRESS_DEFAULT.parse(matches); let initialized_account_alias = ALIAS_OPT.parse(matches); + let wallet_alias_force = WALLET_ALIAS_FORCE.parse(matches); let fee_amount = GAS_AMOUNT.parse(matches); let fee_token = GAS_TOKEN.parse(matches); let gas_limit = GAS_LIMIT.parse(matches).into(); - + let expiration = EXPIRATION_OPT.parse(matches); let signing_key = SIGNING_KEY_OPT.parse(matches); let signer = SIGNER.parse(matches); + let tx_code_path = PathBuf::from(TX_REVEAL_PK); + let chain_id = CHAIN_ID_OPT.parse(matches); + let password = None; Self { dry_run, dump_tx, @@ -2964,25 +3261,35 @@ pub mod args { broadcast_only, ledger_address, initialized_account_alias, + wallet_alias_force, fee_amount, fee_token, gas_limit, + expiration, signing_key, signer, + tx_code_path, + password, + chain_id, } } } - /// Common query arguments - #[derive(Clone, Debug)] - pub struct Query { - /// The address of the ledger node as host:port - pub ledger_address: TendermintAddress, + impl CliToSdk> for Query { + fn to_sdk(self, _ctx: &mut Context) -> Query { + Query:: { ledger_address: () } + } } - impl Args for Query { + impl Args for Query { fn def(app: App) -> App { - app.arg(LEDGER_ADDRESS_DEFAULT.def().about(LEDGER_ADDRESS_ABOUT)) + app.arg( + LEDGER_ADDRESS_DEFAULT + .def() + .about(LEDGER_ADDRESS_ABOUT) + // This used to be "ledger-address", alias for compatibility + .alias("ledger-address"), + ) } fn parse(matches: &ArgMatches) -> Self { @@ -2991,24 +3298,15 @@ pub mod args { } } - /// MASP add key or address arguments - #[derive(Clone, Debug)] - pub struct MaspAddrKeyAdd { - /// Key alias - pub alias: String, - /// Any MASP value - pub value: MaspValue, - /// Don't encrypt the keypair - pub unsafe_dont_encrypt: bool, - } - impl Args for MaspAddrKeyAdd { fn parse(matches: &ArgMatches) -> Self { let alias = ALIAS.parse(matches); + let alias_force = ALIAS_FORCE.parse(matches); let value = MASP_VALUE.parse(matches); let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); Self { alias, + alias_force, value, unsafe_dont_encrypt, } @@ -3032,21 +3330,14 @@ pub mod args { } } - /// MASP generate spending key arguments - #[derive(Clone, Debug)] - pub struct MaspSpendKeyGen { - /// Key alias - pub alias: String, - /// Don't encrypt the keypair - pub unsafe_dont_encrypt: bool, - } - impl Args for MaspSpendKeyGen { fn parse(matches: &ArgMatches) -> Self { let alias = ALIAS.parse(matches); + let alias_force = ALIAS_FORCE.parse(matches); let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); Self { alias, + alias_force, unsafe_dont_encrypt, } } @@ -3064,24 +3355,26 @@ pub mod args { } } - /// MASP generate payment address arguments - #[derive(Clone, Debug)] - pub struct MaspPayAddrGen { - /// Key alias - pub alias: String, - /// Viewing key - pub viewing_key: WalletViewingKey, - /// Pin - pub pin: bool, + impl CliToSdk> for MaspPayAddrGen { + fn to_sdk(self, ctx: &mut Context) -> MaspPayAddrGen { + MaspPayAddrGen:: { + alias: self.alias, + alias_force: self.alias_force, + viewing_key: ctx.get_cached(&self.viewing_key), + pin: self.pin, + } + } } - impl Args for MaspPayAddrGen { + impl Args for MaspPayAddrGen { fn parse(matches: &ArgMatches) -> Self { let alias = ALIAS.parse(matches); + let alias_force = ALIAS_FORCE.parse(matches); let 
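
// The three example strings in the help text above denote the same instant.
// A sketch of a parser tolerant of those spellings, assuming the chrono
// crate; the real CLI uses Namada's own DateTimeUtc parsing, so this only
// illustrates the equivalence the help text claims.

use chrono::{DateTime, NaiveDateTime, Utc};

fn parse_expiration(raw: &str) -> Option<DateTime<Utc>> {
    // Drop the stray padding spaces ("2012- 12-12T12: 12:12Z"), then try
    // both the 'T' and the ' ' separator forms of the UTC timestamp.
    let s = raw.replace("- ", "-").replace(": ", ":");
    let s = s.strip_suffix('Z')?;
    ["%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]
        .into_iter()
        .find_map(|fmt| NaiveDateTime::parse_from_str(s, fmt).ok())
        .map(|naive| DateTime::<Utc>::from_utc(naive, Utc))
}

// parse_expiration("2012-12-12T12:12:12Z")
//     == parse_expiration("2012- 12-12T12: 12:12Z")
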
viewing_key = VIEWING_KEY.parse(matches); let pin = PIN.parse(matches); Self { alias, + alias_force, viewing_key, pin, } @@ -3101,25 +3394,16 @@ pub mod args { } } - /// Wallet generate key and implicit address arguments - #[derive(Clone, Debug)] - pub struct KeyAndAddressGen { - /// Scheme type - pub scheme: SchemeType, - /// Key alias - pub alias: Option, - /// Don't encrypt the keypair - pub unsafe_dont_encrypt: bool, - } - impl Args for KeyAndAddressGen { fn parse(matches: &ArgMatches) -> Self { let scheme = SCHEME.parse(matches); let alias = ALIAS_OPT.parse(matches); + let alias_force = ALIAS_FORCE.parse(matches); let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); Self { scheme, alias, + alias_force, unsafe_dont_encrypt, } } @@ -3141,15 +3425,6 @@ pub mod args { } } - /// Wallet key lookup arguments - #[derive(Clone, Debug)] - pub struct KeyFind { - pub public_key: Option, - pub alias: Option, - pub value: Option, - pub unsafe_show_secret: bool, - } - impl Args for KeyFind { fn parse(matches: &ArgMatches) -> Self { let public_key = RAW_PUBLIC_KEY_OPT.parse(matches); @@ -3191,13 +3466,6 @@ pub mod args { } } - /// Wallet find shielded address or key arguments - #[derive(Clone, Debug)] - pub struct AddrKeyFind { - pub alias: String, - pub unsafe_show_secret: bool, - } - impl Args for AddrKeyFind { fn parse(matches: &ArgMatches) -> Self { let alias = ALIAS.parse(matches); @@ -3218,13 +3486,6 @@ pub mod args { } } - /// Wallet list shielded keys arguments - #[derive(Clone, Debug)] - pub struct MaspKeysList { - pub decrypt: bool, - pub unsafe_show_secret: bool, - } - impl Args for MaspKeysList { fn parse(matches: &ArgMatches) -> Self { let decrypt = DECRYPT.parse(matches); @@ -3245,13 +3506,6 @@ pub mod args { } } - /// Wallet list keys arguments - #[derive(Clone, Debug)] - pub struct KeyList { - pub decrypt: bool, - pub unsafe_show_secret: bool, - } - impl Args for KeyList { fn parse(matches: &ArgMatches) -> Self { let decrypt = DECRYPT.parse(matches); @@ -3272,12 +3526,6 @@ pub mod args { } } - /// Wallet key export arguments - #[derive(Clone, Debug)] - pub struct KeyExport { - pub alias: String, - } - impl Args for KeyExport { fn parse(matches: &ArgMatches) -> Self { let alias = ALIAS.parse(matches); @@ -3294,13 +3542,6 @@ pub mod args { } } - /// Wallet address lookup arguments - #[derive(Clone, Debug)] - pub struct AddressOrAliasFind { - pub alias: Option, - pub address: Option
, - } - impl Args for AddressOrAliasFind { fn parse(matches: &ArgMatches) -> Self { let alias = ALIAS_OPT.parse(matches); @@ -3327,18 +3568,12 @@ pub mod args { } } - /// Wallet address add arguments - #[derive(Clone, Debug)] - pub struct AddressAdd { - pub alias: String, - pub address: Address, - } - impl Args for AddressAdd { fn parse(matches: &ArgMatches) -> Self { let alias = ALIAS.parse(matches); + let alias_force = ALIAS_FORCE.parse(matches); let address = RAW_ADDRESS.parse(matches); - Self { alias, address } + Self { alias, alias_force, address } } fn def(app: App) -> App { @@ -3378,7 +3613,8 @@ pub mod args { } fn def(app: App) -> App { - app.arg(CHAIN_ID.def().about("The chain ID. The chain must be known in the https://github.com/heliaxdev/anoma-network-config repository.")) + app.arg(CHAIN_ID.def().about("The chain ID. The chain must be known in the repository: \ + https://github.com/heliaxdev/anoma-network-config")) .arg(GENESIS_VALIDATOR.def().about("The alias of the genesis validator that you want to set up as, if any.")) .arg(PRE_GENESIS_PATH.def().about("The path to the pre-genesis directory for genesis validator, if any. Defaults to \"{base-dir}/pre-genesis/{genesis-validator}\".")) .arg(DONT_PREFETCH_WASM.def().about( @@ -3387,6 +3623,25 @@ pub mod args { } } + #[derive(Clone, Debug)] + pub struct PkToTmAddress { + pub public_key: common::PublicKey, + } + + impl Args for PkToTmAddress { + fn parse(matches: &ArgMatches) -> Self { + let public_key = RAW_PUBLIC_KEY.parse(matches); + Self { public_key } + } + + fn def(app: App) -> App { + app.arg(RAW_PUBLIC_KEY.def().about( + "The consensus public key to be converted to Tendermint \ + address.", + )) + } + } + #[derive(Clone, Debug)] pub struct FetchWasms { pub chain_id: ChainId, diff --git a/apps/src/lib/cli/context.rs b/apps/src/lib/cli/context.rs index e61fda9dfc0..42c1bb6c94d 100644 --- a/apps/src/lib/cli/context.rs +++ b/apps/src/lib/cli/context.rs @@ -1,22 +1,26 @@ //! CLI input types can be used for command arguments +use std::collections::HashSet; use std::env; use std::marker::PhantomData; use std::path::{Path, PathBuf}; use std::str::FromStr; use color_eyre::eyre::Result; +use namada::ledger::masp::ShieldedContext; +use namada::ledger::wallet::store::AddressVpType; +use namada::ledger::wallet::Wallet; use namada::types::address::Address; use namada::types::chain::ChainId; use namada::types::key::*; use namada::types::masp::*; use super::args; -use crate::client::tx::ShieldedContext; +use crate::client::tx::CLIShieldedUtils; use crate::config::genesis::genesis_config; use crate::config::global::GlobalConfig; use crate::config::{self, Config}; -use crate::wallet::Wallet; +use crate::wallet::CliWalletUtils; use crate::wasm_loader; /// Env. 
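
// A sketch of the conversion behind the PkToTmAddress utility defined
// below, for an ed25519 consensus key: Tendermint defines a validator
// address as the first 20 bytes of SHA-256 over the raw 32-byte public
// key, conventionally printed as upper-case hex. Assumes the sha2 crate;
// the real command goes through Namada's common::PublicKey type.

use sha2::{Digest, Sha256};

fn tm_address(ed25519_pk: &[u8; 32]) -> String {
    let digest = Sha256::digest(ed25519_pk);
    digest[..20].iter().map(|b| format!("{:02X}", b)).collect()
}
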
var to set chain ID @@ -66,13 +70,13 @@ pub struct Context { /// Global arguments pub global_args: args::Global, /// The wallet - pub wallet: Wallet, + pub wallet: Wallet, /// The global configuration pub global_config: GlobalConfig, /// The ledger configuration for a specific chain ID pub config: Config, /// The context fr shielded operations - pub shielded: ShieldedContext, + pub shielded: ShieldedContext, /// Native token's address pub native_token: Address, } @@ -98,8 +102,10 @@ impl Context { let native_token = genesis.native_token; let default_genesis = genesis_config::open_genesis_config(genesis_file_path)?; - let wallet = - Wallet::load_or_new_from_genesis(&chain_dir, default_genesis); + let wallet = crate::wallet::load_or_new_from_genesis( + &chain_dir, + default_genesis, + ); // If the WASM dir specified, put it in the config match global_args.wasm_dir.as_ref() { @@ -118,7 +124,7 @@ impl Context { wallet, global_config, config, - shielded: ShieldedContext::new(chain_dir), + shielded: CLIShieldedUtils::new(chain_dir), native_token, }) } @@ -187,6 +193,11 @@ impl Context { pub fn read_wasm(&self, file_name: impl AsRef) -> Vec { wasm_loader::read_wasm_or_exit(self.wasm_dir(), file_name) } + + /// Get address with vp type + pub fn tokens(&self) -> HashSet
{ + self.wallet.get_addresses_with_vp_type(AddressVpType::Token) + } } /// Load global config from expected path in the `base_dir` or try to generate a @@ -343,7 +354,7 @@ impl ArgFromMutContext for common::SecretKey { FromStr::from_str(raw).or_else(|_parse_err| { // Or it can be an alias ctx.wallet - .find_key(raw) + .find_key(raw, None) .map_err(|_find_err| format!("Unknown key {}", raw)) }) } @@ -360,13 +371,13 @@ impl ArgFromMutContext for common::PublicKey { // Or it can be a public key hash in hex string FromStr::from_str(raw) .map(|pkh: PublicKeyHash| { - let key = ctx.wallet.find_key_by_pkh(&pkh).unwrap(); + let key = ctx.wallet.find_key_by_pkh(&pkh, None).unwrap(); key.ref_to() }) // Or it can be an alias that may be found in the wallet .or_else(|_parse_err| { ctx.wallet - .find_key(raw) + .find_key(raw, None) .map(|x| x.ref_to()) .map_err(|x| x.to_string()) }) @@ -384,7 +395,7 @@ impl ArgFromMutContext for ExtendedSpendingKey { FromStr::from_str(raw).or_else(|_parse_err| { // Or it is a stored alias of one ctx.wallet - .find_spending_key(raw) + .find_spending_key(raw, None) .map_err(|_find_err| format!("Unknown spending key {}", raw)) }) } diff --git a/apps/src/lib/client/mod.rs b/apps/src/lib/client/mod.rs index 486eb3c26d6..57f3c5a043d 100644 --- a/apps/src/lib/client/mod.rs +++ b/apps/src/lib/client/mod.rs @@ -1,6 +1,4 @@ pub mod rpc; pub mod signing; -pub mod tendermint_rpc_types; pub mod tx; -pub mod types; pub mod utils; diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 4dd36ea2eb5..109b0ffa2d6 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -1,322 +1,156 @@ //! Client RPC queries -use std::borrow::Cow; use std::cmp::Ordering; -use std::collections::{BTreeMap, HashMap, HashSet}; -use std::convert::TryInto; +use std::collections::{HashMap, HashSet}; use std::fs::File; use std::io::{self, Write}; use std::iter::Iterator; use std::str::FromStr; +use std::time::Duration; use async_std::fs; use async_std::path::PathBuf; use async_std::prelude::*; use borsh::{BorshDeserialize, BorshSerialize}; use data_encoding::HEXLOWER; -use eyre::{eyre, Context as EyreContext}; +use itertools::Either; use masp_primitives::asset_type::AssetType; use masp_primitives::merkle_tree::MerklePath; use masp_primitives::primitives::ViewingKey; use masp_primitives::sapling::Node; use masp_primitives::transaction::components::Amount; use masp_primitives::zip32::ExtendedFullViewingKey; -#[cfg(not(feature = "mainnet"))] -use namada::core::ledger::testnet_pow; +use namada::core::types::transaction::governance::ProposalType; use namada::ledger::events::Event; use namada::ledger::governance::parameters::GovParams; use namada::ledger::governance::storage as gov_storage; -use namada::ledger::native_vp::governance::utils::Votes; +use namada::ledger::masp::{ + Conversions, PinnedBalanceError, ShieldedContext, ShieldedUtils, +}; +use namada::ledger::native_vp::governance::utils::{self, Votes}; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; use namada::ledger::pos::{ self, BondId, BondsAndUnbondsDetail, CommissionPair, PosParams, Slash, }; -use namada::ledger::queries::{self, RPC}; +use namada::ledger::queries::RPC; +use namada::ledger::rpc::{query_epoch, TxResponse}; use namada::ledger::storage::ConversionState; -use namada::proto::{SignedTxData, Tx}; -use namada::types::address::{masp, tokens, Address}; +use namada::ledger::wallet::{AddressVpType, Wallet}; +use namada::proof_of_stake::types::WeightedValidator; +use 
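
// A sketch of the lookup order in ArgFromMutContext above: try the raw
// string as a literal first, and only then fall back to a wallet alias.
// The extra `None` now threaded into `find_key` is a password override in
// the real wallet API; this toy wallet only mirrors the call shape.

use std::collections::HashMap;
use std::str::FromStr;

#[derive(Clone)]
struct SecretKey(String);

impl FromStr for SecretKey {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Hypothetical literal form, standing in for the real encoding.
        s.strip_prefix("sk:")
            .map(|hex| SecretKey(hex.to_string()))
            .ok_or_else(|| "not a literal key".to_string())
    }
}

struct Wallet {
    keys: HashMap<String, SecretKey>,
}

impl Wallet {
    fn find_key(
        &self,
        alias: &str,
        _password: Option<String>,
    ) -> Result<SecretKey, String> {
        self.keys
            .get(alias)
            .cloned()
            .ok_or_else(|| "alias not found".to_string())
    }
}

fn resolve_key(wallet: &Wallet, raw: &str) -> Result<SecretKey, String> {
    // Literal first, wallet alias second.
    SecretKey::from_str(raw).or_else(|_parse_err| {
        wallet
            .find_key(raw, None)
            .map_err(|_find_err| format!("Unknown key {}", raw))
    })
}
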
namada::types::address::{masp, Address}; use namada::types::governance::{ - OfflineProposal, OfflineVote, ProposalResult, ProposalVote, TallyResult, - VotePower, + OfflineProposal, OfflineVote, ProposalVote, VotePower, VoteType, }; use namada::types::hash::Hash; use namada::types::key::*; use namada::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; -use namada::types::storage::{ - BlockHeight, BlockResults, Epoch, Key, KeySeg, PrefixValue, TxIndex, -}; -use namada::types::token::{balance_key, Transfer}; -use namada::types::transaction::{ - process_tx, AffineCurve, DecryptedTx, EllipticCurve, PairingEngine, TxType, - WrapperTx, -}; -use namada::types::{address, storage, token}; -use tokio::time::{Duration, Instant}; +use namada::types::storage::{BlockHeight, BlockResults, Epoch, Key, KeySeg}; +use namada::types::{storage, token}; -use crate::cli::{self, args, Context}; -use crate::client::tendermint_rpc_types::TxResponse; -use crate::client::tx::{ - Conversions, PinnedBalanceError, TransactionDelta, TransferDelta, -}; +use crate::cli::{self, args}; use crate::facade::tendermint::merkle::proof::Proof; -use crate::facade::tendermint_config::net::Address as TendermintAddress; use crate::facade::tendermint_rpc::error::Error as TError; -use crate::facade::tendermint_rpc::query::Query; -use crate::facade::tendermint_rpc::{ - Client, HttpClient, Order, SubscriptionClient, WebSocketClient, -}; +use crate::wallet::CliWalletUtils; /// Query the status of a given transaction. /// /// If a response is not delivered until `deadline`, we exit the cli with an /// error. -pub async fn query_tx_status( - status: TxEventQuery<'_>, - address: TendermintAddress, - deadline: Instant, +pub async fn query_tx_status( + client: &C, + status: namada::ledger::rpc::TxEventQuery<'_>, + deadline: Duration, ) -> Event { - const ONE_SECOND: Duration = Duration::from_secs(1); - // sleep for the duration of `backoff`, - // and update the underlying value - async fn sleep_update(query: TxEventQuery<'_>, backoff: &mut Duration) { - tracing::debug!( - ?query, - duration = ?backoff, - "Retrying tx status query after timeout", - ); - // simple linear backoff - if an event is not available, - // increase the backoff duration by one second - tokio::time::sleep(*backoff).await; - *backoff += ONE_SECOND; - } - tokio::time::timeout_at(deadline, async move { - let client = HttpClient::new(address).unwrap(); - let mut backoff = ONE_SECOND; - - loop { - tracing::debug!(query = ?status, "Querying tx status"); - let maybe_event = match query_tx_events(&client, status).await { - Ok(response) => response, - Err(err) => { - tracing::debug!(%err, "ABCI query failed"); - sleep_update(status, &mut backoff).await; - continue; - } - }; - if let Some(e) = maybe_event { - break Ok(e); - } - sleep_update(status, &mut backoff).await; - } - }) - .await - .map_err(|_| { - eprintln!("Transaction status query deadline of {deadline:?} exceeded"); - }) - .and_then(|result| result) - .unwrap_or_else(|_| cli::safe_exit(1)) + namada::ledger::rpc::query_tx_status(client, status, deadline).await } /// Query and print the epoch of the last committed block -pub async fn query_and_print_epoch(args: args::Query) -> Epoch { - let client = HttpClient::new(args.ledger_address).unwrap(); - let epoch = unwrap_client_response(RPC.shell().epoch(&client).await); +pub async fn query_and_print_epoch< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, +) -> Epoch { + let epoch = namada::ledger::rpc::query_epoch(client).await; println!("Last 
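
// A sketch of the retry strategy that moved into namada::ledger::rpc:
// poll the event query with a linearly growing backoff until an overall
// deadline elapses. Note the deadline is now a Duration rather than a
// tokio Instant, which is what the new signature above reflects. Assumes
// tokio; the query closure stands in for the real ABCI event query.

use std::future::Future;
use std::time::Duration;

async fn poll_with_backoff<F, Fut, T>(deadline: Duration, mut query: F) -> Option<T>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Option<T>>,
{
    tokio::time::timeout(deadline, async move {
        let mut backoff = Duration::from_secs(1);
        loop {
            if let Some(event) = query().await {
                break event;
            }
            // Simple linear backoff: sleep, then widen by one second.
            tokio::time::sleep(backoff).await;
            backoff += Duration::from_secs(1);
        }
    })
    .await
    .ok()
}
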
committed epoch: {}", epoch); epoch } -/// Query the epoch of the last committed block -pub async fn query_epoch(client: &HttpClient) -> Epoch { - unwrap_client_response(RPC.shell().epoch(client).await) -} - /// Query the last committed block -pub async fn query_block( - args: args::Query, +pub async fn query_block( + client: &C, ) -> crate::facade::tendermint_rpc::endpoint::block::Response { - let client = HttpClient::new(args.ledger_address).unwrap(); - let response = client.latest_block().await.unwrap(); - println!( - "Last committed block ID: {}, height: {}, time: {}", - response.block_id, - response.block.header.height, - response.block.header.time - ); - response + namada::ledger::rpc::query_block(client).await } /// Query the results of the last committed block -pub async fn query_results(args: args::Query) -> Vec { - let client = HttpClient::new(args.ledger_address).unwrap(); - unwrap_client_response(RPC.shell().read_results(&client).await) -} - -/// Obtain the known effects of all accepted shielded and transparent -/// transactions. If an owner is specified, then restrict the set to only -/// transactions crediting/debiting the given owner. If token is specified, then -/// restrict set to only transactions involving the given token. -pub async fn query_tx_deltas( - ctx: &mut Context, - ledger_address: TendermintAddress, - query_owner: &Option, - query_token: &Option
, -) -> BTreeMap<(BlockHeight, TxIndex), (Epoch, TransferDelta, TransactionDelta)> -{ - const TXS_PER_PAGE: u8 = 100; - // Connect to the Tendermint server holding the transactions - let client = HttpClient::new(ledger_address.clone()).unwrap(); - // Build up the context that will be queried for transactions - let _ = ctx.shielded.load(); - let vks = ctx.wallet.get_viewing_keys(); - let fvks: Vec<_> = vks - .values() - .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) - .collect(); - ctx.shielded.fetch(&ledger_address, &[], &fvks).await; - // Save the update state so that future fetches can be short-circuited - let _ = ctx.shielded.save(); - // Required for filtering out rejected transactions from Tendermint - // responses - let block_results = query_results(args::Query { ledger_address }).await; - let mut transfers = ctx.shielded.get_tx_deltas().clone(); - // Construct the set of addresses relevant to user's query - let relevant_addrs = match &query_owner { - Some(BalanceOwner::Address(owner)) => vec![owner.clone()], - // MASP objects are dealt with outside of tx_search - Some(BalanceOwner::FullViewingKey(_viewing_key)) => vec![], - Some(BalanceOwner::PaymentAddress(_owner)) => vec![], - // Unspecified owner means all known addresses are considered relevant - None => ctx.wallet.get_addresses().into_values().collect(), - }; - // Find all transactions to or from the relevant address set - for addr in relevant_addrs { - for prop in ["transfer.source", "transfer.target"] { - // Query transactions involving the current address - let mut tx_query = Query::eq(prop, addr.encode()); - // Elaborate the query if requested by the user - if let Some(token) = &query_token { - tx_query = tx_query.and_eq("transfer.token", token.encode()); - } - for page in 1.. 
{ - let txs = &client - .tx_search( - tx_query.clone(), - true, - page, - TXS_PER_PAGE, - Order::Ascending, - ) - .await - .expect("Unable to query for transactions") - .txs; - for response_tx in txs { - let height = BlockHeight(response_tx.height.value()); - let idx = TxIndex(response_tx.index); - // Only process yet unprocessed transactions which have been - // accepted by node VPs - let should_process = !transfers - .contains_key(&(height, idx)) - && block_results[u64::from(height) as usize] - .is_accepted(idx.0 as usize); - if !should_process { - continue; - } - let tx = Tx::try_from(response_tx.tx.as_ref()) - .expect("Ill-formed Tx"); - let mut wrapper = None; - let mut transfer = None; - extract_payload(tx, &mut wrapper, &mut transfer); - // Epoch data is not needed for transparent transactions - let epoch = wrapper.map(|x| x.epoch).unwrap_or_default(); - if let Some(transfer) = transfer { - // Skip MASP addresses as they are already handled by - // ShieldedContext - if transfer.source == masp() - || transfer.target == masp() - { - continue; - } - // Describe how a Transfer simply subtracts from one - // account and adds the same to another - let mut delta = TransferDelta::default(); - let tfer_delta = Amount::from_nonnegative( - transfer.token.clone(), - u64::from(transfer.amount), - ) - .expect("invalid value for amount"); - delta.insert( - transfer.source, - Amount::zero() - &tfer_delta, - ); - delta.insert(transfer.target, tfer_delta); - // No shielded accounts are affected by this Transfer - transfers.insert( - (height, idx), - (epoch, delta, TransactionDelta::new()), - ); - } - } - // An incomplete page signifies no more transactions - if (txs.len() as u8) < TXS_PER_PAGE { - break; - } - } - } - } - transfers +pub async fn query_results( + client: &C, + _args: args::Query, +) -> Vec { + unwrap_client_response::>( + RPC.shell().read_results(client).await, + ) } /// Query the specified accepted transfers from the ledger -pub async fn query_transfers(mut ctx: Context, args: args::QueryTransfers) { - let query_token = args.token.as_ref().map(|x| ctx.get(x)); - let query_owner = args.owner.as_ref().map(|x| ctx.get_cached(x)); +pub async fn query_transfers< + C: namada::ledger::queries::Client + Sync, + U: ShieldedUtils, +>( + client: &C, + wallet: &mut Wallet, + shielded: &mut ShieldedContext, + args: args::QueryTransfers, +) { + let query_token = args.token; + let query_owner = args.owner.map_or_else( + || Either::Right(wallet.get_addresses().into_values().collect()), + Either::Left, + ); + let _ = shielded.load(); // Obtain the effects of all shielded and transparent transactions - let transfers = query_tx_deltas( - &mut ctx, - args.query.ledger_address.clone(), - &query_owner, - &query_token, - ) - .await; + let transfers = shielded + .query_tx_deltas( + client, + &query_owner, + &query_token, + &wallet.get_viewing_keys(), + ) + .await; // To facilitate lookups of human-readable token names - let tokens = tokens(); - let vks = ctx.wallet.get_viewing_keys(); + let vks = wallet.get_viewing_keys(); // To enable ExtendedFullViewingKeys to be displayed instead of ViewingKeys let fvk_map: HashMap<_, _> = vks .values() .map(|fvk| (ExtendedFullViewingKey::from(*fvk).fvk.vk, fvk)) .collect(); - // Connect to the Tendermint server holding the transactions - let client = HttpClient::new(args.query.ledger_address.clone()).unwrap(); // Now display historical shielded and transparent transactions for ((height, idx), (epoch, tfer_delta, tx_delta)) in transfers { // Check if this transfer 
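
// A sketch of the paging idiom in the removed query_tx_deltas (the logic
// now lives in ShieldedContext::query_tx_deltas): request pages of
// TXS_PER_PAGE results and stop as soon as a page comes back short, since
// an incomplete page signifies no more transactions.

use std::future::Future;

const TXS_PER_PAGE: u8 = 100;

async fn fetch_all_pages<F, Fut, T>(mut fetch_page: F) -> Vec<T>
where
    F: FnMut(u32) -> Fut,
    Fut: Future<Output = Vec<T>>,
{
    let mut all = Vec::new();
    for page in 1.. {
        let txs = fetch_page(page).await;
        let incomplete = (txs.len() as u8) < TXS_PER_PAGE;
        all.extend(txs);
        if incomplete {
            break;
        }
    }
    all
}
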
pertains to the supplied owner let mut relevant = match &query_owner { - Some(BalanceOwner::FullViewingKey(fvk)) => tx_delta + Either::Left(BalanceOwner::FullViewingKey(fvk)) => tx_delta .contains_key(&ExtendedFullViewingKey::from(*fvk).fvk.vk), - Some(BalanceOwner::Address(owner)) => { + Either::Left(BalanceOwner::Address(owner)) => { tfer_delta.contains_key(owner) } - Some(BalanceOwner::PaymentAddress(_owner)) => false, - None => true, + Either::Left(BalanceOwner::PaymentAddress(_owner)) => false, + Either::Right(_) => true, }; // Realize and decode the shielded changes to enable relevance check let mut shielded_accounts = HashMap::new(); for (acc, amt) in tx_delta { // Realize the rewards that would have been attained upon the // transaction's reception - let amt = ctx - .shielded + let amt = shielded .compute_exchanged_amount( - client.clone(), + client, amt, epoch, Conversions::new(), ) .await .0; - let dec = - ctx.shielded.decode_amount(client.clone(), amt, epoch).await; + let dec = shielded.decode_amount(client, amt, epoch).await; shielded_accounts.insert(acc, dec); } // Check if this transfer pertains to the supplied token @@ -337,9 +171,7 @@ pub async fn query_transfers(mut ctx: Context, args: args::QueryTransfers) { if account != masp() { print!(" {}:", account); for (addr, val) in amt.components() { - let addr_enc = addr.encode(); - let readable = - tokens.get(addr).cloned().unwrap_or(addr_enc.as_str()); + let token_alias = lookup_alias(wallet, addr); let sign = match val.cmp(&0) { Ordering::Greater => "+", Ordering::Less => "-", @@ -349,7 +181,7 @@ pub async fn query_transfers(mut ctx: Context, args: args::QueryTransfers) { " {}{} {}", sign, token::Amount::from(val.unsigned_abs()), - readable + token_alias ); } println!(); @@ -361,9 +193,7 @@ pub async fn query_transfers(mut ctx: Context, args: args::QueryTransfers) { if fvk_map.contains_key(&account) { print!(" {}:", fvk_map[&account]); for (addr, val) in amt.components() { - let addr_enc = addr.encode(); - let readable = - tokens.get(addr).cloned().unwrap_or(addr_enc.as_str()); + let token_alias = lookup_alias(wallet, addr); let sign = match val.cmp(&0) { Ordering::Greater => "+", Ordering::Less => "-", @@ -373,7 +203,7 @@ pub async fn query_transfers(mut ctx: Context, args: args::QueryTransfers) { " {}{} {}", sign, token::Amount::from(val.unsigned_abs()), - readable + token_alias ); } println!(); @@ -382,51 +212,14 @@ pub async fn query_transfers(mut ctx: Context, args: args::QueryTransfers) { } } -/// Extract the payload from the given Tx object -fn extract_payload( - tx: Tx, - wrapper: &mut Option, - transfer: &mut Option, -) { - match process_tx(tx) { - Ok(TxType::Wrapper(wrapper_tx)) => { - let privkey = ::G2Affine::prime_subgroup_generator(); - extract_payload( - Tx::from(match wrapper_tx.decrypt(privkey) { - Ok(tx) => DecryptedTx::Decrypted { - tx, - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - }, - _ => DecryptedTx::Undecryptable(wrapper_tx.clone()), - }), - wrapper, - transfer, - ); - *wrapper = Some(wrapper_tx); - } - Ok(TxType::Decrypted(DecryptedTx::Decrypted { - tx, - #[cfg(not(feature = "mainnet"))] - has_valid_pow: _, - })) => { - let empty_vec = vec![]; - let tx_data = tx.data.as_ref().unwrap_or(&empty_vec); - let _ = SignedTxData::try_from_slice(tx_data).map(|signed| { - Transfer::try_from_slice(&signed.data.unwrap()[..]) - .map(|tfer| *transfer = Some(tfer)) - }); - } - _ => {} - } -} - /// Query the raw bytes of given storage key -pub async fn query_raw_bytes(_ctx: Context, args: 
args::QueryRawBytes) { - let client = HttpClient::new(args.query.ledger_address).unwrap(); - let response = unwrap_client_response( +pub async fn query_raw_bytes( + client: &C, + args: args::QueryRawBytes, +) { + let response = unwrap_client_response::( RPC.shell() - .storage_value(&client, None, None, false, &args.storage_key) + .storage_value(client, None, None, false, &args.storage_key) .await, ); if !response.data.is_empty() { @@ -437,41 +230,50 @@ pub async fn query_raw_bytes(_ctx: Context, args: args::QueryRawBytes) { } /// Query token balance(s) -pub async fn query_balance(mut ctx: Context, args: args::QueryBalance) { +pub async fn query_balance< + C: namada::ledger::queries::Client + Sync, + U: ShieldedUtils, +>( + client: &C, + wallet: &mut Wallet, + shielded: &mut ShieldedContext, + args: args::QueryBalance, +) { // Query the balances of shielded or transparent account types depending on // the CLI arguments - match args.owner.as_ref().map(|x| ctx.get_cached(x)) { + match &args.owner { Some(BalanceOwner::FullViewingKey(_viewing_key)) => { - query_shielded_balance(&mut ctx, args).await + query_shielded_balance(client, wallet, shielded, args).await } Some(BalanceOwner::Address(_owner)) => { - query_transparent_balance(&mut ctx, args).await + query_transparent_balance(client, wallet, args).await } Some(BalanceOwner::PaymentAddress(_owner)) => { - query_pinned_balance(&mut ctx, args).await + query_pinned_balance(client, wallet, shielded, args).await } None => { // Print pinned balance - query_pinned_balance(&mut ctx, args.clone()).await; + query_pinned_balance(client, wallet, shielded, args.clone()).await; // Print shielded balance - query_shielded_balance(&mut ctx, args.clone()).await; + query_shielded_balance(client, wallet, shielded, args.clone()) + .await; // Then print transparent balance - query_transparent_balance(&mut ctx, args).await; + query_transparent_balance(client, wallet, args).await; } }; } /// Query token balance(s) -pub async fn query_transparent_balance( - ctx: &mut Context, +pub async fn query_transparent_balance< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, + wallet: &mut Wallet, args: args::QueryBalance, ) { - let client = HttpClient::new(args.query.ledger_address).unwrap(); - let tokens = address::tokens(); + let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); match (args.token, args.owner) { (Some(token), Some(owner)) => { - let token = ctx.get(&token); - let owner = ctx.get_cached(&owner); let key = match &args.sub_prefix { Some(sub_prefix) => { let sub_prefix = Key::parse(sub_prefix).unwrap(); @@ -484,35 +286,31 @@ pub async fn query_transparent_balance( } None => token::balance_key(&token, &owner.address().unwrap()), }; - let currency_code = tokens - .get(&token) - .map(|c| Cow::Borrowed(*c)) - .unwrap_or_else(|| Cow::Owned(token.to_string())); - match query_storage_value::(&client, &key).await { + let token_alias = lookup_alias(wallet, &token); + match query_storage_value::(client, &key).await { Some(balance) => match &args.sub_prefix { Some(sub_prefix) => { println!( "{} with {}: {}", - currency_code, sub_prefix, balance + token_alias, sub_prefix, balance ); } - None => println!("{}: {}", currency_code, balance), + None => println!("{}: {}", token_alias, balance), }, None => { - println!("No {} balance found for {}", currency_code, owner) + println!("No {} balance found for {}", token_alias, owner) } } } (None, Some(owner)) => { - let owner = ctx.get_cached(&owner); - for (token, _) in tokens { + for token in tokens { 
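
// A sketch of the storage-key shapes being assembled above, with a
// simplified string-segment Key in place of namada::types::storage::Key.
// The exact segment layout is an assumption read off the surrounding
// code: a plain balance lives at {token}/balance/{owner}, while a
// sub-prefixed (multitoken) balance inserts the sub prefix after the
// token address.

#[derive(Clone, Debug)]
struct Key(Vec<String>);

impl Key {
    fn parse(raw: &str) -> Key {
        Key(raw.split('/').map(str::to_string).collect())
    }
    fn push(mut self, seg: &str) -> Key {
        self.0.push(seg.to_string());
        self
    }
}

fn balance_key(token: &str, sub_prefix: Option<&str>, owner: &str) -> Key {
    let key = Key::parse(token);
    let key = match sub_prefix {
        Some(sub) => key.push(sub),
        None => key,
    };
    key.push("balance").push(owner)
}
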
let prefix = token.to_db_key().into(); let balances = - query_storage_prefix::(&client, &prefix) + query_storage_prefix::(client, &prefix) .await; if let Some(balances) = balances { print_balances( - ctx, + wallet, balances, &token, owner.address().as_ref(), @@ -521,21 +319,21 @@ pub async fn query_transparent_balance( } } (Some(token), None) => { - let token = ctx.get(&token); let prefix = token.to_db_key().into(); let balances = - query_storage_prefix::(&client, &prefix).await; + query_storage_prefix::(client, &prefix).await; if let Some(balances) = balances { - print_balances(ctx, balances, &token, None); + print_balances(wallet, balances, &token, None); } } (None, None) => { - for (token, _) in tokens { + for token in tokens { let key = token::balance_prefix(&token); let balances = - query_storage_prefix::(&client, &key).await; + query_storage_prefix::(client, &key) + .await; if let Some(balances) = balances { - print_balances(ctx, balances, &token, None); + print_balances(wallet, balances, &token, None); } } } @@ -543,45 +341,42 @@ pub async fn query_transparent_balance( } /// Query the token pinned balance(s) -pub async fn query_pinned_balance(ctx: &mut Context, args: args::QueryBalance) { +pub async fn query_pinned_balance< + C: namada::ledger::queries::Client + Sync, + U: ShieldedUtils, +>( + client: &C, + wallet: &mut Wallet, + shielded: &mut ShieldedContext, + args: args::QueryBalance, +) { // Map addresses to token names - let tokens = address::tokens(); - let owners = if let Some(pa) = args - .owner - .and_then(|x| ctx.get_cached(&x).payment_address()) + let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); + let owners = if let Some(pa) = args.owner.and_then(|x| x.payment_address()) { vec![pa] } else { - ctx.wallet + wallet .get_payment_addrs() .into_values() .filter(PaymentAddress::is_pinned) .collect() }; // Get the viewing keys with which to try note decryptions - let viewing_keys: Vec = ctx - .wallet + let viewing_keys: Vec = wallet .get_viewing_keys() .values() .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) .collect(); - // Build up the context that will be queried for asset decodings - let _ = ctx.shielded.load(); - // Establish connection with which to do exchange rate queries - let client = HttpClient::new(args.query.ledger_address.clone()).unwrap(); + let _ = shielded.load(); // Print the token balances by payment address for owner in owners { let mut balance = Err(PinnedBalanceError::InvalidViewingKey); // Find the viewing key that can recognize payments the current payment // address for vk in &viewing_keys { - balance = ctx - .shielded - .compute_exchanged_pinned_balance( - &args.query.ledger_address, - owner, - vk, - ) + balance = shielded + .compute_exchanged_pinned_balance(client, owner, vk) .await; if balance != Err(PinnedBalanceError::InvalidViewingKey) { break; @@ -602,13 +397,8 @@ pub async fn query_pinned_balance(ctx: &mut Context, args: args::QueryBalance) { }; let vk = ExtendedFullViewingKey::from(fvk).fvk.vk; // Use the given viewing key to decrypt pinned transaction data - balance = ctx - .shielded - .compute_exchanged_pinned_balance( - &args.query.ledger_address, - owner, - &vk, - ) + balance = shielded + .compute_exchanged_pinned_balance(client, owner, &vk) .await } // Now print out the received quantities according to CLI arguments @@ -621,36 +411,30 @@ pub async fn query_pinned_balance(ctx: &mut Context, args: args::QueryBalance) { println!("Payment address {} has not yet been consumed.", owner) } (Ok((balance, epoch)), 
Some(token)) => { - let token = ctx.get(token); // Extract and print only the specified token from the total let (_asset_type, balance) = value_by_address(&balance, token.clone(), epoch); - let currency_code = tokens - .get(&token) - .map(|c| Cow::Borrowed(*c)) - .unwrap_or_else(|| Cow::Owned(token.to_string())); + let token_alias = lookup_alias(wallet, token); if balance == 0 { println!( "Payment address {} was consumed during epoch {}. \ Received no shielded {}", - owner, epoch, currency_code + owner, epoch, token_alias ); } else { let asset_value = token::Amount::from(balance as u64); println!( "Payment address {} was consumed during epoch {}. \ Received {} {}", - owner, epoch, asset_value, currency_code + owner, epoch, asset_value, token_alias ); } } (Ok((balance, epoch)), None) => { let mut found_any = false; // Print balances by human-readable token names - let balance = ctx - .shielded - .decode_amount(client.clone(), balance, epoch) - .await; + let balance = + shielded.decode_amount(client, balance, epoch).await; for (addr, value) in balance.components() { let asset_value = token::Amount::from(*value as u64); if !found_any { @@ -661,10 +445,12 @@ pub async fn query_pinned_balance(ctx: &mut Context, args: args::QueryBalance) { ); found_any = true; } - let addr_enc = addr.encode(); println!( " {}: {}", - tokens.get(addr).cloned().unwrap_or(addr_enc.as_str()), + tokens + .get(addr) + .cloned() + .unwrap_or_else(|| addr.clone()), asset_value, ); } @@ -681,7 +467,7 @@ pub async fn query_pinned_balance(ctx: &mut Context, args: args::QueryBalance) { } fn print_balances( - ctx: &Context, + wallet: &Wallet, balances: impl Iterator, token: &Address, target: Option<&Address>, @@ -689,13 +475,8 @@ fn print_balances( let stdout = io::stdout(); let mut w = stdout.lock(); - // Token - let tokens = address::tokens(); - let currency_code = tokens - .get(token) - .map(|c| Cow::Borrowed(*c)) - .unwrap_or_else(|| Cow::Owned(token.to_string())); - writeln!(w, "Token {}", currency_code).unwrap(); + let token_alias = lookup_alias(wallet, token); + writeln!(w, "Token {}", token_alias).unwrap(); let print_num = balances .filter_map( @@ -706,7 +487,7 @@ fn print_balances( "with {}: {}, owned by {}", sub_prefix, balance, - lookup_alias(ctx, owner) + lookup_alias(wallet, owner) ), )), None => token::is_any_token_balance_key(&key).map(|owner| { @@ -715,7 +496,7 @@ fn print_balances( format!( ": {}, owned by {}", balance, - lookup_alias(ctx, owner) + lookup_alias(wallet, owner) ), ) }), @@ -734,20 +515,23 @@ fn print_balances( if print_num == 0 { match target { Some(t) => { - writeln!(w, "No balances owned by {}", lookup_alias(ctx, t)) + writeln!(w, "No balances owned by {}", lookup_alias(wallet, t)) .unwrap() } None => { - writeln!(w, "No balances for token {}", currency_code).unwrap() + writeln!(w, "No balances for token {}", token_alias).unwrap() } } } } /// Query Proposals -pub async fn query_proposal(_ctx: Context, args: args::QueryProposal) { - async fn print_proposal( - client: &HttpClient, +pub async fn query_proposal( + client: &C, + args: args::QueryProposal, +) { + async fn print_proposal( + client: &C, id: u64, current_epoch: Epoch, details: bool, @@ -755,26 +539,32 @@ pub async fn query_proposal(_ctx: Context, args: args::QueryProposal) { let author_key = gov_storage::get_author_key(id); let start_epoch_key = gov_storage::get_voting_start_epoch_key(id); let end_epoch_key = gov_storage::get_voting_end_epoch_key(id); + let proposal_type_key = gov_storage::get_proposal_type_key(id); let author = - 
query_storage_value::<Address>
(client, &author_key).await?; + query_storage_value::(client, &author_key).await?; let start_epoch = - query_storage_value::(client, &start_epoch_key).await?; + query_storage_value::(client, &start_epoch_key).await?; let end_epoch = - query_storage_value::(client, &end_epoch_key).await?; + query_storage_value::(client, &end_epoch_key).await?; + let proposal_type = + query_storage_value::(client, &proposal_type_key) + .await?; if details { let content_key = gov_storage::get_content_key(id); let grace_epoch_key = gov_storage::get_grace_epoch_key(id); - let content = query_storage_value::>( + let content = query_storage_value::>( client, &content_key, ) .await?; let grace_epoch = - query_storage_value::(client, &grace_epoch_key).await?; + query_storage_value::(client, &grace_epoch_key) + .await?; println!("Proposal: {}", id); + println!("{:4}Type: {}", "", proposal_type); println!("{:4}Author: {}", "", author); println!("{:4}Content:", ""); for (key, value) in &content { @@ -783,31 +573,43 @@ pub async fn query_proposal(_ctx: Context, args: args::QueryProposal) { println!("{:4}Start Epoch: {}", "", start_epoch); println!("{:4}End Epoch: {}", "", end_epoch); println!("{:4}Grace Epoch: {}", "", grace_epoch); + let votes = get_proposal_votes(client, start_epoch, id).await; + let total_stake = + get_total_staked_tokens(client, start_epoch).await.into(); if start_epoch > current_epoch { println!("{:4}Status: pending", ""); } else if start_epoch <= current_epoch && current_epoch <= end_epoch { - let votes = get_proposal_votes(client, start_epoch, id).await; - let partial_proposal_result = - compute_tally(client, start_epoch, votes).await; - println!( - "{:4}Yay votes: {}", - "", partial_proposal_result.total_yay_power - ); - println!( - "{:4}Nay votes: {}", - "", partial_proposal_result.total_nay_power - ); - println!("{:4}Status: on-going", ""); + match utils::compute_tally(votes, total_stake, &proposal_type) { + Ok(partial_proposal_result) => { + println!( + "{:4}Yay votes: {}", + "", partial_proposal_result.total_yay_power + ); + println!( + "{:4}Nay votes: {}", + "", partial_proposal_result.total_nay_power + ); + println!("{:4}Status: on-going", ""); + } + Err(msg) => { + eprintln!("Error in tally computation: {}", msg) + } + } } else { - let votes = get_proposal_votes(client, start_epoch, id).await; - let proposal_result = - compute_tally(client, start_epoch, votes).await; - println!("{:4}Status: done", ""); - println!("{:4}Result: {}", "", proposal_result); + match utils::compute_tally(votes, total_stake, &proposal_type) { + Ok(proposal_result) => { + println!("{:4}Status: done", ""); + println!("{:4}Result: {}", "", proposal_result); + } + Err(msg) => { + eprintln!("Error in tally computation: {}", msg) + } + } } } else { println!("Proposal: {}", id); + println!("{:4}Type: {}", "", proposal_type); println!("{:4}Author: {}", "", author); println!("{:4}Start Epoch: {}", "", start_epoch); println!("{:4}End Epoch: {}", "", end_epoch); @@ -824,11 +626,10 @@ pub async fn query_proposal(_ctx: Context, args: args::QueryProposal) { Some(()) } - let client = HttpClient::new(args.query.ledger_address.clone()).unwrap(); - let current_epoch = query_and_print_epoch(args.query.clone()).await; + let current_epoch = query_and_print_epoch(client).await; match args.proposal_id { Some(id) => { - if print_proposal(&client, id, current_epoch, true) + if print_proposal::(client, id, current_epoch, true) .await .is_none() { @@ -838,12 +639,12 @@ pub async fn query_proposal(_ctx: Context, args: args::QueryProposal) 
{ None => { let last_proposal_id_key = gov_storage::get_counter_key(); let last_proposal_id = - query_storage_value::(&client, &last_proposal_id_key) + query_storage_value::(client, &last_proposal_id_key) .await .unwrap(); for id in 0..last_proposal_id { - if print_proposal(&client, id, current_epoch, false) + if print_proposal::(client, id, current_epoch, false) .await .is_none() { @@ -872,40 +673,38 @@ pub fn value_by_address( } /// Query token shielded balance(s) -pub async fn query_shielded_balance( - ctx: &mut Context, +pub async fn query_shielded_balance< + C: namada::ledger::queries::Client + Sync, + U: ShieldedUtils, +>( + client: &C, + wallet: &mut Wallet, + shielded: &mut ShieldedContext, args: args::QueryBalance, ) { // Used to control whether balances for all keys or a specific key are // printed - let owner = args - .owner - .and_then(|x| ctx.get_cached(&x).full_viewing_key()); + let owner = args.owner.and_then(|x| x.full_viewing_key()); // Used to control whether conversions are automatically performed let no_conversions = args.no_conversions; // Viewing keys are used to query shielded balances. If a spending key is // provided, then convert to a viewing key first. let viewing_keys = match owner { Some(viewing_key) => vec![viewing_key], - None => ctx.wallet.get_viewing_keys().values().copied().collect(), + None => wallet.get_viewing_keys().values().copied().collect(), }; - // Build up the context that will be queried for balances - let _ = ctx.shielded.load(); + let _ = shielded.load(); let fvks: Vec<_> = viewing_keys .iter() .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) .collect(); - ctx.shielded - .fetch(&args.query.ledger_address, &[], &fvks) - .await; + shielded.fetch(client, &[], &fvks).await; // Save the update state so that future fetches can be short-circuited - let _ = ctx.shielded.save(); + let _ = shielded.save(); // The epoch is required to identify timestamped tokens - let epoch = query_and_print_epoch(args.query.clone()).await; - // Establish connection with which to do exchange rate queries - let client = HttpClient::new(args.query.ledger_address.clone()).unwrap(); + let epoch = query_and_print_epoch(client).await; // Map addresses to token names - let tokens = address::tokens(); + let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); match (args.token, owner.is_some()) { // Here the user wants to know the balance for a specific token (Some(token), true) => { @@ -913,21 +712,17 @@ pub async fn query_shielded_balance( let viewing_key = ExtendedFullViewingKey::from(viewing_keys[0]).fvk.vk; let balance: Amount = if no_conversions { - ctx.shielded + shielded .compute_shielded_balance(&viewing_key) .expect("context should contain viewing key") } else { - ctx.shielded - .compute_exchanged_balance( - client.clone(), - &viewing_key, - epoch, - ) + shielded + .compute_exchanged_balance(client, &viewing_key, epoch) .await .expect("context should contain viewing key") }; // Compute the unique asset identifier from the token address - let token = ctx.get(&token); + let token = token; let asset_type = AssetType::new( (token.clone(), epoch.0) .try_to_vec() @@ -935,19 +730,16 @@ pub async fn query_shielded_balance( .as_ref(), ) .unwrap(); - let currency_code = tokens - .get(&token) - .map(|c| Cow::Borrowed(*c)) - .unwrap_or_else(|| Cow::Owned(token.to_string())); + let token_alias = lookup_alias(wallet, &token); if balance[&asset_type] == 0 { println!( "No shielded {} balance found for given key", - currency_code + token_alias ); } else { let 
asset_value = token::Amount::from(balance[&asset_type] as u64); - println!("{}: {}", currency_code, asset_value); + println!("{}: {}", token_alias, asset_value); } } // Here the user wants to know the balance of all tokens across users @@ -958,16 +750,12 @@ pub async fn query_shielded_balance( // Query the multi-asset balance at the given spending key let viewing_key = ExtendedFullViewingKey::from(fvk).fvk.vk; let balance = if no_conversions { - ctx.shielded + shielded .compute_shielded_balance(&viewing_key) .expect("context should contain viewing key") } else { - ctx.shielded - .compute_exchanged_balance( - client.clone(), - &viewing_key, - epoch, - ) + shielded + .compute_exchanged_balance(client, &viewing_key, epoch) .await .expect("context should contain viewing key") }; @@ -984,20 +772,17 @@ pub async fn query_shielded_balance( // Print non-zero balances whose asset types can be decoded for (asset_type, balances) in balances { // Decode the asset type - let decoded = ctx - .shielded - .decode_asset_type(client.clone(), asset_type) - .await; + let decoded = + shielded.decode_asset_type(client, asset_type).await; match decoded { Some((addr, asset_epoch)) if asset_epoch == epoch => { // Only assets with the current timestamp count - let addr_enc = addr.encode(); println!( "Shielded Token {}:", tokens .get(&addr) .cloned() - .unwrap_or(addr_enc.as_str()) + .unwrap_or_else(|| addr.clone()) ); read_tokens.insert(addr); } @@ -1018,12 +803,13 @@ pub async fn query_shielded_balance( } } // Print zero balances for remaining assets - for (token, currency_code) in tokens { + for token in tokens { if !read_tokens.contains(&token) { - println!("Shielded Token {}:", currency_code); + let token_alias = lookup_alias(wallet, &token); + println!("Shielded Token {}:", token_alias); println!( "No shielded {} balance found for any wallet key", - currency_code + token_alias ); } } @@ -1032,7 +818,7 @@ pub async fn query_shielded_balance( // users (Some(token), false) => { // Compute the unique asset identifier from the token address - let token = ctx.get(&token); + let token = token; let asset_type = AssetType::new( (token.clone(), epoch.0) .try_to_vec() @@ -1040,26 +826,19 @@ pub async fn query_shielded_balance( .as_ref(), ) .unwrap(); - let currency_code = tokens - .get(&token) - .map(|c| Cow::Borrowed(*c)) - .unwrap_or_else(|| Cow::Owned(token.to_string())); - println!("Shielded Token {}:", currency_code); + let token_alias = lookup_alias(wallet, &token); + println!("Shielded Token {}:", token_alias); let mut found_any = false; for fvk in viewing_keys { // Query the multi-asset balance at the given spending key let viewing_key = ExtendedFullViewingKey::from(fvk).fvk.vk; let balance = if no_conversions { - ctx.shielded + shielded .compute_shielded_balance(&viewing_key) .expect("context should contain viewing key") } else { - ctx.shielded - .compute_exchanged_balance( - client.clone(), - &viewing_key, - epoch, - ) + shielded + .compute_exchanged_balance(client, &viewing_key, epoch) .await .expect("context should contain viewing key") }; @@ -1073,7 +852,7 @@ pub async fn query_shielded_balance( if !found_any { println!( "No shielded {} balance found for any wallet key", - currency_code + token_alias ); } } @@ -1084,48 +863,35 @@ pub async fn query_shielded_balance( ExtendedFullViewingKey::from(viewing_keys[0]).fvk.vk; let balance; if no_conversions { - balance = ctx - .shielded + balance = shielded .compute_shielded_balance(&viewing_key) .expect("context should contain viewing key"); // Print balances by 
human-readable token names - let decoded_balance = ctx - .shielded - .decode_all_amounts(client.clone(), balance) - .await; - print_decoded_balance_with_epoch(decoded_balance); + let decoded_balance = + shielded.decode_all_amounts(client, balance).await; + print_decoded_balance_with_epoch(wallet, decoded_balance); } else { - balance = ctx - .shielded - .compute_exchanged_balance( - client.clone(), - &viewing_key, - epoch, - ) + balance = shielded + .compute_exchanged_balance(client, &viewing_key, epoch) .await .expect("context should contain viewing key"); // Print balances by human-readable token names - let decoded_balance = ctx - .shielded - .decode_amount(client.clone(), balance, epoch) - .await; - print_decoded_balance(decoded_balance); + let decoded_balance = + shielded.decode_amount(client, balance, epoch).await; + print_decoded_balance(wallet, decoded_balance); } } } } -pub fn print_decoded_balance(decoded_balance: Amount
<Address>) { - let tokens = address::tokens(); +pub fn print_decoded_balance( + wallet: &mut Wallet, + decoded_balance: Amount<Address>
, +) { let mut found_any = false; for (addr, value) in decoded_balance.components() { let asset_value = token::Amount::from(*value as u64); - let addr_enc = addr.encode(); - println!( - "{} : {}", - tokens.get(addr).cloned().unwrap_or(addr_enc.as_str()), - asset_value - ); + println!("{} : {}", lookup_alias(wallet, addr), asset_value); found_any = true; } if !found_any { @@ -1134,16 +900,16 @@ pub fn print_decoded_balance(decoded_balance: Amount<Address>
) { } pub fn print_decoded_balance_with_epoch( + wallet: &mut Wallet, decoded_balance: Amount<(Address, Epoch)>, ) { - let tokens = address::tokens(); + let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); let mut found_any = false; for ((addr, epoch), value) in decoded_balance.components() { let asset_value = token::Amount::from(*value as u64); - let addr_enc = addr.encode(); println!( "{} | {} : {}", - tokens.get(addr).cloned().unwrap_or(addr_enc.as_str()), + tokens.get(addr).cloned().unwrap_or_else(|| addr.clone()), epoch, asset_value ); @@ -1155,37 +921,60 @@ pub fn print_decoded_balance_with_epoch( } /// Query token amount of owner. -pub async fn get_token_balance( - client: &HttpClient, +pub async fn get_token_balance( + client: &C, token: &Address, owner: &Address, ) -> Option { - let balance_key = balance_key(token, owner); - query_storage_value(client, &balance_key).await + namada::ledger::rpc::get_token_balance(client, token, owner).await } -pub async fn query_proposal_result( - _ctx: Context, +pub async fn query_proposal_result< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, args: args::QueryProposalResult, ) { - let client = HttpClient::new(args.query.ledger_address.clone()).unwrap(); - let current_epoch = query_and_print_epoch(args.query.clone()).await; + let current_epoch = query_epoch(client).await; match args.proposal_id { Some(id) => { let end_epoch_key = gov_storage::get_voting_end_epoch_key(id); let end_epoch = - query_storage_value::(&client, &end_epoch_key).await; + query_storage_value::(client, &end_epoch_key).await; match end_epoch { Some(end_epoch) => { if current_epoch > end_epoch { let votes = - get_proposal_votes(&client, end_epoch, id).await; - let proposal_result = - compute_tally(&client, end_epoch, votes).await; + get_proposal_votes(client, end_epoch, id).await; + let proposal_type_key = + gov_storage::get_proposal_type_key(id); + let proposal_type = query_storage_value::< + C, + ProposalType, + >( + client, &proposal_type_key + ) + .await + .expect("Could not read proposal type from storage"); + let total_stake = + get_total_staked_tokens(client, end_epoch) + .await + .into(); println!("Proposal: {}", id); - println!("{:4}Result: {}", "", proposal_result); + match utils::compute_tally( + votes, + total_stake, + &proposal_type, + ) { + Ok(proposal_result) => { + println!("{:4}Result: {}", "", proposal_result) + } + Err(msg) => { + eprintln!("Error in tally computation: {}", msg) + } + } } else { eprintln!("Proposal is still in progress."); cli::safe_exit(1) @@ -1257,12 +1046,10 @@ pub async fn query_proposal_result( "JSON was not well-formatted for proposal.", ); - let public_key = get_public_key( - &proposal.address, - args.query.ledger_address.clone(), - ) - .await - .expect("Public key should exist."); + let public_key = + get_public_key(client, &proposal.address) + .await + .expect("Public key should exist."); if !proposal.check_signature(&public_key) { eprintln!("Bad proposal signature."); @@ -1270,16 +1057,29 @@ pub async fn query_proposal_result( } let votes = get_proposal_offline_votes( - &client, + client, proposal.clone(), files, ) .await; - let proposal_result = - compute_tally(&client, proposal.tally_epoch, votes) - .await; - - println!("{:4}Result: {}", "", proposal_result); + let total_stake = get_total_staked_tokens( + client, + proposal.tally_epoch, + ) + .await + .into(); + match utils::compute_tally( + votes, + total_stake, + &ProposalType::Default(None), + ) { + Ok(proposal_result) => { + 
println!("{:4}Result: {}", "", proposal_result) + } + Err(msg) => { + eprintln!("Error in tally computation: {}", msg) + } + } } None => { eprintln!( @@ -1299,18 +1099,18 @@ pub async fn query_proposal_result( } } -pub async fn query_protocol_parameters( - _ctx: Context, - args: args::QueryProtocolParameters, +pub async fn query_protocol_parameters< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, + _args: args::QueryProtocolParameters, ) { - let client = HttpClient::new(args.query.ledger_address).unwrap(); - - let gov_parameters = get_governance_parameters(&client).await; + let gov_parameters = get_governance_parameters(client).await; println!("Governance Parameters\n {:4}", gov_parameters); println!("Protocol parameters"); let key = param_storage::get_epoch_duration_storage_key(); - let epoch_duration = query_storage_value::(&client, &key) + let epoch_duration = query_storage_value::(client, &key) .await .expect("Parameter should be definied."); println!( @@ -1323,26 +1123,26 @@ pub async fn query_protocol_parameters( ); let key = param_storage::get_max_expected_time_per_block_key(); - let max_block_duration = query_storage_value::(&client, &key) + let max_block_duration = query_storage_value::(client, &key) .await .expect("Parameter should be defined."); println!("{:4}Max. block duration: {}", "", max_block_duration); let key = param_storage::get_tx_whitelist_storage_key(); - let vp_whitelist = query_storage_value::>(&client, &key) + let vp_whitelist = query_storage_value::>(client, &key) .await .expect("Parameter should be defined."); println!("{:4}VP whitelist: {:?}", "", vp_whitelist); let key = param_storage::get_tx_whitelist_storage_key(); - let tx_whitelist = query_storage_value::>(&client, &key) + let tx_whitelist = query_storage_value::>(client, &key) .await .expect("Parameter should be defined."); println!("{:4}Transactions whitelist: {:?}", "", tx_whitelist); println!("PoS parameters"); let key = pos::params_key(); - let pos_params = query_storage_value::(&client, &key) + let pos_params = query_storage_value::(client, &key) .await .expect("Parameter should be defined."); println!( @@ -1370,23 +1170,25 @@ pub async fn query_protocol_parameters( println!("{:4}Votes per token: {}", "", pos_params.tm_votes_per_token); } -pub async fn query_bond( - client: &HttpClient, +pub async fn query_bond( + client: &C, source: &Address, validator: &Address, epoch: Option, ) -> token::Amount { - unwrap_client_response( + unwrap_client_response::( RPC.vp().pos().bond(client, source, validator, &epoch).await, ) } -pub async fn query_unbond_with_slashing( - client: &HttpClient, +pub async fn query_unbond_with_slashing< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, source: &Address, validator: &Address, ) -> HashMap<(Epoch, Epoch), token::Amount> { - unwrap_client_response( + unwrap_client_response::>( RPC.vp() .pos() .unbond_with_slashing(client, source, validator) @@ -1394,27 +1196,34 @@ pub async fn query_unbond_with_slashing( ) } -pub async fn query_and_print_unbonds( - client: &HttpClient, +pub async fn query_and_print_unbonds< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, source: &Address, validator: &Address, ) { let unbonds = query_unbond_with_slashing(client, source, validator).await; let current_epoch = query_epoch(client).await; - let (withdrawable, not_yet_withdrawable): (HashMap<_, _>, HashMap<_, _>) = - unbonds.into_iter().partition(|((_, withdraw_epoch), _)| { - withdraw_epoch <= ¤t_epoch - }); - let total_withdrawable = withdrawable - 
.into_iter() - .fold(token::Amount::default(), |acc, (_, amount)| acc + amount); + + let mut total_withdrawable = token::Amount::default(); + let mut not_yet_withdrawable = HashMap::::new(); + for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { + if withdraw_epoch <= current_epoch { + total_withdrawable += amount; + } else { + let withdrawable_amount = + not_yet_withdrawable.entry(withdraw_epoch).or_default(); + *withdrawable_amount += amount; + } + } if total_withdrawable != token::Amount::default() { println!("Total withdrawable now: {total_withdrawable}."); } if !not_yet_withdrawable.is_empty() { println!("Current epoch: {current_epoch}.") } - for ((_start_epoch, withdraw_epoch), amount) in not_yet_withdrawable { + for (withdraw_epoch, amount) in not_yet_withdrawable { println!( "Amount {amount} withdrawable starting from epoch \ {withdraw_epoch}." @@ -1422,13 +1231,15 @@ pub async fn query_and_print_unbonds( } } -pub async fn query_withdrawable_tokens( - client: &HttpClient, +pub async fn query_withdrawable_tokens< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, bond_source: &Address, validator: &Address, epoch: Option, ) -> token::Amount { - unwrap_client_response( + unwrap_client_response::( RPC.vp() .pos() .withdrawable_tokens(client, bond_source, validator, &epoch) @@ -1437,21 +1248,24 @@ pub async fn query_withdrawable_tokens( } /// Query PoS bond(s) and unbond(s) -pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { - let _epoch = query_and_print_epoch(args.query.clone()).await; - let client = HttpClient::new(args.query.ledger_address).unwrap(); +pub async fn query_bonds( + client: &C, + _wallet: &mut Wallet, + args: args::QueryBonds, +) -> std::io::Result<()> { + let _epoch = query_and_print_epoch(client).await; - let source = args.owner.map(|owner| ctx.get(&owner)); - let validator = args.validator.map(|val| ctx.get(&val)); + let source = args.owner; + let validator = args.validator; let stdout = io::stdout(); let mut w = stdout.lock(); let bonds_and_unbonds: pos::types::BondsAndUnbondsDetails = - unwrap_client_response( + unwrap_client_response::( RPC.vp() .pos() - .bonds_and_unbonds(&client, &source, &validator) + .bonds_and_unbonds(client, &source, &validator) .await, ); let mut bonds_total: token::Amount = 0.into(); @@ -1470,14 +1284,13 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { bond_id.source, bond_id.validator ) }; - writeln!(w, "{}:", bond_type).unwrap(); + writeln!(w, "{}:", bond_type)?; for bond in details.bonds { writeln!( w, " Remaining active bond from epoch {}: Δ {}", bond.start, bond.amount - ) - .unwrap(); + )?; total += bond.amount; total_slashed += bond.slashed_amount.unwrap_or_default(); } @@ -1486,10 +1299,10 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { w, "Active (slashed) bonds total: {}", total - total_slashed - ) - .unwrap(); + )?; } - writeln!(w, "Bonds total: {}", total).unwrap(); + writeln!(w, "Bonds total: {}", total)?; + writeln!(w)?; bonds_total += total; bonds_total_slashed += total_slashed; @@ -1502,7 +1315,7 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { } else { format!("Unbonded delegations from {}", bond_id.source) }; - writeln!(w, "{}:", bond_type).unwrap(); + writeln!(w, "{}:", bond_type)?; for unbond in details.unbonds { total += unbond.amount; total_slashed += unbond.slashed_amount.unwrap_or_default(); @@ -1510,50 +1323,54 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { w, " Withdrawable from epoch {} 
(active from {}): Δ {}", unbond.withdraw, unbond.start, unbond.amount - ) - .unwrap(); + )?; } withdrawable = total - total_slashed; - writeln!(w, "Unbonded total: {}", total).unwrap(); + writeln!(w, "Unbonded total: {}", total)?; unbonds_total += total; unbonds_total_slashed += total_slashed; total_withdrawable += withdrawable; } - writeln!(w, "Withdrawable total: {}", withdrawable).unwrap(); - println!(); + writeln!(w, "Withdrawable total: {}", withdrawable)?; + writeln!(w)?; } if bonds_total != bonds_total_slashed { - println!( + writeln!( + w, "All bonds total active: {}", bonds_total - bonds_total_slashed - ); + )?; } - println!("All bonds total: {}", bonds_total); + writeln!(w, "All bonds total: {}", bonds_total)?; if unbonds_total != unbonds_total_slashed { - println!( + writeln!( + w, "All unbonds total active: {}", unbonds_total - unbonds_total_slashed - ); + )?; } - println!("All unbonds total: {}", unbonds_total); - println!("All unbonds total withdrawable: {}", total_withdrawable); + writeln!(w, "All unbonds total: {}", unbonds_total)?; + writeln!(w, "All unbonds total withdrawable: {}", total_withdrawable)?; + Ok(()) } /// Query PoS bonded stake -pub async fn query_bonded_stake(ctx: Context, args: args::QueryBondedStake) { +pub async fn query_bonded_stake( + client: &C, + args: args::QueryBondedStake, +) { let epoch = match args.epoch { Some(epoch) => epoch, - None => query_and_print_epoch(args.query.clone()).await, + None => query_and_print_epoch(client).await, }; - let client = HttpClient::new(args.query.ledger_address).unwrap(); match args.validator { Some(validator) => { - let validator = ctx.get(&validator); + let validator = validator; // Find bonded stake for the given validator - let stake = get_validator_stake(&client, epoch, &validator).await; + let stake = get_validator_stake(client, epoch, &validator).await; match stake { Some(stake) => { // TODO: show if it's in consensus set, below capacity, or @@ -1566,18 +1383,20 @@ pub async fn query_bonded_stake(ctx: Context, args: args::QueryBondedStake) { } } None => { - let consensus = unwrap_client_response( - RPC.vp() - .pos() - .consensus_validator_set(&client, &Some(epoch)) - .await, - ); - let below_capacity = unwrap_client_response( - RPC.vp() - .pos() - .below_capacity_validator_set(&client, &Some(epoch)) - .await, - ); + let consensus = + unwrap_client_response::>( + RPC.vp() + .pos() + .consensus_validator_set(client, &Some(epoch)) + .await, + ); + let below_capacity = + unwrap_client_response::>( + RPC.vp() + .pos() + .below_capacity_validator_set(client, &Some(epoch)) + .await, + ); // Iterate all validators let stdout = io::stdout(); @@ -1603,18 +1422,20 @@ pub async fn query_bonded_stake(ctx: Context, args: args::QueryBondedStake) { } } - let total_staked_tokens = get_total_staked_tokens(&client, epoch).await; + let total_staked_tokens = get_total_staked_tokens(client, epoch).await; println!("Total bonded stake: {total_staked_tokens}"); } /// Query and return validator's commission rate and max commission rate change /// per epoch -pub async fn query_commission_rate( - client: &HttpClient, +pub async fn query_commission_rate< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, validator: &Address, epoch: Option, ) -> Option { - unwrap_client_response( + unwrap_client_response::>( RPC.vp() .pos() .validator_commission(client, validator, &epoch) @@ -1623,15 +1444,17 @@ pub async fn query_commission_rate( } /// Query PoS validator's commission rate information -pub async fn 
query_and_print_commission_rate( - ctx: Context, +pub async fn query_and_print_commission_rate< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, + _wallet: &mut Wallet, args: args::QueryCommissionRate, ) { - let client = HttpClient::new(args.query.ledger_address.clone()).unwrap(); - let validator = ctx.get(&args.validator); + let validator = args.validator; let info: Option = - query_commission_rate(&client, &validator, args.epoch).await; + query_commission_rate(client, &validator, args.epoch).await; match info { Some(CommissionPair { commission_rate: rate, @@ -1655,19 +1478,22 @@ pub async fn query_and_print_commission_rate( } /// Query PoS slashes -pub async fn query_slashes(ctx: Context, args: args::QuerySlashes) { - let client = HttpClient::new(args.query.ledger_address).unwrap(); +pub async fn query_slashes( + client: &C, + _wallet: &mut Wallet, + args: args::QuerySlashes, +) { let params_key = pos::params_key(); - let params = query_storage_value::(&client, ¶ms_key) + let params = query_storage_value::(client, ¶ms_key) .await .expect("Parameter should be defined."); match args.validator { Some(validator) => { - let validator = ctx.get(&validator); + let validator = validator; // Find slashes for the given validator - let slashes: Vec = unwrap_client_response( - RPC.vp().pos().validator_slashes(&client, &validator).await, + let slashes: Vec = unwrap_client_response::>( + RPC.vp().pos().validator_slashes(client, &validator).await, ); if !slashes.is_empty() { let stdout = io::stdout(); @@ -1688,7 +1514,9 @@ pub async fn query_slashes(ctx: Context, args: args::QuerySlashes) { } None => { let all_slashes: HashMap> = - unwrap_client_response(RPC.vp().pos().slashes(&client).await); + unwrap_client_response::>>( + RPC.vp().pos().slashes(client).await, + ); if !all_slashes.is_empty() { let stdout = io::stdout(); @@ -1715,11 +1543,14 @@ pub async fn query_slashes(ctx: Context, args: args::QuerySlashes) { } } -pub async fn query_delegations(ctx: Context, args: args::QueryDelegations) { - let client = HttpClient::new(args.query.ledger_address).unwrap(); - let owner = ctx.get(&args.owner); - let delegations = unwrap_client_response( - RPC.vp().pos().delegation_validators(&client, &owner).await, +pub async fn query_delegations( + client: &C, + _wallet: &mut Wallet, + args: args::QueryDelegations, +) { + let owner = args.owner; + let delegations = unwrap_client_response::>( + RPC.vp().pos().delegation_validators(client, &owner).await, ); if delegations.is_empty() { println!("No delegations found"); @@ -1732,116 +1563,75 @@ pub async fn query_delegations(ctx: Context, args: args::QueryDelegations) { } /// Dry run a transaction -pub async fn dry_run_tx(ledger_address: &TendermintAddress, tx_bytes: Vec) { - let client = HttpClient::new(ledger_address.clone()).unwrap(); - let (data, height, prove) = (Some(tx_bytes), None, false); - let result = unwrap_client_response( - RPC.shell().dry_run_tx(&client, data, height, prove).await, - ) - .data; - println!("Dry-run result: {}", result); +pub async fn dry_run_tx( + client: &C, + tx_bytes: Vec, +) { + println!( + "Dry-run result: {}", + namada::ledger::rpc::dry_run_tx(client, tx_bytes).await + ); } /// Get account's public key stored in its storage sub-space -pub async fn get_public_key( +pub async fn get_public_key( + client: &C, address: &Address, - ledger_address: TendermintAddress, ) -> Option { - let client = HttpClient::new(ledger_address).unwrap(); - let key = pk_key(address); - query_storage_value(&client, &key).await + 
namada::ledger::rpc::get_public_key(client, address).await } /// Check if the given address is a known validator. -pub async fn is_validator(client: &HttpClient, address: &Address) -> bool { - unwrap_client_response(RPC.vp().pos().is_validator(client, address).await) +pub async fn is_validator( + client: &C, + address: &Address, +) -> bool { + namada::ledger::rpc::is_validator(client, address).await } /// Check if a given address is a known delegator -pub async fn is_delegator(client: &HttpClient, address: &Address) -> bool { - unwrap_client_response( - RPC.vp().pos().is_delegator(client, address, &None).await, - ) +pub async fn is_delegator( + client: &C, + address: &Address, +) -> bool { + namada::ledger::rpc::is_delegator(client, address).await } -/// Check if a given address is a known delegator at a particular epoch -pub async fn is_delegator_at( - client: &HttpClient, +pub async fn is_delegator_at( + client: &C, address: &Address, epoch: Epoch, ) -> bool { - unwrap_client_response( - RPC.vp() - .pos() - .is_delegator(client, address, &Some(epoch)) - .await, - ) + namada::ledger::rpc::is_delegator_at(client, address, epoch).await } /// Check if the address exists on chain. Established address exists if it has a /// stored validity predicate. Implicit and internal addresses always return /// true. -pub async fn known_address( - address: &Address, - ledger_address: TendermintAddress, -) -> bool { - let client = HttpClient::new(ledger_address).unwrap(); - match address { - Address::Established(_) => { - // Established account exists if it has a VP - let key = storage::Key::validity_predicate(address); - query_has_storage_key(&client, &key).await - } - Address::Implicit(_) | Address::Internal(_) => true, - } -} - -#[cfg(not(feature = "mainnet"))] -/// Check if the given address is a testnet faucet account address. -pub async fn is_faucet_account( +pub async fn known_address( + client: &C, address: &Address, - ledger_address: TendermintAddress, ) -> bool { - let client = HttpClient::new(ledger_address).unwrap(); - unwrap_client_response(RPC.vp().is_faucet(&client, address).await) -} - -#[cfg(not(feature = "mainnet"))] -/// Get faucet account address, if any is setup for the network. -pub async fn get_faucet_address( - ledger_address: TendermintAddress, -) -> Option
{ - let client = HttpClient::new(ledger_address).unwrap(); - unwrap_client_response(RPC.vp().get_faucet_address(&client).await) -} - -#[cfg(not(feature = "mainnet"))] -/// Obtain a PoW challenge for a withdrawal from a testnet faucet account, if -/// any is setup for the network. -pub async fn get_testnet_pow_challenge( - source: Address, - ledger_address: TendermintAddress, -) -> testnet_pow::Challenge { - let client = HttpClient::new(ledger_address).unwrap(); - unwrap_client_response( - RPC.vp().testnet_pow_challenge(&client, source).await, - ) + namada::ledger::rpc::known_address(client, address).await } /// Query for all conversions. -pub async fn query_conversions(ctx: Context, args: args::QueryConversions) { +pub async fn query_conversions( + client: &C, + wallet: &mut Wallet, + args: args::QueryConversions, +) { // The chosen token type of the conversions - let target_token = args.token.as_ref().map(|x| ctx.get(x)); + let target_token = args.token; // To facilitate human readable token addresses - let tokens = address::tokens(); - let client = HttpClient::new(args.query.ledger_address).unwrap(); + let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); let masp_addr = masp(); let key_prefix: Key = masp_addr.to_db_key().into(); let state_key = key_prefix .push(&(token::CONVERSION_KEY_PREFIX.to_owned())) .unwrap(); let conv_state = - query_storage_value::(&client, &state_key) + query_storage_value::(client, &state_key) .await .expect("Conversions should be defined"); // Track whether any non-sentinel conversions are found @@ -1859,10 +1649,9 @@ pub async fn query_conversions(ctx: Context, args: args::QueryConversions) { } conversions_found = true; // Print the asset to which the conversion applies - let addr_enc = addr.encode(); print!( "{}[{}]: ", - tokens.get(addr).cloned().unwrap_or(addr_enc.as_str()), + tokens.get(addr).cloned().unwrap_or_else(|| addr.clone()), epoch, ); // Now print out the components of the allowed conversion @@ -1872,12 +1661,11 @@ pub async fn query_conversions(ctx: Context, args: args::QueryConversions) { // printing let (addr, epoch, _, _) = &conv_state.assets[asset_type]; // Now print out this component of the conversion - let addr_enc = addr.encode(); print!( "{}{} {}[{}]", prefix, val, - tokens.get(addr).cloned().unwrap_or(addr_enc.as_str()), + tokens.get(addr).cloned().unwrap_or_else(|| addr.clone()), epoch ); // Future iterations need to be prefixed with + @@ -1892,8 +1680,8 @@ pub async fn query_conversions(ctx: Context, args: args::QueryConversions) { } /// Query a conversion. -pub async fn query_conversion( - client: HttpClient, +pub async fn query_conversion( + client: &C, asset_type: AssetType, ) -> Option<( Address, @@ -1901,264 +1689,98 @@ pub async fn query_conversion( masp_primitives::transaction::components::Amount, MerklePath, )> { - Some(unwrap_client_response( - RPC.shell().read_conversion(&client, &asset_type).await, - )) + namada::ledger::rpc::query_conversion(client, asset_type).await +} + +/// Query a wasm code hash +pub async fn query_wasm_code_hash( + client: &C, + code_path: impl AsRef, +) -> Option { + namada::ledger::rpc::query_wasm_code_hash(client, code_path).await } /// Query a storage value and decode it with [`BorshDeserialize`]. 
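// Illustrative usage sketch, not part of this changeset: with the new
// `C: namada::ledger::queries::Client + Sync` bound, any conforming client
// can drive the helper below. The balance key mirrors the one used by
// `get_token_balance` above; the function name `print_token_balance` is an
// assumption for illustration only.
async fn print_token_balance<C: namada::ledger::queries::Client + Sync>(
    client: &C,
    token: &Address,
    owner: &Address,
) {
    // Balance cells live under a token-scoped storage key.
    let key = namada::types::token::balance_key(token, owner);
    // `query_storage_value` Borsh-decodes the stored bytes; it yields
    // `None` when the key is absent.
    let balance: Option<token::Amount> =
        query_storage_value(client, &key).await;
    println!("Balance: {}", balance.unwrap_or_default());
}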
-pub async fn query_storage_value( - client: &HttpClient, +pub async fn query_storage_value( + client: &C, key: &storage::Key, ) -> Option where T: BorshDeserialize, { - // In case `T` is a unit (only thing that encodes to 0 bytes), we have to - // use `storage_has_key` instead of `storage_value`, because `storage_value` - // returns 0 bytes when the key is not found. - let maybe_unit = T::try_from_slice(&[]); - if let Ok(unit) = maybe_unit { - return if unwrap_client_response( - RPC.shell().storage_has_key(client, key).await, - ) { - Some(unit) - } else { - None - }; - } - - let response = unwrap_client_response( - RPC.shell() - .storage_value(client, None, None, false, key) - .await, - ); - if response.data.is_empty() { - return None; - } - T::try_from_slice(&response.data[..]) - .map(Some) - .unwrap_or_else(|err| { - eprintln!("Error decoding the value: {}", err); - cli::safe_exit(1) - }) + namada::ledger::rpc::query_storage_value(client, key).await } /// Query a storage value and the proof without decoding. -pub async fn query_storage_value_bytes( - client: &HttpClient, +pub async fn query_storage_value_bytes< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, key: &storage::Key, height: Option, prove: bool, ) -> (Option>, Option) { - let data = None; - let response = unwrap_client_response( - RPC.shell() - .storage_value(client, data, height, prove, key) - .await, - ); - if response.data.is_empty() { - (None, response.proof) - } else { - (Some(response.data), response.proof) - } + namada::ledger::rpc::query_storage_value_bytes(client, key, height, prove) + .await } /// Query a range of storage values with a matching prefix and decode them with /// [`BorshDeserialize`]. Returns an iterator of the storage keys paired with /// their associated values. -pub async fn query_storage_prefix( - client: &HttpClient, +pub async fn query_storage_prefix< + C: namada::ledger::queries::Client + Sync, + T, +>( + client: &C, key: &storage::Key, ) -> Option> where T: BorshDeserialize, { - let values = unwrap_client_response( - RPC.shell() - .storage_prefix(client, None, None, false, key) - .await, - ); - let decode = - |PrefixValue { key, value }: PrefixValue| match T::try_from_slice( - &value[..], - ) { - Err(err) => { - eprintln!( - "Skipping a value for key {}. Error in decoding: {}", - key, err - ); - None - } - Ok(value) => Some((key, value)), - }; - if values.data.is_empty() { - None - } else { - Some(values.data.into_iter().filter_map(decode)) - } + namada::ledger::rpc::query_storage_prefix(client, key).await } /// Query to check if the given storage key exists. 
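// A small presence-check sketch, reusing the validity-predicate key
// construction from the removed `known_address` body above; the helper
// name `has_vp` is illustrative only.
async fn has_vp<C: namada::ledger::queries::Client + Sync>(
    client: &C,
    address: &Address,
) -> bool {
    // Established accounts exist on chain iff a validity predicate is
    // stored for them.
    let key = storage::Key::validity_predicate(address);
    query_has_storage_key(client, &key).await
}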
-pub async fn query_has_storage_key( - client: &HttpClient, +pub async fn query_has_storage_key< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, key: &storage::Key, ) -> bool { - unwrap_client_response(RPC.shell().storage_has_key(client, key).await) -} - -/// Represents a query for an event pertaining to the specified transaction -#[derive(Debug, Copy, Clone)] -pub enum TxEventQuery<'a> { - Accepted(&'a str), - Applied(&'a str), -} - -impl<'a> TxEventQuery<'a> { - /// The event type to which this event query pertains - fn event_type(self) -> &'static str { - match self { - TxEventQuery::Accepted(_) => "accepted", - TxEventQuery::Applied(_) => "applied", - } - } - - /// The transaction to which this event query pertains - fn tx_hash(self) -> &'a str { - match self { - TxEventQuery::Accepted(tx_hash) => tx_hash, - TxEventQuery::Applied(tx_hash) => tx_hash, - } - } -} - -/// Transaction event queries are semantically a subset of general queries -impl<'a> From> for Query { - fn from(tx_query: TxEventQuery<'a>) -> Self { - match tx_query { - TxEventQuery::Accepted(tx_hash) => { - Query::default().and_eq("accepted.hash", tx_hash) - } - TxEventQuery::Applied(tx_hash) => { - Query::default().and_eq("applied.hash", tx_hash) - } - } - } + namada::ledger::rpc::query_has_storage_key(client, key).await } /// Call the corresponding `tx_event_query` RPC method, to fetch /// the current status of a transation. -pub async fn query_tx_events( - client: &HttpClient, - tx_event_query: TxEventQuery<'_>, -) -> eyre::Result> { - let tx_hash: Hash = tx_event_query.tx_hash().try_into()?; - match tx_event_query { - TxEventQuery::Accepted(_) => RPC - .shell() - .accepted(client, &tx_hash) - .await - .wrap_err_with(|| { - eyre!("Failed querying whether a transaction was accepted") - }), - TxEventQuery::Applied(_) => RPC - .shell() - .applied(client, &tx_hash) - .await - .wrap_err_with(|| { - eyre!("Error querying whether a transaction was applied") - }), - } +pub async fn query_tx_events( + client: &C, + tx_event_query: namada::ledger::rpc::TxEventQuery<'_>, +) -> std::result::Result< + Option, + ::Error, +> { + namada::ledger::rpc::query_tx_events(client, tx_event_query).await } /// Lookup the full response accompanying the specified transaction event // TODO: maybe remove this in favor of `query_tx_status` -pub async fn query_tx_response( - ledger_address: &TendermintAddress, - tx_query: TxEventQuery<'_>, +pub async fn query_tx_response( + client: &C, + tx_query: namada::ledger::rpc::TxEventQuery<'_>, ) -> Result { - // Connect to the Tendermint server holding the transactions - let (client, driver) = WebSocketClient::new(ledger_address.clone()).await?; - let driver_handle = tokio::spawn(async move { driver.run().await }); - // Find all blocks that apply a transaction with the specified hash - let blocks = &client - .block_search(tx_query.into(), 1, 255, Order::Ascending) - .await - .expect("Unable to query for transaction with given hash") - .blocks; - // Get the block results corresponding to a block to which - // the specified transaction belongs - let block = &blocks - .get(0) - .ok_or_else(|| { - TError::server( - "Unable to find a block applying the given transaction" - .to_string(), - ) - })? 
- .block; - let response_block_results = client - .block_results(block.header.height) - .await - .expect("Unable to retrieve block containing transaction"); - // Search for the event where the specified transaction is - // applied to the blockchain - let query_event_opt = - response_block_results.end_block_events.and_then(|events| { - events - .iter() - .find(|event| { - event.type_str == tx_query.event_type() - && event.attributes.iter().any(|tag| { - tag.key.as_ref() == "hash" - && tag.value.as_ref() == tx_query.tx_hash() - }) - }) - .cloned() - }); - let query_event = query_event_opt.ok_or_else(|| { - TError::server( - "Unable to find the event corresponding to the specified \ - transaction" - .to_string(), - ) - })?; - // Reformat the event attributes so as to ease value extraction - let event_map: std::collections::HashMap<&str, &str> = query_event - .attributes - .iter() - .map(|tag| (tag.key.as_ref(), tag.value.as_ref())) - .collect(); - // Summarize the transaction results that we were searching for - let result = TxResponse { - info: event_map["info"].to_string(), - log: event_map["log"].to_string(), - height: event_map["height"].to_string(), - hash: event_map["hash"].to_string(), - code: event_map["code"].to_string(), - gas_used: event_map["gas_used"].to_string(), - initialized_accounts: serde_json::from_str( - event_map["initialized_accounts"], - ) - .unwrap_or_default(), - }; - // Signal to the driver to terminate. - client.close()?; - // Await the driver's termination to ensure proper connection closure. - let _ = driver_handle.await.unwrap_or_else(|x| { - eprintln!("{}", x); - cli::safe_exit(1) - }); - Ok(result) + namada::ledger::rpc::query_tx_response(client, tx_query).await } /// Lookup the results of applying the specified transaction to the /// blockchain. -pub async fn query_result(_ctx: Context, args: args::QueryResult) { +pub async fn query_result( + client: &C, + args: args::QueryResult, +) { // First try looking up application event pertaining to given hash. let tx_response = query_tx_response( - &args.query.ledger_address, - TxEventQuery::Applied(&args.tx_hash), + client, + namada::ledger::rpc::TxEventQuery::Applied(&args.tx_hash), ) .await; match tx_response { @@ -2171,8 +1793,8 @@ pub async fn query_result(_ctx: Context, args: args::QueryResult) { Err(err1) => { // If this fails then instead look for an acceptance event. 
let tx_response = query_tx_response( - &args.query.ledger_address, - TxEventQuery::Accepted(&args.tx_hash), + client, + namada::ledger::rpc::TxEventQuery::Accepted(&args.tx_hash), ) .await; match tx_response { @@ -2190,76 +1812,18 @@ pub async fn query_result(_ctx: Context, args: args::QueryResult) { } } -pub async fn get_proposal_votes( - client: &HttpClient, +pub async fn get_proposal_votes( + client: &C, epoch: Epoch, proposal_id: u64, ) -> Votes { - let validators = get_all_validators(client, epoch).await; - - let vote_prefix_key = - gov_storage::get_proposal_vote_prefix_key(proposal_id); - let vote_iter = - query_storage_prefix::(client, &vote_prefix_key).await; - - let mut yay_validators: HashMap = HashMap::new(); - let mut yay_delegators: HashMap> = - HashMap::new(); - let mut nay_delegators: HashMap> = - HashMap::new(); - - if let Some(vote_iter) = vote_iter { - for (key, vote) in vote_iter { - let voter_address = gov_storage::get_voter_address(&key) - .expect("Vote key should contain the voting address.") - .clone(); - if vote.is_yay() && validators.contains(&voter_address) { - let amount: VotePower = - get_validator_stake(client, epoch, &voter_address) - .await - .unwrap_or_default() - .into(); - yay_validators.insert(voter_address, amount); - } else if !validators.contains(&voter_address) { - let validator_address = - gov_storage::get_vote_delegation_address(&key) - .expect( - "Vote key should contain the delegation address.", - ) - .clone(); - let delegator_token_amount = get_bond_amount_at( - client, - &voter_address, - &validator_address, - epoch, - ) - .await; - if let Some(amount) = delegator_token_amount { - if vote.is_yay() { - let entry = - yay_delegators.entry(voter_address).or_default(); - entry - .insert(validator_address, VotePower::from(amount)); - } else { - let entry = - nay_delegators.entry(voter_address).or_default(); - entry - .insert(validator_address, VotePower::from(amount)); - } - } - } - } - } - - Votes { - yay_validators, - yay_delegators, - nay_delegators, - } + namada::ledger::rpc::get_proposal_votes(client, epoch, proposal_id).await } -pub async fn get_proposal_offline_votes( - client: &HttpClient, +pub async fn get_proposal_offline_votes< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, proposal: OfflineProposal, files: HashSet, ) -> Votes { @@ -2267,11 +1831,12 @@ pub async fn get_proposal_offline_votes( let proposal_hash = proposal.compute_hash(); - let mut yay_validators: HashMap = HashMap::new(); - let mut yay_delegators: HashMap> = - HashMap::new(); - let mut nay_delegators: HashMap> = + let mut yay_validators: HashMap = HashMap::new(); + let mut delegators: HashMap< + Address, + HashMap, + > = HashMap::new(); for path in files { let file = File::open(&path).expect("Proposal file must exist."); @@ -2291,7 +1856,7 @@ pub async fn get_proposal_offline_votes( if proposal_vote.vote.is_yay() // && validators.contains(&proposal_vote.address) - && unwrap_client_response( + && unwrap_client_response::( RPC.vp().pos().is_validator(client, &proposal_vote.address).await, ) { @@ -2303,7 +1868,10 @@ pub async fn get_proposal_offline_votes( .await .unwrap_or_default() .into(); - yay_validators.insert(proposal_vote.address, amount); + yay_validators.insert( + proposal_vote.address, + (amount, ProposalVote::Yay(VoteType::Default)), + ); } else if is_delegator_at( client, &proposal_vote.address, @@ -2314,7 +1882,7 @@ pub async fn get_proposal_offline_votes( // TODO: decide whether to do this with `bond_with_slashing` RPC // endpoint or with 
`bonds_and_unbonds` let bonds_and_unbonds: pos::types::BondsAndUnbondsDetails = - unwrap_client_response( + unwrap_client_response::( RPC.vp() .pos() .bonds_and_unbonds( @@ -2343,17 +1911,17 @@ pub async fn get_proposal_offline_votes( - delta.slashed_amount.unwrap_or_default(); } } - if proposal_vote.vote.is_yay() { - let entry = yay_delegators - .entry(proposal_vote.address.clone()) - .or_default(); - entry.insert(validator, VotePower::from(delegated_amount)); - } else { - let entry = nay_delegators - .entry(proposal_vote.address.clone()) - .or_default(); - entry.insert(validator, VotePower::from(delegated_amount)); - } + + let entry = delegators + .entry(proposal_vote.address.clone()) + .or_default(); + entry.insert( + validator, + ( + VotePower::from(delegated_amount), + proposal_vote.vote.clone(), + ), + ); } // let key = pos::bonds_for_source_prefix(&proposal_vote.address); @@ -2432,112 +2000,48 @@ pub async fn get_proposal_offline_votes( Votes { yay_validators, - yay_delegators, - nay_delegators, - } -} - -// Compute the result of a proposal -pub async fn compute_tally( - client: &HttpClient, - epoch: Epoch, - votes: Votes, -) -> ProposalResult { - let total_staked_tokens: VotePower = - get_total_staked_tokens(client, epoch).await.into(); - - let Votes { - yay_validators, - yay_delegators, - nay_delegators, - } = votes; - - let mut total_yay_staked_tokens = VotePower::from(0_u64); - for (_, amount) in yay_validators.clone().into_iter() { - total_yay_staked_tokens += amount; - } - - // YAY: Add delegator amount whose validator didn't vote / voted nay - for (_, vote_map) in yay_delegators.iter() { - for (validator_address, vote_power) in vote_map.iter() { - if !yay_validators.contains_key(validator_address) { - total_yay_staked_tokens += vote_power; - } - } - } - - // NAY: Remove delegator amount whose validator validator vote yay - for (_, vote_map) in nay_delegators.iter() { - for (validator_address, vote_power) in vote_map.iter() { - if yay_validators.contains_key(validator_address) { - total_yay_staked_tokens -= vote_power; - } - } - } - - if total_yay_staked_tokens >= (total_staked_tokens / 3) * 2 { - ProposalResult { - result: TallyResult::Passed, - total_voting_power: total_staked_tokens, - total_yay_power: total_yay_staked_tokens, - total_nay_power: 0, - } - } else { - ProposalResult { - result: TallyResult::Rejected, - total_voting_power: total_staked_tokens, - total_yay_power: total_yay_staked_tokens, - total_nay_power: 0, - } + delegators, } } -pub async fn get_bond_amount_at( - client: &HttpClient, +pub async fn get_bond_amount_at( + client: &C, delegator: &Address, validator: &Address, epoch: Epoch, ) -> Option { - let (_total, total_active) = unwrap_client_response( - RPC.vp() - .pos() - .bond_with_slashing(client, delegator, validator, &Some(epoch)) - .await, - ); + let (_total, total_active) = + unwrap_client_response::( + RPC.vp() + .pos() + .bond_with_slashing(client, delegator, validator, &Some(epoch)) + .await, + ); Some(total_active) } -pub async fn get_all_validators( - client: &HttpClient, +pub async fn get_all_validators( + client: &C, epoch: Epoch, ) -> HashSet
{ - unwrap_client_response( - RPC.vp() - .pos() - .validator_addresses(client, &Some(epoch)) - .await, - ) + namada::ledger::rpc::get_all_validators(client, epoch).await } -pub async fn get_total_staked_tokens( - client: &HttpClient, +pub async fn get_total_staked_tokens< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, epoch: Epoch, ) -> token::Amount { - unwrap_client_response( - RPC.vp().pos().total_stake(client, &Some(epoch)).await, - ) + namada::ledger::rpc::get_total_staked_tokens(client, epoch).await } -/// Get the total stake of a validator at the given epoch. The total stake is a -/// sum of validator's self-bonds and delegations to their address. -/// Returns `None` when the given address is not a validator address. For a -/// validator with `0` stake, this returns `Ok(token::Amount::default())`. -async fn get_validator_stake( - client: &HttpClient, +async fn get_validator_stake( + client: &C, epoch: Epoch, validator: &Address, ) -> Option { - unwrap_client_response( + unwrap_client_response::>( RPC.vp() .pos() .validator_stake(client, validator, &Some(epoch)) @@ -2545,70 +2049,38 @@ async fn get_validator_stake( ) } -pub async fn get_delegators_delegation( - client: &HttpClient, +pub async fn get_delegators_delegation< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, address: &Address, ) -> HashSet
{ - unwrap_client_response( - RPC.vp().pos().delegation_validators(client, address).await, - ) + namada::ledger::rpc::get_delegators_delegation(client, address).await } -pub async fn get_governance_parameters(client: &HttpClient) -> GovParams { - use namada::types::token::Amount; - let key = gov_storage::get_max_proposal_code_size_key(); - let max_proposal_code_size = query_storage_value::(client, &key) - .await - .expect("Parameter should be definied."); - - let key = gov_storage::get_max_proposal_content_key(); - let max_proposal_content_size = query_storage_value::(client, &key) - .await - .expect("Parameter should be definied."); - - let key = gov_storage::get_min_proposal_fund_key(); - let min_proposal_fund = query_storage_value::(client, &key) - .await - .expect("Parameter should be definied."); - - let key = gov_storage::get_min_proposal_grace_epoch_key(); - let min_proposal_grace_epochs = query_storage_value::(client, &key) - .await - .expect("Parameter should be definied."); - - let key = gov_storage::get_min_proposal_period_key(); - let min_proposal_period = query_storage_value::(client, &key) - .await - .expect("Parameter should be definied."); - - let key = gov_storage::get_max_proposal_period_key(); - let max_proposal_period = query_storage_value::(client, &key) - .await - .expect("Parameter should be definied."); - - GovParams { - min_proposal_fund: u64::from(min_proposal_fund), - max_proposal_code_size, - min_proposal_period, - max_proposal_period, - max_proposal_content_size, - min_proposal_grace_epochs, - } +pub async fn get_governance_parameters< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, +) -> GovParams { + namada::ledger::rpc::get_governance_parameters(client).await } /// Try to find an alias for a given address from the wallet. If not found, /// formats the address into a string. -fn lookup_alias(ctx: &Context, addr: &Address) -> String { - match ctx.wallet.find_alias(addr) { +fn lookup_alias(wallet: &Wallet, addr: &Address) -> String { + match wallet.find_alias(addr) { Some(alias) => format!("{}", alias), None => format!("{}", addr), } } /// A helper to unwrap client's response. Will shut down process on error. -fn unwrap_client_response(response: Result) -> T { - response.unwrap_or_else(|err| { - eprintln!("Error in the query {}", err); +fn unwrap_client_response( + response: Result, +) -> T { + response.unwrap_or_else(|_err| { + eprintln!("Error in the query"); cli::safe_exit(1) }) } diff --git a/apps/src/lib/client/signing.rs b/apps/src/lib/client/signing.rs index 9b1a00b987d..eec89b4f80b 100644 --- a/apps/src/lib/client/signing.rs +++ b/apps/src/lib/client/signing.rs @@ -1,140 +1,46 @@ //! Helpers for making digital signatures using cryptographic keys from the //! wallet. 
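// The helpers in this module are now thin forwarders into the `namada`
// SDK. A usage sketch under assumed concrete types: `HttpClient` for the
// client and `CliWalletUtils` for the wallet backend are stand-ins here,
// not names introduced by this patch.
async fn default_signer(
    client: &HttpClient,
    wallet: &mut Wallet<CliWalletUtils>,
    addr: &Address,
) -> common::SecretKey {
    // Resolves the address to an on-chain public key if needed, then loads
    // the matching secret key from the wallet.
    find_keypair(client, wallet, addr)
        .await
        .expect("signing key should be available")
}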
-use borsh::BorshSerialize; -use namada::ledger::parameters::storage as parameter_storage; +use namada::ledger::rpc::TxBroadcastData; +use namada::ledger::signing::TxSigningKey; +use namada::ledger::tx; +use namada::ledger::wallet::{Wallet, WalletUtils}; use namada::proto::Tx; -use namada::types::address::{Address, ImplicitAddress}; -use namada::types::hash::Hash; +use namada::types::address::Address; use namada::types::key::*; use namada::types::storage::Epoch; -use namada::types::token; -use namada::types::token::Amount; -use namada::types::transaction::{hash_tx, Fee, WrapperTx, MIN_FEE}; -use super::rpc; -use crate::cli::context::{WalletAddress, WalletKeypair}; -use crate::cli::{self, args, Context}; -use crate::client::tendermint_rpc_types::TxBroadcastData; -use crate::facade::tendermint_config::net::Address as TendermintAddress; -use crate::facade::tendermint_rpc::HttpClient; -use crate::wallet::Wallet; +use crate::cli::args; /// Find the public key for the given address and try to load the keypair /// for it from the wallet. Panics if the key cannot be found or loaded. -pub async fn find_keypair( - wallet: &mut Wallet, +pub async fn find_keypair< + C: namada::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, addr: &Address, - ledger_address: TendermintAddress, -) -> common::SecretKey { - match addr { - Address::Established(_) => { - println!( - "Looking-up public key of {} from the ledger...", - addr.encode() - ); - let public_key = rpc::get_public_key(addr, ledger_address) - .await - .unwrap_or_else(|| { - eprintln!( - "No public key found for the address {}", - addr.encode() - ); - cli::safe_exit(1); - }); - wallet.find_key_by_pk(&public_key).unwrap_or_else(|err| { - eprintln!( - "Unable to load the keypair from the wallet for public \ - key {}. Failed with: {}", - public_key, err - ); - cli::safe_exit(1) - }) - } - Address::Implicit(ImplicitAddress(pkh)) => { - wallet.find_key_by_pkh(pkh).unwrap_or_else(|err| { - eprintln!( - "Unable to load the keypair from the wallet for the \ - implicit address {}. Failed with: {}", - addr.encode(), - err - ); - cli::safe_exit(1) - }) - } - Address::Internal(_) => { - eprintln!( - "Internal address {} doesn't have any signing keys.", - addr - ); - cli::safe_exit(1) - } - } -} - -/// Carries types that can be directly/indirectly used to sign a transaction. -#[allow(clippy::large_enum_variant)] -#[derive(Clone)] -pub enum TxSigningKey { - // Do not sign any transaction - None, - // Obtain the actual keypair from wallet and use that to sign - WalletKeypair(WalletKeypair), - // Obtain the keypair corresponding to given address from wallet and sign - WalletAddress(WalletAddress), - // Directly use the given secret key to sign transactions - SecretKey(common::SecretKey), +) -> Result { + namada::ledger::signing::find_keypair::(client, wallet, addr, None) + .await } /// Given CLI arguments and some defaults, determine the rightful transaction /// signer. Return the given signing key or public key of the given signer if /// possible. If no explicit signer given, use the `default`. If no `default` /// is given, panics. 
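// The precedence encoded by the removed body below, which the SDK call
// preserves: an explicit signing key wins, then an explicit signer
// address, and only then the caller-supplied `default`. A condensed
// sketch, with types following the removed CLI variants:
fn signing_source(args: &args::Tx, default: TxSigningKey) -> TxSigningKey {
    if let Some(signing_key) = &args.signing_key {
        // A concrete key always takes priority.
        TxSigningKey::WalletKeypair(signing_key.clone())
    } else if let Some(signer) = &args.signer {
        // Otherwise look the key up by the signer's address.
        TxSigningKey::WalletAddress(signer.clone())
    } else {
        default
    }
}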
-pub async fn tx_signer( - ctx: &mut Context, +pub async fn tx_signer< + C: namada::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, args: &args::Tx, - mut default: TxSigningKey, -) -> common::SecretKey { - // Override the default signing key source if possible - if let Some(signing_key) = &args.signing_key { - default = TxSigningKey::WalletKeypair(signing_key.clone()); - } else if let Some(signer) = &args.signer { - default = TxSigningKey::WalletAddress(signer.clone()); - } - // Now actually fetch the signing key and apply it - match default { - TxSigningKey::WalletKeypair(signing_key) => { - ctx.get_cached(&signing_key) - } - TxSigningKey::WalletAddress(signer) => { - let signer = ctx.get(&signer); - let signing_key = find_keypair( - &mut ctx.wallet, - &signer, - args.ledger_address.clone(), - ) - .await; - // Check if the signer is implicit account that needs to reveal its - // PK first - if matches!(signer, Address::Implicit(_)) { - let pk: common::PublicKey = signing_key.ref_to(); - super::tx::reveal_pk_if_needed(ctx, &pk, args).await; - } - signing_key - } - TxSigningKey::SecretKey(signing_key) => { - // Check if the signing key needs to reveal its PK first - let pk: common::PublicKey = signing_key.ref_to(); - super::tx::reveal_pk_if_needed(ctx, &pk, args).await; - signing_key - } - TxSigningKey::None => { - panic!( - "All transactions must be signed; please either specify the \ - key or the address from which to look up the signing key." - ); - } - } + default: TxSigningKey, +) -> Result { + namada::ledger::signing::tx_signer::(client, wallet, args, default) + .await } /// Sign a transaction with a given signing key or public key of a given signer. @@ -145,174 +51,48 @@ pub async fn tx_signer( /// hashes needed for monitoring the tx on chain. /// /// If it is a dry run, it is not put in a wrapper, but returned as is. 
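// What the resulting `TxBroadcastData` carries, per the deleted
// `tendermint_rpc_types.rs` further down: a dry-run tx stays unwrapped,
// while a wrapped tx tracks two hashes. A sketch of a consumer; the
// helper name `describe_broadcast` is illustrative only.
fn describe_broadcast(data: &TxBroadcastData) -> String {
    match data {
        // Dry runs skip wrapping entirely.
        TxBroadcastData::DryRun(_) => "dry run; tx returned as-is".to_owned(),
        // The wrapper hash tracks inclusion on chain; the decrypted
        // (inner) hash tracks execution of the payload.
        TxBroadcastData::Wrapper {
            wrapper_hash,
            decrypted_hash,
            ..
        } => format!("wrapper {wrapper_hash}, inner {decrypted_hash}"),
    }
}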
-pub async fn sign_tx( - mut ctx: Context, +pub async fn sign_tx< + C: namada::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, tx: Tx, args: &args::Tx, default: TxSigningKey, #[cfg(not(feature = "mainnet"))] requires_pow: bool, -) -> (Context, TxBroadcastData) { - if args.dump_tx { - dump_tx_helper(&ctx, &tx, "unsigned", None); - } - - let keypair = tx_signer(&mut ctx, args, default).await; - let tx = tx.sign(&keypair); - if args.dump_tx { - dump_tx_helper(&ctx, &tx, "signed", None); - } - - let epoch = rpc::query_and_print_epoch(args::Query { - ledger_address: args.ledger_address.clone(), - }) - .await; - let broadcast_data = if args.dry_run { - TxBroadcastData::DryRun(tx) - } else { - sign_wrapper( - &ctx, - args, - epoch, - tx, - &keypair, - #[cfg(not(feature = "mainnet"))] - requires_pow, - ) - .await - }; - - if args.dump_tx && !args.dry_run { - let (wrapper_tx, wrapper_hash) = match broadcast_data { - TxBroadcastData::DryRun(_) => panic!( - "somehow created a dry run transaction without --dry-run" - ), - TxBroadcastData::Wrapper { - ref tx, - ref wrapper_hash, - decrypted_hash: _, - } => (tx, wrapper_hash), - }; - - dump_tx_helper(&ctx, wrapper_tx, "wrapper", Some(wrapper_hash)); - } - - (ctx, broadcast_data) -} - -pub fn dump_tx_helper( - ctx: &Context, - tx: &Tx, - extension: &str, - precomputed_hash: Option<&String>, -) { - let chain_dir = ctx.config.ledger.chain_dir(); - let hash = match precomputed_hash { - Some(hash) => hash.to_owned(), - None => { - let hash: Hash = tx - .hash() - .as_ref() - .try_into() - .expect("expected hash of dumped tx to be a hash"); - format!("{}", hash) - } - }; - let filename = chain_dir.join(hash).with_extension(extension); - let tx_bytes = tx.to_bytes(); - - std::fs::write(filename, tx_bytes) - .expect("expected to be able to write tx dump file"); +) -> Result { + namada::ledger::signing::sign_tx::( + client, + wallet, + tx, + args, + default, + #[cfg(not(feature = "mainnet"))] + requires_pow, + ) + .await } /// Create a wrapper tx from a normal tx. Get the hash of the /// wrapper and its payload which is needed for monitoring its /// progress on chain. -pub async fn sign_wrapper( - ctx: &Context, +pub async fn sign_wrapper( + client: &C, args: &args::Tx, epoch: Epoch, tx: Tx, keypair: &common::SecretKey, #[cfg(not(feature = "mainnet"))] requires_pow: bool, ) -> TxBroadcastData { - let client = HttpClient::new(args.ledger_address.clone()).unwrap(); - - let fee_amount = if cfg!(feature = "mainnet") { - Amount::whole(MIN_FEE) - } else { - let wrapper_tx_fees_key = parameter_storage::get_wrapper_tx_fees_key(); - rpc::query_storage_value::(&client, &wrapper_tx_fees_key) - .await - .unwrap_or_default() - }; - let fee_token = ctx.get(&args.fee_token); - let source = Address::from(&keypair.ref_to()); - let balance_key = token::balance_key(&fee_token, &source); - let balance = - rpc::query_storage_value::(&client, &balance_key) - .await - .unwrap_or_default(); - if balance < fee_amount { - eprintln!( - "The wrapper transaction source doesn't have enough balance to \ - pay fee {fee_amount}, got {balance}." - ); - if !args.force && cfg!(feature = "mainnet") { - cli::safe_exit(1); - } - } - - #[cfg(not(feature = "mainnet"))] - // A PoW solution can be used to allow zero-fee testnet transactions - let pow_solution: Option = { - // If the address derived from the keypair doesn't have enough balance - // to pay for the fee, allow to find a PoW solution instead. 
- if requires_pow || balance < fee_amount { - println!( - "The transaction requires the completion of a PoW challenge." - ); - // Obtain a PoW challenge for faucet withdrawal - let challenge = rpc::get_testnet_pow_challenge( - source, - args.ledger_address.clone(), - ) - .await; - - // Solve the challenge; this blocks until a solution is found - let solution = challenge.solve(); - Some(solution) - } else { - None - } - }; - - let tx = { - WrapperTx::new( - Fee { - amount: fee_amount, - token: fee_token, - }, - keypair, - epoch, - args.gas_limit.clone(), - tx, - // TODO: Actually use the fetched encryption key - Default::default(), - #[cfg(not(feature = "mainnet"))] - pow_solution, - ) - }; - - // We use this to determine when the wrapper tx makes it on-chain - let wrapper_hash = hash_tx(&tx.try_to_vec().unwrap()).to_string(); - // We use this to determine when the decrypted inner tx makes it - // on-chain - let decrypted_hash = tx.tx_hash.to_string(); - TxBroadcastData::Wrapper { - tx: tx - .sign(keypair) - .expect("Wrapper tx signing keypair should be correct"), - wrapper_hash, - decrypted_hash, - } + namada::ledger::signing::sign_wrapper( + client, + args, + epoch, + tx, + keypair, + #[cfg(not(feature = "mainnet"))] + requires_pow, + ) + .await } diff --git a/apps/src/lib/client/tendermint_rpc_types.rs b/apps/src/lib/client/tendermint_rpc_types.rs deleted file mode 100644 index 537cca243fa..00000000000 --- a/apps/src/lib/client/tendermint_rpc_types.rs +++ /dev/null @@ -1,100 +0,0 @@ -use std::convert::TryFrom; - -use namada::ledger::events::Event; -use namada::proto::Tx; -use namada::types::address::Address; -use serde::Serialize; - -use crate::cli::safe_exit; - -/// Data needed for broadcasting a tx and -/// monitoring its progress on chain -/// -/// Txs may be either a dry run or else -/// they should be encrypted and included -/// in a wrapper. -#[derive(Debug, Clone)] -pub enum TxBroadcastData { - DryRun(Tx), - Wrapper { - tx: Tx, - wrapper_hash: String, - decrypted_hash: String, - }, -} - -/// A parsed event from tendermint relating to a transaction -#[derive(Debug, Serialize)] -pub struct TxResponse { - pub info: String, - pub log: String, - pub height: String, - pub hash: String, - pub code: String, - pub gas_used: String, - pub initialized_accounts: Vec<Address>,
 -} - -impl TryFrom<Event> for TxResponse { - type Error = String; - - fn try_from(event: Event) -> Result<Self, Self::Error> { - fn missing_field_err(field: &str) -> String { - format!("Field \"{field}\" not present in event") - } - - let hash = event - .get("hash") - .ok_or_else(|| missing_field_err("hash"))? - .clone(); - let info = event - .get("info") - .ok_or_else(|| missing_field_err("info"))? - .clone(); - let log = event - .get("log") - .ok_or_else(|| missing_field_err("log"))? - .clone(); - let height = event - .get("height") - .ok_or_else(|| missing_field_err("height"))? - .clone(); - let code = event - .get("code") - .ok_or_else(|| missing_field_err("code"))? - .clone(); - let gas_used = event - .get("gas_used") - .ok_or_else(|| missing_field_err("gas_used"))? - .clone(); - let initialized_accounts = event - .get("initialized_accounts") - .map(String::as_str) - // TODO: fix finalize block, to return initialized accounts, - // even when we reject a tx? - .map_or(Ok(vec![]), |initialized_accounts| { - serde_json::from_str(initialized_accounts) - .map_err(|err| format!("JSON decode error: {err}")) - })?; - - Ok(TxResponse { - hash, - info, - log, - height, - code, - gas_used, - initialized_accounts, - }) - } -} - -impl TxResponse { - /// Convert an [`Event`] to a [`TxResponse`], or error out. - pub fn from_event(event: Event) -> Self { - event.try_into().unwrap_or_else(|err| { - eprintln!("Error fetching TxResponse: {err}"); - safe_exit(1); - }) - } -} diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 933c8dbf2f2..0fcd984b144 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -1,221 +1,84 @@ -use std::borrow::Cow; -use std::collections::hash_map::Entry; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::HashSet; use std::env; use std::fmt::Debug; use std::fs::{File, OpenOptions}; use std::io::{Read, Write}; -use std::ops::Deref; use std::path::PathBuf; +use async_std::io; use async_std::io::prelude::WriteExt; -use async_std::io::{self}; use borsh::{BorshDeserialize, BorshSerialize}; -use itertools::Either::*; -use masp_primitives::asset_type::AssetType; -use masp_primitives::consensus::{BranchId, TestNetwork}; -use masp_primitives::convert::AllowedConversion; -use masp_primitives::ff::PrimeField; -use masp_primitives::group::cofactor::CofactorGroup; -use masp_primitives::keys::FullViewingKey; -use masp_primitives::legacy::TransparentAddress; -use masp_primitives::merkle_tree::{ - CommitmentTree, IncrementalWitness, MerklePath, -}; -use masp_primitives::note_encryption::*; -use masp_primitives::primitives::{Diversifier, Note, ViewingKey}; -use masp_primitives::sapling::Node; -use masp_primitives::transaction::builder::{self, secp256k1, *}; -use masp_primitives::transaction::components::{Amount, OutPoint, TxOut}; -use masp_primitives::transaction::Transaction; -use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; +use data_encoding::HEXLOWER_PERMISSIVE; use masp_proofs::prover::LocalTxProver; -use namada::ibc::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; -use namada::ibc::signer::Signer; -use namada::ibc::timestamp::Timestamp as IbcTimestamp; -use namada::ibc::tx_msg::Msg; -use namada::ibc::Height as IbcHeight; -use namada::ibc_proto::cosmos::base::v1beta1::Coin; use namada::ledger::governance::storage as gov_storage; -use namada::ledger::masp; -use namada::ledger::pos::{CommissionPair, PosParams}; +use namada::ledger::rpc::{TxBroadcastData, TxResponse}; +use
namada::ledger::signing::TxSigningKey; +use namada::ledger::wallet::{Wallet, WalletUtils}; +use namada::ledger::{masp, tx}; use namada::proto::Tx; -use namada::types::address::{masp, masp_tx_key, Address}; +use namada::types::address::Address; use namada::types::governance::{ - OfflineProposal, OfflineVote, Proposal, ProposalVote, + OfflineProposal, OfflineVote, Proposal, ProposalVote, VoteType, }; use namada::types::key::*; -use namada::types::masp::{PaymentAddress, TransferTarget}; -use namada::types::storage::{ - BlockHeight, Epoch, Key, KeySeg, TxIndex, RESERVED_ADDRESS_PREFIX, -}; -use namada::types::time::DateTimeUtc; -use namada::types::token::{ - Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, -}; +use namada::types::storage::{Epoch, Key}; +use namada::types::token; use namada::types::transaction::governance::{ - InitProposalData, VoteProposalData, + InitProposalData, ProposalType, VoteProposalData, }; -use namada::types::transaction::{pos, InitAccount, InitValidator, UpdateVp}; -use namada::types::{storage, token}; -use namada::vm; -use rand_core::{CryptoRng, OsRng, RngCore}; +use namada::types::transaction::InitValidator; use rust_decimal::Decimal; -use sha2::Digest; -use tokio::time::{Duration, Instant}; +use tendermint_rpc::HttpClient; use super::rpc; -use super::types::ShieldedTransferContext; use crate::cli::context::WalletAddress; use crate::cli::{args, safe_exit, Context}; -use crate::client::rpc::{query_conversion, query_storage_value}; -use crate::client::signing::{find_keypair, sign_tx, tx_signer, TxSigningKey}; -use crate::client::tendermint_rpc_types::{TxBroadcastData, TxResponse}; -use crate::client::types::ParsedTxTransferArgs; -use crate::facade::tendermint_config::net::Address as TendermintAddress; +use crate::client::rpc::query_wasm_code_hash; +use crate::client::signing::find_keypair; use crate::facade::tendermint_rpc::endpoint::broadcast::tx_sync::Response; -use crate::facade::tendermint_rpc::error::Error as RpcError; -use crate::facade::tendermint_rpc::{Client, HttpClient}; use crate::node::ledger::tendermint_node; +use crate::wallet::{gen_validator_keys, read_and_confirm_pwd, CliWalletUtils}; -const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; -const TX_INIT_VALIDATOR_WASM: &str = "tx_init_validator.wasm"; -const TX_INIT_PROPOSAL: &str = "tx_init_proposal.wasm"; -const TX_VOTE_PROPOSAL: &str = "tx_vote_proposal.wasm"; -const TX_REVEAL_PK: &str = "tx_reveal_pk.wasm"; -const TX_UPDATE_VP_WASM: &str = "tx_update_vp.wasm"; -const TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; -const TX_IBC_WASM: &str = "tx_ibc.wasm"; -const VP_USER_WASM: &str = "vp_user.wasm"; -const TX_BOND_WASM: &str = "tx_bond.wasm"; -const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; -const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; -const TX_CHANGE_COMMISSION_WASM: &str = "tx_change_validator_commission.wasm"; - -/// Timeout for requests to the `/accepted` and `/applied` -/// ABCI query endpoints. -const ENV_VAR_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS: &str = - "NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS"; - -/// Default timeout in seconds for requests to the `/accepted` -/// and `/applied` ABCI query endpoints. 
-const DEFAULT_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS: u64 = 60; - -pub async fn submit_custom(ctx: Context, args: args::TxCustom) { - let tx_code = ctx.read_wasm(args.code_path); - let data = args.data_path.map(|data_path| { - std::fs::read(data_path).expect("Expected a file at given data path") - }); - let tx = Tx::new(tx_code, data); - let (ctx, initialized_accounts) = process_tx( - ctx, - &args.tx, - tx, - TxSigningKey::None, - #[cfg(not(feature = "mainnet"))] - false, - ) - .await; - save_initialized_accounts(ctx, &args.tx, initialized_accounts).await; +pub async fn submit_custom<C: namada::ledger::queries::Client + Sync>( + client: &C, + ctx: &mut Context, + mut args: args::TxCustom, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); + tx::submit_custom::<C, _>(client, &mut ctx.wallet, args).await } -pub async fn submit_update_vp(ctx: Context, args: args::TxUpdateVp) { - let addr = ctx.get(&args.addr); - - // Check that the address is established and exists on chain - match &addr { - Address::Established(_) => { - let exists = - rpc::known_address(&addr, args.tx.ledger_address.clone()).await; - if !exists { - eprintln!("The address {} doesn't exist on chain.", addr); - if !args.tx.force { - safe_exit(1) - } - } - } - Address::Implicit(_) => { - eprintln!( - "A validity predicate of an implicit address cannot be \ - directly updated. You can use an established address for \ - this purpose." - ); - if !args.tx.force { - safe_exit(1) - } - } - Address::Internal(_) => { - eprintln!( - "A validity predicate of an internal address cannot be \ - directly updated." - ); - if !args.tx.force { - safe_exit(1) - } - } - } - - let vp_code = ctx.read_wasm(args.vp_code_path); - // Validate the VP code - if let Err(err) = vm::validate_untrusted_wasm(&vp_code) { - eprintln!("Validity predicate code validation failed with {}", err); - if !args.tx.force { - safe_exit(1) - } - } - - let tx_code = ctx.read_wasm(TX_UPDATE_VP_WASM); - - let data = UpdateVp { addr, vp_code }; - let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - - let tx = Tx::new(tx_code, Some(data)); - process_tx( - ctx, - &args.tx, - tx, - TxSigningKey::WalletAddress(args.addr), - #[cfg(not(feature = "mainnet"))] - false, - ) - .await; +pub async fn submit_update_vp<C: namada::ledger::queries::Client + Sync>( + client: &C, + ctx: &mut Context, + mut args: args::TxUpdateVp, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); + tx::submit_update_vp::<C, _>(client, &mut ctx.wallet, args).await } -pub async fn submit_init_account(mut ctx: Context, args: args::TxInitAccount) { - let public_key = ctx.get_cached(&args.public_key); - let vp_code = args - .vp_code_path - .map(|path| ctx.read_wasm(path)) - .unwrap_or_else(|| ctx.read_wasm(VP_USER_WASM)); - // Validate the VP code - if let Err(err) = vm::validate_untrusted_wasm(&vp_code) { - eprintln!("Validity predicate code validation failed with {}", err); - if !args.tx.force { - safe_exit(1) - } - } - - let tx_code = ctx.read_wasm(TX_INIT_ACCOUNT_WASM); - let data = InitAccount { - public_key, - vp_code, - }; - let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - - let tx = Tx::new(tx_code, Some(data)); - let (ctx, initialized_accounts) = process_tx( - ctx, - &args.tx, - tx, - TxSigningKey::WalletAddress(args.source), - #[cfg(not(feature = "mainnet"))] - false, - ) - .await; - save_initialized_accounts(ctx, &args.tx, initialized_accounts).await; +pub async fn submit_init_account<C: namada::ledger::queries::Client + Sync>( + client: &C, +
ctx: &mut Context, + mut args: args::TxInitAccount, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); + tx::submit_init_account::<C, _>(client, &mut ctx.wallet, args).await } -pub async fn submit_init_validator( +pub async fn submit_init_validator< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, mut ctx: Context, args::TxInitValidator { tx: tx_args, @@ -228,8 +91,16 @@ pub async fn submit_init_validator( max_commission_rate_change, validator_vp_code_path, unsafe_dont_encrypt, + tx_code_path: _, }: args::TxInitValidator, ) { + let tx_args = args::Tx { + chain_id: tx_args + .clone() + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())), + ..tx_args.clone() + }; let alias = tx_args .initialized_account_alias .as_ref() @@ -238,20 +109,16 @@ pub async fn submit_init_validator( let validator_key_alias = format!("{}-key", alias); let consensus_key_alias = format!("{}-consensus-key", alias); - let account_key = ctx.get_opt_cached(&account_key).unwrap_or_else(|| { + let account_key = account_key.unwrap_or_else(|| { println!("Generating validator account key..."); + let password = read_and_confirm_pwd(unsafe_dont_encrypt); ctx.wallet - .gen_key( - scheme, - Some(validator_key_alias.clone()), - unsafe_dont_encrypt, - ) + .gen_key(scheme, Some(validator_key_alias.clone()), password, tx_args.wallet_alias_force) .1 .ref_to() }); - let consensus_key = ctx - .get_opt_cached(&consensus_key) + let consensus_key = consensus_key .map(|key| match key { common::SecretKey::Ed25519(_) => key, common::SecretKey::Secp256k1(_) => { @@ -261,24 +128,26 @@ pub async fn submit_init_validator( }) .unwrap_or_else(|| { println!("Generating consensus key..."); + let password = read_and_confirm_pwd(unsafe_dont_encrypt); ctx.wallet .gen_key( // Note that TM only allows ed25519 for consensus key SchemeType::Ed25519, Some(consensus_key_alias.clone()), - unsafe_dont_encrypt, + password, + tx_args.wallet_alias_force, ) .1 }); - let protocol_key = ctx.get_opt_cached(&protocol_key); + let protocol_key = protocol_key; if protocol_key.is_none() { println!("Generating protocol signing key..."); } // Generate the validator keys let validator_keys = - ctx.wallet.gen_validator_keys(protocol_key, scheme).unwrap(); + gen_validator_keys(&mut ctx.wallet, protocol_key, scheme).unwrap(); let protocol_key = validator_keys.get_protocol_keypair().ref_to(); let dkg_key = validator_keys .dkg_keypair @@ -286,11 +155,11 @@ pub async fn submit_init_validator( .expect("DKG session keys should have been created") .public(); - ctx.wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); - - let validator_vp_code = validator_vp_code_path - .map(|path| ctx.read_wasm(path)) - .unwrap_or_else(|| ctx.read_wasm(VP_USER_WASM)); + let vp_code_path = String::from_utf8(validator_vp_code_path).unwrap(); + let validator_vp_code_hash = + query_wasm_code_hash::<C>(client, vp_code_path) + .await + .unwrap(); // Validate the commission rate data if commission_rate > Decimal::ONE || commission_rate < Decimal::ZERO { @@ -313,17 +182,10 @@ pub async fn submit_init_validator( safe_exit(1) } } - // Validate the validator VP code - if let Err(err) = vm::validate_untrusted_wasm(&validator_vp_code) { - eprintln!( - "Validator validity predicate code validation failed with {}", - err - ); - if !tx_args.force { - safe_exit(1) - } - } - let tx_code = ctx.read_wasm(TX_INIT_VALIDATOR_WASM); + let tx_code_hash = + query_wasm_code_hash(client, args::TX_INIT_VALIDATOR_WASM) + .await +
.unwrap(); let data = InitValidator { account_key, @@ -332,11 +194,17 @@ pub async fn submit_init_validator( dkg_key, commission_rate, max_commission_rate_change, - validator_vp_code, + validator_vp_code_hash, }; let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data)); - let (mut ctx, initialized_accounts) = process_tx( + let tx = Tx::new( + tx_code_hash.to_vec(), + Some(data), + tx_args.chain_id.clone().unwrap(), + tx_args.expiration, + ); + let (mut ctx, result) = process_tx( + client, ctx, &tx_args, tx, @@ -344,57 +212,58 @@ pub async fn submit_init_validator( #[cfg(not(feature = "mainnet"))] false, ) - .await; + .await + .expect("expected process_tx to work"); + if !tx_args.dry_run { - let (validator_address_alias, validator_address) = - match &initialized_accounts[..] { - // There should be 1 account for the validator itself - [validator_address] => { - let validator_address_alias = match tx_args - .initialized_account_alias - { - Some(alias) => alias, - None => { - print!( - "Choose an alias for the validator address: " - ); - io::stdout().flush().await.unwrap(); - let mut alias = String::new(); - io::stdin().read_line(&mut alias).await.unwrap(); - alias.trim().to_owned() - } - }; - let validator_address_alias = - if validator_address_alias.is_empty() { - println!( - "Empty alias given, using {} as the alias.", - validator_address.encode() - ); - validator_address.encode() - } else { - validator_address_alias - }; - if let Some(new_alias) = ctx.wallet.add_address( - validator_address_alias.clone(), - validator_address.clone(), - ) { + let (validator_address_alias, validator_address) = match &result[..] { + // There should be 1 account for the validator itself + [validator_address] => { + let validator_address_alias = match tx_args + .initialized_account_alias + { + Some(alias) => alias, + None => { + print!("Choose an alias for the validator address: "); + io::stdout().flush().await.unwrap(); + let mut alias = String::new(); + io::stdin().read_line(&mut alias).await.unwrap(); + alias.trim().to_owned() + } + }; + let validator_address_alias = + if validator_address_alias.is_empty() { println!( - "Added alias {} for address {}.", - new_alias, + "Empty alias given, using {} as the alias.", validator_address.encode() ); - } - (validator_address_alias, validator_address.clone()) - } - _ => { - eprintln!("Expected two accounts to be created"); - safe_exit(1) + validator_address.encode() + } else { + validator_address_alias + }; + if let Some(new_alias) = ctx.wallet.add_address( + validator_address_alias.clone(), + validator_address.clone(), + tx_args.wallet_alias_force, + ) { + println!( + "Added alias {} for address {}.", + new_alias, + validator_address.encode() + ); } - }; + (validator_address_alias, validator_address.clone()) + } + _ => { + eprintln!("Expected one account to be created"); + safe_exit(1) + } + }; // add validator address and keys to the wallet ctx.wallet .add_validator_data(validator_address, validator_keys); - ctx.wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); + crate::wallet::save(&ctx.wallet) + .unwrap_or_else(|err| eprintln!("{}", err)); let tendermint_home = ctx.config.ledger.tendermint_dir(); tendermint_node::write_validator_key(&tendermint_home, &consensus_key); @@ -416,149 +285,84 @@ pub async fn submit_init_validator( } } -/// Make a ViewingKey that can view notes encrypted by given ExtendedSpendingKey -pub fn to_viewing_key(esk: &ExtendedSpendingKey) -> FullViewingKey { - 
ExtendedFullViewingKey::from(esk).fvk +/// Shielded context file name +const FILE_NAME: &str = "shielded.dat"; +const TMP_FILE_NAME: &str = "shielded.tmp"; + +#[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] +pub struct CLIShieldedUtils { + #[borsh_skip] + context_dir: PathBuf, } -/// Generate a valid diversifier, i.e. one that has a diversified base. Return -/// also this diversified base. -pub fn find_valid_diversifier<R: RngCore + CryptoRng>( - rng: &mut R, -) -> (Diversifier, masp_primitives::jubjub::SubgroupPoint) { - let mut diversifier; - let g_d; - // Keep generating random diversifiers until one has a diversified base - loop { - let mut d = [0; 11]; - rng.fill_bytes(&mut d); - diversifier = Diversifier(d); - if let Some(val) = diversifier.g_d() { - g_d = val; - break; +impl CLIShieldedUtils { + /// Initialize a shielded transaction context that identifies notes + /// decryptable by any viewing key in the given set + pub fn new(context_dir: PathBuf) -> masp::ShieldedContext<Self> { + // Make sure that MASP parameters are downloaded to enable MASP + // transaction building and verification later on + let params_dir = masp::get_params_dir(); + let spend_path = params_dir.join(masp::SPEND_NAME); + let convert_path = params_dir.join(masp::CONVERT_NAME); + let output_path = params_dir.join(masp::OUTPUT_NAME); + if !(spend_path.exists() + && convert_path.exists() + && output_path.exists()) + { + println!("MASP parameters not present, downloading..."); + masp_proofs::download_parameters() + .expect("MASP parameters not present or downloadable"); + println!("MASP parameter download complete, resuming execution..."); + } + // Finally initialize a shielded context with the supplied directory + let utils = Self { context_dir }; + masp::ShieldedContext { + utils, + ..Default::default() } } - (diversifier, g_d) } -/// Determine if using the current note would actually bring us closer to our -/// target -pub fn is_amount_required(src: Amount, dest: Amount, delta: Amount) -> bool { - if delta > Amount::zero() { - let gap = dest - src; - for (asset_type, value) in gap.components() { - if *value > 0 && delta[asset_type] > 0 { - return true; - } +impl Default for CLIShieldedUtils { + fn default() -> Self { + Self { + context_dir: PathBuf::from(FILE_NAME), } } - false -} - -/// An extension of Option's cloned method for pair types -fn cloned_pair<T: Clone, U: Clone>((a, b): (&T, &U)) -> (T, U) { - (a.clone(), b.clone()) -} - -/// Errors that can occur when trying to retrieve pinned transaction -#[derive(PartialEq, Eq)] -pub enum PinnedBalanceError { - /// No transaction has yet been pinned to the given payment address - NoTransactionPinned, - /// The supplied viewing key does not recognize payments to given address - InvalidViewingKey, -} - -/// Represents the amount used of different conversions -pub type Conversions = - HashMap<AssetType, (AllowedConversion, MerklePath<Node>, i64)>; - -/// Represents the changes that were made to a list of transparent accounts -pub type TransferDelta = HashMap<Address, Amount<Address>>; - -/// Represents the changes that were made to a list of shielded accounts -pub type TransactionDelta = HashMap<ViewingKey, Amount>; - -/// Represents the current state of the shielded pool from the perspective of -/// the chosen viewing keys.
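// A small usage sketch (illustrative only; the directory path is made up)
// for the `CLIShieldedUtils` constructor above: build a shielded context
// rooted at a directory and persist it through the `masp::ShieldedUtils`
// trait implemented further below, which writes `shielded.tmp` before
// replacing `shielded.dat`.
fn example_shielded_context() -> std::io::Result<()> {
    let ctx: masp::ShieldedContext<CLIShieldedUtils> =
        CLIShieldedUtils::new(PathBuf::from("shielded-ctx"));
    ctx.utils.save(&ctx)
}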
-#[derive(BorshSerialize, BorshDeserialize, Debug)] -pub struct ShieldedContext { - /// Location where this shielded context is saved - #[borsh_skip] - context_dir: PathBuf, - /// The last transaction index to be processed in this context - last_txidx: u64, - /// The commitment tree produced by scanning all transactions up to tx_pos - tree: CommitmentTree<Node>, - /// Maps viewing keys to applicable note positions - pos_map: HashMap<ViewingKey, HashSet<usize>>, - /// Maps a nullifier to the note position to which it applies - nf_map: HashMap<[u8; 32], usize>, - /// Maps note positions to their corresponding notes - note_map: HashMap<usize, Note>, - /// Maps note positions to their corresponding memos - memo_map: HashMap<usize, Memo>, - /// Maps note positions to the diversifier of their payment address - div_map: HashMap<usize, Diversifier>, - /// Maps note positions to their witness (used to make merkle paths) - witness_map: HashMap<usize, IncrementalWitness<Node>>, - /// Tracks what each transaction does to various account balances - delta_map: BTreeMap< - (BlockHeight, TxIndex), - (Epoch, TransferDelta, TransactionDelta), - >, - /// The set of note positions that have been spent - spents: HashSet<usize>, - /// Maps asset types to their decodings - asset_types: HashMap<AssetType, (Address, Epoch)>, - /// Maps note positions to their corresponding viewing keys - vk_map: HashMap<usize, ViewingKey>, } -/// Shielded context file name -const FILE_NAME: &str = "shielded.dat"; -const TMP_FILE_NAME: &str = "shielded.tmp"; +impl masp::ShieldedUtils for CLIShieldedUtils { + type C = tendermint_rpc::HttpClient; -/// Default implementation to ease construction of TxContexts. Derive cannot be -/// used here due to CommitmentTree not implementing Default. -impl Default for ShieldedContext { - fn default() -> ShieldedContext { - ShieldedContext { - context_dir: PathBuf::from(FILE_NAME), - last_txidx: u64::default(), - tree: CommitmentTree::empty(), - pos_map: HashMap::default(), - nf_map: HashMap::default(), - note_map: HashMap::default(), - memo_map: HashMap::default(), - div_map: HashMap::default(), - witness_map: HashMap::default(), - spents: HashSet::default(), - delta_map: BTreeMap::default(), - asset_types: HashMap::default(), - vk_map: HashMap::default(), + fn local_tx_prover(&self) -> LocalTxProver { + if let Ok(params_dir) = env::var(masp::ENV_VAR_MASP_PARAMS_DIR) { + let params_dir = PathBuf::from(params_dir); + let spend_path = params_dir.join(masp::SPEND_NAME); + let convert_path = params_dir.join(masp::CONVERT_NAME); + let output_path = params_dir.join(masp::OUTPUT_NAME); + LocalTxProver::new(&spend_path, &output_path, &convert_path) + } else { + LocalTxProver::with_default_location() + .expect("unable to load MASP Parameters") } } -} -impl ShieldedContext { /// Try to load the last saved shielded context from the given context /// directory. If this fails, then leave the current context unchanged.
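// The `local_tx_prover` implementation above prefers an operator-supplied
// parameter directory. A hypothetical override (the path is illustrative)
// before any proving happens:
fn example_params_override() {
    std::env::set_var(masp::ENV_VAR_MASP_PARAMS_DIR, "/tmp/masp-params");
    // Spend/convert/output parameters are now resolved under the directory
    // above instead of the default location.
}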
- pub fn load(&mut self) -> std::io::Result<()> { + fn load(self) -> std::io::Result<masp::ShieldedContext<Self>> { // Try to load shielded context from file let mut ctx_file = File::open(self.context_dir.join(FILE_NAME))?; let mut bytes = Vec::new(); ctx_file.read_to_end(&mut bytes)?; - let mut new_ctx = Self::deserialize(&mut &bytes[..])?; + let mut new_ctx = masp::ShieldedContext::deserialize(&mut &bytes[..])?; // Associate the originating context directory with the // shielded context under construction - new_ctx.context_dir = self.context_dir.clone(); - *self = new_ctx; - Ok(()) + new_ctx.utils = self; + Ok(new_ctx) } /// Save this shielded context into its associated context directory - pub fn save(&self) -> std::io::Result<()> { + fn save(&self, ctx: &masp::ShieldedContext<Self>) -> std::io::Result<()> { // TODO: use mktemp crate? let tmp_path = self.context_dir.join(TMP_FILE_NAME); { @@ -572,7 +376,7 @@ impl ShieldedContext { .create_new(true) .open(tmp_path.clone())?; let mut bytes = Vec::new(); - self.serialize(&mut bytes) + ctx.serialize(&mut bytes) .expect("cannot serialize shielded context"); ctx_file.write_all(&bytes[..])?; } @@ -585,1306 +389,108 @@ impl ShieldedContext { std::fs::remove_file(tmp_path)?; Ok(()) } +} - /// Merge data from the given shielded context into the current shielded - /// context. It must be the case that the two shielded contexts share the - /// same last transaction ID and share identical commitment trees. - pub fn merge(&mut self, new_ctx: ShieldedContext) { - debug_assert_eq!(self.last_txidx, new_ctx.last_txidx); - // Merge by simply extending maps. Identical keys should contain - // identical values, so overwriting should not be problematic. - self.pos_map.extend(new_ctx.pos_map); - self.nf_map.extend(new_ctx.nf_map); - self.note_map.extend(new_ctx.note_map); - self.memo_map.extend(new_ctx.memo_map); - self.div_map.extend(new_ctx.div_map); - self.witness_map.extend(new_ctx.witness_map); - self.spents.extend(new_ctx.spents); - self.asset_types.extend(new_ctx.asset_types); - self.vk_map.extend(new_ctx.vk_map); - // The deltas are the exception because different keys can reveal - // different parts of the same transaction. Hence each delta needs to be - // merged separately. - for ((height, idx), (ep, ntfer_delta, ntx_delta)) in new_ctx.delta_map { - let (_ep, tfer_delta, tx_delta) = self - .delta_map - .entry((height, idx)) - .or_insert((ep, TransferDelta::new(), TransactionDelta::new())); - tfer_delta.extend(ntfer_delta); - tx_delta.extend(ntx_delta); - } - } - - /// Fetch the current state of the multi-asset shielded pool into a - /// ShieldedContext - pub async fn fetch( - &mut self, - ledger_address: &TendermintAddress, - sks: &[ExtendedSpendingKey], - fvks: &[ViewingKey], - ) { - // First determine which of the keys requested to be fetched are new. - // Necessary because old transactions will need to be scanned for new - // keys.
- let mut unknown_keys = Vec::new(); - for esk in sks { - let vk = to_viewing_key(esk).vk; - if !self.pos_map.contains_key(&vk) { - unknown_keys.push(vk); - } - } - for vk in fvks { - if !self.pos_map.contains_key(vk) { - unknown_keys.push(*vk); - } - } +pub async fn submit_transfer( + client: &HttpClient, + mut ctx: Context, + mut args: args::TxTransfer, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); + tx::submit_transfer(client, &mut ctx.wallet, &mut ctx.shielded, args).await +} - // If unknown keys are being used, we need to scan older transactions - // for any unspent notes - let (txs, mut tx_iter); - if !unknown_keys.is_empty() { - // Load all transactions accepted until this point - txs = Self::fetch_shielded_transfers(ledger_address, 0).await; - tx_iter = txs.iter(); - // Do this by constructing a shielding context only for unknown keys - let mut tx_ctx = ShieldedContext::new(self.context_dir.clone()); - for vk in unknown_keys { - tx_ctx.pos_map.entry(vk).or_insert_with(HashSet::new); - } - // Update this unknown shielded context until it is level with self - while tx_ctx.last_txidx != self.last_txidx { - if let Some(((height, idx), (epoch, tx))) = tx_iter.next() { - tx_ctx.scan_tx(*height, *idx, *epoch, tx); - } else { - break; - } - } - // Merge the context data originating from the unknown keys into the - // current context - self.merge(tx_ctx); - } else { - // Load only transactions accepted from last_txidx until this point - txs = - Self::fetch_shielded_transfers(ledger_address, self.last_txidx) - .await; - tx_iter = txs.iter(); - } - // Now that we possess the unspent notes corresponding to both old and - // new keys up until tx_pos, proceed to scan the new transactions.
- for ((height, idx), (epoch, tx)) in &mut tx_iter { - self.scan_tx(*height, *idx, *epoch, tx); - } - } +pub async fn submit_ibc_transfer<C: namada::ledger::queries::Client + Sync>( + client: &C, + mut ctx: Context, + mut args: args::TxIbcTransfer, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); + tx::submit_ibc_transfer::<C, _>(client, &mut ctx.wallet, args).await +} - /// Initialize a shielded transaction context that identifies notes - /// decryptable by any viewing key in the given set - pub fn new(context_dir: PathBuf) -> ShieldedContext { - // Make sure that MASP parameters are downloaded to enable MASP - // transaction building and verification later on - let params_dir = masp::get_params_dir(); - let spend_path = params_dir.join(masp::SPEND_NAME); - let convert_path = params_dir.join(masp::CONVERT_NAME); - let output_path = params_dir.join(masp::OUTPUT_NAME); - if !(spend_path.exists() - && convert_path.exists() - && output_path.exists()) - { - println!("MASP parameters not present, downloading..."); - masp_proofs::download_parameters() - .expect("MASP parameters not present or downloadable"); - println!("MASP parameter download complete, resuming execution..."); - } - // Finally initialize a shielded context with the supplied directory - Self { - context_dir, - ..Default::default() - } - } +pub async fn submit_init_proposal<C: namada::ledger::queries::Client + Sync>( + client: &C, + mut ctx: Context, + mut args: args::InitProposal, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); + let file = File::open(&args.proposal_data).expect("File must exist."); + let proposal: Proposal = + serde_json::from_reader(file).expect("JSON was not well-formatted"); - /// Obtain a chronologically-ordered list of all accepted shielded - /// transactions from the ledger. The ledger conceptually stores - /// transactions as a vector. More concretely, the HEAD_TX_KEY location - /// stores the index of the last accepted transaction and each transaction - /// is stored at a key derived from its index.
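// The key scheme described above, restated in isolation (this mirrors the
// removed code just below; the helper name is hypothetical): the head
// counter lives under the MASP address at the `head-tx` segment, and the
// i-th accepted transfer under a `tx-` segment suffixed with its index.
fn example_masp_tx_keys(i: u64) -> (Key, Key) {
    let masp_addr = masp();
    let head_tx_key = Key::from(masp_addr.to_db_key())
        .push(&HEAD_TX_KEY.to_owned())
        .expect("Cannot obtain a storage key");
    let tx_key = Key::from(masp_addr.to_db_key())
        .push(&(TX_KEY_PREFIX.to_owned() + &i.to_string()))
        .expect("Cannot obtain a storage key");
    (head_tx_key, tx_key)
}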
- pub async fn fetch_shielded_transfers( - ledger_address: &TendermintAddress, - last_txidx: u64, - ) -> BTreeMap<(BlockHeight, TxIndex), (Epoch, Transfer)> { - let client = HttpClient::new(ledger_address.clone()).unwrap(); - // The address of the MASP account - let masp_addr = masp(); - // Construct the key where the last transaction pointer is stored - let head_tx_key = Key::from(masp_addr.to_db_key()) - .push(&HEAD_TX_KEY.to_owned()) - .expect("Cannot obtain a storage key"); - // Query for the index of the last accepted transaction - let head_txidx = query_storage_value::<u64>(&client, &head_tx_key) - .await - .unwrap_or(0); - let mut shielded_txs = BTreeMap::new(); - // Fetch all the transactions we do not have yet - for i in last_txidx..head_txidx { - // Construct the key for where the current transaction is stored - let current_tx_key = Key::from(masp_addr.to_db_key()) - .push(&(TX_KEY_PREFIX.to_owned() + &i.to_string())) - .expect("Cannot obtain a storage key"); - // Obtain the current transaction - let (tx_epoch, tx_height, tx_index, current_tx) = - query_storage_value::<(Epoch, BlockHeight, TxIndex, Transfer)>( - &client, - &current_tx_key, - ) - .await - .unwrap(); - // Collect the current transaction - shielded_txs.insert((tx_height, tx_index), (tx_epoch, current_tx)); - } - shielded_txs - } + let signer = WalletAddress::new(proposal.clone().author.to_string()); + let governance_parameters = rpc::get_governance_parameters(client).await; + let current_epoch = rpc::query_and_print_epoch(client).await; - /// Applies the given transaction to the supplied context. More precisely, - /// the shielded transaction's outputs are added to the commitment tree. - /// Newly discovered notes are associated to the supplied viewing keys. Note - /// nullifiers are mapped to their originating notes. Note positions are - /// associated to notes, memos, and diversifiers. And the set of notes that - /// we have spent are updated. The witness map is maintained to make it - /// easier to construct note merkle paths in other code. See - /// <https://zips.z.cash/protocol/protocol.pdf#scan> - pub fn scan_tx( - &mut self, - height: BlockHeight, - index: TxIndex, - epoch: Epoch, - tx: &Transfer, - ) { - // Ignore purely transparent transactions - let shielded = if let Some(shielded) = &tx.shielded { - shielded - } else { - return; - }; - // For tracking the account changes caused by this Transaction - let mut transaction_delta = TransactionDelta::new(); - // Listen for notes sent to our viewing keys - for so in &shielded.shielded_outputs { - // Create merkle tree leaf node from note commitment - let node = Node::new(so.cmu.to_repr()); - // Update each merkle tree in the witness map with the latest - // addition - for (_, witness) in self.witness_map.iter_mut() { - witness.append(node).expect("note commitment tree is full"); - } - let note_pos = self.tree.size(); - self.tree - .append(node) - .expect("note commitment tree is full"); - // Finally, make it easier to construct merkle paths to this new - // note - let witness = IncrementalWitness::<Node>::from_tree(&self.tree); - self.witness_map.insert(note_pos, witness); - // Let's try to see if any of our viewing keys can decrypt latest - // note - for (vk, notes) in self.pos_map.iter_mut() { - let decres = try_sapling_note_decryption::<TestNetwork>( - 0, - &vk.ivk().0, - &so.ephemeral_key.into_subgroup().unwrap(), - &so.cmu, - &so.enc_ciphertext, - ); - // So this current viewing key does decrypt this current note...
- if let Some((note, pa, memo)) = decres { - // Add this note to list of notes decrypted by this viewing - // key - notes.insert(note_pos); - // Compute the nullifier now to quickly recognize when spent - let nf = note.nf(vk, note_pos.try_into().unwrap()); - self.note_map.insert(note_pos, note); - self.memo_map.insert(note_pos, memo); - // The payment address' diversifier is required to spend - // note - self.div_map.insert(note_pos, *pa.diversifier()); - self.nf_map.insert(nf.0, note_pos); - // Note the account changes - let balance = transaction_delta - .entry(*vk) - .or_insert_with(Amount::zero); - *balance += - Amount::from_nonnegative(note.asset_type, note.value) - .expect( - "found note with invalid value or asset type", - ); - self.vk_map.insert(note_pos, *vk); - break; - } - } - } - // Cancel out those of our notes that have been spent - for ss in &shielded.shielded_spends { - // If the shielded spend's nullifier is in our map, then target note - // is rendered unusable - if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { - self.spents.insert(*note_pos); - // Note the account changes - let balance = transaction_delta - .entry(self.vk_map[note_pos]) - .or_insert_with(Amount::zero); - let note = self.note_map[note_pos]; - *balance -= - Amount::from_nonnegative(note.asset_type, note.value) - .expect("found note with invalid value or asset type"); - } - } - // Record the changes to the transparent accounts - let transparent_delta = - Amount::from_nonnegative(tx.token.clone(), u64::from(tx.amount)) - .expect("invalid value for amount"); - let mut transfer_delta = TransferDelta::new(); - transfer_delta - .insert(tx.source.clone(), Amount::zero() - &transparent_delta); - transfer_delta.insert(tx.target.clone(), transparent_delta); - self.delta_map.insert( - (height, index), - (epoch, transfer_delta, transaction_delta), + if proposal.voting_start_epoch <= current_epoch + || proposal.voting_start_epoch.0 + % governance_parameters.min_proposal_period + != 0 + { + println!("{}", proposal.voting_start_epoch <= current_epoch); + println!( + "{}", + proposal.voting_start_epoch.0 + % governance_parameters.min_proposal_period + == 0 ); - self.last_txidx += 1; - } - - /// Summarize the effects on shielded and transparent accounts of each - /// Transfer in this context - pub fn get_tx_deltas( - &self, - ) -> &BTreeMap< - (BlockHeight, TxIndex), - (Epoch, TransferDelta, TransactionDelta), - > { - &self.delta_map - } - - /// Compute the total unspent notes associated with the viewing key in the - /// context. If the key is not in the context, then we do not know the - /// balance and hence we return None. 
- pub fn compute_shielded_balance(&self, vk: &ViewingKey) -> Option<Amount> { - // Cannot query the balance of a key that's not in the map - if !self.pos_map.contains_key(vk) { - return None; + eprintln!( + "Invalid proposal start epoch: {} must be greater than current \ + epoch {} and a multiple of {}", + proposal.voting_start_epoch, + current_epoch, + governance_parameters.min_proposal_period + ); + if !args.tx.force { + safe_exit(1) } - let mut val_acc = Amount::zero(); - // Retrieve the notes that can be spent by this key - if let Some(avail_notes) = self.pos_map.get(vk) { - for note_idx in avail_notes { - // Spent notes cannot contribute to a new transaction's pool - if self.spents.contains(note_idx) { - continue; - } - // Get note associated with this ID - let note = self.note_map.get(note_idx).unwrap(); - // Finally add value to multi-asset accumulator - val_acc += - Amount::from_nonnegative(note.asset_type, note.value) - .expect("found note with invalid value or asset type"); - } + } else if proposal.voting_end_epoch <= proposal.voting_start_epoch + || proposal.voting_end_epoch.0 - proposal.voting_start_epoch.0 + < governance_parameters.min_proposal_period + || proposal.voting_end_epoch.0 - proposal.voting_start_epoch.0 + > governance_parameters.max_proposal_period + || proposal.voting_end_epoch.0 % 3 != 0 + { + eprintln!( + "Invalid proposal end epoch: difference between proposal start \ + and end epoch must be at least {} and at max {} and end epoch \ + must be a multiple of {}", + governance_parameters.min_proposal_period, + governance_parameters.max_proposal_period, + governance_parameters.min_proposal_period + ); + if !args.tx.force { + safe_exit(1) } - Some(val_acc) - } - - /// Query the ledger for the decoding of the given asset type and cache it - /// if it is found. - pub async fn decode_asset_type( - &mut self, - client: HttpClient, - asset_type: AssetType, - ) -> Option<(Address, Epoch)> { - // Try to find the decoding in the cache - if let decoded @ Some(_) = self.asset_types.get(&asset_type) { - return decoded.cloned(); - } - // Query for the ID of the last accepted transaction - let (addr, ep, _conv, _path): (Address, _, Amount, MerklePath<Node>) = - query_conversion(client, asset_type).await?; - self.asset_types.insert(asset_type, (addr.clone(), ep)); - Some((addr, ep)) - } - - /// Query the ledger for the conversion that is allowed for the given asset - /// type and cache it. - async fn query_allowed_conversion<'a>( - &'a mut self, - client: HttpClient, - asset_type: AssetType, - conversions: &'a mut Conversions, - ) -> Option<&'a mut (AllowedConversion, MerklePath<Node>, i64)> { - match conversions.entry(asset_type) { - Entry::Occupied(conv_entry) => Some(conv_entry.into_mut()), - Entry::Vacant(conv_entry) => { - // Query for the ID of the last accepted transaction - let (addr, ep, conv, path): (Address, _, _, _) = - query_conversion(client, asset_type).await?; - self.asset_types.insert(asset_type, (addr, ep)); - // If the conversion is 0, then we just have a pure decoding - if conv == Amount::zero() { - None - } else { - Some(conv_entry.insert((Amount::into(conv), path, 0))) - } - } - } - } - - /// Compute the total unspent notes associated with the viewing key in the - /// context and express that value in terms of the currently timestamped - /// asset types. If the key is not in the context, then we do not know the - /// balance and hence we return None.
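// A worked instance of the proposal start-epoch rule enforced in
// `submit_init_proposal` above, with hypothetical numbers: given
// `min_proposal_period = 3` and `current_epoch = 10`, start epoch 12 is
// valid (greater than 10 and divisible by 3) while 10 and 11 are not.
fn example_start_epoch_is_valid(start: u64, current: u64, min_period: u64) -> bool {
    start > current && start % min_period == 0
}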
- pub async fn compute_exchanged_balance( - &mut self, - client: HttpClient, - vk: &ViewingKey, - target_epoch: Epoch, - ) -> Option<Amount> { - // First get the unexchanged balance - if let Some(balance) = self.compute_shielded_balance(vk) { - // And then exchange balance into current asset types - Some( - self.compute_exchanged_amount( - client, - balance, - target_epoch, - HashMap::new(), - ) - .await - .0, - ) - } else { - None - } - } - - /// Try to convert as much of the given asset type-value pair using the - /// given allowed conversion. usage is incremented by the amount of the - /// conversion used, the conversions are applied to the given input, and - /// the trace amount that could not be converted is moved from input to - /// output. - fn apply_conversion( - conv: AllowedConversion, - asset_type: AssetType, - value: i64, - usage: &mut i64, - input: &mut Amount, - output: &mut Amount, - ) { - // If conversion is possible, accumulate the exchanged amount - let conv: Amount = conv.into(); - // The amount required of current asset to qualify for conversion - let threshold = -conv[&asset_type]; - if threshold == 0 { - eprintln!( - "Asset threshold of selected conversion for asset type {} is \ - 0, this is a bug, please report it.", - asset_type - ); - } - // We should use an amount of the AllowedConversion that almost - // cancels the original amount - let required = value / threshold; - // Forget about the trace amount left over because we cannot - // realize its value - let trace = Amount::from_pair(asset_type, value % threshold).unwrap(); - // Record how much more of the given conversion has been used - *usage += required; - // Apply the conversions to input and move the trace amount to output - *input += conv * required - &trace; - *output += trace; - } - - /// Convert the given amount into the latest asset types whilst making a - /// note of the conversions that were used. Note that this function does - /// not assume that allowed conversions from the ledger are expressed in - /// terms of the latest asset types. - pub async fn compute_exchanged_amount( - &mut self, - client: HttpClient, - mut input: Amount, - target_epoch: Epoch, - mut conversions: Conversions, - ) -> (Amount, Conversions) { - // Where we will store our exchanged value - let mut output = Amount::zero(); - // Repeatedly exchange assets until it is no longer possible - while let Some((asset_type, value)) = - input.components().next().map(cloned_pair) - { - let target_asset_type = self - .decode_asset_type(client.clone(), asset_type) - .await - .map(|(addr, _epoch)| make_asset_type(target_epoch, &addr)) - .unwrap_or(asset_type); - let at_target_asset_type = asset_type == target_asset_type; - if let (Some((conv, _wit, usage)), false) = ( - self.query_allowed_conversion( - client.clone(), - asset_type, - &mut conversions, - ) - .await, - at_target_asset_type, - ) { - println!( - "converting current asset type to latest asset type..." - ); - // Not at the target asset type, not at the latest asset type. - // Apply conversion to get from current asset type to the latest - // asset type. - Self::apply_conversion( - conv.clone(), - asset_type, - value, - usage, - &mut input, - &mut output, - ); - } else if let (Some((conv, _wit, usage)), false) = ( - self.query_allowed_conversion( - client.clone(), - target_asset_type, - &mut conversions, - ) - .await, - at_target_asset_type, - ) { - println!( - "converting latest asset type to target asset type..." - ); - // Not at the target asset type, yes at the latest asset type.
- // Apply inverse conversion to get from latest asset type to - // the target asset type. - Self::apply_conversion( - conv.clone(), - asset_type, - value, - usage, - &mut input, - &mut output, - ); - } else { - // At the target asset type. Then move component over to output. - let comp = input.project(asset_type); - output += &comp; - // Strike from input to avoid repeating computation - input -= comp; - } - } - (output, conversions) - } - - /// Collect enough unspent notes in this context to exceed the given amount - /// of the specified asset type. Return the total value accumulated plus - /// notes and the corresponding diversifiers/merkle paths that were used to - /// achieve the total value. - pub async fn collect_unspent_notes( - &mut self, - ledger_address: TendermintAddress, - vk: &ViewingKey, - target: Amount, - target_epoch: Epoch, - ) -> ( - Amount, - Vec<(Diversifier, Note, MerklePath<Node>)>, - Conversions, - ) { - // Establish connection with which to do exchange rate queries - let client = HttpClient::new(ledger_address.clone()).unwrap(); - let mut conversions = HashMap::new(); - let mut val_acc = Amount::zero(); - let mut notes = Vec::new(); - // Retrieve the notes that can be spent by this key - if let Some(avail_notes) = self.pos_map.get(vk).cloned() { - for note_idx in &avail_notes { - // No more transaction inputs are required once we have met - // the target amount - if val_acc >= target { - break; - } - // Spent notes cannot contribute to a new transaction's pool - if self.spents.contains(note_idx) { - continue; - } - // Get note, merkle path, diversifier associated with this ID - let note = *self.note_map.get(note_idx).unwrap(); - - // The amount contributed by this note before conversion - let pre_contr = Amount::from_pair(note.asset_type, note.value) - .expect("received note has invalid value or asset type"); - let (contr, proposed_convs) = self - .compute_exchanged_amount( - client.clone(), - pre_contr, - target_epoch, - conversions.clone(), - ) - .await; - - // Use this note only if it brings us closer to our target - if is_amount_required( - val_acc.clone(), - target.clone(), - contr.clone(), - ) { - // Be sure to record the conversions used in computing - // accumulated value - val_acc += contr; - // Commit the conversions that were used to exchange - conversions = proposed_convs; - let merkle_path = - self.witness_map.get(note_idx).unwrap().path().unwrap(); - let diversifier = self.div_map.get(note_idx).unwrap(); - // Commit this note to our transaction - notes.push((*diversifier, note, merkle_path)); - } - } - } - (val_acc, notes, conversions) - } - - /// Compute the combined value of the output notes of the transaction pinned - /// at the given payment address. This computation uses the supplied viewing - /// keys to try to decrypt the output notes. If no transaction is pinned at - /// the given payment address, this fails with - /// `PinnedBalanceError::NoTransactionPinned`.
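// The pin lookup described above, shown standalone (mirrors the removed
// code just below; the helper name is hypothetical): a pinned payment
// address resolves through a storage key derived from the MASP address, the
// pin prefix, and the hash of the payment address.
fn example_pin_key(owner: PaymentAddress) -> Key {
    Key::from(masp().to_db_key())
        .push(&(PIN_KEY_PREFIX.to_owned() + &owner.hash()))
        .expect("Cannot obtain a storage key")
}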
- pub async fn compute_pinned_balance( - ledger_address: &TendermintAddress, - owner: PaymentAddress, - viewing_key: &ViewingKey, - ) -> Result<(Amount, Epoch), PinnedBalanceError> { - // Check that the supplied viewing key corresponds to given payment - // address - let counter_owner = viewing_key.to_payment_address( - *masp_primitives::primitives::PaymentAddress::diversifier( - &owner.into(), - ), - ); - match counter_owner { - Some(counter_owner) if counter_owner == owner.into() => {} - _ => return Err(PinnedBalanceError::InvalidViewingKey), - } - let client = HttpClient::new(ledger_address.clone()).unwrap(); - // The address of the MASP account - let masp_addr = masp(); - // Construct the key for where the transaction ID would be stored - let pin_key = Key::from(masp_addr.to_db_key()) - .push(&(PIN_KEY_PREFIX.to_owned() + &owner.hash())) - .expect("Cannot obtain a storage key"); - // Obtain the transaction pointer at the key - let txidx = query_storage_value::<u64>(&client, &pin_key) - .await - .ok_or(PinnedBalanceError::NoTransactionPinned)?; - // Construct the key for where the pinned transaction is stored - let tx_key = Key::from(masp_addr.to_db_key()) - .push(&(TX_KEY_PREFIX.to_owned() + &txidx.to_string())) - .expect("Cannot obtain a storage key"); - // Obtain the pointed to transaction - let (tx_epoch, _tx_height, _tx_index, tx) = - query_storage_value::<(Epoch, BlockHeight, TxIndex, Transfer)>( - &client, &tx_key, - ) - .await - .expect("Ill-formed epoch, transaction pair"); - // Accumulate the combined output note value into this Amount - let mut val_acc = Amount::zero(); - let tx = tx - .shielded - .expect("Pinned Transfers should have shielded part"); - for so in &tx.shielded_outputs { - // Let's try to see if our viewing key can decrypt current note - let decres = try_sapling_note_decryption::<TestNetwork>( - 0, - &viewing_key.ivk().0, - &so.ephemeral_key.into_subgroup().unwrap(), - &so.cmu, - &so.enc_ciphertext, - ); - match decres { - // So the given viewing key does decrypt this current note... - Some((note, pa, _memo)) if pa == owner.into() => { - val_acc += - Amount::from_nonnegative(note.asset_type, note.value) - .expect( - "found note with invalid value or asset type", - ); - break; - } - _ => {} - } - } - Ok((val_acc, tx_epoch)) - } - - /// Compute the combined value of the output notes of the pinned transaction - /// at the given payment address if there's any. The asset types may be from - /// the epoch of the transaction or even before, so exchange all these - /// amounts to the epoch of the transaction in order to get the value that - /// would have been displayed in the epoch of the transaction. - pub async fn compute_exchanged_pinned_balance( - &mut self, - ledger_address: &TendermintAddress, - owner: PaymentAddress, - viewing_key: &ViewingKey, - ) -> Result<(Amount, Epoch), PinnedBalanceError> { - // Obtain the balance that will be exchanged - let (amt, ep) = - Self::compute_pinned_balance(ledger_address, owner, viewing_key) - .await?; - // Establish connection with which to do exchange rate queries - let client = HttpClient::new(ledger_address.clone()).unwrap(); - // Finally, exchange the balance to the transaction's epoch - Ok(( - self.compute_exchanged_amount(client, amt, ep, HashMap::new()) - .await - .0, - ep, - )) - } - - /// Convert an amount whose units are AssetTypes to one whose units are - /// Addresses that they decode to. All asset types not corresponding to - /// the given epoch are ignored.
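// Why decoding filters on the target epoch: asset types are epoch-stamped,
// so the same token denotes a different `AssetType` in every epoch (cf.
// `make_asset_type` further below). An illustrative check with a
// hypothetical token address:
fn example_epoched_assets(token: &Address) {
    assert_ne!(
        make_asset_type(Epoch(4), token),
        make_asset_type(Epoch(5), token)
    );
}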
- pub async fn decode_amount( - &mut self, - client: HttpClient, - amt: Amount, - target_epoch: Epoch, - ) -> Amount<Address>
{ - let mut res = Amount::zero(); - for (asset_type, val) in amt.components() { - // Decode the asset type - let decoded = - self.decode_asset_type(client.clone(), *asset_type).await; - // Only assets with the target timestamp count - match decoded { - Some((addr, epoch)) if epoch == target_epoch => { - res += &Amount::from_pair(addr, *val).unwrap() - } - _ => {} - } - } - res - } - - /// Convert an amount whose units are AssetTypes to one whose units are - /// Addresses that they decode to. - pub async fn decode_all_amounts( - &mut self, - client: HttpClient, - amt: Amount, - ) -> Amount<(Address, Epoch)> { - let mut res = Amount::zero(); - for (asset_type, val) in amt.components() { - // Decode the asset type - let decoded = - self.decode_asset_type(client.clone(), *asset_type).await; - // Keep any asset that successfully decodes, regardless of epoch - if let Some((addr, epoch)) = decoded { - res += &Amount::from_pair((addr, epoch), *val).unwrap() - } - } - res - } -} - -/// Make asset type corresponding to given address and epoch -fn make_asset_type(epoch: Epoch, token: &Address) -> AssetType { - // Timestamp the chosen token with the current epoch - let token_bytes = (token, epoch.0) - .try_to_vec() - .expect("token should serialize"); - // Generate the unique asset identifier from the unique token address - AssetType::new(token_bytes.as_ref()).expect("unable to create asset type") -} - -/// Convert Namada amount and token type to MASP equivalents -fn convert_amount( - epoch: Epoch, - token: &Address, - val: token::Amount, -) -> (AssetType, Amount) { - let asset_type = make_asset_type(epoch, token); - // Combine the value and unit into one amount - let amount = Amount::from_nonnegative(asset_type, u64::from(val)) - .expect("invalid value for amount"); - (asset_type, amount) -} - -/// Make shielded components to embed within a Transfer object. If no shielded -/// payment address or spending key is specified, then no shielded components -/// are produced. Otherwise a transaction containing nullifiers and/or note -/// commitments is produced. Dummy transparent UTXOs are sometimes used to make -/// transactions balanced, but it is understood that transparent account changes -/// are effected only by the amounts and signatures specified by the containing -/// Transfer object.
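// The input-selection target used inside `gen_shielded_transfer` below,
// restated as a stand-alone helper (hypothetical name): when the wrapper
// fee is also paid from the shielded pool, the collected notes must cover
// amount + fee; otherwise the fee is settled transparently.
fn example_required_amount(amount: Amount, fee: Amount, shielded_gas: bool) -> Amount {
    if shielded_gas { amount + fee } else { amount }
}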
-async fn gen_shielded_transfer( - ctx: &mut C, - args: &ParsedTxTransferArgs, - shielded_gas: bool, -) -> Result<Option<(Transaction, TransactionMetadata)>, builder::Error> -where - C: ShieldedTransferContext, -{ - let spending_key = args.source.spending_key().map(|x| x.into()); - let payment_address = args.target.payment_address(); - // Determine epoch in which to submit potential shielded transaction - let epoch = ctx.query_epoch(args.tx.ledger_address.clone()).await; - // Context required for storing which notes are in the source's possession - let consensus_branch_id = BranchId::Sapling; - let amt: u64 = args.amount.into(); - let memo: Option<Memo> = None; - - // Now we build up the transaction within this object - let mut builder = Builder::<TestNetwork, OsRng>::new(0u32); - // Convert transaction amount into MASP types - let (asset_type, amount) = convert_amount(epoch, &args.token, args.amount); - - // Transactions with transparent input and shielded output - // may be affected if constructed close to epoch boundary - let mut epoch_sensitive: bool = false; - // If there are shielded inputs - if let Some(sk) = spending_key { - // Transaction fees need to match the amount in the wrapper Transfer - // when MASP source is used - let (_, fee) = - convert_amount(epoch, &args.tx.fee_token, args.tx.fee_amount); - builder.set_fee(fee.clone())?; - // If the gas is coming from the shielded pool, then our shielded inputs - // must also cover the gas fee - let required_amt = if shielded_gas { amount + fee } else { amount }; - // Locate unspent notes that can help us meet the transaction amount - let (_, unspent_notes, used_convs) = ctx - .collect_unspent_notes( - args.tx.ledger_address.clone(), - &to_viewing_key(&sk).vk, - required_amt, - epoch, - ) - .await; - // Commit the notes found to our transaction - for (diversifier, note, merkle_path) in unspent_notes { - builder.add_sapling_spend(sk, diversifier, note, merkle_path)?; - } - // Commit the conversion notes used during summation - for (conv, wit, value) in used_convs.values() { - if *value > 0 { - builder.add_convert( - conv.clone(), - *value as u64, - wit.clone(), - )?; - } - } - } else { - // No transfer fees come from the shielded transaction for non-MASP - // sources - builder.set_fee(Amount::zero())?; - // We add a dummy UTXO to our transaction, but only the source of the - // parent Transfer object is used to validate fund availability - let secp_sk = - secp256k1::SecretKey::from_slice(&[0xcd; 32]).expect("secret key"); - let secp_ctx = secp256k1::Secp256k1::<secp256k1::SignOnly>::gen_new(); - let secp_pk = - secp256k1::PublicKey::from_secret_key(&secp_ctx, &secp_sk) - .serialize(); - let hash = - ripemd160::Ripemd160::digest(&sha2::Sha256::digest(&secp_pk)); - let script = TransparentAddress::PublicKey(hash.into()).script(); - epoch_sensitive = true; - builder.add_transparent_input( - secp_sk, - OutPoint::new([0u8; 32], 0), - TxOut { - asset_type, - value: amt, - script_pubkey: script, - }, - )?; - } - // Now handle the outputs of this transaction - // If there is a shielded output - if let Some(pa) = payment_address { - let ovk_opt = spending_key.map(|x| x.expsk.ovk); - builder.add_sapling_output( - ovk_opt, - pa.into(), - asset_type, - amt, - memo.clone(), - )?; - } else { - epoch_sensitive = false; - // Embed the transparent target address into the shielded transaction so - // that it can be signed - let target_enc = args - .target - .address() - .expect("target address should be transparent") - .try_to_vec() - .expect("target address encoding"); - let hash = ripemd160::Ripemd160::digest(&sha2::Sha256::digest(
target_enc.as_ref(), - )); - builder.add_transparent_output( - &TransparentAddress::PublicKey(hash.into()), - asset_type, - amt, - )?; - } - let prover = if let Ok(params_dir) = env::var(masp::ENV_VAR_MASP_PARAMS_DIR) - { - let params_dir = PathBuf::from(params_dir); - let spend_path = params_dir.join(masp::SPEND_NAME); - let convert_path = params_dir.join(masp::CONVERT_NAME); - let output_path = params_dir.join(masp::OUTPUT_NAME); - LocalTxProver::new(&spend_path, &output_path, &convert_path) - } else { - LocalTxProver::with_default_location() - .expect("unable to load MASP Parameters") - }; - // Build and return the constructed transaction - let mut tx = builder.build(consensus_branch_id, &prover); - - if epoch_sensitive { - let new_epoch = ctx.query_epoch(args.tx.ledger_address.clone()).await; - - // If epoch has changed, recalculate shielded outputs to match new epoch - if new_epoch != epoch { - // Hack: build new shielded transfer with updated outputs - let mut replay_builder = Builder::::new(0u32); - replay_builder.set_fee(Amount::zero())?; - let ovk_opt = spending_key.map(|x| x.expsk.ovk); - let (new_asset_type, _) = - convert_amount(new_epoch, &args.token, args.amount); - replay_builder.add_sapling_output( - ovk_opt, - payment_address.unwrap().into(), - new_asset_type, - amt, - memo, - )?; - - let secp_sk = secp256k1::SecretKey::from_slice(&[0xcd; 32]) - .expect("secret key"); - let secp_ctx = - secp256k1::Secp256k1::::gen_new(); - let secp_pk = - secp256k1::PublicKey::from_secret_key(&secp_ctx, &secp_sk) - .serialize(); - let hash = - ripemd160::Ripemd160::digest(&sha2::Sha256::digest(&secp_pk)); - let script = TransparentAddress::PublicKey(hash.into()).script(); - replay_builder.add_transparent_input( - secp_sk, - OutPoint::new([0u8; 32], 0), - TxOut { - asset_type: new_asset_type, - value: amt, - script_pubkey: script, - }, - )?; - - let (replay_tx, _) = - replay_builder.build(consensus_branch_id, &prover)?; - tx = tx.map(|(t, tm)| { - let mut temp = t.deref().clone(); - temp.shielded_outputs = replay_tx.shielded_outputs.clone(); - temp.value_balance = temp.value_balance.reject(asset_type) - - Amount::from_pair(new_asset_type, amt).unwrap(); - (temp.freeze().unwrap(), tm) - }); - } - } - - tx.map(Some) -} - -pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { - let parsed_args = args.parse_from_context(&mut ctx); - let source = parsed_args.source.effective_address(); - let target = parsed_args.target.effective_address(); - // Check that the source address exists on chain - let source_exists = - rpc::known_address(&source, args.tx.ledger_address.clone()).await; - if !source_exists { - eprintln!("The source address {} doesn't exist on chain.", source); - if !args.tx.force { - safe_exit(1) - } - } - // Check that the target address exists on chain - let target_exists = - rpc::known_address(&target, args.tx.ledger_address.clone()).await; - if !target_exists { - eprintln!("The target address {} doesn't exist on chain.", target); - if !args.tx.force { - safe_exit(1) - } - } - // Check that the token address exists on chain - let token_exists = - rpc::known_address(&parsed_args.token, args.tx.ledger_address.clone()) - .await; - if !token_exists { - eprintln!( - "The token address {} doesn't exist on chain.", - parsed_args.token - ); - if !args.tx.force { - safe_exit(1) - } - } - // Check source balance - let (sub_prefix, balance_key) = match args.sub_prefix { - Some(sub_prefix) => { - let sub_prefix = storage::Key::parse(sub_prefix).unwrap(); - let prefix = 
token::multitoken_balance_prefix( - &parsed_args.token, - &sub_prefix, - ); - ( - Some(sub_prefix), - token::multitoken_balance_key(&prefix, &source), - ) - } - None => (None, token::balance_key(&parsed_args.token, &source)), - }; - let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); - match rpc::query_storage_value::(&client, &balance_key).await - { - Some(balance) => { - if balance < args.amount { - eprintln!( - "The balance of the source {} of token {} is lower than \ - the amount to be transferred. Amount to transfer is {} \ - and the balance is {}.", - source, parsed_args.token, args.amount, balance - ); - if !args.tx.force { - safe_exit(1) - } - } - } - None => { - eprintln!( - "No balance found for the source {} of token {}", - source, parsed_args.token - ); - if !args.tx.force { - safe_exit(1) - } - } - }; - - let masp_addr = masp(); - // For MASP sources, use a special sentinel key recognized by VPs as default - // signer. Also, if the transaction is shielded, redact the amount and token - // types by setting the transparent value to 0 and token type to a constant. - // This has no side-effect because transaction is to self. - let (default_signer, amount, token) = - if source == masp_addr && target == masp_addr { - // TODO Refactor me, we shouldn't rely on any specific token here. - ( - TxSigningKey::SecretKey(masp_tx_key()), - 0.into(), - ctx.native_token.clone(), - ) - } else if source == masp_addr { - ( - TxSigningKey::SecretKey(masp_tx_key()), - args.amount, - parsed_args.token.clone(), - ) - } else { - ( - TxSigningKey::WalletAddress(args.source.to_address()), - args.amount, - parsed_args.token.clone(), - ) - }; - // If our chosen signer is the MASP sentinel key, then our shielded inputs - // will need to cover the gas fees. - let chosen_signer = tx_signer(&mut ctx, &args.tx, default_signer.clone()) - .await - .ref_to(); - let shielded_gas = masp_tx_key().ref_to() == chosen_signer; - // Determine whether to pin this transaction to a storage key - let key = match ctx.get(&args.target) { - TransferTarget::PaymentAddress(pa) if pa.is_pinned() => Some(pa.hash()), - _ => None, - }; - - #[cfg(not(feature = "mainnet"))] - let is_source_faucet = - rpc::is_faucet_account(&source, args.tx.ledger_address.clone()).await; - - let transfer = token::Transfer { - source, - target, - token, - sub_prefix, - amount, - key, - shielded: { - let spending_key = parsed_args.source.spending_key(); - let payment_address = parsed_args.target.payment_address(); - // No shielded components are needed when neither source nor - // destination are shielded - if spending_key.is_none() && payment_address.is_none() { - None - } else { - // We want to fund our transaction solely from supplied spending - // key - let spending_key = spending_key.map(|x| x.into()); - let spending_keys: Vec<_> = spending_key.into_iter().collect(); - // Load the current shielded context given the spending key we - // possess - let _ = ctx.shielded.load(); - ctx.shielded - .fetch(&args.tx.ledger_address, &spending_keys, &[]) - .await; - // Save the update state so that future fetches can be - // short-circuited - let _ = ctx.shielded.save(); - let stx_result = - gen_shielded_transfer(&mut ctx, &parsed_args, shielded_gas) - .await; - match stx_result { - Ok(stx) => stx.map(|x| x.0), - Err(builder::Error::ChangeIsNegative(_)) => { - eprintln!( - "The balance of the source {} is lower than the \ - amount to be transferred and fees. 
Amount to \ - transfer is {} {} and fees are {} {}.", - parsed_args.source, - args.amount, - parsed_args.token, - args.tx.fee_amount, - parsed_args.tx.fee_token, - ); - safe_exit(1) - } - Err(err) => panic!("{}", err), - } - } - }, - }; - tracing::debug!("Transfer data {:?}", transfer); - let data = transfer - .try_to_vec() - .expect("Encoding tx data shouldn't fail"); - let tx_code = ctx.read_wasm(TX_TRANSFER_WASM); - let tx = Tx::new(tx_code, Some(data)); - let signing_address = TxSigningKey::WalletAddress(args.source.to_address()); - - process_tx( - ctx, - &args.tx, - tx, - signing_address, - #[cfg(not(feature = "mainnet"))] - is_source_faucet, - ) - .await; -} - -pub async fn submit_ibc_transfer(ctx: Context, args: args::TxIbcTransfer) { - let source = ctx.get(&args.source); - // Check that the source address exists on chain - let source_exists = - rpc::known_address(&source, args.tx.ledger_address.clone()).await; - if !source_exists { - eprintln!("The source address {} doesn't exist on chain.", source); - if !args.tx.force { - safe_exit(1) - } - } - - // We cannot check the receiver - - let token = ctx.get(&args.token); - // Check that the token address exists on chain - let token_exists = - rpc::known_address(&token, args.tx.ledger_address.clone()).await; - if !token_exists { - eprintln!("The token address {} doesn't exist on chain.", token); - if !args.tx.force { - safe_exit(1) - } - } - // Check source balance - let (sub_prefix, balance_key) = match args.sub_prefix { - Some(sub_prefix) => { - let sub_prefix = storage::Key::parse(sub_prefix).unwrap(); - let prefix = token::multitoken_balance_prefix(&token, &sub_prefix); - ( - Some(sub_prefix), - token::multitoken_balance_key(&prefix, &source), - ) - } - None => (None, token::balance_key(&token, &source)), - }; - let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); - match rpc::query_storage_value::(&client, &balance_key).await - { - Some(balance) => { - if balance < args.amount { - eprintln!( - "The balance of the source {} of token {} is lower than \ - the amount to be transferred. 
Amount to transfer is {} \ - and the balance is {}.", - source, token, args.amount, balance - ); - if !args.tx.force { - safe_exit(1) - } - } - } - None => { - eprintln!( - "No balance found for the source {} of token {}", - source, token - ); - if !args.tx.force { - safe_exit(1) - } - } - } - let tx_code = ctx.read_wasm(TX_IBC_WASM); - - let denom = match sub_prefix { - // To parse IbcToken address, remove the address prefix - Some(sp) => sp.to_string().replace(RESERVED_ADDRESS_PREFIX, ""), - None => token.to_string(), - }; - let token = Some(Coin { - denom, - amount: args.amount.to_string(), - }); - - // this height should be that of the destination chain, not this chain - let timeout_height = match args.timeout_height { - Some(h) => IbcHeight::new(0, h), - None => IbcHeight::zero(), - }; - - let now: namada::tendermint::Time = DateTimeUtc::now().try_into().unwrap(); - let now: IbcTimestamp = now.into(); - let timeout_timestamp = if let Some(offset) = args.timeout_sec_offset { - (now + Duration::new(offset, 0)).unwrap() - } else if timeout_height.is_zero() { - // we cannot set 0 to both the height and the timestamp - (now + Duration::new(3600, 0)).unwrap() - } else { - IbcTimestamp::none() - }; - - let msg = MsgTransfer { - source_port: args.port_id, - source_channel: args.channel_id, - token, - sender: Signer::new(source.to_string()), - receiver: Signer::new(args.receiver), - timeout_height, - timeout_timestamp, - }; - tracing::debug!("IBC transfer message {:?}", msg); - let any_msg = msg.to_any(); - let mut data = vec![]; - prost::Message::encode(&any_msg, &mut data) - .expect("Encoding tx data shouldn't fail"); - - let tx = Tx::new(tx_code, Some(data)); - process_tx( - ctx, - &args.tx, - tx, - TxSigningKey::WalletAddress(args.source), - #[cfg(not(feature = "mainnet"))] - false, - ) - .await; -} - -pub async fn submit_init_proposal(mut ctx: Context, args: args::InitProposal) { - let file = File::open(&args.proposal_data).expect("File must exist."); - let proposal: Proposal = - serde_json::from_reader(file).expect("JSON was not well-formatted"); - - let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); - - let signer = WalletAddress::new(proposal.clone().author.to_string()); - let governance_parameters = rpc::get_governance_parameters(&client).await; - let current_epoch = rpc::query_and_print_epoch(args::Query { - ledger_address: args.tx.ledger_address.clone(), - }) - .await; - - if proposal.voting_start_epoch <= current_epoch - || proposal.voting_start_epoch.0 - % governance_parameters.min_proposal_period - != 0 - { - println!("{}", proposal.voting_start_epoch <= current_epoch); - println!( - "{}", - proposal.voting_start_epoch.0 - % governance_parameters.min_proposal_period - == 0 - ); - eprintln!( - "Invalid proposal start epoch: {} must be greater than current \ - epoch {} and a multiple of {}", - proposal.voting_start_epoch, - current_epoch, - governance_parameters.min_proposal_period - ); - if !args.tx.force { - safe_exit(1) - } - } else if proposal.voting_end_epoch <= proposal.voting_start_epoch - || proposal.voting_end_epoch.0 - proposal.voting_start_epoch.0 - < governance_parameters.min_proposal_period - || proposal.voting_end_epoch.0 - proposal.voting_start_epoch.0 - > governance_parameters.max_proposal_period - || proposal.voting_end_epoch.0 % 3 != 0 - { - eprintln!( - "Invalid proposal end epoch: difference between proposal start \ - and end epoch must be at least {} and at max {} and end epoch \ - must be a multiple of {}", - 
governance_parameters.min_proposal_period, - governance_parameters.max_proposal_period, - governance_parameters.min_proposal_period - ); - if !args.tx.force { - safe_exit(1) - } - } else if proposal.grace_epoch <= proposal.voting_end_epoch - || proposal.grace_epoch.0 - proposal.voting_end_epoch.0 - < governance_parameters.min_proposal_grace_epochs - { - eprintln!( - "Invalid proposal grace epoch: difference between proposal grace \ - and end epoch must be at least {}", - governance_parameters.min_proposal_grace_epochs - ); - if !args.tx.force { - safe_exit(1) + } else if proposal.grace_epoch <= proposal.voting_end_epoch + || proposal.grace_epoch.0 - proposal.voting_end_epoch.0 + < governance_parameters.min_proposal_grace_epochs + { + eprintln!( + "Invalid proposal grace epoch: difference between proposal grace \ + and end epoch must be at least {}", + governance_parameters.min_proposal_grace_epochs + ); + if !args.tx.force { + safe_exit(1) } } if args.offline { let signer = ctx.get(&signer); - let signing_key = find_keypair( - &mut ctx.wallet, - &signer, - args.tx.ledger_address.clone(), - ) - .await; + let signing_key = + find_keypair::(client, &mut ctx.wallet, &signer) + .await?; let offline_proposal = OfflineProposal::new(proposal, signer, &signing_key); let proposal_filename = args @@ -1905,7 +511,9 @@ pub async fn submit_init_proposal(mut ctx: Context, args: args::InitProposal) { safe_exit(1) } } + Ok(()) } else { + let signer = ctx.get(&signer); let tx_data: Result = proposal.clone().try_into(); let init_proposal_data = if let Ok(data) = tx_data { data @@ -1915,8 +523,8 @@ pub async fn submit_init_proposal(mut ctx: Context, args: args::InitProposal) { }; let balance = rpc::get_token_balance( - &client, - &ctx.native_token, + client, + &args.native_token, &proposal.author, ) .await @@ -1941,10 +549,18 @@ pub async fn submit_init_proposal(mut ctx: Context, args: args::InitProposal) { let data = init_proposal_data .try_to_vec() .expect("Encoding proposal data shouldn't fail"); - let tx_code = ctx.read_wasm(TX_INIT_PROPOSAL); - let tx = Tx::new(tx_code, Some(data)); + let tx_code_hash = query_wasm_code_hash(client, args::TX_INIT_PROPOSAL) + .await + .unwrap(); + let tx = Tx::new( + tx_code_hash.to_vec(), + Some(data), + ctx.config.ledger.chain_id.clone(), + args.tx.expiration, + ); - process_tx( + process_tx::( + client, ctx, &args.tx, tx, @@ -1952,11 +568,20 @@ pub async fn submit_init_proposal(mut ctx: Context, args: args::InitProposal) { #[cfg(not(feature = "mainnet"))] false, ) - .await; + .await?; + Ok(()) } } -pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { +pub async fn submit_vote_proposal( + client: &C, + mut ctx: Context, + mut args: args::VoteProposal, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); let signer = if let Some(addr) = &args.tx.signer { addr } else { @@ -1964,34 +589,87 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { safe_exit(1) }; + // Construct vote + let proposal_vote = match args.vote.to_ascii_lowercase().as_str() { + "yay" => { + if let Some(pgf) = args.proposal_pgf { + let splits = pgf.trim().split_ascii_whitespace(); + let address_iter = splits.clone().into_iter().step_by(2); + let cap_iter = splits.into_iter().skip(1).step_by(2); + let mut set = HashSet::new(); + for (address, cap) in + address_iter.zip(cap_iter).map(|(addr, cap)| { + ( + addr.parse() + .expect("Failed to parse pgf council address"), + 
cap.parse::() + .expect("Failed to parse pgf spending cap"), + ) + }) + { + set.insert((address, cap.into())); + } + + ProposalVote::Yay(VoteType::PGFCouncil(set)) + } else if let Some(eth) = args.proposal_eth { + let mut splits = eth.trim().split_ascii_whitespace(); + // Sign the message + let sigkey = splits + .next() + .expect("Expected signing key") + .parse::() + .expect("Signing key parsing failed."); + + let msg = splits.next().expect("Missing message to sign"); + if splits.next().is_some() { + eprintln!("Unexpected argument after message"); + safe_exit(1); + } + + ProposalVote::Yay(VoteType::ETHBridge(common::SigScheme::sign( + &sigkey, + HEXLOWER_PERMISSIVE + .decode(msg.as_bytes()) + .expect("Error while decoding message"), + ))) + } else { + ProposalVote::Yay(VoteType::Default) + } + } + "nay" => ProposalVote::Nay, + _ => { + eprintln!("Vote must be either yay or nay"); + safe_exit(1); + } + }; + if args.offline { - let signer = ctx.get(signer); + if !proposal_vote.is_default_vote() { + eprintln!( + "Wrong vote type for offline proposal. Just vote yay or nay!" + ); + safe_exit(1); + } let proposal_file_path = args.proposal_data.expect("Proposal file should exist."); let file = File::open(&proposal_file_path).expect("File must exist."); let proposal: OfflineProposal = serde_json::from_reader(file).expect("JSON was not well-formatted"); - let public_key = rpc::get_public_key( - &proposal.address, - args.tx.ledger_address.clone(), - ) - .await - .expect("Public key should exist."); + let public_key = rpc::get_public_key(client, &proposal.address) + .await + .expect("Public key should exist."); if !proposal.check_signature(&public_key) { eprintln!("Proposal signature mismatch!"); safe_exit(1) } - let signing_key = find_keypair( - &mut ctx.wallet, - &signer, - args.tx.ledger_address.clone(), - ) - .await; + let signing_key = + find_keypair::(client, &mut ctx.wallet, signer) + .await?; let offline_vote = OfflineVote::new( &proposal, - args.vote, + proposal_vote, signer.clone(), &signing_key, ); @@ -2007,28 +685,74 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { "Proposal vote created: {}.", proposal_vote_filename.to_string_lossy() ); + Ok(()) } Err(e) => { eprintln!("Error while creating proposal vote file: {}.", e); safe_exit(1) } } - } else { - let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); - let current_epoch = rpc::query_and_print_epoch(args::Query { - ledger_address: args.tx.ledger_address.clone(), - }) - .await; - - let voter_address = ctx.get(signer); - let proposal_id = args.proposal_id.unwrap(); - let proposal_start_epoch_key = - gov_storage::get_voting_start_epoch_key(proposal_id); - let proposal_start_epoch = rpc::query_storage_value::( - &client, - &proposal_start_epoch_key, - ) - .await; + } else { + let current_epoch = rpc::query_and_print_epoch(client).await; + + let voter_address = signer.clone(); + let proposal_id = args.proposal_id.unwrap(); + let proposal_start_epoch_key = + gov_storage::get_voting_start_epoch_key(proposal_id); + let proposal_start_epoch = rpc::query_storage_value::( + client, + &proposal_start_epoch_key, + ) + .await; + + // Check vote type and memo + let proposal_type_key = gov_storage::get_proposal_type_key(proposal_id); + let proposal_type: ProposalType = rpc::query_storage_value::< + C, + ProposalType, + >(client, &proposal_type_key) + .await + .unwrap_or_else(|| { + panic!("Didn't find type of proposal id {} in storage", proposal_id) + }); + + if let ProposalVote::Yay(ref vote_type) = 
proposal_vote { + if &proposal_type != vote_type { + eprintln!( + "Expected vote of type {}, found {}", + proposal_type, args.vote + ); + safe_exit(1); + } else if let VoteType::PGFCouncil(set) = vote_type { + // Check that addresses proposed as council are established and + // are present in storage + for (address, _) in set { + match address { + Address::Established(_) => { + let vp_key = Key::validity_predicate(address); + if !rpc::query_has_storage_key::(client, &vp_key) + .await + { + eprintln!( + "Proposed PGF council {} cannot be found \ + in storage", + address + ); + safe_exit(1); + } + } + _ => { + eprintln!( + "PGF council vote contains a non-established \ + address: {}", + address + ); + safe_exit(1); + } + } + } + } + } match proposal_start_epoch { Some(epoch) => { @@ -2044,7 +768,7 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { } } let mut delegations = - rpc::get_delegators_delegation(&client, &voter_address) + rpc::get_delegators_delegation(client, &voter_address) .await; // Optimize by quering if a vote from a validator @@ -2054,37 +778,34 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { // validator changing his vote and, effectively, invalidating // the delgator's vote if !args.tx.force - && is_safe_voting_window( - args.tx.ledger_address.clone(), - &client, - proposal_id, - epoch, - ) - .await + && is_safe_voting_window(client, proposal_id, epoch).await? { delegations = filter_delegations( - &client, + client, delegations, proposal_id, - &args.vote, + &proposal_vote, ) .await; } let tx_data = VoteProposalData { id: proposal_id, - vote: args.vote, + vote: proposal_vote, voter: voter_address, delegations: delegations.into_iter().collect(), }; + let chain_id = args.tx.chain_id.clone().unwrap(); + let expiration = args.tx.expiration; let data = tx_data .try_to_vec() .expect("Encoding proposal data shouldn't fail"); - let tx_code = ctx.read_wasm(TX_VOTE_PROPOSAL); - let tx = Tx::new(tx_code, Some(data)); + let tx_code = args.tx_code_path; + let tx = Tx::new(tx_code, Some(data), chain_id, expiration); - process_tx( + process_tx::( + client, ctx, &args.tx, tx, @@ -2092,175 +813,88 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { #[cfg(not(feature = "mainnet"))] false, ) - .await; + .await?; + Ok(()) } None => { eprintln!( "Proposal start epoch for proposal id {} is not definied.", proposal_id ); - if !args.tx.force { - safe_exit(1) - } + if !args.tx.force { safe_exit(1) } else { Ok(()) } } } } } -pub async fn submit_reveal_pk(mut ctx: Context, args: args::RevealPk) { - let args::RevealPk { - tx: args, - public_key, - } = args; - let public_key = ctx.get_cached(&public_key); - if !reveal_pk_if_needed(&mut ctx, &public_key, &args).await { - let addr: Address = (&public_key).into(); - println!("PK for {addr} is already revealed, nothing to do."); - } +pub async fn submit_reveal_pk( + client: &C, + ctx: &mut Context, + mut args: args::RevealPk, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); + tx::submit_reveal_pk::(client, &mut ctx.wallet, args).await } -pub async fn reveal_pk_if_needed( +pub async fn reveal_pk_if_needed( + client: &C, ctx: &mut Context, public_key: &common::PublicKey, args: &args::Tx, -) -> bool { - let addr: Address = public_key.into(); - // Check if PK revealed - if args.force || !has_revealed_pk(&addr, args.ledger_address.clone()).await - { - // If not, submit it - 
submit_reveal_pk_aux(ctx, public_key, args).await; - true - } else { - false - } +) -> Result { + let args = args::Tx { + chain_id: args + .clone() + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())), + ..args.clone() + }; + tx::reveal_pk_if_needed::(client, &mut ctx.wallet, public_key, &args) + .await } -pub async fn has_revealed_pk( +pub async fn has_revealed_pk( + client: &C, addr: &Address, - ledger_address: TendermintAddress, ) -> bool { - rpc::get_public_key(addr, ledger_address).await.is_some() + tx::has_revealed_pk(client, addr).await } -pub async fn submit_reveal_pk_aux( +pub async fn submit_reveal_pk_aux( + client: &C, ctx: &mut Context, public_key: &common::PublicKey, args: &args::Tx, -) { - let addr: Address = public_key.into(); - println!("Submitting a tx to reveal the public key for address {addr}..."); - let tx_data = public_key - .try_to_vec() - .expect("Encoding a public key shouldn't fail"); - let tx_code = ctx.read_wasm(TX_REVEAL_PK); - let tx = Tx::new(tx_code, Some(tx_data)); - - // submit_tx without signing the inner tx - let keypair = if let Some(signing_key) = &args.signing_key { - ctx.get_cached(signing_key) - } else if let Some(signer) = args.signer.as_ref() { - let signer = ctx.get(signer); - find_keypair(&mut ctx.wallet, &signer, args.ledger_address.clone()) - .await - } else { - find_keypair(&mut ctx.wallet, &addr, args.ledger_address.clone()).await +) -> Result<(), tx::Error> { + let args = args::Tx { + chain_id: args + .clone() + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())), + ..args.clone() }; - let epoch = rpc::query_and_print_epoch(args::Query { - ledger_address: args.ledger_address.clone(), - }) - .await; - let to_broadcast = if args.dry_run { - TxBroadcastData::DryRun(tx) - } else { - super::signing::sign_wrapper( - ctx, - args, - epoch, - tx, - &keypair, - #[cfg(not(feature = "mainnet"))] - false, - ) + tx::submit_reveal_pk_aux::(client, &mut ctx.wallet, public_key, &args) .await - }; - - if args.dry_run { - if let TxBroadcastData::DryRun(tx) = to_broadcast { - rpc::dry_run_tx(&args.ledger_address, tx.to_bytes()).await; - } else { - panic!( - "Expected a dry-run transaction, received a wrapper \ - transaction instead" - ); - } - } else { - // Either broadcast or submit transaction and collect result into - // sum type - let result = if args.broadcast_only { - Left(broadcast_tx(args.ledger_address.clone(), &to_broadcast).await) - } else { - Right(submit_tx(args.ledger_address.clone(), to_broadcast).await) - }; - // Return result based on executed operation, otherwise deal with - // the encountered errors uniformly - match result { - Right(Err(err)) => { - eprintln!( - "Encountered error while broadcasting transaction: {}", - err - ); - safe_exit(1) - } - Left(Err(err)) => { - eprintln!( - "Encountered error while broadcasting transaction: {}", - err - ); - safe_exit(1) - } - _ => {} - } - } } /// Check if current epoch is in the last third of the voting period of the /// proposal. This ensures that it is safe to optimize the vote writing to /// storage. 
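The doc comment above states the rule that `is_safe_voting_window` (defined just below) delegates to: validators may vote only during roughly the first two thirds of the voting period, so once the current epoch is in the last third their votes are final and the delegation-filtering optimization is safe. A minimal sketch with plain `u64` epochs; the exact two-thirds boundary check is an assumption here, not copied from `is_valid_validator_voting_period`:

```rust
/// Sketch: validators can vote only while
///   3 * current <= start + 2 * end,
/// i.e. during the first two thirds of [start, end] (assumed boundary).
fn in_validator_voting_period(current: u64, start: u64, end: u64) -> bool {
    start < end && current >= start && 3 * current <= start + 2 * end
}

/// The window is "safe" for the delegation-filtering optimization exactly
/// when validators can no longer (re)cast their votes.
fn is_safe_voting_window(current: u64, start: u64, end: u64) -> bool {
    !in_validator_voting_period(current, start, end)
}

fn main() {
    // Voting spans epochs 9..=18; the two-thirds point is epoch 15.
    assert!(!is_safe_voting_window(12, 9, 18));
    assert!(is_safe_voting_window(16, 9, 18));
}
```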
-async fn is_safe_voting_window( - ledger_address: TendermintAddress, - client: &HttpClient, +async fn is_safe_voting_window( + client: &C, proposal_id: u64, proposal_start_epoch: Epoch, -) -> bool { - let current_epoch = - rpc::query_and_print_epoch(args::Query { ledger_address }).await; - - let proposal_end_epoch_key = - gov_storage::get_voting_end_epoch_key(proposal_id); - let proposal_end_epoch = - rpc::query_storage_value::(client, &proposal_end_epoch_key) - .await; - - match proposal_end_epoch { - Some(proposal_end_epoch) => { - !namada::ledger::native_vp::governance::utils::is_valid_validator_voting_period( - current_epoch, - proposal_start_epoch, - proposal_end_epoch, - ) - } - None => { - eprintln!("Proposal end epoch is not in the storage."); - safe_exit(1) - } - } +) -> Result { + tx::is_safe_voting_window(client, proposal_id, proposal_start_epoch).await } /// Removes validators whose vote corresponds to that of the delegator (needless /// vote) -async fn filter_delegations( - client: &HttpClient, +async fn filter_delegations( + client: &C, delegations: HashSet
, proposal_id: u64, delegator_vote: &ProposalVote, @@ -2279,8 +913,10 @@ async fn filter_delegations( ); if let Some(validator_vote) = - rpc::query_storage_value::(client, &vote_key) - .await + rpc::query_storage_value::( + client, &vote_key, + ) + .await { if &validator_vote == delegator_vote { return None; @@ -2294,457 +930,106 @@ async fn filter_delegations( delegations.into_iter().flatten().collect() } -pub async fn submit_bond(ctx: Context, args: args::Bond) { - let validator = ctx.get(&args.validator); - - // Check that the validator address exists on chain - let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); - let is_validator = rpc::is_validator(&client, &validator).await; - if !is_validator { - eprintln!( - "The address {} doesn't belong to any known validator account.", - validator - ); - if !args.tx.force { - safe_exit(1) - } - } - let source = ctx.get_opt(&args.source); - // Check that the source address exists on chain - if let Some(source) = &source { - let source_exists = - rpc::known_address(source, args.tx.ledger_address.clone()).await; - if !source_exists { - eprintln!("The source address {} doesn't exist on chain.", source); - if !args.tx.force { - safe_exit(1) - } - } - } - // Check bond's source (source for delegation or validator for self-bonds) - // balance - let bond_source = source.as_ref().unwrap_or(&validator); - let balance_key = token::balance_key(&ctx.native_token, bond_source); - match rpc::query_storage_value::(&client, &balance_key).await - { - Some(balance) => { - println!("Found source balance {}", balance); - if balance < args.amount { - eprintln!( - "The balance of the source {} is lower than the amount to \ - be transferred. Amount to transfer is {} and the balance \ - is {}.", - bond_source, args.amount, balance - ); - if !args.tx.force { - safe_exit(1) - } - } - } - None => { - eprintln!("No balance found for the source {}", bond_source); - if !args.tx.force { - safe_exit(1) - } - } - } - let tx_code = ctx.read_wasm(TX_BOND_WASM); - println!("Wasm tx bond code bytes length = {}\n", tx_code.len()); - let bond = pos::Bond { - validator, - amount: args.amount, - source, - }; - let data = bond.try_to_vec().expect("Encoding tx data shouldn't fail"); - - let tx = Tx::new(tx_code, Some(data)); - let default_signer = args.source.unwrap_or(args.validator); - process_tx( - ctx, - &args.tx, - tx, - TxSigningKey::WalletAddress(default_signer), - #[cfg(not(feature = "mainnet"))] - false, - ) - .await; +pub async fn submit_bond( + client: &C, + ctx: &mut Context, + mut args: args::Bond, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); + tx::submit_bond::(client, &mut ctx.wallet, args).await } -pub async fn submit_unbond(ctx: Context, args: args::Unbond) { - let validator = ctx.get(&args.validator); - let source = ctx.get_opt(&args.source); - - // Check that the validator address exists on chain - let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); - let is_validator = rpc::is_validator(&client, &validator).await; - if !is_validator { - eprintln!( - "The address {} doesn't belong to any known validator account.", - validator - ); - if !args.tx.force { - safe_exit(1) - } - } - - // Check the source's current bond amount - let bond_source = source.clone().unwrap_or_else(|| validator.clone()); - let bond_amount = - rpc::query_bond(&client, &bond_source, &validator, None).await; - println!("BOND AMOUNT REMAINING IS {}", bond_amount); - - if 
args.amount > bond_amount { - eprintln!( - "The total bonds of the source {} is lower than the amount to be \ - unbonded. Amount to unbond is {} and the total bonds is {}.", - bond_source, args.amount, bond_amount - ); - if !args.tx.force { - safe_exit(1) - } - } - - let data = pos::Unbond { - validator: validator.clone(), - amount: args.amount, - source: Some(bond_source.clone()), - }; - let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - - let tx_code = ctx.read_wasm(TX_UNBOND_WASM); - let tx = Tx::new(tx_code, Some(data)); - let default_signer = args.source.unwrap_or(args.validator); - let (_ctx, _) = process_tx( - ctx, - &args.tx, - tx, - TxSigningKey::WalletAddress(default_signer), - #[cfg(not(feature = "mainnet"))] - false, - ) - .await; - - rpc::query_and_print_unbonds(&client, &bond_source, &validator).await; +pub async fn submit_unbond( + client: &C, + ctx: &mut Context, + mut args: args::Unbond, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); + tx::submit_unbond::(client, &mut ctx.wallet, args).await } -pub async fn submit_withdraw(ctx: Context, args: args::Withdraw) { - let validator = ctx.get(&args.validator); - let source = ctx.get_opt(&args.source); - - let epoch = rpc::query_and_print_epoch(args::Query { - ledger_address: args.tx.ledger_address.clone(), - }) - .await; - - // Check that the validator address exists on chain - let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); - let is_validator = rpc::is_validator(&client, &validator).await; - if !is_validator { - eprintln!( - "The address {} doesn't belong to any known validator account.", - validator - ); - if !args.tx.force { - safe_exit(1) - } - } - - // Check the source's current unbond amount - let bond_source = source.clone().unwrap_or_else(|| validator.clone()); - let tokens = rpc::query_withdrawable_tokens( - &client, - &bond_source, - &validator, - Some(epoch), - ) - .await; - if tokens == 0.into() { - eprintln!( - "There are no unbonded bonds ready to withdraw in the current \ - epoch {}.", - epoch - ); - rpc::query_and_print_unbonds(&client, &bond_source, &validator).await; - if !args.tx.force { - safe_exit(1) - } - } else { - println!("Found {tokens} tokens that can be withdrawn."); - println!("Submitting transaction to withdraw them..."); - } - - let data = pos::Withdraw { validator, source }; - let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - - let tx_code = ctx.read_wasm(TX_WITHDRAW_WASM); - let tx = Tx::new(tx_code, Some(data)); - let default_signer = args.source.unwrap_or(args.validator); - process_tx( - ctx, - &args.tx, - tx, - TxSigningKey::WalletAddress(default_signer), - #[cfg(not(feature = "mainnet"))] - false, - ) - .await; +pub async fn submit_withdraw( + client: &C, + mut ctx: Context, + mut args: args::Withdraw, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); + tx::submit_withdraw::(client, &mut ctx.wallet, args).await } -pub async fn submit_validator_commission_change( - ctx: Context, - args: args::TxCommissionRateChange, -) { - let epoch = rpc::query_and_print_epoch(args::Query { - ledger_address: args.tx.ledger_address.clone(), - }) - .await; - - let tx_code = ctx.read_wasm(TX_CHANGE_COMMISSION_WASM); - let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); - - // TODO: put following two let statements in its own function - let params_key = 
namada::ledger::pos::params_key(); - let params = query_storage_value::(&client, ¶ms_key) - .await - .expect("Parameter should be defined."); - - let validator = ctx.get(&args.validator); - if rpc::is_validator(&client, &validator).await { - if args.rate < Decimal::ZERO || args.rate > Decimal::ONE { - eprintln!("Invalid new commission rate, received {}", args.rate); - if !args.tx.force { - safe_exit(1) - } - } - - let pipeline_epoch_minus_one = epoch + params.pipeline_len - 1; - - match rpc::query_commission_rate( - &client, - &validator, - Some(pipeline_epoch_minus_one), - ) - .await - { - Some(CommissionPair { - commission_rate, - max_commission_change_per_epoch, - }) => { - if (args.rate - commission_rate).abs() - > max_commission_change_per_epoch - { - eprintln!( - "New rate is too large of a change with respect to \ - the predecessor epoch in which the rate will take \ - effect." - ); - if !args.tx.force { - safe_exit(1) - } - } - } - None => { - eprintln!("Error retrieving from storage"); - if !args.tx.force { - safe_exit(1) - } - } - } - } else { - eprintln!("The given address {validator} is not a validator."); - if !args.tx.force { - safe_exit(1) - } - } - - let data = pos::CommissionChange { - validator: ctx.get(&args.validator), - new_rate: args.rate, - }; - let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - - let tx = Tx::new(tx_code, Some(data)); - let default_signer = args.validator; - process_tx( - ctx, - &args.tx, - tx, - TxSigningKey::WalletAddress(default_signer), - #[cfg(not(feature = "mainnet"))] - false, +pub async fn submit_validator_commission_change< + C: namada::ledger::queries::Client + Sync, +>( + client: &C, + mut ctx: Context, + mut args: args::TxCommissionRateChange, +) -> Result<(), tx::Error> { + args.tx.chain_id = args + .tx + .chain_id + .or_else(|| Some(ctx.config.ledger.chain_id.clone())); + tx::submit_validator_commission_change::( + client, + &mut ctx.wallet, + args, ) - .await; + .await } /// Submit transaction and wait for result. Returns a list of addresses /// initialized in the transaction if any. In dry run, this is always empty. -async fn process_tx( - ctx: Context, +async fn process_tx( + client: &C, + mut ctx: Context, args: &args::Tx, tx: Tx, default_signer: TxSigningKey, #[cfg(not(feature = "mainnet"))] requires_pow: bool, -) -> (Context, Vec
) { - let (ctx, to_broadcast) = sign_tx( - ctx, +) -> Result<(Context, Vec<Address>
), tx::Error> { + let args = args::Tx { + chain_id: args.clone().chain_id.or_else(|| Some(tx.chain_id.clone())), + ..args.clone() + }; + let res: Vec<Address>
= tx::process_tx::( + client, + &mut ctx.wallet, + &args, tx, - args, default_signer, #[cfg(not(feature = "mainnet"))] requires_pow, ) - .await; - // NOTE: use this to print the request JSON body: - - // let request = - // tendermint_rpc::endpoint::broadcast::tx_commit::Request::new( - // tx_bytes.clone().into(), - // ); - // use tendermint_rpc::Request; - // let request_body = request.into_json(); - // println!("HTTP request body: {}", request_body); - - if args.dry_run { - if let TxBroadcastData::DryRun(tx) = to_broadcast { - rpc::dry_run_tx(&args.ledger_address, tx.to_bytes()).await; - (ctx, vec![]) - } else { - panic!( - "Expected a dry-run transaction, received a wrapper \ - transaction instead" - ); - } - } else { - // Either broadcast or submit transaction and collect result into - // sum type - let result = if args.broadcast_only { - Left(broadcast_tx(args.ledger_address.clone(), &to_broadcast).await) - } else { - Right(submit_tx(args.ledger_address.clone(), to_broadcast).await) - }; - // Return result based on executed operation, otherwise deal with - // the encountered errors uniformly - match result { - Right(Ok(result)) => (ctx, result.initialized_accounts), - Left(Ok(_)) => (ctx, Vec::default()), - Right(Err(err)) => { - eprintln!( - "Encountered error while broadcasting transaction: {}", - err - ); - safe_exit(1) - } - Left(Err(err)) => { - eprintln!( - "Encountered error while broadcasting transaction: {}", - err - ); - safe_exit(1) - } - } - } + .await?; + Ok((ctx, res)) } /// Save accounts initialized from a tx into the wallet, if any. -async fn save_initialized_accounts( - mut ctx: Context, +pub async fn save_initialized_accounts( + wallet: &mut Wallet, args: &args::Tx, initialized_accounts: Vec
, ) { - let len = initialized_accounts.len(); - if len != 0 { - // Store newly initialized account addresses in the wallet - println!( - "The transaction initialized {} new account{}", - len, - if len == 1 { "" } else { "s" } - ); - // Store newly initialized account addresses in the wallet - let wallet = &mut ctx.wallet; - for (ix, address) in initialized_accounts.iter().enumerate() { - let encoded = address.encode(); - let alias: Cow = match &args.initialized_account_alias { - Some(initialized_account_alias) => { - if len == 1 { - // If there's only one account, use the - // alias as is - initialized_account_alias.into() - } else { - // If there're multiple accounts, use - // the alias as prefix, followed by - // index number - format!("{}{}", initialized_account_alias, ix).into() - } - } - None => { - print!("Choose an alias for {}: ", encoded); - io::stdout().flush().await.unwrap(); - let mut alias = String::new(); - io::stdin().read_line(&mut alias).await.unwrap(); - alias.trim().to_owned().into() - } - }; - let alias = alias.into_owned(); - let added = wallet.add_address(alias.clone(), address.clone()); - match added { - Some(new_alias) if new_alias != encoded => { - println!( - "Added alias {} for address {}.", - new_alias, encoded - ); - } - _ => println!("No alias added for address {}.", encoded), - }; - } - if !args.dry_run { - wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); - } else { - println!("Transaction dry run. No addresses have been saved.") - } - } + tx::save_initialized_accounts::(wallet, args, initialized_accounts).await } /// Broadcast a transaction to be included in the blockchain and checks that /// the tx has been successfully included into the mempool of a validator /// /// In the case of errors in any of those stages, an error message is returned -pub async fn broadcast_tx( - address: TendermintAddress, +pub async fn broadcast_tx( + rpc_cli: &C, to_broadcast: &TxBroadcastData, -) -> Result { - let (tx, wrapper_tx_hash, decrypted_tx_hash) = match to_broadcast { - TxBroadcastData::Wrapper { - tx, - wrapper_hash, - decrypted_hash, - } => (tx, wrapper_hash, decrypted_hash), - _ => panic!("Cannot broadcast a dry-run transaction"), - }; - - tracing::debug!( - tendermint_rpc_address = ?address, - transaction = ?to_broadcast, - "Broadcasting transaction", - ); - let rpc_cli = HttpClient::new(address)?; - - // TODO: configure an explicit timeout value? we need to hack away at - // `tendermint-rs` for this, which is currently using a hard-coded 30s - // timeout. - let response = rpc_cli.broadcast_tx_sync(tx.to_bytes().into()).await?; - - if response.code == 0.into() { - println!("Transaction added to mempool: {:?}", response); - // Print the transaction identifiers to enable the extraction of - // acceptance/application results later - { - println!("Wrapper transaction hash: {:?}", wrapper_tx_hash); - println!("Inner transaction hash: {:?}", decrypted_tx_hash); - } - Ok(response) - } else { - Err(RpcError::server(serde_json::to_string(&response).unwrap())) - } +) -> Result { + tx::broadcast_tx(rpc_cli, to_broadcast).await } /// Broadcast a transaction to be included in the blockchain. @@ -2755,72 +1040,9 @@ pub async fn broadcast_tx( /// 3. The decrypted payload of the tx has been included on the blockchain. 
/// /// In the case of errors in any of those stages, an error message is returned -pub async fn submit_tx( - address: TendermintAddress, +pub async fn submit_tx( + client: &C, to_broadcast: TxBroadcastData, -) -> Result { - let (_, wrapper_hash, decrypted_hash) = match &to_broadcast { - TxBroadcastData::Wrapper { - tx, - wrapper_hash, - decrypted_hash, - } => (tx, wrapper_hash, decrypted_hash), - _ => panic!("Cannot broadcast a dry-run transaction"), - }; - - // Broadcast the supplied transaction - broadcast_tx(address.clone(), &to_broadcast).await?; - - let max_wait_time = Duration::from_secs( - env::var(ENV_VAR_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS) - .ok() - .and_then(|val| val.parse().ok()) - .unwrap_or(DEFAULT_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS), - ); - let deadline = Instant::now() + max_wait_time; - - tracing::debug!( - tendermint_rpc_address = ?address, - transaction = ?to_broadcast, - ?deadline, - "Awaiting transaction approval", - ); - - let parsed = { - let wrapper_query = rpc::TxEventQuery::Accepted(wrapper_hash.as_str()); - let event = - rpc::query_tx_status(wrapper_query, address.clone(), deadline) - .await; - let parsed = TxResponse::from_event(event); - - println!( - "Transaction accepted with result: {}", - serde_json::to_string_pretty(&parsed).unwrap() - ); - // The transaction is now on chain. We wait for it to be decrypted - // and applied - if parsed.code == 0.to_string() { - // We also listen to the event emitted when the encrypted - // payload makes its way onto the blockchain - let decrypted_query = - rpc::TxEventQuery::Applied(decrypted_hash.as_str()); - let event = - rpc::query_tx_status(decrypted_query, address, deadline).await; - let parsed = TxResponse::from_event(event); - println!( - "Transaction applied with result: {}", - serde_json::to_string_pretty(&parsed).unwrap() - ); - Ok(parsed) - } else { - Ok(parsed) - } - }; - - tracing::debug!( - transaction = ?to_broadcast, - "Transaction approved", - ); - - parsed +) -> Result { + tx::submit_tx(client, to_broadcast).await } diff --git a/apps/src/lib/client/types.rs b/apps/src/lib/client/types.rs deleted file mode 100644 index d75d5a596c9..00000000000 --- a/apps/src/lib/client/types.rs +++ /dev/null @@ -1,96 +0,0 @@ -use async_trait::async_trait; -use masp_primitives::merkle_tree::MerklePath; -use masp_primitives::primitives::{Diversifier, Note, ViewingKey}; -use masp_primitives::sapling::Node; -use masp_primitives::transaction::components::Amount; -use namada::types::address::Address; -use namada::types::masp::{TransferSource, TransferTarget}; -use namada::types::storage::Epoch; -use namada::types::transaction::GasLimit; -use namada::types::{key, token}; - -use super::rpc; -use crate::cli::{args, Context}; -use crate::client::tx::Conversions; -use crate::facade::tendermint_config::net::Address as TendermintAddress; - -#[derive(Clone, Debug)] -pub struct ParsedTxArgs { - /// Simulate applying the transaction - pub dry_run: bool, - /// Dump the transaction bytes - pub dump_tx: bool, - /// Submit the transaction even if it doesn't pass client checks - pub force: bool, - /// Do not wait for the transaction to be added to the blockchain - pub broadcast_only: bool, - /// The address of the ledger node as host:port - pub ledger_address: TendermintAddress, - /// If any new account is initialized by the tx, use the given alias to - /// save it in the wallet. 
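Both the old inline `submit_tx` (removed above) and its replacement bound how long the client waits for the acceptance and application events using an environment variable with a fallback default. A condensed sketch of that parse-with-fallback idiom; the literal variable name and the 60-second default are assumptions, since the diff only shows the constants' identifiers:

```rust
use std::env;
use std::time::Duration;

// Assumed values: the diff references these constants by name without
// showing their definitions.
const ENV_VAR_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS: &str =
    "NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS";
const DEFAULT_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS: u64 = 60;

/// An unset or unparsable variable silently falls back to the default,
/// mirroring the `ok().and_then(parse).unwrap_or(default)` chain above.
fn max_wait_time() -> Duration {
    Duration::from_secs(
        env::var(ENV_VAR_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS)
            .ok()
            .and_then(|val| val.parse().ok())
            .unwrap_or(DEFAULT_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS),
    )
}

fn main() {
    println!("will wait up to {:?} for tx events", max_wait_time());
}
```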
- pub initialized_account_alias: Option, - /// The amount being payed to include the transaction - pub fee_amount: token::Amount, - /// The token in which the fee is being paid - pub fee_token: Address, - /// The max amount of gas used to process tx - pub gas_limit: GasLimit, - /// Sign the tx with the key for the given alias from your wallet - pub signing_key: Option, - /// Sign the tx with the keypair of the public key of the given address - pub signer: Option
, -} - -#[derive(Clone, Debug)] -pub struct ParsedTxTransferArgs { - /// Common tx arguments - pub tx: ParsedTxArgs, - /// Transfer source address - pub source: TransferSource, - /// Transfer target address - pub target: TransferTarget, - /// Transferred token address - pub token: Address, - /// Transferred token amount - pub amount: token::Amount, -} - -#[async_trait(?Send)] -pub trait ShieldedTransferContext { - async fn collect_unspent_notes( - &mut self, - ledger_address: TendermintAddress, - vk: &ViewingKey, - target: Amount, - target_epoch: Epoch, - ) -> ( - Amount, - Vec<(Diversifier, Note, MerklePath)>, - Conversions, - ); - - async fn query_epoch(&self, ledger_address: TendermintAddress) -> Epoch; -} - -#[async_trait(?Send)] -impl ShieldedTransferContext for Context { - async fn collect_unspent_notes( - &mut self, - ledger_address: TendermintAddress, - vk: &ViewingKey, - target: Amount, - target_epoch: Epoch, - ) -> ( - Amount, - Vec<(Diversifier, Note, MerklePath)>, - Conversions, - ) { - self.shielded - .collect_unspent_notes(ledger_address, vk, target, target_epoch) - .await - } - - async fn query_epoch(&self, ledger_address: TendermintAddress) -> Epoch { - rpc::query_and_print_epoch(args::Query { ledger_address }).await - } -} diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs index 21fed46c116..4655b42f2f1 100644 --- a/apps/src/lib/client/utils.rs +++ b/apps/src/lib/client/utils.rs @@ -10,6 +10,7 @@ use borsh::BorshSerialize; use flate2::read::GzDecoder; use flate2::write::GzEncoder; use flate2::Compression; +use namada::ledger::wallet::Wallet; use namada::types::address; use namada::types::chain::ChainId; use namada::types::key::*; @@ -21,16 +22,16 @@ use serde_json::json; use sha2::{Digest, Sha256}; use crate::cli::context::ENV_VAR_WASM_DIR; -use crate::cli::{self, args}; +use crate::cli::{self, args, safe_exit}; use crate::config::genesis::genesis_config::{ - self, HexString, ValidatorPreGenesisConfig, + self, GenesisConfig, HexString, ValidatorPreGenesisConfig, }; use crate::config::global::GlobalConfig; use crate::config::{self, Config, TendermintMode}; use crate::facade::tendermint::node::Id as TendermintNodeId; use crate::facade::tendermint_config::net::Address as TendermintAddress; use crate::node::ledger::tendermint_node; -use crate::wallet::{pre_genesis, Wallet}; +use crate::wallet::{pre_genesis, read_and_confirm_pwd, CliWalletUtils}; use crate::wasm_loader; pub const NET_ACCOUNTS_DIR: &str = "setup"; @@ -107,13 +108,12 @@ pub async fn join_network( validator_alias_and_dir.map(|(validator_alias, pre_genesis_dir)| { ( validator_alias, - pre_genesis::ValidatorWallet::load(&pre_genesis_dir) - .unwrap_or_else(|err| { - eprintln!( - "Error loading validator pre-genesis wallet {err}", - ); - cli::safe_exit(1) - }), + pre_genesis::load(&pre_genesis_dir).unwrap_or_else(|err| { + eprintln!( + "Error loading validator pre-genesis wallet {err}", + ); + cli::safe_exit(1) + }), ) }); @@ -260,10 +260,22 @@ pub async fn join_network( let genesis_file_path = base_dir.join(format!("{}.toml", chain_id.as_str())); - let mut wallet = Wallet::load_or_new_from_genesis( - &chain_dir, - genesis_config::open_genesis_config(genesis_file_path).unwrap(), - ); + let genesis_config = + genesis_config::open_genesis_config(genesis_file_path).unwrap(); + + if !is_valid_validator_for_current_chain( + &tendermint_node_key.ref_to(), + &genesis_config, + ) { + eprintln!( + "The current validator is not valid for chain {}.", + chain_id.as_str() + ); + safe_exit(1) + } + + let mut 
wallet = + crate::wallet::load_or_new_from_genesis(&chain_dir, genesis_config); let address = wallet .find_address(&validator_alias) @@ -301,7 +313,7 @@ pub async fn join_network( pre_genesis_wallet, ); - wallet.save().unwrap(); + crate::wallet::save(&wallet).unwrap(); // Update the config from the default non-validator settings to // validator settings @@ -480,7 +492,7 @@ pub fn init_network( // Generate the consensus, account and reward keys, unless they're // pre-defined. - let mut wallet = Wallet::load_or_new(&chain_dir); + let mut wallet = crate::wallet::load_or_new(&chain_dir); let consensus_pk = try_parse_public_key( format!("validator {name} consensus key"), @@ -489,11 +501,9 @@ pub fn init_network( .unwrap_or_else(|| { let alias = format!("{}-consensus-key", name); println!("Generating validator {} consensus key...", name); - let (_alias, keypair) = wallet.gen_key( - SchemeType::Ed25519, - Some(alias), - unsafe_dont_encrypt, - ); + let password = read_and_confirm_pwd(unsafe_dont_encrypt); + let (_alias, keypair) = + wallet.gen_key(SchemeType::Ed25519, Some(alias), password, true); // Write consensus key for Tendermint tendermint_node::write_validator_key(&tm_home_dir, &keypair); @@ -508,11 +518,9 @@ pub fn init_network( .unwrap_or_else(|| { let alias = format!("{}-account-key", name); println!("Generating validator {} account key...", name); - let (_alias, keypair) = wallet.gen_key( - SchemeType::Ed25519, - Some(alias), - unsafe_dont_encrypt, - ); + let password = read_and_confirm_pwd(unsafe_dont_encrypt); + let (_alias, keypair) = + wallet.gen_key(SchemeType::Ed25519, Some(alias), password, true); keypair.ref_to() }); @@ -523,11 +531,9 @@ pub fn init_network( .unwrap_or_else(|| { let alias = format!("{}-protocol-key", name); println!("Generating validator {} protocol signing key...", name); - let (_alias, keypair) = wallet.gen_key( - SchemeType::Ed25519, - Some(alias), - unsafe_dont_encrypt, - ); + let password = read_and_confirm_pwd(unsafe_dont_encrypt); + let (_alias, keypair) = + wallet.gen_key(SchemeType::Ed25519, Some(alias), password, true); keypair.ref_to() }); @@ -547,12 +553,12 @@ pub fn init_network( name ); - let validator_keys = wallet - .gen_validator_keys( - Some(protocol_pk.clone()), - SchemeType::Ed25519, - ) - .expect("Generating new validator keys should not fail"); + let validator_keys = crate::wallet::gen_validator_keys( + &mut wallet, + Some(protocol_pk.clone()), + SchemeType::Ed25519, + ) + .expect("Generating new validator keys should not fail"); let pk = validator_keys.dkg_keypair.as_ref().unwrap().public(); wallet.add_validator_data(address.clone(), validator_keys); pk @@ -570,14 +576,14 @@ pub fn init_network( Some(genesis_config::HexString(dkg_pk.to_string())); // Write keypairs to wallet - wallet.add_address(name.clone(), address); + wallet.add_address(name.clone(), address, true); - wallet.save().unwrap(); + crate::wallet::save(&wallet).unwrap(); }); // Create a wallet for all accounts other than validators let mut wallet = - Wallet::load_or_new(&accounts_dir.join(NET_OTHER_ACCOUNTS_DIR)); + crate::wallet::load_or_new(&accounts_dir.join(NET_OTHER_ACCOUNTS_DIR)); if let Some(established) = &mut config.established { established.iter_mut().for_each(|(name, config)| { init_established_account( @@ -593,7 +599,7 @@ pub fn init_network( if config.address.is_none() { let address = address::gen_established_address("token"); config.address = Some(address.to_string()); - wallet.add_address(name.clone(), address); + wallet.add_address(name.clone(), address, 
true); } if config.vp.is_none() { config.vp = Some("vp_token".to_string()); @@ -607,10 +613,12 @@ pub fn init_network( "Generating implicit account {} key and address ...", name ); + let password = read_and_confirm_pwd(unsafe_dont_encrypt); let (_alias, keypair) = wallet.gen_key( SchemeType::Ed25519, Some(name.clone()), - unsafe_dont_encrypt, + password, + true, ); let public_key = genesis_config::HexString(keypair.ref_to().to_string()); @@ -643,8 +651,8 @@ pub fn init_network( genesis_config::write_genesis_config(&config_clean, &genesis_path); // Add genesis addresses and save the wallet with other account keys - wallet.add_genesis_addresses(config_clean.clone()); - wallet.save().unwrap(); + crate::wallet::add_genesis_addresses(&mut wallet, config_clean.clone()); + crate::wallet::save(&wallet).unwrap(); // Write the global config setting the default chain ID let global_config = GlobalConfig::new(chain_id.clone()); @@ -693,9 +701,9 @@ pub fn init_network( ); global_config.write(validator_dir).unwrap(); // Add genesis addresses to the validator's wallet - let mut wallet = Wallet::load_or_new(&validator_chain_dir); - wallet.add_genesis_addresses(config_clean.clone()); - wallet.save().unwrap(); + let mut wallet = crate::wallet::load_or_new(&validator_chain_dir); + crate::wallet::add_genesis_addresses(&mut wallet, config_clean.clone()); + crate::wallet::save(&wallet).unwrap(); }); // Generate the validators' ledger config @@ -846,21 +854,23 @@ pub fn init_network( fn init_established_account( name: impl AsRef, - wallet: &mut Wallet, + wallet: &mut Wallet, config: &mut genesis_config::EstablishedAccountConfig, unsafe_dont_encrypt: bool, ) { if config.address.is_none() { let address = address::gen_established_address("established"); config.address = Some(address.to_string()); - wallet.add_address(&name, address); + wallet.add_address(&name, address, true); } if config.public_key.is_none() { println!("Generating established account {} key...", name.as_ref()); + let password = read_and_confirm_pwd(unsafe_dont_encrypt); let (_alias, keypair) = wallet.gen_key( SchemeType::Ed25519, Some(format!("{}-key", name.as_ref())), - unsafe_dont_encrypt, + password, + true, ); let public_key = genesis_config::HexString(keypair.ref_to().to_string()); @@ -871,6 +881,14 @@ fn init_established_account( } } +pub fn pk_to_tm_address( + _global_args: args::Global, + args::PkToTmAddress { public_key }: args::PkToTmAddress, +) { + let tm_addr = tm_consensus_key_raw_hash(&public_key); + println!("{tm_addr}"); +} + /// Initialize genesis validator's address, consensus key and validator account /// key and use it in the ledger's node. 
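The new `pk_to_tm_address` helper above prints the Tendermint address derived from a consensus public key via `tm_consensus_key_raw_hash`. For ed25519 keys, Tendermint defines that address as the first 20 bytes of the SHA-256 of the raw public key, hex-encoded in upper case; a standalone sketch of that convention (this mirrors the Tendermint spec, not necessarily the helper's exact implementation):

```rust
use sha2::{Digest, Sha256};

/// Tendermint-style node address for an ed25519 consensus key:
/// upper-case hex of the first 20 bytes of SHA-256(pubkey).
fn tm_address_from_ed25519_pk(pk_bytes: &[u8; 32]) -> String {
    let digest = Sha256::digest(pk_bytes);
    digest[..20].iter().map(|b| format!("{:02X}", b)).collect()
}

fn main() {
    let dummy_pk = [0u8; 32];
    println!("{}", tm_address_from_ed25519_pk(&dummy_pk));
}
```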
pub fn init_genesis_validator( @@ -904,7 +922,7 @@ pub fn init_genesis_validator( let pre_genesis_dir = validator_pre_genesis_dir(&global_args.base_dir, &alias); println!("Generating validator keys..."); - let pre_genesis = pre_genesis::ValidatorWallet::gen_and_store( + let pre_genesis = pre_genesis::gen_and_store( key_scheme, unsafe_dont_encrypt, &pre_genesis_dir, @@ -1058,3 +1076,16 @@ pub fn validator_pre_genesis_file(pre_genesis_path: &Path) -> PathBuf { pub fn validator_pre_genesis_dir(base_dir: &Path, alias: &str) -> PathBuf { base_dir.join(PRE_GENESIS_DIR).join(alias) } + +fn is_valid_validator_for_current_chain( + tendermint_node_pk: &common::PublicKey, + genesis_config: &GenesisConfig, +) -> bool { + genesis_config.validator.iter().any(|(_alias, config)| { + if let Some(tm_node_key) = &config.tendermint_node_key { + tm_node_key.0.eq(&tendermint_node_pk.to_string()) + } else { + false + } + }) +} diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index 5806b96a43d..6b029b64071 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -889,7 +889,7 @@ pub fn genesis(base_dir: impl AsRef, chain_id: &ChainId) -> Genesis { } #[cfg(feature = "dev")] pub fn genesis(num_validators: u64) -> Genesis { - use namada::types::address; + use namada::types::address::{self}; use rust_decimal_macros::dec; use crate::wallet; @@ -1032,7 +1032,7 @@ pub fn genesis(num_validators: u64) -> Genesis { balances.insert((&validator.account_key).into(), default_key_tokens); } - let token_accounts = address::tokens() + let token_accounts = address::masp_rewards() .into_keys() .map(|address| TokenAccount { address, diff --git a/apps/src/lib/config/mod.rs b/apps/src/lib/config/mod.rs index 811289c790a..2df988e298c 100644 --- a/apps/src/lib/config/mod.rs +++ b/apps/src/lib/config/mod.rs @@ -10,7 +10,9 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::{Path, PathBuf}; use std::str::FromStr; +use directories::ProjectDirs; use namada::types::chain::ChainId; +use namada::types::storage::BlockHeight; use namada::types::time::Rfc3339String; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -67,6 +69,27 @@ impl From for TendermintMode { } } +/// An action to be performed at a +/// certain block height. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum Action { + /// Stop the chain. + Halt, + /// Suspend consensus indefinitely. + Suspend, +} + +/// An action to be performed at a +/// certain block height along with the +/// given height. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ActionAtHeight { + /// The height at which to take action. + pub height: BlockHeight, + /// The action to take. + pub action: Action, +} + #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Ledger { pub genesis_time: Rfc3339String, @@ -95,6 +118,8 @@ pub struct Shell { db_dir: PathBuf, /// Use the [`Ledger::tendermint_dir()`] method to read the value. tendermint_dir: PathBuf, + /// An optional action to take when a given blockheight is reached. 
+ pub action_at_height: Option, } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -118,6 +143,8 @@ pub struct Tendermint { pub instrumentation_prometheus: bool, pub instrumentation_prometheus_listen_addr: SocketAddr, pub instrumentation_namespace: String, + /// Toggle to enable tx indexing + pub tx_index: bool, } impl Ledger { @@ -142,6 +169,7 @@ impl Ledger { storage_read_past_height_limit: Some(3600), db_dir: DB_DIR.into(), tendermint_dir: TENDERMINT_DIR.into(), + action_at_height: None, }, tendermint: Tendermint { rpc_address: SocketAddr::new( @@ -164,6 +192,7 @@ impl Ledger { 26661, ), instrumentation_namespace: "namadan_tm".to_string(), + tx_index: false, }, } } @@ -348,6 +377,14 @@ impl Config { } } +pub fn get_default_namada_folder() -> PathBuf { + if let Some(project_dir) = ProjectDirs::from("", "", "Namada") { + project_dir.data_local_dir().to_path_buf() + } else { + DEFAULT_BASE_DIR.into() + } +} + pub const VALUE_AFTER_TABLE_ERROR_MSG: &str = r#" Error while serializing to toml. It means that some nested structure is followed by simple fields. diff --git a/apps/src/lib/node/ledger/mod.rs b/apps/src/lib/node/ledger/mod.rs index 85875187e65..39a8460ac2b 100644 --- a/apps/src/lib/node/ledger/mod.rs +++ b/apps/src/lib/node/ledger/mod.rs @@ -211,6 +211,7 @@ pub fn dump_db( args::LedgerDumpDb { // block_height, out_file_path, + historic, }: args::LedgerDumpDb, ) { use namada::ledger::storage::DB; @@ -219,7 +220,12 @@ pub fn dump_db( let db_path = config.shell.db_dir(&chain_id); let db = storage::PersistentDB::open(db_path, None); - db.dump_last_block(out_file_path); + db.dump_last_block(out_file_path, historic); +} + +/// Roll Namada state back to the previous height +pub fn rollback(config: config::Ledger) -> Result<(), shell::Error> { + shell::rollback(config) } /// Runs and monitors a few concurrent tasks. @@ -442,8 +448,7 @@ fn start_abci_broadcaster_shell( // Setup DB cache, it must outlive the DB instance that's in the shell let db_cache = - rocksdb::Cache::new_lru_cache(db_block_cache_size_bytes as usize) - .unwrap(); + rocksdb::Cache::new_lru_cache(db_block_cache_size_bytes as usize); // Construct our ABCI application. let tendermint_mode = config.tendermint.tendermint_mode.clone(); @@ -452,7 +457,7 @@ fn start_abci_broadcaster_shell( let genesis = genesis::genesis(&config.shell.base_dir, &config.chain_id); #[cfg(feature = "dev")] let genesis = genesis::genesis(1); - let (shell, abci_service) = AbcippShim::new( + let (shell, abci_service, service_handle) = AbcippShim::new( config, wasm_dir, broadcaster_sender, @@ -468,8 +473,13 @@ fn start_abci_broadcaster_shell( // Start the ABCI server let abci = spawner .spawn_abortable("ABCI", move |aborter| async move { - let res = - run_abci(abci_service, ledger_address, abci_abort_recv).await; + let res = run_abci( + abci_service, + service_handle, + ledger_address, + abci_abort_recv, + ) + .await; drop(aborter); res @@ -502,6 +512,7 @@ fn start_abci_broadcaster_shell( /// mempool, snapshot, and info. async fn run_abci( abci_service: AbciService, + service_handle: tokio::sync::broadcast::Sender<()>, ledger_address: SocketAddr, abort_recv: tokio::sync::oneshot::Receiver<()>, ) -> shell::Result<()> { @@ -528,13 +539,13 @@ async fn run_abci( ) .finish() .unwrap(); - tokio::select! 
{ // Run the server with the ABCI service status = server.listen(ledger_address) => { status.map_err(|err| Error::TowerServer(err.to_string())) }, resp_sender = abort_recv => { + _ = service_handle.send(()); match resp_sender { Ok(()) => { tracing::info!("Shutting down ABCI server..."); diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc.rs b/apps/src/lib/node/ledger/shell/block_space_alloc.rs new file mode 100644 index 00000000000..ff06740df02 --- /dev/null +++ b/apps/src/lib/node/ledger/shell/block_space_alloc.rs @@ -0,0 +1,476 @@ +//! Primitives that facilitate keeping track of the number +//! of bytes utilized by some Tendermint consensus round's proposal. +//! +//! This is important, because Tendermint places an upper bound +//! on the size of a block, rejecting blocks whose size exceeds +//! the limit stated in [`RequestPrepareProposal`]. +//! +//! The code in this module doesn't perform any deserializing to +//! verify if we are, in fact, allocating space for the correct +//! kind of tx for the current [`BlockSpaceAllocator`] state. It +//! is up to `PrepareProposal` to dispatch the correct kind of tx +//! into the current state of the allocator. +//! +//! # How space is allocated +//! +//! In the current implementation, we allocate space for transactions +//! in the following order of preference: +//! +//! - First, we allot space for DKG encrypted txs. We allow DKG encrypted txs to +//! take up at most 1/3 of the total block space. +//! - Next, we allot space for DKG decrypted txs. Decrypted txs take up as much +//! space as needed. We will see, shortly, why in practice this is fine. +//! - Finally, we allot space for protocol txs. Protocol txs get half of the +//! remaining block space allotted to them. +//! +//! Since at some fixed height `H` decrypted txs only take up as +//! much space as the encrypted txs from height `H - 1`, and we +//! restrict the space of encrypted txs to at most 1/3 of the +//! total block space, we roughly divide the Tendermint block +//! space in 3, for each major type of tx. + +pub mod states; + +// TODO: what if a tx has a size greater than the threshold for +// its bin? how do we handle this? if we keep it in the mempool +// forever, it'll be a DoS vector, as we can make nodes run out of +// memory! maybe we should allow block decisions for txs that are +// too big to fit in their respective bin? in these special block +// decisions, we would only decide proposals with "large" txs?? +// +// MAYBE: in the state machine impl, reset to beginning state, and +// alloc space for large tx right at the start. the problem with +// this is that then we may not have enough space for decrypted txs + +// TODO: panic if we don't have enough space reserved for a +// decrypted tx; in theory, we should always have enough space +// reserved for decrypted txs, given the invariants of the state +// machine + +// TODO: refactor our measure of space to also reflect gas costs. +// the total gas of all chosen txs cannot exceed the configured max +// gas per block, otherwise a proposal will be rejected! + +use std::marker::PhantomData; + +use namada::core::ledger::storage::{self, WlStorage}; + +use namada::proof_of_stake::pos_queries::PosQueries; + +#[allow(unused_imports)] +use crate::facade::tendermint_proto::abci::RequestPrepareProposal; + +/// Block space allocation failure status responses. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum AllocFailure { + /// The transaction can only be included in an upcoming block.
+ /// + /// We return the space left in the tx bin for logging purposes. + Rejected { bin_space_left: u64 }, + /// The transaction would overflow the allotted bin space, + /// therefore it needs to be handled separately. + /// + /// We return the size of the tx bin for logging purposes. + OverflowsBin { bin_size: u64 }, +} + +/// Allotted space for a batch of transactions in some proposed block, +/// measured in bytes. +/// +/// We keep track of the current space utilized by: +/// +/// - DKG encrypted transactions. +/// - DKG decrypted transactions. +/// - Protocol transactions. +#[derive(Debug, Default)] +pub struct BlockSpaceAllocator { + /// The current state of the [`BlockSpaceAllocator`] state machine. + _state: PhantomData<*const State>, + /// The total space Tendermint has allotted to the + /// application for the current block height. + block: TxBin, + /// The current space utilized by protocol transactions. + protocol_txs: TxBin, + /// The current space utilized by DKG encrypted transactions. + encrypted_txs: TxBin, + /// The current space utilized by DKG decrypted transactions. + decrypted_txs: TxBin, +} + +impl From<&WlStorage> + for BlockSpaceAllocator> +where + D: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, + H: 'static + storage::StorageHasher, +{ + #[inline] + fn from(storage: &WlStorage) -> Self { + Self::init(storage.pos_queries().get_max_proposal_bytes().get()) + } +} + +impl BlockSpaceAllocator> { + /// Construct a new [`BlockSpaceAllocator`], with an upper bound + /// on the max size of all txs in a block defined by Tendermint. + #[inline] + pub fn init(tendermint_max_block_space_in_bytes: u64) -> Self { + let max = tendermint_max_block_space_in_bytes; + Self { + _state: PhantomData, + block: TxBin::init(max), + protocol_txs: TxBin::default(), + encrypted_txs: TxBin::init_over_ratio(max, threshold::ONE_THIRD), + decrypted_txs: TxBin::default(), + } + } +} + +impl BlockSpaceAllocator { + /// Return the amount of space left to initialize in all + /// [`TxBin`] instances. + /// + /// This is calculated based on the difference between the Tendermint + /// block space for a given round and the sum of the allotted space + /// to each [`TxBin`] instance in a [`BlockSpaceAllocator`]. + #[inline] + fn uninitialized_space_in_bytes(&self) -> u64 { + let total_bin_space = self.protocol_txs.allotted_space_in_bytes + + self.encrypted_txs.allotted_space_in_bytes + + self.decrypted_txs.allotted_space_in_bytes; + self.block.allotted_space_in_bytes - total_bin_space + } +} + +/// Allotted space for a batch of transactions of the same kind in some +/// proposed block, measured in bytes. +#[derive(Debug, Copy, Clone, Default)] +pub struct TxBin { + /// The current space utilized by the batch of transactions. + occupied_space_in_bytes: u64, + /// The maximum space the batch of transactions may occupy. + allotted_space_in_bytes: u64, +} + +impl TxBin { + /// Return a new [`TxBin`] with a total allotted space equal to the + /// floor of the fraction `frac` of the available block space `max_bytes`. + #[inline] + pub fn init_over_ratio(max_bytes: u64, frac: threshold::Threshold) -> Self { + let allotted_space_in_bytes = frac.over(max_bytes); + Self { + allotted_space_in_bytes, + occupied_space_in_bytes: 0, + } + } + + /// Return the amount of space left in this [`TxBin`]. + #[inline] + pub fn space_left_in_bytes(&self) -> u64 { + self.allotted_space_in_bytes - self.occupied_space_in_bytes + } + + /// Construct a new [`TxBin`], with a capacity of `max_bytes`. 
+ #[inline] + pub fn init(max_bytes: u64) -> Self { + Self { + allotted_space_in_bytes: max_bytes, + occupied_space_in_bytes: 0, + } + } + + /// Shrink the allotted space of this [`TxBin`] to whatever + /// space is currently being utilized. + #[inline] + pub fn shrink_to_fit(&mut self) { + self.allotted_space_in_bytes = self.occupied_space_in_bytes; + } + + /// Try to dump a new transaction into this [`TxBin`]. + /// + /// Signal the caller if the tx is larger than its max + /// allotted bin space. + pub fn try_dump(&mut self, tx: &[u8]) -> Result<(), AllocFailure> { + let tx_len = tx.len() as u64; + if tx_len > self.allotted_space_in_bytes { + let bin_size = self.allotted_space_in_bytes; + return Err(AllocFailure::OverflowsBin { bin_size }); + } + let occupied = self.occupied_space_in_bytes + tx_len; + if occupied <= self.allotted_space_in_bytes { + self.occupied_space_in_bytes = occupied; + Ok(()) + } else { + let bin_space_left = self.space_left_in_bytes(); + Err(AllocFailure::Rejected { bin_space_left }) + } + } +} + +pub mod threshold { + //! Transaction allotment thresholds. + + use num_rational::Ratio; + + /// Threshold over a portion of block space. + #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] + pub struct Threshold(Ratio<u64>); + + impl Threshold { + /// Return a new [`Threshold`]. + const fn new(numer: u64, denom: u64) -> Self { + // constrain ratio to a max of 1 + let numer = if numer > denom { denom } else { numer }; + Self(Ratio::new_raw(numer, denom)) + } + + /// Return a [`Threshold`] over some free space. + pub fn over(self, free_space_in_bytes: u64) -> u64 { + (self.0 * free_space_in_bytes).to_integer() + } + } + + /// Divide free space in three. + pub const ONE_THIRD: Threshold = Threshold::new(1, 3); +} + +#[cfg(test)] +mod tests { + use std::cell::RefCell; + + use assert_matches::assert_matches; + use proptest::prelude::*; + + use super::states::{ + BuildingEncryptedTxBatch, NextState, TryAlloc, WithEncryptedTxs, + WithoutEncryptedTxs, + }; + use super::*; + use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; + + /// Convenience alias for a block space allocator at a state with encrypted + /// txs. + type BsaWrapperTxs = + BlockSpaceAllocator<BuildingEncryptedTxBatch<WithEncryptedTxs>>; + + /// Convenience alias for a block space allocator at a state without + /// encrypted txs. + type BsaNoWrapperTxs = + BlockSpaceAllocator<BuildingEncryptedTxBatch<WithoutEncryptedTxs>>; + + /// Proptest generated txs. + #[derive(Debug)] + struct PropTx { + tendermint_max_block_space_in_bytes: u64, + protocol_txs: Vec<TxBytes>, + encrypted_txs: Vec<TxBytes>, + decrypted_txs: Vec<TxBytes>, + } + + /// Check that at most 1/3 of the block space is + /// reserved for each kind of tx type, in the + /// allocator's common path.
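To make the bin arithmetic above concrete, here is a small self-contained sketch. It uses only the `num_rational` crate that the `threshold` module already depends on; the bin bookkeeping is inlined for illustration rather than calling the real `TxBin`:

use num_rational::Ratio;

fn main() {
    // `Threshold::over` computes the floor of frac * max_bytes:
    let one_third: Ratio<u64> = Ratio::new_raw(1, 3);
    assert_eq!((one_third * 60u64).to_integer(), 20); // floor(60 / 3)
    assert_eq!((one_third * 100u64).to_integer(), 33); // floor(100 / 3), truncated

    // `TxBin::try_dump` then accepts txs until the bin is full:
    let bin_cap = (one_third * 60u64).to_integer(); // 20 bytes
    let mut occupied = 0u64;
    for tx_len in [8u64, 8, 8] {
        if occupied + tx_len <= bin_cap {
            occupied += tx_len; // the first two txs fit (8 + 8 = 16)
        } else {
            // the third tx is rejected: 16 + 8 = 24 > 20
            println!("rejected; space left: {}", bin_cap - occupied);
        }
    }
}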
+ #[test] + fn test_txs_are_evenly_split_across_block() { + const BLOCK_SIZE: u64 = 60; + + // reserve block space for encrypted txs + let mut alloc = BsaWrapperTxs::init(BLOCK_SIZE); + + // allocate ~1/3 of the block space to encrypted txs + assert!(alloc.try_alloc(&[0; 18]).is_ok()); + + // reserve block space for decrypted txs + let mut alloc = alloc.next_state(); + + // the space we allotted to encrypted txs was shrunk to + // the total space we actually used up + assert_eq!(alloc.encrypted_txs.allotted_space_in_bytes, 18); + + // check that the allotted space for decrypted txs is correct + assert_eq!( + alloc.decrypted_txs.allotted_space_in_bytes, + BLOCK_SIZE - 18 + ); + + // add about ~1/3 worth of decrypted txs + assert!(alloc.try_alloc(&[0; 17]).is_ok()); + + // reserve block space for protocol txs + let mut alloc = alloc.next_state(); + + // check that space was shrunk + assert_eq!( + alloc.protocol_txs.allotted_space_in_bytes, + BLOCK_SIZE - (18 + 17) + ); + + // add protocol txs to the block space allocator + assert!(alloc.try_alloc(&[0; 25]).is_ok()); + + // the block should be full at this point + assert_matches!( + alloc.try_alloc(&[0; 1]), + Err(AllocFailure::Rejected { .. }) + ); + } + + // Test that we cannot include encrypted txs in a block + // when the state invariants banish them from inclusion. + #[test] + fn test_encrypted_txs_are_rejected() { + let mut alloc = BsaNoWrapperTxs::init(1234); + assert_matches!( + alloc.try_alloc(&[0; 1]), + Err(AllocFailure::Rejected { .. }) + ); + } + + proptest! { + /// Check if we reject a tx when its respective bin + /// capacity has been reached on a [`BlockSpaceAllocator`]. + #[test] + fn test_reject_tx_on_bin_cap_reached(max in prop::num::u64::ANY) { + proptest_reject_tx_on_bin_cap_reached(max) + } + + /// Check if the initial bin capacity of the [`BlockSpaceAllocator`] + /// is correct. + #[test] + fn test_initial_bin_capacity(max in prop::num::u64::ANY) { + proptest_initial_bin_capacity(max) + } + + /// Test that dumping txs whose total combined size + /// is less than the bin cap does not fill up the bin. + #[test] + fn test_tx_dump_doesnt_fill_up_bin(args in arb_transactions()) { + proptest_tx_dump_doesnt_fill_up_bin(args) + } + } + + /// Implementation of [`test_reject_tx_on_bin_cap_reached`]. + fn proptest_reject_tx_on_bin_cap_reached( + tendermint_max_block_space_in_bytes: u64, + ) { + let mut bins = BsaWrapperTxs::init(tendermint_max_block_space_in_bytes); + + // fill the entire bin of encrypted txs + bins.encrypted_txs.occupied_space_in_bytes = + bins.encrypted_txs.allotted_space_in_bytes; + + // make sure we can't dump any new encrypted txs in the bin + assert_matches!( + bins.try_alloc(b"arbitrary tx bytes"), + Err(AllocFailure::Rejected { .. }) + ); + } + + /// Implementation of [`test_initial_bin_capacity`]. + fn proptest_initial_bin_capacity(tendermint_max_block_space_in_bytes: u64) { + let bins = BsaWrapperTxs::init(tendermint_max_block_space_in_bytes); + let expected = tendermint_max_block_space_in_bytes + - threshold::ONE_THIRD.over(tendermint_max_block_space_in_bytes); + assert_eq!(expected, bins.uninitialized_space_in_bytes()); + } + + /// Implementation of [`test_tx_dump_doesnt_fill_up_bin`]. + fn proptest_tx_dump_doesnt_fill_up_bin(args: PropTx) { + let PropTx { + tendermint_max_block_space_in_bytes, + protocol_txs, + encrypted_txs, + decrypted_txs, + } = args; + + // produce new txs until the moment we would have + // filled up the bins.
+ // + // iterate over the produced txs to make sure we can keep + // dumping new txs without filling up the bins + + let bins = RefCell::new(BsaWrapperTxs::init( + tendermint_max_block_space_in_bytes, + )); + let encrypted_txs = encrypted_txs.into_iter().take_while(|tx| { + let bin = bins.borrow().encrypted_txs; + let new_size = bin.occupied_space_in_bytes + tx.len() as u64; + new_size < bin.allotted_space_in_bytes + }); + for tx in encrypted_txs { + assert!(bins.borrow_mut().try_alloc(&tx).is_ok()); + } + + let bins = RefCell::new(bins.into_inner().next_state()); + let decrypted_txs = decrypted_txs.into_iter().take_while(|tx| { + let bin = bins.borrow().decrypted_txs; + let new_size = bin.occupied_space_in_bytes + tx.len() as u64; + new_size < bin.allotted_space_in_bytes + }); + for tx in decrypted_txs { + assert!(bins.borrow_mut().try_alloc(&tx).is_ok()); + } + + let bins = RefCell::new(bins.into_inner().next_state()); + let protocol_txs = protocol_txs.into_iter().take_while(|tx| { + let bin = bins.borrow().protocol_txs; + let new_size = bin.occupied_space_in_bytes + tx.len() as u64; + new_size < bin.allotted_space_in_bytes + }); + for tx in protocol_txs { + assert!(bins.borrow_mut().try_alloc(&tx).is_ok()); + } + } + + prop_compose! { + /// Generate arbitrarily sized txs of different kinds. + fn arb_transactions() + // create base strategies + ( + (tendermint_max_block_space_in_bytes, protocol_tx_max_bin_size, encrypted_tx_max_bin_size, + decrypted_tx_max_bin_size) in arb_max_bin_sizes(), + ) + // compose strategies + ( + tendermint_max_block_space_in_bytes in Just(tendermint_max_block_space_in_bytes), + protocol_txs in arb_tx_list(protocol_tx_max_bin_size), + encrypted_txs in arb_tx_list(encrypted_tx_max_bin_size), + decrypted_txs in arb_tx_list(decrypted_tx_max_bin_size), + ) + -> PropTx { + PropTx { + tendermint_max_block_space_in_bytes, + protocol_txs, + encrypted_txs, + decrypted_txs, + } + } + } + + /// Return random bin sizes for a [`BlockSpaceAllocator`]. + fn arb_max_bin_sizes() -> impl Strategy + { + const MAX_BLOCK_SIZE_BYTES: u64 = 1000; + (1..=MAX_BLOCK_SIZE_BYTES).prop_map( + |tendermint_max_block_space_in_bytes| { + ( + tendermint_max_block_space_in_bytes, + threshold::ONE_THIRD + .over(tendermint_max_block_space_in_bytes) + as usize, + threshold::ONE_THIRD + .over(tendermint_max_block_space_in_bytes) + as usize, + threshold::ONE_THIRD + .over(tendermint_max_block_space_in_bytes) + as usize, + ) + }, + ) + } + + /// Return a list of txs. + fn arb_tx_list(max_bin_size: usize) -> impl Strategy>> { + const MAX_TX_NUM: usize = 64; + let tx = prop::collection::vec(prop::num::u8::ANY, 0..=max_bin_size); + prop::collection::vec(tx, 0..=MAX_TX_NUM) + } +} diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states.rs new file mode 100644 index 00000000000..18712998e3c --- /dev/null +++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states.rs @@ -0,0 +1,119 @@ +//! All the states of the [`BlockSpaceAllocator`] state machine, +//! over the extent of a Tendermint consensus round +//! block proposal. +//! +//! # States +//! +//! The state machine moves through the following state DAG: +//! +//! 1. [`BuildingEncryptedTxBatch`] - the initial state. In +//! this state, we populate a block with DKG encrypted txs. +//! This state supports two modes of operation, which you can +//! think of as two sub-states: +//! * [`WithoutEncryptedTxs`] - When this mode is active, no encrypted txs are +//! 
included in a block proposal. +//! * [`WithEncryptedTxs`] - When this mode is active, we are able to include +//! encrypted txs in a block proposal. +//! 2. [`BuildingDecryptedTxBatch`] - the second state. In +//! this state, we populate a block with DKG decrypted txs. +//! 3. [`BuildingProtocolTxBatch`] - the third state. In +//! this state, we populate a block with protocol txs. + +mod decrypted_txs; +mod encrypted_txs; +mod protocol_txs; + +use super::{AllocFailure, BlockSpaceAllocator}; + +/// Convenience wrapper for a [`BlockSpaceAllocator`] state that allocates +/// encrypted transactions. +#[allow(dead_code)] +pub enum EncryptedTxBatchAllocator { + WithEncryptedTxs( + BlockSpaceAllocator<BuildingEncryptedTxBatch<WithEncryptedTxs>>, + ), + WithoutEncryptedTxs( + BlockSpaceAllocator<BuildingEncryptedTxBatch<WithoutEncryptedTxs>>, + ), +} + +/// The leader of the current Tendermint round is building +/// a new batch of DKG decrypted transactions. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub enum BuildingDecryptedTxBatch {} + +/// The leader of the current Tendermint round is building +/// a new batch of Namada protocol transactions. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub enum BuildingProtocolTxBatch {} + +/// The leader of the current Tendermint round is building +/// a new batch of DKG encrypted transactions. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub struct BuildingEncryptedTxBatch<Mode> { + /// One of [`WithEncryptedTxs`] and [`WithoutEncryptedTxs`]. + _mode: Mode, +} + +/// Allow block proposals to include encrypted txs. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub enum WithEncryptedTxs {} + +/// Prohibit block proposals from including encrypted txs. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub enum WithoutEncryptedTxs {} + +/// Try to allocate a new transaction on a [`BlockSpaceAllocator`] state. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub trait TryAlloc { + /// Try to allocate space for a new transaction. + fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure>; +} + +/// Represents a state transition in the [`BlockSpaceAllocator`] state machine. +/// +/// This trait should not be used directly. Instead, consider using one of +/// [`NextState`], [`NextStateWithEncryptedTxs`] or +/// [`NextStateWithoutEncryptedTxs`]. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub trait NextStateImpl { + /// The next state in the [`BlockSpaceAllocator`] state machine. + type Next; + + /// Transition to the next state in the [`BlockSpaceAllocator`] state + /// machine. + fn next_state_impl(self) -> Self::Next; +} + +/// Convenience extension of [`NextStateImpl`], to transition to a new +/// state with a null transition function. +/// +/// For more info, read the module docs of +/// [`crate::node::ledger::shell::prepare_proposal::block_space_alloc::states`]. +pub trait NextState: NextStateImpl { + /// Transition to the next state in the [`BlockSpaceAllocator`] state, + /// using a null transition function.
+ #[inline] + fn next_state(self) -> Self::Next + where + Self: Sized, + { + self.next_state_impl() + } +} + +impl NextState for S where S: NextStateImpl {} diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states/decrypted_txs.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states/decrypted_txs.rs new file mode 100644 index 00000000000..7fd15671a3b --- /dev/null +++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states/decrypted_txs.rs @@ -0,0 +1,43 @@ +use std::marker::PhantomData; + +use super::super::{AllocFailure, BlockSpaceAllocator, TxBin}; +use super::{ + BuildingDecryptedTxBatch, BuildingProtocolTxBatch, NextStateImpl, TryAlloc, +}; + +impl TryAlloc for BlockSpaceAllocator { + #[inline] + fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> { + self.decrypted_txs.try_dump(tx) + } +} + +impl NextStateImpl for BlockSpaceAllocator { + type Next = BlockSpaceAllocator; + + #[inline] + fn next_state_impl(mut self) -> Self::Next { + self.decrypted_txs.shrink_to_fit(); + + // the remaining space is allocated to protocol txs + let remaining_free_space = self.uninitialized_space_in_bytes(); + self.protocol_txs = TxBin::init(remaining_free_space); + + // cast state + let Self { + block, + protocol_txs, + encrypted_txs, + decrypted_txs, + .. + } = self; + + BlockSpaceAllocator { + _state: PhantomData, + block, + protocol_txs, + encrypted_txs, + decrypted_txs, + } + } +} diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states/encrypted_txs.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states/encrypted_txs.rs new file mode 100644 index 00000000000..f5fb2447ff1 --- /dev/null +++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states/encrypted_txs.rs @@ -0,0 +1,110 @@ +use std::marker::PhantomData; + +use super::super::{AllocFailure, BlockSpaceAllocator, TxBin}; +use super::{ + BuildingDecryptedTxBatch, BuildingEncryptedTxBatch, + EncryptedTxBatchAllocator, NextStateImpl, TryAlloc, WithEncryptedTxs, + WithoutEncryptedTxs, +}; + +impl TryAlloc + for BlockSpaceAllocator> +{ + #[inline] + fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> { + self.encrypted_txs.try_dump(tx) + } +} + +impl NextStateImpl + for BlockSpaceAllocator> +{ + type Next = BlockSpaceAllocator; + + #[inline] + fn next_state_impl(self) -> Self::Next { + next_state(self) + } +} + +impl TryAlloc + for BlockSpaceAllocator> +{ + #[inline] + fn try_alloc(&mut self, _tx: &[u8]) -> Result<(), AllocFailure> { + Err(AllocFailure::Rejected { bin_space_left: 0 }) + } +} + +impl NextStateImpl + for BlockSpaceAllocator> +{ + type Next = BlockSpaceAllocator; + + #[inline] + fn next_state_impl(self) -> Self::Next { + next_state(self) + } +} + +#[inline] +fn next_state( + mut alloc: BlockSpaceAllocator>, +) -> BlockSpaceAllocator { + alloc.encrypted_txs.shrink_to_fit(); + + // decrypted txs can use as much space as they need - which + // in practice will only be, at most, 1/3 of the block space + // used by encrypted txs at the prev height + let remaining_free_space = alloc.uninitialized_space_in_bytes(); + alloc.decrypted_txs = TxBin::init(remaining_free_space); + + // cast state + let BlockSpaceAllocator { + block, + protocol_txs, + encrypted_txs, + decrypted_txs, + .. 
+ } = alloc; + + BlockSpaceAllocator { + _state: PhantomData, + block, + protocol_txs, + encrypted_txs, + decrypted_txs, + } +} + +impl TryAlloc for EncryptedTxBatchAllocator { + #[inline] + fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> { + match self { + EncryptedTxBatchAllocator::WithEncryptedTxs(state) => { + state.try_alloc(tx) + } + EncryptedTxBatchAllocator::WithoutEncryptedTxs(state) => { + // NOTE: this operation will cause the allocator to + // run out of memory immediately + state.try_alloc(tx) + } + } + } +} + +impl NextStateImpl for EncryptedTxBatchAllocator { + type Next = BlockSpaceAllocator; + + #[inline] + fn next_state_impl(self) -> Self::Next { + match self { + EncryptedTxBatchAllocator::WithEncryptedTxs(state) => { + state.next_state_impl() + } + EncryptedTxBatchAllocator::WithoutEncryptedTxs(state) => { + state.next_state_impl() + } + } + } +} diff --git a/apps/src/lib/node/ledger/shell/block_space_alloc/states/protocol_txs.rs b/apps/src/lib/node/ledger/shell/block_space_alloc/states/protocol_txs.rs new file mode 100644 index 00000000000..bc31717ddde --- /dev/null +++ b/apps/src/lib/node/ledger/shell/block_space_alloc/states/protocol_txs.rs @@ -0,0 +1,9 @@ +use super::super::{AllocFailure, BlockSpaceAllocator}; +use super::{BuildingProtocolTxBatch, TryAlloc}; + +impl TryAlloc for BlockSpaceAllocator { + #[inline] + fn try_alloc(&mut self, tx: &[u8]) -> Result<(), AllocFailure> { + self.protocol_txs.try_dump(tx) + } +} diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index e9223996a9f..80bb7505ea5 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -2,15 +2,14 @@ use std::collections::HashMap; -use namada::ledger::inflation::{self, RewardsController}; +use data_encoding::HEXUPPER; use namada::ledger::parameters::storage as params_storage; use namada::ledger::pos::types::{decimal_mult_u64, into_tm_voting_power}; -use namada::ledger::pos::{ - namada_proof_of_stake, staking_token_address, ADDRESS as POS_ADDRESS, -}; -use namada::ledger::protocol; +use namada::ledger::pos::{namada_proof_of_stake, staking_token_address}; use namada::ledger::storage::EPOCH_SWITCH_BLOCKS_DELAY; +use namada::ledger::storage_api::token::credit_tokens; use namada::ledger::storage_api::{StorageRead, StorageWrite}; +use namada::ledger::{inflation, protocol, replay_protection}; use namada::proof_of_stake::{ delegator_rewards_products_handle, find_validator_by_raw_hash, read_last_block_proposer_address, read_pos_params, read_total_stake, @@ -19,15 +18,16 @@ use namada::proof_of_stake::{ write_last_block_proposer_address, }; use namada::types::address::Address; -#[cfg(feature = "abcipp")] -use namada::types::key::{tm_consensus_key_raw_hash, tm_raw_hash_to_string}; +use namada::types::key::tm_raw_hash_to_string; use namada::types::storage::{BlockHash, BlockResults, Epoch, Header}; use namada::types::token::{total_supply_key, Amount}; use rust_decimal::prelude::Decimal; use super::governance::execute_governance_proposals; use super::*; -use crate::facade::tendermint_proto::abci::Misbehavior as Evidence; +use crate::facade::tendermint_proto::abci::{ + Misbehavior as Evidence, VoteInfo, +}; use crate::facade::tendermint_proto::crypto::PublicKey as TendermintPublicKey; use crate::node::ledger::shell::stats::InternalStats; @@ -101,6 +101,10 @@ where )?; } + // Invariant: This has to be applied after + // `copy_validator_sets_and_positions` if we're starting a 
new epoch + self.slash(); + let wrapper_fees = self.get_wrapper_tx_fees(); let mut stats = InternalStats::default(); @@ -173,17 +177,49 @@ tx_event["gas_used"] = "0".into(); response.events.push(tx_event); // if the rejected tx was decrypted, remove it - // from the queue of txs to be processed + // from the queue of txs to be processed and remove the hash + // from storage if let TxType::Decrypted(_) = &tx_type { - self.wl_storage.storage.tx_queue.pop(); + let tx_hash = self + .wl_storage + .storage + .tx_queue + .pop() + .expect("Missing wrapper tx in queue") + .tx + .tx_hash; + let tx_hash_key = + replay_protection::get_tx_hash_key(&tx_hash); + self.wl_storage + .storage + .delete(&tx_hash_key) + .expect("Error while deleting tx hash from storage"); } continue; } - let mut tx_event = match &tx_type { + let (mut tx_event, tx_unsigned_hash) = match &tx_type { TxType::Wrapper(wrapper) => { let mut tx_event = Event::new_tx_event(&tx_type, height.0); + // Write both tx hashes to storage + let tx = Tx::try_from(processed_tx.tx.as_ref()).unwrap(); + let wrapper_tx_hash_key = + replay_protection::get_tx_hash_key(&hash::Hash( + tx.unsigned_hash(), + )); + self.wl_storage + .storage + .write(&wrapper_tx_hash_key, vec![]) + .expect("Error while writing tx hash to storage"); + + let inner_tx_hash_key = + replay_protection::get_tx_hash_key(&wrapper.tx_hash); + self.wl_storage + .storage + .write(&inner_tx_hash_key, vec![]) + .expect("Error while writing tx hash to storage"); + #[cfg(not(feature = "mainnet"))] let has_valid_pow = self.invalidate_pow_solution_if_valid(wrapper); @@ -244,11 +280,18 @@ where #[cfg(not(feature = "mainnet"))] has_valid_pow, }); - tx_event + (tx_event, None) } TxType::Decrypted(inner) => { // We remove the corresponding wrapper tx from the queue - self.wl_storage.storage.tx_queue.pop(); + let wrapper_hash = self + .wl_storage + .storage + .tx_queue + .pop() + .expect("Missing wrapper tx in queue") + .tx + .tx_hash; let mut event = Event::new_tx_event(&tx_type, height.0); match inner { @@ -267,8 +310,7 @@ where event["code"] = ErrorCodes::Undecryptable.into(); } } - - event + (event, Some(wrapper_hash)) } TxType::Raw(_) => { tracing::error!( @@ -320,7 +362,7 @@ where .results .accept(tx_index); } - if let Some(ibc_event) = &result.ibc_event { + for ibc_event in &result.ibc_events { // Add the IBC event besides the tx_event let event = Event::from(ibc_event.clone()); response.events.push(event); @@ -361,6 +403,25 @@ where msg ); stats.increment_errored_txs(); + + // If transaction type is Decrypted and failed because of + // out of gas, remove its hash from storage to allow + // rewrapping it + if let Some(hash) = tx_unsigned_hash { + if let Error::TxApply(protocol::Error::GasError(namada::ledger::gas::Error::TransactionGasExceededError)) = + msg + { + let tx_hash_key = + replay_protection::get_tx_hash_key(&hash); + self.wl_storage + .storage + .delete(&tx_hash_key) + .expect( + "Error while deleting tx hash key from storage", + ); + } + } + self.wl_storage.drop_tx(); tx_event["gas_used"] = self .gas_meter @@ -396,15 +457,16 @@ where tracing::debug!( "Found last block proposer: {proposer_address}" ); + let votes = pos_votes_from_abci(&self.wl_storage, &req.votes); namada_proof_of_stake::log_block_rewards( &mut self.wl_storage, if new_epoch { - current_epoch - 1 + current_epoch.prev() } else { current_epoch }, &proposer_address, - &req.votes, + votes, )?; } None => { @@ -483,11 +545,8 @@ where let new_epoch = self .wl_storage - .storage .update_epoch(height, header_time)
.expect("Must be able to update epoch"); - - self.slash(); (height, new_epoch) } @@ -501,39 +560,39 @@ where .expect("Could not find the PoS parameters"); // TODO ABCI validator updates on block H affects the validator set // on block H+2, do we need to update a block earlier? - // self.wl_storage.validator_set_update(current_epoch, |update| { - namada_proof_of_stake::validator_set_update_tendermint( - &self.wl_storage, - &pos_params, - current_epoch, - |update| { - let (consensus_key, power) = match update { - ValidatorSetUpdate::Consensus(ConsensusValidator { - consensus_key, - bonded_stake, - }) => { - let power: i64 = into_tm_voting_power( - pos_params.tm_votes_per_token, + response.validator_updates = + namada_proof_of_stake::validator_set_update_tendermint( + &self.wl_storage, + &pos_params, + current_epoch, + |update| { + let (consensus_key, power) = match update { + ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key, bonded_stake, - ); - (consensus_key, power) - } - ValidatorSetUpdate::Deactivated(consensus_key) => { - // Any validators that have been dropped from the - // consensus set must have voting power set to 0 to - // remove them from the conensus set - let power = 0_i64; - (consensus_key, power) - } - }; - let pub_key = TendermintPublicKey { - sum: Some(key_to_tendermint(&consensus_key).unwrap()), - }; - let pub_key = Some(pub_key); - let update = ValidatorUpdate { pub_key, power }; - response.validator_updates.push(update); - }, - ); + }) => { + let power: i64 = into_tm_voting_power( + pos_params.tm_votes_per_token, + bonded_stake, + ); + (consensus_key, power) + } + ValidatorSetUpdate::Deactivated(consensus_key) => { + // Any validators that have been dropped from the + // consensus set must have voting power set to 0 to + // remove them from the conensus set + let power = 0_i64; + (consensus_key, power) + } + }; + let pub_key = TendermintPublicKey { + sum: Some(key_to_tendermint(&consensus_key).unwrap()), + }; + let pub_key = Some(pub_key); + ValidatorUpdate { pub_key, power } + }, + ) + .expect("Must be able to update validator sets"); } /// Calculate the new inflation rate, mint the new tokens to the PoS @@ -568,7 +627,9 @@ where .expect("PoS inflation rate should exist in storage"); // Read from PoS storage let total_tokens = self - .read_storage_key(&total_supply_key(&staking_token_address())) + .read_storage_key(&total_supply_key(&staking_token_address( + &self.wl_storage, + ))) .expect("Total NAM balance should exist in storage"); let pos_locked_supply = read_total_stake(&self.wl_storage, ¶ms, last_epoch)?; @@ -585,43 +646,39 @@ where let masp_d_gain = Decimal::new(1, 1); // Run rewards PD controller - let pos_controller = inflation::RewardsController::new( - pos_locked_supply, + let pos_controller = inflation::RewardsController { + locked_tokens: pos_locked_supply, total_tokens, - pos_locked_ratio_target, - pos_last_staked_ratio, - pos_max_inflation_rate, - token::Amount::from(pos_last_inflation_amount), - pos_p_gain_nom, - pos_d_gain_nom, + locked_ratio_target: pos_locked_ratio_target, + locked_ratio_last: pos_last_staked_ratio, + max_reward_rate: pos_max_inflation_rate, + last_inflation_amount: token::Amount::from( + pos_last_inflation_amount, + ), + p_gain_nom: pos_p_gain_nom, + d_gain_nom: pos_d_gain_nom, epochs_per_year, - ); - let _masp_controller = inflation::RewardsController::new( - masp_locked_supply, + }; + let _masp_controller = inflation::RewardsController { + locked_tokens: masp_locked_supply, total_tokens, - 
masp_locked_ratio_target, - masp_locked_ratio_last, - masp_max_inflation_rate, - token::Amount::from(masp_last_inflation_rate), - masp_p_gain, - masp_d_gain, + locked_ratio_target: masp_locked_ratio_target, + locked_ratio_last: masp_locked_ratio_last, + max_reward_rate: masp_max_inflation_rate, + last_inflation_amount: token::Amount::from( + masp_last_inflation_rate, + ), + p_gain_nom: masp_p_gain, + d_gain_nom: masp_d_gain, epochs_per_year, - ); + }; // Run the rewards controllers let inflation::ValsToUpdate { locked_ratio, inflation, - } = RewardsController::run(&pos_controller); - // let new_masp_vals = RewardsController::run(&_masp_controller); - - // Mint tokens to the PoS account for the last epoch's inflation - inflation::mint_tokens( - &mut self.wl_storage, - &POS_ADDRESS, - &staking_token_address(), - Amount::from(inflation), - )?; + } = pos_controller.run(); + // let new_masp_vals = _masp_controller.run(); // Get the number of blocks in the last epoch let first_block_of_last_epoch = self @@ -700,14 +757,34 @@ where )?; } - // TODO: Figure out how to deal with round-off to a whole number of - // tokens. May be tricky. TODO: Storing reward products - // as a Decimal suggests that no round-off should be done here, - // TODO: perhaps only upon withdrawal. But by truncating at - // withdrawal, may leave tokens in TDOD: the PoS account - // that are not accounted for. Is this an issue? + let staking_token = staking_token_address(&self.wl_storage); + + // Mint tokens to the PoS account for the last epoch's inflation + let pos_reward_tokens = + Amount::from(inflation - reward_tokens_remaining); + tracing::info!( + "Minting tokens for PoS rewards distribution into the PoS \ + account. Amount: {pos_reward_tokens}.", + ); + credit_tokens( + &mut self.wl_storage, + &staking_token, + &address::POS, + pos_reward_tokens, + )?; + if reward_tokens_remaining > 0 { - // TODO: do something here? + let amount = Amount::from(reward_tokens_remaining); + tracing::info!( + "Minting tokens remaining from PoS rewards distribution into \ + the Governance account. Amount: {amount}.", + ); + credit_tokens( + &mut self.wl_storage, + &staking_token, + &address::GOV, + amount, + )?; } // Write new rewards parameters that will be used for the inflation of @@ -734,6 +811,75 @@ where } } +/// Convert ABCI vote info to PoS vote info. Any info which fails the conversion +/// will be skipped and errors logged. +/// +/// # Panics +/// Panics if a validator's address cannot be converted to native address +/// (either due to storage read error or the address not being found) or +/// if the voting power cannot be converted to u64. 
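Before the conversion function itself, a minimal sketch of the one detail it hinges on: the storage lookup is keyed on the upper-case hex encoding of the raw 20-byte ABCI validator address, produced with the same `data_encoding` crate the handler imports above. This sketch is for illustration only and is not part of the diff:

use data_encoding::HEXUPPER;

// Mirrors `HEXUPPER.encode(address)` as used in the conversion below.
fn raw_hash_string(abci_address: &[u8]) -> String {
    HEXUPPER.encode(abci_address)
}

// e.g. raw_hash_string(&[0xDE, 0xAD, 0xBE, 0xEF]) == "DEADBEEF"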
+fn pos_votes_from_abci( + storage: &impl StorageRead, + votes: &[VoteInfo], +) -> Vec { + votes + .iter() + .filter_map( + |VoteInfo { + validator, + signed_last_block, + }| { + if let Some( + crate::facade::tendermint_proto::abci::Validator { + address, + power, + }, + ) = validator + { + let tm_raw_hash_string = HEXUPPER.encode(address); + if *signed_last_block { + tracing::debug!( + "Looking up validator from Tendermint VoteInfo's \ + raw hash {tm_raw_hash_string}" + ); + + // Look-up the native address + let validator_address = find_validator_by_raw_hash( + storage, + &tm_raw_hash_string, + ) + .expect( + "Must be able to read from storage to find native \ + address of validator from tendermint raw hash", + ) + .expect( + "Must be able to find the native address of \ + validator from tendermint raw hash", + ); + + // Try to convert voting power to u64 + let validator_vp = u64::try_from(*power).expect( + "Must be able to convert voting power from i64 to \ + u64", + ); + + return Some(namada_proof_of_stake::types::VoteInfo { + validator_address, + validator_vp, + }); + } else { + tracing::debug!( + "Validator {tm_raw_hash_string} didn't sign last \ + block" + ) + } + } + None + }, + ) + .collect() +} + /// We test the failure cases of [`finalize_block`]. The happy flows /// are covered by the e2e tests. #[cfg(test)] @@ -743,7 +889,6 @@ mod test_finalize_block { use data_encoding::HEXUPPER; use namada::ledger::parameters::EpochDuration; - use namada::ledger::pos::types::VoteInfo; use namada::ledger::storage_api; use namada::proof_of_stake::btree_set::BTreeSetShims; use namada::proof_of_stake::types::WeightedValidator; @@ -753,21 +898,25 @@ mod test_finalize_block { validator_rewards_products_handle, }; use namada::types::governance::ProposalVote; + use namada::types::key::tm_consensus_key_raw_hash; use namada::types::storage::Epoch; use namada::types::time::DurationSecs; use namada::types::transaction::governance::{ - InitProposalData, VoteProposalData, + InitProposalData, ProposalType, VoteProposalData, }; use namada::types::transaction::{EncryptionKey, Fee, WrapperTx, MIN_FEE}; + use namada_test_utils::TestWasms; use rust_decimal_macros::dec; use test_log::test; use super::*; + use crate::facade::tendermint_proto::abci::{Validator}; use crate::node::ledger::shell::test_utils::*; use crate::node::ledger::shims::abcipp_shim_types::shim::request::{ FinalizeBlock, ProcessedTx, }; + use crate::facade::tendermint_proto::abci::VoteInfo; /// Check that if a wrapper tx was rejected by [`process_proposal`], /// check that the correct event is returned. 
Check that it does /// not appear in the queue of txs to be decrypted @@ -794,6 +943,8 @@ mod test_finalize_block { let raw_tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some(format!("transaction data: {}", i).as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -808,7 +959,9 @@ mod test_finalize_block { #[cfg(not(feature = "mainnet"))] None, ); - let tx = wrapper.sign(&keypair).expect("Test failed"); + let tx = wrapper + .sign(&keypair, shell.chain_id.clone(), None) + .expect("Test failed"); if i > 1 { processed_txs.push(ProcessedTx { tx: tx.to_bytes(), @@ -867,6 +1020,8 @@ mod test_finalize_block { let raw_tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some(String::from("transaction data").as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -980,7 +1135,7 @@ mod test_finalize_block { let mut processed_txs = vec![]; let mut valid_txs = vec![]; - // Add unshielded balance for fee paymenty + // Add unshielded balance for fee payment let balance_key = token::balance_key( &shell.wl_storage.storage.native_token, &Address::from(&keypair.ref_to()), @@ -992,10 +1147,7 @@ mod test_finalize_block { .unwrap(); // create two decrypted txs - let mut wasm_path = top_level_directory(); - wasm_path.push("wasm_for_tests/tx_no_op.wasm"); - let tx_code = std::fs::read(wasm_path) - .expect("Expected a file at given code path"); + let tx_code = TestWasms::TxNoOp.read_bytes(); for i in 0..2 { let raw_tx = Tx::new( tx_code.clone(), @@ -1004,6 +1156,8 @@ mod test_finalize_block { .as_bytes() .to_owned(), ), + shell.chain_id.clone(), + None, ); let wrapper_tx = WrapperTx::new( Fee { @@ -1041,6 +1195,8 @@ mod test_finalize_block { .as_bytes() .to_owned(), ), + shell.chain_id.clone(), + None, ); let wrapper_tx = WrapperTx::new( Fee { @@ -1055,7 +1211,9 @@ mod test_finalize_block { #[cfg(not(feature = "mainnet"))] None, ); - let wrapper = wrapper_tx.sign(&keypair).expect("Test failed"); + let wrapper = wrapper_tx + .sign(&keypair, shell.chain_id.clone(), None) + .expect("Test failed"); valid_txs.push(wrapper_tx); processed_txs.push(ProcessedTx { tx: wrapper.to_bytes(), @@ -1125,7 +1283,7 @@ mod test_finalize_block { min_duration: DurationSecs(0), }; namada::ledger::parameters::update_epoch_parameter( - &mut shell.wl_storage.storage, + &mut shell.wl_storage, &epoch_duration, ) .unwrap(); @@ -1144,7 +1302,7 @@ mod test_finalize_block { voting_start_epoch: Epoch::default(), voting_end_epoch: Epoch::default().next(), grace_epoch: Epoch::default().next(), - proposal_code: None, + r#type: ProposalType::Default(None), }; storage_api::governance::init_proposal( @@ -1166,11 +1324,14 @@ mod test_finalize_block { }; // Add a proposal to be accepted and one to be rejected. 
- add_proposal(0, ProposalVote::Yay); + add_proposal( + 0, + ProposalVote::Yay(namada::types::governance::VoteType::Default), + ); add_proposal(1, ProposalVote::Nay); // Commit the genesis state - shell.wl_storage.commit_genesis().unwrap(); + shell.wl_storage.commit_block().unwrap(); shell.commit(); // Collect all storage key-vals into a sorted map @@ -1211,9 +1372,12 @@ mod test_finalize_block { ) .unwrap() .unwrap(); + let votes = vec![VoteInfo { - validator_address: proposer_address.clone(), - validator_vp: u64::from(val_stake), + validator: Some(Validator { + address: proposer_address.clone(), + power: u64::from(val_stake) as i64, + }), signed_last_block: true, }]; @@ -1288,23 +1452,31 @@ mod test_finalize_block { // All validators sign blocks initially let votes = vec![ VoteInfo { - validator_address: pkh1.clone(), - validator_vp: u64::from(val1.bonded_stake), + validator: Some(Validator { + address: pkh1.clone(), + power: u64::from(val1.bonded_stake) as i64, + }), signed_last_block: true, }, VoteInfo { - validator_address: pkh2.clone(), - validator_vp: u64::from(val2.bonded_stake), + validator: Some(Validator { + address: pkh2.clone(), + power: u64::from(val2.bonded_stake) as i64, + }), signed_last_block: true, }, VoteInfo { - validator_address: pkh3.clone(), - validator_vp: u64::from(val3.bonded_stake), + validator: Some(Validator { + address: pkh3.clone(), + power: u64::from(val3.bonded_stake) as i64, + }), signed_last_block: true, }, VoteInfo { - validator_address: pkh4.clone(), - validator_vp: u64::from(val4.bonded_stake), + validator: Some(Validator { + address: pkh4.clone(), + power: u64::from(val4.bonded_stake) as i64, + }), signed_last_block: true, }, ]; @@ -1389,23 +1561,31 @@ mod test_finalize_block { // Now we don't receive a vote from val4. 
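The tests below build ABCI `VoteInfo` records by hand in several places; a hypothetical helper (not part of the diff) makes the repeated shape explicit:

// Hypothetical test helper, for illustration only: wraps the repeated
// `VoteInfo`/`Validator` construction pattern used by these tests.
fn vote_info(address: Vec<u8>, bonded_stake: u64, signed_last_block: bool) -> VoteInfo {
    VoteInfo {
        validator: Some(Validator {
            address,
            power: bonded_stake as i64,
        }),
        signed_last_block,
    }
}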
let votes = vec![ VoteInfo { - validator_address: pkh1.clone(), - validator_vp: u64::from(val1.bonded_stake), + validator: Some(Validator { + address: pkh1.clone(), + power: u64::from(val1.bonded_stake) as i64, + }), signed_last_block: true, }, VoteInfo { - validator_address: pkh2, - validator_vp: u64::from(val2.bonded_stake), + validator: Some(Validator { + address: pkh2, + power: u64::from(val2.bonded_stake) as i64, + }), signed_last_block: true, }, VoteInfo { - validator_address: pkh3, - validator_vp: u64::from(val3.bonded_stake), + validator: Some(Validator { + address: pkh3, + power: u64::from(val3.bonded_stake) as i64, + }), signed_last_block: true, }, VoteInfo { - validator_address: pkh4, - validator_vp: u64::from(val4.bonded_stake), + validator: Some(Validator { + address: pkh4, + power: u64::from(val4.bonded_stake) as i64, + }), signed_last_block: false, }, ]; @@ -1509,4 +1689,81 @@ mod test_finalize_block { shell.finalize_block(req).unwrap(); shell.commit(); } + + /// Test that if a decrypted transaction fails because of out-of-gas, its + /// hash is removed from storage to allow rewrapping it + #[test] + fn test_remove_tx_hash() { + let (mut shell, _) = setup(1); + let keypair = gen_keypair(); + + let mut wasm_path = top_level_directory(); + wasm_path.push("wasm_for_tests/tx_no_op.wasm"); + let tx_code = std::fs::read(wasm_path) + .expect("Expected a file at given code path"); + let raw_tx = Tx::new( + tx_code, + Some("Encrypted transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, + ); + let wrapper_tx = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + raw_tx.clone(), + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + + // Write inner hash in storage + let inner_hash_key = + replay_protection::get_tx_hash_key(&wrapper_tx.tx_hash); + shell + .wl_storage + .storage + .write(&inner_hash_key, vec![]) + .expect("Test failed"); + + let processed_tx = ProcessedTx { + tx: Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { + tx: raw_tx, + #[cfg(not(feature = "mainnet"))] + has_valid_pow: false, + })) + .to_bytes(), + result: TxResult { + code: ErrorCodes::Ok.into(), + info: "".into(), + }, + }; + shell.enqueue_tx(wrapper_tx); + + let _event = &shell + .finalize_block(FinalizeBlock { + txs: vec![processed_tx], + ..Default::default() + }) + .expect("Test failed")[0]; + + // FIXME: uncomment when proper gas metering is in place + // // Check inner tx hash has been removed from storage + // assert_eq!(event.event_type.to_string(), String::from("applied")); + // let code = event.attributes.get("code").expect("Test + // failed").as_str(); assert_eq!(code, + // String::from(ErrorCodes::WasmRuntimeError).as_str()); + + // assert!( + // !shell + // .storage + // .has_key(&inner_hash_key) + // .expect("Test failed") + // .0 + // ) + } } diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index a27814029d0..dfdae4d04ea 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -1,4 +1,5 @@ use namada::core::ledger::slash_fund::ADDRESS as slash_fund_address; +use namada::core::types::transaction::governance::ProposalType; use namada::ledger::events::EventType; use namada::ledger::governance::{ storage as gov_storage, ADDRESS as gov_address, @@ -10,8 +11,9 @@ use namada::ledger::protocol; use namada::ledger::storage::types::encode; use 
namada::ledger::storage::{DBIter, StorageHasher, DB}; use namada::ledger::storage_api::{token, StorageWrite}; +use namada::proof_of_stake::read_total_stake; use namada::types::address::Address; -use namada::types::governance::TallyResult; +use namada::types::governance::{Council, Tally, TallyResult, VotePower}; use namada::types::storage::Epoch; use super::*; @@ -35,6 +37,7 @@ where for id in std::mem::take(&mut shell.proposal_data) { let proposal_funds_key = gov_storage::get_funds_key(id); let proposal_end_epoch_key = gov_storage::get_voting_end_epoch_key(id); + let proposal_type_key = gov_storage::get_proposal_type_key(id); let funds = shell .read_storage_key::(&proposal_funds_key) @@ -50,126 +53,55 @@ where ) })?; - let votes = - get_proposal_votes(&shell.wl_storage, proposal_end_epoch, id); - let is_accepted = votes.and_then(|votes| { - compute_tally(&shell.wl_storage, proposal_end_epoch, votes) - }); - - let transfer_address = match is_accepted { - Ok(true) => { - let proposal_author_key = gov_storage::get_author_key(id); - let proposal_author = shell - .read_storage_key::
(&proposal_author_key) - .ok_or_else(|| { - Error::BadProposal( - id, - "Invalid proposal author.".to_string(), - ) - })?; - - let proposal_code_key = gov_storage::get_proposal_code_key(id); - let proposal_code = - shell.read_storage_key_bytes(&proposal_code_key); - match proposal_code { - Some(proposal_code) => { - let tx = Tx::new(proposal_code, Some(encode(&id))); - let tx_type = - TxType::Decrypted(DecryptedTx::Decrypted { - tx, - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - }); - let pending_execution_key = - gov_storage::get_proposal_execution_key(id); - shell - .wl_storage - .write(&pending_execution_key, ()) - .expect("Should be able to write to storage."); - let tx_result = protocol::apply_tx( - tx_type, - 0, /* this is used to compute the fee - * based on the code size. We dont - * need it here. */ - TxIndex::default(), - &mut BlockGasMeter::default(), - &mut shell.wl_storage.write_log, - &shell.wl_storage.storage, - &mut shell.vp_wasm_cache, - &mut shell.tx_wasm_cache, - ); - shell - .wl_storage - .delete(&pending_execution_key) - .expect("Should be able to delete the storage."); - match tx_result { - Ok(tx_result) => { - if tx_result.is_accepted() { - shell.wl_storage.write_log.commit_tx(); - let proposal_event: Event = - ProposalEvent::new( - EventType::Proposal.to_string(), - TallyResult::Passed, - id, - true, - true, - ) - .into(); - response.events.push(proposal_event); - proposals_result.passed.push(id); - - proposal_author - } else { - shell.wl_storage.write_log.drop_tx(); - let proposal_event: Event = - ProposalEvent::new( - EventType::Proposal.to_string(), - TallyResult::Passed, - id, - true, - false, - ) - .into(); - response.events.push(proposal_event); - proposals_result.rejected.push(id); + let proposal_type = shell + .read_storage_key::(&proposal_type_key) + .ok_or_else(|| { + Error::BadProposal(id, "Invalid proposal type".to_string()) + })?; - slash_fund_address - } - } - Err(_e) => { - shell.wl_storage.write_log.drop_tx(); - let proposal_event: Event = ProposalEvent::new( - EventType::Proposal.to_string(), - TallyResult::Passed, - id, - true, - false, - ) - .into(); - response.events.push(proposal_event); - proposals_result.rejected.push(id); + let votes = + get_proposal_votes(&shell.wl_storage, proposal_end_epoch, id) + .map_err(|msg| Error::BadProposal(id, msg.to_string()))?; + let params = read_pos_params(&shell.wl_storage) + .map_err(|msg| Error::BadProposal(id, msg.to_string()))?; + let total_stake = + read_total_stake(&shell.wl_storage, ¶ms, proposal_end_epoch) + .map_err(|msg| Error::BadProposal(id, msg.to_string()))?; + let total_stake = VotePower::from(u64::from(total_stake)); + let tally_result = compute_tally(votes, total_stake, &proposal_type) + .map_err(|msg| Error::BadProposal(id, msg.to_string()))? 
+ .result; - slash_fund_address - } - } + // Execute proposal if successful + let transfer_address = match tally_result { + TallyResult::Passed(tally) => { + let (successful_execution, proposal_event) = match tally { + Tally::Default => execute_default_proposal(shell, id), + Tally::PGFCouncil(council) => { + execute_pgf_proposal(id, council) } - None => { - let proposal_event: Event = ProposalEvent::new( - EventType::Proposal.to_string(), - TallyResult::Passed, - id, - false, - false, - ) - .into(); - response.events.push(proposal_event); - proposals_result.passed.push(id); + Tally::ETHBridge => execute_eth_proposal(id), + }; - proposal_author - } + response.events.push(proposal_event); + if successful_execution { + proposals_result.passed.push(id); + shell + .read_storage_key::<Address>
( &gov_storage::get_author_key(id), ) + .ok_or_else(|| { + Error::BadProposal( + id, + "Invalid proposal author.".to_string(), + ) + })? + } else { + proposals_result.rejected.push(id); + slash_fund_address } } - Ok(false) => { + TallyResult::Rejected => { let proposal_event: Event = ProposalEvent::new( EventType::Proposal.to_string(), TallyResult::Rejected, @@ -181,23 +113,6 @@ where response.events.push(proposal_event); proposals_result.rejected.push(id); - slash_fund_address - } - Err(err) => { - tracing::error!( - "Unexpectedly failed to tally proposal ID {id} with error \ - {err}" - ); - let proposal_event: Event = ProposalEvent::new( - EventType::Proposal.to_string(), - TallyResult::Failed, - id, - false, - false, - ) - .into(); - response.events.push(proposal_event); - slash_fund_address } }; @@ -219,3 +134,127 @@ where Ok(proposals_result) } + +fn execute_default_proposal( + shell: &mut Shell<D, H>, + id: u64, +) -> (bool, Event) +where + D: DB + for<'iter> DBIter<'iter> + Sync + 'static, + H: StorageHasher + Sync + 'static, +{ + let proposal_code_key = gov_storage::get_proposal_code_key(id); + let proposal_code = shell.read_storage_key_bytes(&proposal_code_key); + match proposal_code { + Some(proposal_code) => { + let tx = Tx::new( + proposal_code, + Some(encode(&id)), + shell.chain_id.clone(), + None, + ); + let tx_type = TxType::Decrypted(DecryptedTx::Decrypted { + tx, + #[cfg(not(feature = "mainnet"))] + has_valid_pow: false, + }); + let pending_execution_key = + gov_storage::get_proposal_execution_key(id); + shell + .wl_storage + .write(&pending_execution_key, ()) + .expect("Should be able to write to storage."); + let tx_result = protocol::apply_tx( + tx_type, + 0, /* this is used to compute the fee + * based on the code size. We don't + * need it here. */ + TxIndex::default(), + &mut BlockGasMeter::default(), + &mut shell.wl_storage.write_log, + &shell.wl_storage.storage, + &mut shell.vp_wasm_cache, + &mut shell.tx_wasm_cache, + ); + shell + .wl_storage + .storage + .delete(&pending_execution_key) + .expect("Should be able to delete the storage."); + match tx_result { + Ok(tx_result) if tx_result.is_accepted() => { + shell.wl_storage.commit_tx(); + ( + tx_result.is_accepted(), + ProposalEvent::new( + EventType::Proposal.to_string(), + TallyResult::Passed(Tally::Default), + id, + true, + tx_result.is_accepted(), + ) + .into(), + ) + } + _ => { + shell.wl_storage.drop_tx(); + ( + false, + ProposalEvent::new( + EventType::Proposal.to_string(), + TallyResult::Passed(Tally::Default), + id, + true, + false, + ) + .into(), + ) + } + } + } + None => ( + true, + ProposalEvent::new( + EventType::Proposal.to_string(), + TallyResult::Passed(Tally::Default), + id, + false, + false, + ) + .into(), + ), + } +} + +fn execute_pgf_proposal(id: u64, council: Council) -> (bool, Event) { + // TODO: implement when PGF is in place, update the PGF + // council in storage + ( + true, + ProposalEvent::new( + EventType::Proposal.to_string(), + TallyResult::Passed(Tally::PGFCouncil(council)), + id, + false, + false, + ) + .into(), + ) +} + +fn execute_eth_proposal(id: u64) -> (bool, Event) { + // TODO: implement when the ETH Bridge is in place.
+fn execute_eth_proposal(id: u64) -> (bool, Event) {
+    // TODO: implement when the ETH Bridge is in place. Apply the
+    // modification requested by the proposal
+    //
+    (
+        true,
+        ProposalEvent::new(
+            EventType::Proposal.to_string(),
+            TallyResult::Passed(Tally::ETHBridge),
+            id,
+            false,
+            false,
+        )
+        .into(),
+    )
+}
diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs
index b40e777cd67..58891c3b4e4 100644
--- a/apps/src/lib/node/ledger/shell/init_chain.rs
+++ b/apps/src/lib/node/ledger/shell/init_chain.rs
@@ -4,17 +4,15 @@ use std::hash::Hash;
 
 #[cfg(not(feature = "mainnet"))]
 use namada::core::ledger::testnet_pow;
-use namada::ledger::parameters::storage::get_staked_ratio_key;
-use namada::ledger::parameters::Parameters;
+use namada::ledger::parameters::{self, Parameters};
 use namada::ledger::pos::{into_tm_voting_power, staking_token_address};
 use namada::ledger::storage_api::token::{
     credit_tokens, read_balance, read_total_supply,
 };
-use namada::ledger::storage_api::StorageWrite;
+use namada::ledger::storage_api::{ResultExt, StorageRead, StorageWrite};
+use namada::types::hash::Hash as CodeHash;
 use namada::types::key::*;
 use rust_decimal::Decimal;
-#[cfg(not(feature = "dev"))]
-use sha2::{Digest, Sha256};
 
 use super::*;
 use crate::facade::tendermint_proto::abci;
@@ -30,6 +28,8 @@ where
     /// Create a new genesis for the chain with specified id. This includes
     /// 1. A set of initial users and tokens
     /// 2. Setting up the validity predicates for both users and tokens
+    ///
+    /// INVARIANT: This method must not commit the state changes to DB.
     pub fn init_chain(
         &mut self,
         init: request::InitChain,
@@ -89,26 +89,6 @@ where
             pos_inflation_amount,
             wrapper_tx_fees,
         } = genesis.parameters;
-        // borrow necessary for release build, annoys clippy on dev build
-        #[allow(clippy::needless_borrow)]
-        let implicit_vp =
-            wasm_loader::read_wasm(&self.wasm_dir, &implicit_vp_code_path)
-                .map_err(Error::ReadingWasm)?;
-        // In dev, we don't check the hash
-        #[cfg(feature = "dev")]
-        let _ = implicit_vp_sha256;
-        #[cfg(not(feature = "dev"))]
-        {
-            let mut hasher = Sha256::new();
-            hasher.update(&implicit_vp);
-            let vp_code_hash = hasher.finalize();
-            assert_eq!(
-                vp_code_hash.as_slice(),
-                &implicit_vp_sha256,
-                "Invalid implicit account's VP sha256 hash for {}",
-                implicit_vp_code_path
-            );
-        }
         #[cfg(not(feature = "mainnet"))]
         // Try to find a faucet account
         let faucet_account = {
@@ -127,13 +107,73 @@ where
             )
         };
 
+        // Store wasm codes into storage
+        let checksums = wasm_loader::Checksums::read_checksums(&self.wasm_dir);
+        for (name, full_name) in checksums.0.iter() {
+            let code = wasm_loader::read_wasm(&self.wasm_dir, name)
+                .map_err(Error::ReadingWasm)?;
+            let code_hash = CodeHash::sha256(&code);
+
+            let elements = full_name.split('.').collect::<Vec<&str>>();
+            let checksum = elements.get(1).ok_or_else(|| {
+                Error::LoadingWasm(format!("invalid full name: {}", full_name))
+            })?;
+            assert_eq!(
+                code_hash.to_string(),
+                checksum.to_uppercase(),
+                "Invalid wasm code sha256 hash for {}",
+                name
+            );
+
+            if (tx_whitelist.is_empty() && vp_whitelist.is_empty())
+                || tx_whitelist.contains(&code_hash.to_string().to_lowercase())
+                || vp_whitelist.contains(&code_hash.to_string().to_lowercase())
+            {
+                #[cfg(not(test))]
+                if name.starts_with("tx_") {
+                    self.tx_wasm_cache.pre_compile(&code);
+                } else if name.starts_with("vp_") {
+                    self.vp_wasm_cache.pre_compile(&code);
+                }
+
+                let code_key = Key::wasm_code(&code_hash);
+                self.wl_storage.write_bytes(&code_key, code)?;
+
+                let hash_key = Key::wasm_hash(name);
+                self.wl_storage.write_bytes(&hash_key, code_hash)?;
+            } else {
+
tracing::warn!("The wasm {name} isn't whitelisted."); + } + } + + // check if implicit_vp wasm is stored + let implicit_vp_code_hash = + read_wasm_hash(&self.wl_storage, &implicit_vp_code_path)?.ok_or( + Error::LoadingWasm(format!( + "Unknown vp code path: {}", + implicit_vp_code_path + )), + )?; + // In dev, we don't check the hash + #[cfg(feature = "dev")] + let _ = implicit_vp_sha256; + #[cfg(not(feature = "dev"))] + { + assert_eq!( + implicit_vp_code_hash.as_slice(), + &implicit_vp_sha256, + "Invalid implicit account's VP sha256 hash for {}", + implicit_vp_code_path + ); + } + let parameters = Parameters { epoch_duration, max_proposal_bytes, max_expected_time_per_block, vp_whitelist, tx_whitelist, - implicit_vp, + implicit_vp_code_hash, epochs_per_year, pos_gain_p, pos_gain_d, @@ -144,12 +184,15 @@ where #[cfg(not(feature = "mainnet"))] wrapper_tx_fees, }; - parameters.init_storage(&mut self.wl_storage.storage); + parameters + .init_storage(&mut self.wl_storage) + .expect("Initializing chain parameters must not fail"); // Initialize governance parameters genesis .gov_params - .init_storage(&mut self.wl_storage.storage); + .init_storage(&mut self.wl_storage) + .expect("Initializing governance parameters must not fail"); // Depends on parameters being initialized self.wl_storage @@ -157,9 +200,6 @@ where .init_genesis_epoch(initial_height, genesis_time, ¶meters) .expect("Initializing genesis epoch must not fail"); - // Loaded VP code cache to avoid loading the same files multiple times - let mut vp_code_cache: HashMap> = HashMap::default(); - // Initialize genesis established accounts for genesis::EstablishedAccount { address, @@ -169,25 +209,17 @@ where storage, } in genesis.established_accounts { - let vp_code = match vp_code_cache.get(&vp_code_path).cloned() { - Some(vp_code) => vp_code, - None => { - let wasm = - wasm_loader::read_wasm(&self.wasm_dir, &vp_code_path) - .map_err(Error::ReadingWasm)?; - vp_code_cache.insert(vp_code_path.clone(), wasm.clone()); - wasm - } - }; + let vp_code_hash = read_wasm_hash(&self.wl_storage, &vp_code_path)? 
+                .ok_or(Error::LoadingWasm(format!(
+                    "Unknown vp code path: {}",
+                    vp_code_path
+                )))?;
 
             // In dev, we don't check the hash
             #[cfg(feature = "dev")]
             let _ = vp_sha256;
             #[cfg(not(feature = "dev"))]
             {
-                let mut hasher = Sha256::new();
-                hasher.update(&vp_code);
-                let vp_code_hash = hasher.finalize();
                 assert_eq!(
                     vp_code_hash.as_slice(),
                     &vp_sha256,
@@ -197,7 +229,7 @@ where
             }
 
             self.wl_storage
-                .write_bytes(&Key::validity_predicate(&address), vp_code)
+                .write_bytes(&Key::validity_predicate(&address), vp_code_hash)
                 .unwrap();
 
             if let Some(pk) = public_key {
@@ -258,20 +290,19 @@ where
                 .write(&token::last_locked_ratio(&address), last_locked_ratio)
                 .unwrap();
 
-            let vp_code =
-                vp_code_cache.get_or_insert_with(vp_code_path.clone(), || {
-                    wasm_loader::read_wasm(&self.wasm_dir, &vp_code_path)
-                        .unwrap()
-                });
+            let vp_code_hash =
+                read_wasm_hash(&self.wl_storage, vp_code_path.clone())?.ok_or(
+                    Error::LoadingWasm(format!(
+                        "Unknown vp code path: {}",
+                        vp_code_path
+                    )),
+                )?;
 
             // In dev, we don't check the hash
             #[cfg(feature = "dev")]
             let _ = vp_sha256;
             #[cfg(not(feature = "dev"))]
             {
-                let mut hasher = Sha256::new();
-                hasher.update(&vp_code);
-                let vp_code_hash = hasher.finalize();
                 assert_eq!(
                     vp_code_hash.as_slice(),
                     &vp_sha256,
@@ -281,7 +312,7 @@ where
             }
 
             self.wl_storage
-                .write_bytes(&Key::validity_predicate(&address), vp_code)
+                .write_bytes(&Key::validity_predicate(&address), vp_code_hash)
                 .unwrap();
 
             let mut total_balance_for_token = token::Amount::default();
@@ -300,23 +331,19 @@
         }
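Across the account setup above, the full VP wasm is no longer written at each account's validity-predicate key; only a content hash is, with the blob stored once. A toy sketch of this hash indirection follows, assuming nothing about Namada's storage: `toy_hash` stands in for SHA-256 and the key layout is hypothetical.

```rust
// Content-addressed code store: blobs are stored once under a hash, and
// each account's VP key stores only the hash.
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

fn toy_hash(bytes: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    bytes.hash(&mut h);
    h.finish()
}

struct Storage {
    code_by_hash: HashMap<u64, Vec<u8>>, // wasm_code(hash) -> code
    vp_of_account: HashMap<String, u64>, // validity_predicate(addr) -> hash
}

impl Storage {
    fn store_code(&mut self, code: &[u8]) -> u64 {
        let hash = toy_hash(code);
        // Each distinct blob is persisted once, however many accounts use it
        self.code_by_hash.entry(hash).or_insert_with(|| code.to_vec());
        hash
    }

    fn set_vp(&mut self, addr: &str, code_hash: u64) {
        self.vp_of_account.insert(addr.to_string(), code_hash);
    }
}

fn main() {
    let mut s = Storage {
        code_by_hash: HashMap::new(),
        vp_of_account: HashMap::new(),
    };
    let h = s.store_code(b"vp_user wasm bytes");
    s.set_vp("alice", h);
    s.set_vp("bob", h); // second account reuses the stored blob
    assert_eq!(s.code_by_hash.len(), 1);
}
```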
         // Initialize genesis validator accounts
+        let staking_token = staking_token_address(&self.wl_storage);
         for validator in &genesis.validators {
-            let vp_code = vp_code_cache.get_or_insert_with(
-                validator.validator_vp_code_path.clone(),
-                || {
-                    wasm_loader::read_wasm(
-                        &self.wasm_dir,
-                        &validator.validator_vp_code_path,
-                    )
-                    .unwrap()
-                },
-            );
+            let vp_code_hash = read_wasm_hash(
+                &self.wl_storage,
+                &validator.validator_vp_code_path,
+            )?
+            .ok_or(Error::LoadingWasm(format!(
+                "Unknown vp code path: {}",
+                validator.validator_vp_code_path
+            )))?;
 
             #[cfg(not(feature = "dev"))]
             {
-                let mut hasher = Sha256::new();
-                hasher.update(&vp_code);
-                let vp_code_hash = hasher.finalize();
                 assert_eq!(
                     vp_code_hash.as_slice(),
                     &validator.validator_vp_sha256,
@@ -327,7 +354,7 @@ where
 
             let addr = &validator.pos_data.address;
             self.wl_storage
-                .write_bytes(&Key::validity_predicate(addr), vp_code)
+                .write_bytes(&Key::validity_predicate(addr), vp_code_hash)
                 .expect("Unable to write user VP");
             // Validator account key
             let pk_key = pk_key(addr);
@@ -339,7 +366,7 @@ where
             // Account balance (tokens not staked in PoS)
             credit_tokens(
                 &mut self.wl_storage,
-                &staking_token_address(),
+                &staking_token,
                 addr,
                 validator.non_staked_balance,
             )
@@ -373,29 +400,24 @@ where
         );
 
         let total_nam =
-            read_total_supply(&self.wl_storage, &staking_token_address())
-                .unwrap();
+            read_total_supply(&self.wl_storage, &staking_token).unwrap();
         // At this stage in the chain genesis, the PoS address balance is the
         // same as the number of staked tokens
-        let total_staked_nam = read_balance(
-            &self.wl_storage,
-            &staking_token_address(),
-            &address::POS,
-        )
-        .unwrap();
+        let total_staked_nam =
+            read_balance(&self.wl_storage, &staking_token, &address::POS)
+                .unwrap();
 
         tracing::info!("Genesis total native tokens: {total_nam}.");
         tracing::info!("Total staked tokens: {total_staked_nam}.");
 
         // Set the ratio of staked to total NAM tokens in the parameters storage
-        self.wl_storage
-            .write(
-                &get_staked_ratio_key(),
-                Decimal::from(total_staked_nam) / Decimal::from(total_nam),
-            )
-            .expect("unable to set staked ratio of NAM in storage");
+        parameters::update_staked_ratio_parameter(
+            &mut self.wl_storage,
+            &(Decimal::from(total_staked_nam) / Decimal::from(total_nam)),
+        )
+        .expect("unable to set staked ratio of NAM in storage");
 
-        ibc::init_genesis_storage(&mut self.wl_storage.storage);
+        ibc::init_genesis_storage(&mut self.wl_storage);
 
         // Set the initial validator set
         for validator in genesis.validators {
@@ -413,14 +435,24 @@ where
             response.validators.push(abci_validator);
         }
 
-        self.wl_storage
-            .commit_genesis()
-            .expect("Must be able to commit genesis state");
-
         Ok(response)
     }
 }
 
+fn read_wasm_hash(
+    storage: &impl StorageRead,
+    path: impl AsRef<str>,
+) -> storage_api::Result<Option<CodeHash>> {
+    let hash_key = Key::wasm_hash(path);
+    match storage.read_bytes(&hash_key)? {
+        Some(value) => {
+            let hash = CodeHash::try_from(&value[..]).into_storage_result()?;
+            Ok(Some(hash))
+        }
+        None => Ok(None),
+    }
+}
+
 trait HashMapExt<K, V>
 where
     K: Eq + Hash,
@@ -444,3 +476,62 @@
         }
     }
 }
 
+#[cfg(test)]
+mod test {
+    use std::collections::BTreeMap;
+    use std::str::FromStr;
+
+    use namada::ledger::storage::DBIter;
+    use namada::types::chain::ChainId;
+    use namada::types::storage;
+
+    use crate::facade::tendermint_proto::abci::RequestInitChain;
+    use crate::facade::tendermint_proto::google::protobuf::Timestamp;
+    use crate::node::ledger::shell::test_utils::TestShell;
+
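The `read_wasm_hash` helper above distinguishes a missing key from a present but undecodable value. A self-contained sketch of that pattern, with simplified stand-in types instead of Namada's `StorageRead` and `CodeHash`:

```rust
// Storage read that separates "key absent" (Ok(None)) from
// "value present but not a valid 32-byte hash" (Err).
use std::collections::HashMap;

type StorageResult<T> = Result<T, String>;

fn read_wasm_hash(
    storage: &HashMap<String, Vec<u8>>,
    name: &str,
) -> StorageResult<Option<[u8; 32]>> {
    match storage.get(&format!("wasm_hash/{name}")) {
        // Unknown path: the caller turns this into a LoadingWasm error
        None => Ok(None),
        Some(bytes) => {
            let hash: [u8; 32] = bytes.as_slice().try_into().map_err(|_| {
                format!("stored value for {name} is not 32 bytes")
            })?;
            Ok(Some(hash))
        }
    }
}

fn main() {
    let mut storage = HashMap::new();
    storage.insert("wasm_hash/vp_user.wasm".to_string(), vec![0u8; 32]);
    assert!(matches!(read_wasm_hash(&storage, "vp_user.wasm"), Ok(Some(_))));
    assert!(matches!(read_wasm_hash(&storage, "missing.wasm"), Ok(None)));
}
```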
+    /// Test that the init-chain handler never commits changes directly to the
+    /// DB.
+    #[test]
+    fn test_init_chain_doesnt_commit_db() {
+        let (mut shell, _receiver) = TestShell::new();
+
+        // Collect all storage key-vals into a sorted map
+        let store_block_state = |shell: &TestShell| -> BTreeMap<_, _> {
+            let prefix: storage::Key = FromStr::from_str("").unwrap();
+            shell
+                .wl_storage
+                .storage
+                .db
+                .iter_prefix(&prefix)
+                .map(|(key, val, _gas)| (key, val))
+                .collect()
+        };
+
+        // Store the full state in sorted map
+        let initial_storage_state: std::collections::BTreeMap<String, Vec<u8>> =
+            store_block_state(&shell);
+
+        shell.init_chain(
+            RequestInitChain {
+                time: Some(Timestamp {
+                    seconds: 0,
+                    nanos: 0,
+                }),
+                chain_id: ChainId::default().to_string(),
+                ..Default::default()
+            },
+            1,
+        );
+
+        // Store the full state again
+        let storage_state: std::collections::BTreeMap<String, Vec<u8>> =
+            store_block_state(&shell);
+
+        // The storage state must be unchanged
+        itertools::assert_equal(
+            initial_storage_state.iter(),
+            storage_state.iter(),
+        );
+    }
+}
diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs
index 9d40d20760b..3f69be490e3 100644
--- a/apps/src/lib/node/ledger/shell/mod.rs
+++ b/apps/src/lib/node/ledger/shell/mod.rs
@@ -5,6 +5,7 @@
 //! and [`Shell::process_proposal`] must be also reverted
 //! (unless we can simply overwrite them in the next block).
 //! More info in .
+mod block_space_alloc;
 mod finalize_block;
 mod governance;
 mod init_chain;
@@ -32,10 +33,9 @@ use namada::ledger::storage::{
     DBIter, Sha256Hasher, Storage, StorageHasher, WlStorage, DB,
 };
 use namada::ledger::storage_api::{self, StorageRead};
-use namada::ledger::{ibc, pos, protocol};
+use namada::ledger::{ibc, pos, protocol, replay_protection};
 use namada::proof_of_stake::{self, read_pos_params, slash};
 use namada::proto::{self, Tx};
-use namada::types::address;
 use namada::types::address::{masp, masp_tx_key, Address};
 use namada::types::chain::ChainId;
 use namada::types::internal::WrapperTxInQueue;
@@ -43,10 +43,13 @@ use namada::types::key::*;
 use namada::types::storage::{BlockHeight, Key, TxIndex};
 use namada::types::time::{DateTimeUtc, TimeZone, Utc};
 use namada::types::token::{self};
+#[cfg(not(feature = "mainnet"))]
+use namada::types::transaction::MIN_FEE;
 use namada::types::transaction::{
     hash_tx, process_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx,
-    EllipticCurve, PairingEngine, TxType, MIN_FEE,
+    EllipticCurve, PairingEngine, TxType,
 };
+use namada::types::{address, hash};
 use namada::vm::wasm::{TxCache, VpCache};
 use namada::vm::WasmCacheRwAccess;
 use num_derive::{FromPrimitive, ToPrimitive};
@@ -54,6 +57,7 @@ use num_traits::{FromPrimitive, ToPrimitive};
 use thiserror::Error;
 use tokio::sync::mpsc::UnboundedSender;
 
+use crate::config;
 use crate::config::{genesis, TendermintMode};
 #[cfg(feature = "abcipp")]
 use crate::facade::tendermint_proto::abci::response_verify_vote_extension::VerifyStatus;
@@ -61,13 +65,13 @@ use crate::facade::tendermint_proto::abci::{
     Misbehavior as Evidence, MisbehaviorType as EvidenceType, ValidatorUpdate,
 };
 use crate::facade::tendermint_proto::crypto::public_key;
+use crate::facade::tendermint_proto::google::protobuf::Timestamp;
 use crate::facade::tower_abci::{request, response};
 use crate::node::ledger::shims::abcipp_shim_types::shim;
 use crate::node::ledger::shims::abcipp_shim_types::shim::response::TxResult;
 use crate::node::ledger::{storage, tendermint_node};
 #[allow(unused_imports)]
 use crate::wallet::ValidatorData;
-use crate::{config, wallet};
 
 fn key_to_tendermint(
     pk: &common::PublicKey,
@@ -104,6 +108,8 @@
 pub enum Error {
     BadProposal(u64, String),
     #[error("Error reading wasm: {0}")]
     ReadingWasm(#[from] eyre::Error),
+    #[error("Error loading wasm: {0}")]
+    LoadingWasm(String),
     #[error("Error reading from or writing to storage: {0}")]
     StorageApi(#[from] storage_api::Error),
 }
@@ -120,15 +126,40 @@ impl From<Error> for TxResult {
 /// The different error codes that the ledger may
 /// send back to a client indicating the status
 /// of their submitted tx
-#[derive(Debug, Clone, FromPrimitive, ToPrimitive, PartialEq)]
+#[derive(Debug, Copy, Clone, FromPrimitive, ToPrimitive, PartialEq)]
 pub enum ErrorCodes {
     Ok = 0,
-    InvalidTx = 1,
-    InvalidSig = 2,
+    InvalidDecryptedChainId = 1,
+    ExpiredDecryptedTx = 2,
     WasmRuntimeError = 3,
-    InvalidOrder = 4,
-    ExtraTxs = 5,
-    Undecryptable = 6,
+    InvalidTx = 4,
+    InvalidSig = 5,
+    InvalidOrder = 6,
+    ExtraTxs = 7,
+    Undecryptable = 8,
+    AllocationError = 9,
+    ReplayTx = 10,
+    InvalidChainId = 11,
+    ExpiredTx = 12,
+}
+
+impl ErrorCodes {
+    /// Checks if the given [`ErrorCodes`] value is a protocol-level error
+    /// that can be recovered from at the finalize block stage.
+    pub const fn is_recoverable(&self) -> bool {
+        use ErrorCodes::*;
+        // NOTE: pattern match on all `ErrorCodes` variants, in order
+        // to catch potential bugs when adding new codes
+        match self {
+            Ok
+            | InvalidDecryptedChainId
+            | ExpiredDecryptedTx
+            | WasmRuntimeError => true,
+            InvalidTx | InvalidSig | InvalidOrder | ExtraTxs
+            | Undecryptable | AllocationError | ReplayTx | InvalidChainId
+            | ExpiredTx => false,
+        }
+    }
 }
 
 impl From<ErrorCodes> for u32 {
@@ -158,6 +189,22 @@ pub fn reset(config: config::Ledger) -> Result<()> {
     Ok(())
 }
 
+pub fn rollback(config: config::Ledger) -> Result<()> {
+    // Rollback Tendermint state
+    tracing::info!("Rollback Tendermint state");
+    let tendermint_block_height =
+        tendermint_node::rollback(config.tendermint_dir())
+            .map_err(Error::Tendermint)?;
+
+    // Rollback Namada state
+    let db_path = config.shell.db_dir(&config.chain_id);
+    let mut db = storage::PersistentDB::open(db_path, None);
+    tracing::info!("Rollback Namada state");
+
+    db.rollback(tendermint_block_height)
+        .map_err(|e| Error::StorageApi(storage_api::Error::new(e)))
+}
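The `rollback` function above applies a two-step discipline: consensus state is rolled back first, and the application DB then follows to the height Tendermint reports. A minimal sketch of that ordering, with toy stand-ins for both stores:

```rust
// Two-step rollback: consensus first, then the app DB to the same height.
struct ConsensusState {
    height: u64,
}

struct AppDb {
    height: u64,
}

impl ConsensusState {
    // Mirrors the role of `tendermint_node::rollback`: drop the last
    // block and report the target height.
    fn rollback(&mut self) -> u64 {
        self.height -= 1;
        self.height
    }
}

impl AppDb {
    fn rollback(&mut self, target_height: u64) -> Result<(), String> {
        if target_height > self.height {
            return Err("cannot roll forward".into());
        }
        self.height = target_height;
        Ok(())
    }
}

fn main() {
    let mut tm = ConsensusState { height: 100 };
    let mut db = AppDb { height: 100 };
    let target = tm.rollback();
    db.rollback(target).expect("app DB must follow consensus state");
    assert_eq!((tm.height, db.height), (99, 99));
}
```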
 #[derive(Debug)]
 #[allow(dead_code, clippy::large_enum_variant)]
 pub(super) enum ShellMode {
@@ -254,8 +301,13 @@ where
                 .expect("Creating directory for Namada should not fail");
         }
         // load last state from storage
-        let mut storage =
-            Storage::open(db_path, chain_id.clone(), native_token, db_cache);
+        let mut storage = Storage::open(
+            db_path,
+            chain_id.clone(),
+            native_token,
+            db_cache,
+            config.shell.storage_read_past_height_limit,
+        );
         storage
             .load_last_state()
             .map_err(|e| {
@@ -279,7 +331,7 @@ where
                         "{}",
                         wallet_path.as_path().to_str().unwrap()
                     );
-                    let wallet = wallet::Wallet::load_or_new_from_genesis(
+                    let mut wallet = crate::wallet::load_or_new_from_genesis(
                         wallet_path,
                         genesis::genesis_config::open_genesis_config(
                             genesis_path,
@@ -289,7 +341,7 @@ where
                     wallet
                         .take_validator_data()
                         .map(|data| ShellMode::Validator {
-                            data,
+                            data: data.clone(),
                             broadcast_sender,
                         })
                         .expect(
@@ -299,11 +351,13 @@ where
                 }
                 #[cfg(feature = "dev")]
                 {
-                    let validator_keys = wallet::defaults::validator_keys();
+                    let validator_keys =
+                        crate::wallet::defaults::validator_keys();
                     ShellMode::Validator {
-                        data: wallet::ValidatorData {
-                            address: wallet::defaults::validator_address(),
-                            keys: wallet::ValidatorKeys {
+                        data: crate::wallet::ValidatorData {
+                            address: crate::wallet::defaults::validator_address(
+                            ),
+                            keys: crate::wallet::ValidatorKeys {
                                 protocol_keypair: validator_keys.0,
                                 dkg_keypair: Some(validator_keys.1),
                             },
@@ -388,6 +442,25 @@ where
         response
     }
 
+    /// Takes the optional Tendermint timestamp of the block: if it's `Some`,
+    /// then converts it to a [`DateTimeUtc`]; otherwise retrieves the time
+    /// of the last committed block from storage
+    pub fn get_block_timestamp(
+        &self,
+        tendermint_block_time: Option<Timestamp>,
+    ) -> DateTimeUtc {
+        if let Some(t) = tendermint_block_time {
+            if let Ok(t) = t.try_into() {
+                return t;
+            }
+        }
+        // Default to last committed block time
+        self.wl_storage
+            .storage
+            .get_last_block_timestamp()
+            .expect("Failed to retrieve last block timestamp")
+    }
+
     /// Read the value for a storage key dropping any error
     pub fn read_storage_key<T>(&self, key: &Key) -> Option<T>
     where
@@ -578,49 +651,138 @@ where
     /// Validate a transaction request. On success, the transaction will be
     /// included in the mempool and propagated to peers, otherwise it will be
     /// rejected.
+    ///
+    /// Error codes:
+    /// 0: Ok
+    /// 4: Invalid tx
+    /// 5: Tx is invalidly signed
+    /// 10: Replay attack
+    /// 11: Invalid chain id in tx
+    /// 12: Expired tx
     pub fn mempool_validate(
         &self,
         tx_bytes: &[u8],
         r#_type: MempoolTxType,
    ) -> response::CheckTx {
         let mut response = response::CheckTx::default();
-        match Tx::try_from(tx_bytes).map_err(Error::TxDecoding) {
-            Ok(tx) => {
-                // Check balance for fee
-                if let Ok(TxType::Wrapper(wrapper)) = process_tx(tx) {
-                    let fee_payer = if wrapper.pk != masp_tx_key().ref_to() {
-                        wrapper.fee_payer()
-                    } else {
-                        masp()
-                    };
-                    // check that the fee payer has sufficient balance
-                    let balance =
-                        self.get_balance(&wrapper.fee.token, &fee_payer);
-                    // In testnets with a faucet, tx is allowed to skip fees if
-                    // it includes a valid PoW
-                    #[cfg(not(feature = "mainnet"))]
-                    let has_valid_pow = self.has_valid_pow_solution(&wrapper);
-                    #[cfg(feature = "mainnet")]
-                    let has_valid_pow = false;
+        // Tx format check
+        let tx = match Tx::try_from(tx_bytes).map_err(Error::TxDecoding) {
+            Ok(t) => t,
+            Err(msg) => {
+                response.code = ErrorCodes::InvalidTx.into();
+                response.log = msg.to_string();
+                return response;
+            }
+        };
 
-                    if !has_valid_pow && self.get_wrapper_tx_fees() > balance {
-                        response.code = 1;
-                        response.log = String::from(
-                            "The address given does not have sufficient \
-                             balance to pay fee",
-                        );
-                        return response;
-                    }
-                }
+        // Tx chain id
+        if tx.chain_id != self.chain_id {
+            response.code = ErrorCodes::InvalidChainId.into();
+            response.log = format!(
+                "Tx carries a wrong chain id: expected {}, found {}",
+                self.chain_id, tx.chain_id
+            );
+            return response;
+        }
+
+        // Tx expiration
+        if let Some(exp) = tx.expiration {
+            let last_block_timestamp = self.get_block_timestamp(None);
-                response.log = String::from("Mempool validation passed");
+            if last_block_timestamp > exp {
+                response.code = ErrorCodes::ExpiredTx.into();
+                response.log = format!(
+                    "Tx expired at {:#?}, last committed block time: {:#?}",
+                    exp, last_block_timestamp
+                );
+                return response;
             }
+        }
+
+        // Tx signature check
+        let tx_type = match process_tx(tx) {
+            Ok(ty) => ty,
             Err(msg) => {
-                response.code = 1;
+                response.code = ErrorCodes::InvalidSig.into();
                 response.log = msg.to_string();
+                return response;
+            }
+        };
+
+        // Tx type check
+        if let TxType::Wrapper(wrapper) = tx_type {
+            // Replay protection check
+            let inner_hash_key =
+                replay_protection::get_tx_hash_key(&wrapper.tx_hash);
+            if self
+                .wl_storage
+                .storage
+                .has_key(&inner_hash_key)
+                .expect("Error while checking inner tx hash key in storage")
+                .0
+            {
+                response.code = ErrorCodes::ReplayTx.into();
+
response.log = format!( + "Inner transaction hash {} already in storage, replay \ + attempt", + wrapper.tx_hash + ); + return response; + } + + let tx = + Tx::try_from(tx_bytes).expect("Deserialization shouldn't fail"); + let wrapper_hash = hash::Hash(tx.unsigned_hash()); + let wrapper_hash_key = + replay_protection::get_tx_hash_key(&wrapper_hash); + if self + .wl_storage + .storage + .has_key(&wrapper_hash_key) + .expect("Error while checking wrapper tx hash key in storage") + .0 + { + response.code = ErrorCodes::ReplayTx.into(); + response.log = format!( + "Wrapper transaction hash {} already in storage, replay \ + attempt", + wrapper_hash + ); + return response; + } + + // Check balance for fee + let fee_payer = if wrapper.pk != masp_tx_key().ref_to() { + wrapper.fee_payer() + } else { + masp() + }; + // check that the fee payer has sufficient balance + let balance = self.get_balance(&wrapper.fee.token, &fee_payer); + + // In testnets with a faucet, tx is allowed to skip fees if + // it includes a valid PoW + #[cfg(not(feature = "mainnet"))] + let has_valid_pow = self.has_valid_pow_solution(&wrapper); + #[cfg(feature = "mainnet")] + let has_valid_pow = false; + + if !has_valid_pow && self.get_wrapper_tx_fees() > balance { + response.code = ErrorCodes::InvalidTx.into(); + response.log = String::from( + "The given address does not have a sufficient balance to \ + pay fee", + ); + return response; } + } else { + response.code = ErrorCodes::InvalidTx.into(); + response.log = "Unsupported tx type".to_string(); + return response; } + + response.log = "Mempool validation passed".to_string(); + response } @@ -677,7 +839,7 @@ where let genesis_path = &self .base_dir .join(format!("{}.toml", self.chain_id.as_str())); - let mut wallet = wallet::Wallet::load_or_new_from_genesis( + let mut wallet = crate::wallet::load_or_new_from_genesis( wallet_path, genesis::genesis_config::open_genesis_config(genesis_path).unwrap(), ); @@ -694,7 +856,7 @@ where it's established account", ); let pk = sk.ref_to(); - wallet.find_key_by_pk(&pk).expect( + wallet.find_key_by_pk(&pk, None).expect( "A validator's established keypair should be stored in its \ wallet", ) @@ -709,9 +871,9 @@ where tx: &namada::types::transaction::WrapperTx, ) -> bool { if let Some(solution) = &tx.pow_solution { - if let (Some(faucet_address), _gas) = + if let Some(faucet_address) = namada::ledger::parameters::read_faucet_account_parameter( - &self.wl_storage.storage, + &self.wl_storage, ) .expect("Must be able to read faucet account parameter") { @@ -727,11 +889,10 @@ where #[cfg(not(feature = "mainnet"))] /// Get fixed amount of fees for wrapper tx fn get_wrapper_tx_fees(&self) -> token::Amount { - let (fees, _gas) = - namada::ledger::parameters::read_wrapper_tx_fees_parameter( - &self.wl_storage.storage, - ) - .expect("Must be able to read wrapper tx fees parameter"); + let fees = namada::ledger::parameters::read_wrapper_tx_fees_parameter( + &self.wl_storage, + ) + .expect("Must be able to read wrapper tx fees parameter"); fees.unwrap_or(token::Amount::whole(MIN_FEE)) } @@ -743,9 +904,9 @@ where tx: &namada::types::transaction::WrapperTx, ) -> bool { if let Some(solution) = &tx.pow_solution { - if let (Some(faucet_address), _gas) = + if let Some(faucet_address) = namada::ledger::parameters::read_faucet_account_parameter( - &self.wl_storage.storage, + &self.wl_storage, ) .expect("Must be able to read faucet account parameter") { @@ -771,12 +932,11 @@ mod test_utils { use std::path::PathBuf; use namada::ledger::storage::mockdb::MockDB; - 
use namada::ledger::storage::{BlockStateWrite, MerkleTree, Sha256Hasher};
-    use namada::types::address::EstablishedAddressGen;
+    use namada::ledger::storage::{update_allowed_conversions, Sha256Hasher};
     use namada::types::chain::ChainId;
     use namada::types::hash::Hash;
     use namada::types::key::*;
-    use namada::types::storage::{BlockHash, BlockResults, Epoch, Header};
+    use namada::types::storage::{BlockHash, Epoch, Epochs, Header};
     use namada::types::transaction::{Fee, WrapperTx};
     use tempfile::tempdir;
     use tokio::sync::mpsc::UnboundedReceiver;
@@ -1005,11 +1165,18 @@ mod test_utils {
             tx_wasm_compilation_cache,
             native_token.clone(),
         );
+        shell
+            .wl_storage
+            .storage
+            .begin_block(BlockHash::default(), BlockHeight(1))
+            .expect("begin_block failed");
         let keypair = gen_keypair();
         // enqueue a wrapper tx
         let tx = Tx::new(
             "wasm_code".as_bytes().to_owned(),
             Some("transaction data".as_bytes().to_owned()),
+            shell.chain_id.clone(),
+            None,
         );
         let wrapper = WrapperTx::new(
             Fee {
@@ -1031,29 +1198,11 @@ mod test_utils {
         });
         // Artificially increase the block height so that chain
         // will read the new block when restarted
-        let merkle_tree = MerkleTree::<Sha256Hasher>::default();
-        let stores = merkle_tree.stores();
-        let hash = BlockHash([0; 32]);
-        let pred_epochs = Default::default();
-        let address_gen = EstablishedAddressGen::new("test");
-        shell
-            .wl_storage
-            .storage
-            .db
-            .write_block(BlockStateWrite {
-                merkle_tree_stores: stores,
-                header: None,
-                hash: &hash,
-                height: BlockHeight(1),
-                epoch: Epoch(0),
-                pred_epochs: &pred_epochs,
-                next_epoch_min_start_height: BlockHeight(3),
-                next_epoch_min_start_time: DateTimeUtc::now(),
-                address_gen: &address_gen,
-                results: &BlockResults::default(),
-                tx_queue: &shell.wl_storage.storage.tx_queue,
-            })
-            .expect("Test failed");
+        let mut pred_epochs: Epochs = Default::default();
+        pred_epochs.new_epoch(BlockHeight(1), 1000);
+        update_allowed_conversions(&mut shell.wl_storage)
+            .expect("update conversions failed");
+        shell.wl_storage.commit_block().expect("commit failed");
 
         // Drop the shell
         std::mem::drop(shell);
@@ -1075,3 +1224,335 @@
         assert!(!shell.wl_storage.storage.tx_queue.is_empty());
     }
 }
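The mempool tests that follow exercise `mempool_validate`'s checks in their fixed order: format, chain id, expiration, signature, replay protection, and finally fees. A compressed outline of that pipeline, with toy stand-in types only:

```rust
// The first failing check determines the error code, as in the tests below.
struct Tx {
    chain_id: String,
    expired: bool,
    signed: bool,
    replayed: bool,
}

#[derive(Debug, PartialEq)]
enum Code {
    Ok,
    InvalidChainId,
    ExpiredTx,
    InvalidSig,
    ReplayTx,
}

fn mempool_validate(tx: &Tx, expected_chain_id: &str) -> Code {
    if tx.chain_id != expected_chain_id {
        return Code::InvalidChainId; // cheapest check runs first
    }
    if tx.expired {
        return Code::ExpiredTx;
    }
    if !tx.signed {
        return Code::InvalidSig;
    }
    if tx.replayed {
        return Code::ReplayTx; // inner/wrapper hash already in storage
    }
    Code::Ok
}

fn main() {
    let tx = Tx {
        chain_id: "test-chain".into(),
        expired: false,
        signed: false,
        replayed: false,
    };
    // chain id matches and tx is fresh, so the missing signature is hit
    assert_eq!(mempool_validate(&tx, "test-chain"), Code::InvalidSig);
}
```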
+/// Test the failure cases of [`mempool_validate`]
+#[cfg(test)]
+mod test_mempool_validate {
+    use namada::proof_of_stake::Epoch;
+    use namada::proto::SignedTxData;
+    use namada::types::transaction::{Fee, WrapperTx};
+
+    use super::test_utils::TestShell;
+    use super::{MempoolTxType, *};
+
+    /// Mempool validation must reject unsigned wrappers
+    #[test]
+    fn test_missing_signature() {
+        let (shell, _) = TestShell::new();
+
+        let keypair = super::test_utils::gen_keypair();
+
+        let tx = Tx::new(
+            "wasm_code".as_bytes().to_owned(),
+            Some("transaction data".as_bytes().to_owned()),
+            shell.chain_id.clone(),
+            None,
+        );
+
+        let mut wrapper = WrapperTx::new(
+            Fee {
+                amount: 100.into(),
+                token: shell.wl_storage.storage.native_token.clone(),
+            },
+            &keypair,
+            Epoch(0),
+            0.into(),
+            tx,
+            Default::default(),
+            #[cfg(not(feature = "mainnet"))]
+            None,
+        )
+        .sign(&keypair, shell.chain_id.clone(), None)
+        .expect("Wrapper signing failed");
+
+        let unsigned_wrapper = if let Some(Ok(SignedTxData {
+            data: Some(data),
+            sig: _,
+        })) = wrapper
+            .data
+            .take()
+            .map(|data| SignedTxData::try_from_slice(&data[..]))
+        {
+            Tx::new(vec![], Some(data), shell.chain_id.clone(), None)
+        } else {
+            panic!("Test failed")
+        };
+
+        let mut result = shell.mempool_validate(
+            unsigned_wrapper.to_bytes().as_ref(),
+            MempoolTxType::NewTransaction,
+        );
+        assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig));
+        result = shell.mempool_validate(
+            unsigned_wrapper.to_bytes().as_ref(),
+            MempoolTxType::RecheckTransaction,
+        );
+        assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig));
+    }
+
+    /// Mempool validation must reject wrappers with an invalid signature
+    #[test]
+    fn test_invalid_signature() {
+        let (shell, _) = TestShell::new();
+
+        let keypair = super::test_utils::gen_keypair();
+
+        let tx = Tx::new(
+            "wasm_code".as_bytes().to_owned(),
+            Some("transaction data".as_bytes().to_owned()),
+            shell.chain_id.clone(),
+            None,
+        );
+
+        let mut wrapper = WrapperTx::new(
+            Fee {
+                amount: 100.into(),
+                token: shell.wl_storage.storage.native_token.clone(),
+            },
+            &keypair,
+            Epoch(0),
+            0.into(),
+            tx,
+            Default::default(),
+            #[cfg(not(feature = "mainnet"))]
+            None,
+        )
+        .sign(&keypair, shell.chain_id.clone(), None)
+        .expect("Wrapper signing failed");
+
+        let invalid_wrapper = if let Some(Ok(SignedTxData {
+            data: Some(data),
+            sig,
+        })) = wrapper
+            .data
+            .take()
+            .map(|data| SignedTxData::try_from_slice(&data[..]))
+        {
+            let mut new_wrapper = if let TxType::Wrapper(wrapper) =
+                <TxType as BorshDeserialize>::deserialize(&mut data.as_ref())
+                    .expect("Test failed")
+            {
+                wrapper
+            } else {
+                panic!("Test failed")
+            };
+
+            // we mount a malleability attack to try and remove the fee
+            new_wrapper.fee.amount = 0.into();
+            let new_data = TxType::Wrapper(new_wrapper)
+                .try_to_vec()
+                .expect("Test failed");
+            Tx::new(
+                vec![],
+                Some(
+                    SignedTxData {
+                        sig,
+                        data: Some(new_data),
+                    }
+                    .try_to_vec()
+                    .expect("Test failed"),
+                ),
+                shell.chain_id.clone(),
+                None,
+            )
+        } else {
+            panic!("Test failed");
+        };
+
+        let mut result = shell.mempool_validate(
+            invalid_wrapper.to_bytes().as_ref(),
+            MempoolTxType::NewTransaction,
+        );
+        assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig));
+        result = shell.mempool_validate(
+            invalid_wrapper.to_bytes().as_ref(),
+            MempoolTxType::RecheckTransaction,
+        );
+        assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig));
+    }
+
+    /// Mempool validation must reject non-wrapper txs
+    #[test]
+    fn test_wrong_tx_type() {
+        let (shell, _) = TestShell::new();
+
+        // Test Raw TxType
+        let tx = Tx::new(
+            "wasm_code".as_bytes().to_owned(),
+            None,
+            shell.chain_id.clone(),
+            None,
+        );
+
+        let result = shell.mempool_validate(
+            tx.to_bytes().as_ref(),
+            MempoolTxType::NewTransaction,
+        );
+        assert_eq!(result.code, u32::from(ErrorCodes::InvalidTx));
+        assert_eq!(result.log, "Unsupported tx type")
+    }
+
+    /// Mempool validation must reject already applied wrapper and decrypted
+    /// transactions
+    #[test]
+    fn test_replay_attack() {
+        let (mut shell, _) = TestShell::new();
+
+        let keypair = super::test_utils::gen_keypair();
+
+        let tx = Tx::new(
+            "wasm_code".as_bytes().to_owned(),
+            Some("transaction data".as_bytes().to_owned()),
+            shell.chain_id.clone(),
+            None,
+        );
+
+        let wrapper = WrapperTx::new(
+            Fee {
+                amount: 100.into(),
+                token: shell.wl_storage.storage.native_token.clone(),
+            },
+            &keypair,
+            Epoch(0),
+            0.into(),
+            tx,
+            Default::default(),
+            #[cfg(not(feature = "mainnet"))]
+            None,
+        )
+        .sign(&keypair, shell.chain_id.clone(), None)
+        .expect("Wrapper signing failed");
+
+        let tx_type = match process_tx(wrapper.clone()).expect("Test failed") {
+            TxType::Wrapper(t) => t,
+            _ => panic!("Test failed"),
+        };
+
+        // Write wrapper hash to storage
+        let wrapper_hash = hash::Hash(wrapper.unsigned_hash());
+        let wrapper_hash_key =
+            replay_protection::get_tx_hash_key(&wrapper_hash);
+        shell
+            .wl_storage
+            .storage
+
.write(&wrapper_hash_key, &wrapper_hash) + .expect("Test failed"); + + // Try wrapper tx replay attack + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!( + result.log, + format!( + "Wrapper transaction hash {} already in storage, replay \ + attempt", + wrapper_hash + ) + ); + + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::RecheckTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!( + result.log, + format!( + "Wrapper transaction hash {} already in storage, replay \ + attempt", + wrapper_hash + ) + ); + + // Write inner hash in storage + let inner_hash_key = + replay_protection::get_tx_hash_key(&tx_type.tx_hash); + shell + .wl_storage + .storage + .write(&inner_hash_key, &tx_type.tx_hash) + .expect("Test failed"); + + // Try inner tx replay attack + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!( + result.log, + format!( + "Inner transaction hash {} already in storage, replay attempt", + tx_type.tx_hash + ) + ); + + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::RecheckTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!( + result.log, + format!( + "Inner transaction hash {} already in storage, replay attempt", + tx_type.tx_hash + ) + ) + } + + /// Check that a transaction with a wrong chain id gets discarded + #[test] + fn test_wrong_chain_id() { + let (shell, _) = TestShell::new(); + + let keypair = super::test_utils::gen_keypair(); + + let wrong_chain_id = ChainId("Wrong chain id".to_string()); + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + wrong_chain_id.clone(), + None, + ) + .sign(&keypair); + + let result = shell.mempool_validate( + tx.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidChainId)); + assert_eq!( + result.log, + format!( + "Tx carries a wrong chain id: expected {}, found {}", + shell.chain_id, wrong_chain_id + ) + ) + } + + /// Check that an expired transaction gets rejected + #[test] + fn test_expired_tx() { + let (shell, _) = TestShell::new(); + + let keypair = super::test_utils::gen_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + Some(DateTimeUtc::now()), + ) + .sign(&keypair); + + let result = shell.mempool_validate( + tx.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::ExpiredTx)); + } +} diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 1783a127fd7..d48d5cfcec9 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -1,25 +1,29 @@ //! 
Implementation of the [`RequestPrepareProposal`] ABCI++ method for the Shell
 
+use namada::core::hints;
 use namada::ledger::storage::{DBIter, StorageHasher, DB};
+use namada::proof_of_stake::pos_queries::PosQueries;
 use namada::proto::Tx;
 use namada::types::internal::WrapperTxInQueue;
+use namada::types::time::DateTimeUtc;
 use namada::types::transaction::tx_types::TxType;
 use namada::types::transaction::wrapper::wrapper_tx::PairingEngine;
 use namada::types::transaction::{AffineCurve, DecryptedTx, EllipticCurve};
 
 use super::super::*;
-use crate::facade::tendermint_proto::abci::RequestPrepareProposal;
+#[allow(unused_imports)]
+use super::block_space_alloc;
+use super::block_space_alloc::states::{
+    BuildingDecryptedTxBatch, BuildingProtocolTxBatch,
+    EncryptedTxBatchAllocator, NextState, TryAlloc,
+};
+use super::block_space_alloc::{AllocFailure, BlockSpaceAllocator};
 #[cfg(feature = "abcipp")]
-use crate::facade::tendermint_proto::abci::{tx_record::TxAction, TxRecord};
+use crate::facade::tendermint_proto::abci::ExtendedCommitInfo;
+use crate::facade::tendermint_proto::abci::RequestPrepareProposal;
+use crate::facade::tendermint_proto::google::protobuf::Timestamp;
 use crate::node::ledger::shell::{process_tx, ShellMode};
-use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes;
-
-// TODO: remove this hard-coded value; Tendermint, and thus
-// Namada uses 20 MiB max block sizes by default; 5 MiB leaves
-// plenty of room for header data, evidence and protobuf serialization
-// overhead
-const MAX_PROPOSAL_SIZE: usize = 5 << 20;
-const HALF_MAX_PROPOSAL_SIZE: usize = MAX_PROPOSAL_SIZE / 2;
+use crate::node::ledger::shims::abcipp_shim_types::shim::{response, TxBytes};
 
 impl<D, H> Shell<D, H>
 where
@@ -28,10 +32,8 @@
 {
     /// Begin a new block.
     ///
-    /// We fill half the block space with new wrapper txs given to us
-    /// from the mempool by tendermint. The rest of the block is filled
-    /// with decryptions of the wrapper txs from the previously
-    /// committed block.
+    /// Block construction is documented in [`block_space_alloc`]
+    /// and [`block_space_alloc::states`].
     ///
     /// INVARIANT: Any changes applied in this method must be reverted if
    /// the proposal is rejected (unless we can simply overwrite
@@ -41,64 +43,166 @@ where
         req: RequestPrepareProposal,
     ) -> response::PrepareProposal {
         let txs = if let ShellMode::Validator { ..
} = self.mode {
-            // TODO: This should not be hardcoded
-            let privkey = <EllipticCurve as PairingEngine>::G2Affine::prime_subgroup_generator();
+            // start counting allotted space for txs
+            let alloc = self.get_encrypted_txs_allocator();
 
-            // TODO: Craft the Ethereum state update tx
-            // filter in half of the new txs from Tendermint, only keeping
-            // wrappers
-            let mut total_proposal_size = 0;
-            #[cfg(feature = "abcipp")]
-            let mut txs: Vec<TxRecord> = req
-                .txs
-                .into_iter()
-                .map(|tx_bytes| {
-                    if let Ok(Ok(TxType::Wrapper(_))) =
-                        Tx::try_from(tx_bytes.as_slice()).map(process_tx)
-                    {
-                        record::keep(tx_bytes)
-                    } else {
-                        record::remove(tx_bytes)
-                    }
-                })
-                .take_while(|tx_record| {
-                    let new_size = total_proposal_size + tx_record.tx.len();
-                    if new_size > HALF_MAX_PROPOSAL_SIZE
-                        || tx_record.action != TxAction::Unmodified as i32
-                    {
-                        false
-                    } else {
-                        total_proposal_size = new_size;
-                        true
-                    }
-                })
-                .collect();
-            #[cfg(not(feature = "abcipp"))]
-            let mut txs: Vec<TxBytes> = req
-                .txs
-                .into_iter()
-                .filter_map(|tx_bytes| {
-                    if let Ok(Ok(TxType::Wrapper(_))) =
-                        Tx::try_from(tx_bytes.as_slice()).map(process_tx)
-                    {
-                        Some(tx_bytes)
-                    } else {
-                        None
+            // add encrypted txs
+            let (encrypted_txs, alloc) =
+                self.build_encrypted_txs(alloc, &req.txs, &req.time);
+            let mut txs = encrypted_txs;
+
+            // decrypt the wrapper txs included in the previous block
+            let (mut decrypted_txs, alloc) = self.build_decrypted_txs(alloc);
+            txs.append(&mut decrypted_txs);
+
+            // add vote extension protocol txs
+            let mut protocol_txs = self.build_protocol_txs(
+                alloc,
+                #[cfg(feature = "abcipp")]
+                req.local_last_commit,
+                #[cfg(not(feature = "abcipp"))]
+                &req.txs,
+            );
+            txs.append(&mut protocol_txs);
+
+            txs
+        } else {
+            vec![]
+        };
+
+        tracing::info!(
+            height = req.height,
+            num_of_txs = txs.len(),
+            "Proposing block"
+        );
+
+        response::PrepareProposal { txs }
+    }
+
+    /// Depending on the current block height offset within the epoch,
+    /// transition state accordingly, and return a block space allocator
+    /// with or without encrypted txs.
+    ///
+    /// # How to determine which path to take in the states DAG
+    ///
+    /// If we are at the second or third block height offset within an
+    /// epoch, we do not allow encrypted transactions to be included in
+    /// a block, therefore we return an allocator wrapped in an
+    /// [`EncryptedTxBatchAllocator::WithoutEncryptedTxs`] value.
+    /// Otherwise, we return an allocator wrapped in an
+    /// [`EncryptedTxBatchAllocator::WithEncryptedTxs`] value.
+    #[inline]
+    fn get_encrypted_txs_allocator(&self) -> EncryptedTxBatchAllocator {
+        let pos_queries = self.wl_storage.pos_queries();
+
+        let is_2nd_height_off = pos_queries.is_deciding_offset_within_epoch(1);
+        let is_3rd_height_off = pos_queries.is_deciding_offset_within_epoch(2);
+
+        if hints::unlikely(is_2nd_height_off || is_3rd_height_off) {
+            tracing::warn!(
+                proposal_height =
+                    ?pos_queries.get_current_decision_height(),
+                "No mempool txs are being included in the current proposal"
+            );
+            EncryptedTxBatchAllocator::WithoutEncryptedTxs(
+                (&self.wl_storage).into(),
+            )
+        } else {
+            EncryptedTxBatchAllocator::WithEncryptedTxs(
+                (&self.wl_storage).into(),
+            )
+        }
+    }
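The allocator choice above hinges on the block's height offset within the current epoch. A self-contained sketch of that decision follows; the height arithmetic is illustrative, not Namada's actual `PosQueries` implementation.

```rust
// Encrypted txs are kept out of proposals at the 2nd and 3rd block
// heights of an epoch, so their decryptions land before the epoch ends.
fn allows_encrypted_txs(current_height: u64, epoch_start_height: u64) -> bool {
    let offset = current_height - epoch_start_height;
    // offset 1 => 2nd height of the epoch, offset 2 => 3rd height
    !(offset == 1 || offset == 2)
}

fn main() {
    assert!(allows_encrypted_txs(100, 100)); // first height of the epoch
    assert!(!allows_encrypted_txs(101, 100)); // second: wrappers excluded
    assert!(!allows_encrypted_txs(102, 100)); // third: wrappers excluded
    assert!(allows_encrypted_txs(103, 100));
}
```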
+    /// Builds a batch of encrypted transactions, retrieved from
+    /// Tendermint's mempool.
+    fn build_encrypted_txs(
+        &self,
+        mut alloc: EncryptedTxBatchAllocator,
+        txs: &[TxBytes],
+        block_time: &Option<Timestamp>,
+    ) -> (Vec<TxBytes>, BlockSpaceAllocator<BuildingDecryptedTxBatch>) {
+        let pos_queries = self.wl_storage.pos_queries();
+        let block_time = block_time.clone().and_then(|block_time| {
+            // If the conversion fails, fall back to the last block's
+            // datetime; that value is valid because it has already
+            // passed the mempool check
+            TryInto::<DateTimeUtc>::try_into(block_time).ok()
+        });
+        let txs = txs
+            .iter()
+            .filter_map(|tx_bytes| {
+                if let Ok(tx) = Tx::try_from(tx_bytes.as_slice()) {
+                    // A tx without an expiration is always valid. If the
+                    // block time could not be retrieved, the last block's
+                    // datetime is used instead, which mempool_validate has
+                    // already checked against, so the tx is still valid
+                    if let (Some(block_time), Some(exp)) =
+                        (block_time.as_ref(), &tx.expiration)
+                    {
+                        if block_time > exp {
+                            return None;
+                        }
                     }
-                })
-                .take_while(|tx_bytes| {
-                    let new_size = total_proposal_size + tx_bytes.len();
-                    if new_size > HALF_MAX_PROPOSAL_SIZE {
-                        false
-                    } else {
-                        total_proposal_size = new_size;
-                        true
+                    if let Ok(TxType::Wrapper(_)) = process_tx(tx) {
+                        return Some(tx_bytes.clone());
                     }
-                })
-                .collect();
+                }
+                None
+            })
+            .take_while(|tx_bytes| {
+                alloc.try_alloc(&tx_bytes[..])
+                    .map_or_else(
+                        |status| match status {
+                            AllocFailure::Rejected { bin_space_left } => {
+                                tracing::debug!(
+                                    tx_bytes_len = tx_bytes.len(),
+                                    bin_space_left,
+                                    proposal_height =
+                                        ?pos_queries.get_current_decision_height(),
+                                    "Dropping encrypted tx from the current proposal",
+                                );
+                                false
+                            }
+                            AllocFailure::OverflowsBin { bin_size } => {
+                                // TODO: handle tx whose size is greater
+                                // than bin size
+                                tracing::warn!(
+                                    tx_bytes_len = tx_bytes.len(),
+                                    bin_size,
+                                    proposal_height =
+                                        ?pos_queries.get_current_decision_height(),
+                                    "Dropping large encrypted tx from the current proposal",
+                                );
                                true
+                            }
+                        },
+                        |()| true,
+                    )
+            })
+            .collect();
        let alloc = alloc.next_state();
 
-        // decrypt the wrapper txs included in the previous block
-        let decrypted_txs = self.wl_storage.storage.tx_queue.iter().map(
+        (txs, alloc)
+    }
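The `take_while` above relies on a two-outcome allocation protocol: `Rejected` means the block is full and scanning stops, while `OverflowsBin` flags a tx that could never fit and is only logged (mirroring the TODO in the diff). A toy sketch under those assumptions, with stand-in types:

```rust
// Bin allocator with the two failure modes used by the proposal builder.
enum AllocFailure {
    Rejected { bin_space_left: u64 },
    OverflowsBin { bin_size: u64 },
}

struct TxBin {
    capacity: u64,
    occupied: u64,
}

impl TxBin {
    fn try_alloc(&mut self, tx_len: u64) -> Result<(), AllocFailure> {
        if tx_len > self.capacity {
            return Err(AllocFailure::OverflowsBin { bin_size: self.capacity });
        }
        let left = self.capacity - self.occupied;
        if tx_len > left {
            return Err(AllocFailure::Rejected { bin_space_left: left });
        }
        self.occupied += tx_len;
        Ok(())
    }
}

fn main() {
    let mut bin = TxBin { capacity: 10, occupied: 0 };
    let txs = [4u64, 99, 4, 4, 4];
    let kept: Vec<u64> = txs
        .iter()
        .copied()
        .take_while(|&len| match bin.try_alloc(len) {
            Ok(()) => true,
            // mirrors the diff's TODO: oversized txs are only logged
            Err(AllocFailure::OverflowsBin { .. }) => true,
            // block is full: stop scanning the mempool
            Err(AllocFailure::Rejected { .. }) => false,
        })
        .collect();
    // 99 overflows the bin; the final 4 no longer fits after 4 + 4
    assert_eq!(kept, vec![4, 99, 4]);
}
```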
+    /// Builds a batch of DKG decrypted transactions.
+    // NOTE: we won't have frontrunning protection until V2 of the
+    // Anoma protocol; Namada runs V1, therefore this method is
+    // essentially a NOOP
+    //
+    // sources:
+    // - https://specs.namada.net/main/releases/v2.html
+    // - https://github.com/anoma/ferveo
+    fn build_decrypted_txs(
+        &self,
+        mut alloc: BlockSpaceAllocator<BuildingDecryptedTxBatch>,
+    ) -> (Vec<TxBytes>, BlockSpaceAllocator<BuildingProtocolTxBatch>) {
+        // TODO: This should not be hardcoded
+        let privkey =
+            <EllipticCurve as PairingEngine>::G2Affine::prime_subgroup_generator();
+
+        let pos_queries = self.wl_storage.pos_queries();
+        let txs = self
+            .wl_storage
+            .storage
+            .tx_queue
+            .iter()
+            .map(
                 |WrapperTxInQueue {
                      tx,
                      #[cfg(not(feature = "mainnet"))]
@@ -114,96 +218,81 @@ where
                     })
                     .to_bytes()
                 },
-        );
-        #[cfg(feature = "abcipp")]
-        let mut decrypted_txs: Vec<_> =
-            decrypted_txs.map(record::add).collect();
-        #[cfg(not(feature = "abcipp"))]
-        let mut decrypted_txs: Vec<_> = decrypted_txs.collect();
-
-        txs.append(&mut decrypted_txs);
-        txs
-    } else {
-        vec![]
-    };
-
-    #[cfg(feature = "abcipp")]
-    {
-        response::PrepareProposal {
-            tx_records: txs,
-            ..Default::default()
-        }
-    }
-    #[cfg(not(feature = "abcipp"))]
-    {
-        response::PrepareProposal { txs }
-    }
-    }
-}
-
-/// Functions for creating the appropriate TxRecord given the
-/// numeric code
-#[cfg(feature = "abcipp")]
-pub(super) mod record {
-    use super::*;
-
-    /// Keep this transaction in the proposal
-    pub fn keep(tx: TxBytes) -> TxRecord {
-        TxRecord {
-            action: TxAction::Unmodified as i32,
-            tx,
-        }
-    }
+            )
+            // TODO: make sure all decrypted txs are accepted
+            .take_while(|tx_bytes| {
+                alloc.try_alloc(&tx_bytes[..]).map_or_else(
+                    |status| match status {
+                        AllocFailure::Rejected { bin_space_left } => {
+                            tracing::warn!(
+                                tx_bytes_len = tx_bytes.len(),
+                                bin_space_left,
+                                proposal_height =
+                                    ?pos_queries.get_current_decision_height(),
+                                "Dropping decrypted tx from the current proposal",
+                            );
+                            false
+                        }
+                        AllocFailure::OverflowsBin { bin_size } => {
+                            tracing::warn!(
+                                tx_bytes_len = tx_bytes.len(),
+                                bin_size,
+                                proposal_height =
+                                    ?pos_queries.get_current_decision_height(),
+                                "Dropping large decrypted tx from the current proposal",
+                            );
                            true
+                        }
+                    },
+                    |()| true,
+                )
+            })
+            .collect();
        let alloc = alloc.next_state();
 
-    /// A transaction added to the proposal not provided by
-    /// Tendermint from the mempool
-    pub fn add(tx: TxBytes) -> TxRecord {
-        TxRecord {
-            action: TxAction::Added as i32,
-            tx,
-        }
+        (txs, alloc)
    }
 
-    /// Remove this transaction from the set provided
-    /// by Tendermint from the mempool
-    pub fn remove(tx: TxBytes) -> TxRecord {
-        TxRecord {
-            action: TxAction::Removed as i32,
-            tx,
-        }
+    /// Builds a batch of protocol transactions.
+    fn build_protocol_txs(
+        &self,
+        _alloc: BlockSpaceAllocator<BuildingProtocolTxBatch>,
+        #[cfg(feature = "abcipp")] _local_last_commit: Option<
+            ExtendedCommitInfo,
+        >,
+        #[cfg(not(feature = "abcipp"))] _txs: &[TxBytes],
+    ) -> Vec<TxBytes> {
+        // no protocol txs are implemented yet
+        vec![]
     }
 }
 
 #[cfg(test)]
 mod test_prepare_proposal {
+    use borsh::BorshSerialize;
 
-    use namada::types::storage::Epoch;
+    use namada::proof_of_stake::Epoch;
     use namada::types::transaction::{Fee, WrapperTx};
 
     use super::*;
-    use crate::node::ledger::shell::test_utils::{gen_keypair, TestShell};
+    use crate::node::ledger::shell::test_utils::{self, gen_keypair};
 
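Taken together, the three builders above assemble a proposal in a fixed order: encrypted wrappers, then decryptions of the previous block's wrappers, then (currently empty) protocol txs. A condensed, self-contained view of that pipeline, with the allocator's state transitions reduced to plain functions:

```rust
// Three batches built in order and concatenated, as in prepare_proposal.
type TxBytes = Vec<u8>;

fn build_encrypted_txs(mempool: &[TxBytes]) -> Vec<TxBytes> {
    mempool.to_vec() // the real code filters wrappers and meters space
}

fn build_decrypted_txs(queued: &[TxBytes]) -> Vec<TxBytes> {
    queued.to_vec() // decryptions of wrappers from the previous block
}

fn build_protocol_txs() -> Vec<TxBytes> {
    vec![] // no protocol txs are implemented yet, as in the diff
}

fn main() {
    let mempool = vec![b"wrapper-1".to_vec()];
    let queued = vec![b"decrypted-0".to_vec()];

    let mut txs = build_encrypted_txs(&mempool);
    txs.append(&mut build_decrypted_txs(&queued));
    txs.append(&mut build_protocol_txs());

    assert_eq!(txs.len(), 2);
}
```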
    /// Test that if a tx from the mempool is not a
    /// WrapperTx type, it is not included in the
    /// proposed block.
    #[test]
    fn test_prepare_proposal_rejects_non_wrapper_tx() {
-        let (shell, _) = TestShell::new();
+        let (shell, _) = test_utils::setup(1);
        let tx = Tx::new(
            "wasm_code".as_bytes().to_owned(),
            Some("transaction_data".as_bytes().to_owned()),
+            shell.chain_id.clone(),
+            None,
        );
        let req = RequestPrepareProposal {
            txs: vec![tx.to_bytes()],
-            max_tx_bytes: 0,
            ..Default::default()
        };
-        #[cfg(feature = "abcipp")]
-        assert_eq!(
-            shell.prepare_proposal(req).tx_records,
-            vec![record::remove(tx.to_bytes())]
-        );
-        #[cfg(not(feature = "abcipp"))]
        assert!(shell.prepare_proposal(req).txs.is_empty());
    }
 
@@ -212,11 +301,13 @@ mod test_prepare_proposal {
    /// we simply exclude it from the proposal
    #[test]
    fn test_error_in_processing_tx() {
-        let (shell, _) = TestShell::new();
+        let (shell, _) = test_utils::setup(1);
        let keypair = gen_keypair();
        let tx = Tx::new(
            "wasm_code".as_bytes().to_owned(),
            Some("transaction_data".as_bytes().to_owned()),
+            shell.chain_id.clone(),
+            None,
        );
        // an unsigned wrapper will cause an error in processing
        let wrapper = Tx::new(
@@ -238,20 +329,15 @@ mod test_prepare_proposal {
                .try_to_vec()
                .expect("Test failed"),
            ),
+            shell.chain_id.clone(),
+            None,
        )
        .to_bytes();
        #[allow(clippy::redundant_clone)]
        let req = RequestPrepareProposal {
            txs: vec![wrapper.clone()],
-            max_tx_bytes: 0,
            ..Default::default()
        };
-        #[cfg(feature = "abcipp")]
-        assert_eq!(
-            shell.prepare_proposal(req).tx_records,
-            vec![record::remove(wrapper)]
-        );
-        #[cfg(not(feature = "abcipp"))]
        assert!(shell.prepare_proposal(req).txs.is_empty());
    }
 
@@ -260,14 +346,13 @@ mod test_prepare_proposal {
    /// corresponding wrappers
    #[test]
    fn test_decrypted_txs_in_correct_order() {
-        let (mut shell, _) = TestShell::new();
+        let (mut shell, _) = test_utils::setup(1);
        let keypair = gen_keypair();
        let mut expected_wrapper = vec![];
        let mut expected_decrypted = vec![];
 
        let mut req = RequestPrepareProposal {
            txs: vec![],
-            max_tx_bytes: 0,
            ..Default::default()
        };
        // create a request with two new wrappers from mempool and
@@ -276,6 +361,8 @@ mod test_prepare_proposal {
            let tx = Tx::new(
                "wasm_code".as_bytes().to_owned(),
                Some(format!("transaction data: {}", i).as_bytes().to_owned()),
+                shell.chain_id.clone(),
+                None,
            );
            expected_decrypted.push(Tx::from(DecryptedTx::Decrypted {
                tx: tx.clone(),
@@ -295,63 +382,79 @@ mod test_prepare_proposal {
                #[cfg(not(feature = "mainnet"))]
                None,
            );
-            let wrapper = wrapper_tx.sign(&keypair).expect("Test failed");
+            let wrapper = wrapper_tx
+                .sign(&keypair, shell.chain_id.clone(), None)
+                .expect("Test failed");
            shell.enqueue_tx(wrapper_tx);
            expected_wrapper.push(wrapper.clone());
            req.txs.push(wrapper.to_bytes());
        }
-        // we extract the inner data from the txs for testing
-        // equality since otherwise changes in timestamps would
-        // fail the test
-        expected_wrapper.append(&mut expected_decrypted);
-        let expected_txs: Vec<Vec<u8>> = expected_wrapper
-            .iter()
-            .map(|tx| tx.data.clone().expect("Test failed"))
-            .collect();
+        let expected_txs: Vec<Vec<u8>> = expected_wrapper
+            .into_iter()
+            .chain(expected_decrypted.into_iter())
+            // we extract the inner data from the txs for testing
+            // equality since otherwise changes in timestamps would
+            // fail the test
+            .map(|tx| tx.data.expect("Test failed"))
+            .collect();
-        #[cfg(feature = "abcipp")]
-        {
-            let received: Vec<Vec<u8>> = shell
-                .prepare_proposal(req)
-                .tx_records
-                .iter()
-                .filter_map(
-                    |TxRecord {
-                         tx: tx_bytes,
-                         action,
-                     }| {
-                        if *action == (TxAction::Unmodified as i32)
-                            || *action == (TxAction::Added as i32)
-                        {
-                            Some(
-                                Tx::try_from(tx_bytes.as_slice())
-
.expect("Test failed") - .data - .expect("Test failed"), - ) - } else { - None - } - }, - ) - .collect(); - // check that the order of the txs is correct - assert_eq!(received, expected_txs); - } - #[cfg(not(feature = "abcipp"))] - { - let received: Vec> = shell - .prepare_proposal(req) - .txs - .into_iter() - .map(|tx_bytes| { - Tx::try_from(tx_bytes.as_slice()) - .expect("Test failed") - .data - .expect("Test failed") - }) - .collect(); - // check that the order of the txs is correct - assert_eq!(received, expected_txs); - } + let received: Vec = shell + .prepare_proposal(req) + .txs + .into_iter() + .map(|tx_bytes| { + Tx::try_from(tx_bytes.as_slice()) + .expect("Test failed") + .data + .expect("Test failed") + }) + .collect(); + // check that the order of the txs is correct + assert_eq!(received, expected_txs); + } + + /// Test that expired wrapper transactions are not included in the block + #[test] + fn test_expired_wrapper_tx() { + let (shell, _) = test_utils::setup(1); + let keypair = gen_keypair(); + let tx_time = DateTimeUtc::now(); + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, + ); + let wrapper_tx = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let wrapper = wrapper_tx + .sign(&keypair, shell.chain_id.clone(), Some(tx_time)) + .expect("Test failed"); + + let time = DateTimeUtc::now(); + let block_time = + namada::core::tendermint_proto::google::protobuf::Timestamp { + seconds: time.0.timestamp(), + nanos: time.0.timestamp_subsec_nanos() as i32, + }; + let req = RequestPrepareProposal { + txs: vec![wrapper.to_bytes()], + max_tx_bytes: 0, + time: Some(block_time), + ..Default::default() + }; + let result = shell.prepare_proposal(req); + eprintln!("Proposal: {:?}", result.txs); + assert!(result.txs.is_empty()); } } diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 7e55da3cea2..acc57e5979f 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -1,12 +1,60 @@ //! Implementation of the ['VerifyHeader`], [`ProcessProposal`], //! and [`RevertProposal`] ABCI++ methods for the Shell +use data_encoding::HEXUPPER; +use namada::core::hints; +use namada::core::ledger::storage::WlStorage; +use namada::core::types::hash::Hash; +use namada::ledger::storage::TempWlStorage; +use namada::proof_of_stake::pos_queries::PosQueries; use namada::types::internal::WrapperTxInQueue; use super::*; use crate::facade::tendermint_proto::abci::response_process_proposal::ProposalStatus; use crate::facade::tendermint_proto::abci::RequestProcessProposal; +use crate::node::ledger::shell::block_space_alloc::{ + threshold, AllocFailure, TxBin, +}; use crate::node::ledger::shims::abcipp_shim_types::shim::response::ProcessProposal; +use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; + +/// Validation metadata, to keep track of used resources or +/// transaction numbers, in a block proposal. +#[derive(Default)] +pub struct ValidationMeta { + /// Space utilized by encrypted txs. + pub encrypted_txs_bin: TxBin, + /// Space utilized by all txs. + pub txs_bin: TxBin, + /// Check if the decrypted tx queue has any elements + /// left. 
+/// Validation metadata, to keep track of used resources or
+/// transaction numbers, in a block proposal.
+#[derive(Default)]
+pub struct ValidationMeta {
+    /// Space utilized by encrypted txs.
+    pub encrypted_txs_bin: TxBin,
+    /// Space utilized by all txs.
+    pub txs_bin: TxBin,
+    /// Check if the decrypted tx queue has any elements
+    /// left.
+    ///
+    /// This field will only evaluate to true if a block
+    /// proposer didn't include all decrypted txs in a block.
+    pub decrypted_queue_has_remaining_txs: bool,
+    /// Check if a block has decrypted txs.
+    pub has_decrypted_txs: bool,
+}
+
+impl<D, H> From<&WlStorage<D, H>> for ValidationMeta
+where
+    D: 'static + DB + for<'iter> DBIter<'iter>,
+    H: 'static + StorageHasher,
+{
+    fn from(storage: &WlStorage<D, H>) -> Self {
+        let max_proposal_bytes =
+            storage.pos_queries().get_max_proposal_bytes().get();
+        let encrypted_txs_bin =
+            TxBin::init_over_ratio(max_proposal_bytes, threshold::ONE_THIRD);
+        let txs_bin = TxBin::init(max_proposal_bytes);
+        Self {
+            decrypted_queue_has_remaining_txs: false,
+            has_decrypted_txs: false,
+            encrypted_txs_bin,
+            txs_bin,
+        }
+    }
+}
 
 impl<D, H> Shell<D, H>
 where
@@ -21,34 +69,100 @@
         Default::default()
     }
 
-    /// Check all the txs in a block. Some txs may be incorrect,
-    /// but we only reject the entire block if the order of the
-    /// included txs violates the order decided upon in the previous
-    /// block.
+    /// Check all the txs in a block.
+    /// We reject the entire block when:
+    /// - decrypted txs violate the committed order
+    /// - more decrypted txs than expected
+    /// - checks on wrapper tx fail
+    ///
+    /// We cannot reject the block for failed checks on the decrypted txs since
+    /// their order has already been committed in storage, so we simply discard
+    /// the single invalid inner tx
     pub fn process_proposal(
         &self,
         req: RequestProcessProposal,
     ) -> ProcessProposal {
-        let tx_results = self.process_txs(&req.txs);
+        let (tx_results, metadata) =
+            self.process_txs(&req.txs, self.get_block_timestamp(req.time));
+
+        // Erroneous transactions were detected when processing
+        // the leader's proposal. We allow txs that do not
+        // deserialize properly, that have invalid signatures
+        // and that have invalid wasm code to reach FinalizeBlock.
+        let invalid_txs = tx_results.iter().any(|res| {
+            let error = ErrorCodes::from_u32(res.code).expect(
+                "All error codes returned from process_single_tx are valid",
+            );
+            !error.is_recoverable()
+        });
+        if invalid_txs {
+            tracing::warn!(
+                proposer = ?HEXUPPER.encode(&req.proposer_address),
+                height = req.height,
+                hash = ?HEXUPPER.encode(&req.hash),
+                "Found invalid transactions, proposed block will be rejected"
+            );
+        }
+
+        let has_remaining_decrypted_txs =
+            metadata.decrypted_queue_has_remaining_txs;
+        if has_remaining_decrypted_txs {
+            tracing::warn!(
+                proposer = ?HEXUPPER.encode(&req.proposer_address),
+                height = req.height,
+                hash = ?HEXUPPER.encode(&req.hash),
+                "Not all decrypted txs from the previous height were included \
+                 in the proposal; the block will be rejected"
+            );
+        }
+
+        let will_reject_proposal = invalid_txs || has_remaining_decrypted_txs;
+
+        let status = if will_reject_proposal {
+            ProposalStatus::Reject
+        } else {
+            ProposalStatus::Accept
+        };
 
         ProcessProposal {
-            status: if tx_results.iter().any(|res| res.code > 3) {
-                ProposalStatus::Reject as i32
-            } else {
-                ProposalStatus::Accept as i32
-            },
+            status: status as i32,
             tx_results,
         }
    }
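The accept/reject decision above reduces to whether any tx produced a non-recoverable error code. A distilled sketch of that rule, mirroring `ErrorCodes::is_recoverable` with an abbreviated set of codes:

```rust
// A proposal is rejected if any tx result carries a code that cannot
// be recovered from at the FinalizeBlock stage.
#[derive(Clone, Copy)]
enum Code {
    Ok,
    WasmRuntimeError,
    InvalidSig,
    ReplayTx,
}

const fn is_recoverable(code: Code) -> bool {
    // Recoverable errors can still be handled at FinalizeBlock
    matches!(code, Code::Ok | Code::WasmRuntimeError)
}

fn should_accept(results: &[Code]) -> bool {
    results.iter().all(|&code| is_recoverable(code))
}

fn main() {
    assert!(should_accept(&[Code::Ok, Code::WasmRuntimeError]));
    assert!(!should_accept(&[Code::Ok, Code::ReplayTx]));
    assert!(!should_accept(&[Code::InvalidSig]));
}
```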
     /// Check all the given txs.
-    pub fn process_txs(&self, txs: &[Vec<u8>]) -> Vec<TxResult> {
+    pub fn process_txs(
+        &self,
+        txs: &[TxBytes],
+        block_time: DateTimeUtc,
+    ) -> (Vec<TxResult>, ValidationMeta) {
         let mut tx_queue_iter = self.wl_storage.storage.tx_queue.iter();
-        txs.iter()
+        let mut temp_wl_storage = TempWlStorage::new(&self.wl_storage.storage);
+        let mut metadata = ValidationMeta::from(&self.wl_storage);
+        let tx_results = txs
+            .iter()
             .map(|tx_bytes| {
-                self.process_single_tx(tx_bytes, &mut tx_queue_iter)
+                let result = self.process_single_tx(
+                    tx_bytes,
+                    &mut tx_queue_iter,
+                    &mut metadata,
+                    &mut temp_wl_storage,
+                    block_time,
+                );
+                if let ErrorCodes::Ok =
+                    ErrorCodes::from_u32(result.code).unwrap()
+                {
+                    temp_wl_storage.write_log.commit_tx();
+                } else {
+                    temp_wl_storage.write_log.drop_tx();
+                }
+                result
             })
-            .collect()
+            .collect();
+        metadata.decrypted_queue_has_remaining_txs =
+            !self.wl_storage.storage.tx_queue.is_empty()
+                && tx_queue_iter.next().is_some();
+        (tx_results, metadata)
    }
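`process_txs` above validates each tx against a temporary overlay that is committed on success and dropped on failure, so later txs in the same proposal see earlier valid writes without touching persistent state. A sketch of that per-tx write-log discipline, with a `HashMap` standing in for the real write log:

```rust
// Per-tx commit/drop over a temporary overlay, as in process_txs.
use std::collections::HashMap;

struct TempStorage {
    committed: HashMap<String, String>, // the proposal-wide overlay
    pending: HashMap<String, String>,   // the current tx's writes
}

impl TempStorage {
    fn write(&mut self, key: &str, val: &str) {
        self.pending.insert(key.into(), val.into());
    }

    fn commit_tx(&mut self) {
        self.committed.extend(self.pending.drain());
    }

    fn drop_tx(&mut self) {
        self.pending.clear();
    }
}

fn main() {
    let mut temp = TempStorage {
        committed: HashMap::new(),
        pending: HashMap::new(),
    };

    // first tx validates: its hash becomes visible to later txs
    temp.write("tx_hash/abc", "seen");
    temp.commit_tx();

    // second tx fails validation: nothing it wrote survives
    temp.write("tx_hash/def", "seen");
    temp.drop_tx();

    assert!(temp.committed.contains_key("tx_hash/abc"));
    assert!(!temp.committed.contains_key("tx_hash/def"));
}
```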
} => { + "The given tx is larger than the max configured \ + proposal size" + } + } + .into(), + }; + } + + let maybe_tx = Tx::try_from(tx_bytes).map_or_else( + |err| { + tracing::debug!( + ?err, + "Couldn't deserialize transaction received during \ + PrepareProposal" + ); + Err(TxResult { code: ErrorCodes::InvalidTx.into(), info: "The submitted transaction was not deserializable" .into(), - }; - } + }) + }, + |tx| { + let tx_chain_id = tx.chain_id.clone(); + let tx_expiration = tx.expiration; + let tx_type = process_tx(tx).map_err(|err| { + // This occurs if the wrapper / protocol tx signature is + // invalid + TxResult { + code: ErrorCodes::InvalidSig.into(), + info: err.to_string(), + } + })?; + Ok((tx_chain_id, tx_expiration, tx_type)) + }, + ); + let (tx_chain_id, tx_expiration, tx) = match maybe_tx { + Ok(tx) => tx, + Err(tx_result) => return tx_result, }; + // TODO: This should not be hardcoded let privkey = ::G2Affine::prime_subgroup_generator(); - match process_tx(tx) { - // This occurs if the wrapper / protocol tx signature is invalid - Err(err) => TxResult { - code: ErrorCodes::InvalidSig.into(), - info: err.to_string(), + match tx { + // If it is a raw transaction, we do no further validation + TxType::Raw(_) => TxResult { + code: ErrorCodes::InvalidTx.into(), + info: "Transaction rejected: Non-encrypted transactions are \ + not supported" + .into(), }, - Ok(result) => match result { - // If it is a raw transaction, we do no further validation - TxType::Raw(_) => TxResult { - code: ErrorCodes::InvalidTx.into(), - info: "Transaction rejected: Non-encrypted transactions \ - are not supported" - .into(), - }, - TxType::Protocol(_) => TxResult { + TxType::Protocol(_) => { + // Tx chain id + if tx_chain_id != self.chain_id { + return TxResult { + code: ErrorCodes::InvalidChainId.into(), + info: format!( + "Tx carries a wrong chain id: expected {}, found \ + {}", + self.chain_id, tx_chain_id + ), + }; + } + + // Tx expiration + if let Some(exp) = tx_expiration { + if block_time > exp { + return TxResult { + code: ErrorCodes::ExpiredTx.into(), + info: format!( + "Tx expired at {:#?}, block time: {:#?}", + exp, block_time + ), + }; + } + } + TxResult { code: ErrorCodes::InvalidTx.into(), info: "Protocol transactions are a fun new feature that \ is coming soon to a blockchain near you. Patience." 
.into(), - }, - TxType::Decrypted(tx) => match tx_queue_iter.next() { - Some(WrapperTxInQueue { - tx: wrapper, - #[cfg(not(feature = "mainnet"))] - has_valid_pow: _, - }) => { - if wrapper.tx_hash != tx.hash_commitment() { + } + } + TxType::Decrypted(tx) => { + metadata.has_decrypted_txs = true; + match tx_queue_iter.next() { + Some(wrapper) => { + if wrapper.tx.tx_hash != tx.hash_commitment() { TxResult { code: ErrorCodes::InvalidOrder.into(), info: "Process proposal rejected a decrypted \ @@ -122,6 +299,41 @@ where .into(), } } else if verify_decrypted_correctly(&tx, privkey) { + if let DecryptedTx::Decrypted { + tx, + has_valid_pow: _, + } = tx + { + // Tx chain id + if tx.chain_id != self.chain_id { + return TxResult { + code: + ErrorCodes::InvalidDecryptedChainId + .into(), + info: format!( + "Decrypted tx carries a wrong \ + chain id: expected {}, found {}", + self.chain_id, tx.chain_id + ), + }; + } + + // Tx expiration + if let Some(exp) = tx.expiration { + if block_time > exp { + return TxResult { + code: + ErrorCodes::ExpiredDecryptedTx + .into(), + info: format!( + "Decrypted tx expired at \ + {:#?}, block time: {:#?}", + exp, block_time + ), + }; + } + } + } TxResult { code: ErrorCodes::Ok.into(), info: "Process Proposal accepted this \ @@ -129,6 +341,7 @@ where .into(), } } else { + // Wrong inner tx commitment TxResult { code: ErrorCodes::InvalidTx.into(), info: "The encrypted payload of tx was \ @@ -142,59 +355,164 @@ where info: "Received more decrypted txs than expected" .into(), }, - }, - TxType::Wrapper(tx) => { - // validate the ciphertext via Ferveo - if !tx.validate_ciphertext() { - TxResult { - code: ErrorCodes::InvalidTx.into(), + } + } + TxType::Wrapper(wrapper) => { + // decrypted txs shouldn't show up before wrapper txs + if metadata.has_decrypted_txs { + return TxResult { + code: ErrorCodes::InvalidTx.into(), + info: "Decrypted txs should not be proposed before \ + wrapper txs" + .into(), + }; + } + // try to allocate space for this encrypted tx + if let Err(e) = metadata.encrypted_txs_bin.try_dump(tx_bytes) { + return TxResult { + code: ErrorCodes::AllocationError.into(), + info: match e { + AllocFailure::Rejected { .. } => { + "No more space left in the block for wrapper \ + txs" + } + AllocFailure::OverflowsBin { .. 
} => { + "The given wrapper tx is larger than 1/3 of \ + the available block space" + } + } + .into(), + }; + } + if hints::unlikely(self.encrypted_txs_not_allowed()) { + return TxResult { + code: ErrorCodes::AllocationError.into(), + info: "Wrapper txs not allowed at the current block \ + height" + .into(), + }; + } + + // ChainId check + if tx_chain_id != self.chain_id { + return TxResult { + code: ErrorCodes::InvalidChainId.into(), + info: format!( + "Tx carries a wrong chain id: expected {}, found \ + {}", + self.chain_id, tx_chain_id + ), + }; + } + + // Tx expiration + if let Some(exp) = tx_expiration { + if block_time > exp { + return TxResult { + code: ErrorCodes::ExpiredTx.into(), + info: format!( + "Tx expired at {:#?}, block time: {:#?}", + exp, block_time + ), + }; + } + } + + // validate the ciphertext via Ferveo + if !wrapper.validate_ciphertext() { + TxResult { + code: ErrorCodes::InvalidTx.into(), + info: format!( + "The ciphertext of the wrapped tx {} is invalid", + hash_tx(tx_bytes) + ), + } + } else { + // Replay protection checks + let inner_hash_key = + replay_protection::get_tx_hash_key(&wrapper.tx_hash); + if temp_wl_storage.has_key(&inner_hash_key).expect( + "Error while checking inner tx hash key in storage", + ) { + return TxResult { + code: ErrorCodes::ReplayTx.into(), + info: format!( + "Inner transaction hash {} already in \ + storage, replay attempt", + &wrapper.tx_hash + ), + }; + } + + // Write inner hash to WAL + temp_wl_storage + .write_log + .write(&inner_hash_key, vec![]) + .expect( + "Couldn't write inner transaction hash to write \ + log", + ); + + let tx = Tx::try_from(tx_bytes) + .expect("Deserialization shouldn't fail"); + let wrapper_hash = Hash(tx.unsigned_hash()); + let wrapper_hash_key = + replay_protection::get_tx_hash_key(&wrapper_hash); + if temp_wl_storage.has_key(&wrapper_hash_key).expect( + "Error while checking wrapper tx hash key in storage", + ) { + return TxResult { + code: ErrorCodes::ReplayTx.into(), info: format!( - "The ciphertext of the wrapped tx {} is \ - invalid", - hash_tx(tx_bytes) + "Wrapper transaction hash {} already in \ + storage, replay attempt", + wrapper_hash ), + }; + } + + // Write wrapper hash to WAL + temp_wl_storage + .write_log + .write(&wrapper_hash_key, vec![]) + .expect("Couldn't write wrapper tx hash to write log"); + + // If the public key corresponds to the MASP sentinel + // transaction key, then the fee payer is effectively + // the MASP, otherwise derive + // the payer from public key. + let fee_payer = if wrapper.pk != masp_tx_key().ref_to() { + wrapper.fee_payer() + } else { + masp() + }; + // check that the fee payer has sufficient balance + let balance = + self.get_balance(&wrapper.fee.token, &fee_payer); + + // In testnets, tx is allowed to skip fees if it + // includes a valid PoW + #[cfg(not(feature = "mainnet"))] + let has_valid_pow = self.has_valid_pow_solution(&wrapper); + #[cfg(feature = "mainnet")] + let has_valid_pow = false; + + if has_valid_pow || self.get_wrapper_tx_fees() <= balance { + TxResult { + code: ErrorCodes::Ok.into(), + info: "Process proposal accepted this transaction" + .into(), } } else { - // If the public key corresponds to the MASP sentinel - // transaction key, then the fee payer is effectively - // the MASP, otherwise derive - // they payer from public key.
- let fee_payer = if tx.pk != masp_tx_key().ref_to() { - tx.fee_payer() - } else { - masp() - }; - // check that the fee payer has sufficient balance - let balance = - self.get_balance(&tx.fee.token, &fee_payer); - - // In testnets, tx is allowed to skip fees if it - // includes a valid PoW - #[cfg(not(feature = "mainnet"))] - let has_valid_pow = self.has_valid_pow_solution(&tx); - #[cfg(feature = "mainnet")] - let has_valid_pow = false; - - if has_valid_pow - || self.get_wrapper_tx_fees() <= balance - { - TxResult { - code: ErrorCodes::Ok.into(), - info: "Process proposal accepted this \ - transaction" - .into(), - } - } else { - TxResult { - code: ErrorCodes::InvalidTx.into(), - info: "The address given does not have \ - sufficient balance to pay fee" - .into(), - } + TxResult { + code: ErrorCodes::InvalidTx.into(), + info: "The address given does not have sufficient \ + balance to pay fee" + .into(), } } } - }, + } } } @@ -204,6 +522,15 @@ where ) -> shim::response::RevertProposal { Default::default() } + + /// Checks if it is not possible to include encrypted txs at the current + /// block height. + fn encrypted_txs_not_allowed(&self) -> bool { + let pos_queries = self.wl_storage.pos_queries(); + let is_2nd_height_off = pos_queries.is_deciding_offset_within_epoch(1); + let is_3rd_height_off = pos_queries.is_deciding_offset_within_epoch(2); + is_2nd_height_off || is_3rd_height_off + } } /// We test the failure cases of [`process_proposal`]. The happy flows @@ -211,30 +538,32 @@ where #[cfg(test)] mod test_process_proposal { use borsh::BorshDeserialize; + use namada::ledger::parameters::storage::get_wrapper_tx_fees_key; use namada::proto::SignedTxData; use namada::types::hash::Hash; use namada::types::key::*; use namada::types::storage::Epoch; use namada::types::token::Amount; use namada::types::transaction::encrypted::EncryptedTx; - use namada::types::transaction::{EncryptionKey, Fee, WrapperTx}; + use namada::types::transaction::protocol::ProtocolTxType; + use namada::types::transaction::{EncryptionKey, Fee, WrapperTx, MIN_FEE}; use super::*; - use crate::facade::tendermint_proto::abci::RequestInitChain; - use crate::facade::tendermint_proto::google::protobuf::Timestamp; use crate::node::ledger::shell::test_utils::{ - gen_keypair, ProcessProposal, TestError, TestShell, + self, gen_keypair, ProcessProposal, TestError, }; - /// Test that if a wrapper tx is not signed, it is rejected + /// Test that if a wrapper tx is not signed, the block is rejected /// by [`process_proposal`]. 
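Several of the tests that follow exercise the replay-protection bookkeeping added to `process_single_tx` above: both the inner and the wrapper hash are checked against storage and against hashes already staged by earlier txs in the same proposal, with the inner hash checked first. A simplified sketch of that two-set check, using an in-memory stand-in for storage plus the temporary write log rather than the real `replay_protection` keys:

use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct TxHash([u8; 32]);

#[derive(Default)]
struct ReplayGuard {
    committed: HashSet<TxHash>, // hashes already committed to storage
    staged: HashSet<TxHash>,    // hashes staged by earlier txs in this block
}

impl ReplayGuard {
    // Checks the inner hash before the wrapper hash, as above.
    fn check_and_stage(
        &mut self,
        inner: TxHash,
        wrapper: TxHash,
    ) -> Result<(), &'static str> {
        for hash in [inner, wrapper] {
            if self.committed.contains(&hash) || self.staged.contains(&hash) {
                return Err("hash already in storage, replay attempt");
            }
        }
        // Only staged once the whole tx passes, mirroring how the
        // temporary write log commits or drops a tx's writes.
        self.staged.insert(inner);
        self.staged.insert(wrapper);
        Ok(())
    }
}

fn main() {
    let mut guard = ReplayGuard::default();
    let (inner, wrapper) = (TxHash([1; 32]), TxHash([2; 32]));
    assert!(guard.check_and_stage(inner, wrapper).is_ok());
    // The same wrapper proposed twice in one block fails on the inner
    // hash, as in `test_wrapper_tx_hash_same_block` below.
    assert!(guard.check_and_stage(inner, wrapper).is_err());
}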
#[test] fn test_unsigned_wrapper_rejected() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(1); let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -252,6 +581,8 @@ mod test_process_proposal { let tx = Tx::new( vec![], Some(TxType::Wrapper(wrapper).try_to_vec().expect("Test failed")), + shell.chain_id.clone(), + None, ) .to_bytes(); #[allow(clippy::redundant_clone)] @@ -259,30 +590,32 @@ mod test_process_proposal { txs: vec![tx.clone()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidSig)); - assert_eq!( - response.result.info, - String::from("Wrapper transactions must be signed") - ); + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidSig) + ); + assert_eq!( + response[0].result.info, + String::from("Wrapper transactions must be signed") + ); + } + } } - /// Test that a wrapper tx with invalid signature is rejected + /// Test that a block including a wrapper tx with invalid signature is + /// rejected #[test] fn test_wrapper_bad_signature_rejected() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(1); let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); let timestamp = tx.timestamp; let mut wrapper = WrapperTx::new( @@ -298,7 +631,7 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); let new_tx = if let Some(Ok(SignedTxData { data: Some(data), @@ -323,7 +656,7 @@ mod test_process_proposal { .try_to_vec() .expect("Test failed"); Tx { - code: vec![], + code_or_hash: vec![], data: Some( SignedTxData { sig, @@ -333,6 +666,8 @@ mod test_process_proposal { .expect("Test failed"), ), timestamp, + chain_id: shell.chain_id.clone(), + expiration: None, } } else { panic!("Test failed"); @@ -340,34 +675,45 @@ mod test_process_proposal { let request = ProcessProposal { txs: vec![new_tx.to_bytes()], }; - let response = if let [response] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - response.clone() - } else { - panic!("Test failed") - }; - let expected_error = "Signature verification failed: Invalid signature"; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidSig)); - assert!( - response.result.info.contains(expected_error), - "Result info {} doesn't contain the expected error {}", - response.result.info, - expected_error - ); + + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + let expected_error = + "Signature verification failed: Invalid signature"; + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidSig) + ); + assert!( + response[0].result.info.contains(expected_error), + "Result info {} doesn't contain the expected error {}", + response[0].result.info, + expected_error + ); + } + } } /// Test that if the account submitting the tx is not known and the fee is - /// non-zero, 
[`process_proposal`] rejects that tx + /// non-zero, [`process_proposal`] rejects that block #[test] fn test_wrapper_unknown_address() { - let (mut shell, _) = TestShell::new(); - let keypair = crate::wallet::defaults::keys().remove(0).1; + let (mut shell, _) = test_utils::setup(1); + shell + .wl_storage + .write_log + .write( + &get_wrapper_tx_fees_key(), + token::Amount::whole(MIN_FEE).try_to_vec().unwrap(), + ) + .unwrap(); + let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -382,34 +728,35 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); let request = ProcessProposal { txs: vec![wrapper.to_bytes()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); - assert_eq!( - response.result.info, - "The address given does not have sufficient balance to pay fee" - .to_string(), - ); + + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidTx) + ); + assert_eq!( + response[0].result.info, + "The address given does not have sufficient balance to \ + pay fee" + .to_string(), + ); + } + } } /// Test that if the account submitting the tx does /// not have sufficient balance to pay the fee, - /// [`process_proposal`] rejects that tx + /// [`process_proposal`] rejects the entire block #[test] fn test_wrapper_insufficient_balance_address() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(1); let keypair = crate::wallet::defaults::daewon_keypair(); // reduce address balance to match the 100 token fee let balance_key = token::balance_key( @@ -418,13 +765,23 @@ mod test_process_proposal { ); shell .wl_storage - .storage + .write_log .write(&balance_key, Amount::whole(99).try_to_vec().unwrap()) .unwrap(); + shell + .wl_storage + .write_log + .write( + &get_wrapper_tx_fees_key(), + token::Amount::whole(MIN_FEE).try_to_vec().unwrap(), + ) + .unwrap(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -439,42 +796,44 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); let request = ProcessProposal { txs: vec![wrapper.to_bytes()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); - assert_eq!( - response.result.info, - String::from( - "The address given does not have sufficient balance to pay fee" - ) - ); + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidTx) + ); + assert_eq!( + response[0].result.info, + String::from( + "The address given does not have sufficient balance \ + to pay fee" + ) + ); + } + } } /// Test that if the 
expected order of decrypted txs is /// violated, [`process_proposal`] rejects it #[test] fn test_decrypted_txs_out_of_order() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(1); let keypair = gen_keypair(); let mut txs = vec![]; for i in 0..3 { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some(format!("transaction data: {}", i).as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -490,44 +849,35 @@ mod test_process_proposal { None, ); shell.enqueue_tx(wrapper); - txs.push(Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { - tx, - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - }))); + let mut decrypted_tx = + Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { + tx, + #[cfg(not(feature = "mainnet"))] + has_valid_pow: false, + })); + decrypted_tx.chain_id = shell.chain_id.clone(); + txs.push(decrypted_tx); } - let req_1 = ProcessProposal { - txs: vec![txs[0].to_bytes()], - }; - let response_1 = if let [resp] = shell - .process_proposal(req_1) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response_1.result.code, u32::from(ErrorCodes::Ok)); - - let req_2 = ProcessProposal { - txs: vec![txs[2].to_bytes()], - }; - - let response_2 = if let Err(TestError::RejectProposal(resp)) = - shell.process_proposal(req_2) - { - if let [resp] = resp.as_slice() { - resp.clone() + let response = { + let request = ProcessProposal { + txs: vec![ + txs[0].to_bytes(), + txs[2].to_bytes(), + txs[1].to_bytes(), + ], + }; + if let Err(TestError::RejectProposal(mut resp)) = + shell.process_proposal(request) + { + assert_eq!(resp.len(), 3); + resp.remove(1) } else { panic!("Test failed") } - } else { - panic!("Test failed") }; - assert_eq!(response_2.result.code, u32::from(ErrorCodes::InvalidOrder)); + assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidOrder)); assert_eq!( - response_2.result.info, + response.result.info, String::from( "Process proposal rejected a decrypted transaction that \ violated the tx order determined in the previous block" @@ -535,16 +885,18 @@ mod test_process_proposal { ); } - /// Test that a tx incorrectly labelled as undecryptable + /// Test that a block containing a tx incorrectly labelled as undecryptable /// is rejected by [`process_proposal`] #[test] fn test_incorrectly_labelled_as_undecryptable() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(1); let keypair = gen_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -561,30 +913,30 @@ mod test_process_proposal { ); shell.enqueue_tx(wrapper.clone()); - let tx = + let mut tx = Tx::from(TxType::Decrypted(DecryptedTx::Undecryptable(wrapper))); + tx.chain_id = shell.chain_id.clone(); let request = ProcessProposal { txs: vec![tx.to_bytes()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); - assert_eq!( - response.result.info, - String::from( - "The encrypted payload of tx was incorrectly marked as \ - un-decryptable" - ), - ) + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code,
u32::from(ErrorCodes::InvalidTx) + ); + assert_eq!( + response[0].result.info, + String::from( + "The encrypted payload of tx was incorrectly marked \ + as un-decryptable" + ), + ); + } + } } /// Test that a wrapper tx whose inner_tx does not have @@ -592,23 +944,14 @@ mod test_process_proposal { /// undecryptable but still accepted #[test] fn test_invalid_hash_commitment() { - let (mut shell, _) = TestShell::new(); - shell.init_chain( - RequestInitChain { - time: Some(Timestamp { - seconds: 0, - nanos: 0, - }), - chain_id: ChainId::default().to_string(), - ..Default::default() - }, - 1, - ); + let (mut shell, _) = test_utils::setup(1); let keypair = crate::wallet::defaults::daewon_keypair(); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); let mut wrapper = WrapperTx::new( Fee { @@ -626,10 +969,11 @@ mod test_process_proposal { wrapper.tx_hash = Hash([0; 32]); shell.enqueue_tx(wrapper.clone()); - let tx = Tx::from(TxType::Decrypted(DecryptedTx::Undecryptable( + let mut tx = Tx::from(TxType::Decrypted(DecryptedTx::Undecryptable( #[allow(clippy::redundant_clone)] wrapper.clone(), ))); + tx.chain_id = shell.chain_id.clone(); let request = ProcessProposal { txs: vec![tx.to_bytes()], @@ -651,18 +995,7 @@ mod test_process_proposal { /// marked undecryptable and the errors handled correctly #[test] fn test_undecryptable() { - let (mut shell, _) = TestShell::new(); - shell.init_chain( - RequestInitChain { - time: Some(Timestamp { - seconds: 0, - nanos: 0, - }), - chain_id: ChainId::default().to_string(), - ..Default::default() - }, - 1, - ); + let (mut shell, _) = test_utils::setup(1); let keypair = crate::wallet::defaults::daewon_keypair(); let pubkey = EncryptionKey::default(); // not valid tx bytes @@ -683,10 +1016,12 @@ mod test_process_proposal { }; shell.enqueue_tx(wrapper.clone()); - let signed = Tx::from(TxType::Decrypted(DecryptedTx::Undecryptable( - #[allow(clippy::redundant_clone)] - wrapper.clone(), - ))); + let mut signed = + Tx::from(TxType::Decrypted(DecryptedTx::Undecryptable( + #[allow(clippy::redundant_clone)] + wrapper.clone(), + ))); + signed.chain_id = shell.chain_id.clone(); let request = ProcessProposal { txs: vec![signed.to_bytes()], }; @@ -706,18 +1041,21 @@ mod test_process_proposal { /// [`process_proposal`] than expected, they are rejected #[test] fn test_too_many_decrypted_txs() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(1); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); - let tx = Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { + let mut tx = Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { tx, #[cfg(not(feature = "mainnet"))] has_valid_pow: false, })); + tx.chain_id = shell.chain_id.clone(); let request = ProcessProposal { txs: vec![tx.to_bytes()], @@ -740,35 +1078,551 @@ mod test_process_proposal { ); } - /// Process Proposal should reject a RawTx, but not panic + /// Process Proposal should reject a block containing a RawTx, but not panic #[test] fn test_raw_tx_rejected() { - let (mut shell, _) = TestShell::new(); + let (mut shell, _) = test_utils::setup(1); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); - let tx = Tx::from(TxType::Raw(tx)); + let mut tx = Tx::from(TxType::Raw(tx)); + tx.chain_id = shell.chain_id.clone(); let request = ProcessProposal 
{ txs: vec![tx.to_bytes()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); - assert_eq!( - response.result.info, - String::from( - "Transaction rejected: Non-encrypted transactions are not \ - supported" - ), + + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidTx) + ); + assert_eq!( + response[0].result.info, + String::from( + "Transaction rejected: Non-encrypted transactions are \ + not supported" + ), + ); + } + } + } + + /// Test that if the unsigned wrapper tx hash is known (replay attack), the + /// block is rejected + #[test] + fn test_wrapper_tx_hash() { + let (mut shell, _) = test_utils::setup(1); + + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let signed = wrapper + .sign(&keypair, shell.chain_id.clone(), None) + .expect("Test failed"); + + // Write wrapper hash to storage + let wrapper_unsigned_hash = Hash(signed.unsigned_hash()); + let hash_key = + replay_protection::get_tx_hash_key(&wrapper_unsigned_hash); + shell + .wl_storage + .storage + .write(&hash_key, vec![]) + .expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::ReplayTx) + ); + assert_eq!( + response[0].result.info, + format!( + "Wrapper transaction hash {} already in storage, \ + replay attempt", + wrapper_unsigned_hash + ) + ); + } + } + } + + /// Test that a block containing two identical wrapper txs is rejected + #[test] + fn test_wrapper_tx_hash_same_block() { + let (mut shell, _) = test_utils::setup(1); + + let keypair = crate::wallet::defaults::daewon_keypair(); + + // Add unshielded balance for fee payment + let balance_key = token::balance_key( + &shell.wl_storage.storage.native_token, + &Address::from(&keypair.ref_to()), + ); + shell + .wl_storage + .storage + .write(&balance_key, Amount::whole(1000).try_to_vec().unwrap()) + .unwrap(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let signed = wrapper + .sign(&keypair, shell.chain_id.clone(), None) + .expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes(); 2], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!(response[0].result.code, u32::from(ErrorCodes::Ok)); + assert_eq!( + response[1].result.code,
u32::from(ErrorCodes::ReplayTx) + ); + // The check happens on the inner hash first, so the tx is + // rejected because of this hash, not the + // wrapper one + assert_eq!( + response[1].result.info, + format!( + "Inner transaction hash {} already in storage, replay \ + attempt", + wrapper.tx_hash + ) + ); + } + } + } + + /// Test that if the unsigned inner tx hash is known (replay attack), the + /// block is rejected + #[test] + fn test_inner_tx_hash() { + let (mut shell, _) = test_utils::setup(1); + + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let inner_unsigned_hash = wrapper.tx_hash.clone(); + let signed = wrapper + .sign(&keypair, shell.chain_id.clone(), None) + .expect("Test failed"); + + // Write inner hash to storage + let hash_key = replay_protection::get_tx_hash_key(&inner_unsigned_hash); + shell + .wl_storage + .storage + .write(&hash_key, vec![]) + .expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::ReplayTx) + ); + assert_eq!( + response[0].result.info, + format!( + "Inner transaction hash {} already in storage, replay \ + attempt", + inner_unsigned_hash + ) + ); + } + } + } + + /// Test that a block containing two identical inner transactions is + /// rejected + #[test] + fn test_inner_tx_hash_same_block() { + let (mut shell, _) = test_utils::setup(1); + + let keypair = crate::wallet::defaults::daewon_keypair(); + let keypair_2 = crate::wallet::defaults::daewon_keypair(); + + // Add unshielded balance for fee payment + let balance_key = token::balance_key( + &shell.wl_storage.storage.native_token, + &Address::from(&keypair.ref_to()), + ); + shell + .wl_storage + .storage + .write(&balance_key, Amount::whole(1000).try_to_vec().unwrap()) + .unwrap(); + + // Add unshielded balance for fee payment + let balance_key = token::balance_key( + &shell.wl_storage.storage.native_token, + &Address::from(&keypair_2.ref_to()), + ); + shell + .wl_storage + .storage + .write(&balance_key, Amount::whole(1000).try_to_vec().unwrap()) + .unwrap(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx.clone(), + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let inner_unsigned_hash = wrapper.tx_hash.clone(); + let signed = wrapper + .sign(&keypair, shell.chain_id.clone(), None) + .expect("Test failed"); + + let new_wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair_2, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let new_signed = new_wrapper + .sign(&keypair, shell.chain_id.clone(), None) + .expect("Test failed"); + + // Run validation + let request =
ProcessProposal { + txs: vec![signed.to_bytes(), new_signed.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!(response[0].result.code, u32::from(ErrorCodes::Ok)); + assert_eq!( + response[1].result.code, + u32::from(ErrorCodes::ReplayTx) + ); + assert_eq!( + response[1].result.info, + format!( + "Inner transaction hash {} already in storage, replay \ + attempt", + inner_unsigned_hash + ) + ); + } + } + } + + /// Test that a wrapper or protocol transaction with a mismatching chain id + /// causes the entire block to be rejected + #[test] + fn test_wrong_chain_id() { + let (mut shell, _) = test_utils::setup(1); + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx.clone(), + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let wrong_chain_id = ChainId("Wrong chain id".to_string()); + let signed = wrapper + .sign(&keypair, wrong_chain_id.clone(), None) + .expect("Test failed"); + + let protocol_tx = ProtocolTxType::EthereumStateUpdate(tx).sign( + &keypair.ref_to(), + &keypair, + wrong_chain_id.clone(), + ); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes(), protocol_tx.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + for res in response { + assert_eq!( + res.result.code, + u32::from(ErrorCodes::InvalidChainId) + ); + assert_eq!( + res.result.info, + format!( + "Tx carries a wrong chain id: expected {}, found \ + {}", + shell.chain_id, wrong_chain_id + ) + ); + } + } + } + } + + /// Test that a decrypted transaction with a mismatching chain id gets + /// rejected without rejecting the entire block + #[test] + fn test_decrypted_wrong_chain_id() { + let (mut shell, _) = test_utils::setup(1); + let keypair = crate::wallet::defaults::daewon_keypair(); + + let wrong_chain_id = ChainId("Wrong chain id".to_string()); + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("new transaction data".as_bytes().to_owned()), + wrong_chain_id.clone(), + None, + ); + let decrypted: Tx = DecryptedTx::Decrypted { + tx: tx.clone(), + has_valid_pow: false, + } + .into(); + let signed_decrypted = decrypted.sign(&keypair); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let wrapper_in_queue = WrapperTxInQueue { + tx: wrapper, + has_valid_pow: false, + }; + shell.wl_storage.storage.tx_queue.push(wrapper_in_queue); + + // Run validation + let request = ProcessProposal { + txs: vec![signed_decrypted.to_bytes()], + }; + + match shell.process_proposal(request) { + Ok(response) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidDecryptedChainId) + ); + assert_eq!( + response[0].result.info, + format!( + "Decrypted tx carries a wrong chain id: expected {}, \ + found {}", + shell.chain_id, wrong_chain_id + ) + ) + } + Err(_) => panic!("Test failed"), + } + } + + /// Test that an expired wrapper transaction causes a block rejection + #[test] + fn test_expired_wrapper() { + let (mut shell, _) = test_utils::setup(1); + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let signed = wrapper + .sign(&keypair, shell.chain_id.clone(), Some(DateTimeUtc::now())) + .expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::ExpiredTx) + ); + } + } + } + + /// Test that an expired decrypted transaction is correctly marked as such + /// without rejecting the entire block + #[test] + fn test_expired_decrypted() { + let (mut shell, _) = test_utils::setup(1); + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("new transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + Some(DateTimeUtc::now()), + ); + let decrypted: Tx = DecryptedTx::Decrypted { + tx: tx.clone(), + has_valid_pow: false, + } + .into(); + let signed_decrypted = decrypted.sign(&keypair); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let wrapper_in_queue = WrapperTxInQueue { + tx: wrapper, + has_valid_pow: false, + }; + shell.wl_storage.storage.tx_queue.push(wrapper_in_queue); + + // Run validation + let request = ProcessProposal { + txs: vec![signed_decrypted.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(response) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::ExpiredDecryptedTx) + ); + } + Err(_) => panic!("Test failed"), + } } } diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/apps/src/lib/node/ledger/shims/abcipp_shim.rs index 74a56a4ddcc..05ffbf8de0c 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim.rs @@ -5,13 +5,16 @@ use std::pin::Pin; use std::task::{Context, Poll}; use futures::future::FutureExt; +use namada::proto::Tx; use namada::types::address::Address; #[cfg(not(feature = "abcipp"))] use namada::types::hash::Hash; #[cfg(not(feature = "abcipp"))] use namada::types::storage::BlockHash; +use namada::types::storage::BlockHeight; #[cfg(not(feature = "abcipp"))] use namada::types::transaction::hash_tx; +use tokio::sync::broadcast; use tokio::sync::mpsc::UnboundedSender; use tower::Service; @@ -21,8 +24,13 @@ use super::abcipp_shim_types::shim::request::{FinalizeBlock, ProcessedTx}; use super::abcipp_shim_types::shim::TxBytes; use super::abcipp_shim_types::shim::{Error, Request, Response}; use crate::config; +use crate::config::{Action, ActionAtHeight}; #[cfg(not(feature = "abcipp"))] -use crate::facade::tendermint_proto::abci::RequestBeginBlock; +use crate::facade::tendermint_proto::abci::{ + RequestBeginBlock, ResponseDeliverTx, +}; +#[cfg(not(feature = "abcipp"))] +use crate::facade::tower_abci::response::DeliverTx; use crate::facade::tower_abci::{BoxError, Request as
Req, Response as Resp}; /// The shim wraps the shell, which implements ABCI++. @@ -52,10 +60,13 @@ impl AbcippShim { vp_wasm_compilation_cache: u64, tx_wasm_compilation_cache: u64, native_token: Address, - ) -> (Self, AbciService) { + ) -> (Self, AbciService, broadcast::Sender<()>) { // We can use an unbounded channel here, because tower-abci limits the // number of requests that can come in + let (shell_send, shell_recv) = std::sync::mpsc::channel(); + let (server_shutdown, _) = broadcast::channel::<()>(1); + let action_at_height = config.shell.action_at_height.clone(); ( Self { service: Shell::new( @@ -73,7 +84,13 @@ impl AbcippShim { delivered_txs: vec![], shell_recv, }, - AbciService { shell_send }, + AbciService { + shell_send, + shutdown: server_shutdown.clone(), + action_at_height, + suspended: false, + }, + server_shutdown, ) } @@ -102,9 +119,11 @@ impl AbcippShim { }), #[cfg(feature = "abcipp")] Req::FinalizeBlock(block) => { + let block_time = + self.service.get_block_timestamp(block.time.clone()); let unprocessed_txs = block.txs.clone(); - let processing_results = - self.service.process_txs(&block.txs); + let (processing_results, _) = + self.service.process_txs(&block.txs, block_time); let mut txs = Vec::with_capacity(unprocessed_txs.len()); for (result, tx) in processing_results .into_iter() @@ -132,13 +151,29 @@ impl AbcippShim { } #[cfg(not(feature = "abcipp"))] Req::DeliverTx(tx) => { + let mut deliver: DeliverTx = Default::default(); + // Attach events to this transaction if possible + if let Ok(tx) = Tx::try_from(&tx.tx[..]) { + let resp: ResponseDeliverTx = tx.into(); + deliver.events = resp.events; + } self.delivered_txs.push(tx.tx); - Ok(Resp::DeliverTx(Default::default())) + Ok(Resp::DeliverTx(deliver)) } #[cfg(not(feature = "abcipp"))] Req::EndBlock(_) => { - let processing_results = - self.service.process_txs(&self.delivered_txs); + let begin_block_request = + self.begin_block_request.take().unwrap(); + let block_time = self.service.get_block_timestamp( + begin_block_request + .header + .as_ref() + .and_then(|header| header.time.to_owned()), + ); + + let (processing_results, _) = self + .service + .process_txs(&self.delivered_txs, block_time); let mut txs = Vec::with_capacity(self.delivered_txs.len()); let mut delivered = vec![]; std::mem::swap(&mut self.delivered_txs, &mut delivered); @@ -149,7 +184,7 @@ impl AbcippShim { txs.push(ProcessedTx { tx, result }); } let mut end_block_request: FinalizeBlock = - self.begin_block_request.take().unwrap().into(); + begin_block_request.into(); let hash = self.get_hash(); end_block_request.hash = BlockHash::from(hash.clone()); end_block_request.txs = txs; @@ -181,12 +216,147 @@ impl AbcippShim { } } +/// Indicates how [`AbciService`] should +/// check whether or not it needs to take +/// action. +#[derive(Debug)] +enum CheckAction { + /// No check necessary. + NoAction, + /// Check a given block height. + Check(i64), + /// The action has been taken. + AlreadySuspended, +} + #[derive(Debug)] pub struct AbciService { + /// A channel for forwarding requests to the shell shell_send: std::sync::mpsc::Sender<( Req, tokio::sync::oneshot::Sender<Result<Resp, BoxError>>, )>, + /// Indicates if the consensus connection is suspended. + suspended: bool, + /// This resolves the non-completing futures returned to tower-abci + /// during suspension. + shutdown: broadcast::Sender<()>, + /// An action to be taken at a specified block height. + action_at_height: Option<ActionAtHeight>, +} + +impl AbciService { + /// Check if we are at a block height with a scheduled action.
+ /// If so, perform the action. + fn maybe_take_action( + action_at_height: Option, + check: CheckAction, + mut shutdown_recv: broadcast::Receiver<()>, + ) -> (bool, Option<>::Future>) { + let hght = match check { + CheckAction::AlreadySuspended => BlockHeight::from(u64::MAX), + CheckAction::Check(hght) => BlockHeight::from(hght as u64), + CheckAction::NoAction => BlockHeight::default(), + }; + match action_at_height { + Some(ActionAtHeight { + height, + action: Action::Suspend, + }) if height <= hght => { + if height == hght { + tracing::info!( + "Reached block height {}, suspending.", + height + ); + tracing::warn!( + "\x1b[93mThis feature is intended for debugging \ + purposes. Note that on shutdown a spurious panic \ + message will be produced.\x1b[0m" + ) + } + ( + true, + Some( + async move { + shutdown_recv.recv().await.unwrap(); + Err(BoxError::from( + "Not all tendermint responses were processed. \ + If the `--suspended` flag was passed, you \ + may ignore this error.", + )) + } + .boxed(), + ), + ) + } + Some(ActionAtHeight { + height, + action: Action::Halt, + }) if height == hght => { + tracing::info!( + "Reached block height {}, halting the chain.", + height + ); + ( + false, + Some( + async move { + Err(BoxError::from(format!( + "Reached block height {}, halting the chain.", + height + ))) + } + .boxed(), + ), + ) + } + _ => (false, None), + } + } + + /// If we are not taking special action for this request, + /// forward it normally. + fn forward_request(&mut self, req: Req) -> >::Future { + let (resp_send, recv) = tokio::sync::oneshot::channel(); + let result = self.shell_send.send((req, resp_send)); + + async move { + if let Err(err) = result { + // The shell has shut-down + return Err(err.into()); + } + match recv.await { + Ok(resp) => resp, + Err(err) => { + tracing::info!("ABCI response channel didn't respond"); + Err(err.into()) + } + } + } + .boxed() + } + + /// Given the type of request, determine if we need to check + /// to possibly take an action. 
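The interplay of `maybe_take_action` above and `get_action` below reduces to a small height gate: a scheduled `Suspend` stays in effect for every height at or past the target (hence `AlreadySuspended` mapping to `u64::MAX`), while `Halt` fires only at the exact height. A self-contained sketch of just that decision, with config types that mirror `ActionAtHeight` in shape only and none of the channel plumbing:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Action {
    Suspend,
    Halt,
}

#[derive(Clone, Copy, Debug)]
struct ActionAtHeight {
    height: u64,
    action: Action,
}

// What the service should do for a request deciding `deciding_height`.
fn gate(cfg: Option<ActionAtHeight>, deciding_height: u64) -> Option<Action> {
    match cfg {
        // Suspension is sticky: the gate keeps firing for every
        // height at or past the target.
        Some(ActionAtHeight { height, action: Action::Suspend })
            if height <= deciding_height => Some(Action::Suspend),
        // Halting fires only at the exact target height.
        Some(ActionAtHeight { height, action: Action::Halt })
            if height == deciding_height => Some(Action::Halt),
        _ => None,
    }
}

fn main() {
    let cfg = Some(ActionAtHeight { height: 10, action: Action::Suspend });
    assert_eq!(gate(cfg, 9), None);
    assert_eq!(gate(cfg, 10), Some(Action::Suspend));
    assert_eq!(gate(cfg, 11), Some(Action::Suspend)); // still suspended
}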
+ fn get_action(&self, req: &Req) -> Option { + match req { + Req::PrepareProposal(req) => Some(CheckAction::Check(req.height)), + Req::ProcessProposal(req) => Some(CheckAction::Check(req.height)), + Req::EndBlock(req) => Some(CheckAction::Check(req.height)), + Req::BeginBlock(_) + | Req::DeliverTx(_) + | Req::InitChain(_) + | Req::CheckTx(_) + | Req::Commit(_) => { + if self.suspended { + Some(CheckAction::AlreadySuspended) + } else { + Some(CheckAction::NoAction) + } + } + _ => None, + } + } } /// The ABCI tower service implementation sends and receives messages to and @@ -206,23 +376,17 @@ impl Service for AbciService { } fn call(&mut self, req: Req) -> Self::Future { - let (resp_send, recv) = tokio::sync::oneshot::channel(); - let result = self.shell_send.send((req, resp_send)); - Box::pin( - async move { - if let Err(err) = result { - // The shell has shut-down - return Err(err.into()); - } - match recv.await { - Ok(resp) => resp, - Err(err) => { - tracing::info!("ABCI response channel didn't respond"); - Err(err.into()) - } - } - } - .boxed(), - ) + let action = self.get_action(&req); + if let Some(action) = action { + let (suspended, fut) = Self::maybe_take_action( + self.action_at_height.clone(), + action, + self.shutdown.subscribe(), + ); + self.suspended = suspended; + fut.unwrap_or_else(|| self.forward_request(req)) + } else { + self.forward_request(req) + } } } diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs b/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs index f78bcc5339c..c94ba2e1d99 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs @@ -15,7 +15,7 @@ pub mod shim { ResponseCheckTx, ResponseCommit, ResponseEcho, ResponseEndBlock, ResponseFlush, ResponseInfo, ResponseInitChain, ResponseListSnapshots, ResponseLoadSnapshotChunk, ResponseOfferSnapshot, - ResponsePrepareProposal, ResponseQuery, VoteInfo as TendermintVoteInfo, + ResponsePrepareProposal, ResponseQuery, VoteInfo, }; #[cfg(feature = "abcipp")] use tendermint_proto_abcipp::abci::{ @@ -28,7 +28,7 @@ pub mod shim { ResponseFlush, ResponseInfo, ResponseInitChain, ResponseListSnapshots, ResponseLoadSnapshotChunk, ResponseOfferSnapshot, ResponsePrepareProposal, ResponseQuery, ResponseVerifyVoteExtension, - VoteInfo as TendermintVoteInfo, + VoteInfo, }; use thiserror::Error; @@ -130,7 +130,7 @@ pub mod shim { InitChain(ResponseInitChain), Info(ResponseInfo), Query(ResponseQuery), - PrepareProposal(ResponsePrepareProposal), + PrepareProposal(response::PrepareProposal), VerifyHeader(response::VerifyHeader), ProcessProposal(response::ProcessProposal), RevertProposal(response::RevertProposal), @@ -177,7 +177,7 @@ pub mod shim { Ok(Resp::ApplySnapshotChunk(inner)) } Response::PrepareProposal(inner) => { - Ok(Resp::PrepareProposal(inner)) + Ok(Resp::PrepareProposal(inner.into())) } #[cfg(feature = "abcipp")] Response::ExtendVote(inner) => Ok(Resp::ExtendVote(inner)), @@ -194,7 +194,6 @@ pub mod shim { pub mod request { use std::convert::TryFrom; - use namada::ledger::pos::types::VoteInfo; #[cfg(not(feature = "abcipp"))] use namada::tendermint_proto::abci::RequestBeginBlock; use namada::types::hash::Hash; @@ -207,7 +206,7 @@ pub mod shim { Misbehavior as Evidence, RequestFinalizeBlock, }; - use super::TendermintVoteInfo; + use super::VoteInfo; pub struct VerifyHeader; @@ -246,28 +245,11 @@ pub mod shim { byzantine_validators: req.byzantine_validators, txs: vec![], proposer_address: req.proposer_address, - votes: req - .decided_last_commit 
- .unwrap() - .votes - .iter() - .map(|tm_vote_info| { - vote_info_to_tendermint(tm_vote_info.clone()) - }) - .collect(), + votes: req.decided_last_commit.unwrap().votes, } } } - fn vote_info_to_tendermint(info: TendermintVoteInfo) -> VoteInfo { - let val_info = info.validator.clone().unwrap(); - VoteInfo { - validator_address: info.validator.unwrap().address, - validator_vp: val_info.power as u64, - signed_last_block: info.signed_last_block, - } - } - #[cfg(not(feature = "abcipp"))] impl From for FinalizeBlock { fn from(req: RequestBeginBlock) -> FinalizeBlock { @@ -287,15 +269,7 @@ pub mod shim { byzantine_validators: req.byzantine_validators, txs: vec![], proposer_address: header.proposer_address, - votes: req - .last_commit_info - .unwrap() - .votes - .iter() - .map(|tm_vote_info| { - vote_info_to_tendermint(tm_vote_info.clone()) - }) - .collect(), + votes: req.last_commit_info.unwrap().votes, } } } @@ -318,6 +292,26 @@ pub mod shim { types::ConsensusParams, }; + #[derive(Debug, Default)] + pub struct PrepareProposal { + pub txs: Vec, + } + + #[cfg(feature = "abcipp")] + impl From for super::ResponsePrepareProposal { + fn from(_: PrepareProposal) -> Self { + // TODO(namada#198): When abci++ arrives, we should return a + // real response. + Self::default() + } + } + + #[cfg(not(feature = "abcipp"))] + impl From for super::ResponsePrepareProposal { + fn from(resp: PrepareProposal) -> Self { + Self { txs: resp.txs } + } + } #[derive(Debug, Default)] pub struct VerifyHeader; diff --git a/apps/src/lib/node/ledger/storage/mod.rs b/apps/src/lib/node/ledger/storage/mod.rs index a5d0fed81d7..73c21ba6ca3 100644 --- a/apps/src/lib/node/ledger/storage/mod.rs +++ b/apps/src/lib/node/ledger/storage/mod.rs @@ -50,10 +50,16 @@ fn new_blake2b() -> Blake2b { #[cfg(test)] mod tests { + use std::collections::HashMap; + use itertools::Itertools; - use namada::ledger::storage::{types, WlStorage}; + use namada::ledger::storage::write_log::WriteLog; + use namada::ledger::storage::{ + types, update_allowed_conversions, WlStorage, + }; use namada::ledger::storage_api::{self, StorageWrite}; use namada::types::chain::ChainId; + use namada::types::hash::Hash; use namada::types::storage::{BlockHash, BlockHeight, Key}; use namada::types::{address, storage}; use proptest::collection::vec; @@ -72,6 +78,7 @@ mod tests { ChainId::default(), address::nam(), None, + None, ); let key = Key::parse("key").expect("cannot parse the key string"); let value: u64 = 1; @@ -119,6 +126,7 @@ mod tests { ChainId::default(), address::nam(), None, + None, ); storage .begin_block(BlockHash::default(), BlockHeight(100)) @@ -131,13 +139,19 @@ mod tests { storage .write(&key, value_bytes.clone()) .expect("write failed"); - storage.commit_block().expect("commit failed"); - - // save the last state and drop the storage - let root = storage.merkle_root().0; - let hash = storage.get_block_hash().0; - let address_gen = storage.address_gen.clone(); - drop(storage); + storage.block.epoch = storage.block.epoch.next(); + storage.block.pred_epochs.new_epoch(BlockHeight(100), 1000); + // make wl_storage to update conversion for a new epoch + let mut wl_storage = WlStorage::new(WriteLog::default(), storage); + update_allowed_conversions(&mut wl_storage) + .expect("update conversions failed"); + wl_storage.commit_block().expect("commit failed"); + + // save the last state and the storage + let root = wl_storage.storage.merkle_root().0; + let hash = wl_storage.storage.get_block_hash().0; + let address_gen = wl_storage.storage.address_gen.clone(); + 
drop(wl_storage); // load the last state let mut storage = PersistentStorage::open( @@ -145,6 +159,7 @@ mod tests { ChainId::default(), address::nam(), None, + None, ); storage .load_last_state() @@ -168,6 +183,7 @@ mod tests { ChainId::default(), address::nam(), None, + None, ); storage .begin_block(BlockHash::default(), BlockHeight(100)) @@ -186,7 +202,8 @@ mod tests { .expect("write failed"); expected.push((key.to_string(), value_bytes)); } - storage.commit_block().expect("commit failed"); + let batch = PersistentStorage::batch(); + storage.commit_block(batch).expect("commit failed"); let (iter, gas) = storage.iter_prefix(&prefix); assert_eq!(gas, prefix.len() as u64); @@ -212,6 +229,7 @@ mod tests { ChainId::default(), address::nam(), None, + None, ); storage .begin_block(BlockHash::default(), BlockHeight(100)) @@ -227,13 +245,13 @@ mod tests { assert_eq!(gas, key.len() as u64); // insert - let vp1 = "vp1".as_bytes().to_vec(); + let vp1 = Hash::sha256("vp1".as_bytes()); storage.write(&key, vp1.clone()).expect("write failed"); // check - let (vp, gas) = + let (vp_code_hash, gas) = storage.validity_predicate(&addr).expect("VP load failed"); - assert_eq!(vp.expect("no VP"), vp1); + assert_eq!(vp_code_hash.expect("no VP"), vp1); assert_eq!(gas, (key.len() + vp1.len()) as u64); } @@ -246,6 +264,11 @@ mod tests { fn test_read_with_height(blocks_write_value in vec(any::(), 20)) { test_read_with_height_aux(blocks_write_value).unwrap() } + + #[test] + fn test_get_merkle_tree(blocks_write_type in vec(0..5_u64, 50)) { + test_get_merkle_tree_aux(blocks_write_type).unwrap() + } } /// Test reads at arbitrary block heights. @@ -270,6 +293,7 @@ mod tests { ChainId::default(), address::nam(), None, + None, ); // 1. For each `blocks_write_value`, write the current block height if @@ -301,7 +325,8 @@ mod tests { } else { storage.delete(&key)?; } - storage.commit_block()?; + let batch = PersistentStorage::batch(); + storage.commit_block(batch)?; } // 2. 
We try to read from these heights to check that we get back @@ -347,6 +372,184 @@ mod tests { Ok(()) } + /// Test the restore of the merkle tree + fn test_get_merkle_tree_aux( + blocks_write_type: Vec<u64>, + ) -> namada::ledger::storage::Result<()> { + let db_path = + TempDir::new().expect("Unable to create a temporary DB directory"); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + None, + ); + + let num_keys = 5; + let blocks_write_type = blocks_write_type.into_iter().enumerate().map( + |(index, write_type)| { + // try to update some keys at each height + let height = BlockHeight::from(index as u64 / num_keys + 1); + let key = Key::parse(format!("key{}", index as u64 % num_keys)) + .unwrap(); + (height, key, write_type) + }, + ); + + let mut roots = HashMap::new(); + + // write values at Height 0 like init_storage + for i in 0..num_keys { + let key = Key::parse(format!("key{}", i)).unwrap(); + let value_bytes = types::encode(&storage.block.height); + storage.write(&key, value_bytes)?; + } + + // Update and commit + let hash = BlockHash::default(); + storage.begin_block(hash, BlockHeight(1))?; + let mut batch = PersistentStorage::batch(); + for (height, key, write_type) in blocks_write_type.clone() { + if height != storage.block.height { + // to check the root later + roots.insert(storage.block.height, storage.merkle_root()); + if storage.block.height.0 % 5 == 0 { + // new epoch every 5 heights + storage.block.epoch = storage.block.epoch.next(); + storage + .block + .pred_epochs + .new_epoch(storage.block.height, 1000); + } + storage.commit_block(batch)?; + let hash = BlockHash::default(); + storage + .begin_block(hash, storage.block.height.next_height())?; + batch = PersistentStorage::batch(); + } + match write_type { + 0 => { + // no update + } + 1 => { + storage.delete(&key)?; + } + 2 => { + let value_bytes = types::encode(&storage.block.height); + storage.write(&key, value_bytes)?; + } + 3 => { + storage.batch_delete_subspace_val(&mut batch, &key)?; + } + _ => { + let value_bytes = types::encode(&storage.block.height); + storage.batch_write_subspace_val( + &mut batch, + &key, + value_bytes, + )?; + } + } + } + roots.insert(storage.block.height, storage.merkle_root()); + storage.commit_block(batch)?; + + let mut current_state = HashMap::new(); + for i in 0..num_keys { + let key = Key::parse(format!("key{}", i)).unwrap(); + current_state.insert(key, true); + } + // Check a Merkle tree + for (height, key, write_type) in blocks_write_type { + let tree = storage.get_merkle_tree(height)?; + assert_eq!(tree.root().0, roots.get(&height).unwrap().0); + match write_type { + 0 => { + if *current_state.get(&key).unwrap() { + assert!(tree.has_key(&key)?); + } else { + assert!(!tree.has_key(&key)?); + } + } + 1 | 3 => { + assert!(!tree.has_key(&key)?); + current_state.insert(key, false); + } + _ => { + assert!(tree.has_key(&key)?); + current_state.insert(key, true); + } + } + } + + Ok(()) + } + + /// Test the pruning of merkle tree stores + #[test] + fn test_prune_merkle_tree_stores() { + let db_path = + TempDir::new().expect("Unable to create a temporary DB directory"); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + Some(5), + ); + storage + .begin_block(BlockHash::default(), BlockHeight(1)) + .expect("begin_block failed"); + + let key = Key::parse("key").expect("cannot parse the key string"); + let value: u64 = 1; + storage + .write(&key, types::encode(&value))
.expect("write failed"); + + storage.block.epoch = storage.block.epoch.next(); + storage.block.pred_epochs.new_epoch(BlockHeight(1), 1000); + let batch = PersistentStorage::batch(); + storage.commit_block(batch).expect("commit failed"); + + storage + .begin_block(BlockHash::default(), BlockHeight(6)) + .expect("begin_block failed"); + + let key = Key::parse("key2").expect("cannot parse the key string"); + let value: u64 = 2; + storage + .write(&key, types::encode(&value)) + .expect("write failed"); + + storage.block.epoch = storage.block.epoch.next(); + storage.block.pred_epochs.new_epoch(BlockHeight(6), 1000); + let batch = PersistentStorage::batch(); + storage.commit_block(batch).expect("commit failed"); + + let result = storage.get_merkle_tree(1.into()); + assert!(result.is_ok(), "The tree at Height 1 should be restored"); + + storage + .begin_block(BlockHash::default(), BlockHeight(11)) + .expect("begin_block failed"); + storage.block.epoch = storage.block.epoch.next(); + storage.block.pred_epochs.new_epoch(BlockHeight(11), 1000); + let batch = PersistentStorage::batch(); + storage.commit_block(batch).expect("commit failed"); + + let result = storage.get_merkle_tree(1.into()); + assert!(result.is_err(), "The tree at Height 1 should be pruned"); + let result = storage.get_merkle_tree(5.into()); + assert!( + result.is_err(), + "The tree at Height 5 shouldn't be able to be restored" + ); + let result = storage.get_merkle_tree(6.into()); + assert!(result.is_ok(), "The tree should be restored"); + } + /// Test the prefix iterator with RocksDB. #[test] fn test_persistent_storage_prefix_iter() { @@ -357,6 +560,7 @@ mod tests { ChainId::default(), address::nam(), None, + None, ); let mut storage = WlStorage { storage, @@ -389,7 +593,7 @@ mod tests { itertools::assert_equal(iter, expected.clone()); // Commit genesis state - storage.commit_genesis().unwrap(); + storage.commit_block().unwrap(); // Again, try to iterate over their prefix let iter = storage_api::iter_prefix(&storage, &prefix) @@ -440,7 +644,7 @@ mod tests { itertools::assert_equal(iter, expected.clone()); // Commit genesis state - storage.commit_genesis().unwrap(); + storage.commit_block().unwrap(); // And check again let iter = storage_api::iter_prefix(&storage, &prefix) diff --git a/apps/src/lib/node/ledger/storage/rocksdb.rs b/apps/src/lib/node/ledger/storage/rocksdb.rs index 82d3faee4f5..9d54bc6de3d 100644 --- a/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -1,36 +1,36 @@ //! The persistent storage in RocksDB. //! //! The current storage tree is: -//! - `chain_id` -//! - `height`: the last committed block height -//! - `tx_queue`: txs to be decrypted in the next block -//! - `pred`: predecessor values of the top-level keys of the same name -//! - `tx_queue` -//! - `next_epoch_min_start_height`: minimum block height from which the next -//! epoch can start -//! - `next_epoch_min_start_time`: minimum block time from which the next epoch -//! can start -//! - `pred`: predecessor values of the top-level keys of the same name -//! - `next_epoch_min_start_height` -//! - `next_epoch_min_start_time` +//! - `state`: the latest ledger state +//! - `height`: the last committed block height +//! - `tx_queue`: txs to be decrypted in the next block +//! - `next_epoch_min_start_height`: minimum block height from which the next +//! epoch can start +//! - `next_epoch_min_start_time`: minimum block time from which the next +//! epoch can start +//! 
- `pred`: predecessor values of the top-level keys of the same name +//! - `next_epoch_min_start_height` +//! - `next_epoch_min_start_time` //! - `subspace`: accounts sub-spaces //! - `{address}/{dyn}`: any byte data associated with accounts -//! - `h`: for each block at height `h`: -//! - `tree`: merkle tree -//! - `root`: root hash -//! - `store`: the tree's store -//! - `hash`: block hash -//! - `epoch`: block epoch -//! - `address_gen`: established address generator -//! - `diffs`: diffs in account subspaces' key-vals -//! - `new/{dyn}`: value set in block height `h` -//! - `old/{dyn}`: value from predecessor block height -//! - `header`: block's header - -use std::cmp::Ordering; +//! - `diffs`: diffs in account subspaces' key-vals +//! - `new/{dyn}`: value set in block height `h` +//! - `old/{dyn}`: value from predecessor block height +//! - `block`: block state +//! - `results/{h}`: block results at height `h` +//! - `h`: for each block at height `h`: +//! - `tree`: merkle tree +//! - `root`: root hash +//! - `store`: the tree's store +//! - `hash`: block hash +//! - `epoch`: block epoch +//! - `address_gen`: established address generator +//! - `header`: block's header + use std::fs::File; use std::path::Path; use std::str::FromStr; +use std::sync::Mutex; use borsh::{BorshDeserialize, BorshSerialize}; use data_encoding::HEXLOWER; @@ -41,12 +41,14 @@ use namada::ledger::storage::{ }; use namada::types::internal::TxQueue; use namada::types::storage::{ - BlockHeight, BlockResults, Header, Key, KeySeg, KEY_SEGMENT_SEPARATOR, + BlockHeight, BlockResults, Epoch, Epochs, Header, Key, KeySeg, + KEY_SEGMENT_SEPARATOR, }; use namada::types::time::DateTimeUtc; +use rayon::prelude::*; use rocksdb::{ - BlockBasedOptions, Direction, FlushOptions, IteratorMode, Options, - ReadOptions, SliceTransform, WriteBatch, WriteOptions, + BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Direction, + FlushOptions, IteratorMode, Options, ReadOptions, WriteBatch, }; use crate::config::utils::num_of_threads; @@ -57,6 +59,12 @@ use crate::config::utils::num_of_threads; const ENV_VAR_ROCKSDB_COMPACTION_THREADS: &str = "NAMADA_ROCKSDB_COMPACTION_THREADS"; +/// Column family names +const SUBSPACE_CF: &str = "subspace"; +const DIFFS_CF: &str = "diffs"; +const STATE_CF: &str = "state"; +const BLOCK_CF: &str = "block"; + /// RocksDB handle #[derive(Debug)] pub struct RocksDB(rocksdb::DB); @@ -81,21 +89,24 @@ pub fn open( compaction_threads ); - let mut cf_opts = Options::default(); - // ! 
recommended initial setup https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#other-general-options - cf_opts.set_level_compaction_dynamic_level_bytes(true); + // DB options + let mut db_opts = Options::default(); // This gives `compaction_threads` number to compaction threads and 1 thread // for flush background jobs: https://github.com/facebook/rocksdb/blob/17ce1ca48be53ba29138f92dafc9c853d9241377/options/options.cc#L622 - cf_opts.increase_parallelism(compaction_threads); + db_opts.increase_parallelism(compaction_threads); - cf_opts.set_bytes_per_sync(1048576); - set_max_open_files(&mut cf_opts); + db_opts.set_bytes_per_sync(1048576); + set_max_open_files(&mut db_opts); - cf_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); - cf_opts.set_compression_options(0, 0, 0, 1024 * 1024); // TODO the recommended default `options.compaction_pri = // kMinOverlappingRatio` doesn't seem to be available in Rust + + db_opts.create_missing_column_families(true); + db_opts.create_if_missing(true); + db_opts.set_atomic_flush(true); + + let mut cfs = Vec::new(); let mut table_opts = BlockBasedOptions::default(); table_opts.set_block_size(16 * 1024); table_opts.set_cache_index_and_filter_blocks(true); @@ -105,49 +116,46 @@ pub fn open( } // latest format versions https://github.com/facebook/rocksdb/blob/d1c510baecc1aef758f91f786c4fbee3bc847a63/include/rocksdb/table.h#L394 table_opts.set_format_version(5); - cf_opts.set_block_based_table_factory(&table_opts); - - cf_opts.create_missing_column_families(true); - cf_opts.create_if_missing(true); - cf_opts.set_atomic_flush(true); - cf_opts.set_comparator("key_comparator", key_comparator); - let extractor = SliceTransform::create_fixed_prefix(20); - cf_opts.set_prefix_extractor(extractor); - // TODO use column families - - rocksdb::DB::open_cf_descriptors(&cf_opts, path, vec![]) + // for subspace (read/update-intensive) + let mut subspace_cf_opts = Options::default(); + subspace_cf_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); + subspace_cf_opts.set_compression_options(0, 0, 0, 1024 * 1024); + // ! 
recommended initial setup https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#other-general-options + subspace_cf_opts.set_level_compaction_dynamic_level_bytes(true); + subspace_cf_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + subspace_cf_opts.set_block_based_table_factory(&table_opts); + cfs.push(ColumnFamilyDescriptor::new(SUBSPACE_CF, subspace_cf_opts)); + + // for diffs (insert-intensive) + let mut diffs_cf_opts = Options::default(); + diffs_cf_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); + diffs_cf_opts.set_compression_options(0, 0, 0, 1024 * 1024); + diffs_cf_opts.set_compaction_style(rocksdb::DBCompactionStyle::Universal); + diffs_cf_opts.set_block_based_table_factory(&table_opts); + cfs.push(ColumnFamilyDescriptor::new(DIFFS_CF, diffs_cf_opts)); + + // for the ledger state (update-intensive) + let mut state_cf_opts = Options::default(); + // No compression since the size of the state is small + state_cf_opts.set_level_compaction_dynamic_level_bytes(true); + state_cf_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + state_cf_opts.set_block_based_table_factory(&table_opts); + cfs.push(ColumnFamilyDescriptor::new(STATE_CF, state_cf_opts)); + + // for blocks (insert-intensive) + let mut block_cf_opts = Options::default(); + block_cf_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); + block_cf_opts.set_compression_options(0, 0, 0, 1024 * 1024); + block_cf_opts.set_compaction_style(rocksdb::DBCompactionStyle::Universal); + block_cf_opts.set_block_based_table_factory(&table_opts); + cfs.push(ColumnFamilyDescriptor::new(BLOCK_CF, block_cf_opts)); + + rocksdb::DB::open_cf_descriptors(&db_opts, path, cfs) .map(RocksDB) .map_err(|e| Error::DBError(e.into_string())) } -/// A custom key comparator is used to sort keys by the height. In -/// lexicographical order, the height aren't ordered. For example, "11" is -/// before "2". -fn key_comparator(a: &[u8], b: &[u8]) -> Ordering { - let a_str = &String::from_utf8(a.to_vec()).unwrap(); - let b_str = &String::from_utf8(b.to_vec()).unwrap(); - - let a_vec: Vec<&str> = a_str.split('/').collect(); - let b_vec: Vec<&str> = b_str.split('/').collect(); - - let result_a_h = a_vec[0].parse::<u64>(); - let result_b_h = b_vec[0].parse::<u64>(); - match (result_a_h, result_b_h) { - (Ok(a_h), Ok(b_h)) => { - if a_h == b_h { - a_vec[1..].cmp(&b_vec[1..]) - } else { - a_h.cmp(&b_h) - } - } - _ => { - // the key doesn't include the height - a_str.cmp(b_str) - } - } -} - impl Drop for RocksDB { fn drop(&mut self) { self.flush(true).expect("flush failed"); @@ -155,6 +163,12 @@ } impl RocksDB { + fn get_column_family(&self, cf_name: &str) -> Result<&ColumnFamily> { + self.0 + .cf_handle(cf_name) + .ok_or_else(|| Error::DBError(format!("No {cf_name} column family"))) + } + fn flush(&self, wait: bool) -> Result<()> { let mut flush_opts = FlushOptions::default(); flush_opts.set_wait(wait); @@ -166,15 +180,14 @@ impl RocksDB { /// Persist the diff of an account subspace key-val under the height where /// it was changed.
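To make the four-way column family split above concrete, here is a minimal, self-contained sketch of the same open-then-cf_handle access pattern this file now relies on, using the rust-rocksdb crate. The temporary path, demo key, and default per-family options are illustrative assumptions only, not code from this patch:

use rocksdb::{Options, DB};

fn main() -> Result<(), rocksdb::Error> {
    let mut db_opts = Options::default();
    db_opts.create_if_missing(true);
    db_opts.create_missing_column_families(true);
    // Open with the same four families the ledger uses; the per-family
    // tuning (compression, compaction style) is left at defaults here.
    let db = DB::open_cf(
        &db_opts,
        "/tmp/namada-cf-demo", // hypothetical path, for illustration only
        ["subspace", "diffs", "state", "block"],
    )?;
    // Keys no longer carry a "subspace/" string prefix; the column family
    // itself provides the namespace.
    let cf = db.cf_handle("subspace").expect("subspace CF was just created");
    db.put_cf(cf, "demo/storage/key", b"value")?;
    assert_eq!(db.get_cf(cf, "demo/storage/key")?, Some(b"value".to_vec()));
    Ok(())
}

Moving the namespace out of the key and into a column family is what allows each keyspace to get its own compaction and compression settings, as configured above.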
fn write_subspace_diff( - &mut self, + &self, height: BlockHeight, key: &Key, old_value: Option<&[u8]>, new_value: Option<&[u8]>, ) -> Result<()> { - let key_prefix = Key::from(height.to_db_key()) - .push(&"diffs".to_owned()) - .map_err(Error::KeyError)?; + let cf = self.get_column_family(DIFFS_CF)?; + let key_prefix = Key::from(height.to_db_key()); if let Some(old_value) = old_value { let old_val_key = key_prefix @@ -183,7 +196,7 @@ impl RocksDB { .join(key) .to_string(); self.0 - .put(old_val_key, old_value) + .put_cf(cf, old_val_key, old_value) .map_err(|e| Error::DBError(e.into_string()))?; } @@ -194,7 +207,7 @@ impl RocksDB { .join(key) .to_string(); self.0 - .put(new_val_key, new_value) + .put_cf(cf, new_val_key, new_value) .map_err(|e| Error::DBError(e.into_string()))?; } Ok(()) @@ -203,15 +216,15 @@ impl RocksDB { /// Persist the diff of an account subspace key-val under the height where /// it was changed in a batch write. fn batch_write_subspace_diff( + &self, batch: &mut RocksDBWriteBatch, height: BlockHeight, key: &Key, old_value: Option<&[u8]>, new_value: Option<&[u8]>, ) -> Result<()> { - let key_prefix = Key::from(height.to_db_key()) - .push(&"diffs".to_owned()) - .map_err(Error::KeyError)?; + let cf = self.get_column_family(DIFFS_CF)?; + let key_prefix = Key::from(height.to_db_key()); if let Some(old_value) = old_value { let old_val_key = key_prefix @@ -219,7 +232,7 @@ impl RocksDB { .map_err(Error::KeyError)? .join(key) .to_string(); - batch.0.put(old_val_key, old_value); + batch.0.put_cf(cf, old_val_key, old_value); } if let Some(new_value) = new_value { @@ -228,27 +241,30 @@ impl RocksDB { .map_err(Error::KeyError)? .join(key) .to_string(); - batch.0.put(new_val_key, new_value); + batch.0.put_cf(cf, new_val_key, new_value); } Ok(()) } fn exec_batch(&mut self, batch: WriteBatch) -> Result<()> { - let mut write_opts = WriteOptions::default(); - write_opts.disable_wal(true); self.0 - .write_opt(batch, &write_opts) + .write(batch) .map_err(|e| Error::DBError(e.into_string())) } /// Dump last known block - pub fn dump_last_block(&self, out_file_path: std::path::PathBuf) { - use std::io::Write; - - // Fine the last block height + pub fn dump_last_block( + &self, + out_file_path: std::path::PathBuf, + historic: bool, + ) { + // Find the last block height + let state_cf = self + .get_column_family(STATE_CF) + .expect("State column family should exist"); let height: BlockHeight = types::decode( self.0 - .get("height") + .get_cf(state_cf, "height") .expect("Unable to read DB") .expect("No block height found"), ) @@ -272,36 +288,179 @@ impl RocksDB { println!("Will write to {} ...", full_path.to_string_lossy()); - let mut dump_it = |prefix: String| { - for next in self.0.iterator(IteratorMode::From( - prefix.as_bytes(), - Direction::Forward, - )) { - match next { - Err(e) => { - eprintln!( - "Something failed in a \"{prefix}\" iterator: {e}" - ) - } - Ok((raw_key, raw_val)) => { - let key = std::str::from_utf8(&raw_key) - .expect("All keys should be valid UTF-8 strings"); - let val = HEXLOWER.encode(&raw_val); - let bytes = format!("\"{key}\" = \"{val}\"\n"); - file.write_all(bytes.as_bytes()) - .expect("Unable to write to output file"); - } - }; - } - }; + if historic { + // Dump the keys prepended with the selected block height (includes + // subspace diff keys) + + // Diffs + let cf = self + .get_column_family(DIFFS_CF) + .expect("Diffs column family should exist"); + let prefix = height.raw(); + self.dump_it(cf, Some(prefix.clone()), &mut file); + + // Block + let cf = self + 
.get_column_family(BLOCK_CF) + .expect("Block column family should exist"); + self.dump_it(cf, Some(prefix), &mut file); + } - // Dump accounts subspace and block height data - dump_it("subspace".to_string()); - let block_prefix = format!("{}/", height.raw()); - dump_it(block_prefix); + // subspace + let cf = self + .get_column_family(SUBSPACE_CF) + .expect("Subspace column family should exist"); + self.dump_it(cf, None, &mut file); println!("Done writing to {}", full_path.to_string_lossy()); } + + /// Dump data + fn dump_it( + &self, + cf: &ColumnFamily, + prefix: Option<String>, + file: &mut File, + ) { + use std::io::Write; + + let read_opts = make_iter_read_opts(prefix.clone()); + let iter = if let Some(prefix) = prefix { + self.0.iterator_cf_opt( + cf, + read_opts, + IteratorMode::From(prefix.as_bytes(), Direction::Forward), + ) + } else { + self.0.iterator_cf_opt(cf, read_opts, IteratorMode::Start) + }; + + for (key, raw_val, _gas) in PersistentPrefixIterator( + PrefixIterator::new(iter, String::default()), + // Empty string to prevent prefix stripping, the prefix is + // already in the enclosed iterator + ) { + let val = HEXLOWER.encode(&raw_val); + let bytes = format!("\"{key}\" = \"{val}\"\n"); + file.write_all(bytes.as_bytes()) + .expect("Unable to write to output file"); + } + } + + /// Rollback to previous block. Given the inner workings of tendermint + /// rollback and of the key structure of Namada, calling rollback more than + /// once without restarting the chain results in a single rollback. + pub fn rollback( + &mut self, + tendermint_block_height: BlockHeight, + ) -> Result<()> { + let last_block = self.read_last_block()?.ok_or(Error::DBError( + "Missing last block in storage".to_string(), + ))?; + tracing::info!( + "Namada last block height: {}, Tendermint last block height: {}", + last_block.height, + tendermint_block_height + ); + + // If the block height to which tendermint rolled back matches the + // Namada height, there's no need to rollback + if tendermint_block_height == last_block.height { + tracing::info!( + "Namada height already matches the rollback Tendermint \ height, no need to rollback." + ); + return Ok(()); + } + + let mut batch = WriteBatch::default(); + let previous_height = + BlockHeight::from(u64::from(last_block.height) - 1); + + let state_cf = self.get_column_family(STATE_CF)?; + // Revert the non-height-prepended metadata storage keys which get + // updated with every block. Because of the way we save these + // three keys in storage we can only perform one rollback before + // restarting the chain + tracing::info!("Reverting non-height-prepended metadata keys"); + batch.put_cf(state_cf, "height", types::encode(&previous_height)); + for metadata_key in [ + "next_epoch_min_start_height", + "next_epoch_min_start_time", + "tx_queue", + ] { + let previous_key = format!("pred/{}", metadata_key); + let previous_value = self + .0 + .get_cf(state_cf, previous_key.as_bytes()) + .map_err(|e| Error::DBError(e.to_string()))? + .ok_or(Error::UnknownKey { key: previous_key })?; + + batch.put_cf(state_cf, metadata_key, previous_value); + // NOTE: we cannot restore the "pred/" keys themselves since we + // don't have their predecessors in storage, but there's no need to + // since we cannot do more than one rollback anyway because of + // Tendermint.
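// (Editorial illustration with hypothetical values, not part of the patch:
// suppose block 100 bumped "next_epoch_min_start_height" from 95 to 105.
// The state CF then holds "pred/next_epoch_min_start_height" = 95 alongside
// the current 105, and the loop above copies 95 back over 105. A second
// rollback would need the value that preceded 95, which was overwritten
// when 105 was committed, hence the single-rollback limit the NOTE states.)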
+ } + + // Delete block results for the last block + let block_cf = self.get_column_family(BLOCK_CF)?; + tracing::info!("Removing last block results"); + batch.delete_cf(block_cf, format!("results/{}", last_block.height)); + + // Execute next step in parallel + let batch = Mutex::new(batch); + + tracing::info!("Restoring previous height subspace diffs"); + self.iter_prefix(&Key::default()) + .par_bridge() + .try_for_each(|(key, _value, _gas)| -> Result<()> { + // Restore previous height diff if present, otherwise delete the + // subspace key + let subspace_cf = self.get_column_family(SUBSPACE_CF)?; + match self.read_subspace_val_with_height( + &Key::from(key.to_db_key()), + previous_height, + last_block.height, + )? { + Some(previous_value) => batch.lock().unwrap().put_cf( + subspace_cf, + &key, + previous_value, + ), + None => batch.lock().unwrap().delete_cf(subspace_cf, &key), + } + + Ok(()) + })?; + + tracing::info!("Deleting keys prepended with the last height"); + let mut batch = batch.into_inner().unwrap(); + let prefix = last_block.height.to_string(); + let mut delete_keys = |cf: &ColumnFamily| { + let read_opts = make_iter_read_opts(Some(prefix.clone())); + let iter = self.0.iterator_cf_opt( + cf, + read_opts, + IteratorMode::From(prefix.as_bytes(), Direction::Forward), + ); + for (key, _value, _gas) in PersistentPrefixIterator( + // Empty prefix string to prevent stripping + PrefixIterator::new(iter, String::default()), + ) { + batch.delete_cf(cf, key); + } + }; + // Delete any height-prepended key in subspace diffs + let diffs_cf = self.get_column_family(DIFFS_CF)?; + delete_keys(diffs_cf); + // Delete any height-prepended key in the block + delete_keys(block_cf); + + // Write the batch and persist changes to disk + tracing::info!("Flushing restored state to disk"); + self.exec_batch(batch) + } } impl DB for RocksDB { @@ -325,9 +484,10 @@ fn read_last_block(&mut self) -> Result<Option<BlockStateRead>> { // Block height + let state_cf = self.get_column_family(STATE_CF)?; let height: BlockHeight = match self .0 - .get("height") + .get_cf(state_cf, "height") .map_err(|e| Error::DBError(e.into_string()))? { Some(bytes) => { @@ -339,10 +499,11 @@ }; // Block results + let block_cf = self.get_column_family(BLOCK_CF)?; let results_path = format!("results/{}", height.raw()); let results: BlockResults = match self .0 - .get(results_path) + .get_cf(block_cf, results_path) .map_err(|e| Error::DBError(e.into_string()))? { Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, @@ -352,7 +513,7 @@ // Epoch start height and time let next_epoch_min_start_height: BlockHeight = match self .0 - .get("next_epoch_min_start_height") + .get_cf(state_cf, "next_epoch_min_start_height") .map_err(|e| Error::DBError(e.into_string()))? { Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, @@ -365,7 +526,7 @@ }; let next_epoch_min_start_time: DateTimeUtc = match self .0 - .get("next_epoch_min_start_time") + .get_cf(state_cf, "next_epoch_min_start_time") .map_err(|e| Error::DBError(e.into_string()))? { Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, @@ -378,7 +539,7 @@ }; let tx_queue: TxQueue = match self .0 - .get("tx_queue") + .get_cf(state_cf, "tx_queue") .map_err(|e| Error::DBError(e.into_string()))?
{ Some(bytes) => types::decode(bytes).map_err(Error::CodingError)?, @@ -388,7 +549,7 @@ impl DB for RocksDB { } }; - // Load data at the height + // Load block data at the height let prefix = format!("{}/", height.raw()); let mut read_opts = ReadOptions::default(); read_opts.set_total_order_seek(false); @@ -399,9 +560,10 @@ impl DB for RocksDB { let mut epoch = None; let mut pred_epochs = None; let mut address_gen = None; - for value in self.0.iterator_opt( - IteratorMode::From(prefix.as_bytes(), Direction::Forward), + for value in self.0.iterator_cf_opt( + block_cf, read_opts, + IteratorMode::From(prefix.as_bytes(), Direction::Forward), ) { let (key, bytes) = match value { Ok(data) => data, @@ -458,9 +620,6 @@ impl DB for RocksDB { types::decode(bytes).map_err(Error::CodingError)?, ); } - "diffs" => { - // ignore the diffs - } _ => unknown_key_error(path)?, }, None => unknown_key_error(path)?, @@ -488,8 +647,12 @@ impl DB for RocksDB { } } - fn write_block(&mut self, state: BlockStateWrite) -> Result<()> { - let mut batch = WriteBatch::default(); + fn write_block( + &mut self, + state: BlockStateWrite, + batch: &mut Self::WriteBatch, + is_full_commit: bool, + ) -> Result<()> { let BlockStateWrite { merkle_tree_stores, header, @@ -505,42 +668,56 @@ impl DB for RocksDB { }: BlockStateWrite = state; // Epoch start height and time - if let Some(current_value) = - self.0 - .get("next_epoch_min_start_height") - .map_err(|e| Error::DBError(e.into_string()))? + let state_cf = self.get_column_family(STATE_CF)?; + if let Some(current_value) = self + .0 + .get_cf(state_cf, "next_epoch_min_start_height") + .map_err(|e| Error::DBError(e.into_string()))? { // Write the predecessor value for rollback - batch.put("pred/next_epoch_min_start_height", current_value); + batch.0.put_cf( + state_cf, + "pred/next_epoch_min_start_height", + current_value, + ); } - batch.put( + batch.0.put_cf( + state_cf, "next_epoch_min_start_height", types::encode(&next_epoch_min_start_height), ); if let Some(current_value) = self .0 - .get("next_epoch_min_start_time") + .get_cf(state_cf, "next_epoch_min_start_time") .map_err(|e| Error::DBError(e.into_string()))? { // Write the predecessor value for rollback - batch.put("pred/next_epoch_min_start_time", current_value); + batch.0.put_cf( + state_cf, + "pred/next_epoch_min_start_time", + current_value, + ); } - batch.put( + batch.0.put_cf( + state_cf, "next_epoch_min_start_time", types::encode(&next_epoch_min_start_time), ); // Tx queue if let Some(pred_tx_queue) = self .0 - .get("tx_queue") + .get_cf(state_cf, "tx_queue") .map_err(|e| Error::DBError(e.into_string()))? 
{ // Write the predecessor value for rollback - batch.put("pred/tx_queue", pred_tx_queue); + batch.0.put_cf(state_cf, "pred/tx_queue", pred_tx_queue); } - batch.put("tx_queue", types::encode(&tx_queue)); + batch + .0 + .put_cf(state_cf, "tx_queue", types::encode(&tx_queue)); + let block_cf = self.get_column_family(BLOCK_CF)?; let prefix_key = Key::from(height.to_db_key()); // Merkle tree { @@ -548,23 +725,27 @@ .push(&"tree".to_owned()) .map_err(Error::KeyError)?; for st in StoreType::iter() { - let prefix_key = prefix_key - .push(&st.to_string()) - .map_err(Error::KeyError)?; - let root_key = prefix_key - .push(&"root".to_owned()) - .map_err(Error::KeyError)?; - batch.put( - root_key.to_string(), - types::encode(merkle_tree_stores.root(st)), - ); - let store_key = prefix_key - .push(&"store".to_owned()) - .map_err(Error::KeyError)?; - batch.put( - store_key.to_string(), - merkle_tree_stores.store(st).encode(), - ); + if *st == StoreType::Base || is_full_commit { + let prefix_key = prefix_key + .push(&st.to_string()) + .map_err(Error::KeyError)?; + let root_key = prefix_key + .push(&"root".to_owned()) + .map_err(Error::KeyError)?; + batch.0.put_cf( + block_cf, + root_key.to_string(), + types::encode(merkle_tree_stores.root(st)), + ); + let store_key = prefix_key + .push(&"store".to_owned()) + .map_err(Error::KeyError)?; + batch.0.put_cf( + block_cf, + store_key.to_string(), + merkle_tree_stores.store(st).encode(), + ); + } } } // Block header @@ -573,7 +754,8 @@ let key = prefix_key .push(&"header".to_owned()) .map_err(Error::KeyError)?; - batch.put( + batch.0.put_cf( + block_cf, key.to_string(), h.try_to_vec().expect("serialization failed"), ); @@ -584,53 +766,64 @@ let key = prefix_key .push(&"hash".to_owned()) .map_err(Error::KeyError)?; - batch.put(key.to_string(), types::encode(&hash)); + batch + .0 + .put_cf(block_cf, key.to_string(), types::encode(&hash)); } // Block epoch { let key = prefix_key .push(&"epoch".to_owned()) .map_err(Error::KeyError)?; - batch.put(key.to_string(), types::encode(&epoch)); + batch + .0 + .put_cf(block_cf, key.to_string(), types::encode(&epoch)); } // Block results { let results_path = format!("results/{}", height.raw()); - batch.put(results_path, types::encode(&results)); + batch + .0 + .put_cf(block_cf, results_path, types::encode(&results)); } // Predecessor block epochs { let key = prefix_key .push(&"pred_epochs".to_owned()) .map_err(Error::KeyError)?; - batch.put(key.to_string(), types::encode(&pred_epochs)); + batch.0.put_cf( + block_cf, + key.to_string(), + types::encode(&pred_epochs), + ); } // Address gen { let key = prefix_key .push(&"address_gen".to_owned()) .map_err(Error::KeyError)?; - batch.put(key.to_string(), types::encode(&address_gen)); + batch.0.put_cf( + block_cf, + key.to_string(), + types::encode(&address_gen), + ); } // Block height - batch.put("height", types::encode(&height)); + batch.0.put_cf(state_cf, "height", types::encode(&height)); - // Write the batch - self.exec_batch(batch)?; - - // Flush without waiting - self.flush(false) + Ok(()) } fn read_block_header(&self, height: BlockHeight) -> Result<Option<Header>> { + let block_cf = self.get_column_family(BLOCK_CF)?; let prefix_key = Key::from(height.to_db_key()); let key = prefix_key .push(&"header".to_owned()) .map_err(Error::KeyError)?; let value = self .0 - .get(key.to_string()) + .get_cf(block_cf, key.to_string()) .map_err(|e| Error::DBError(e.into_string()))?; match value { Some(v) => Ok(Some( @@ -644,12 +837,31 @@ impl
DB for RocksDB { fn read_merkle_tree_stores( &self, height: BlockHeight, - ) -> Result<Option<MerkleTreeStoresRead>> { - let mut merkle_tree_stores = MerkleTreeStoresRead::default(); + ) -> Result<Option<(BlockHeight, MerkleTreeStoresRead)>> { + // Get the latest height at which the tree stores were written + let block_cf = self.get_column_family(BLOCK_CF)?; let height_key = Key::from(height.to_db_key()); - let tree_key = height_key + let key = height_key + .push(&"pred_epochs".to_owned()) + .expect("Cannot obtain a storage key"); + let pred_epochs: Epochs = match self + .0 + .get_cf(block_cf, key.to_string()) + .map_err(|e| Error::DBError(e.into_string()))? + { + Some(b) => types::decode(b).map_err(Error::CodingError)?, + None => return Ok(None), + }; + // Read the tree at the first height if no epoch update + let stored_height = match pred_epochs.get_epoch_start_height(height) { + Some(BlockHeight(0)) | None => BlockHeight(1), + Some(h) => h, + }; + + let tree_key = Key::from(stored_height.to_db_key()) .push(&"tree".to_owned()) .map_err(Error::KeyError)?; + let mut merkle_tree_stores = MerkleTreeStoresRead::default(); for st in StoreType::iter() { let prefix_key = tree_key.push(&st.to_string()).map_err(Error::KeyError)?; @@ -658,7 +870,7 @@ .map_err(Error::KeyError)?; let bytes = self .0 - .get(root_key.to_string()) + .get_cf(block_cf, root_key.to_string()) .map_err(|e| Error::DBError(e.into_string()))?; match bytes { Some(b) => { @@ -673,7 +885,7 @@ .map_err(Error::KeyError)?; let bytes = self .0 - .get(store_key.to_string()) + .get_cf(block_cf, store_key.to_string()) .map_err(|e| Error::DBError(e.into_string()))?; match bytes { Some(b) => { @@ -682,14 +894,13 @@ } None => return Ok(None), } } - Ok(Some(merkle_tree_stores)) + Ok(Some((stored_height, merkle_tree_stores))) } fn read_subspace_val(&self, key: &Key) -> Result<Option<Vec<u8>>> { - let subspace_key = - Key::parse("subspace").map_err(Error::KeyError)?.join(key); + let subspace_cf = self.get_column_family(SUBSPACE_CF)?; self.0 - .get(subspace_key.to_string()) + .get_cf(subspace_cf, key.to_string()) .map_err(|e| Error::DBError(e.into_string())) } @@ -700,9 +911,8 @@ last_height: BlockHeight, ) -> Result<Option<Vec<u8>>> { // Check if the value changed at this height - let key_prefix = Key::from(height.to_db_key()) - .push(&"diffs".to_owned()) - .map_err(Error::KeyError)?; + let diffs_cf = self.get_column_family(DIFFS_CF)?; + let key_prefix = Key::from(height.to_db_key()); let new_val_key = key_prefix .push(&"new".to_owned()) .map_err(Error::KeyError)? .join(key) .to_string(); // If it has a "new" val, it was written at this height match self .0 - .get(new_val_key) + .get_cf(diffs_cf, new_val_key) .map_err(|e| Error::DBError(e.into_string()))? { Some(new_val) => { @@ -725,11 +935,11 @@ .join(key) .to_string(); // If it has an "old" val, it was deleted at this height - if self.0.key_may_exist(old_val_key.clone()) { + if self.0.key_may_exist_cf(diffs_cf, old_val_key.clone()) { // check if it actually exists if self .0 - .get(old_val_key) + .get_cf(diffs_cf, old_val_key) .map_err(|e| Error::DBError(e.into_string()))?
.is_some() { @@ -744,9 +954,7 @@ let mut raw_height = height.0 + 1; loop { // Try to find the next diff on this key - let key_prefix = Key::from(BlockHeight(raw_height).to_db_key()) - .push(&"diffs".to_owned()) - .map_err(Error::KeyError)?; + let key_prefix = Key::from(BlockHeight(raw_height).to_db_key()); let old_val_key = key_prefix .push(&"old".to_owned()) .map_err(Error::KeyError)? .join(key) .to_string(); let old_val = self .0 - .get(old_val_key) + .get_cf(diffs_cf, old_val_key) .map_err(|e| Error::DBError(e.into_string()))?; // If it has an "old" val, it's the one we're looking for match old_val { @@ -767,11 +975,11 @@ .map_err(Error::KeyError)? .join(key) .to_string(); - if self.0.key_may_exist(new_val_key.clone()) { + if self.0.key_may_exist_cf(diffs_cf, new_val_key.clone()) { // check if it actually exists if self .0 - .get(new_val_key) + .get_cf(diffs_cf, new_val_key) .map_err(|e| Error::DBError(e.into_string()))? .is_some() { @@ -796,12 +1004,11 @@ key: &Key, value: impl AsRef<[u8]>, ) -> Result<i64> { + let subspace_cf = self.get_column_family(SUBSPACE_CF)?; let value = value.as_ref(); - let subspace_key = - Key::parse("subspace").map_err(Error::KeyError)?.join(key); let size_diff = match self .0 - .get(subspace_key.to_string()) + .get_cf(subspace_cf, key.to_string()) .map_err(|e| Error::DBError(e.into_string()))? { Some(prev_value) => { @@ -822,7 +1029,7 @@ // Write the new key-val self.0 - .put(subspace_key.to_string(), value) + .put_cf(subspace_cf, key.to_string(), value) .map_err(|e| Error::DBError(e.into_string()))?; Ok(size_diff) @@ -833,13 +1040,12 @@ height: BlockHeight, key: &Key, ) -> Result<i64> { - let subspace_key = - Key::parse("subspace").map_err(Error::KeyError)?.join(key); + let subspace_cf = self.get_column_family(SUBSPACE_CF)?; // Check the length of previous value, if any let prev_len = match self .0 - .get(subspace_key.to_string()) + .get_cf(subspace_cf, key.to_string()) .map_err(|e| Error::DBError(e.into_string()))? { Some(prev_value) => { @@ -852,7 +1058,7 @@ // Delete the key-val self.0 - .delete(subspace_key.to_string()) + .delete_cf(subspace_cf, key.to_string()) .map_err(|e| Error::DBError(e.into_string()))?; Ok(prev_len) @@ -874,17 +1080,16 @@ value: impl AsRef<[u8]>, ) -> Result<i64> { let value = value.as_ref(); - let subspace_key = - Key::parse("subspace").map_err(Error::KeyError)?.join(key); + let subspace_cf = self.get_column_family(SUBSPACE_CF)?; let size_diff = match self .0 - .get(subspace_key.to_string()) + .get_cf(subspace_cf, key.to_string()) .map_err(|e| Error::DBError(e.into_string()))?
{ Some(old_value) => { let size_diff = value.len() as i64 - old_value.len() as i64; // Persist the previous value - Self::batch_write_subspace_diff( + self.batch_write_subspace_diff( batch, height, key, @@ -894,7 +1099,7 @@ size_diff } None => { - Self::batch_write_subspace_diff( + self.batch_write_subspace_diff( batch, height, key, @@ -906,7 +1111,7 @@ }; // Write the new key-val - batch.put(&subspace_key.to_string(), value); + batch.0.put_cf(subspace_cf, key.to_string(), value); Ok(size_diff) } @@ -917,19 +1122,18 @@ height: BlockHeight, key: &Key, ) -> Result<i64> { - let subspace_key = - Key::parse("subspace").map_err(Error::KeyError)?.join(key); + let subspace_cf = self.get_column_family(SUBSPACE_CF)?; // Check the length of previous value, if any let prev_len = match self .0 - .get(key.to_string()) + .get_cf(subspace_cf, key.to_string()) .map_err(|e| Error::DBError(e.into_string()))? { Some(prev_value) => { let prev_len = prev_value.len() as i64; // Persist the previous value - Self::batch_write_subspace_diff( + self.batch_write_subspace_diff( batch, height, key, @@ -942,10 +1146,43 @@ }; // Delete the key-val - batch.delete(subspace_key.to_string()); + batch.0.delete_cf(subspace_cf, key.to_string()); Ok(prev_len) } + + fn prune_merkle_tree_stores( + &mut self, + batch: &mut Self::WriteBatch, + epoch: Epoch, + pred_epochs: &Epochs, + ) -> Result<()> { + let block_cf = self.get_column_family(BLOCK_CF)?; + match pred_epochs.get_start_height_of_epoch(epoch) { + Some(height) => { + let prefix_key = Key::from(height.to_db_key()) + .push(&"tree".to_owned()) + .map_err(Error::KeyError)?; + for st in StoreType::iter() { + if *st != StoreType::Base { + let prefix_key = prefix_key + .push(&st.to_string()) + .map_err(Error::KeyError)?; + let root_key = prefix_key + .push(&"root".to_owned()) + .map_err(Error::KeyError)?; + batch.0.delete_cf(block_cf, root_key.to_string()); + let store_key = prefix_key + .push(&"store".to_owned()) + .map_err(Error::KeyError)?; + batch.0.delete_cf(block_cf, store_key.to_string()); + } + } + Ok(()) + } + None => Ok(()), + } + } } impl<'iter> DBIter<'iter> for RocksDB { @@ -955,49 +1192,76 @@ &'iter self, prefix: &Key, ) -> PersistentPrefixIterator<'iter> { - iter_prefix(self, prefix) + iter_subspace_prefix(self, prefix) } fn iter_results(&'iter self) -> PersistentPrefixIterator<'iter> { let db_prefix = "results/".to_owned(); let prefix = "results".to_owned(); - let mut read_opts = ReadOptions::default(); - // don't use the prefix bloom filter - read_opts.set_total_order_seek(true); - let mut upper_prefix = prefix.clone().into_bytes(); - if let Some(last) = upper_prefix.pop() { - upper_prefix.push(last + 1); - } - read_opts.set_iterate_upper_bound(upper_prefix); - - let iter = self.0.iterator_opt( - IteratorMode::From(prefix.as_bytes(), Direction::Forward), + let block_cf = self + .get_column_family(BLOCK_CF) + .expect("Block column family should exist"); + let read_opts = make_iter_read_opts(Some(prefix.clone())); + let iter = self.0.iterator_cf_opt( + block_cf, read_opts, + IteratorMode::From(prefix.as_bytes(), Direction::Forward), ); PersistentPrefixIterator(PrefixIterator::new(iter, db_prefix)) } + + fn iter_old_diffs( + &'iter self, + height: BlockHeight, + ) -> PersistentPrefixIterator<'iter> { + iter_diffs_prefix(self, height, true) + } + + fn iter_new_diffs( + &'iter self, + height: BlockHeight, + ) ->
PersistentPrefixIterator<'iter> { + iter_diffs_prefix(self, height, false) + } } -fn iter_prefix<'iter>( +fn iter_subspace_prefix<'iter>( db: &'iter RocksDB, prefix: &Key, ) -> PersistentPrefixIterator<'iter> { - let db_prefix = "subspace/".to_owned(); - let prefix = format!("{}{}", db_prefix, prefix); + let subspace_cf = db + .get_column_family(SUBSPACE_CF) + .expect("Subspace column family should exist"); + let db_prefix = "".to_owned(); + iter_prefix(db, subspace_cf, db_prefix, prefix.to_string()) } - let mut read_opts = ReadOptions::default(); - // don't use the prefix bloom filter - read_opts.set_total_order_seek(true); - let mut upper_prefix = prefix.clone().into_bytes(); - if let Some(last) = upper_prefix.pop() { - upper_prefix.push(last + 1); - } - read_opts.set_iterate_upper_bound(upper_prefix); +fn iter_diffs_prefix( db: &RocksDB, height: BlockHeight, is_old: bool, ) -> PersistentPrefixIterator { + let diffs_cf = db + .get_column_family(DIFFS_CF) + .expect("Diffs column family should exist"); + let prefix = if is_old { "old" } else { "new" }; + let db_prefix = format!("{}/{}/", height.0.raw(), prefix); + // get keys without a prefix + iter_prefix(db, diffs_cf, db_prefix.clone(), db_prefix) } - let iter = db.0.iterator_opt( - IteratorMode::From(prefix.as_bytes(), Direction::Forward), +fn iter_prefix<'a>( db: &'a RocksDB, cf: &'a ColumnFamily, db_prefix: String, prefix: String, ) -> PersistentPrefixIterator<'a> { + let read_opts = make_iter_read_opts(Some(prefix.clone())); + let iter = db.0.iterator_cf_opt( + cf, read_opts, + IteratorMode::From(prefix.as_bytes(), Direction::Forward), ); PersistentPrefixIterator(PrefixIterator::new(iter, db_prefix)) } @@ -1031,20 +1295,25 @@ impl<'a> Iterator for PersistentPrefixIterator<'a> { } } -impl DBWriteBatch for RocksDBWriteBatch { - fn put<K, V>(&mut self, key: K, value: V) - where - K: AsRef<[u8]>, - V: AsRef<[u8]>, - { - self.0.put(key, value) - } +/// Make read options for RocksDB iterator with the given prefix +fn make_iter_read_opts(prefix: Option<String>) -> ReadOptions { + let mut read_opts = ReadOptions::default(); + // don't use the prefix bloom filter + read_opts.set_total_order_seek(true); - fn delete<K: AsRef<[u8]>>(&mut self, key: K) { - self.0.delete(key) + if let Some(prefix) = prefix { + let mut upper_prefix = prefix.into_bytes(); + if let Some(last) = upper_prefix.pop() { + upper_prefix.push(last + 1); + } + read_opts.set_iterate_upper_bound(upper_prefix); } + + read_opts } +impl DBWriteBatch for RocksDBWriteBatch {} fn unknown_key_error(key: &str) -> Result<()> { Err(Error::UnknownKey { key: key.to_owned(), @@ -1134,7 +1403,6 @@ mod test { vec![1_u8, 1, 1, 1], ) .unwrap(); - db.exec_batch(batch.0).unwrap(); let merkle_tree = MerkleTree::<Sha256Hasher>::default(); let merkle_tree_stores = merkle_tree.stores(); @@ -1161,7 +1429,8 @@ tx_queue: &tx_queue, }; - db.write_block(block).unwrap(); + db.write_block(block, &mut batch, true).unwrap(); + db.exec_batch(batch.0).unwrap(); let _state = db .read_last_block() @@ -1175,36 +1444,97 @@ let mut db = open(dir.path(), None).unwrap(); let key = Key::parse("test").unwrap(); + let batch_key = Key::parse("batch").unwrap(); let mut batch = RocksDB::batch(); let last_height = BlockHeight(100); db.batch_write_subspace_val( &mut batch, last_height, - &key, + &batch_key, vec![1_u8, 1, 1, 1], ) .unwrap(); db.exec_batch(batch.0).unwrap(); + db.write_subspace_val(last_height, &key, vec![1_u8, 1, 1, 0]) .unwrap(); + let mut batch = RocksDB::batch(); let last_height =
BlockHeight(111); db.batch_write_subspace_val( &mut batch, last_height, - &key, + &batch_key, vec![2_u8, 2, 2, 2], ) .unwrap(); db.exec_batch(batch.0).unwrap(); + db.write_subspace_val(last_height, &key, vec![2_u8, 2, 2, 0]) + .unwrap(); + let prev_value = db - .read_subspace_val_with_height(&key, BlockHeight(100), last_height) + .read_subspace_val_with_height( + &batch_key, + BlockHeight(100), + last_height, + ) .expect("read should succeed"); assert_eq!(prev_value, Some(vec![1_u8, 1, 1, 1])); + let prev_value = db + .read_subspace_val_with_height(&key, BlockHeight(100), last_height) + .expect("read should succeed"); + assert_eq!(prev_value, Some(vec![1_u8, 1, 1, 0])); + + let updated_value = db + .read_subspace_val_with_height( + &batch_key, + BlockHeight(111), + last_height, + ) + .expect("read should succeed"); + assert_eq!(updated_value, Some(vec![2_u8, 2, 2, 2])); + let updated_value = db + .read_subspace_val_with_height(&key, BlockHeight(111), last_height) + .expect("read should succeed"); + assert_eq!(updated_value, Some(vec![2_u8, 2, 2, 0])); + let latest_value = db + .read_subspace_val(&batch_key) + .expect("read should succeed"); + assert_eq!(latest_value, Some(vec![2_u8, 2, 2, 2])); let latest_value = db.read_subspace_val(&key).expect("read should succeed"); - assert_eq!(latest_value, Some(vec![2_u8, 2, 2, 2])); + assert_eq!(latest_value, Some(vec![2_u8, 2, 2, 0])); + + let mut batch = RocksDB::batch(); + let last_height = BlockHeight(222); + db.batch_delete_subspace_val(&mut batch, last_height, &batch_key) + .unwrap(); + db.exec_batch(batch.0).unwrap(); + + db.delete_subspace_val(last_height, &key).unwrap(); + + let deleted_value = db + .read_subspace_val_with_height( + &batch_key, + BlockHeight(222), + last_height, + ) + .expect("read should succeed"); + assert_eq!(deleted_value, None); + let deleted_value = db + .read_subspace_val_with_height(&key, BlockHeight(222), last_height) + .expect("read should succeed"); + assert_eq!(deleted_value, None); + + let latest_value = db + .read_subspace_val(&batch_key) + .expect("read should succeed"); + assert_eq!(latest_value, None); + let latest_value = + db.read_subspace_val(&key).expect("read should succeed"); + assert_eq!(latest_value, None); } } diff --git a/apps/src/lib/node/ledger/tendermint_node.rs b/apps/src/lib/node/ledger/tendermint_node.rs index ac4c17ff8c9..63c9cd40c2e 100644 --- a/apps/src/lib/node/ledger/tendermint_node.rs +++ b/apps/src/lib/node/ledger/tendermint_node.rs @@ -6,6 +6,7 @@ use std::str::FromStr; use borsh::BorshSerialize; use namada::types::chain::ChainId; use namada::types::key::*; +use namada::types::storage::BlockHeight; use namada::types::time::DateTimeUtc; use serde_json::json; #[cfg(feature = "abciplus")] @@ -22,7 +23,7 @@ use crate::config; use crate::facade::tendermint::{block, Genesis}; use crate::facade::tendermint_config::net::Address as TendermintAddress; use crate::facade::tendermint_config::{ - Error as TendermintError, TendermintConfig, + Error as TendermintError, TendermintConfig, TxIndexConfig, TxIndexer, }; /// Env. 
var to output Tendermint log to stdout @@ -44,6 +45,8 @@ pub enum Error { StartUp(std::io::Error), #[error("{0}")] Runtime(String), + #[error("Failed to rollback tendermint state: {0}")] + RollBack(String), #[error("Failed to convert to String: {0:?}")] TendermintPath(std::ffi::OsString), } @@ -190,6 +193,48 @@ pub fn reset(tendermint_dir: impl AsRef<Path>) -> Result<()> { Ok(()) } +pub fn rollback(tendermint_dir: impl AsRef<Path>) -> Result<BlockHeight> { + let tendermint_path = from_env_or_default()?; + let tendermint_dir = tendermint_dir.as_ref().to_string_lossy(); + + // Rollback tendermint state, see https://github.com/tendermint/tendermint/blob/main/cmd/tendermint/commands/rollback.go for details + // on how the tendermint rollback behaves + let output = std::process::Command::new(tendermint_path) + .args([ + "rollback", + "unsafe-all", + // NOTE: log config: https://docs.tendermint.com/master/nodes/logging.html#configuring-log-levels + // "--log-level=\"*debug\"", + "--home", + &tendermint_dir, + ]) + .output() + .map_err(|e| Error::RollBack(e.to_string()))?; + + // Capture the block height from the output of tendermint rollback + // Tendermint stdout message: "Rolled + // back state to height %d and hash %v" + let output_msg = String::from_utf8(output.stdout) + .map_err(|e| Error::RollBack(e.to_string()))?; + let (_, right) = output_msg + .split_once("Rolled back state to height") + .ok_or(Error::RollBack( + "Missing expected block height in tendermint stdout message" + .to_string(), + ))?; + + let mut sub = right.split_ascii_whitespace(); + let height = sub.next().ok_or(Error::RollBack( + "Missing expected block height in tendermint stdout message" + .to_string(), + ))?; + + Ok(height + .parse::<u64>() + .map_err(|e| Error::RollBack(e.to_string()))? + .into()) +} + /// Convert a common signing scheme validator key into JSON for /// Tendermint fn validator_key_to_json( @@ -351,6 +396,15 @@ async fn update_tendermint_config( tendermint_config.consensus_timeout_commit; } + let indexer = if tendermint_config.tx_index { + TxIndexer::Kv + } else { + TxIndexer::Null + }; + #[cfg(feature = "abcipp")] + let indexer = [indexer]; + config.tx_index = TxIndexConfig { indexer }; + let mut file = OpenOptions::new() .write(true) .truncate(true) diff --git a/apps/src/lib/wallet/alias.rs b/apps/src/lib/wallet/alias.rs index 13d977b8524..e69de29bb2d 100644 --- a/apps/src/lib/wallet/alias.rs +++ b/apps/src/lib/wallet/alias.rs @@ -1,103 +0,0 @@ -//! Wallet address and key aliases. - -use std::convert::Infallible; -use std::fmt::Display; -use std::hash::Hash; -use std::str::FromStr; - -use serde::{Deserialize, Serialize}; - -/// Aliases created from raw strings are kept in-memory as given, but their -/// `Serialize` and `Display` instance converts them to lowercase. Their -/// `PartialEq` instance is case-insensitive. -#[derive(Clone, Debug, Default, Deserialize, PartialOrd, Ord, Eq)] -#[serde(transparent)] -pub struct Alias(String); - -impl Alias { - /// Normalize an alias to lower-case - pub fn normalize(&self) -> String { - self.0.to_lowercase() - } - - /// Returns the length of the underlying `String`. - pub fn len(&self) -> usize { - self.0.len() - } - - /// Is the underlying `String` empty?
- pub fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - -impl Serialize for Alias { - fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> - where - S: serde::Serializer, - { - self.normalize().serialize(serializer) - } -} - -impl PartialEq for Alias { - fn eq(&self, other: &Self) -> bool { - self.normalize() == other.normalize() - } -} - -impl Hash for Alias { - fn hash<H: std::hash::Hasher>(&self, state: &mut H) { - self.normalize().hash(state); - } -} - -impl<T> From<T> for Alias -where - T: AsRef<str>, -{ - fn from(raw: T) -> Self { - Self(raw.as_ref().to_owned()) - } -} - -impl From<Alias> for String { - fn from(alias: Alias) -> Self { - alias.normalize() - } -} - -impl<'a> From<&'a Alias> for String { - fn from(alias: &'a Alias) -> Self { - alias.normalize() - } -} - -impl Display for Alias { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.normalize().fmt(f) - } -} - -impl FromStr for Alias { - type Err = Infallible; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - Ok(Self(s.into())) - } -} - -/// Default alias of a validator's account key -pub fn validator_key(validator_alias: &Alias) -> Alias { - format!("{validator_alias}-validator-key").into() -} - -/// Default alias of a validator's consensus key -pub fn validator_consensus_key(validator_alias: &Alias) -> Alias { - format!("{validator_alias}-consensus-key").into() -} - -/// Default alias of a validator's Tendermint node key -pub fn validator_tendermint_node_key(validator_alias: &Alias) -> Alias { - format!("{validator_alias}-tendermint-node-key").into() -} diff --git a/apps/src/lib/wallet/defaults.rs b/apps/src/lib/wallet/defaults.rs index b0ae08ac836..95d1fe035b3 100644 --- a/apps/src/lib/wallet/defaults.rs +++ b/apps/src/lib/wallet/defaults.rs @@ -6,12 +6,12 @@ pub use dev::{ christel_address, christel_keypair, daewon_address, daewon_keypair, keys, validator_address, validator_keypair, validator_keys, }; +use namada::ledger::wallet::alias::Alias; use namada::ledger::{eth_bridge, governance, pos}; use namada::types::address::Address; use namada::types::key::*; use crate::config::genesis::genesis_config::GenesisConfig; -use crate::wallet::alias::Alias; /// The default addresses with their aliases. pub fn addresses_from_genesis(genesis: GenesisConfig) -> Vec<(Alias, Address)> { @@ -70,14 +70,17 @@ pub fn addresses_from_genesis(genesis: GenesisConfig) -> Vec<(Alias, Address)> { #[cfg(feature = "dev")] mod dev { + use std::collections::HashMap; + use borsh::BorshDeserialize; + use namada::ledger::wallet::alias::Alias; use namada::ledger::{governance, pos}; - use namada::types::address::{self, Address}; + use namada::types::address::{ + apfel, btc, dot, eth, kartoffel, nam, schnitzel, Address, + }; use namada::types::key::dkg_session_keys::DkgKeypair; use namada::types::key::*; - use crate::wallet::alias::Alias; - /// Generate a new protocol signing keypair and DKG session keypair pub fn validator_keys() -> (common::SecretKey, DkgKeypair) { let bytes: [u8; 33] = [ @@ -108,6 +111,21 @@ mod dev { ] } + /// Deprecated function, soon to be deleted. Generates default tokens + fn tokens() -> HashMap<Address, &'static str> { + vec![ + (nam(), "NAM"), + (btc(), "BTC"), + (eth(), "ETH"), + (dot(), "DOT"), + (schnitzel(), "Schnitzel"), + (apfel(), "Apfel"), + (kartoffel(), "Kartoffel"), + ] + .into_iter() + .collect() + } + /// The default addresses with their aliases.
pub fn addresses() -> Vec<(Alias, Address)> { let mut addresses: Vec<(Alias, Address)> = vec![ @@ -120,7 +138,7 @@ mod dev { ("christel".into(), christel_address()), ("daewon".into(), daewon_address()), ]; - let token_addresses = address::tokens() + let token_addresses = tokens() .into_iter() .map(|(addr, alias)| (alias.into(), addr)); addresses.extend(token_addresses); diff --git a/apps/src/lib/wallet/keys.rs b/apps/src/lib/wallet/keys.rs index 7627bd9b165..8b137891791 100644 --- a/apps/src/lib/wallet/keys.rs +++ b/apps/src/lib/wallet/keys.rs @@ -1,243 +1 @@ -//! Cryptographic keys for digital signatures support for the wallet. -use std::fmt::Display; -use std::marker::PhantomData; -use std::str::FromStr; - -use borsh::{BorshDeserialize, BorshSerialize}; -use data_encoding::HEXLOWER; -use orion::{aead, kdf}; -use serde::{Deserialize, Serialize}; -use thiserror::Error; - -use super::read_password; - -const ENCRYPTED_KEY_PREFIX: &str = "encrypted:"; -const UNENCRYPTED_KEY_PREFIX: &str = "unencrypted:"; - -/// A keypair stored in a wallet -#[derive(Debug)] -pub enum StoredKeypair<T: BorshSerialize + BorshDeserialize + Display + FromStr> -where - <T as FromStr>::Err: Display, -{ - /// An encrypted keypair - Encrypted(EncryptedKeypair<T>), - /// An raw (unencrypted) keypair - Raw(T), -} - -impl<T: BorshSerialize + BorshDeserialize + Display + FromStr> Serialize - for StoredKeypair<T> -where - <T as FromStr>::Err: Display, -{ - fn serialize<S>( - &self, - serializer: S, - ) -> std::result::Result<S::Ok, S::Error> - where - S: serde::Serializer, - { - // String encoded, because toml doesn't support enums - match self { - StoredKeypair::Encrypted(encrypted) => { - let keypair_string = - format!("{}{}", ENCRYPTED_KEY_PREFIX, encrypted); - serde::Serialize::serialize(&keypair_string, serializer) - } - StoredKeypair::Raw(raw) => { - let keypair_string = - format!("{}{}", UNENCRYPTED_KEY_PREFIX, raw); - serde::Serialize::serialize(&keypair_string, serializer) - } - } - } -} - -impl<'de, T: BorshSerialize + BorshDeserialize + Display + FromStr> - Deserialize<'de> for StoredKeypair<T> -where - <T as FromStr>::Err: Display, -{ - fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> - where - D: serde::Deserializer<'de>, - { - use serde::de::Error; - - let keypair_string: String = - serde::Deserialize::deserialize(deserializer) - .map_err(|err| { - DeserializeStoredKeypairError::InvalidStoredKeypairString( - err.to_string(), - ) - }) - .map_err(D::Error::custom)?; - if let Some(raw) = keypair_string.strip_prefix(UNENCRYPTED_KEY_PREFIX) { - FromStr::from_str(raw) - .map(|keypair| Self::Raw(keypair)) - .map_err(|err| { - DeserializeStoredKeypairError::InvalidStoredKeypairString( - err.to_string(), - ) - }) - .map_err(D::Error::custom) - } else if let Some(encrypted) = - keypair_string.strip_prefix(ENCRYPTED_KEY_PREFIX) - { - FromStr::from_str(encrypted) - .map(Self::Encrypted) - .map_err(|err| { - DeserializeStoredKeypairError::InvalidStoredKeypairString( - err.to_string(), - ) - }) - .map_err(D::Error::custom) - } else { - Err(DeserializeStoredKeypairError::MissingPrefix) - .map_err(D::Error::custom) - } - } -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum DeserializeStoredKeypairError { - #[error("The stored keypair is not valid: {0}")] - InvalidStoredKeypairString(String), - #[error("The stored keypair is missing a prefix")] - MissingPrefix, -} - -/// An encrypted keypair stored in a wallet -#[derive(Debug)] -pub struct EncryptedKeypair<T: BorshSerialize + BorshDeserialize>( - Vec<u8>, - PhantomData<T>, -); - -impl<T: BorshSerialize + BorshDeserialize> Display for EncryptedKeypair<T> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", HEXLOWER.encode(self.0.as_ref())) - } -} - -impl<T: BorshSerialize + BorshDeserialize> FromStr for EncryptedKeypair<T> { -
type Err = data_encoding::DecodeError; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - HEXLOWER.decode(s.as_ref()).map(|x| Self(x, PhantomData)) - } -} - -#[allow(missing_docs)] -#[derive(Debug, Error)] -pub enum DecryptionError { - #[error("Unexpected encryption salt")] - BadSalt, - #[error("Unable to decrypt the keypair. Is the password correct?")] - DecryptionError, - #[error("Unable to deserialize the keypair")] - DeserializingError, - #[error("Asked not to decrypt")] - NotDecrypting, -} - -impl<T: BorshSerialize + BorshDeserialize + Display + FromStr + Clone> - StoredKeypair<T> -where - <T as FromStr>::Err: Display, -{ - /// Construct a keypair for storage. If no password is provided, the keypair - /// will be stored raw without encryption. Returns the key for storing and a - /// reference-counting point to the raw key. - pub fn new(keypair: T, password: Option<String>) -> (Self, T) { - match password { - Some(password) => ( - Self::Encrypted(EncryptedKeypair::new(&keypair, password)), - keypair, - ), - None => (Self::Raw(keypair.clone()), keypair), - } - } - - /// Get a raw keypair from a stored keypair. If the keypair is encrypted and - /// no password is provided in the argument, a password will be prompted - /// from stdin. - pub fn get( - &self, - decrypt: bool, - password: Option<String>, - ) -> Result<T, DecryptionError> { - match self { - StoredKeypair::Encrypted(encrypted_keypair) => { - if decrypt { - let password = password.unwrap_or_else(|| { - read_password("Enter decryption password: ") - }); - let key = encrypted_keypair.decrypt(password)?; - Ok(key) - } else { - Err(DecryptionError::NotDecrypting) - } - } - StoredKeypair::Raw(keypair) => Ok(keypair.clone()), - } - } - - pub fn is_encrypted(&self) -> bool { - match self { - StoredKeypair::Encrypted(_) => true, - StoredKeypair::Raw(_) => false, - } - } -} - -impl<T: BorshSerialize + BorshDeserialize> EncryptedKeypair<T> { - /// Encrypt a keypair and store it with its salt. - pub fn new(keypair: &T, password: String) -> Self { - let salt = encryption_salt(); - let encryption_key = encryption_key(&salt, password); - - let data = keypair - .try_to_vec() - .expect("Serializing keypair shouldn't fail"); - - let encrypted_keypair = aead::seal(&encryption_key, &data) - .expect("Encryption of data shouldn't fail"); - - let encrypted_data = [salt.as_ref(), &encrypted_keypair].concat(); - - Self(encrypted_data, PhantomData) - } - - /// Decrypt an encrypted keypair - pub fn decrypt(&self, password: String) -> Result<T, DecryptionError> { - let salt_len = encryption_salt().len(); - let (raw_salt, cipher) = self.0.split_at(salt_len); - - let salt = kdf::Salt::from_slice(raw_salt) - .map_err(|_| DecryptionError::BadSalt)?; - - let encryption_key = encryption_key(&salt, password); - - let decrypted_data = aead::open(&encryption_key, cipher) - .map_err(|_| DecryptionError::DecryptionError)?; - - T::try_from_slice(&decrypted_data) - .map_err(|_| DecryptionError::DeserializingError) - } -} - -/// Keypair encryption salt -fn encryption_salt() -> kdf::Salt { - kdf::Salt::default() -} - -/// Make encryption secret key from a password.
-fn encryption_key(salt: &kdf::Salt, password: String) -> kdf::SecretKey { - kdf::Password::from_slice(password.as_bytes()) - .and_then(|password| kdf::derive_key(&password, salt, 3, 1 << 17, 32)) - .expect("Generation of encryption secret key shouldn't fail") -} diff --git a/apps/src/lib/wallet/mod.rs b/apps/src/lib/wallet/mod.rs index b79ff6703be..04aae73dc65 100644 --- a/apps/src/lib/wallet/mod.rs +++ b/apps/src/lib/wallet/mod.rs @@ -1,531 +1,204 @@ -mod alias; pub mod defaults; mod keys; pub mod pre_genesis; mod store; -use std::collections::HashMap; -use std::fmt::Display; +use std::io::{self, Write}; use std::path::{Path, PathBuf}; -use std::str::FromStr; use std::{env, fs}; -use borsh::{BorshDeserialize, BorshSerialize}; -use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::types::address::Address; -use namada::types::key::*; -use namada::types::masp::{ - ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, +pub use namada::ledger::wallet::alias::Alias; +use namada::ledger::wallet::{ + ConfirmationResponse, FindKeyError, Wallet, WalletUtils, +}; +pub use namada::ledger::wallet::{ + DecryptionError, StoredKeypair, ValidatorData, ValidatorKeys, }; +use namada::types::key::*; pub use store::wallet_file; -use thiserror::Error; -use self::alias::Alias; -pub use self::keys::{DecryptionError, StoredKeypair}; -use self::store::Store; -pub use self::store::{ValidatorData, ValidatorKeys}; use crate::cli; use crate::config::genesis::genesis_config::GenesisConfig; #[derive(Debug)] -pub struct Wallet { - store_dir: PathBuf, - store: Store, - decrypted_key_cache: HashMap<Alias, common::SecretKey>, - decrypted_spendkey_cache: HashMap<Alias, ExtendedSpendingKey>, -} - -#[derive(Error, Debug)] -pub enum FindKeyError { - #[error("No matching key found")] - KeyNotFound, - #[error("{0}")] - KeyDecryptionError(keys::DecryptionError), -} - -impl Wallet { - /// Load a wallet from the store file. - pub fn load(store_dir: &Path) -> Option<Self> { - let store = Store::load(store_dir).unwrap_or_else(|err| { - eprintln!("Unable to load the wallet: {}", err); - cli::safe_exit(1) - }); - Some(Self { - store_dir: store_dir.to_path_buf(), - store, - decrypted_key_cache: HashMap::default(), - decrypted_spendkey_cache: HashMap::default(), - }) - } - - /// Load a wallet from the store file or create a new wallet without any - /// keys or addresses. - pub fn load_or_new(store_dir: &Path) -> Self { - let store = Store::load_or_new(store_dir).unwrap_or_else(|err| { - eprintln!("Unable to load the wallet: {}", err); - cli::safe_exit(1) - }); - Self { - store_dir: store_dir.to_path_buf(), - store, - decrypted_key_cache: HashMap::default(), - decrypted_spendkey_cache: HashMap::default(), - } - } - - /// Load a wallet from the store file or create a new one with the default - /// addresses loaded from the genesis file, if not found. - pub fn load_or_new_from_genesis( - store_dir: &Path, - genesis_cfg: GenesisConfig, - ) -> Self { - let store = Store::load_or_new_from_genesis(store_dir, genesis_cfg) - .unwrap_or_else(|err| { - eprintln!("Unable to load the wallet: {}", err); - cli::safe_exit(1) - }); - Self { - store_dir: store_dir.to_path_buf(), - store, - decrypted_key_cache: HashMap::default(), - decrypted_spendkey_cache: HashMap::default(), - } - } - - /// Add addresses from a genesis configuration. - pub fn add_genesis_addresses(&mut self, genesis: GenesisConfig) { - self.store.add_genesis_addresses(genesis) - } - - /// Save the wallet store to a file.
- pub fn save(&self) -> std::io::Result<()> { - self.store.save(&self.store_dir) - } - - /// Prompt for pssword and confirm it if parameter is false - fn new_password_prompt(unsafe_dont_encrypt: bool) -> Option { - let password = if unsafe_dont_encrypt { - println!("Warning: The keypair will NOT be encrypted."); - None - } else { - Some(read_password("Enter your encryption password: ")) +pub struct CliWalletUtils; + +impl WalletUtils for CliWalletUtils { + type Storage = PathBuf; + + /// Read the password for encryption/decryption from the file/env/stdin. + /// Panics if all options are empty/invalid. + fn read_password(prompt_msg: &str) -> String { + let pwd = match env::var("NAMADA_WALLET_PASSWORD_FILE") { + Ok(path) => fs::read_to_string(path) + .expect("Something went wrong reading the file"), + Err(_) => match env::var("NAMADA_WALLET_PASSWORD") { + Ok(password) => password, + Err(_) => rpassword::read_password_from_tty(Some(prompt_msg)) + .unwrap_or_default(), + }, }; - // Bis repetita for confirmation. - let pwd = if unsafe_dont_encrypt { - None - } else { - Some(read_password( - "To confirm, please enter the same encryption password once \ - more: ", - )) - }; - if pwd != password { - eprintln!("Your two inputs do not match!"); + if pwd.is_empty() { + eprintln!("Password cannot be empty"); cli::safe_exit(1) } - password - } - - /// Generate a new keypair and derive an implicit address from its public - /// and insert them into the store with the provided alias, converted to - /// lower case. If none provided, the alias will be the public key hash (in - /// lowercase too). If the key is to be encrypted, will prompt for - /// password from stdin. Stores the key in decrypted key cache and - /// returns the alias of the key and a reference-counting pointer to the - /// key. - pub fn gen_key( - &mut self, - scheme: SchemeType, - alias: Option, - unsafe_dont_encrypt: bool, - ) -> (String, common::SecretKey) { - let password = read_and_confirm_pwd(unsafe_dont_encrypt); - let (alias, key) = self.store.gen_key(scheme, alias, password); - // Cache the newly added key - self.decrypted_key_cache.insert(alias.clone(), key.clone()); - (alias.into(), key) - } - - pub fn gen_spending_key( - &mut self, - alias: String, - unsafe_dont_encrypt: bool, - ) -> (String, ExtendedSpendingKey) { - let password = Self::new_password_prompt(unsafe_dont_encrypt); - let (alias, key) = self.store.gen_spending_key(alias, password); - // Cache the newly added key - self.decrypted_spendkey_cache.insert(alias.clone(), key); - (alias.into(), key) - } - - /// Generate keypair - /// for signing protocol txs and for the DKG (which will also be stored) - /// A protocol keypair may be optionally provided, indicating that - /// we should re-use a keypair already in the wallet - pub fn gen_validator_keys( - &mut self, - protocol_pk: Option, - scheme: SchemeType, - ) -> Result { - let protocol_keypair = protocol_pk.map(|pk| { - self.find_key_by_pkh(&PublicKeyHash::from(&pk)) - .ok() - .or_else(|| { - self.store - .validator_data - .take() - .map(|data| data.keys.protocol_keypair) - }) - .ok_or(FindKeyError::KeyNotFound) - }); - match protocol_keypair { - Some(Err(err)) => Err(err), - other => Ok(Store::gen_validator_keys( - other.map(|res| res.unwrap()), - scheme, - )), - } - } - - /// Add validator data to the store - pub fn add_validator_data( - &mut self, - address: Address, - keys: ValidatorKeys, - ) { - self.store.add_validator_data(address, keys); - } - - /// Returns the validator data, if it exists. 
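The new `CliWalletUtils::read_password` above resolves the password from three sources in order: a file named by `NAMADA_WALLET_PASSWORD_FILE`, then the literal `NAMADA_WALLET_PASSWORD` value, then an interactive prompt. A std-only sketch of that precedence, with plain stdin standing in for the `rpassword` TTY prompt (function name is illustrative):

```rust
use std::{env, fs, io};

// Lookup order mirrored from CliWalletUtils::read_password:
// 1. NAMADA_WALLET_PASSWORD_FILE -> read the file's contents
// 2. NAMADA_WALLET_PASSWORD      -> use the value directly
// 3. interactive prompt (the real code uses rpassword, not raw stdin)
fn read_password_sketch(prompt_msg: &str) -> io::Result<String> {
    if let Ok(path) = env::var("NAMADA_WALLET_PASSWORD_FILE") {
        return fs::read_to_string(path);
    }
    if let Ok(password) = env::var("NAMADA_WALLET_PASSWORD") {
        return Ok(password);
    }
    eprint!("{}", prompt_msg);
    let mut pwd = String::new();
    io::stdin().read_line(&mut pwd)?;
    Ok(pwd.trim_end().to_owned())
}

fn main() {
    env::remove_var("NAMADA_WALLET_PASSWORD_FILE");
    env::set_var("NAMADA_WALLET_PASSWORD", "hunter2");
    assert_eq!(read_password_sketch("Password: ").unwrap(), "hunter2");
}
```

Note that the file branch does not trim its contents, so a trailing newline in the password file becomes part of the password; the sketch mirrors that behaviour.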
- pub fn get_validator_data(&self) -> Option<&ValidatorData> { - self.store.get_validator_data() - } - - /// Returns the validator data, if it exists. - /// [`Wallet::save`] cannot be called after using this - /// method as it involves a partial move - pub fn take_validator_data(self) -> Option { - self.store.validator_data() - } - - /// Find the stored key by an alias, a public key hash or a public key. - /// If the key is encrypted, will prompt for password from stdin. - /// Any keys that are decrypted are stored in and read from a cache to avoid - /// prompting for password multiple times. - pub fn find_key( - &mut self, - alias_pkh_or_pk: impl AsRef, - ) -> Result { - // Try cache first - if let Some(cached_key) = self - .decrypted_key_cache - .get(&alias_pkh_or_pk.as_ref().into()) - { - return Ok(cached_key.clone()); - } - // If not cached, look-up in store - let stored_key = self - .store - .find_key(alias_pkh_or_pk.as_ref()) - .ok_or(FindKeyError::KeyNotFound)?; - Self::decrypt_stored_key( - &mut self.decrypted_key_cache, - stored_key, - alias_pkh_or_pk.into(), - ) - } - - pub fn find_spending_key( - &mut self, - alias: impl AsRef, - ) -> Result { - // Try cache first - if let Some(cached_key) = - self.decrypted_spendkey_cache.get(&alias.as_ref().into()) - { - return Ok(*cached_key); - } - // If not cached, look-up in store - let stored_spendkey = self - .store - .find_spending_key(alias.as_ref()) - .ok_or(FindKeyError::KeyNotFound)?; - Self::decrypt_stored_key( - &mut self.decrypted_spendkey_cache, - stored_spendkey, - alias.into(), - ) - } - - pub fn find_viewing_key( - &mut self, - alias: impl AsRef, - ) -> Result<&ExtendedViewingKey, FindKeyError> { - self.store - .find_viewing_key(alias.as_ref()) - .ok_or(FindKeyError::KeyNotFound) - } - - pub fn find_payment_addr( - &self, - alias: impl AsRef, - ) -> Option<&PaymentAddress> { - self.store.find_payment_addr(alias.as_ref()) - } - - /// Find the stored key by a public key. - /// If the key is encrypted, will prompt for password from stdin. - /// Any keys that are decrypted are stored in and read from a cache to avoid - /// prompting for password multiple times. - pub fn find_key_by_pk( - &mut self, - pk: &common::PublicKey, - ) -> Result { - // Try to look-up alias for the given pk. Otherwise, use the PKH string. - let pkh: PublicKeyHash = pk.into(); - let alias = self - .store - .find_alias_by_pkh(&pkh) - .unwrap_or_else(|| pkh.to_string().into()); - // Try read cache - if let Some(cached_key) = self.decrypted_key_cache.get(&alias) { - return Ok(cached_key.clone()); - } - // Look-up from store - let stored_key = self - .store - .find_key_by_pk(pk) - .ok_or(FindKeyError::KeyNotFound)?; - Self::decrypt_stored_key( - &mut self.decrypted_key_cache, - stored_key, - alias, - ) - } - - /// Find the stored key by a public key hash. - /// If the key is encrypted, will prompt for password from stdin. - /// Any keys that are decrypted are stored in and read from a cache to avoid - /// prompting for password multiple times. - pub fn find_key_by_pkh( - &mut self, - pkh: &PublicKeyHash, - ) -> Result { - // Try to look-up alias for the given pk. Otherwise, use the PKH string. 
- let alias = self - .store - .find_alias_by_pkh(pkh) - .unwrap_or_else(|| pkh.to_string().into()); - // Try read cache - if let Some(cached_key) = self.decrypted_key_cache.get(&alias) { - return Ok(cached_key.clone()); - } - // Look-up from store - let stored_key = self - .store - .find_key_by_pkh(pkh) - .ok_or(FindKeyError::KeyNotFound)?; - Self::decrypt_stored_key( - &mut self.decrypted_key_cache, - stored_key, - alias, - ) - } - - /// Decrypt stored key, if it's not stored un-encrypted. - /// If a given storage key needs to be decrypted, prompt for password from - /// stdin and if successfully decrypted, store it in a cache. - fn decrypt_stored_key< - T: FromStr + Display + BorshSerialize + BorshDeserialize + Clone, - >( - decrypted_key_cache: &mut HashMap, - stored_key: &StoredKeypair, - alias: Alias, - ) -> Result - where - ::Err: Display, - { - match stored_key { - StoredKeypair::Encrypted(encrypted) => { - let password = read_password("Enter decryption password: "); - let key = encrypted - .decrypt(password) - .map_err(FindKeyError::KeyDecryptionError)?; - decrypted_key_cache.insert(alias.clone(), key); - decrypted_key_cache - .get(&alias) - .cloned() - .ok_or(FindKeyError::KeyNotFound) + pwd + } + + /// Read an alias from stdin. + fn read_alias(prompt_msg: &str) -> String { + print!("Choose an alias for {}: ", prompt_msg); + io::stdout().flush().unwrap(); + let mut alias = String::new(); + io::stdin().read_line(&mut alias).unwrap(); + alias.trim().to_owned() + } + + // The given alias has been selected but conflicts with another alias in + // the store. Offer the user the choice to either replace the existing + // mapping, alter the chosen alias to a name of their choosing, or cancel + // the aliasing. + fn show_overwrite_confirmation( + alias: &Alias, + alias_for: &str, + ) -> ConfirmationResponse { + print!( + "You're trying to create an alias \"{}\" that already exists for \ + {} in your store.\nWould you like to replace it? \ + s(k)ip/re(p)lace/re(s)elect: ", + alias, alias_for + ); + io::stdout().flush().unwrap(); + + let mut buffer = String::new(); + // Get the user to select between 3 choices + match io::stdin().read_line(&mut buffer) { + Ok(size) if size > 0 => { + // Isolate the single character representing the choice + let byte = buffer.chars().next().unwrap(); + buffer.clear(); + match byte { + 'p' | 'P' => return ConfirmationResponse::Replace, + 's' | 'S' => { + // In the case of reselection, elicit new alias + print!("Please enter a different alias: "); + io::stdout().flush().unwrap(); + if io::stdin().read_line(&mut buffer).is_ok() { + return ConfirmationResponse::Reselect( + buffer.trim().into(), + ); + } + } + 'k' | 'K' => return ConfirmationResponse::Skip, + // Input is senseless; fall through to repeat prompt + _ => {} + }; + } - StoredKeypair::Raw(raw) => Ok(raw.clone()), + _ => {} } + // Input is senseless; fall through to repeat prompt + println!("Invalid option, try again."); + Self::show_overwrite_confirmation(alias, alias_for) } +} - /// Get all known keys by their alias, paired with PKH, if known. - pub fn get_keys( - &self, - ) -> HashMap< - String, - (&StoredKeypair, Option<&PublicKeyHash>), - > { - self.store - .get_keys() - .into_iter() - .map(|(alias, value)| (alias.into(), value)) - .collect() - } - - /// Find the stored address by an alias. - pub fn find_address(&self, alias: impl AsRef) -> Option<&Address> { - self.store.find_address(alias) - } - - /// Find an alias by the address if it's in the wallet.
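The `show_overwrite_confirmation` prompt above repeats (by recursing) until the first typed character maps to an outcome. A small sketch that factors that character-to-outcome mapping out of the I/O so it can be tested; `Confirmation` and `classify` are illustrative names, not part of the patch:

```rust
// Decision table for the s(k)ip / re(p)lace / re(s)elect prompt.
#[derive(Debug, PartialEq)]
enum Confirmation {
    Replace,
    Reselect, // the real code then reads a fresh alias from stdin
    Skip,
    AskAgain, // senseless input repeats the prompt
}

fn classify(input: &str) -> Confirmation {
    match input.chars().next() {
        Some('p') | Some('P') => Confirmation::Replace,
        Some('s') | Some('S') => Confirmation::Reselect,
        Some('k') | Some('K') => Confirmation::Skip,
        _ => Confirmation::AskAgain,
    }
}

fn main() {
    assert_eq!(classify("p\n"), Confirmation::Replace);
    assert_eq!(classify("S\n"), Confirmation::Reselect);
    assert_eq!(classify("k\n"), Confirmation::Skip);
    assert_eq!(classify(""), Confirmation::AskAgain);
}
```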
- pub fn find_alias(&self, address: &Address) -> Option<&Alias> { - self.store.find_alias(address) - } - - /// Get all known addresses by their alias, paired with PKH, if known. - pub fn get_addresses(&self) -> HashMap { - self.store - .get_addresses() - .iter() - .map(|(alias, value)| (alias.into(), value.clone())) - .collect() - } - - /// Get all known payment addresses by their alias - pub fn get_payment_addrs(&self) -> HashMap { - self.store - .get_payment_addrs() - .iter() - .map(|(alias, value)| (alias.into(), *value)) - .collect() - } - - /// Get all known viewing keys by their alias - pub fn get_viewing_keys(&self) -> HashMap { - self.store - .get_viewing_keys() - .iter() - .map(|(alias, value)| (alias.into(), *value)) - .collect() - } - - /// Get all known viewing keys by their alias - pub fn get_spending_keys( - &self, - ) -> HashMap> { - self.store - .get_spending_keys() - .iter() - .map(|(alias, value)| (alias.into(), value)) - .collect() - } - - /// Add a new address with the given alias. If the alias is already used, - /// will ask whether the existing alias should be replaced, a different - /// alias is desired, or the alias creation should be cancelled. Return - /// the chosen alias if the address has been added, otherwise return - /// nothing. - pub fn add_address( - &mut self, - alias: impl AsRef, - address: Address, - ) -> Option { - self.store - .insert_address(alias.into(), address) - .map(Into::into) - } - - /// Insert a new key with the given alias. If the alias is already used, - /// will prompt for overwrite confirmation. - pub fn insert_keypair( - &mut self, - alias: String, - keypair: StoredKeypair, - pkh: PublicKeyHash, - ) -> Option { - self.store - .insert_keypair(alias.into(), keypair, pkh) - .map(Into::into) +/// Generate keypair +/// for signing protocol txs and for the DKG (which will also be stored) +/// A protocol keypair may be optionally provided, indicating that +/// we should re-use a keypair already in the wallet +pub fn gen_validator_keys( + wallet: &mut Wallet, + protocol_pk: Option, + scheme: SchemeType, +) -> Result { + let protocol_keypair = protocol_pk.map(|pk| { + wallet + .find_key_by_pkh(&PublicKeyHash::from(&pk), None) + .ok() + .or_else(|| { + wallet + .store_mut() + .validator_data() + .take() + .map(|data| data.keys.protocol_keypair.clone()) + }) + .ok_or(FindKeyError::KeyNotFound) + }); + match protocol_keypair { + Some(Err(err)) => Err(err), + other => Ok(store::gen_validator_keys( + other.map(|res| res.unwrap()), + scheme, + )), } +} - pub fn insert_viewing_key( - &mut self, - alias: String, - view_key: ExtendedViewingKey, - ) -> Option { - self.store - .insert_viewing_key(alias.into(), view_key) - .map(Into::into) +/// Add addresses from a genesis configuration. +pub fn add_genesis_addresses( + wallet: &mut Wallet, + genesis: GenesisConfig, +) { + for (alias, addr) in defaults::addresses_from_genesis(genesis) { + wallet.add_address(alias.normalize(), addr, true); } +} - pub fn insert_spending_key( - &mut self, - alias: String, - spend_key: StoredKeypair, - viewkey: ExtendedViewingKey, - ) -> Option { - self.store - .insert_spending_key(alias.into(), spend_key, viewkey) - .map(Into::into) - } +/// Save the wallet store to a file. 
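In the relocated `gen_validator_keys` above, the `match` on `Some(Err(err))` is a hand-rolled conversion from `Option<Result<_, _>>` to `Result<Option<_>, _>`. As an aside (an observation about the pattern, not a change this patch makes), std's `Option::transpose` expresses the same shape directly:

```rust
// Option<Result<T, E>> -> Result<Option<T>, E> via transpose.
fn demo(maybe_key: Option<Result<u32, String>>) -> Result<Option<u32>, String> {
    maybe_key.transpose()
}

fn main() {
    // no protocol pk supplied: nothing to look up, no error
    assert_eq!(demo(None), Ok(None));
    // pk supplied and found
    assert_eq!(demo(Some(Ok(7))), Ok(Some(7)));
    // pk supplied but not in the wallet: the error propagates
    assert_eq!(demo(Some(Err("not found".into()))), Err("not found".into()));
}
```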
+pub fn save(wallet: &Wallet) -> std::io::Result<()> { + self::store::save(wallet.store(), wallet.store_dir()) +} - pub fn encrypt_insert_spending_key( - &mut self, - alias: String, - spend_key: ExtendedSpendingKey, - unsafe_dont_encrypt: bool, - ) -> Option { - let password = Self::new_password_prompt(unsafe_dont_encrypt); - self.store - .insert_spending_key( - alias.into(), - StoredKeypair::new(spend_key, password).0, - ExtendedFullViewingKey::from(&spend_key.into()).into(), - ) - .map(Into::into) - } +/// Load a wallet from the store file. +pub fn load(store_dir: &Path) -> Option> { + let store = self::store::load(store_dir).unwrap_or_else(|err| { + eprintln!("Unable to load the wallet: {}", err); + cli::safe_exit(1) + }); + Some(Wallet::::new( + store_dir.to_path_buf(), + store, + )) +} - pub fn insert_payment_addr( - &mut self, - alias: String, - payment_addr: PaymentAddress, - ) -> Option { - self.store - .insert_payment_addr(alias.into(), payment_addr) - .map(Into::into) - } +/// Load a wallet from the store file or create a new wallet without any +/// keys or addresses. +pub fn load_or_new(store_dir: &Path) -> Wallet { + let store = self::store::load_or_new(store_dir).unwrap_or_else(|err| { + eprintln!("Unable to load the wallet: {}", err); + cli::safe_exit(1) + }); + Wallet::::new(store_dir.to_path_buf(), store) +} - /// Extend this wallet from pre-genesis validator wallet. - pub fn extend_from_pre_genesis_validator( - &mut self, - validator_address: Address, - validator_alias: Alias, - other: pre_genesis::ValidatorWallet, - ) { - self.store.extend_from_pre_genesis_validator( - validator_address, - validator_alias, - other, - ) - } +/// Load a wallet from the store file or create a new one with the default +/// addresses loaded from the genesis file, if not found. +pub fn load_or_new_from_genesis( + store_dir: &Path, + genesis_cfg: GenesisConfig, +) -> Wallet { + let store = self::store::load_or_new_from_genesis(store_dir, genesis_cfg) + .unwrap_or_else(|err| { + eprintln!("Unable to load the wallet: {}", err); + cli::safe_exit(1) + }); + Wallet::::new(store_dir.to_path_buf(), store) } -/// Read the password for encryption from the file/env/stdin with confirmation. +/// Read the password for encryption from the file/env/stdin with +/// confirmation. pub fn read_and_confirm_pwd(unsafe_dont_encrypt: bool) -> Option { let password = if unsafe_dont_encrypt { println!("Warning: The keypair will NOT be encrypted."); None } else { - Some(read_password("Enter your encryption password: ")) + Some(CliWalletUtils::read_password( + "Enter your encryption password: ", + )) }; // Bis repetita for confirmation. let to_confirm = if unsafe_dont_encrypt { None } else { - Some(read_password( + Some(CliWalletUtils::read_password( "To confirm, please enter the same encryption password once more: ", )) }; @@ -535,22 +208,3 @@ pub fn read_and_confirm_pwd(unsafe_dont_encrypt: bool) -> Option { } password } - -/// Read the password for encryption/decryption from the file/env/stdin. Panics -/// if all options are empty/invalid. 
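`read_and_confirm_pwd` above prompts twice and treats a mismatch as fatal, skipping both prompts entirely when `unsafe_dont_encrypt` is set. A testable sketch of that flow with the prompt injected as a closure; the function name and error type are illustrative (the real code prints the mismatch message and exits):

```rust
// Confirm-twice password flow, I/O injected for testability.
fn read_and_confirm_sketch(
    unsafe_dont_encrypt: bool,
    mut prompt: impl FnMut(&str) -> String,
) -> Result<Option<String>, &'static str> {
    if unsafe_dont_encrypt {
        // matches the "will NOT be encrypted" path above
        return Ok(None);
    }
    let first = prompt("Enter your encryption password: ");
    let second =
        prompt("To confirm, please enter the same encryption password once more: ");
    if first == second {
        Ok(Some(first))
    } else {
        Err("Your two inputs do not match!")
    }
}

fn main() {
    let mut answers = ["hunter2", "hunter2"].into_iter();
    let got = read_and_confirm_sketch(false, |_| answers.next().unwrap().to_owned());
    assert_eq!(got, Ok(Some("hunter2".to_owned())));
}
```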
-pub fn read_password(prompt_msg: &str) -> String { - let pwd = match env::var("NAMADA_WALLET_PASSWORD_FILE") { - Ok(path) => fs::read_to_string(path) - .expect("Something went wrong reading the file"), - Err(_) => match env::var("NAMADA_WALLET_PASSWORD") { - Ok(password) => password, - Err(_) => rpassword::read_password_from_tty(Some(prompt_msg)) - .unwrap_or_default(), - }, - }; - if pwd.is_empty() { - eprintln!("Password cannot be empty"); - cli::safe_exit(1) - } - pwd -} diff --git a/apps/src/lib/wallet/pre_genesis.rs b/apps/src/lib/wallet/pre_genesis.rs index d3c5fa14c5a..12209d56745 100644 --- a/apps/src/lib/wallet/pre_genesis.rs +++ b/apps/src/lib/wallet/pre_genesis.rs @@ -3,188 +3,125 @@ use std::path::{Path, PathBuf}; use ark_serialize::{Read, Write}; use file_lock::{FileLock, FileOptions}; -use namada::types::key::{common, SchemeType}; -use serde::{Deserialize, Serialize}; -use thiserror::Error; +use namada::ledger::wallet::pre_genesis::{ + ReadError, ValidatorStore, ValidatorWallet, +}; +use namada::ledger::wallet::{gen_key_to_store, WalletUtils}; +use namada::types::key::SchemeType; -use crate::wallet; -use crate::wallet::{store, StoredKeypair}; +use crate::wallet::store::gen_validator_keys; +use crate::wallet::{read_and_confirm_pwd, CliWalletUtils}; /// Validator pre-genesis wallet file name const VALIDATOR_FILE_NAME: &str = "wallet.toml"; -#[derive(Error, Debug)] -pub enum ReadError { - #[error("Failed decoding the wallet store: {0}")] - Decode(toml::de::Error), - #[error("Failed to read the wallet store from {0}: {1}")] - ReadWallet(String, String), - #[error("Failed to write the wallet store: {0}")] - StoreNewWallet(String), - #[error("Failed to decode a key: {0}")] - Decryption(wallet::keys::DecryptionError), -} - /// Get the path to the validator pre-genesis wallet store. pub fn validator_file_name(store_dir: impl AsRef) -> PathBuf { store_dir.as_ref().join(VALIDATOR_FILE_NAME) } -/// Validator pre-genesis wallet includes all the required keys for genesis -/// setup and a cache of decrypted keys. -pub struct ValidatorWallet { - /// The wallet store that can be written/read to/from TOML - pub store: ValidatorStore, - /// Cryptographic keypair for validator account key - pub account_key: common::SecretKey, - /// Cryptographic keypair for consensus key - pub consensus_key: common::SecretKey, - /// Cryptographic keypair for Tendermint node key - pub tendermint_node_key: common::SecretKey, -} - -/// Validator pre-genesis wallet store includes all the required keys for -/// genesis setup. -#[derive(Serialize, Deserialize, Debug)] -pub struct ValidatorStore { - /// Cryptographic keypair for validator account key - pub account_key: wallet::StoredKeypair, - /// Cryptographic keypair for consensus key - pub consensus_key: wallet::StoredKeypair, - /// Cryptographic keypair for Tendermint node key - pub tendermint_node_key: wallet::StoredKeypair, - /// Special validator keys - pub validator_keys: wallet::ValidatorKeys, +/// Generate a new [`ValidatorWallet`] with required pre-genesis keys and +/// store it as TOML at the given path. 
+pub fn gen_and_store( + scheme: SchemeType, + unsafe_dont_encrypt: bool, + store_dir: &Path, +) -> std::io::Result { + let password = read_and_confirm_pwd(unsafe_dont_encrypt); + let validator = gen(scheme, password); + let data = validator.store.encode(); + let wallet_path = validator_file_name(store_dir); + // Make sure the dir exists + let wallet_dir = wallet_path.parent().unwrap(); + fs::create_dir_all(wallet_dir)?; + // Write the file + let options = FileOptions::new().create(true).write(true).truncate(true); + let mut filelock = + FileLock::lock(wallet_path.to_str().unwrap(), true, options)?; + filelock.file.write_all(&data)?; + Ok(validator) } -impl ValidatorWallet { - /// Generate a new [`ValidatorWallet`] with required pre-genesis keys and - /// store it as TOML at the given path. - pub fn gen_and_store( - scheme: SchemeType, - unsafe_dont_encrypt: bool, - store_dir: &Path, - ) -> std::io::Result { - let validator = Self::gen(scheme, unsafe_dont_encrypt); - let data = validator.store.encode(); - let wallet_path = validator_file_name(store_dir); - // Make sure the dir exists - let wallet_dir = wallet_path.parent().unwrap(); - fs::create_dir_all(wallet_dir)?; - // Write the file - let options = - FileOptions::new().create(true).write(true).truncate(true); - let mut filelock = - FileLock::lock(wallet_path.to_str().unwrap(), true, options)?; - filelock.file.write_all(&data)?; - Ok(validator) - } - - /// Try to load and decrypt keys, if encrypted, in a [`ValidatorWallet`] - /// from a TOML file. - pub fn load(store_dir: &Path) -> Result { - let wallet_file = validator_file_name(store_dir); - match FileLock::lock( - wallet_file.to_str().unwrap(), - true, - FileOptions::new().read(true).write(false), - ) { - Ok(mut filelock) => { - let mut store = Vec::::new(); - filelock.file.read_to_end(&mut store).map_err(|err| { - ReadError::ReadWallet( - store_dir.to_str().unwrap().into(), - err.to_string(), - ) - })?; - let store = - ValidatorStore::decode(store).map_err(ReadError::Decode)?; - - let password = if store.account_key.is_encrypted() - || store.consensus_key.is_encrypted() - || store.account_key.is_encrypted() - { - Some(wallet::read_password("Enter decryption password: ")) - } else { - None - }; - - let account_key = - store.account_key.get(true, password.clone())?; - let consensus_key = - store.consensus_key.get(true, password.clone())?; - let tendermint_node_key = - store.tendermint_node_key.get(true, password)?; - - Ok(Self { - store, - account_key, - consensus_key, - tendermint_node_key, - }) - } - Err(err) => Err(ReadError::ReadWallet( - wallet_file.to_string_lossy().into_owned(), - err.to_string(), - )), +/// Try to load and decrypt keys, if encrypted, in a [`ValidatorWallet`] +/// from a TOML file. 
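`gen_and_store` above persists the encoded validator store by ensuring the parent directory exists and then creating-or-truncating the wallet file under an advisory `FileLock`. A std-only sketch of the same write path, minus the locking (`store_bytes` is an illustrative name):

```rust
use std::fs::{self, OpenOptions};
use std::io::Write;
use std::path::Path;

// The persistence steps of gen_and_store, without the file_lock crate:
// make the parent dir, then create-or-truncate the file and write.
fn store_bytes(wallet_path: &Path, data: &[u8]) -> std::io::Result<()> {
    if let Some(dir) = wallet_path.parent() {
        fs::create_dir_all(dir)?;
    }
    let mut file = OpenOptions::new()
        .create(true)
        .write(true)
        .truncate(true)
        .open(wallet_path)?;
    file.write_all(data)
}

fn main() -> std::io::Result<()> {
    let path = std::env::temp_dir().join("pre-genesis").join("wallet.toml");
    store_bytes(&path, b"account_key = \"...\"\n")?;
    assert!(path.exists());
    Ok(())
}
```

Truncating rather than appending matters here: a shorter re-encoded store must fully replace the old file contents.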
+pub fn load(store_dir: &Path) -> Result { + let wallet_file = validator_file_name(store_dir); + match FileLock::lock( + wallet_file.to_str().unwrap(), + true, + FileOptions::new().read(true).write(false), + ) { + Ok(mut filelock) => { + let mut store = Vec::::new(); + filelock.file.read_to_end(&mut store).map_err(|err| { + ReadError::ReadWallet( + store_dir.to_str().unwrap().into(), + err.to_string(), + ) + })?; + let store = + ValidatorStore::decode(store).map_err(ReadError::Decode)?; + + let password = if store.account_key.is_encrypted() + || store.consensus_key.is_encrypted() + || store.tendermint_node_key.is_encrypted() + { + Some(CliWalletUtils::read_password( + "Enter decryption password: ", + )) + } else { + None + }; + + let account_key = store + .account_key + .get::(true, password.clone())?; + let consensus_key = store + .consensus_key + .get::(true, password.clone())?; + let tendermint_node_key = store + .tendermint_node_key + .get::(true, password)?; + + Ok(ValidatorWallet { + store, + account_key, + consensus_key, + tendermint_node_key, + }) } + Err(err) => Err(ReadError::ReadWallet( + wallet_file.to_string_lossy().into_owned(), + err.to_string(), + )), } - - /// Generate a new [`ValidatorWallet`] with required pre-genesis keys. Will - /// prompt for password when `!unsafe_dont_encrypt`. - fn gen(scheme: SchemeType, unsafe_dont_encrypt: bool) -> Self { - let password = wallet::read_and_confirm_pwd(unsafe_dont_encrypt); - let (account_key, account_sk) = gen_key_to_store(scheme, &password); - let (consensus_key, consensus_sk) = gen_key_to_store( - // Note that TM only allows ed25519 for consensus key - SchemeType::Ed25519, - &password, - ); - let (tendermint_node_key, tendermint_node_sk) = gen_key_to_store( - // Note that TM only allows ed25519 for node IDs - SchemeType::Ed25519, - &password, - ); - let validator_keys = store::Store::gen_validator_keys(None, scheme); - let store = ValidatorStore { - account_key, - consensus_key, - tendermint_node_key, - validator_keys, - }; - Self { - store, - account_key: account_sk, - consensus_key: consensus_sk, - tendermint_node_key: tendermint_node_sk, - } - } -} - -impl ValidatorStore { - /// Decode from TOML string bytes - pub fn decode(data: Vec) -> Result { - toml::from_slice(&data) - } - - /// Encode in TOML string bytes - pub fn encode(&self) -> Vec { - toml::to_vec(self).expect( - "Serializing of validator pre-genesis wallet shouldn't fail", - ) - } -} - -fn gen_key_to_store( - scheme: SchemeType, - password: &Option, -) -> (StoredKeypair, common::SecretKey) { - let sk = store::gen_sk(scheme); - StoredKeypair::new(sk, password.clone()) } -impl From for ReadError { - fn from(err: wallet::keys::DecryptionError) -> Self { - ReadError::Decryption(err) +/// Generate a new [`ValidatorWallet`] with required pre-genesis keys, +/// encrypted with the given password when one is provided.
+fn gen(scheme: SchemeType, password: Option) -> ValidatorWallet { + let (account_key, account_sk) = gen_key_to_store(scheme, &password); + let (consensus_key, consensus_sk) = gen_key_to_store( + // Note that TM only allows ed25519 for consensus key + SchemeType::Ed25519, + &password, + ); + let (tendermint_node_key, tendermint_node_sk) = gen_key_to_store( + // Note that TM only allows ed25519 for node IDs + SchemeType::Ed25519, + &password, + ); + let validator_keys = gen_validator_keys(None, scheme); + let store = ValidatorStore { + account_key, + consensus_key, + tendermint_node_key, + validator_keys, + }; + ValidatorWallet { + store, + account_key: account_sk, + consensus_key: consensus_sk, + tendermint_node_key: tendermint_node_sk, } } diff --git a/apps/src/lib/wallet/store.rs b/apps/src/lib/wallet/store.rs index aa1ae1ca88d..fcdcfb24d98 100644 --- a/apps/src/lib/wallet/store.rs +++ b/apps/src/lib/wallet/store.rs @@ -1,75 +1,26 @@ -use std::collections::HashMap; use std::fs; use std::io::prelude::*; -use std::io::{self, Write}; +use std::io::Write; use std::path::{Path, PathBuf}; +#[cfg(not(feature = "dev"))] use std::str::FromStr; use ark_std::rand::prelude::*; use ark_std::rand::SeedableRng; -use bimap::BiHashMap; use file_lock::{FileLock, FileOptions}; -use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::types::address::{Address, ImplicitAddress}; -use namada::types::key::dkg_session_keys::DkgKeypair; +#[cfg(not(feature = "dev"))] +use namada::ledger::wallet::store::AddressVpType; +#[cfg(feature = "dev")] +use namada::ledger::wallet::StoredKeypair; +use namada::ledger::wallet::{gen_sk, Store, ValidatorKeys}; +#[cfg(not(feature = "dev"))] +use namada::types::address::Address; use namada::types::key::*; -use namada::types::masp::{ - ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, -}; use namada::types::transaction::EllipticCurve; -use serde::{Deserialize, Serialize}; use thiserror::Error; -use super::alias::{self, Alias}; -use super::keys::StoredKeypair; -use super::pre_genesis; -use crate::cli; use crate::config::genesis::genesis_config::GenesisConfig; - -/// Special keys for a validator -#[derive(Serialize, Deserialize, Debug)] -pub struct ValidatorKeys { - /// Special keypair for signing protocol txs - pub protocol_keypair: common::SecretKey, - /// Special session keypair needed by validators for participating - /// in the DKG protocol - pub dkg_keypair: Option, -} - -impl ValidatorKeys { - /// Get the protocol keypair - pub fn get_protocol_keypair(&self) -> &common::SecretKey { - &self.protocol_keypair - } -} - -/// Special data associated with a validator -#[derive(Serialize, Deserialize, Debug)] -pub struct ValidatorData { - /// The address associated to a validator - pub address: Address, - /// special keys for a validator - pub keys: ValidatorKeys, -} - -#[derive(Serialize, Deserialize, Debug, Default)] -pub struct Store { - /// Known viewing keys - view_keys: HashMap, - /// Known spending keys - spend_keys: HashMap>, - /// Known payment addresses - payment_addrs: HashMap, - /// Cryptographic keypairs - keys: HashMap>, - /// Namada address book - addresses: BiHashMap, - /// Known mappings of public key hashes to their aliases in the `keys` - /// field. Used for look-up by a public key. 
- pkhs: HashMap, - /// Special keys if the wallet belongs to a validator - pub(crate) validator_data: Option, -} +use crate::wallet::CliWalletUtils; #[derive(Error, Debug)] pub enum LoadStoreError { @@ -81,690 +32,163 @@ pub enum LoadStoreError { StoreNewWallet(String), } -impl Store { - #[cfg(not(feature = "dev"))] - fn new(genesis: GenesisConfig) -> Self { - let mut store = Self::default(); - store.add_genesis_addresses(genesis); - store - } - - #[cfg(feature = "dev")] - fn new() -> Self { - let mut store = Self::default(); - // Pre-load the default keys without encryption - let no_password = None; - for (alias, keypair) in super::defaults::keys() { - let pkh: PublicKeyHash = (&keypair.ref_to()).into(); - store.keys.insert( - alias.clone(), - StoredKeypair::new(keypair, no_password.clone()).0, - ); - store.pkhs.insert(pkh, alias); - } - store - .addresses - .extend(super::defaults::addresses().into_iter()); - store - } - - /// Add addresses from a genesis configuration. - pub fn add_genesis_addresses(&mut self, genesis: GenesisConfig) { - self.addresses.extend( - super::defaults::addresses_from_genesis(genesis).into_iter(), - ); - } - - /// Save the wallet store to a file. - pub fn save(&self, store_dir: &Path) -> std::io::Result<()> { - let data = self.encode(); - let wallet_path = wallet_file(store_dir); - // Make sure the dir exists - let wallet_dir = wallet_path.parent().unwrap(); - fs::create_dir_all(wallet_dir)?; - // Write the file - let options = - FileOptions::new().create(true).write(true).truncate(true); - let mut filelock = - FileLock::lock(wallet_path.to_str().unwrap(), true, options)?; - filelock.file.write_all(&data) - } - - /// Load the store file or create a new one without any keys or addresses. - pub fn load_or_new(store_dir: &Path) -> Result { - Self::load(store_dir).or_else(|_| { - let store = Self::default(); - store.save(store_dir).map_err(|err| { - LoadStoreError::StoreNewWallet(err.to_string()) - })?; - Ok(store) - }) - } - - /// Load the store file or create a new one with the default addresses from - /// the genesis file, if not found. - pub fn load_or_new_from_genesis( - store_dir: &Path, - genesis_cfg: GenesisConfig, - ) -> Result { - Self::load(store_dir).or_else(|_| { - #[cfg(not(feature = "dev"))] - let store = Self::new(genesis_cfg); - #[cfg(feature = "dev")] - let store = { - // The function is unused in dev - let _ = genesis_cfg; - Self::new() - }; - store.save(store_dir).map_err(|err| { - LoadStoreError::StoreNewWallet(err.to_string()) - })?; - Ok(store) - }) - } - - /// Attempt to load the store file. - pub fn load(store_dir: &Path) -> Result { - let wallet_file = wallet_file(store_dir); - match FileLock::lock( - wallet_file.to_str().unwrap(), - true, - FileOptions::new().read(true).write(false), - ) { - Ok(mut filelock) => { - let mut store = Vec::::new(); - filelock.file.read_to_end(&mut store).map_err(|err| { - LoadStoreError::ReadWallet( - store_dir.to_str().unwrap().into(), - err.to_string(), - ) - })?; - Store::decode(store).map_err(LoadStoreError::Decode) - } - Err(err) => Err(LoadStoreError::ReadWallet( - wallet_file.to_string_lossy().into_owned(), - err.to_string(), - )), - } - } - - /// Find the stored key by an alias, a public key hash or a public key. 
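The removed `Store::find_key` that follows resolves its argument as an alias first, then as a public key hash, then as a public key, chaining the fallbacks with `Option::or_else`. A sketch of that layered lookup, with plain strings standing in for the `Alias` and `PublicKeyHash` types; the public-key fallback works the same way, hashing the key before consulting the same index:

```rust
use std::collections::HashMap;

// Alias map first, then resolve the query through the pkh -> alias index.
fn find_key<'a>(
    keys: &'a HashMap<String, u32>,
    pkhs: &HashMap<String, String>,
    query: &str,
) -> Option<&'a u32> {
    keys.get(query)
        .or_else(|| pkhs.get(query).and_then(|alias| keys.get(alias)))
}

fn main() {
    let keys = HashMap::from([("albert".to_owned(), 42u32)]);
    let pkhs = HashMap::from([("0xabc".to_owned(), "albert".to_owned())]);
    assert_eq!(find_key(&keys, &pkhs, "albert"), Some(&42));
    assert_eq!(find_key(&keys, &pkhs, "0xabc"), Some(&42));
    assert_eq!(find_key(&keys, &pkhs, "unknown"), None);
}
```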
- pub fn find_key( - &self, - alias_pkh_or_pk: impl AsRef, - ) -> Option<&StoredKeypair> { - let alias_pkh_or_pk = alias_pkh_or_pk.as_ref(); - // Try to find by alias - self.keys - .get(&alias_pkh_or_pk.into()) - // Try to find by PKH - .or_else(|| { - let pkh = PublicKeyHash::from_str(alias_pkh_or_pk).ok()?; - self.find_key_by_pkh(&pkh) - }) - // Try to find by PK - .or_else(|| { - let pk = common::PublicKey::from_str(alias_pkh_or_pk).ok()?; - self.find_key_by_pk(&pk) - }) - } - - pub fn find_spending_key( - &self, - alias: impl AsRef, - ) -> Option<&StoredKeypair> { - self.spend_keys.get(&alias.into()) - } - - pub fn find_viewing_key( - &self, - alias: impl AsRef, - ) -> Option<&ExtendedViewingKey> { - self.view_keys.get(&alias.into()) - } - - pub fn find_payment_addr( - &self, - alias: impl AsRef, - ) -> Option<&PaymentAddress> { - self.payment_addrs.get(&alias.into()) - } - - /// Find the stored key by a public key. - pub fn find_key_by_pk( - &self, - pk: &common::PublicKey, - ) -> Option<&StoredKeypair> { - let pkh = PublicKeyHash::from(pk); - self.find_key_by_pkh(&pkh) - } - - /// Find the stored key by a public key hash. - pub fn find_key_by_pkh( - &self, - pkh: &PublicKeyHash, - ) -> Option<&StoredKeypair> { - let alias = self.pkhs.get(pkh)?; - self.keys.get(alias) - } - - /// Find the stored alias for a public key hash. - pub fn find_alias_by_pkh(&self, pkh: &PublicKeyHash) -> Option { - self.pkhs.get(pkh).cloned() - } - - /// Find the stored address by an alias. - pub fn find_address(&self, alias: impl AsRef) -> Option<&Address> { - self.addresses.get_by_left(&alias.into()) - } - - /// Find an alias by the address if it's in the wallet. - pub fn find_alias(&self, address: &Address) -> Option<&Alias> { - self.addresses.get_by_right(address) - } - - /// Get all known keys by their alias, paired with PKH, if known. - pub fn get_keys( - &self, - ) -> HashMap< - Alias, - (&StoredKeypair, Option<&PublicKeyHash>), - > { - let mut keys: HashMap< - Alias, - (&StoredKeypair, Option<&PublicKeyHash>), - > = self - .pkhs - .iter() - .filter_map(|(pkh, alias)| { - let key = &self.keys.get(alias)?; - Some((alias.clone(), (*key, Some(pkh)))) - }) - .collect(); - self.keys.iter().for_each(|(alias, key)| { - if !keys.contains_key(alias) { - keys.insert(alias.clone(), (key, None)); - } - }); - keys - } - - /// Get all known addresses by their alias, paired with PKH, if known. - pub fn get_addresses(&self) -> &BiHashMap { - &self.addresses - } - - /// Get all known payment addresses by their alias. - pub fn get_payment_addrs(&self) -> &HashMap { - &self.payment_addrs - } - - /// Get all known viewing keys by their alias. - pub fn get_viewing_keys(&self) -> &HashMap { - &self.view_keys - } - - /// Get all known spending keys by their alias. - pub fn get_spending_keys( - &self, - ) -> &HashMap> { - &self.spend_keys - } +/// Wallet file name +const FILE_NAME: &str = "wallet.toml"; - fn generate_spending_key() -> ExtendedSpendingKey { - use rand::rngs::OsRng; - let mut spend_key = [0; 32]; - OsRng.fill_bytes(&mut spend_key); - masp_primitives::zip32::ExtendedSpendingKey::master(spend_key.as_ref()) - .into() - } +/// Get the path to the wallet store. +pub fn wallet_file(store_dir: impl AsRef) -> PathBuf { + store_dir.as_ref().join(FILE_NAME) +} - /// Generate a new keypair and insert it into the store with the provided - /// alias. If none provided, the alias will be the public key hash. - /// If no password is provided, the keypair will be stored raw without - /// encryption. 
Returns the alias of the key and a reference-counting - /// pointer to the key. - pub fn gen_key( - &mut self, - scheme: SchemeType, - alias: Option, - password: Option, - ) -> (Alias, common::SecretKey) { - let sk = gen_sk(scheme); - let pkh: PublicKeyHash = PublicKeyHash::from(&sk.ref_to()); - let (keypair_to_store, raw_keypair) = StoredKeypair::new(sk, password); - let address = Address::Implicit(ImplicitAddress(pkh.clone())); - let alias: Alias = alias.unwrap_or_else(|| pkh.clone().into()).into(); - if self - .insert_keypair(alias.clone(), keypair_to_store, pkh) - .is_none() - { - eprintln!("Action cancelled, no changes persisted."); - cli::safe_exit(1); - } - if self.insert_address(alias.clone(), address).is_none() { - eprintln!("Action cancelled, no changes persisted."); - cli::safe_exit(1); - } - (alias, raw_keypair) - } +/// Save the wallet store to a file. +pub fn save(store: &Store, store_dir: &Path) -> std::io::Result<()> { + let data = store.encode(); + let wallet_path = wallet_file(store_dir); + // Make sure the dir exists + let wallet_dir = wallet_path.parent().unwrap(); + fs::create_dir_all(wallet_dir)?; + // Write the file + let options = FileOptions::new().create(true).write(true).truncate(true); + let mut filelock = + FileLock::lock(wallet_path.to_str().unwrap(), true, options)?; + filelock.file.write_all(&data) +} - /// Generate a spending key similarly to how it's done for keypairs - pub fn gen_spending_key( - &mut self, - alias: String, - password: Option, - ) -> (Alias, ExtendedSpendingKey) { - let spendkey = Self::generate_spending_key(); - let viewkey = ExtendedFullViewingKey::from(&spendkey.into()).into(); - let (spendkey_to_store, _raw_spendkey) = - StoredKeypair::new(spendkey, password); - let alias = Alias::from(alias); - if self - .insert_spending_key(alias.clone(), spendkey_to_store, viewkey) - .is_none() - { - eprintln!("Action cancelled, no changes persisted."); - cli::safe_exit(1); - } - (alias, spendkey) - } +/// Load the store file or create a new one without any keys or addresses. +pub fn load_or_new(store_dir: &Path) -> Result { + load(store_dir).or_else(|_| { + let store = Store::default(); + save(&store, store_dir) + .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string()))?; + Ok(store) + }) +} - /// Generate keypair for signing protocol txs and for the DKG - /// A protocol keypair may be optionally provided - /// - /// Note that this removes the validator data. - pub fn gen_validator_keys( - protocol_keypair: Option, - scheme: SchemeType, - ) -> ValidatorKeys { - let protocol_keypair = - protocol_keypair.unwrap_or_else(|| gen_sk(scheme)); - let dkg_keypair = ferveo_common::Keypair::::new( - &mut StdRng::from_entropy(), - ); - ValidatorKeys { - protocol_keypair, - dkg_keypair: Some(dkg_keypair.into()), - } - } +/// Load the store file or create a new one with the default addresses from +/// the genesis file, if not found. +pub fn load_or_new_from_genesis( + store_dir: &Path, + genesis_cfg: GenesisConfig, +) -> Result { + load(store_dir).or_else(|_| { + #[cfg(not(feature = "dev"))] + let store = new(genesis_cfg); + #[cfg(feature = "dev")] + let store = { + // The function is unused in dev + let _ = genesis_cfg; + new() + }; + save(&store, store_dir) + .map_err(|err| LoadStoreError::StoreNewWallet(err.to_string()))?; + Ok(store) + }) +} - /// Add validator data to the store - pub fn add_validator_data( - &mut self, - address: Address, - keys: ValidatorKeys, +/// Attempt to load the store file. 
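Before the `load` function that the comment above introduces, note the fallback shape shared by `load_or_new` and `load_or_new_from_genesis`: any load error is swallowed, a fresh default store is persisted, and only a failed save is surfaced. A sketch with stub types standing in for `Store` and the error type:

```rust
// Load-or-create fallback, as in load_or_new above.
#[derive(Default, Debug, PartialEq)]
struct Store(Vec<u8>);

fn load(exists: bool) -> Result<Store, String> {
    if exists {
        Ok(Store(vec![1]))
    } else {
        Err("wallet file not found".into())
    }
}

fn load_or_new(exists: bool) -> Result<Store, String> {
    load(exists).or_else(|_| {
        let store = Store::default();
        // the real code calls save(&store, store_dir) here and maps any
        // failure to LoadStoreError::StoreNewWallet
        Ok(store)
    })
}

fn main() {
    assert_eq!(load_or_new(true), Ok(Store(vec![1])));
    assert_eq!(load_or_new(false), Ok(Store(vec![])));
}
```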
+pub fn load(store_dir: &Path) -> Result { + let wallet_file = wallet_file(store_dir); + match FileLock::lock( + wallet_file.to_str().unwrap(), + true, + FileOptions::new().read(true).write(false), ) { - self.validator_data = Some(ValidatorData { address, keys }); - } - - /// Returns the validator data, if it exists - pub fn get_validator_data(&self) -> Option<&ValidatorData> { - self.validator_data.as_ref() - } - - /// Returns the validator data, if it exists - pub fn validator_data(self) -> Option { - self.validator_data - } - - /// Insert a new key with the given alias. If the alias is already used, - /// will prompt for overwrite/reselection confirmation. If declined, then - /// keypair is not inserted and nothing is returned, otherwise selected - /// alias is returned. - pub(super) fn insert_keypair( - &mut self, - alias: Alias, - keypair: StoredKeypair, - pkh: PublicKeyHash, - ) -> Option { - if alias.is_empty() { - println!( - "Empty alias given, defaulting to {}.", - Into::::into(pkh.to_string()) - ); - } - // Addresses and keypairs can share aliases, so first remove any - // addresses sharing the same namesake before checking if alias has been - // used. - let counterpart_address = self.addresses.remove_by_left(&alias); - if self.contains_alias(&alias) { - match show_overwrite_confirmation(&alias, "a key") { - ConfirmationResponse::Replace => {} - ConfirmationResponse::Reselect(new_alias) => { - // Restore the removed address in case the recursive prompt - // terminates with a cancellation - counterpart_address - .map(|x| self.addresses.insert(alias.clone(), x.1)); - return self.insert_keypair(new_alias, keypair, pkh); - } - ConfirmationResponse::Skip => { - // Restore the removed address since this insertion action - // has now been cancelled - counterpart_address - .map(|x| self.addresses.insert(alias.clone(), x.1)); - return None; - } - } - } - self.remove_alias(&alias); - self.keys.insert(alias.clone(), keypair); - self.pkhs.insert(pkh, alias.clone()); - // Since it is intended for the inserted keypair to share its namesake - // with the pre-existing address - counterpart_address.map(|x| self.addresses.insert(alias.clone(), x.1)); - Some(alias) - } - - /// Insert spending keys similarly to how it's done for keypairs - pub fn insert_spending_key( - &mut self, - alias: Alias, - spendkey: StoredKeypair, - viewkey: ExtendedViewingKey, - ) -> Option { - if alias.is_empty() { - eprintln!("Empty alias given."); - return None; - } - if self.contains_alias(&alias) { - match show_overwrite_confirmation(&alias, "a spending key") { - ConfirmationResponse::Replace => {} - ConfirmationResponse::Reselect(new_alias) => { - return self - .insert_spending_key(new_alias, spendkey, viewkey); - } - ConfirmationResponse::Skip => return None, - } - } - self.remove_alias(&alias); - self.spend_keys.insert(alias.clone(), spendkey); - // Simultaneously add the derived viewing key to ease balance viewing - self.view_keys.insert(alias.clone(), viewkey); - Some(alias) - } - - /// Insert viewing keys similarly to how it's done for keypairs - pub fn insert_viewing_key( - &mut self, - alias: Alias, - viewkey: ExtendedViewingKey, - ) -> Option { - if alias.is_empty() { - eprintln!("Empty alias given."); - return None; - } - if self.contains_alias(&alias) { - match show_overwrite_confirmation(&alias, "a viewing key") { - ConfirmationResponse::Replace => {} - ConfirmationResponse::Reselect(new_alias) => { - return self.insert_viewing_key(new_alias, viewkey); - } - ConfirmationResponse::Skip => return None, - } - 
} - self.remove_alias(&alias); - self.view_keys.insert(alias.clone(), viewkey); - Some(alias) - } - - /// Check if any map of the wallet contains the given alias - fn contains_alias(&self, alias: &Alias) -> bool { - self.payment_addrs.contains_key(alias) - || self.view_keys.contains_key(alias) - || self.spend_keys.contains_key(alias) - || self.keys.contains_key(alias) - || self.addresses.contains_left(alias) - } - - /// Completely remove the given alias from all maps in the wallet - fn remove_alias(&mut self, alias: &Alias) { - self.payment_addrs.remove(alias); - self.view_keys.remove(alias); - self.spend_keys.remove(alias); - self.keys.remove(alias); - self.addresses.remove_by_left(alias); - self.pkhs.retain(|_key, val| val != alias); - } - - /// Insert payment addresses similarly to how it's done for keypairs - pub fn insert_payment_addr( - &mut self, - alias: Alias, - payment_addr: PaymentAddress, - ) -> Option { - if alias.is_empty() { - eprintln!("Empty alias given."); - return None; - } - if self.contains_alias(&alias) { - match show_overwrite_confirmation(&alias, "a payment address") { - ConfirmationResponse::Replace => {} - ConfirmationResponse::Reselect(new_alias) => { - return self.insert_payment_addr(new_alias, payment_addr); - } - ConfirmationResponse::Skip => return None, - } + Ok(mut filelock) => { + let mut store = Vec::::new(); + filelock.file.read_to_end(&mut store).map_err(|err| { + LoadStoreError::ReadWallet( + store_dir.to_str().unwrap().parse().unwrap(), + err.to_string(), + ) + })?; + Store::decode(store).map_err(LoadStoreError::Decode) } - self.remove_alias(&alias); - self.payment_addrs.insert(alias.clone(), payment_addr); - Some(alias) - } - - /// Helper function to restore keypair given alias-keypair mapping and the - /// pkhs-alias mapping. - fn restore_keypair( - &mut self, - alias: Alias, - key: Option>, - pkh: Option, - ) { - key.map(|x| self.keys.insert(alias.clone(), x)); - pkh.map(|x| self.pkhs.insert(x, alias.clone())); + Err(err) => Err(LoadStoreError::ReadWallet( + wallet_file.to_string_lossy().into_owned(), + err.to_string(), + )), } +} - /// Insert a new address with the given alias. If the alias is already used, - /// will prompt for overwrite/reselection confirmation, which when declined, - /// the address won't be added. Return the selected alias if the address has - /// been added. - pub fn insert_address( - &mut self, - alias: Alias, - address: Address, - ) -> Option { - if alias.is_empty() { - println!("Empty alias given, defaulting to {}.", address.encode()); - } - // Addresses and keypairs can share aliases, so first remove any keys - // sharing the same namesake before checking if alias has been used. - let counterpart_key = self.keys.remove(&alias); - let mut counterpart_pkh = None; - self.pkhs.retain(|k, v| { - if v == &alias { - counterpart_pkh = Some(k.clone()); - false - } else { - true - } - }); - if self.addresses.contains_left(&alias) { - match show_overwrite_confirmation(&alias, "an address") { - ConfirmationResponse::Replace => {} - ConfirmationResponse::Reselect(new_alias) => { - // Restore the removed keypair in case the recursive prompt - // terminates with a cancellation - self.restore_keypair( - alias, - counterpart_key, - counterpart_pkh, - ); - return self.insert_address(new_alias, address); +/// Add addresses from a genesis configuration. 
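The removed `contains_alias`/`remove_alias` pair above maintains the invariant that before an insert, the alias is purged from every map, including the reverse pkh-to-alias index, which has no direct key for the alias and so must be walked with `retain`. A reduced sketch (three maps instead of six; the types are illustrative):

```rust
use std::collections::HashMap;

struct StoreSketch {
    keys: HashMap<String, u32>,
    addresses: HashMap<String, u32>,
    pkhs: HashMap<String, String>, // reverse index: pkh -> alias
}

impl StoreSketch {
    fn remove_alias(&mut self, alias: &str) {
        self.keys.remove(alias);
        self.addresses.remove(alias);
        // drop every pkh entry that points at this alias
        self.pkhs.retain(|_pkh, a| a.as_str() != alias);
    }
}

fn main() {
    let mut store = StoreSketch {
        keys: HashMap::from([("albert".to_owned(), 1)]),
        addresses: HashMap::from([("albert".to_owned(), 2)]),
        pkhs: HashMap::from([("0xabc".to_owned(), "albert".to_owned())]),
    };
    store.remove_alias("albert");
    assert!(store.keys.is_empty());
    assert!(store.addresses.is_empty());
    assert!(store.pkhs.is_empty());
}
```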
+#[cfg(not(feature = "dev"))] +pub fn add_genesis_addresses(store: &mut Store, genesis: GenesisConfig) { + for (alias, addr) in + super::defaults::addresses_from_genesis(genesis.clone()) + { + store.insert_address::(alias, addr, true); + } + for (alias, token) in &genesis.token { + if let Some(address) = token.address.as_ref() { + match Address::from_str(address) { + Ok(address) => { + store.add_vp_type_to_address(AddressVpType::Token, address) } - ConfirmationResponse::Skip => { - // Restore the removed keypair since this insertion action - // has now been cancelled - self.restore_keypair( - alias, - counterpart_key, - counterpart_pkh, - ); - return None; + Err(_) => { + tracing::error!( + "Weird address for token {alias}: {address}" + ) } } } - self.remove_alias(&alias); - self.addresses.insert(alias.clone(), address); - // Since it is intended for the inserted address to share its namesake - // with the pre-existing keypair - self.restore_keypair(alias.clone(), counterpart_key, counterpart_pkh); - Some(alias) - } - - /// Extend this store from pre-genesis validator wallet. - pub fn extend_from_pre_genesis_validator( - &mut self, - validator_address: Address, - validator_alias: Alias, - other: pre_genesis::ValidatorWallet, - ) { - let account_key_alias = alias::validator_key(&validator_alias); - let consensus_key_alias = - alias::validator_consensus_key(&validator_alias); - let tendermint_node_key_alias = - alias::validator_tendermint_node_key(&validator_alias); - - let keys = [ - (account_key_alias.clone(), other.store.account_key), - (consensus_key_alias.clone(), other.store.consensus_key), - ( - tendermint_node_key_alias.clone(), - other.store.tendermint_node_key, - ), - ]; - self.keys.extend(keys.into_iter()); - - let account_pk = other.account_key.ref_to(); - let consensus_pk = other.consensus_key.ref_to(); - let tendermint_node_pk = other.tendermint_node_key.ref_to(); - let addresses = [ - (account_key_alias.clone(), (&account_pk).into()), - (consensus_key_alias.clone(), (&consensus_pk).into()), - ( - tendermint_node_key_alias.clone(), - (&tendermint_node_pk).into(), - ), - ]; - self.addresses.extend(addresses.into_iter()); - - let pkhs = [ - ((&account_pk).into(), account_key_alias), - ((&consensus_pk).into(), consensus_key_alias), - ((&tendermint_node_pk).into(), tendermint_node_key_alias), - ]; - self.pkhs.extend(pkhs.into_iter()); - - self.validator_data = Some(ValidatorData { - address: validator_address, - keys: other.store.validator_keys, - }); - } - - fn decode(data: Vec) -> Result { - toml::from_slice(&data) - } - - fn encode(&self) -> Vec { - toml::to_vec(self).expect("Serializing of store shouldn't fail") } } -enum ConfirmationResponse { - Replace, - Reselect(Alias), - Skip, +#[cfg(not(feature = "dev"))] +fn new(genesis: GenesisConfig) -> Store { + let mut store = Store::default(); + add_genesis_addresses(&mut store, genesis); + store } -/// The given alias has been selected but conflicts with another alias in -/// the store. Offer the user to either replace existing mapping, alter the -/// chosen alias to a name of their chosing, or cancel the aliasing. - -fn show_overwrite_confirmation( - alias: &Alias, - alias_for: &str, -) -> ConfirmationResponse { - print!( - "You're trying to create an alias \"{}\" that already exists for {} \ - in your store.\nWould you like to replace it? 
\ - s(k)ip/re(p)lace/re(s)elect: ", - alias, alias_for - ); - io::stdout().flush().unwrap(); - - let mut buffer = String::new(); - // Get the user to select between 3 choices - match io::stdin().read_line(&mut buffer) { - Ok(size) if size > 0 => { - // Isolate the single character representing the choice - let byte = buffer.chars().next().unwrap(); - buffer.clear(); - match byte { - 'p' | 'P' => return ConfirmationResponse::Replace, - 's' | 'S' => { - // In the case of reselection, elicit new alias - print!("Please enter a different alias: "); - io::stdout().flush().unwrap(); - if io::stdin().read_line(&mut buffer).is_ok() { - return ConfirmationResponse::Reselect( - buffer.trim().into(), - ); - } - } - 'k' | 'K' => return ConfirmationResponse::Skip, - // Input is senseless fall through to repeat prompt - _ => {} - }; - } - _ => {} +#[cfg(feature = "dev")] +fn new() -> Store { + let mut store = Store::default(); + // Pre-load the default keys without encryption + let no_password = None; + for (alias, keypair) in super::defaults::keys() { + let pkh: PublicKeyHash = (&keypair.ref_to()).into(); + store.insert_keypair::( + alias, + StoredKeypair::new(keypair, no_password.clone()).0, + pkh, + true, + ); } - // Input is senseless fall through to repeat prompt - println!("Invalid option, try again."); - show_overwrite_confirmation(alias, alias_for) -} - -/// Wallet file name -const FILE_NAME: &str = "wallet.toml"; - -/// Get the path to the wallet store. -pub fn wallet_file(store_dir: impl AsRef) -> PathBuf { - store_dir.as_ref().join(FILE_NAME) + for (alias, addr) in super::defaults::addresses() { + store.insert_address::(alias, addr, true); + } + store } -/// Generate a new secret key. -pub fn gen_sk(scheme: SchemeType) -> common::SecretKey { - use rand::rngs::OsRng; - let mut csprng = OsRng {}; - match scheme { - SchemeType::Ed25519 => ed25519::SigScheme::generate(&mut csprng) - .try_to_sk() - .unwrap(), - SchemeType::Secp256k1 => secp256k1::SigScheme::generate(&mut csprng) - .try_to_sk() - .unwrap(), - SchemeType::Common => common::SigScheme::generate(&mut csprng) - .try_to_sk() - .unwrap(), +/// Generate keypair for signing protocol txs and for the DKG +/// A protocol keypair may be optionally provided +/// +/// Note that this removes the validator data. 
+pub fn gen_validator_keys( + protocol_keypair: Option, + scheme: SchemeType, +) -> ValidatorKeys { + let protocol_keypair = protocol_keypair.unwrap_or_else(|| gen_sk(scheme)); + let dkg_keypair = ferveo_common::Keypair::::new( + &mut StdRng::from_entropy(), + ); + ValidatorKeys { + protocol_keypair, + dkg_keypair: Some(dkg_keypair.into()), } } #[cfg(all(test, feature = "dev"))] mod test_wallet { + use namada::types::address::Address; + use super::*; #[test] fn test_toml_roundtrip_ed25519() { - let mut store = Store::new(); - let validator_keys = - Store::gen_validator_keys(None, SchemeType::Ed25519); + let mut store = new(); + let validator_keys = gen_validator_keys(None, SchemeType::Ed25519); store.add_validator_data( Address::decode("atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5").unwrap(), validator_keys @@ -775,9 +199,8 @@ mod test_wallet { #[test] fn test_toml_roundtrip_secp256k1() { - let mut store = Store::new(); - let validator_keys = - Store::gen_validator_keys(None, SchemeType::Secp256k1); + let mut store = new(); + let validator_keys = gen_validator_keys(None, SchemeType::Secp256k1); store.add_validator_data( Address::decode("atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5").unwrap(), validator_keys diff --git a/core/Cargo.toml b/core/Cargo.toml index 7adbba45c2d..0e98344216e 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_core" resolver = "2" -version = "0.14.0" +version = "0.16.0" [features] default = [] @@ -36,14 +36,15 @@ abciplus = [ "ibc-proto", "tendermint", "tendermint-proto", - "namada_tests/abciplus", ] ibc-mocks = [ "ibc/mocks", + "ibc/std", ] ibc-mocks-abcipp = [ "ibc-abcipp/mocks", + "ibc-abcipp/std", ] # for integration tests and test utilies @@ -60,7 +61,7 @@ ark-ec = {version = "0.3", optional = true} ark-serialize = {version = "0.3"} # We switch off "blake2b" because it cannot be compiled to wasm # branch = "bat/arse-merkle-tree" -arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "04ad1eeb28901b57a7599bbe433b3822965dabe8", default-features = false, features = ["std", "borsh"]} +arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "e086b235ed6e68929bf73f617dd61cd17b000a56", default-features = false, features = ["std", "borsh"]} bech32 = "0.8.0" bellman = "0.11.2" borsh = "0.9.0" @@ -68,49 +69,48 @@ chrono = {version = "0.4.22", default-features = false, features = ["clock", "st data-encoding = "2.3.2" derivative = "2.2.0" ed25519-consensus = "1.2.0" -ferveo = {optional = true, git = "https://github.com/anoma/ferveo"} -ferveo-common = {git = "https://github.com/anoma/ferveo"} -tpke = {package = "group-threshold-cryptography", optional = true, git = "https://github.com/anoma/ferveo"} +ferveo = {optional = true, git = "https://github.com/anoma/ferveo", rev = "e5abd0acc938da90140351a65a26472eb495ce4d"} +ferveo-common = {git = "https://github.com/anoma/ferveo", rev = "e5abd0acc938da90140351a65a26472eb495ce4d"} +tpke = {package = "group-threshold-cryptography", optional = true, git = "https://github.com/anoma/ferveo", rev = "e5abd0acc938da90140351a65a26472eb495ce4d"} # TODO using the same version of tendermint-rs as we do here. 
-ibc = {version = "0.14.0", default-features = false, optional = true} -ibc-proto = {version = "0.17.1", default-features = false, optional = true} -ibc-abcipp = {package = "ibc", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} -ibc-proto-abcipp = {package = "ibc-proto", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} -ics23 = "0.7.0" +ibc = {version = "0.36.0", default-features = false, features = ["serde"], optional = true} +ibc-proto = {version = "0.26.0", default-features = false, optional = true} +ibc-abcipp = {package = "ibc", git = "https://github.com/heliaxdev/cosmos-ibc-rs", rev = "db14744bfba6239cc5f58345ff90f8b7d42637d6", default-features = false, features = ["serde"], optional = true} +ibc-proto-abcipp = {package = "ibc-proto", git = "https://github.com/heliaxdev/ibc-proto-rs", rev = "dd8ba23110a144ffe2074a0b889676468266435a", default-features = false, optional = true} +ics23 = "0.9.0" index-set = {git = "https://github.com/heliaxdev/index-set", tag = "v0.7.1", features = ["serialize-borsh", "serialize-serde"]} itertools = "0.10.0" libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} masp_primitives = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c" } -proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} -prost = "0.9.0" -prost-types = "0.9.0" +proptest = {git = "https://github.com/heliaxdev/proptest", rev = "8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1", optional = true} +prost = "0.11.6" +prost-types = "0.11.6" rand = {version = "0.8", optional = true} rand_core = {version = "0.6", optional = true} rayon = {version = "=1.5.3", optional = true} -rust_decimal = { version = "1.26.1", features = ["borsh"] } -rust_decimal_macros = "1.26.1" +rust_decimal = { version = "=1.26.1", features = ["borsh"] } +rust_decimal_macros = "=1.26.1" serde = {version = "1.0.125", features = ["derive"]} serde_json = "1.0.62" sha2 = "0.9.3" tendermint = {version = "0.23.6", optional = true} tendermint-proto = {version = "0.23.6", optional = true} -tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} -tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} -thiserror = "1.0.30" +tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35", optional = true} +tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35", optional = true} +thiserror = "1.0.38" tracing = "0.1.30" zeroize = {version = "1.5.5", features = ["zeroize_derive"]} [dev-dependencies] -namada_tests = {path = "../tests", default-features = false, features = ["wasm-runtime"]} assert_matches = "1.5.0" libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9"} pretty_assertions = "0.7.2" # A fork with state machine testing -proptest = {git = "https://github.com/heliaxdev/proptest", branch = 
"tomas/sm"} +proptest = {git = "https://github.com/heliaxdev/proptest", rev = "8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1"} rand = {version = "0.8"} rand_core = {version = "0.6"} test-log = {version = "0.2.7", default-features = false, features = ["trace"]} tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} [build-dependencies] -tonic-build = "0.6.0" +tonic-build = "0.8.4" diff --git a/core/build.rs b/core/build.rs index c5b251c5191..ae567b8bdcd 100644 --- a/core/build.rs +++ b/core/build.rs @@ -1,13 +1,8 @@ -use std::fs::read_to_string; -use std::process::Command; use std::{env, str}; /// Path to the .proto source files, relative to `core` directory const PROTO_SRC: &str = "./proto"; -/// The version should match the one we use in the `Makefile` -const RUSTFMT_TOOLCHAIN_SRC: &str = "../rust-nightly-version"; - fn main() { if let Ok(val) = env::var("COMPILE_PROTO") { if val.to_ascii_lowercase() == "false" { @@ -19,31 +14,8 @@ fn main() { // Tell Cargo that if the given file changes, to rerun this build script. println!("cargo:rerun-if-changed={}", PROTO_SRC); - let mut use_rustfmt = false; - - // The version should match the one we use in the `Makefile` - if let Ok(rustfmt_toolchain) = read_to_string(RUSTFMT_TOOLCHAIN_SRC) { - // Try to find the path to rustfmt. - if let Ok(output) = Command::new("rustup") - .args(["which", "rustfmt", "--toolchain", rustfmt_toolchain.trim()]) - .output() - { - if let Ok(rustfmt) = str::from_utf8(&output.stdout) { - // Set the command to be used by tonic_build below to format the - // generated files - let rustfmt = rustfmt.trim(); - if !rustfmt.is_empty() { - println!("using rustfmt from path \"{}\"", rustfmt); - env::set_var("RUSTFMT", rustfmt); - use_rustfmt = true - } - } - } - } - tonic_build::configure() .out_dir("src/proto/generated") - .format(use_rustfmt) .protoc_arg("--experimental_allow_proto3_optional") .compile(&[format!("{}/types.proto", PROTO_SRC)], &[PROTO_SRC]) .unwrap(); diff --git a/core/src/hints.rs b/core/src/hints.rs new file mode 100644 index 00000000000..78d49eeab5e --- /dev/null +++ b/core/src/hints.rs @@ -0,0 +1,44 @@ +//! Compiler hints, to improve the performance of certain operations. + +/// A function that is seldom called. +#[inline] +#[cold] +pub fn cold() {} + +/// A likely path to be taken in an if-expression. +/// +/// # Example +/// +/// ```ignore +/// if likely(frequent_condition()) { +/// // most common path to take +/// } else { +/// // ... +/// } +/// ``` +#[inline] +pub fn likely(b: bool) -> bool { + if !b { + cold() + } + b +} + +/// An unlikely path to be taken in an if-expression. +/// +/// # Example +/// +/// ```ignore +/// if unlikely(rare_condition()) { +/// // ... +/// } else { +/// // most common path to take +/// } +/// ``` +#[inline] +pub fn unlikely(b: bool) -> bool { + if b { + cold() + } + b +} diff --git a/core/src/ledger/governance/mod.rs b/core/src/ledger/governance/mod.rs index 8e3fb977f3d..ae488383bfc 100644 --- a/core/src/ledger/governance/mod.rs +++ b/core/src/ledger/governance/mod.rs @@ -1,6 +1,6 @@ //! 
Governance library code -use crate::types::address::{Address, InternalAddress}; +use crate::types::address::{self, Address}; /// governance parameters pub mod parameters; @@ -8,4 +8,4 @@ pub mod parameters; pub mod storage; /// The governance internal address -pub const ADDRESS: Address = Address::Internal(InternalAddress::Governance); +pub const ADDRESS: Address = address::GOV; diff --git a/core/src/ledger/governance/parameters.rs b/core/src/ledger/governance/parameters.rs index 71dca8c91b4..9ae820d96c3 100644 --- a/core/src/ledger/governance/parameters.rs +++ b/core/src/ledger/governance/parameters.rs @@ -3,8 +3,7 @@ use std::fmt::Display; use borsh::{BorshDeserialize, BorshSerialize}; use super::storage as gov_storage; -use crate::ledger::storage::types::encode; -use crate::ledger::storage::{self, Storage}; +use crate::ledger::storage_api::{self, StorageRead, StorageWrite}; use crate::types::token::Amount; #[derive( @@ -66,10 +65,9 @@ impl Default for GovParams { impl GovParams { /// Initialize governance parameters into storage - pub fn init_storage<DB, H>(&self, storage: &mut Storage<DB, H>) + pub fn init_storage<S>(&self, storage: &mut S) -> storage_api::Result<()> where - DB: storage::DB + for<'iter> storage::DBIter<'iter>, - H: storage::StorageHasher, + S: StorageRead + StorageWrite, { let Self { min_proposal_fund, @@ -82,49 +80,31 @@ impl GovParams { let min_proposal_fund_key = gov_storage::get_min_proposal_fund_key(); let amount = Amount::whole(*min_proposal_fund); - storage - .write(&min_proposal_fund_key, encode(&amount)) - .unwrap(); + storage.write(&min_proposal_fund_key, amount)?; let max_proposal_code_size_key = gov_storage::get_max_proposal_code_size_key(); - storage - .write(&max_proposal_code_size_key, encode(max_proposal_code_size)) - .unwrap(); + storage.write(&max_proposal_code_size_key, max_proposal_code_size)?; let min_proposal_period_key = gov_storage::get_min_proposal_period_key(); - storage - .write(&min_proposal_period_key, encode(min_proposal_period)) - .unwrap(); + storage.write(&min_proposal_period_key, min_proposal_period)?; let max_proposal_period_key = gov_storage::get_max_proposal_period_key(); - storage - .write(&max_proposal_period_key, encode(max_proposal_period)) - .unwrap(); + storage.write(&max_proposal_period_key, max_proposal_period)?; let max_proposal_content_size_key = gov_storage::get_max_proposal_content_key(); storage - .write( - &max_proposal_content_size_key, - encode(max_proposal_content_size), - ) - .expect("Should be able to write to storage"); + .write(&max_proposal_content_size_key, max_proposal_content_size)?; let min_proposal_grace_epoch_key = gov_storage::get_min_proposal_grace_epoch_key(); storage - .write( - &min_proposal_grace_epoch_key, - encode(min_proposal_grace_epochs), - ) - .expect("Should be able to write to storage"); + .write(&min_proposal_grace_epoch_key, min_proposal_grace_epochs)?; let counter_key = gov_storage::get_counter_key(); - storage - .write(&counter_key, encode(&u64::MIN)) - .expect("Should be able to write to storage"); + storage.write(&counter_key, u64::MIN) } } diff --git a/core/src/ledger/governance/storage.rs b/core/src/ledger/governance/storage.rs index fb4ecaf76b4..e00c4be6787 100644 --- a/core/src/ledger/governance/storage.rs +++ b/core/src/ledger/governance/storage.rs @@ -5,6 +5,7 @@ use crate::types::storage::{DbKeySeg, Key, KeySeg}; const PROPOSAL_PREFIX: &str = "proposal"; const PROPOSAL_VOTE: &str = "vote"; const PROPOSAL_AUTHOR: &str = "author"; +const PROPOSAL_TYPE: &str = "type"; const PROPOSAL_CONTENT: &str =
"content"; const PROPOSAL_START_EPOCH: &str = "start_epoch"; const PROPOSAL_END_EPOCH: &str = "end_epoch"; @@ -65,7 +66,7 @@ pub fn is_author_key(key: &Key) -> bool { } } -/// Check if key is proposal key +/// Check if key is proposal code key pub fn is_proposal_code_key(key: &Key) -> bool { match &key.segments[..] { [ @@ -173,6 +174,24 @@ pub fn is_end_epoch_key(key: &Key) -> bool { } } +/// Check if key is proposal type key +pub fn is_proposal_type_key(key: &Key) -> bool { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::StringSeg(id), + DbKeySeg::StringSeg(proposal_type), + ] if addr == &ADDRESS + && prefix == PROPOSAL_PREFIX + && proposal_type == PROPOSAL_TYPE => + { + id.parse::().is_ok() + } + _ => false, + } +} + /// Check if key is counter key pub fn is_counter_key(key: &Key) -> bool { matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(counter)] if addr == &ADDRESS && counter == COUNTER_KEY) @@ -334,6 +353,15 @@ pub fn get_author_key(id: u64) -> Key { .expect("Cannot obtain a storage key") } +/// Get key of a proposal type +pub fn get_proposal_type_key(id: u64) -> Key { + proposal_prefix() + .push(&id.to_string()) + .expect("Cannot obtain a storage key") + .push(&PROPOSAL_TYPE.to_owned()) + .expect("Cannot obtain a storage key") +} + /// Get key of proposal voting start epoch pub fn get_voting_start_epoch_key(id: u64) -> Key { proposal_prefix() @@ -370,21 +398,21 @@ pub fn get_grace_epoch_key(id: u64) -> Key { .expect("Cannot obtain a storage key") } -/// Get proposal code key -pub fn get_proposal_code_key(id: u64) -> Key { +/// Get the proposal committing key prefix +pub fn get_commiting_proposals_prefix(epoch: u64) -> Key { proposal_prefix() - .push(&id.to_string()) + .push(&PROPOSAL_COMMITTING_EPOCH.to_owned()) .expect("Cannot obtain a storage key") - .push(&PROPOSAL_CODE.to_owned()) + .push(&epoch.to_string()) .expect("Cannot obtain a storage key") } -/// Get the proposal committing key prefix -pub fn get_commiting_proposals_prefix(epoch: u64) -> Key { +/// Get proposal code key +pub fn get_proposal_code_key(id: u64) -> Key { proposal_prefix() - .push(&PROPOSAL_COMMITTING_EPOCH.to_owned()) + .push(&id.to_string()) .expect("Cannot obtain a storage key") - .push(&epoch.to_string()) + .push(&PROPOSAL_CODE.to_owned()) .expect("Cannot obtain a storage key") } diff --git a/core/src/ledger/ibc/actions.rs b/core/src/ledger/ibc/actions.rs deleted file mode 100644 index 4e09f269c28..00000000000 --- a/core/src/ledger/ibc/actions.rs +++ /dev/null @@ -1,1605 +0,0 @@ -//! 
Functions to handle IBC modules - -use std::str::FromStr; - -use sha2::Digest; -use thiserror::Error; - -use crate::ibc::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; -use crate::ibc::clients::ics07_tendermint::consensus_state::ConsensusState as TmConsensusState; -use crate::ibc::core::ics02_client::client_consensus::{ - AnyConsensusState, ConsensusState, -}; -use crate::ibc::core::ics02_client::client_state::{ - AnyClientState, ClientState, -}; -use crate::ibc::core::ics02_client::client_type::ClientType; -use crate::ibc::core::ics02_client::events::{ - Attributes as ClientAttributes, CreateClient, UpdateClient, UpgradeClient, -}; -use crate::ibc::core::ics02_client::header::{AnyHeader, Header}; -use crate::ibc::core::ics02_client::height::Height; -use crate::ibc::core::ics02_client::msgs::create_client::MsgCreateAnyClient; -use crate::ibc::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; -use crate::ibc::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; -use crate::ibc::core::ics02_client::msgs::ClientMsg; -use crate::ibc::core::ics03_connection::connection::{ - ConnectionEnd, Counterparty as ConnCounterparty, State as ConnState, -}; -use crate::ibc::core::ics03_connection::events::{ - Attributes as ConnectionAttributes, OpenAck as ConnOpenAck, - OpenConfirm as ConnOpenConfirm, OpenInit as ConnOpenInit, - OpenTry as ConnOpenTry, -}; -use crate::ibc::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; -use crate::ibc::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; -use crate::ibc::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; -use crate::ibc::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; -use crate::ibc::core::ics03_connection::msgs::ConnectionMsg; -use crate::ibc::core::ics04_channel::channel::{ - ChannelEnd, Counterparty as ChanCounterparty, Order, State as ChanState, -}; -use crate::ibc::core::ics04_channel::commitment::PacketCommitment; -use crate::ibc::core::ics04_channel::events::{ - AcknowledgePacket, CloseConfirm as ChanCloseConfirm, - CloseInit as ChanCloseInit, OpenAck as ChanOpenAck, - OpenConfirm as ChanOpenConfirm, OpenInit as ChanOpenInit, - OpenTry as ChanOpenTry, SendPacket, TimeoutPacket, WriteAcknowledgement, -}; -use crate::ibc::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; -use crate::ibc::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; -use crate::ibc::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; -use crate::ibc::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; -use crate::ibc::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; -use crate::ibc::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; -use crate::ibc::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; -use crate::ibc::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; -use crate::ibc::core::ics04_channel::msgs::timeout::MsgTimeout; -use crate::ibc::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; -use crate::ibc::core::ics04_channel::msgs::{ChannelMsg, PacketMsg}; -use crate::ibc::core::ics04_channel::packet::{Packet, Sequence}; -use crate::ibc::core::ics23_commitment::commitment::CommitmentPrefix; -use crate::ibc::core::ics24_host::error::ValidationError as Ics24Error; -use crate::ibc::core::ics24_host::identifier::{ - ChannelId, ClientId, ConnectionId, PortChannelId, PortId, -}; -use crate::ibc::core::ics26_routing::msgs::Ics26Envelope; 
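Editor's note: the ~1,600-line module deleted here implemented IBC handling by hand. Its entry point was an ICS26 dispatch over decoded messages, condensed below from the deleted `dispatch_ibc_action` that follows; its responsibilities move to the new `core/src/ledger/ibc/context/` modules added later in this diff.

// Condensed sketch of the deleted dispatch (names as in the deleted code).
match &ibc_msg.0 {
    Ics26Envelope::Ics2Msg(msg) => { /* create/update/upgrade a client */ }
    Ics26Envelope::Ics3Msg(msg) => { /* connection open handshake */ }
    Ics26Envelope::Ics4ChannelMsg(msg) => { /* channel handshake and close */ }
    Ics26Envelope::Ics4PacketMsg(msg) => { /* recv/ack/timeout packets */ }
    Ics26Envelope::Ics20Msg(msg) => { /* ICS20 fungible token transfer */ }
}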
-use crate::ibc::events::IbcEvent; -#[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))] -use crate::ibc::mock::client_state::{MockClientState, MockConsensusState}; -use crate::ibc::timestamp::Timestamp; -use crate::ledger::ibc::data::{ - Error as IbcDataError, FungibleTokenPacketData, IbcMessage, PacketAck, - PacketReceipt, -}; -use crate::ledger::ibc::storage; -use crate::ledger::storage_api; -use crate::tendermint::Time; -use crate::tendermint_proto::{Error as ProtoError, Protobuf}; -use crate::types::address::{Address, InternalAddress}; -use crate::types::ibc::IbcEvent as NamadaIbcEvent; -use crate::types::storage::{BlockHeight, Key}; -use crate::types::time::Rfc3339String; -use crate::types::token::{self, Amount}; - -const COMMITMENT_PREFIX: &[u8] = b"ibc"; - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("Invalid client error: {0}")] - ClientId(Ics24Error), - #[error("Invalid port error: {0}")] - PortId(Ics24Error), - #[error("Updating a client error: {0}")] - ClientUpdate(String), - #[error("IBC data error: {0}")] - IbcData(IbcDataError), - #[error("Decoding IBC data error: {0}")] - Decoding(ProtoError), - #[error("Client error: {0}")] - Client(String), - #[error("Connection error: {0}")] - Connection(String), - #[error("Channel error: {0}")] - Channel(String), - #[error("Counter error: {0}")] - Counter(String), - #[error("Sequence error: {0}")] - Sequence(String), - #[error("Time error: {0}")] - Time(String), - #[error("Invalid transfer message: {0}")] - TransferMessage(token::TransferError), - #[error("Sending a token error: {0}")] - SendingToken(String), - #[error("Receiving a token error: {0}")] - ReceivingToken(String), - #[error("IBC storage error: {0}")] - IbcStorage(storage::Error), -} - -// This is needed to use `ibc::Handler::Error` with `IbcActions` in -// `tx_prelude/src/ibc.rs` -impl From<Error> for storage_api::Error { - fn from(err: Error) -> Self { - storage_api::Error::new(err) - } -} - -/// for handling IBC modules -pub type Result<T> = std::result::Result<T, Error>; - -/// IBC trait to be implemented in integration that can read and write -pub trait IbcActions { - /// IBC action error - type Error: From<Error>; - - /// Read IBC-related data - fn read_ibc_data( - &self, - key: &Key, - ) -> std::result::Result<Option<Vec<u8>>, Self::Error>; - - /// Write IBC-related data - fn write_ibc_data( - &mut self, - key: &Key, - data: impl AsRef<[u8]>, - ) -> std::result::Result<(), Self::Error>; - - /// Delete IBC-related data - fn delete_ibc_data( - &mut self, - key: &Key, - ) -> std::result::Result<(), Self::Error>; - - /// Emit an IBC event - fn emit_ibc_event( - &mut self, - event: NamadaIbcEvent, - ) -> std::result::Result<(), Self::Error>; - - /// Transfer token - fn transfer_token( - &mut self, - src: &Key, - dest: &Key, - amount: Amount, - ) -> std::result::Result<(), Self::Error>; - - /// Get the current height of this chain - fn get_height(&self) -> std::result::Result<BlockHeight, Self::Error>; - - /// Get the current time of the tendermint header of this chain - fn get_header_time( - &self, - ) -> std::result::Result<Rfc3339String, Self::Error>; - - /// dispatch according to ICS26 routing - fn dispatch_ibc_action( - &mut self, - tx_data: &[u8], - ) -> std::result::Result<(), Self::Error> { - let ibc_msg = IbcMessage::decode(tx_data).map_err(Error::IbcData)?; - match &ibc_msg.0 { - Ics26Envelope::Ics2Msg(ics02_msg) => match ics02_msg { - ClientMsg::CreateClient(msg) => self.create_client(msg), - ClientMsg::UpdateClient(msg) => self.update_client(msg), - ClientMsg::Misbehaviour(_msg) => todo!(), -
ClientMsg::UpgradeClient(msg) => self.upgrade_client(msg), - }, - Ics26Envelope::Ics3Msg(ics03_msg) => match ics03_msg { - ConnectionMsg::ConnectionOpenInit(msg) => { - self.init_connection(msg) - } - ConnectionMsg::ConnectionOpenTry(msg) => { - self.try_connection(msg) - } - ConnectionMsg::ConnectionOpenAck(msg) => { - self.ack_connection(msg) - } - ConnectionMsg::ConnectionOpenConfirm(msg) => { - self.confirm_connection(msg) - } - }, - Ics26Envelope::Ics4ChannelMsg(ics04_msg) => match ics04_msg { - ChannelMsg::ChannelOpenInit(msg) => self.init_channel(msg), - ChannelMsg::ChannelOpenTry(msg) => self.try_channel(msg), - ChannelMsg::ChannelOpenAck(msg) => self.ack_channel(msg), - ChannelMsg::ChannelOpenConfirm(msg) => { - self.confirm_channel(msg) - } - ChannelMsg::ChannelCloseInit(msg) => { - self.close_init_channel(msg) - } - ChannelMsg::ChannelCloseConfirm(msg) => { - self.close_confirm_channel(msg) - } - }, - Ics26Envelope::Ics4PacketMsg(ics04_msg) => match ics04_msg { - PacketMsg::AckPacket(msg) => self.acknowledge_packet(msg), - PacketMsg::RecvPacket(msg) => self.receive_packet(msg), - PacketMsg::ToPacket(msg) => self.timeout_packet(msg), - PacketMsg::ToClosePacket(msg) => { - self.timeout_on_close_packet(msg) - } - }, - Ics26Envelope::Ics20Msg(msg) => self.send_token(msg), - } - } - - /// Create a new client - fn create_client( - &mut self, - msg: &MsgCreateAnyClient, - ) -> std::result::Result<(), Self::Error> { - let counter_key = storage::client_counter_key(); - let counter = self.get_and_inc_counter(&counter_key)?; - let client_type = msg.client_state.client_type(); - let client_id = client_id(client_type, counter)?; - // client type - let client_type_key = storage::client_type_key(&client_id); - self.write_ibc_data(&client_type_key, client_type.as_str().as_bytes())?; - // client state - let client_state_key = storage::client_state_key(&client_id); - self.write_ibc_data( - &client_state_key, - msg.client_state - .encode_vec() - .expect("encoding shouldn't fail"), - )?; - // consensus state - let height = msg.client_state.latest_height(); - let consensus_state_key = - storage::consensus_state_key(&client_id, height); - self.write_ibc_data( - &consensus_state_key, - msg.consensus_state - .encode_vec() - .expect("encoding shouldn't fail"), - )?; - - self.set_client_update_time(&client_id)?; - - let event = make_create_client_event(&client_id, msg) - .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Update a client - fn update_client( - &mut self, - msg: &MsgUpdateAnyClient, - ) -> std::result::Result<(), Self::Error> { - // get and update the client - let client_id = msg.client_id.clone(); - let client_state_key = storage::client_state_key(&client_id); - let value = - self.read_ibc_data(&client_state_key)?.ok_or_else(|| { - Error::Client(format!( - "The client to be updated doesn't exist: ID {}", - client_id - )) - })?; - let client_state = - AnyClientState::decode_vec(&value).map_err(Error::Decoding)?; - let (new_client_state, new_consensus_state) = - update_client(client_state, msg.header.clone())?; - - let height = new_client_state.latest_height(); - self.write_ibc_data( - &client_state_key, - new_client_state - .encode_vec() - .expect("encoding shouldn't fail"), - )?; - let consensus_state_key = - storage::consensus_state_key(&client_id, height); - self.write_ibc_data( - &consensus_state_key, - new_consensus_state - .encode_vec() - .expect("encoding shouldn't fail"), - )?; - - self.set_client_update_time(&client_id)?; - - let event = 
make_update_client_event(&client_id, msg) - .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Upgrade a client - fn upgrade_client( - &mut self, - msg: &MsgUpgradeAnyClient, - ) -> std::result::Result<(), Self::Error> { - let client_state_key = storage::client_state_key(&msg.client_id); - let height = msg.client_state.latest_height(); - let consensus_state_key = - storage::consensus_state_key(&msg.client_id, height); - self.write_ibc_data( - &client_state_key, - msg.client_state - .encode_vec() - .expect("encoding shouldn't fail"), - )?; - self.write_ibc_data( - &consensus_state_key, - msg.consensus_state - .encode_vec() - .expect("encoding shouldn't fail"), - )?; - - self.set_client_update_time(&msg.client_id)?; - - let event = make_upgrade_client_event(&msg.client_id, msg) - .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Initialize a connection for ConnectionOpenInit - fn init_connection( - &mut self, - msg: &MsgConnectionOpenInit, - ) -> std::result::Result<(), Self::Error> { - let counter_key = storage::connection_counter_key(); - let counter = self.get_and_inc_counter(&counter_key)?; - // new connection - let conn_id = connection_id(counter); - let conn_key = storage::connection_key(&conn_id); - let connection = init_connection(msg); - self.write_ibc_data( - &conn_key, - connection.encode_vec().expect("encoding shouldn't fail"), - )?; - - let event = make_open_init_connection_event(&conn_id, msg) - .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Initialize a connection for ConnectionOpenTry - fn try_connection( - &mut self, - msg: &MsgConnectionOpenTry, - ) -> std::result::Result<(), Self::Error> { - let counter_key = storage::connection_counter_key(); - let counter = self.get_and_inc_counter(&counter_key)?; - // new connection - let conn_id = connection_id(counter); - let conn_key = storage::connection_key(&conn_id); - let connection = try_connection(msg); - self.write_ibc_data( - &conn_key, - connection.encode_vec().expect("encoding shouldn't fail"), - )?; - - let event = make_open_try_connection_event(&conn_id, msg) - .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Open the connection for ConnectionOpenAck - fn ack_connection( - &mut self, - msg: &MsgConnectionOpenAck, - ) -> std::result::Result<(), Self::Error> { - let conn_key = storage::connection_key(&msg.connection_id); - let value = self.read_ibc_data(&conn_key)?.ok_or_else(|| { - Error::Connection(format!( - "The connection to be opened doesn't exist: ID {}", - msg.connection_id - )) - })?; - let mut connection = - ConnectionEnd::decode_vec(&value).map_err(Error::Decoding)?; - open_connection(&mut connection); - let mut counterparty = connection.counterparty().clone(); - counterparty.connection_id = - Some(msg.counterparty_connection_id.clone()); - connection.set_counterparty(counterparty); - self.write_ibc_data( - &conn_key, - connection.encode_vec().expect("encoding shouldn't fail"), - )?; - - let event = make_open_ack_connection_event(msg).try_into().unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Open the connection for ConnectionOpenConfirm - fn confirm_connection( - &mut self, - msg: &MsgConnectionOpenConfirm, - ) -> std::result::Result<(), Self::Error> { - let conn_key = storage::connection_key(&msg.connection_id); - let value = self.read_ibc_data(&conn_key)?.ok_or_else(|| { - Error::Connection(format!( - "The connection to be opend doesn't exist: ID {}", - msg.connection_id - )) - 
})?; - let mut connection = - ConnectionEnd::decode_vec(&value).map_err(Error::Decoding)?; - open_connection(&mut connection); - self.write_ibc_data( - &conn_key, - connection.encode_vec().expect("encoding shouldn't fail"), - )?; - - let event = make_open_confirm_connection_event(msg).try_into().unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Initialize a channel for ChannelOpenInit - fn init_channel( - &mut self, - msg: &MsgChannelOpenInit, - ) -> std::result::Result<(), Self::Error> { - self.bind_port(&msg.port_id)?; - let counter_key = storage::channel_counter_key(); - let counter = self.get_and_inc_counter(&counter_key)?; - let channel_id = channel_id(counter); - let port_channel_id = port_channel_id(msg.port_id.clone(), channel_id); - let channel_key = storage::channel_key(&port_channel_id); - self.write_ibc_data( - &channel_key, - msg.channel.encode_vec().expect("encoding shouldn't fail"), - )?; - - let event = make_open_init_channel_event(&channel_id, msg) - .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Initialize a channel for ChannelOpenTry - fn try_channel( - &mut self, - msg: &MsgChannelOpenTry, - ) -> std::result::Result<(), Self::Error> { - self.bind_port(&msg.port_id)?; - let counter_key = storage::channel_counter_key(); - let counter = self.get_and_inc_counter(&counter_key)?; - let channel_id = channel_id(counter); - let port_channel_id = port_channel_id(msg.port_id.clone(), channel_id); - let channel_key = storage::channel_key(&port_channel_id); - self.write_ibc_data( - &channel_key, - msg.channel.encode_vec().expect("encoding shouldn't fail"), - )?; - - let event = make_open_try_channel_event(&channel_id, msg) - .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Open the channel for ChannelOpenAck - fn ack_channel( - &mut self, - msg: &MsgChannelOpenAck, - ) -> std::result::Result<(), Self::Error> { - let port_channel_id = - port_channel_id(msg.port_id.clone(), msg.channel_id); - let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { - Error::Channel(format!( - "The channel to be opened doesn't exist: Port/Channel {}", - port_channel_id - )) - })?; - let mut channel = - ChannelEnd::decode_vec(&value).map_err(Error::Decoding)?; - channel.set_counterparty_channel_id(msg.counterparty_channel_id); - open_channel(&mut channel); - self.write_ibc_data( - &channel_key, - channel.encode_vec().expect("encoding shouldn't fail"), - )?; - - let event = make_open_ack_channel_event(msg, &channel)? - .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Open the channel for ChannelOpenConfirm - fn confirm_channel( - &mut self, - msg: &MsgChannelOpenConfirm, - ) -> std::result::Result<(), Self::Error> { - let port_channel_id = - port_channel_id(msg.port_id.clone(), msg.channel_id); - let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { - Error::Channel(format!( - "The channel to be opened doesn't exist: Port/Channel {}", - port_channel_id - )) - })?; - let mut channel = - ChannelEnd::decode_vec(&value).map_err(Error::Decoding)?; - open_channel(&mut channel); - self.write_ibc_data( - &channel_key, - channel.encode_vec().expect("encoding shouldn't fail"), - )?; - - let event = make_open_confirm_channel_event(msg, &channel)? 
- .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Close the channel for ChannelCloseInit - fn close_init_channel( - &mut self, - msg: &MsgChannelCloseInit, - ) -> std::result::Result<(), Self::Error> { - let port_channel_id = - port_channel_id(msg.port_id.clone(), msg.channel_id); - let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { - Error::Channel(format!( - "The channel to be closed doesn't exist: Port/Channel {}", - port_channel_id - )) - })?; - let mut channel = - ChannelEnd::decode_vec(&value).map_err(Error::Decoding)?; - close_channel(&mut channel); - self.write_ibc_data( - &channel_key, - channel.encode_vec().expect("encoding shouldn't fail"), - )?; - - let event = make_close_init_channel_event(msg, &channel)? - .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Close the channel for ChannelCloseConfirm - fn close_confirm_channel( - &mut self, - msg: &MsgChannelCloseConfirm, - ) -> std::result::Result<(), Self::Error> { - let port_channel_id = - port_channel_id(msg.port_id.clone(), msg.channel_id); - let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { - Error::Channel(format!( - "The channel to be closed doesn't exist: Port/Channel {}", - port_channel_id - )) - })?; - let mut channel = - ChannelEnd::decode_vec(&value).map_err(Error::Decoding)?; - close_channel(&mut channel); - self.write_ibc_data( - &channel_key, - channel.encode_vec().expect("encoding shouldn't fail"), - )?; - - let event = make_close_confirm_channel_event(msg, &channel)? - .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Send a packet - fn send_packet( - &mut self, - port_channel_id: PortChannelId, - data: Vec, - timeout_height: Height, - timeout_timestamp: Timestamp, - ) -> std::result::Result<(), Self::Error> { - // get and increment the next sequence send - let seq_key = storage::next_sequence_send_key(&port_channel_id); - let sequence = self.get_and_inc_sequence(&seq_key)?; - - // get the channel for the destination info. - let channel_key = storage::channel_key(&port_channel_id); - let channel = self - .read_ibc_data(&channel_key)? 
- .expect("cannot get the channel to be closed"); - let channel = - ChannelEnd::decode_vec(&channel).expect("cannot get the channel"); - let counterparty = channel.counterparty(); - - // make a packet - let packet = Packet { - sequence, - source_port: port_channel_id.port_id.clone(), - source_channel: port_channel_id.channel_id, - destination_port: counterparty.port_id.clone(), - destination_channel: *counterparty - .channel_id() - .expect("the counterparty channel should exist"), - data, - timeout_height, - timeout_timestamp, - }; - // store the commitment of the packet - let commitment_key = storage::commitment_key( - &port_channel_id.port_id, - &port_channel_id.channel_id, - packet.sequence, - ); - let commitment = commitment(&packet); - self.write_ibc_data(&commitment_key, commitment.into_vec())?; - - let event = make_send_packet_event(packet).try_into().unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Receive a packet - fn receive_packet( - &mut self, - msg: &MsgRecvPacket, - ) -> std::result::Result<(), Self::Error> { - // check the packet data - let packet_ack = - if let Ok(data) = serde_json::from_slice(&msg.packet.data) { - match self.receive_token(&msg.packet, &data) { - Ok(_) => PacketAck::result_success(), - Err(_) => PacketAck::result_error( - "receiving a token failed".to_string(), - ), - } - } else { - PacketAck::result_error("unknown packet data".to_string()) - }; - - // store the receipt - let receipt_key = storage::receipt_key( - &msg.packet.destination_port, - &msg.packet.destination_channel, - msg.packet.sequence, - ); - self.write_ibc_data(&receipt_key, PacketReceipt::default().as_bytes())?; - - // store the ack - let ack_key = storage::ack_key( - &msg.packet.destination_port, - &msg.packet.destination_channel, - msg.packet.sequence, - ); - let ack = packet_ack.encode_to_vec(); - let ack_commitment = sha2::Sha256::digest(&ack).to_vec(); - self.write_ibc_data(&ack_key, ack_commitment)?; - - // increment the next sequence receive - let port_channel_id = port_channel_id( - msg.packet.destination_port.clone(), - msg.packet.destination_channel, - ); - let seq_key = storage::next_sequence_recv_key(&port_channel_id); - self.get_and_inc_sequence(&seq_key)?; - - let event = make_write_ack_event(msg.packet.clone(), ack) - .try_into() - .unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Receive a acknowledgement - fn acknowledge_packet( - &mut self, - msg: &MsgAcknowledgement, - ) -> std::result::Result<(), Self::Error> { - let ack = PacketAck::try_from(msg.acknowledgement.clone()) - .map_err(Error::IbcData)?; - if !ack.is_success() { - if let Ok(data) = serde_json::from_slice(&msg.packet.data) { - self.refund_token(&msg.packet, &data)?; - } - } - - let commitment_key = storage::commitment_key( - &msg.packet.source_port, - &msg.packet.source_channel, - msg.packet.sequence, - ); - self.delete_ibc_data(&commitment_key)?; - - // get and increment the next sequence ack - let port_channel_id = port_channel_id( - msg.packet.source_port.clone(), - msg.packet.source_channel, - ); - let seq_key = storage::next_sequence_ack_key(&port_channel_id); - self.get_and_inc_sequence(&seq_key)?; - - let event = make_ack_event(msg.packet.clone()).try_into().unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Receive a timeout - fn timeout_packet( - &mut self, - msg: &MsgTimeout, - ) -> std::result::Result<(), Self::Error> { - // check the packet data - if let Ok(data) = serde_json::from_slice(&msg.packet.data) { - self.refund_token(&msg.packet, &data)?; - } - 
- // delete the commitment of the packet - let commitment_key = storage::commitment_key( - &msg.packet.source_port, - &msg.packet.source_channel, - msg.packet.sequence, - ); - self.delete_ibc_data(&commitment_key)?; - - // close the channel - let port_channel_id = port_channel_id( - msg.packet.source_port.clone(), - msg.packet.source_channel, - ); - let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { - Error::Channel(format!( - "The channel to be closed doesn't exist: Port/Channel {}", - port_channel_id - )) - })?; - let mut channel = - ChannelEnd::decode_vec(&value).map_err(Error::Decoding)?; - if channel.order_matches(&Order::Ordered) { - close_channel(&mut channel); - self.write_ibc_data( - &channel_key, - channel.encode_vec().expect("encoding shouldn't fail"), - )?; - } - - let event = make_timeout_event(msg.packet.clone()).try_into().unwrap(); - self.emit_ibc_event(event)?; - - Ok(()) - } - - /// Receive a timeout for TimeoutOnClose - fn timeout_on_close_packet( - &mut self, - msg: &MsgTimeoutOnClose, - ) -> std::result::Result<(), Self::Error> { - // check the packet data - if let Ok(data) = serde_json::from_slice(&msg.packet.data) { - self.refund_token(&msg.packet, &data)?; - } - - // delete the commitment of the packet - let commitment_key = storage::commitment_key( - &msg.packet.source_port, - &msg.packet.source_channel, - msg.packet.sequence, - ); - self.delete_ibc_data(&commitment_key)?; - - // close the channel - let port_channel_id = port_channel_id( - msg.packet.source_port.clone(), - msg.packet.source_channel, - ); - let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { - Error::Channel(format!( - "The channel to be closed doesn't exist: Port/Channel {}", - port_channel_id - )) - })?; - let mut channel = - ChannelEnd::decode_vec(&value).map_err(Error::Decoding)?; - if channel.order_matches(&Order::Ordered) { - close_channel(&mut channel); - self.write_ibc_data( - &channel_key, - channel.encode_vec().expect("encoding shouldn't fail"), - )?; - } - - Ok(()) - } - - /// Set the timestamp and the height for the client update - fn set_client_update_time( - &mut self, - client_id: &ClientId, - ) -> std::result::Result<(), Self::Error> { - let time = Time::parse_from_rfc3339(&self.get_header_time()?.0) - .map_err(|e| { - Error::Time(format!("The time of the header is invalid: {}", e)) - })?; - let key = storage::client_update_timestamp_key(client_id); - self.write_ibc_data( - &key, - time.encode_vec().expect("encoding shouldn't fail"), - )?; - - // the revision number is always 0 - let height = Height::new(0, self.get_height()?.0); - let height_key = storage::client_update_height_key(client_id); - // write the current height as u64 - self.write_ibc_data( - &height_key, - height.encode_vec().expect("Encoding shouldn't fail"), - )?; - - Ok(()) - } - - /// Get and increment the counter - fn get_and_inc_counter( - &mut self, - key: &Key, - ) -> std::result::Result { - let value = self.read_ibc_data(key)?.ok_or_else(|| { - Error::Counter(format!("The counter doesn't exist: {}", key)) - })?; - let value: [u8; 8] = value.try_into().map_err(|_| { - Error::Counter(format!("The counter value wasn't u64: Key {}", key)) - })?; - let counter = u64::from_be_bytes(value); - self.write_ibc_data(key, (counter + 1).to_be_bytes())?; - Ok(counter) - } - - /// Get and increment the sequence - fn get_and_inc_sequence( - &mut self, - key: &Key, - ) -> 
std::result::Result { - let index = match self.read_ibc_data(key)? { - Some(v) => { - let index: [u8; 8] = v.try_into().map_err(|_| { - Error::Sequence(format!( - "The sequence index wasn't u64: Key {}", - key - )) - })?; - u64::from_be_bytes(index) - } - // when the sequence has never been used, returns the initial value - None => 1, - }; - self.write_ibc_data(key, (index + 1).to_be_bytes())?; - Ok(index.into()) - } - - /// Bind a new port - fn bind_port( - &mut self, - port_id: &PortId, - ) -> std::result::Result<(), Self::Error> { - let port_key = storage::port_key(port_id); - match self.read_ibc_data(&port_key)? { - Some(_) => {} - None => { - // create a new capability and claim it - let index_key = storage::capability_index_key(); - let cap_index = self.get_and_inc_counter(&index_key)?; - self.write_ibc_data(&port_key, cap_index.to_be_bytes())?; - let cap_key = storage::capability_key(cap_index); - self.write_ibc_data(&cap_key, port_id.as_bytes())?; - } - } - Ok(()) - } - - /// Send the specified token by escrowing or burning - fn send_token( - &mut self, - msg: &MsgTransfer, - ) -> std::result::Result<(), Self::Error> { - let mut data = FungibleTokenPacketData::from(msg.clone()); - if let Some(hash) = storage::token_hash_from_denom(&data.denom) - .map_err(Error::IbcStorage)? - { - let denom_key = storage::ibc_denom_key(hash); - let denom_bytes = - self.read_ibc_data(&denom_key)?.ok_or_else(|| { - Error::SendingToken(format!( - "No original denom: denom_key {}", - denom_key - )) - })?; - let denom = std::str::from_utf8(&denom_bytes).map_err(|e| { - Error::SendingToken(format!( - "Decoding the denom failed: denom_key {}, error {}", - denom_key, e - )) - })?; - data.denom = denom.to_string(); - } - let token = storage::token(&data.denom).map_err(Error::IbcStorage)?; - let amount = Amount::from_str(&data.amount).map_err(|e| { - Error::SendingToken(format!( - "Invalid amount: amount {}, error {}", - data.amount, e - )) - })?; - - let source_addr = Address::decode(&data.sender).map_err(|e| { - Error::SendingToken(format!( - "Invalid sender address: sender {}, error {}", - data.sender, e - )) - })?; - - // check the denomination field - let prefix = format!( - "{}/{}/", - msg.source_port.clone(), - msg.source_channel.clone() - ); - let (source, target) = if data.denom.starts_with(&prefix) { - // the receiver's chain was the source - // transfer from the origin-specific account of the token - let key_prefix = storage::ibc_token_prefix(&data.denom) - .map_err(Error::IbcStorage)?; - let src = token::multitoken_balance_key(&key_prefix, &source_addr); - - let key_prefix = storage::ibc_account_prefix( - &msg.source_port, - &msg.source_channel, - &token, - ); - let burn = token::multitoken_balance_key( - &key_prefix, - &Address::Internal(InternalAddress::IbcBurn), - ); - (src, burn) - } else { - // this chain is the source - // escrow the amount of the token - let src = if data.denom == token.to_string() { - token::balance_key(&token, &source_addr) - } else { - let key_prefix = storage::ibc_token_prefix(&data.denom) - .map_err(Error::IbcStorage)?; - token::multitoken_balance_key(&key_prefix, &source_addr) - }; - - let key_prefix = storage::ibc_account_prefix( - &msg.source_port, - &msg.source_channel, - &token, - ); - let escrow = token::multitoken_balance_key( - &key_prefix, - &Address::Internal(InternalAddress::IbcEscrow), - ); - (src, escrow) - }; - self.transfer_token(&source, &target, amount)?; - - // send a packet - let port_channel_id = - port_channel_id(msg.source_port.clone(), 
msg.source_channel); - let packet_data = serde_json::to_vec(&data) - .expect("encoding the packet data shouldn't fail"); - self.send_packet( - port_channel_id, - packet_data, - msg.timeout_height, - msg.timeout_timestamp, - ) - } - - /// Receive the specified token by unescrowing or minting - fn receive_token( - &mut self, - packet: &Packet, - data: &FungibleTokenPacketData, - ) -> std::result::Result<(), Self::Error> { - let token = storage::token(&data.denom).map_err(Error::IbcStorage)?; - let amount = Amount::from_str(&data.amount).map_err(|e| { - Error::ReceivingToken(format!( - "Invalid amount: amount {}, error {}", - data.amount, e - )) - })?; - // The receiver should be an address because the origin-specific account - // key should be assigned internally - let dest_addr = Address::decode(&data.receiver).map_err(|e| { - Error::ReceivingToken(format!( - "Invalid receiver address: receiver {}, error {}", - data.receiver, e - )) - })?; - - let prefix = format!( - "{}/{}/", - packet.source_port.clone(), - packet.source_channel.clone() - ); - let (source, target) = match data.denom.strip_prefix(&prefix) { - Some(denom) => { - // unescrow the token because this chain was the source - let escrow_prefix = storage::ibc_account_prefix( - &packet.destination_port, - &packet.destination_channel, - &token, - ); - let escrow = token::multitoken_balance_key( - &escrow_prefix, - &Address::Internal(InternalAddress::IbcEscrow), - ); - let dest = if denom == token.to_string() { - token::balance_key(&token, &dest_addr) - } else { - let key_prefix = storage::ibc_token_prefix(denom) - .map_err(Error::IbcStorage)?; - token::multitoken_balance_key(&key_prefix, &dest_addr) - }; - (escrow, dest) - } - None => { - // mint the token because the sender chain is the source - let key_prefix = storage::ibc_account_prefix( - &packet.destination_port, - &packet.destination_channel, - &token, - ); - let mint = token::multitoken_balance_key( - &key_prefix, - &Address::Internal(InternalAddress::IbcMint), - ); - - // prefix the denom with the this chain port and channel - let denom = format!( - "{}/{}/{}", - &packet.destination_port, - &packet.destination_channel, - &data.denom - ); - let key_prefix = storage::ibc_token_prefix(&denom) - .map_err(Error::IbcStorage)?; - let dest = - token::multitoken_balance_key(&key_prefix, &dest_addr); - - // store the prefixed denom - let token_hash = storage::calc_hash(&denom); - let denom_key = storage::ibc_denom_key(token_hash); - self.write_ibc_data(&denom_key, denom.as_bytes())?; - - (mint, dest) - } - }; - self.transfer_token(&source, &target, amount)?; - - Ok(()) - } - - /// Refund the specified token by unescrowing or minting - fn refund_token( - &mut self, - packet: &Packet, - data: &FungibleTokenPacketData, - ) -> std::result::Result<(), Self::Error> { - let token = storage::token(&data.denom).map_err(Error::IbcStorage)?; - let amount = Amount::from_str(&data.amount).map_err(|e| { - Error::ReceivingToken(format!( - "Invalid amount: amount {}, error {}", - data.amount, e - )) - })?; - - let dest_addr = Address::decode(&data.sender).map_err(|e| { - Error::SendingToken(format!( - "Invalid sender address: sender {}, error {}", - data.sender, e - )) - })?; - - let prefix = format!( - "{}/{}/", - packet.source_port.clone(), - packet.source_channel.clone() - ); - let (source, target) = if data.denom.starts_with(&prefix) { - // mint the token because the amount was burned - let key_prefix = storage::ibc_account_prefix( - &packet.source_port, - &packet.source_channel, - &token, - ); - 
let mint = token::multitoken_balance_key( - &key_prefix, - &Address::Internal(InternalAddress::IbcMint), - ); - let key_prefix = storage::ibc_token_prefix(&data.denom) - .map_err(Error::IbcStorage)?; - let dest = token::multitoken_balance_key(&key_prefix, &dest_addr); - (mint, dest) - } else { - // unescrow the token because the acount was escrowed - let dest = if data.denom == token.to_string() { - token::balance_key(&token, &dest_addr) - } else { - let key_prefix = storage::ibc_token_prefix(&data.denom) - .map_err(Error::IbcStorage)?; - token::multitoken_balance_key(&key_prefix, &dest_addr) - }; - - let key_prefix = storage::ibc_account_prefix( - &packet.source_port, - &packet.source_channel, - &token, - ); - let escrow = token::multitoken_balance_key( - &key_prefix, - &Address::Internal(InternalAddress::IbcEscrow), - ); - (escrow, dest) - }; - self.transfer_token(&source, &target, amount)?; - - Ok(()) - } -} - -/// Update a client with the given state and headers -pub fn update_client( - client_state: AnyClientState, - header: AnyHeader, -) -> Result<(AnyClientState, AnyConsensusState)> { - match client_state { - AnyClientState::Tendermint(cs) => match header { - AnyHeader::Tendermint(h) => { - let new_client_state = cs.with_header(h.clone()).wrap_any(); - let new_consensus_state = TmConsensusState::from(h).wrap_any(); - Ok((new_client_state, new_consensus_state)) - } - #[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))] - _ => Err(Error::ClientUpdate( - "The header type is mismatched".to_owned(), - )), - }, - #[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))] - AnyClientState::Mock(_) => match header { - AnyHeader::Mock(h) => Ok(( - MockClientState::new(h).wrap_any(), - MockConsensusState::new(h).wrap_any(), - )), - _ => Err(Error::ClientUpdate( - "The header type is mismatched".to_owned(), - )), - }, - } -} - -/// Returns a new client ID -pub fn client_id(client_type: ClientType, counter: u64) -> Result { - ClientId::new(client_type, counter).map_err(Error::ClientId) -} - -/// Returns a new connection ID -pub fn connection_id(counter: u64) -> ConnectionId { - ConnectionId::new(counter) -} - -/// Make a connection end from the init message -pub fn init_connection(msg: &MsgConnectionOpenInit) -> ConnectionEnd { - ConnectionEnd::new( - ConnState::Init, - msg.client_id.clone(), - msg.counterparty.clone(), - vec![msg.version.clone().unwrap_or_default()], - msg.delay_period, - ) -} - -/// Make a connection end from the try message -pub fn try_connection(msg: &MsgConnectionOpenTry) -> ConnectionEnd { - ConnectionEnd::new( - ConnState::TryOpen, - msg.client_id.clone(), - msg.counterparty.clone(), - msg.counterparty_versions.clone(), - msg.delay_period, - ) -} - -/// Open the connection -pub fn open_connection(conn: &mut ConnectionEnd) { - conn.set_state(ConnState::Open); -} - -/// Returns a new channel ID -pub fn channel_id(counter: u64) -> ChannelId { - ChannelId::new(counter) -} - -/// Open the channel -pub fn open_channel(channel: &mut ChannelEnd) { - channel.set_state(ChanState::Open); -} - -/// Close the channel -pub fn close_channel(channel: &mut ChannelEnd) { - channel.set_state(ChanState::Closed); -} - -/// Returns a port ID -pub fn port_id(id: &str) -> Result { - PortId::from_str(id).map_err(Error::PortId) -} - -/// Returns a pair of port ID and channel ID -pub fn port_channel_id( - port_id: PortId, - channel_id: ChannelId, -) -> PortChannelId { - PortChannelId { - port_id, - channel_id, - } -} - -/// Returns a sequence -pub fn sequence(index: u64) -> 
Sequence { - Sequence::from(index) -} - -/// Make a packet from MsgTransfer -pub fn packet_from_message( - msg: &MsgTransfer, - sequence: Sequence, - counterparty: &ChanCounterparty, -) -> Packet { - Packet { - sequence, - source_port: msg.source_port.clone(), - source_channel: msg.source_channel, - destination_port: counterparty.port_id.clone(), - destination_channel: *counterparty - .channel_id() - .expect("the counterparty channel should exist"), - data: serde_json::to_vec(&FungibleTokenPacketData::from(msg.clone())) - .expect("encoding the packet data shouldn't fail"), - timeout_height: msg.timeout_height, - timeout_timestamp: msg.timeout_timestamp, - } -} - -/// Returns a commitment from the given packet -pub fn commitment(packet: &Packet) -> PacketCommitment { - let timeout = packet.timeout_timestamp.nanoseconds().to_be_bytes(); - let revision_number = packet.timeout_height.revision_number.to_be_bytes(); - let revision_height = packet.timeout_height.revision_height.to_be_bytes(); - let data = sha2::Sha256::digest(&packet.data); - let input = [ - &timeout, - &revision_number, - &revision_height, - data.as_slice(), - ] - .concat(); - sha2::Sha256::digest(&input).to_vec().into() -} - -/// Returns a counterparty of a connection -pub fn connection_counterparty( - client_id: ClientId, - conn_id: ConnectionId, -) -> ConnCounterparty { - ConnCounterparty::new(client_id, Some(conn_id), commitment_prefix()) -} - -/// Returns a counterparty of a channel -pub fn channel_counterparty( - port_id: PortId, - channel_id: ChannelId, -) -> ChanCounterparty { - ChanCounterparty::new(port_id, Some(channel_id)) -} - -/// Returns Namada commitment prefix -pub fn commitment_prefix() -> CommitmentPrefix { - CommitmentPrefix::try_from(COMMITMENT_PREFIX.to_vec()) - .expect("the conversion shouldn't fail") -} - -/// Makes CreateClient event -pub fn make_create_client_event( - client_id: &ClientId, - msg: &MsgCreateAnyClient, -) -> IbcEvent { - let attributes = ClientAttributes { - client_id: client_id.clone(), - client_type: msg.client_state.client_type(), - consensus_height: msg.client_state.latest_height(), - ..Default::default() - }; - IbcEvent::CreateClient(CreateClient::from(attributes)) -} - -/// Makes UpdateClient event -pub fn make_update_client_event( - client_id: &ClientId, - msg: &MsgUpdateAnyClient, -) -> IbcEvent { - let attributes = ClientAttributes { - client_id: client_id.clone(), - client_type: msg.header.client_type(), - consensus_height: msg.header.height(), - ..Default::default() - }; - IbcEvent::UpdateClient(UpdateClient::from(attributes)) -} - -/// Makes UpgradeClient event -pub fn make_upgrade_client_event( - client_id: &ClientId, - msg: &MsgUpgradeAnyClient, -) -> IbcEvent { - let attributes = ClientAttributes { - client_id: client_id.clone(), - client_type: msg.client_state.client_type(), - consensus_height: msg.client_state.latest_height(), - ..Default::default() - }; - IbcEvent::UpgradeClient(UpgradeClient::from(attributes)) -} - -/// Makes OpenInitConnection event -pub fn make_open_init_connection_event( - conn_id: &ConnectionId, - msg: &MsgConnectionOpenInit, -) -> IbcEvent { - let attributes = ConnectionAttributes { - connection_id: Some(conn_id.clone()), - client_id: msg.client_id.clone(), - counterparty_connection_id: msg.counterparty.connection_id().cloned(), - counterparty_client_id: msg.counterparty.client_id().clone(), - ..Default::default() - }; - ConnOpenInit::from(attributes).into() -} - -/// Makes OpenTryConnection event -pub fn make_open_try_connection_event( - conn_id: 
&ConnectionId, - msg: &MsgConnectionOpenTry, -) -> IbcEvent { - let attributes = ConnectionAttributes { - connection_id: Some(conn_id.clone()), - client_id: msg.client_id.clone(), - counterparty_connection_id: msg.counterparty.connection_id().cloned(), - counterparty_client_id: msg.counterparty.client_id().clone(), - ..Default::default() - }; - ConnOpenTry::from(attributes).into() -} - -/// Makes OpenAckConnection event -pub fn make_open_ack_connection_event(msg: &MsgConnectionOpenAck) -> IbcEvent { - let attributes = ConnectionAttributes { - connection_id: Some(msg.connection_id.clone()), - counterparty_connection_id: Some( - msg.counterparty_connection_id.clone(), - ), - ..Default::default() - }; - ConnOpenAck::from(attributes).into() -} - -/// Makes OpenConfirmConnection event -pub fn make_open_confirm_connection_event( - msg: &MsgConnectionOpenConfirm, -) -> IbcEvent { - let attributes = ConnectionAttributes { - connection_id: Some(msg.connection_id.clone()), - ..Default::default() - }; - ConnOpenConfirm::from(attributes).into() -} - -/// Makes OpenInitChannel event -pub fn make_open_init_channel_event( - channel_id: &ChannelId, - msg: &MsgChannelOpenInit, -) -> IbcEvent { - let connection_id = match msg.channel.connection_hops().get(0) { - Some(c) => c.clone(), - None => ConnectionId::default(), - }; - let attributes = ChanOpenInit { - height: Height::default(), - port_id: msg.port_id.clone(), - channel_id: Some(*channel_id), - connection_id, - counterparty_port_id: msg.channel.counterparty().port_id().clone(), - counterparty_channel_id: msg - .channel - .counterparty() - .channel_id() - .cloned(), - }; - attributes.into() -} - -/// Makes OpenTryChannel event -pub fn make_open_try_channel_event( - channel_id: &ChannelId, - msg: &MsgChannelOpenTry, -) -> IbcEvent { - let connection_id = match msg.channel.connection_hops().get(0) { - Some(c) => c.clone(), - None => ConnectionId::default(), - }; - let attributes = ChanOpenTry { - height: Height::default(), - port_id: msg.port_id.clone(), - channel_id: Some(*channel_id), - connection_id, - counterparty_port_id: msg.channel.counterparty().port_id().clone(), - counterparty_channel_id: msg - .channel - .counterparty() - .channel_id() - .cloned(), - }; - attributes.into() -} - -/// Makes OpenAckChannel event -pub fn make_open_ack_channel_event( - msg: &MsgChannelOpenAck, - channel: &ChannelEnd, -) -> Result { - let conn_id = get_connection_id_from_channel(channel)?; - let counterparty = channel.counterparty(); - let attributes = ChanOpenAck { - height: Height::default(), - port_id: msg.port_id.clone(), - channel_id: Some(msg.channel_id), - counterparty_channel_id: Some(msg.counterparty_channel_id), - connection_id: conn_id.clone(), - counterparty_port_id: counterparty.port_id().clone(), - }; - Ok(attributes.into()) -} - -/// Makes OpenConfirmChannel event -pub fn make_open_confirm_channel_event( - msg: &MsgChannelOpenConfirm, - channel: &ChannelEnd, -) -> Result { - let conn_id = get_connection_id_from_channel(channel)?; - let counterparty = channel.counterparty(); - let attributes = ChanOpenConfirm { - height: Height::default(), - port_id: msg.port_id.clone(), - channel_id: Some(msg.channel_id), - connection_id: conn_id.clone(), - counterparty_port_id: counterparty.port_id().clone(), - counterparty_channel_id: counterparty.channel_id().cloned(), - }; - Ok(attributes.into()) -} - -/// Makes CloseInitChannel event -pub fn make_close_init_channel_event( - msg: &MsgChannelCloseInit, - channel: &ChannelEnd, -) -> Result { - let conn_id = 
get_connection_id_from_channel(channel)?; - let counterparty = channel.counterparty(); - let attributes = ChanCloseInit { - height: Height::default(), - port_id: msg.port_id.clone(), - channel_id: msg.channel_id, - connection_id: conn_id.clone(), - counterparty_port_id: counterparty.port_id().clone(), - counterparty_channel_id: counterparty.channel_id().cloned(), - }; - Ok(attributes.into()) -} - -/// Makes CloseConfirmChannel event -pub fn make_close_confirm_channel_event( - msg: &MsgChannelCloseConfirm, - channel: &ChannelEnd, -) -> Result { - let conn_id = get_connection_id_from_channel(channel)?; - let counterparty = channel.counterparty(); - let attributes = ChanCloseConfirm { - height: Height::default(), - port_id: msg.port_id.clone(), - channel_id: Some(msg.channel_id), - connection_id: conn_id.clone(), - counterparty_port_id: counterparty.port_id.clone(), - counterparty_channel_id: counterparty.channel_id().cloned(), - }; - Ok(attributes.into()) -} - -fn get_connection_id_from_channel( - channel: &ChannelEnd, -) -> Result<&ConnectionId> { - channel.connection_hops().get(0).ok_or_else(|| { - Error::Channel("No connection for the channel".to_owned()) - }) -} - -/// Makes SendPacket event -pub fn make_send_packet_event(packet: Packet) -> IbcEvent { - IbcEvent::SendPacket(SendPacket { - height: packet.timeout_height, - packet, - }) -} - -/// Makes WriteAcknowledgement event -pub fn make_write_ack_event(packet: Packet, ack: Vec) -> IbcEvent { - IbcEvent::WriteAcknowledgement(WriteAcknowledgement { - // this height is not used - height: Height::default(), - packet, - ack, - }) -} - -/// Makes AcknowledgePacket event -pub fn make_ack_event(packet: Packet) -> IbcEvent { - IbcEvent::AcknowledgePacket(AcknowledgePacket { - // this height is not used - height: Height::default(), - packet, - }) -} - -/// Makes TimeoutPacket event -pub fn make_timeout_event(packet: Packet) -> IbcEvent { - IbcEvent::TimeoutPacket(TimeoutPacket { - // this height is not used - height: Height::default(), - packet, - }) -} diff --git a/core/src/ledger/ibc/context/common.rs b/core/src/ledger/ibc/context/common.rs new file mode 100644 index 00000000000..1d03ffbeece --- /dev/null +++ b/core/src/ledger/ibc/context/common.rs @@ -0,0 +1,373 @@ +//! 
IbcCommonContext implementation for IBC + +use prost::Message; +use sha2::Digest; + +use super::storage::IbcStorageContext; +use crate::ibc::applications::transfer::denom::PrefixedDenom; +use crate::ibc::clients::ics07_tendermint::client_state::ClientState as TmClientState; +use crate::ibc::clients::ics07_tendermint::consensus_state::ConsensusState as TmConsensusState; +use crate::ibc::core::ics02_client::client_state::ClientState; +use crate::ibc::core::ics02_client::consensus_state::ConsensusState; +use crate::ibc::core::ics02_client::error::ClientError; +use crate::ibc::core::ics03_connection::connection::ConnectionEnd; +use crate::ibc::core::ics03_connection::error::ConnectionError; +use crate::ibc::core::ics04_channel::channel::ChannelEnd; +use crate::ibc::core::ics04_channel::commitment::PacketCommitment; +use crate::ibc::core::ics04_channel::error::{ChannelError, PacketError}; +use crate::ibc::core::ics04_channel::packet::Sequence; +use crate::ibc::core::ics04_channel::timeout::TimeoutHeight; +use crate::ibc::core::ics24_host::identifier::{ClientId, ConnectionId}; +use crate::ibc::core::ics24_host::path::{ + ChannelEndPath, ClientConsensusStatePath, CommitmentPath, Path, SeqSendPath, +}; +use crate::ibc::core::ContextError; +#[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))] +use crate::ibc::mock::client_state::MockClientState; +#[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))] +use crate::ibc::mock::consensus_state::MockConsensusState; +use crate::ibc::timestamp::Timestamp; +use crate::ibc_proto::google::protobuf::Any; +use crate::ibc_proto::protobuf::Protobuf; +use crate::ledger::ibc::storage; +use crate::types::storage::Key; + +/// Context to handle typical IBC data +pub trait IbcCommonContext: IbcStorageContext { + /// Get the ClientState + fn client_state( + &self, + client_id: &ClientId, + ) -> Result<Box<dyn ClientState>, ContextError> { + let key = storage::client_state_key(client_id); + match self.read(&key) { + Ok(Some(value)) => { + let any = Any::decode(&value[..]).map_err(|e| { + ContextError::ClientError(ClientError::Decode(e)) + })?; + self.decode_client_state(any) + } + Ok(None) => { + Err(ContextError::ClientError(ClientError::ClientNotFound { + client_id: client_id.clone(), + })) + } + Err(_) => Err(ContextError::ClientError(ClientError::Other { + description: format!( + "Reading the client state failed: ID {}", + client_id, + ), + })), + } + } + + /// Get the ConsensusState + fn consensus_state( + &self, + client_cons_state_path: &ClientConsensusStatePath, + ) -> Result<Box<dyn ConsensusState>, ContextError> { + let path = Path::ClientConsensusState(client_cons_state_path.clone()); + let key = storage::ibc_key(path.to_string()) + .expect("Creating a key for the consensus state shouldn't fail"); + match self.read(&key) { + Ok(Some(value)) => { + let any = Any::decode(&value[..]).map_err(|e| { + ContextError::ClientError(ClientError::Decode(e)) + })?; + self.decode_consensus_state(any) + } + Ok(None) => { + let client_id = storage::client_id(&key).expect("invalid key"); + let height = + storage::consensus_height(&key).expect("invalid key"); + Err(ContextError::ClientError( + ClientError::ConsensusStateNotFound { client_id, height }, + )) + } + Err(_) => Err(ContextError::ClientError(ClientError::Other { + description: format!( + "Reading the consensus state failed: Key {}", + key, + ), + })), + } + } + + /// Get the ConnectionEnd + fn connection_end( + &self, + connection_id: &ConnectionId, + ) -> Result<ConnectionEnd, ContextError> { + let key = storage::connection_key(connection_id); + match self.read(&key) {
Ok(Some(value)) => { + ConnectionEnd::decode_vec(&value).map_err(|_| { + ContextError::ConnectionError(ConnectionError::Other { + description: format!( + "Decoding the connection end failed: ID {}", + connection_id, + ), + }) + }) + } + Ok(None) => Err(ContextError::ConnectionError( + ConnectionError::ConnectionNotFound { + connection_id: connection_id.clone(), + }, + )), + Err(_) => { + Err(ContextError::ConnectionError(ConnectionError::Other { + description: format!( + "Reading the connection end failed: ID {}", + connection_id, + ), + })) + } + } + } + + /// Get the ChannelEnd + fn channel_end( + &self, + channel_end_path: &ChannelEndPath, + ) -> Result { + let path = Path::ChannelEnd(channel_end_path.clone()); + let key = storage::ibc_key(path.to_string()) + .expect("Creating a key for the client state shouldn't fail"); + match self.read(&key) { + Ok(Some(value)) => ChannelEnd::decode_vec(&value).map_err(|_| { + ContextError::ChannelError(ChannelError::Other { + description: format!( + "Decoding the channel end failed: Key {}", + key, + ), + }) + }), + Ok(None) => { + let port_channel_id = + storage::port_channel_id(&key).expect("invalid key"); + Err(ContextError::ChannelError(ChannelError::ChannelNotFound { + channel_id: port_channel_id.channel_id, + port_id: port_channel_id.port_id, + })) + } + Err(_) => Err(ContextError::ChannelError(ChannelError::Other { + description: format!( + "Reading the channel end failed: Key {}", + key, + ), + })), + } + } + + /// Get the NextSequenceSend + fn get_next_sequence_send( + &self, + path: &SeqSendPath, + ) -> Result { + let path = Path::SeqSend(path.clone()); + let key = storage::ibc_key(path.to_string()) + .expect("Creating a key for the client state shouldn't fail"); + self.read_sequence(&key) + } + + /// Calculate the hash + fn hash(value: &[u8]) -> Vec { + sha2::Sha256::digest(value).to_vec() + } + + /// Calculate the packet commitment + fn compute_packet_commitment( + &self, + packet_data: &[u8], + timeout_height: &TimeoutHeight, + timeout_timestamp: &Timestamp, + ) -> PacketCommitment { + let mut hash_input = + timeout_timestamp.nanoseconds().to_be_bytes().to_vec(); + + let revision_number = + timeout_height.commitment_revision_number().to_be_bytes(); + hash_input.append(&mut revision_number.to_vec()); + + let revision_height = + timeout_height.commitment_revision_height().to_be_bytes(); + hash_input.append(&mut revision_height.to_vec()); + + let packet_data_hash = Self::hash(packet_data); + hash_input.append(&mut packet_data_hash.to_vec()); + + Self::hash(&hash_input).into() + } + + /// Decode ClientState from Any + fn decode_client_state( + &self, + client_state: Any, + ) -> Result, ContextError> { + #[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))] + if let Ok(cs) = MockClientState::try_from(client_state.clone()) { + return Ok(cs.into_box()); + } + + if let Ok(cs) = TmClientState::try_from(client_state) { + return Ok(cs.into_box()); + } + + Err(ContextError::ClientError(ClientError::ClientSpecific { + description: "Unknown client state".to_string(), + })) + } + + /// Decode ConsensusState from Any + fn decode_consensus_state( + &self, + consensus_state: Any, + ) -> Result, ContextError> { + #[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))] + if let Ok(cs) = MockConsensusState::try_from(consensus_state.clone()) { + return Ok(cs.into_box()); + } + + if let Ok(cs) = TmConsensusState::try_from(consensus_state) { + return Ok(cs.into_box()); + } + + Err(ContextError::ClientError(ClientError::ClientSpecific { + 
description: "Unknown consensus state".to_string(), + })) + } + + /// Read a counter + fn read_counter(&self, key: &Key) -> Result { + match self.read(key) { + Ok(Some(value)) => { + let value: [u8; 8] = value.try_into().map_err(|_| { + ContextError::ClientError(ClientError::Other { + description: format!( + "The counter value wasn't u64: Key {}", + key + ), + }) + })?; + Ok(u64::from_be_bytes(value)) + } + Ok(None) => unreachable!("the counter should be initialized"), + Err(_) => Err(ContextError::ClientError(ClientError::Other { + description: format!("Reading the counter failed: Key {}", key), + })), + } + } + + /// Read a sequence + fn read_sequence(&self, key: &Key) -> Result { + match self.read(key) { + Ok(Some(value)) => { + let value: [u8; 8] = value.try_into().map_err(|_| { + ContextError::ChannelError(ChannelError::Other { + description: format!( + "The counter value wasn't u64: Key {}", + key + ), + }) + })?; + Ok(u64::from_be_bytes(value).into()) + } + // when the sequence has never been used, returns the initial value + Ok(None) => Ok(1.into()), + Err(_) => { + let sequence = storage::port_channel_sequence_id(key) + .expect("The key should have sequence") + .2; + Err(ContextError::ChannelError(ChannelError::Other { + description: format!( + "Reading the next sequence send failed: Sequence {}", + sequence + ), + })) + } + } + } + + /// Write the packet commitment + fn store_packet_commitment( + &mut self, + path: &CommitmentPath, + commitment: PacketCommitment, + ) -> Result<(), ContextError> { + let path = Path::Commitment(path.clone()); + let key = storage::ibc_key(path.to_string()) + .expect("Creating a key for the client state shouldn't fail"); + let bytes = commitment.into_vec(); + self.write(&key, bytes).map_err(|_| { + ContextError::PacketError(PacketError::Channel( + ChannelError::Other { + description: format!( + "Writing the packet commitment failed: Key {}", + key + ), + }, + )) + }) + } + + /// Write the NextSequenceSend + fn store_next_sequence_send( + &mut self, + path: &SeqSendPath, + seq: Sequence, + ) -> Result<(), ContextError> { + let path = Path::SeqSend(path.clone()); + let key = storage::ibc_key(path.to_string()) + .expect("Creating a key for the client state shouldn't fail"); + self.store_sequence(&key, seq) + } + + /// Increment and write the counter + fn increase_counter(&mut self, key: &Key) -> Result<(), ContextError> { + let count = self.read_counter(key)?; + self.write(key, (count + 1).to_be_bytes().to_vec()) + .map_err(|_| { + ContextError::ClientError(ClientError::Other { + description: format!( + "Writing the counter failed: Key {}", + key + ), + }) + }) + } + + /// Write the sequence + fn store_sequence( + &mut self, + key: &Key, + sequence: Sequence, + ) -> Result<(), ContextError> { + self.write(key, u64::from(sequence).to_be_bytes().to_vec()) + .map_err(|_| { + ContextError::PacketError(PacketError::Channel( + ChannelError::Other { + description: format!( + "Writing the counter failed: Key {}", + key + ), + }, + )) + }) + } + + /// Write the denom + fn store_denom( + &mut self, + trace_hash: String, + denom: PrefixedDenom, + ) -> Result<(), ContextError> { + let key = storage::ibc_denom_key(trace_hash); + let bytes = denom.to_string().as_bytes().to_vec(); + self.write(&key, bytes).map_err(|_| { + ContextError::ChannelError(ChannelError::Other { + description: format!("Writing the denom failed: Key {}", key), + }) + }) + } +} diff --git a/core/src/ledger/ibc/context/execution.rs b/core/src/ledger/ibc/context/execution.rs new file mode 100644 
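The commitment layout used by `compute_packet_commitment` above is
`sha256(timeout_timestamp_be || revision_number_be || revision_height_be || sha256(packet_data))`.
A standalone sketch of the same layout, using only the `sha2` crate and
illustrative inputs (not part of the patch):

    use sha2::{Digest, Sha256};

    fn packet_commitment(
        packet_data: &[u8],
        timeout_timestamp_nanos: u64,
        revision_number: u64,
        revision_height: u64,
    ) -> Vec<u8> {
        // Big-endian timeout fields, then the hash of the packet data...
        let mut hash_input = timeout_timestamp_nanos.to_be_bytes().to_vec();
        hash_input.extend_from_slice(&revision_number.to_be_bytes());
        hash_input.extend_from_slice(&revision_height.to_be_bytes());
        hash_input.extend_from_slice(Sha256::digest(packet_data).as_slice());
        // ...and the whole concatenation is hashed again.
        Sha256::digest(&hash_input).to_vec()
    }

    fn main() {
        // Illustrative values; real inputs come from the packet and timeout.
        assert_eq!(packet_commitment(b"transfer", 1, 0, 42).len(), 32);
    }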
diff --git a/core/src/ledger/ibc/context/execution.rs b/core/src/ledger/ibc/context/execution.rs
new file mode 100644
index 00000000000..ddf289ce08a
--- /dev/null
+++ b/core/src/ledger/ibc/context/execution.rs
@@ -0,0 +1,382 @@
+//! ExecutionContext implementation for IBC
+
+use super::super::{IbcActions, IbcCommonContext};
+use crate::ibc::core::ics02_client::client_state::ClientState;
+use crate::ibc::core::ics02_client::client_type::ClientType;
+use crate::ibc::core::ics02_client::consensus_state::ConsensusState;
+use crate::ibc::core::ics02_client::error::ClientError;
+use crate::ibc::core::ics03_connection::connection::ConnectionEnd;
+use crate::ibc::core::ics03_connection::error::ConnectionError;
+use crate::ibc::core::ics04_channel::channel::ChannelEnd;
+use crate::ibc::core::ics04_channel::commitment::{
+    AcknowledgementCommitment, PacketCommitment,
+};
+use crate::ibc::core::ics04_channel::error::{ChannelError, PacketError};
+use crate::ibc::core::ics04_channel::packet::{Receipt, Sequence};
+use crate::ibc::core::ics24_host::identifier::{ClientId, ConnectionId};
+use crate::ibc::core::ics24_host::path::{
+    AckPath, ChannelEndPath, ClientConnectionPath, ClientConsensusStatePath,
+    ClientStatePath, ClientTypePath, CommitmentPath, ConnectionPath, Path,
+    ReceiptPath, SeqAckPath, SeqRecvPath, SeqSendPath,
+};
+use crate::ibc::core::{ContextError, ExecutionContext, ValidationContext};
+use crate::ibc::events::IbcEvent;
+use crate::ibc::timestamp::Timestamp;
+use crate::ibc::Height;
+use crate::ibc_proto::protobuf::Protobuf;
+use crate::ledger::ibc::storage;
+use crate::tendermint_proto::Protobuf as TmProtobuf;
+
+impl<C> ExecutionContext for IbcActions<'_, C>
+where
+    C: IbcCommonContext,
+{
+    fn store_client_type(
+        &mut self,
+        client_type_path: ClientTypePath,
+        client_type: ClientType,
+    ) -> Result<(), ContextError> {
+        let path = Path::ClientType(client_type_path);
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the client type shouldn't fail");
+        let bytes = client_type.as_str().as_bytes().to_vec();
+        self.ctx.borrow_mut().write(&key, bytes).map_err(|_| {
+            ContextError::ClientError(ClientError::Other {
+                description: format!(
+                    "Writing the client type failed: Key {}",
+                    key
+                ),
+            })
+        })
+    }
+
+    fn store_client_state(
+        &mut self,
+        client_state_path: ClientStatePath,
+        client_state: Box<dyn ClientState>,
+    ) -> Result<(), ContextError> {
+        let path = Path::ClientState(client_state_path);
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the client state shouldn't fail");
+        let bytes = client_state.encode_vec().expect("encoding shouldn't fail");
+        self.ctx.borrow_mut().write(&key, bytes).map_err(|_| {
+            ContextError::ClientError(ClientError::Other {
+                description: format!(
+                    "Writing the client state failed: Key {}",
+                    key
+                ),
+            })
+        })
+    }
+
+    fn store_consensus_state(
+        &mut self,
+        consensus_state_path: ClientConsensusStatePath,
+        consensus_state: Box<dyn ConsensusState>,
+    ) -> Result<(), ContextError> {
+        let path = Path::ClientConsensusState(consensus_state_path);
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the consensus state shouldn't fail");
+        let bytes = consensus_state
+            .encode_vec()
+            .expect("encoding shouldn't fail");
+        self.ctx.borrow_mut().write(&key, bytes).map_err(|_| {
+            ContextError::ClientError(ClientError::Other {
+                description: format!(
+                    "Writing the consensus state failed: Key {}",
+                    key
+                ),
+            })
+        })
+    }
+
+    fn increase_client_counter(&mut self) {
+        let key = storage::client_counter_key();
+        let count = self.client_counter().expect("read failed");
+        self.ctx
+            .borrow_mut()
+            .write(&key, (count + 1).to_be_bytes().to_vec())
+            .expect("write failed");
+    }
+
+    fn store_update_time(
+        &mut self,
+        client_id: ClientId,
+        _height: Height,
+        timestamp: Timestamp,
+    ) -> Result<(), ContextError> {
+        let key = storage::client_update_timestamp_key(&client_id);
+        match timestamp.into_tm_time() {
+            Some(time) => self
+                .ctx
+                .borrow_mut()
+                .write(
+                    &key,
+                    time.encode_vec().expect("encoding shouldn't fail"),
+                )
+                .map_err(|_| {
+                    ContextError::ClientError(ClientError::Other {
+                        description: format!(
+                            "Writing the client update time failed: Key {}",
+                            key
+                        ),
+                    })
+                }),
+            None => Err(ContextError::ClientError(ClientError::Other {
+                description: format!(
+                    "The client timestamp is invalid: ID {}",
+                    client_id
+                ),
+            })),
+        }
+    }
+
+    fn store_update_height(
+        &mut self,
+        client_id: ClientId,
+        _height: Height,
+        host_height: Height,
+    ) -> Result<(), ContextError> {
+        let key = storage::client_update_height_key(&client_id);
+        let bytes = host_height.encode_vec().expect("encoding shouldn't fail");
+        self.ctx.borrow_mut().write(&key, bytes).map_err(|_| {
+            ContextError::ClientError(ClientError::Other {
+                description: format!(
+                    "Writing the client update height failed: Key {}",
+                    key
+                ),
+            })
+        })
+    }
+
+    fn store_connection(
+        &mut self,
+        connection_path: &ConnectionPath,
+        connection_end: ConnectionEnd,
+    ) -> Result<(), ContextError> {
+        let path = Path::Connection(connection_path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the connection end shouldn't fail");
+        let bytes = connection_end
+            .encode_vec()
+            .expect("encoding shouldn't fail");
+        self.ctx.borrow_mut().write(&key, bytes).map_err(|_| {
+            ContextError::ConnectionError(ConnectionError::Other {
+                description: format!(
+                    "Writing the connection end failed: Key {}",
+                    key
+                ),
+            })
+        })
+    }
+
+    fn store_connection_to_client(
+        &mut self,
+        client_connection_path: &ClientConnectionPath,
+        conn_id: ConnectionId,
+    ) -> Result<(), ContextError> {
+        let path = Path::ClientConnection(client_connection_path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the connection list shouldn't fail");
+        let list = match self.ctx.borrow().read(&key) {
+            Ok(Some(value)) => {
+                let list = String::from_utf8(value).map_err(|e| {
+                    ContextError::ConnectionError(ConnectionError::Other {
+                        description: format!(
+                            "Decoding the connection list failed: Key {}, \
+                             error {}",
+                            key, e
+                        ),
+                    })
+                })?;
+                format!("{},{}", list, conn_id)
+            }
+            Ok(None) => conn_id.to_string(),
+            Err(_) => {
+                Err(ContextError::ConnectionError(ConnectionError::Other {
+                    description: format!(
+                        "Reading the connection list failed: Key {}",
+                        key,
+                    ),
+                }))?
+            }
+        };
+        let bytes = list.as_bytes().to_vec();
+        self.ctx.borrow_mut().write(&key, bytes).map_err(|_| {
+            ContextError::ConnectionError(ConnectionError::Other {
+                description: format!(
+                    "Writing the list of connection IDs failed: Key {}",
+                    key
+                ),
+            })
+        })
+    }
+
+    fn increase_connection_counter(&mut self) {
+        let key = storage::connection_counter_key();
+        self.ctx
+            .borrow_mut()
+            .increase_counter(&key)
+            .expect("Error cannot be returned");
+    }
+
+    fn store_packet_commitment(
+        &mut self,
+        path: &CommitmentPath,
+        commitment: PacketCommitment,
+    ) -> Result<(), ContextError> {
+        self.ctx
+            .borrow_mut()
+            .store_packet_commitment(path, commitment)
+    }
+
+    fn delete_packet_commitment(
+        &mut self,
+        path: &CommitmentPath,
+    ) -> Result<(), ContextError> {
+        let path = Path::Commitment(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the packet commitment shouldn't fail");
+        self.ctx.borrow_mut().delete(&key).map_err(|_| {
+            ContextError::PacketError(PacketError::Channel(
+                ChannelError::Other {
+                    description: format!(
+                        "Deleting the packet commitment failed: Key {}",
+                        key
+                    ),
+                },
+            ))
+        })
+    }
+
+    fn store_packet_receipt(
+        &mut self,
+        path: &ReceiptPath,
+        _receipt: Receipt,
+    ) -> Result<(), ContextError> {
+        let path = Path::Receipt(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the receipt shouldn't fail");
+        // the value is the same as ibc-go
+        let bytes = [1_u8].to_vec();
+        self.ctx.borrow_mut().write(&key, bytes).map_err(|_| {
+            ContextError::PacketError(PacketError::Channel(
+                ChannelError::Other {
+                    description: format!(
+                        "Writing the receipt failed: Key {}",
+                        key
+                    ),
+                },
+            ))
+        })
+    }
+
+    fn store_packet_acknowledgement(
+        &mut self,
+        path: &AckPath,
+        ack_commitment: AcknowledgementCommitment,
+    ) -> Result<(), ContextError> {
+        let path = Path::Ack(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the packet ack shouldn't fail");
+        let bytes = ack_commitment.into_vec();
+        self.ctx.borrow_mut().write(&key, bytes).map_err(|_| {
+            ContextError::PacketError(PacketError::Channel(
+                ChannelError::Other {
+                    description: format!(
+                        "Writing the packet ack failed: Key {}",
+                        key
+                    ),
+                },
+            ))
+        })
+    }
+
+    fn delete_packet_acknowledgement(
+        &mut self,
+        path: &AckPath,
+    ) -> Result<(), ContextError> {
+        let path = Path::Ack(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the packet ack shouldn't fail");
+        self.ctx.borrow_mut().delete(&key).map_err(|_| {
+            ContextError::PacketError(PacketError::Channel(
+                ChannelError::Other {
+                    description: format!(
+                        "Deleting the packet ack failed: Key {}",
+                        key
+                    ),
+                },
+            ))
+        })
+    }
+
+    fn store_channel(
+        &mut self,
+        path: &ChannelEndPath,
+        channel_end: ChannelEnd,
+    ) -> Result<(), ContextError> {
+        let path = Path::ChannelEnd(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the channel end shouldn't fail");
+        let bytes = channel_end.encode_vec().expect("encoding shouldn't fail");
+        self.ctx.borrow_mut().write(&key, bytes).map_err(|_| {
+            ContextError::ChannelError(ChannelError::Other {
+                description: format!(
+                    "Writing the channel end failed: Key {}",
+                    key
+                ),
+            })
+        })
+    }
+
+    fn store_next_sequence_send(
+        &mut self,
+        path: &SeqSendPath,
+        seq: Sequence,
+    ) -> Result<(), ContextError> {
+        self.ctx.borrow_mut().store_next_sequence_send(path, seq)
+    }
+
+    fn store_next_sequence_recv(
+        &mut self,
+        path: &SeqRecvPath,
+        seq: Sequence,
+    ) -> Result<(), ContextError> {
+        let path = Path::SeqRecv(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the next sequence recv shouldn't fail");
+        self.ctx.borrow_mut().store_sequence(&key, seq)
+    }
+
+    fn store_next_sequence_ack(
+        &mut self,
+        path: &SeqAckPath,
+        seq: Sequence,
+    ) -> Result<(), ContextError> {
+        let path = Path::SeqAck(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the next sequence ack shouldn't fail");
+        self.ctx.borrow_mut().store_sequence(&key, seq)
+    }
+
+    fn increase_channel_counter(&mut self) {
+        let key = storage::channel_counter_key();
+        self.ctx
+            .borrow_mut()
+            .increase_counter(&key)
+            .expect("Error cannot be returned");
+    }
+
+    fn emit_ibc_event(&mut self, event: IbcEvent) {
+        let event = event.try_into().expect("The event should be converted");
+        self.ctx
+            .borrow_mut()
+            .emit_ibc_event(event)
+            .expect("Emitting an event shouldn't fail");
+    }
+
+    fn log_message(&mut self, message: String) {
+        self.ctx.borrow_mut().log_string(message)
+    }
+}
diff --git a/core/src/ledger/ibc/context/mod.rs b/core/src/ledger/ibc/context/mod.rs
new file mode 100644
index 00000000000..ef2311979a8
--- /dev/null
+++ b/core/src/ledger/ibc/context/mod.rs
@@ -0,0 +1,8 @@
+//! IBC Contexts
+
+pub mod common;
+pub mod execution;
+pub mod router;
+pub mod storage;
+pub mod transfer_mod;
+pub mod validation;
diff --git a/core/src/ledger/ibc/context/router.rs b/core/src/ledger/ibc/context/router.rs
new file mode 100644
index 00000000000..62d676c3a21
--- /dev/null
+++ b/core/src/ledger/ibc/context/router.rs
@@ -0,0 +1,35 @@
+//! Functions to handle IBC modules
+
+use std::rc::Rc;
+
+use super::super::{IbcActions, IbcCommonContext};
+use crate::ibc::core::context::Router;
+use crate::ibc::core::ics24_host::identifier::PortId;
+use crate::ibc::core::ics26_routing::context::{Module, ModuleId};
+
+impl<C> Router for IbcActions<'_, C>
+where
+    C: IbcCommonContext,
+{
+    fn get_route(&self, module_id: &ModuleId) -> Option<&dyn Module> {
+        self.modules.get(module_id).map(|b| b.as_module())
+    }
+
+    fn get_route_mut(
+        &mut self,
+        module_id: &ModuleId,
+    ) -> Option<&mut dyn Module> {
+        self.modules
+            .get_mut(module_id)
+            .and_then(Rc::get_mut)
+            .map(|b| b.as_module_mut())
+    }
+
+    fn has_route(&self, module_id: &ModuleId) -> bool {
+        self.modules.contains_key(module_id)
+    }
+
+    fn lookup_module_by_port(&self, port_id: &PortId) -> Option<ModuleId> {
+        self.ports.get(port_id).cloned()
+    }
+}
diff --git a/core/src/ledger/ibc/context/storage.rs b/core/src/ledger/ibc/context/storage.rs
new file mode 100644
index 00000000000..a99a6591876
--- /dev/null
+++ b/core/src/ledger/ibc/context/storage.rs
@@ -0,0 +1,73 @@
+//! IBC storage context
+
+use std::fmt::Debug;
+
+pub use ics23::ProofSpec;
+
+use super::super::Error;
+use crate::ledger::storage_api;
+use crate::types::ibc::IbcEvent;
+use crate::types::storage::{BlockHeight, Header, Key};
+use crate::types::token::Amount;
+
+// This is needed to use `ibc::Handler::Error` with `IbcActions` in
+// `tx_prelude/src/ibc.rs`
+impl From<Error> for storage_api::Error {
+    fn from(err: Error) -> Self {
+        storage_api::Error::new(err)
+    }
+}
+
+/// IBC storage context trait, to be implemented by any integration that can
+/// read and write IBC-related state
+pub trait IbcStorageContext {
+    /// IBC storage error
+    type Error: From<Error> + Debug;
+    /// Storage read prefix iterator
+    type PrefixIter<'iter>
+    where
+        Self: 'iter;
+
+    /// Read IBC-related data
+    fn read(&self, key: &Key) -> Result<Option<Vec<u8>>, Self::Error>;
+
+    /// Read IBC-related data with a prefix
+    fn iter_prefix<'iter>(
+        &'iter self,
+        prefix: &Key,
+    ) -> Result<Self::PrefixIter<'iter>, Self::Error>;
+
+    /// Get the next key-value pair from the prefix iterator
+    fn iter_next<'iter>(
+        &'iter self,
+        iter: &mut Self::PrefixIter<'iter>,
+    ) -> Result<Option<(String, Vec<u8>)>, Self::Error>;
+
+    /// Write IBC-related data
+    fn write(&mut self, key: &Key, value: Vec<u8>) -> Result<(), Self::Error>;
+
+    /// Delete IBC-related data
+    fn delete(&mut self, key: &Key) -> Result<(), Self::Error>;
+
+    /// Emit an IBC event
+    fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), Self::Error>;
+
+    /// Transfer token
+    fn transfer_token(
+        &mut self,
+        src: &Key,
+        dest: &Key,
+        amount: Amount,
+    ) -> Result<(), Self::Error>;
+
+    /// Get the current height of this chain
+    fn get_height(&self) -> Result<BlockHeight, Self::Error>;
+
+    /// Get the block header of this chain
+    fn get_header(
+        &self,
+        height: BlockHeight,
+    ) -> Result<Option<Header>, Self::Error>;
+
+    /// Logging
+    fn log_string(&self, message: String);
+}
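The `PrefixIter` associated type lets each integration supply its own iterator
behind `iter_prefix`/`iter_next`. A minimal in-memory analogue of that
contract, standalone and with `String` keys standing in for `Key`:

    use std::collections::BTreeMap;

    struct MemStore {
        data: BTreeMap<String, Vec<u8>>,
    }

    impl MemStore {
        // Analogue of `iter_prefix`: every pair whose key starts with `prefix`.
        fn iter_prefix<'iter>(
            &'iter self,
            prefix: &str,
        ) -> impl Iterator<Item = (String, Vec<u8>)> + 'iter {
            let prefix = prefix.to_owned();
            self.data
                .iter()
                .filter(move |(k, _)| k.starts_with(&prefix))
                .map(|(k, v)| (k.clone(), v.clone()))
        }
    }

    fn main() {
        let mut data = BTreeMap::new();
        data.insert("clients/07-tendermint-0/clientState".to_owned(), vec![1]);
        data.insert("clients/07-tendermint-1/clientState".to_owned(), vec![2]);
        let store = MemStore { data };
        // Repeated `iter_next` calls correspond to driving this iterator.
        assert_eq!(store.iter_prefix("clients/").count(), 2);
    }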
diff --git a/core/src/ledger/ibc/context/transfer_mod.rs b/core/src/ledger/ibc/context/transfer_mod.rs
new file mode 100644
index 00000000000..73961a1e28f
--- /dev/null
+++ b/core/src/ledger/ibc/context/transfer_mod.rs
@@ -0,0 +1,823 @@
+//! IBC module for token transfer
+
+use std::cell::RefCell;
+use std::fmt::Debug;
+use std::rc::Rc;
+use std::str::FromStr;
+
+use super::common::IbcCommonContext;
+use crate::ibc::applications::transfer::coin::PrefixedCoin;
+use crate::ibc::applications::transfer::context::{
+    on_acknowledgement_packet_execute, on_acknowledgement_packet_validate,
+    on_chan_close_confirm_execute, on_chan_close_confirm_validate,
+    on_chan_close_init_execute, on_chan_close_init_validate,
+    on_chan_open_ack_execute, on_chan_open_ack_validate,
+    on_chan_open_confirm_execute, on_chan_open_confirm_validate,
+    on_chan_open_init_execute, on_chan_open_init_validate,
+    on_chan_open_try_execute, on_chan_open_try_validate,
+    on_recv_packet_execute, on_timeout_packet_execute,
+    on_timeout_packet_validate, TokenTransferExecutionContext,
+    TokenTransferValidationContext,
+};
+use crate::ibc::applications::transfer::denom::PrefixedDenom;
+use crate::ibc::applications::transfer::error::TokenTransferError;
+use crate::ibc::applications::transfer::MODULE_ID_STR;
+use crate::ibc::core::ics02_client::client_state::ClientState;
+use crate::ibc::core::ics02_client::consensus_state::ConsensusState;
+use crate::ibc::core::ics03_connection::connection::ConnectionEnd;
+use crate::ibc::core::ics04_channel::channel::{
+    ChannelEnd, Counterparty, Order,
+};
+use crate::ibc::core::ics04_channel::commitment::PacketCommitment;
+use crate::ibc::core::ics04_channel::context::{
+    SendPacketExecutionContext, SendPacketValidationContext,
+};
+use crate::ibc::core::ics04_channel::error::{ChannelError, PacketError};
+use crate::ibc::core::ics04_channel::handler::ModuleExtras;
+use crate::ibc::core::ics04_channel::msgs::acknowledgement::Acknowledgement;
+use crate::ibc::core::ics04_channel::packet::{Packet, Sequence};
+use crate::ibc::core::ics04_channel::Version;
+use crate::ibc::core::ics24_host::identifier::{
+    ChannelId, ClientId, ConnectionId, PortId,
+};
+use crate::ibc::core::ics24_host::path::{
+    ChannelEndPath, ClientConsensusStatePath, CommitmentPath, SeqSendPath,
+};
+use crate::ibc::core::ics26_routing::context::{Module, ModuleId};
+use crate::ibc::core::ContextError;
+use crate::ibc::events::IbcEvent;
+use crate::ibc::signer::Signer;
+use crate::ledger::ibc::storage;
+use crate::types::address::{Address, InternalAddress};
+use crate::types::token;
+
+/// IBC module wrapper for getting a reference to the module
+pub trait ModuleWrapper: Module {
+    /// Get a reference to the module
+    fn as_module(&self) -> &dyn Module;
+
+    /// Get a mutable reference to the module
+    fn as_module_mut(&mut self) -> &mut dyn Module;
+}
+
+/// IBC module for token transfer
+#[derive(Debug)]
+pub struct TransferModule<C>
+where
+    C: IbcCommonContext,
+{
+    /// IBC common context
+    pub ctx: Rc<RefCell<C>>,
+}
+
+impl<C> TransferModule<C>
+where
+    C: IbcCommonContext,
+{
+    /// Make a new module
+    pub fn new(ctx: Rc<RefCell<C>>) -> Self {
+        Self { ctx }
+    }
+
+    /// Get the module ID
+    pub fn module_id(&self) -> ModuleId {
+        ModuleId::from_str(MODULE_ID_STR).expect("should be parsable")
+    }
+}
+
+impl<C> ModuleWrapper for TransferModule<C>
+where
+    C: IbcCommonContext + Debug,
+{
+    fn as_module(&self) -> &dyn Module {
+        self
+    }
+
+    fn as_module_mut(&mut self) -> &mut dyn Module {
+        self
+    }
+}
+
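+// A `TransferModule` shares one mutable context with `IbcActions` through
+// `Rc<RefCell<_>>`, so the router and the module can both borrow it within a
+// single thread, e.g. (illustrative variable names, not part of the patch):
+//
+//     let ctx = Rc::new(RefCell::new(storage_ctx));
+//     let module = TransferModule::new(ctx.clone());
+//     let module_id = module.module_id();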
+impl<C> Module for TransferModule<C>
+where
+    C: IbcCommonContext + Debug,
+{
+    #[allow(clippy::too_many_arguments)]
+    fn on_chan_open_init_validate(
+        &self,
+        order: Order,
+        connection_hops: &[ConnectionId],
+        port_id: &PortId,
+        channel_id: &ChannelId,
+        counterparty: &Counterparty,
+        version: &Version,
+    ) -> Result<Version, ChannelError> {
+        on_chan_open_init_validate(
+            self,
+            order,
+            connection_hops,
+            port_id,
+            channel_id,
+            counterparty,
+            version,
+        )
+        .map_err(into_channel_error)?;
+        Ok(version.clone())
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn on_chan_open_init_execute(
+        &mut self,
+        order: Order,
+        connection_hops: &[ConnectionId],
+        port_id: &PortId,
+        channel_id: &ChannelId,
+        counterparty: &Counterparty,
+        version: &Version,
+    ) -> Result<(ModuleExtras, Version), ChannelError> {
+        on_chan_open_init_execute(
+            self,
+            order,
+            connection_hops,
+            port_id,
+            channel_id,
+            counterparty,
+            version,
+        )
+        .map_err(into_channel_error)
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn on_chan_open_try_validate(
+        &self,
+        order: Order,
+        connection_hops: &[ConnectionId],
+        port_id: &PortId,
+        channel_id: &ChannelId,
+        counterparty: &Counterparty,
+        counterparty_version: &Version,
+    ) -> Result<Version, ChannelError> {
+        on_chan_open_try_validate(
+            self,
+            order,
+            connection_hops,
+            port_id,
+            channel_id,
+            counterparty,
+            counterparty_version,
+        )
+        .map_err(into_channel_error)?;
+        Ok(counterparty_version.clone())
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn on_chan_open_try_execute(
+        &mut self,
+        order: Order,
+        connection_hops: &[ConnectionId],
+        port_id: &PortId,
+        channel_id: &ChannelId,
+        counterparty: &Counterparty,
+        counterparty_version: &Version,
+    ) -> Result<(ModuleExtras, Version), ChannelError> {
+        on_chan_open_try_execute(
+            self,
+            order,
+            connection_hops,
+            port_id,
+            channel_id,
+            counterparty,
+            counterparty_version,
+        )
+        .map_err(into_channel_error)
+    }
+
+    fn on_chan_open_ack_validate(
+        &self,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+        counterparty_version: &Version,
+    ) -> Result<(), ChannelError> {
+        on_chan_open_ack_validate(
+            self,
+            port_id,
+            channel_id,
+            counterparty_version,
+        )
+        .map_err(into_channel_error)
+    }
+
+    fn on_chan_open_ack_execute(
+        &mut self,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+        counterparty_version: &Version,
+    ) -> Result<ModuleExtras, ChannelError> {
+        on_chan_open_ack_execute(
+            self,
+            port_id,
+            channel_id,
+            counterparty_version,
+        )
+        .map_err(into_channel_error)
+    }
+
+    fn on_chan_open_confirm_validate(
+        &self,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+    ) -> Result<(), ChannelError> {
+        on_chan_open_confirm_validate(self, port_id, channel_id)
+            .map_err(into_channel_error)
+    }
+
+    fn on_chan_open_confirm_execute(
+        &mut self,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+    ) -> Result<ModuleExtras, ChannelError> {
+        on_chan_open_confirm_execute(self, port_id, channel_id)
+            .map_err(into_channel_error)
+    }
+
+    fn on_chan_close_init_validate(
+        &self,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+    ) -> Result<(), ChannelError> {
+        on_chan_close_init_validate(self, port_id, channel_id)
+            .map_err(into_channel_error)
+    }
+
+    fn on_chan_close_init_execute(
+        &mut self,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+    ) -> Result<ModuleExtras, ChannelError> {
+        on_chan_close_init_execute(self, port_id, channel_id)
+            .map_err(into_channel_error)
+    }
+
+    fn on_chan_close_confirm_validate(
+        &self,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+    ) -> Result<(), ChannelError> {
+        on_chan_close_confirm_validate(self, port_id, channel_id)
+            .map_err(into_channel_error)
+    }
+
+    fn on_chan_close_confirm_execute(
+        &mut self,
+        port_id: &PortId,
+        channel_id: &ChannelId,
+    ) -> Result<ModuleExtras, ChannelError> {
+        on_chan_close_confirm_execute(self, port_id, channel_id)
+            .map_err(into_channel_error)
+    }
+
+    fn on_recv_packet_execute(
+        &mut self,
+        packet: &Packet,
+        _relayer: &Signer,
+    ) -> (ModuleExtras, Acknowledgement) {
+        on_recv_packet_execute(self, packet)
+    }
+
+    fn on_acknowledgement_packet_validate(
+        &self,
+        packet: &Packet,
+        acknowledgement: &Acknowledgement,
+        relayer: &Signer,
+    ) -> Result<(), PacketError> {
+        on_acknowledgement_packet_validate(
+            self,
+            packet,
+            acknowledgement,
+            relayer,
+        )
+        .map_err(into_packet_error)
+    }
+
+    fn on_acknowledgement_packet_execute(
+        &mut self,
+        packet: &Packet,
+        acknowledgement: &Acknowledgement,
+        relayer: &Signer,
+    ) -> (ModuleExtras, Result<(), PacketError>) {
+        let (extras, result) = on_acknowledgement_packet_execute(
+            self,
+            packet,
+            acknowledgement,
+            relayer,
+        );
+        (extras, result.map_err(into_packet_error))
+    }
+
+    fn on_timeout_packet_validate(
+        &self,
+        packet: &Packet,
+        relayer: &Signer,
+    ) -> Result<(), PacketError> {
+        on_timeout_packet_validate(self, packet, relayer)
+            .map_err(into_packet_error)
+    }
+
+    fn on_timeout_packet_execute(
+        &mut self,
+        packet: &Packet,
+        relayer: &Signer,
+    ) -> (ModuleExtras, Result<(), PacketError>) {
+        let (extras, result) = on_timeout_packet_execute(self, packet, relayer);
+        (extras, result.map_err(into_packet_error))
+    }
+}
+
+impl<C> SendPacketValidationContext for TransferModule<C>
+where
+    C: IbcCommonContext,
+{
+    fn channel_end(
+        &self,
+        channel_end_path: &ChannelEndPath,
+    ) -> Result<ChannelEnd, ContextError> {
+        self.ctx.borrow().channel_end(channel_end_path)
+    }
+
+    fn connection_end(
+        &self,
+        connection_id: &ConnectionId,
+    ) -> Result<ConnectionEnd, ContextError> {
+        self.ctx.borrow().connection_end(connection_id)
+    }
+
+    fn client_state(
+        &self,
+        client_id: &ClientId,
+    ) -> Result<Box<dyn ClientState>, ContextError> {
+        self.ctx.borrow().client_state(client_id)
+    }
+
+    fn client_consensus_state(
+        &self,
+        client_cons_state_path: &ClientConsensusStatePath,
+    ) -> Result<Box<dyn ConsensusState>, ContextError> {
+        self.ctx.borrow().consensus_state(client_cons_state_path)
+    }
+
+    fn get_next_sequence_send(
+        &self,
+        seq_send_path: &SeqSendPath,
+    ) -> Result<Sequence, ContextError> {
+        self.ctx.borrow().get_next_sequence_send(seq_send_path)
+    }
+}
+
+impl<C> TokenTransferValidationContext for TransferModule<C>
+where
+    C: IbcCommonContext,
+{
+    type AccountId = Address;
+
+    fn get_port(&self) -> Result<PortId, TokenTransferError> {
+        Ok(PortId::transfer())
+    }
+
+    fn get_escrow_account(
+        &self,
+        _port_id: &PortId,
+        _channel_id: &ChannelId,
+    ) -> Result<Self::AccountId, TokenTransferError> {
+        Ok(Address::Internal(InternalAddress::IbcEscrow))
+    }
+
+    fn can_send_coins(&self) -> Result<(), TokenTransferError> {
+        Ok(())
+    }
+
+    fn can_receive_coins(&self) -> Result<(), TokenTransferError> {
+        Ok(())
+    }
+
+    fn send_coins_validate(
+        &self,
+        _from: &Self::AccountId,
+        _to: &Self::AccountId,
+        _coin: &PrefixedCoin,
+    ) -> Result<(), TokenTransferError> {
+        // validated by IBC token VP
+        Ok(())
+    }
+
+    fn mint_coins_validate(
+        &self,
+        _account: &Self::AccountId,
+        _coin: &PrefixedCoin,
+    ) -> Result<(), TokenTransferError> {
+        // validated by IBC token VP
+        Ok(())
+    }
+
+    fn burn_coins_validate(
+        &self,
+        _account: &Self::AccountId,
+        _coin: &PrefixedCoin,
+    ) -> Result<(), TokenTransferError> {
+        // validated by IBC token VP
+        Ok(())
+    }
+
+    fn denom_hash_string(&self, denom: &PrefixedDenom) -> Option<String> {
+        Some(storage::calc_hash(denom.to_string()))
+    }
+}
+
+impl<C> TokenTransferExecutionContext for TransferModule<C>
+where
+    C: IbcCommonContext,
+{
+    fn send_coins_execute(
+        &mut self,
+        from: &Self::AccountId,
+        to: &Self::AccountId,
+        coin: &PrefixedCoin,
+    ) -> Result<(), TokenTransferError> {
+        // Assumes that the coin denom is prefixed with "port-id/channel-id"
+        // or has no prefix
+        let (token, amount) = get_token_amount(coin)?;
+
+        let src = if coin.denom.trace_path.is_empty()
+            || *from == Address::Internal(InternalAddress::IbcEscrow)
+            || *from == Address::Internal(InternalAddress::IbcMint)
+        {
+            token::balance_key(&token, from)
+        } else {
+            let prefix = storage::ibc_token_prefix(coin.denom.to_string())
+                .map_err(|_| TokenTransferError::InvalidCoin {
+                    coin: coin.to_string(),
+                })?;
+            token::multitoken_balance_key(&prefix, from)
+        };
+
+        let dest = if coin.denom.trace_path.is_empty()
+            || *to == Address::Internal(InternalAddress::IbcEscrow)
+            || *to == Address::Internal(InternalAddress::IbcBurn)
+        {
+            token::balance_key(&token, to)
+        } else {
+            let prefix = storage::ibc_token_prefix(coin.denom.to_string())
+                .map_err(|_| TokenTransferError::InvalidCoin {
+                    coin: coin.to_string(),
+                })?;
+            token::multitoken_balance_key(&prefix, to)
+        };
+
+        self.ctx
+            .borrow_mut()
+            .transfer_token(&src, &dest, amount)
+            .map_err(|_| {
+                TokenTransferError::ContextError(ContextError::ChannelError(
+                    ChannelError::Other {
+                        description: format!(
+                            "Sending a coin failed: from {}, to {}, amount {}",
+                            src, dest, amount
+                        ),
+                    },
+                ))
+            })
+    }
+
+    fn mint_coins_execute(
+        &mut self,
+        account: &Self::AccountId,
+        coin: &PrefixedCoin,
+    ) -> Result<(), TokenTransferError> {
+        let (token, amount) = get_token_amount(coin)?;
+
+        let src = token::balance_key(
+            &token,
+            &Address::Internal(InternalAddress::IbcMint),
+        );
+
+        let dest = if coin.denom.trace_path.is_empty() {
+            token::balance_key(&token, account)
+        } else {
+            let prefix = storage::ibc_token_prefix(coin.denom.to_string())
+                .map_err(|_| TokenTransferError::InvalidCoin {
+                    coin: coin.to_string(),
+                })?;
+            token::multitoken_balance_key(&prefix, account)
+        };
+
+        self.ctx
+            .borrow_mut()
+            .transfer_token(&src, &dest, amount)
+            .map_err(|_| {
+                TokenTransferError::ContextError(ContextError::ChannelError(
+                    ChannelError::Other {
+                        description: format!(
+                            "Minting a coin failed: from {}, to {}, amount {}",
+                            src, dest, amount
+                        ),
+                    },
+                ))
+            })
+    }
+
+    fn burn_coins_execute(
+        &mut self,
+        account: &Self::AccountId,
+        coin: &PrefixedCoin,
+    ) -> Result<(), TokenTransferError> {
+        let (token, amount) = get_token_amount(coin)?;
+
+        let src = if coin.denom.trace_path.is_empty() {
+            token::balance_key(&token, account)
+        } else {
+            let prefix = storage::ibc_token_prefix(coin.denom.to_string())
+                .map_err(|_| TokenTransferError::InvalidCoin {
+                    coin: coin.to_string(),
+                })?;
+            token::multitoken_balance_key(&prefix, account)
+        };
+
+        let dest = token::balance_key(
+            &token,
+            &Address::Internal(InternalAddress::IbcBurn),
+        );
+
+        self.ctx
+            .borrow_mut()
+            .transfer_token(&src, &dest, amount)
+            .map_err(|_| {
+                TokenTransferError::ContextError(ContextError::ChannelError(
+                    ChannelError::Other {
+                        description: format!(
+                            "Burning a coin failed: from {}, to {}, amount {}",
+                            src, dest, amount
+                        ),
+                    },
+                ))
+            })
+    }
+}
+
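+// The three executors above move tokens with a single `transfer_token` call
+// between two balance keys: escrow, mint and burn are ordinary accounts under
+// `InternalAddress`, and coins whose denom carries a "port-id/channel-id"
+// trace path live under multitoken balance keys rather than the plain
+// `token::balance_key`.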
+impl<C> SendPacketExecutionContext for TransferModule<C>
+where
+    C: IbcCommonContext,
+{
+    fn store_next_sequence_send(
+        &mut self,
+        seq_send_path: &SeqSendPath,
+        seq: Sequence,
+    ) -> Result<(), ContextError> {
+        self.ctx
+            .borrow_mut()
+            .store_next_sequence_send(seq_send_path, seq)
+    }
+
+    fn store_packet_commitment(
+        &mut self,
+        commitment_path: &CommitmentPath,
+        commitment: PacketCommitment,
+    ) -> Result<(), ContextError> {
+        self.ctx
+            .borrow_mut()
+            .store_packet_commitment(commitment_path, commitment)
+    }
+
+    fn emit_ibc_event(&mut self, event: IbcEvent) {
+        let event = event.try_into().expect("IBC event conversion failed");
+        self.ctx
+            .borrow_mut()
+            .emit_ibc_event(event)
+            .expect("Emitting an IBC event failed")
+    }
+
+    fn log_message(&mut self, message: String) {
+        self.ctx.borrow_mut().log_string(message)
+    }
+}
+
+/// Get the token address and the amount from PrefixedCoin
+fn get_token_amount(
+    coin: &PrefixedCoin,
+) -> Result<(Address, token::Amount), TokenTransferError> {
+    let token =
+        Address::decode(coin.denom.base_denom.as_str()).map_err(|_| {
+            TokenTransferError::InvalidCoin {
+                coin: coin.denom.base_denom.to_string(),
+            }
+        })?;
+
+    let amount = coin.amount.try_into().map_err(|_| {
+        TokenTransferError::InvalidCoin {
+            coin: coin.to_string(),
+        }
+    })?;
+
+    Ok((token, amount))
+}
+
+fn into_channel_error(error: TokenTransferError) -> ChannelError {
+    ChannelError::AppModule {
+        description: error.to_string(),
+    }
+}
+
+fn into_packet_error(error: TokenTransferError) -> PacketError {
+    PacketError::AppModule {
+        description: error.to_string(),
+    }
+}
+
+/// Helpers for testing
+#[cfg(any(test, feature = "testing"))]
+pub mod testing {
+    use super::*;
+    use crate::ibc::applications::transfer::acknowledgement::TokenTransferAcknowledgement;
+
+    /// Dummy IBC module for token transfer
+    #[derive(Debug)]
+    pub struct DummyTransferModule {}
+
+    impl DummyTransferModule {
+        /// Get the module ID
+        pub fn module_id(&self) -> ModuleId {
+            ModuleId::from_str(MODULE_ID_STR).expect("should be parsable")
+        }
+    }
+
+    impl ModuleWrapper for DummyTransferModule {
+        fn as_module(&self) -> &dyn Module {
+            self
+        }
+
+        fn as_module_mut(&mut self) -> &mut dyn Module {
+            self
+        }
+    }
+
+    impl Module for DummyTransferModule {
+        #[allow(clippy::too_many_arguments)]
+        fn on_chan_open_init_validate(
+            &self,
+            _order: Order,
+            _connection_hops: &[ConnectionId],
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+            _counterparty: &Counterparty,
+            version: &Version,
+        ) -> Result<Version, ChannelError> {
+            Ok(version.clone())
+        }
+
+        #[allow(clippy::too_many_arguments)]
+        fn on_chan_open_init_execute(
+            &mut self,
+            _order: Order,
+            _connection_hops: &[ConnectionId],
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+            _counterparty: &Counterparty,
+            version: &Version,
+        ) -> Result<(ModuleExtras, Version), ChannelError> {
+            Ok((ModuleExtras::empty(), version.clone()))
+        }
+
+        #[allow(clippy::too_many_arguments)]
+        fn on_chan_open_try_validate(
+            &self,
+            _order: Order,
+            _connection_hops: &[ConnectionId],
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+            _counterparty: &Counterparty,
+            counterparty_version: &Version,
+        ) -> Result<Version, ChannelError> {
+            Ok(counterparty_version.clone())
+        }
+
+        #[allow(clippy::too_many_arguments)]
+        fn on_chan_open_try_execute(
+            &mut self,
+            _order: Order,
+            _connection_hops: &[ConnectionId],
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+            _counterparty: &Counterparty,
+            counterparty_version: &Version,
+        ) -> Result<(ModuleExtras, Version), ChannelError> {
+            Ok((ModuleExtras::empty(), counterparty_version.clone()))
+        }
+
+        fn on_chan_open_ack_validate(
+            &self,
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+            _counterparty_version: &Version,
+        ) -> Result<(), ChannelError> {
+            Ok(())
+        }
+
+        fn on_chan_open_ack_execute(
+            &mut self,
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+            _counterparty_version: &Version,
+        ) -> Result<ModuleExtras, ChannelError> {
+            Ok(ModuleExtras::empty())
+        }
+
+        fn on_chan_open_confirm_validate(
+            &self,
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+        ) -> Result<(), ChannelError> {
+            Ok(())
+        }
+
+        fn on_chan_open_confirm_execute(
+            &mut self,
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+        ) -> Result<ModuleExtras, ChannelError> {
+            Ok(ModuleExtras::empty())
+        }
+
+        fn on_chan_close_init_validate(
+            &self,
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+        ) -> Result<(), ChannelError> {
+            Ok(())
+        }
+
+        fn on_chan_close_init_execute(
+            &mut self,
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+        ) -> Result<ModuleExtras, ChannelError> {
+            Ok(ModuleExtras::empty())
+        }
+
+        fn on_chan_close_confirm_validate(
+            &self,
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+        ) -> Result<(), ChannelError> {
+            Ok(())
+        }
+
+        fn on_chan_close_confirm_execute(
+            &mut self,
+            _port_id: &PortId,
+            _channel_id: &ChannelId,
+        ) -> Result<ModuleExtras, ChannelError> {
+            Ok(ModuleExtras::empty())
+        }
+
+        fn on_recv_packet_execute(
+            &mut self,
+            _packet: &Packet,
+            _relayer: &Signer,
+        ) -> (ModuleExtras, Acknowledgement) {
+            let transfer_ack = TokenTransferAcknowledgement::success();
+            (ModuleExtras::empty(), transfer_ack.into())
+        }
+
+        fn on_acknowledgement_packet_validate(
+            &self,
+            _packet: &Packet,
+            _acknowledgement: &Acknowledgement,
+            _relayer: &Signer,
+        ) -> Result<(), PacketError> {
+            Ok(())
+        }
+
+        fn on_acknowledgement_packet_execute(
+            &mut self,
+            _packet: &Packet,
+            _acknowledgement: &Acknowledgement,
+            _relayer: &Signer,
+        ) -> (ModuleExtras, Result<(), PacketError>) {
+            (ModuleExtras::empty(), Ok(()))
+        }
+
+        fn on_timeout_packet_validate(
+            &self,
+            _packet: &Packet,
+            _relayer: &Signer,
+        ) -> Result<(), PacketError> {
+            Ok(())
+        }
+
+        fn on_timeout_packet_execute(
+            &mut self,
+            _packet: &Packet,
+            _relayer: &Signer,
+        ) -> (ModuleExtras, Result<(), PacketError>) {
+            (ModuleExtras::empty(), Ok(()))
+        }
+    }
+}
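The escrow, mint and burn executors in the module above all branch on whether
the coin's denom carries a trace path. The same branch in miniature, with a
plain struct standing in for `PrefixedDenom` (not part of the patch):

    // Stand-in for `PrefixedDenom`: only the trace path matters here.
    struct Denom {
        trace_path: Vec<String>, // e.g. ["transfer/channel-0"]
    }

    fn balance_key_kind(denom: &Denom) -> &'static str {
        if denom.trace_path.is_empty() {
            "balance" // native token: plain `token::balance_key`
        } else {
            "multitoken" // IBC token: `token::multitoken_balance_key`
        }
    }

    fn main() {
        let native = Denom { trace_path: vec![] };
        let ibc = Denom { trace_path: vec!["transfer/channel-0".into()] };
        assert_eq!(balance_key_kind(&native), "balance");
        assert_eq!(balance_key_kind(&ibc), "multitoken");
    }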
diff --git a/core/src/ledger/ibc/context/validation.rs b/core/src/ledger/ibc/context/validation.rs
new file mode 100644
index 00000000000..9e138208830
--- /dev/null
+++ b/core/src/ledger/ibc/context/validation.rs
@@ -0,0 +1,530 @@
+//! ValidationContext implementation for IBC
+
+use prost::Message;
+
+use super::super::{IbcActions, IbcCommonContext};
+use crate::ibc::clients::ics07_tendermint::consensus_state::ConsensusState as TmConsensusState;
+#[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))]
+use crate::ibc::core::ics02_client::client_state::downcast_client_state;
+use crate::ibc::core::ics02_client::client_state::ClientState;
+use crate::ibc::core::ics02_client::consensus_state::ConsensusState;
+use crate::ibc::core::ics02_client::error::ClientError;
+use crate::ibc::core::ics03_connection::connection::ConnectionEnd;
+use crate::ibc::core::ics04_channel::channel::ChannelEnd;
+use crate::ibc::core::ics04_channel::commitment::{
+    AcknowledgementCommitment, PacketCommitment,
+};
+use crate::ibc::core::ics04_channel::error::{ChannelError, PacketError};
+use crate::ibc::core::ics04_channel::packet::{Receipt, Sequence};
+use crate::ibc::core::ics23_commitment::commitment::CommitmentPrefix;
+use crate::ibc::core::ics23_commitment::specs::ProofSpecs;
+use crate::ibc::core::ics24_host::identifier::{
+    ChainId, ClientId, ConnectionId,
+};
+use crate::ibc::core::ics24_host::path::{
+    AckPath, ChannelEndPath, ClientConsensusStatePath, CommitmentPath, Path,
+    ReceiptPath, SeqAckPath, SeqRecvPath, SeqSendPath,
+};
+use crate::ibc::core::{ContextError, ValidationContext};
+use crate::ibc::hosts::tendermint::ValidateSelfClientContext;
+#[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))]
+use crate::ibc::mock::client_state::MockClientState;
+use crate::ibc::timestamp::Timestamp;
+use crate::ibc::Height;
+use crate::ibc_proto::google::protobuf::Any;
+use crate::ibc_proto::protobuf::Protobuf;
+use crate::ledger::ibc::storage;
+use crate::ledger::parameters::storage::get_max_expected_time_per_block_key;
+use crate::tendermint::Time as TmTime;
+use crate::tendermint_proto::Protobuf as TmProtobuf;
+use crate::types::storage::{BlockHeight, Key};
+use crate::types::time::DurationSecs;
+
+const COMMITMENT_PREFIX: &[u8] = b"ibc";
+
+impl<C> ValidationContext for IbcActions<'_, C>
+where
+    C: IbcCommonContext,
+{
+    fn client_state(
+        &self,
+        client_id: &ClientId,
+    ) -> Result<Box<dyn ClientState>, ContextError> {
+        self.ctx.borrow().client_state(client_id)
+    }
+
+    fn decode_client_state(
+        &self,
+        client_state: Any,
+    ) -> Result<Box<dyn ClientState>, ContextError> {
+        self.ctx.borrow().decode_client_state(client_state)
+    }
+
+    fn consensus_state(
+        &self,
+        client_cons_state_path: &ClientConsensusStatePath,
+    ) -> Result<Box<dyn ConsensusState>, ContextError> {
+        self.ctx.borrow().consensus_state(client_cons_state_path)
+    }
+
+    fn next_consensus_state(
+        &self,
+        client_id: &ClientId,
+        height: &Height,
+    ) -> Result<Option<Box<dyn ConsensusState>>, ContextError> {
+        let prefix = storage::consensus_state_prefix(client_id);
+        // keep the borrow alive for the iterator
+        let ctx = self.ctx.borrow();
+        let mut iter = ctx.iter_prefix(&prefix).map_err(|_| {
+            ContextError::ClientError(ClientError::Other {
+                description: format!(
+                    "Reading the consensus state failed: ID {}, height {}",
+                    client_id, height,
+                ),
+            })
+        })?;
+        let mut lowest_height_value = None;
+        while let Some((key, value)) =
+            ctx.iter_next(&mut iter).map_err(|_| {
+                ContextError::ClientError(ClientError::Other {
+                    description: format!(
+                        "Iterating consensus states failed: ID {}, height {}",
+                        client_id, height,
+                    ),
+                })
+            })?
+        {
+            let key = Key::parse(key).expect("the key should be parsable");
+            let consensus_height = storage::consensus_height(&key)
+                .expect("the key should have a height");
+            if consensus_height > *height {
+                lowest_height_value = match lowest_height_value {
+                    Some((lowest, _)) if consensus_height < lowest => {
+                        Some((consensus_height, value))
+                    }
+                    Some(_) => continue,
+                    None => Some((consensus_height, value)),
+                };
+            }
+        }
+        match lowest_height_value {
+            Some((_, value)) => {
+                let any = Any::decode(&value[..]).map_err(|e| {
+                    ContextError::ClientError(ClientError::Decode(e))
+                })?;
+                let cs = self.ctx.borrow().decode_consensus_state(any)?;
+                Ok(Some(cs))
+            }
+            None => Ok(None),
+        }
+    }
+
+    fn prev_consensus_state(
+        &self,
+        client_id: &ClientId,
+        height: &Height,
+    ) -> Result<Option<Box<dyn ConsensusState>>, ContextError> {
+        let prefix = storage::consensus_state_prefix(client_id);
+        // keep the borrow alive for the iterator
+        let ctx = self.ctx.borrow();
+        let mut iter = ctx.iter_prefix(&prefix).map_err(|_| {
+            ContextError::ClientError(ClientError::Other {
+                description: format!(
+                    "Reading the consensus state failed: ID {}, height {}",
+                    client_id, height,
+                ),
+            })
+        })?;
+        let mut highest_height_value = None;
+        while let Some((key, value)) =
+            ctx.iter_next(&mut iter).map_err(|_| {
+                ContextError::ClientError(ClientError::Other {
+                    description: format!(
+                        "Iterating consensus states failed: ID {}, height {}",
+                        client_id, height,
+                    ),
+                })
+            })?
+        {
+            let key = Key::parse(key).expect("the key should be parsable");
+            let consensus_height = storage::consensus_height(&key)
+                .expect("the key should have a height");
+            if consensus_height < *height {
+                highest_height_value = match highest_height_value {
+                    Some((highest, _)) if consensus_height > highest => {
+                        Some((consensus_height, value))
+                    }
+                    Some(_) => continue,
+                    None => Some((consensus_height, value)),
+                };
+            }
+        }
+        match highest_height_value {
+            Some((_, value)) => {
+                let any = Any::decode(&value[..]).map_err(|e| {
+                    ContextError::ClientError(ClientError::Decode(e))
+                })?;
+                let cs = self.ctx.borrow().decode_consensus_state(any)?;
+                Ok(Some(cs))
+            }
+            None => Ok(None),
+        }
+    }
+
+    fn host_height(&self) -> Result<Height, ContextError> {
+        let height = self.ctx.borrow().get_height().map_err(|_| {
+            ContextError::ClientError(ClientError::Other {
+                description: "Getting the host height failed".to_string(),
+            })
+        })?;
+        // the revision number is always 0
+        Height::new(0, height.0).map_err(ContextError::ClientError)
+    }
+
+    fn host_timestamp(&self) -> Result<Timestamp, ContextError> {
+        let height = self.host_height()?;
+        let height = BlockHeight(height.revision_height());
+        let header = self
+            .ctx
+            .borrow()
+            .get_header(height)
+            .map_err(|_| {
+                ContextError::ClientError(ClientError::Other {
+                    description: "Getting the host header failed".to_string(),
+                })
+            })?
+            .ok_or_else(|| {
+                ContextError::ClientError(ClientError::Other {
+                    description: "No host header".to_string(),
+                })
+            })?;
+        let time = TmTime::try_from(header.time).map_err(|_| {
+            ContextError::ClientError(ClientError::Other {
+                description: "Converting to Tendermint time failed".to_string(),
+            })
+        })?;
+        Ok(time.into())
+    }
+
+    fn host_consensus_state(
+        &self,
+        height: &Height,
+    ) -> Result<Box<dyn ConsensusState>, ContextError> {
+        let height = BlockHeight(height.revision_height());
+        let header = self
+            .ctx
+            .borrow()
+            .get_header(height)
+            .map_err(|_| {
+                ContextError::ClientError(ClientError::Other {
+                    description: format!(
+                        "Getting the header on this chain failed: Height {}",
+                        height
+                    ),
+                })
+            })?
+            .ok_or_else(|| {
+                ContextError::ClientError(ClientError::Other {
+                    description: "No host header".to_string(),
+                })
+            })?;
+        let commitment_root = header.hash.to_vec().into();
+        let time = header
+            .time
+            .try_into()
+            .expect("The time should be converted");
+        let next_validators_hash = header
+            .next_validators_hash
+            .try_into()
+            .expect("The hash should be converted");
+        let consensus_state =
+            TmConsensusState::new(commitment_root, time, next_validators_hash);
+        Ok(consensus_state.into_box())
+    }
+
+    fn client_counter(&self) -> Result<u64, ContextError> {
+        let key = storage::client_counter_key();
+        self.ctx.borrow().read_counter(&key)
+    }
+
+    fn connection_end(
+        &self,
+        connection_id: &ConnectionId,
+    ) -> Result<ConnectionEnd, ContextError> {
+        self.ctx.borrow().connection_end(connection_id)
+    }
+
+    fn validate_self_client(
+        &self,
+        counterparty_client_state: Any,
+    ) -> Result<(), ContextError> {
+        #[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))]
+        {
+            let client_state = self
+                .decode_client_state(counterparty_client_state.clone())
+                .map_err(|_| ClientError::Other {
+                    description: "Decoding the client state failed".to_string(),
+                })?;
+
+            if let Some(_mock) =
+                downcast_client_state::<MockClientState>(client_state.as_ref())
+            {
+                return Ok(());
+            }
+        }
+
+        ValidateSelfClientContext::validate_self_tendermint_client(
+            self,
+            counterparty_client_state,
+        )
+    }
+
+    fn commitment_prefix(&self) -> CommitmentPrefix {
+        CommitmentPrefix::try_from(COMMITMENT_PREFIX.to_vec())
+            .expect("the prefix should be parsable")
+    }
+
+    fn connection_counter(&self) -> Result<u64, ContextError> {
+        let key = storage::connection_counter_key();
+        self.ctx.borrow().read_counter(&key)
+    }
+
+    fn channel_end(
+        &self,
+        channel_end_path: &ChannelEndPath,
+    ) -> Result<ChannelEnd, ContextError> {
+        self.ctx.borrow().channel_end(channel_end_path)
+    }
+
+    fn get_next_sequence_send(
+        &self,
+        path: &SeqSendPath,
+    ) -> Result<Sequence, ContextError> {
+        self.ctx.borrow().get_next_sequence_send(path)
+    }
+
+    fn get_next_sequence_recv(
+        &self,
+        path: &SeqRecvPath,
+    ) -> Result<Sequence, ContextError> {
+        let path = Path::SeqRecv(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the next sequence recv shouldn't fail");
+        self.ctx.borrow().read_sequence(&key)
+    }
+
+    fn get_next_sequence_ack(
+        &self,
+        path: &SeqAckPath,
+    ) -> Result<Sequence, ContextError> {
+        let path = Path::SeqAck(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the next sequence ack shouldn't fail");
+        self.ctx.borrow().read_sequence(&key)
+    }
+
+    fn get_packet_commitment(
+        &self,
+        path: &CommitmentPath,
+    ) -> Result<PacketCommitment, ContextError> {
+        let path = Path::Commitment(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the commitment shouldn't fail");
+        match self.ctx.borrow().read(&key) {
+            Ok(Some(value)) => Ok(value.into()),
+            Ok(None) => {
+                let port_channel_sequence_id =
+                    storage::port_channel_sequence_id(&key)
+                        .expect("invalid key");
+                Err(ContextError::PacketError(
+                    PacketError::PacketCommitmentNotFound {
+                        sequence: port_channel_sequence_id.2,
+                    },
+                ))
+            }
+            Err(_) => Err(ContextError::PacketError(PacketError::Channel(
+                ChannelError::Other {
+                    description: format!(
+                        "Reading commitment failed: Key {}",
+                        key,
+                    ),
+                },
+            ))),
+        }
+    }
+
+    fn get_packet_receipt(
+        &self,
+        path: &ReceiptPath,
+    ) -> Result<Receipt, ContextError> {
+        let path = Path::Receipt(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the receipt shouldn't fail");
+        match self.ctx.borrow().read(&key) {
+            Ok(Some(_)) => Ok(Receipt::Ok),
+            Ok(None) => {
+                let port_channel_sequence_id =
+                    storage::port_channel_sequence_id(&key)
+                        .expect("invalid key");
+                Err(ContextError::PacketError(
+                    PacketError::PacketReceiptNotFound {
+                        sequence: port_channel_sequence_id.2,
+                    },
+                ))
+            }
+            Err(_) => Err(ContextError::PacketError(PacketError::Channel(
+                ChannelError::Other {
+                    description: format!(
+                        "Reading the receipt failed: Key {}",
+                        key,
+                    ),
+                },
+            ))),
+        }
+    }
+
+    fn get_packet_acknowledgement(
+        &self,
+        path: &AckPath,
+    ) -> Result<AcknowledgementCommitment, ContextError> {
+        let path = Path::Ack(path.clone());
+        let key = storage::ibc_key(path.to_string())
+            .expect("Creating a key for the packet ack shouldn't fail");
+        match self.ctx.borrow().read(&key) {
+            Ok(Some(value)) => Ok(value.into()),
+            Ok(None) => {
+                let port_channel_sequence_id =
+                    storage::port_channel_sequence_id(&key)
+                        .expect("invalid key");
+                Err(ContextError::PacketError(
+                    PacketError::PacketAcknowledgementNotFound {
+                        sequence: port_channel_sequence_id.2,
+                    },
+                ))
+            }
+            Err(_) => Err(ContextError::PacketError(PacketError::Channel(
+                ChannelError::Other {
+                    description: format!(
+                        "Reading the ack commitment failed: Key {}",
+                        key
+                    ),
+                },
+            ))),
+        }
+    }
+
+    fn client_update_time(
+        &self,
+        client_id: &ClientId,
+        _height: &Height,
+    ) -> Result<Timestamp, ContextError> {
+        let key = storage::client_update_timestamp_key(client_id);
+        match self.ctx.borrow().read(&key) {
+            Ok(Some(value)) => {
+                let time = TmTime::decode_vec(&value).map_err(|_| {
+                    ContextError::ClientError(ClientError::Other {
+                        description: format!(
+                            "Decoding the client update time failed: ID {}",
+                            client_id
+                        ),
+                    })
+                })?;
+                Ok(time.into())
+            }
+            Ok(None) => {
+                Err(ContextError::ClientError(ClientError::ClientSpecific {
+                    description: format!(
+                        "The client update time doesn't exist: ID {}",
+                        client_id
+                    ),
+                }))
+            }
+            Err(_) => Err(ContextError::ClientError(ClientError::Other {
+                description: format!(
+                    "Reading the client update time failed: ID {}",
+                    client_id,
+                ),
+            })),
+        }
+    }
+
+    fn client_update_height(
+        &self,
+        client_id: &ClientId,
+        _height: &Height,
+    ) -> Result<Height, ContextError> {
+        let key = storage::client_update_height_key(client_id);
+        match self.ctx.borrow().read(&key) {
+            Ok(Some(value)) => Height::decode_vec(&value).map_err(|e| {
+                ContextError::ClientError(ClientError::Other {
+                    description: format!(
+                        "Decoding the height failed: Key {}, error {}",
+                        key, e
+                    ),
+                })
+            }),
+            Ok(None) => {
+                Err(ContextError::ClientError(ClientError::ClientSpecific {
+                    description: format!(
+                        "The client update height doesn't exist: ID {}",
+                        client_id
+                    ),
+                }))
+            }
+            Err(_) => Err(ContextError::ClientError(ClientError::Other {
+                description: format!(
+                    "Reading the client update height failed: ID {}",
+                    client_id,
+                ),
+            })),
+        }
+    }
+
+    fn channel_counter(&self) -> Result<u64, ContextError> {
+        let key = storage::channel_counter_key();
+        self.ctx.borrow().read_counter(&key)
+    }
+
+    fn max_expected_time_per_block(&self) -> core::time::Duration {
+        let key = get_max_expected_time_per_block_key();
+        match self.ctx.borrow().read(&key) {
+            Ok(Some(value)) => {
+                crate::ledger::storage::types::decode::<DurationSecs>(value)
+                    .expect("Decoding max_expected_time_per_block failed")
+                    .into()
+            }
+            _ => unreachable!("The parameter should be initialized"),
+        }
+    }
+}
+
+impl<C> ValidateSelfClientContext for IbcActions<'_, C>
+where
+    C: IbcCommonContext,
+{
+    fn chain_id(&self) -> &ChainId {
+        &self.validation_params.chain_id
+    }
+
+    fn host_current_height(&self) -> Height {
+        let height = self
+            .ctx
+            .borrow()
+            .get_height()
+            .expect("The height should exist");
+        Height::new(0, height.0).expect("The conversion shouldn't fail")
+    }
+
+    fn proof_specs(&self) -> &ProofSpecs {
+        &self.validation_params.proof_specs
+    }
+
+    fn unbonding_period(&self) -> core::time::Duration {
+        self.validation_params.unbonding_period
+    }
+
+    /// Returns the host upgrade path. May be empty.
+    fn upgrade_path(&self) -> &[String] {
+        &self.validation_params.upgrade_path
+    }
+}
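`next_consensus_state` and `prev_consensus_state` in the file above do a
linear scan for the lowest stored height strictly above, respectively the
highest strictly below, the queried height. The same search in miniature over
plain `u64` heights (not part of the patch):

    // Lowest stored height strictly greater than `target`,
    // as in `next_consensus_state`.
    fn next_height(stored: &[u64], target: u64) -> Option<u64> {
        stored.iter().copied().filter(|h| *h > target).min()
    }

    // Highest stored height strictly less than `target`,
    // as in `prev_consensus_state`.
    fn prev_height(stored: &[u64], target: u64) -> Option<u64> {
        stored.iter().copied().filter(|h| *h < target).max()
    }

    fn main() {
        let stored = [3, 7, 10];
        assert_eq!(next_height(&stored, 7), Some(10));
        assert_eq!(prev_height(&stored, 7), Some(3));
    }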
Any::decode(tx_data).map_err(Error::DecodingData)?; - msg.try_into() - } - - /// Get the IBC message of CreateClient - pub fn msg_create_any_client(self) -> Result { - let ics02_msg = self.ics02_msg()?; - downcast!(ics02_msg => ClientMsg::CreateClient).ok_or_else(|| { - Error::Downcast( - "The message is not a CreateClient message".to_string(), - ) - }) - } - - /// Get the IBC message of UpdateClient - pub fn msg_update_any_client(self) -> Result { - let ics02_msg = self.ics02_msg()?; - downcast!(ics02_msg => ClientMsg::UpdateClient).ok_or_else(|| { - Error::Downcast( - "The message is not a UpdateClient message".to_string(), - ) - }) - } - - /// Get the IBC message of Misbehaviour - pub fn msg_submit_any_misbehaviour( - self, - ) -> Result { - let ics02_msg = self.ics02_msg()?; - downcast!(ics02_msg => ClientMsg::Misbehaviour).ok_or_else(|| { - Error::Downcast( - "The message is not a Misbehaviour message".to_string(), - ) - }) - } - - /// Get the IBC message of UpgradeClient - pub fn msg_upgrade_any_client(self) -> Result { - let ics02_msg = self.ics02_msg()?; - downcast!(ics02_msg => ClientMsg::UpgradeClient).ok_or_else(|| { - Error::Downcast( - "The message is not a UpgradeClient message".to_string(), - ) - }) - } - - /// Get the IBC message of ConnectionOpenInit - pub fn msg_connection_open_init(self) -> Result { - let ics03_msg = self.ics03_msg()?; - downcast!(ics03_msg => ConnectionMsg::ConnectionOpenInit).ok_or_else( - || { - Error::Downcast( - "The message is not a ConnectionOpenInit message" - .to_string(), - ) - }, - ) - } - - /// Get the IBC message of ConnectionOpenTry - pub fn msg_connection_open_try(self) -> Result> { - let ics03_msg = self.ics03_msg()?; - downcast!(ics03_msg => ConnectionMsg::ConnectionOpenTry).ok_or_else( - || { - Error::Downcast( - "The message is not a ConnectionOpenTry message" - .to_string(), - ) - }, - ) - } - - /// Get the IBC message of ConnectionOpenAck - pub fn msg_connection_open_ack(self) -> Result> { - let ics03_msg = self.ics03_msg()?; - downcast!(ics03_msg => ConnectionMsg::ConnectionOpenAck).ok_or_else( - || { - Error::Downcast( - "The message is not a ConnectionOpenAck message" - .to_string(), - ) - }, - ) - } - - /// Get the IBC message of ConnectionOpenConfirm - pub fn msg_connection_open_confirm( - self, - ) -> Result { - let ics03_msg = self.ics03_msg()?; - downcast!(ics03_msg => ConnectionMsg::ConnectionOpenConfirm).ok_or_else( - || { - Error::Downcast( - "The message is not a ConnectionOpenAck message" - .to_string(), - ) - }, - ) - } - - /// Get the IBC message of ChannelOpenInit - pub fn msg_channel_open_init(self) -> Result { - let ics04_msg = self.ics04_channel_msg()?; - downcast!(ics04_msg => ChannelMsg::ChannelOpenInit).ok_or_else(|| { - Error::Downcast( - "The message is not a ChannelOpenInit message".to_string(), - ) - }) - } - - /// Get the IBC message of ChannelOpenTry - pub fn msg_channel_open_try(self) -> Result { - let ics04_msg = self.ics04_channel_msg()?; - downcast!(ics04_msg => ChannelMsg::ChannelOpenTry).ok_or_else(|| { - Error::Downcast( - "The message is not a ChannelOpenTry message".to_string(), - ) - }) - } - - /// Get the IBC message of ChannelOpenAck - pub fn msg_channel_open_ack(self) -> Result { - let ics04_msg = self.ics04_channel_msg()?; - downcast!(ics04_msg => ChannelMsg::ChannelOpenAck).ok_or_else(|| { - Error::Downcast( - "The message is not a ChannelOpenAck message".to_string(), - ) - }) - } - - /// Get the IBC message of ChannelOpenConfirm - pub fn msg_channel_open_confirm(self) -> Result { - let 
ics04_msg = self.ics04_channel_msg()?; - downcast!(ics04_msg => ChannelMsg::ChannelOpenConfirm).ok_or_else( - || { - Error::Downcast( - "The message is not a ChannelOpenConfirm message" - .to_string(), - ) - }, - ) - } - - /// Get the IBC message of ChannelCloseInit - pub fn msg_channel_close_init(self) -> Result { - let ics04_msg = self.ics04_channel_msg()?; - downcast!(ics04_msg => ChannelMsg::ChannelCloseInit).ok_or_else(|| { - Error::Downcast( - "The message is not a ChannelCloseInit message".to_string(), - ) - }) - } - - /// Get the IBC message of ChannelCloseConfirm - pub fn msg_channel_close_confirm(self) -> Result { - let ics04_msg = self.ics04_channel_msg()?; - downcast!(ics04_msg => ChannelMsg::ChannelCloseConfirm).ok_or_else( - || { - Error::Downcast( - "The message is not a ChannelCloseInit message".to_string(), - ) - }, - ) - } - - /// Get the IBC message of RecvPacket - pub fn msg_recv_packet(self) -> Result { - let ics04_msg = self.ics04_packet_msg()?; - downcast!(ics04_msg => PacketMsg::RecvPacket).ok_or_else(|| { - Error::Downcast( - "The message is not a RecvPacket message".to_string(), - ) - }) - } - - /// Get the IBC message of Acknowledgement - pub fn msg_acknowledgement(self) -> Result { - let ics04_msg = self.ics04_packet_msg()?; - downcast!(ics04_msg => PacketMsg::AckPacket).ok_or_else(|| { - Error::Downcast( - "The message is not an Acknowledgement message".to_string(), - ) - }) - } - - /// Get the IBC message of TimeoutPacket - pub fn msg_timeout(self) -> Result { - let ics04_msg = self.ics04_packet_msg()?; - downcast!(ics04_msg => PacketMsg::ToPacket).ok_or_else(|| { - Error::Downcast( - "The message is not a TimeoutPacket message".to_string(), - ) - }) - } - - /// Get the IBC message of TimeoutPacketOnClose - pub fn msg_timeout_on_close(self) -> Result { - let ics04_msg = self.ics04_packet_msg()?; - downcast!(ics04_msg => PacketMsg::ToClosePacket).ok_or_else(|| { - Error::Downcast( - "The message is not a TimeoutPacketOnClose message".to_string(), - ) - }) - } - - /// Get the IBC message of ICS20 - pub fn msg_transfer(self) -> Result { - downcast!(self.0 => Ics26Envelope::Ics20Msg).ok_or_else(|| { - Error::Downcast("The message is not an ICS20 message".to_string()) - }) - } - - fn ics02_msg(self) -> Result { - downcast!(self.0 => Ics26Envelope::Ics2Msg).ok_or_else(|| { - Error::Downcast("The message is not an ICS02 message".to_string()) - }) - } - - fn ics03_msg(self) -> Result { - downcast!(self.0 => Ics26Envelope::Ics3Msg).ok_or_else(|| { - Error::Downcast("The message is not an ICS03 message".to_string()) - }) - } - - fn ics04_channel_msg(self) -> Result { - downcast!(self.0 => Ics26Envelope::Ics4ChannelMsg).ok_or_else(|| { - Error::Downcast( - "The message is not an ICS04 channel message".to_string(), - ) - }) - } - - fn ics04_packet_msg(self) -> Result { - downcast!(self.0 => Ics26Envelope::Ics4PacketMsg).ok_or_else(|| { - Error::Downcast( - "The message is not an ICS04 packet message".to_string(), - ) - }) - } -} - -/// Receipt for a packet -#[derive(Clone, Debug)] -pub struct PacketReceipt(pub Receipt); - -impl PacketReceipt { - /// Return bytes - pub fn as_bytes(&self) -> &[u8] { - // same as ibc-go - &[1_u8] - } -} - -impl Default for PacketReceipt { - fn default() -> Self { - Self(Receipt::Ok) - } -} - -/// Acknowledgement for a packet -#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "lowercase")] -pub enum PacketAck { - /// Success Acknowledgement - Result(String), - /// Error Acknowledgement - Error(String), -} - 
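Before this type disappears, it is worth recording the wire format its `serde` attributes produced, since it matched ibc-go's acknowledgement JSON. A standalone sketch (requires only the `serde` and `serde_json` crates):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
enum PacketAck {
    Result(String),
    Error(String),
}

fn main() {
    // Success acks carry the base64 payload "AQ==", as in ibc-go
    let ok = serde_json::to_string(&PacketAck::Result("AQ==".into())).unwrap();
    assert_eq!(ok, r#"{"result":"AQ=="}"#);
    // Error acks carry a human-readable message
    let err = serde_json::to_string(&PacketAck::Error("bad packet".into())).unwrap();
    assert_eq!(err, r#"{"error":"bad packet"}"#);
}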
-/// Success acknowledgement -const ACK_SUCCESS_B64: &str = "AQ=="; -/// Error acknowledgement -const ACK_ERR_STR: &str = - "error handling packet on destination chain: see events for details"; - -// TODO temporary type. add a new type for ack to ibc-rs -impl PacketAck { - /// Success acknowledgement - pub fn result_success() -> Self { - Self::Result(ACK_SUCCESS_B64.to_string()) - } - - /// Acknowledgement with an error - pub fn result_error(err: String) -> Self { - Self::Error(format!("{}: {}", ACK_ERR_STR, err)) - } - - /// Check if the ack is for success - pub fn is_success(&self) -> bool { - match self { - Self::Result(_) => true, - Self::Error(_) => false, - } - } - - /// Encode the ack - pub fn encode_to_vec(&self) -> Vec { - serde_json::to_vec(&self) - .expect("Encoding acknowledgement shouldn't fail") - } -} - -impl TryFrom for PacketAck { - type Error = Error; - - fn try_from(ack: Acknowledgement) -> Result { - serde_json::from_slice(&ack.into_bytes()) - .map_err(Error::DecodingJsonData) - } -} - -// for the string to be used by the current reader -impl Display for PacketAck { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", serde_json::to_string(&self).unwrap()) - } -} - -// TODO temporary type. add a new type for packet data to ibc-rs -/// Data to transfer a token -#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] -pub struct FungibleTokenPacketData { - /// the token denomination to be transferred - pub denom: String, - /// the token amount to be transferred - pub amount: String, - /// the sender address - pub sender: String, - /// the recipient address on the destination chain - pub receiver: String, -} - -impl From for FungibleTokenPacketData { - fn from(msg: MsgTransfer) -> Self { - // TODO validation - let token = msg.token.unwrap(); - Self { - denom: token.denom, - amount: token.amount, - sender: msg.sender.to_string(), - receiver: msg.receiver.to_string(), - } - } -} - -impl Display for FungibleTokenPacketData { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", serde_json::to_string(self).unwrap()) - } -} diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index f98fb2e4327..9a184ee51e3 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -1,5 +1,240 @@ //! 
IBC library code -pub mod actions; -pub mod data; +pub mod context; pub mod storage; + +use std::cell::RefCell; +use std::collections::HashMap; +use std::fmt::Debug; +use std::rc::Rc; +use std::time::Duration; + +pub use context::common::IbcCommonContext; +pub use context::storage::{IbcStorageContext, ProofSpec}; +pub use context::transfer_mod::{ModuleWrapper, TransferModule}; +use prost::Message; +use thiserror::Error; + +use crate::ibc::applications::transfer::denom::TracePrefix; +use crate::ibc::applications::transfer::error::TokenTransferError; +use crate::ibc::applications::transfer::msgs::transfer::{ + MsgTransfer, TYPE_URL as MSG_TRANSFER_TYPE_URL, +}; +use crate::ibc::applications::transfer::packet::PacketData; +use crate::ibc::applications::transfer::relay::send_transfer::{ + send_transfer_execute, send_transfer_validate, +}; +use crate::ibc::core::context::Router; +use crate::ibc::core::ics04_channel::msgs::PacketMsg; +use crate::ibc::core::ics23_commitment::specs::ProofSpecs; +use crate::ibc::core::ics24_host::identifier::{ChainId as IbcChainId, PortId}; +use crate::ibc::core::ics26_routing::context::{Module, ModuleId}; +use crate::ibc::core::ics26_routing::error::RouterError; +use crate::ibc::core::ics26_routing::msgs::MsgEnvelope; +use crate::ibc::core::{execute, validate}; +use crate::ibc_proto::google::protobuf::Any; +use crate::types::chain::ChainId; + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("Decoding IBC data error: {0}")] + DecodingData(prost::DecodeError), + #[error("Decoding message error: {0}")] + DecodingMessage(RouterError), + #[error("IBC storage error: {0}")] + IbcStorage(storage::Error), + #[error("IBC execution error: {0}")] + Execution(RouterError), + #[error("IBC token transfer error: {0}")] + TokenTransfer(TokenTransferError), + #[error("IBC validation error: {0}")] + Validation(RouterError), + #[error("IBC module doesn't exist")] + NoModule, + #[error("Denom error: {0}")] + Denom(String), + #[error("Invalid chain ID: {0}")] + ChainId(ChainId), +} + +/// IBC actions to handle IBC operations +#[derive(Debug)] +pub struct IbcActions<'a, C> +where + C: IbcCommonContext, +{ + ctx: Rc>, + modules: HashMap>, + ports: HashMap, + validation_params: ValidationParams, +} + +impl<'a, C> IbcActions<'a, C> +where + C: IbcCommonContext + Debug, +{ + /// Make new IBC actions + pub fn new(ctx: Rc>) -> Self { + Self { + ctx, + modules: HashMap::new(), + ports: HashMap::new(), + validation_params: ValidationParams::default(), + } + } + + /// Set the validation parameters + pub fn set_validation_params(&mut self, params: ValidationParams) { + self.validation_params = params; + } + + /// Add TokenTransfer route + pub fn add_transfer_route( + &mut self, + module_id: ModuleId, + module: impl ModuleWrapper + 'a, + ) { + self.modules.insert(module_id.clone(), Rc::new(module)); + self.ports.insert(PortId::transfer(), module_id); + } + + fn get_route_by_port(&self, port_id: &PortId) -> Option<&dyn Module> { + self.lookup_module_by_port(port_id) + .and_then(|id| self.get_route(&id)) + } + + fn get_route_mut_by_port( + &mut self, + port_id: &PortId, + ) -> Option<&mut dyn Module> { + self.lookup_module_by_port(port_id) + .and_then(|id| self.get_route_mut(&id)) + } + + /// Execute according to the message in an IBC transaction or VP + pub fn execute(&mut self, tx_data: &[u8]) -> Result<(), Error> { + let msg = Any::decode(tx_data).map_err(Error::DecodingData)?; + match msg.type_url.as_str() { + MSG_TRANSFER_TYPE_URL => { + let msg = + 
MsgTransfer::try_from(msg).map_err(Error::TokenTransfer)?; + let port_id = msg.port_id_on_a.clone(); + match self.get_route_mut_by_port(&port_id) { + Some(_module) => { + let mut module = TransferModule::new(self.ctx.clone()); + // restore the denom if it is hashed + let msg = self.restore_denom(msg)?; + send_transfer_execute(&mut module, msg) + .map_err(Error::TokenTransfer) + } + None => Err(Error::NoModule), + } + } + _ => { + execute(self, msg.clone()).map_err(Error::Execution)?; + // the current ibc-rs execution doesn't store the denom for the + // token hash when transfer with MsgRecvPacket + self.store_denom(msg) + } + } + } + + /// Restore the denom when it is hashed, i.e. the denom is `ibc/{hash}`. + fn restore_denom(&self, msg: MsgTransfer) -> Result { + let mut msg = msg; + // lookup the original denom with the IBC token hash + if let Some(token_hash) = + storage::token_hash_from_denom(&msg.token.denom).map_err(|e| { + Error::Denom(format!("Invalid denom: error {}", e)) + })? + { + let denom_key = storage::ibc_denom_key(token_hash); + let denom = match self.ctx.borrow().read(&denom_key) { + Ok(Some(v)) => String::from_utf8(v).map_err(|e| { + Error::Denom(format!( + "Decoding the denom string failed: {}", + e + )) + })?, + _ => { + return Err(Error::Denom(format!( + "No original denom: denom_key {}", + denom_key + ))); + } + }; + msg.token.denom = denom; + } + Ok(msg) + } + + /// Store the denom when transfer with MsgRecvPacket + fn store_denom(&mut self, msg: Any) -> Result<(), Error> { + let envelope = MsgEnvelope::try_from(msg).map_err(|e| { + Error::Denom(format!("Decoding the message failed: {}", e)) + })?; + match envelope { + MsgEnvelope::Packet(PacketMsg::Recv(msg)) => { + let data = match serde_json::from_slice::( + &msg.packet.data, + ) { + Ok(data) => data, + // not token transfer data + Err(_) => return Ok(()), + }; + let prefix = TracePrefix::new( + msg.packet.port_id_on_b.clone(), + msg.packet.chan_id_on_b, + ); + let mut coin = data.token; + coin.denom.add_trace_prefix(prefix); + let trace_hash = storage::calc_hash(coin.denom.to_string()); + self.ctx + .borrow_mut() + .store_denom(trace_hash, coin.denom) + .map_err(|e| { + Error::Denom(format!("Write the denom failed: {}", e)) + }) + } + // other messages + _ => Ok(()), + } + } + + /// Validate according to the message in IBC VP + pub fn validate(&self, tx_data: &[u8]) -> Result<(), Error> { + let msg = Any::decode(tx_data).map_err(Error::DecodingData)?; + match msg.type_url.as_str() { + MSG_TRANSFER_TYPE_URL => { + let msg = + MsgTransfer::try_from(msg).map_err(Error::TokenTransfer)?; + let port_id = msg.port_id_on_a.clone(); + match self.get_route_by_port(&port_id) { + Some(_module) => { + let module = TransferModule::new(self.ctx.clone()); + // restore the denom if it is hashed + let msg = self.restore_denom(msg)?; + send_transfer_validate(&module, msg) + .map_err(Error::TokenTransfer) + } + None => Err(Error::NoModule), + } + } + _ => validate(self, msg).map_err(Error::Validation), + } + } +} + +#[derive(Debug, Default)] +/// Parameters for validation +pub struct ValidationParams { + /// Chain ID + pub chain_id: IbcChainId, + /// IBC proof specs + pub proof_specs: ProofSpecs, + /// Unbonding period + pub unbonding_period: Duration, + /// Upgrade path + pub upgrade_path: Vec, +} diff --git a/core/src/ledger/ibc/storage.rs b/core/src/ledger/ibc/storage.rs index 478a9e10f34..fab224e7554 100644 --- a/core/src/ledger/ibc/storage.rs +++ b/core/src/ledger/ibc/storage.rs @@ -7,14 +7,13 @@ use thiserror::Error; use 
crate::ibc::core::ics02_client::height::Height;
 use crate::ibc::core::ics04_channel::packet::Sequence;
-use crate::ibc::core::ics05_port::capabilities::Capability;
 use crate::ibc::core::ics24_host::identifier::{
     ChannelId, ClientId, ConnectionId, PortChannelId, PortId,
 };
 use crate::ibc::core::ics24_host::path::{
-    AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath,
-    ClientTypePath, CommitmentsPath, ConnectionsPath, PortsPath, ReceiptsPath,
-    SeqAcksPath, SeqRecvsPath, SeqSendsPath,
+    AckPath, ChannelEndPath, ClientConnectionPath, ClientConsensusStatePath,
+    ClientStatePath, ClientTypePath, CommitmentPath, ConnectionPath, PortPath,
+    ReceiptPath, SeqAckPath, SeqRecvPath, SeqSendPath,
 };
 use crate::ibc::core::ics24_host::Path;
 use crate::types::address::{Address, InternalAddress, HASH_LEN};
@@ -23,8 +22,6 @@ use crate::types::storage::{self, DbKeySeg, Key, KeySeg};
 const CLIENTS_COUNTER: &str = "clients/counter";
 const CONNECTIONS_COUNTER: &str = "connections/counter";
 const CHANNELS_COUNTER: &str = "channelEnds/counter";
-const CAPABILITIES_INDEX: &str = "capabilities/index";
-const CAPABILITIES: &str = "capabilities";
 const DENOM: &str = "denom";
 /// Key segment for a multitoken related to IBC
 pub const MULTITOKEN_STORAGE_KEY: &str = "ibc";
@@ -40,6 +37,8 @@ pub enum Error {
     InvalidPortCapability(String),
     #[error("Denom error: {0}")]
     Denom(String),
+    #[error("IBC signer error: {0}")]
+    IbcSigner(String),
 }
 
 /// IBC storage functions result
@@ -106,13 +105,8 @@ pub fn is_channel_counter_key(key: &Key) -> bool {
     *key == channel_counter_key()
 }
 
-/// Check if the given key is a key of the capability index
-pub fn is_capability_index_key(key: &Key) -> bool {
-    *key == capability_index_key()
-}
-
 /// Returns a key of the IBC-related data
-fn ibc_key(path: impl AsRef<str>) -> Result<Key> {
+pub fn ibc_key(path: impl AsRef<str>) -> Result<Key> {
     let path = Key::parse(path).map_err(Error::StorageKey)?;
     let addr = Address::Internal(InternalAddress::Ibc);
     let key = Key::from(addr.to_db_key());
@@ -139,13 +133,6 @@ pub fn channel_counter_key() -> Key {
         .expect("Creating a key for the channel counter shouldn't fail")
 }
 
-/// Returns a key of the IBC capability index
-pub fn capability_index_key() -> Key {
-    let path = CAPABILITIES_INDEX.to_owned();
-    ibc_key(path)
-        .expect("Creating a key for the capability index shouldn't fail")
-}
-
 /// Returns a key for the client type
 pub fn client_type_key(client_id: &ClientId) -> Key {
     let path = Path::ClientType(ClientTypePath(client_id.clone()));
@@ -164,8 +151,8 @@ pub fn client_state_key(client_id: &ClientId) -> Key {
 pub fn consensus_state_key(client_id: &ClientId, height: Height) -> Key {
     let path = Path::ClientConsensusState(ClientConsensusStatePath {
         client_id: client_id.clone(),
-        epoch: height.revision_number,
-        height: height.revision_height,
+        epoch: height.revision_number(),
+        height: height.revision_height(),
     });
     ibc_key(path.to_string())
         .expect("Creating a key for the consensus state shouldn't fail")
@@ -187,39 +174,40 @@ pub fn consensus_state_prefix(client_id: &ClientId) -> Key {
 
 /// Returns a key for the connection end
 pub fn connection_key(conn_id: &ConnectionId) -> Key {
-    let path = Path::Connections(ConnectionsPath(conn_id.clone()));
+    let path = Path::Connection(ConnectionPath(conn_id.clone()));
     ibc_key(path.to_string())
         .expect("Creating a key for the connection shouldn't fail")
 }
 
 /// Returns a key for the channel end
 pub fn channel_key(port_channel_id: &PortChannelId) -> Key {
-    let path = Path::ChannelEnds(ChannelEndsPath(
+    let path = 
Path::ChannelEnd(ChannelEndPath( port_channel_id.port_id.clone(), - port_channel_id.channel_id, + port_channel_id.channel_id.clone(), )); ibc_key(path.to_string()) .expect("Creating a key for the channel shouldn't fail") } +/// Returns a key for the connection list +pub fn client_connections_key(client_id: &ClientId) -> Key { + let path = Path::ClientConnection(ClientConnectionPath(client_id.clone())); + ibc_key(path.to_string()) + .expect("Creating a key for the channel shouldn't fail") +} + /// Returns a key for the port pub fn port_key(port_id: &PortId) -> Key { - let path = Path::Ports(PortsPath(port_id.clone())); + let path = Path::Ports(PortPath(port_id.clone())); ibc_key(path.to_string()) .expect("Creating a key for the port shouldn't fail") } -/// Returns a key of the reversed map for IBC capabilities -pub fn capability_key(index: u64) -> Key { - let path = format!("{}/{}", CAPABILITIES, index); - ibc_key(path).expect("Creating a key for a capability shouldn't fail") -} - /// Returns a key for nextSequenceSend pub fn next_sequence_send_key(port_channel_id: &PortChannelId) -> Key { - let path = Path::SeqSends(SeqSendsPath( + let path = Path::SeqSend(SeqSendPath( port_channel_id.port_id.clone(), - port_channel_id.channel_id, + port_channel_id.channel_id.clone(), )); ibc_key(path.to_string()) .expect("Creating a key for nextSequenceSend shouldn't fail") @@ -227,9 +215,9 @@ pub fn next_sequence_send_key(port_channel_id: &PortChannelId) -> Key { /// Returns a key for nextSequenceRecv pub fn next_sequence_recv_key(port_channel_id: &PortChannelId) -> Key { - let path = Path::SeqRecvs(SeqRecvsPath( + let path = Path::SeqRecv(SeqRecvPath( port_channel_id.port_id.clone(), - port_channel_id.channel_id, + port_channel_id.channel_id.clone(), )); ibc_key(path.to_string()) .expect("Creating a key for nextSequenceRecv shouldn't fail") @@ -237,9 +225,9 @@ pub fn next_sequence_recv_key(port_channel_id: &PortChannelId) -> Key { /// Returns a key for nextSequenceAck pub fn next_sequence_ack_key(port_channel_id: &PortChannelId) -> Key { - let path = Path::SeqAcks(SeqAcksPath( + let path = Path::SeqAck(SeqAckPath( port_channel_id.port_id.clone(), - port_channel_id.channel_id, + port_channel_id.channel_id.clone(), )); ibc_key(path.to_string()) .expect("Creating a key for nextSequenceAck shouldn't fail") @@ -251,9 +239,9 @@ pub fn commitment_key( channel_id: &ChannelId, sequence: Sequence, ) -> Key { - let path = Path::Commitments(CommitmentsPath { + let path = Path::Commitment(CommitmentPath { port_id: port_id.clone(), - channel_id: *channel_id, + channel_id: channel_id.clone(), sequence, }); ibc_key(path.to_string()) @@ -266,9 +254,9 @@ pub fn receipt_key( channel_id: &ChannelId, sequence: Sequence, ) -> Key { - let path = Path::Receipts(ReceiptsPath { + let path = Path::Receipt(ReceiptPath { port_id: port_id.clone(), - channel_id: *channel_id, + channel_id: channel_id.clone(), sequence, }); ibc_key(path.to_string()) @@ -281,9 +269,9 @@ pub fn ack_key( channel_id: &ChannelId, sequence: Sequence, ) -> Key { - let path = Path::Acks(AcksPath { + let path = Path::Ack(AckPath { port_id: port_id.clone(), - channel_id: *channel_id, + channel_id: channel_id.clone(), sequence, }); ibc_key(path.to_string()) @@ -464,54 +452,12 @@ pub fn port_id(key: &Key) -> Result { } } -/// Returns a capability from the given capability key -/// `#IBC/capabilities/` -pub fn capability(key: &Key) -> Result { - match &key.segments[..] 
{ - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::StringSeg(index), - .., - ] if addr == &Address::Internal(InternalAddress::Ibc) - && prefix == "capabilities" => - { - let index: u64 = index.raw().parse().map_err(|e| { - Error::InvalidPortCapability(format!( - "The key has a non-number index: Key {}, {}", - key, e - )) - })?; - Ok(Capability::from(index)) - } - _ => Err(Error::InvalidPortCapability(format!( - "The key doesn't have a capability index: Key {}", - key - ))), - } -} - /// The storage key to get the denom name from the hashed token pub fn ibc_denom_key(token_hash: impl AsRef) -> Key { let path = format!("{}/{}", DENOM, token_hash.as_ref()); ibc_key(path).expect("Creating a key for the denom key shouldn't fail") } -/// Key's prefix for the escrow, burn, or mint account -pub fn ibc_account_prefix( - port_id: &PortId, - channel_id: &ChannelId, - token: &Address, -) -> Key { - Key::from(token.to_db_key()) - .push(&MULTITOKEN_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") - .push(&port_id.to_string().to_db_key()) - .expect("Cannot obtain a storage key") - .push(&channel_id.to_string().to_db_key()) - .expect("Cannot obtain a storage key") -} - /// Token address from the denom string pub fn token(denom: impl AsRef) -> Result
{ let token_str = denom.as_ref().split('/').last().ok_or_else(|| { @@ -570,8 +516,24 @@ pub fn ibc_token_prefix(denom: impl AsRef) -> Result { Ok(prefix) } +/// Returns true if the given key is for IBC +pub fn is_ibc_key(key: &Key) -> bool { + matches!(&key.segments[0], + DbKeySeg::AddressSeg(addr) if *addr == Address::Internal(InternalAddress::Ibc)) +} + /// Returns true if the sub prefix is for IBC pub fn is_ibc_sub_prefix(sub_prefix: &Key) -> bool { matches!(&sub_prefix.segments[0], DbKeySeg::StringSeg(s) if s == MULTITOKEN_STORAGE_KEY) } + +/// Returns true if the given key is the denom key +pub fn is_ibc_denom_key(key: &Key) -> bool { + match &key.segments[..] { + [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(prefix), ..] => { + addr == &Address::Internal(InternalAddress::Ibc) && prefix == DENOM + } + _ => false, + } +} diff --git a/core/src/ledger/inflation.rs b/core/src/ledger/inflation.rs index f3301c4d7dd..e2248cc8515 100644 --- a/core/src/ledger/inflation.rs +++ b/core/src/ledger/inflation.rs @@ -30,15 +30,24 @@ pub struct ValsToUpdate { /// PD controller used to dynamically adjust the rewards rates #[derive(Debug, Clone)] pub struct RewardsController { - locked_tokens: token::Amount, - total_tokens: token::Amount, - locked_ratio_target: Decimal, - locked_ratio_last: Decimal, - max_reward_rate: Decimal, - last_inflation_amount: token::Amount, - p_gain_nom: Decimal, - d_gain_nom: Decimal, - epochs_per_year: u64, + /// Locked token amount in the relevant system + pub locked_tokens: token::Amount, + /// Total token supply + pub total_tokens: token::Amount, + /// PD target locked ratio + pub locked_ratio_target: Decimal, + /// PD last locked ratio + pub locked_ratio_last: Decimal, + /// Maximum reward rate + pub max_reward_rate: Decimal, + /// Last inflation amount + pub last_inflation_amount: token::Amount, + /// Nominal proportional gain + pub p_gain_nom: Decimal, + /// Nominal derivative gain + pub d_gain_nom: Decimal, + /// Number of epochs per year + pub epochs_per_year: u64, } impl RewardsController { @@ -69,8 +78,8 @@ impl RewardsController { } /// Calculate a new rewards rate - pub fn run( - Self { + pub fn run(self) -> ValsToUpdate { + let Self { locked_tokens, total_tokens, locked_ratio_target, @@ -80,11 +89,11 @@ impl RewardsController { p_gain_nom, d_gain_nom, epochs_per_year, - }: &Self, - ) -> ValsToUpdate { - let locked: Decimal = u64::from(*locked_tokens).into(); - let total: Decimal = u64::from(*total_tokens).into(); - let epochs_py: Decimal = (*epochs_per_year).into(); + } = self; + + let locked: Decimal = u64::from(locked_tokens).into(); + let total: Decimal = u64::from(total_tokens).into(); + let epochs_py: Decimal = (epochs_per_year).into(); let locked_ratio = locked / total; let max_inflation = total * max_reward_rate / epochs_py; @@ -95,7 +104,7 @@ impl RewardsController { let delta_error = locked_ratio_last - locked_ratio; let control_val = p_gain * error - d_gain * delta_error; - let last_inflation_amount = Decimal::from(*last_inflation_amount); + let last_inflation_amount = Decimal::from(last_inflation_amount); let inflation = if last_inflation_amount + control_val > max_inflation { max_inflation } else if last_inflation_amount + control_val > dec!(0.0) { diff --git a/core/src/ledger/mod.rs b/core/src/ledger/mod.rs index c0bd1e45a97..43ed966699c 100644 --- a/core/src/ledger/mod.rs +++ b/core/src/ledger/mod.rs @@ -6,6 +6,7 @@ pub mod governance; pub mod ibc; pub mod inflation; pub mod parameters; +pub mod replay_protection; pub mod slash_fund; pub mod 
storage; pub mod storage_api; diff --git a/core/src/ledger/parameters/mod.rs b/core/src/ledger/parameters/mod.rs index e93f9d23e6a..bb9ae995774 100644 --- a/core/src/ledger/parameters/mod.rs +++ b/core/src/ledger/parameters/mod.rs @@ -5,12 +5,12 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use rust_decimal::Decimal; use thiserror::Error; -use super::storage::types::{decode, encode}; -use super::storage::{types, Storage}; +use super::storage::types; +use super::storage_api::{self, ResultExt, StorageRead, StorageWrite}; use crate::ledger::storage::{self as ledger_storage}; use crate::types::address::{Address, InternalAddress}; use crate::types::chain::ProposalBytes; -use crate::types::storage::Key; +use crate::types::hash::Hash; use crate::types::time::DurationSecs; use crate::types::token; @@ -40,8 +40,8 @@ pub struct Parameters { pub vp_whitelist: Vec, /// Whitelisted tx hashes (read only) pub tx_whitelist: Vec, - /// Implicit accounts validity predicate WASM code - pub implicit_vp: Vec, + /// Implicit accounts validity predicate WASM code hash + pub implicit_vp_code_hash: Hash, /// Expected number of epochs per year (read only) pub epochs_per_year: u64, /// PoS gain p (read only) @@ -103,10 +103,9 @@ pub enum WriteError { impl Parameters { /// Initialize parameters in storage in the genesis block. - pub fn init_storage(&self, storage: &mut Storage) + pub fn init_storage(&self, storage: &mut S) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let Self { epoch_duration, @@ -114,7 +113,7 @@ impl Parameters { max_proposal_bytes, vp_whitelist, tx_whitelist, - implicit_vp, + implicit_vp_code_hash, epochs_per_year, pos_gain_p, pos_gain_d, @@ -128,519 +127,362 @@ impl Parameters { // write max proposal bytes parameter let max_proposal_bytes_key = storage::get_max_proposal_bytes_key(); - let max_proposal_bytes_value = encode(&max_proposal_bytes); - storage - .write(&max_proposal_bytes_key, max_proposal_bytes_value) - .expect( - "Max proposal bytes parameter must be initialized in the \ - genesis block", - ); + storage.write(&max_proposal_bytes_key, max_proposal_bytes)?; // write epoch parameters let epoch_key = storage::get_epoch_duration_storage_key(); - let epoch_value = encode(epoch_duration); - storage.write(&epoch_key, epoch_value).expect( - "Epoch parameters must be initialized in the genesis block", - ); + storage.write(&epoch_key, epoch_duration)?; // write vp whitelist parameter let vp_whitelist_key = storage::get_vp_whitelist_storage_key(); - let vp_whitelist_value = encode( - &vp_whitelist - .iter() - .map(|id| id.to_lowercase()) - .collect::>(), - ); - storage.write(&vp_whitelist_key, vp_whitelist_value).expect( - "Vp whitelist parameter must be initialized in the genesis block", - ); + let vp_whitelist = vp_whitelist + .iter() + .map(|id| id.to_lowercase()) + .collect::>(); + storage.write(&vp_whitelist_key, vp_whitelist)?; // write tx whitelist parameter let tx_whitelist_key = storage::get_tx_whitelist_storage_key(); - let tx_whitelist_value = encode( - &tx_whitelist - .iter() - .map(|id| id.to_lowercase()) - .collect::>(), - ); - storage.write(&tx_whitelist_key, tx_whitelist_value).expect( - "Tx whitelist parameter must be initialized in the genesis block", - ); + let tx_whitelist = tx_whitelist + .iter() + .map(|id| id.to_lowercase()) + .collect::>(); + storage.write(&tx_whitelist_key, tx_whitelist)?; - // write tx whitelist parameter + // 
write max expected time per block let max_expected_time_per_block_key = storage::get_max_expected_time_per_block_key(); - let max_expected_time_per_block_value = - encode(&max_expected_time_per_block); - storage - .write( - &max_expected_time_per_block_key, - max_expected_time_per_block_value, - ) - .expect( - "Max expected time per block parameter must be initialized in \ - the genesis block", - ); + storage.write( + &max_expected_time_per_block_key, + max_expected_time_per_block, + )?; // write implicit vp parameter let implicit_vp_key = storage::get_implicit_vp_key(); - storage.write(&implicit_vp_key, implicit_vp).expect( - "Implicit VP parameter must be initialized in the genesis block", - ); + // Using `fn write_bytes` here, because implicit_vp code hash doesn't + // need to be encoded, it's bytes already. + storage.write_bytes(&implicit_vp_key, implicit_vp_code_hash)?; let epochs_per_year_key = storage::get_epochs_per_year_key(); - let epochs_per_year_value = encode(epochs_per_year); - storage - .write(&epochs_per_year_key, epochs_per_year_value) - .expect( - "Epochs per year parameter must be initialized in the genesis \ - block", - ); + storage.write(&epochs_per_year_key, epochs_per_year)?; let pos_gain_p_key = storage::get_pos_gain_p_key(); - let pos_gain_p_value = encode(pos_gain_p); - storage.write(&pos_gain_p_key, pos_gain_p_value).expect( - "PoS P-gain parameter must be initialized in the genesis block", - ); + storage.write(&pos_gain_p_key, pos_gain_p)?; let pos_gain_d_key = storage::get_pos_gain_d_key(); - let pos_gain_d_value = encode(pos_gain_d); - storage.write(&pos_gain_d_key, pos_gain_d_value).expect( - "PoS D-gain parameter must be initialized in the genesis block", - ); + storage.write(&pos_gain_d_key, pos_gain_d)?; let staked_ratio_key = storage::get_staked_ratio_key(); - let staked_ratio_val = encode(staked_ratio); - storage.write(&staked_ratio_key, staked_ratio_val).expect( - "PoS staked ratio parameter must be initialized in the genesis \ - block", - ); + storage.write(&staked_ratio_key, staked_ratio)?; let pos_inflation_key = storage::get_pos_inflation_amount_key(); - let pos_inflation_val = encode(pos_inflation_amount); - storage.write(&pos_inflation_key, pos_inflation_val).expect( - "PoS inflation rate parameter must be initialized in the genesis \ - block", - ); + storage.write(&pos_inflation_key, pos_inflation_amount)?; #[cfg(not(feature = "mainnet"))] if let Some(faucet_account) = faucet_account { let faucet_account_key = storage::get_faucet_account_key(); - let faucet_account_val = encode(faucet_account); - storage - .write(&faucet_account_key, faucet_account_val) - .expect( - "Faucet account parameter must be initialized in the \ - genesis block, if any", - ); + storage.write(&faucet_account_key, faucet_account)?; } #[cfg(not(feature = "mainnet"))] { let wrapper_tx_fees_key = storage::get_wrapper_tx_fees_key(); - let wrapper_tx_fees_val = - encode(&wrapper_tx_fees.unwrap_or(token::Amount::whole(100))); - storage - .write(&wrapper_tx_fees_key, wrapper_tx_fees_val) - .expect( - "Wrapper tx fees must be initialized in the genesis block", - ); + let wrapper_tx_fees = + wrapper_tx_fees.unwrap_or(token::Amount::whole(100)); + storage.write(&wrapper_tx_fees_key, wrapper_tx_fees)?; } + Ok(()) } } + /// Update the max_expected_time_per_block parameter in storage. Returns the /// parameters and gas cost. 
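Throughout this rewrite the parameter helpers move off the concrete `Storage<DB, H>` with its gas-returning writes and onto any `S: StorageRead + StorageWrite`. A minimal sketch of the new calling convention (illustrative only; `set_epochs_per_year` is not a function in this diff, it just mirrors the update helpers below):

fn set_epochs_per_year<S>(storage: &mut S, value: u64) -> storage_api::Result<()>
where
    S: StorageRead + StorageWrite,
{
    // Borsh encoding and gas accounting now happen inside `write`,
    // so callers no longer thread `(value, gas)` tuples around.
    let key = storage::get_epochs_per_year_key();
    storage.write(&key, value)
}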
-pub fn update_max_expected_time_per_block_parameter( - storage: &mut Storage, +pub fn update_max_expected_time_per_block_parameter( + storage: &mut S, value: &DurationSecs, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_max_expected_time_per_block_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the vp whitelist parameter in storage. Returns the parameters and gas /// cost. -pub fn update_vp_whitelist_parameter( - storage: &mut Storage, +pub fn update_vp_whitelist_parameter( + storage: &mut S, value: Vec, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_vp_whitelist_storage_key(); - update( - storage, - &value + storage.write( + &key, + value .iter() .map(|id| id.to_lowercase()) .collect::>(), - key, ) } /// Update the tx whitelist parameter in storage. Returns the parameters and gas /// cost. -pub fn update_tx_whitelist_parameter( - storage: &mut Storage, +pub fn update_tx_whitelist_parameter( + storage: &mut S, value: Vec, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_tx_whitelist_storage_key(); - update( - storage, - &value + storage.write( + &key, + value .iter() .map(|id| id.to_lowercase()) .collect::>(), - key, ) } /// Update the epoch parameter in storage. Returns the parameters and gas /// cost. -pub fn update_epoch_parameter( - storage: &mut Storage, +pub fn update_epoch_parameter( + storage: &mut S, value: &EpochDuration, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_epoch_duration_storage_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the epochs_per_year parameter in storage. Returns the parameters and /// gas cost. -pub fn update_epochs_per_year_parameter( - storage: &mut Storage, +pub fn update_epochs_per_year_parameter( + storage: &mut S, value: &u64, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_epochs_per_year_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the PoS P-gain parameter in storage. Returns the parameters and gas /// cost. -pub fn update_pos_gain_p_parameter( - storage: &mut Storage, +pub fn update_pos_gain_p_parameter( + storage: &mut S, value: &Decimal, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_pos_gain_p_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the PoS D-gain parameter in storage. Returns the parameters and gas /// cost. 
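One subtlety that recurs in these setters: `write` Borsh-encodes its argument, while the implicit-VP code hash (handled below) goes through `write_bytes` because it is raw bytes already. A sketch of the difference, assuming simplified stand-in helpers rather than the real `StorageWrite` trait:

use borsh::BorshSerialize;

// Stand-in store: a list of (key, raw bytes) pairs.
fn write_bytes(store: &mut Vec<(String, Vec<u8>)>, key: &str, bytes: Vec<u8>) {
    store.push((key.to_string(), bytes));
}

// What `write` adds on top of `write_bytes`: Borsh-encode first, then
// store the resulting bytes.
fn write<T: BorshSerialize>(store: &mut Vec<(String, Vec<u8>)>, key: &str, value: &T) {
    let bytes = value.try_to_vec().expect("Borsh encoding shouldn't fail");
    write_bytes(store, key, bytes);
}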
-pub fn update_pos_gain_d_parameter( - storage: &mut Storage, +pub fn update_pos_gain_d_parameter( + storage: &mut S, value: &Decimal, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_pos_gain_d_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the PoS staked ratio parameter in storage. Returns the parameters and /// gas cost. -pub fn update_staked_ratio_parameter( - storage: &mut Storage, +pub fn update_staked_ratio_parameter( + storage: &mut S, value: &Decimal, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_staked_ratio_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the PoS inflation rate parameter in storage. Returns the parameters /// and gas cost. -pub fn update_pos_inflation_amount_parameter( - storage: &mut Storage, +pub fn update_pos_inflation_amount_parameter( + storage: &mut S, value: &u64, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_pos_inflation_amount_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the implicit VP parameter in storage. Return the gas cost. -pub fn update_implicit_vp( - storage: &mut Storage, +pub fn update_implicit_vp( + storage: &mut S, implicit_vp: &[u8], -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_implicit_vp_key(); - // Not using `fn update` here, because implicit_vp doesn't need to be + // Using `fn write_bytes` here, because implicit_vp doesn't need to be // encoded, it's bytes already. - let (gas, _size_diff) = storage - .write(&key, implicit_vp) - .map_err(WriteError::StorageError)?; - Ok(gas) -} - -/// Update the parameters in storage. Returns the parameters and gas -/// cost. -pub fn update( - storage: &mut Storage, - value: &T, - key: Key, -) -> std::result::Result -where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, - T: BorshSerialize, -{ - let serialized_value = value - .try_to_vec() - .map_err(|e| WriteError::SerializeError(e.to_string()))?; - let (gas, _size_diff) = storage - .write(&key, serialized_value) - .map_err(WriteError::StorageError)?; - Ok(gas) + storage.write_bytes(&key, implicit_vp) } /// Read the the epoch duration parameter from store -pub fn read_epoch_duration_parameter( - storage: &Storage, -) -> std::result::Result<(EpochDuration, u64), ReadError> +pub fn read_epoch_duration_parameter( + storage: &S, +) -> storage_api::Result where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead, { // read epoch let epoch_key = storage::get_epoch_duration_storage_key(); - let (value, gas) = - storage.read(&epoch_key).map_err(ReadError::StorageError)?; - let epoch_duration: EpochDuration = - decode(value.ok_or(ReadError::ParametersMissing)?) 
- .map_err(ReadError::StorageTypeError)?; - - Ok((epoch_duration, gas)) + let epoch_duration = storage.read(&epoch_key)?; + epoch_duration + .ok_or(ReadError::ParametersMissing) + .into_storage_result() } #[cfg(not(feature = "mainnet"))] /// Read the faucet account's address, if any -pub fn read_faucet_account_parameter( - storage: &Storage, -) -> std::result::Result<(Option
, u64), ReadError> +pub fn read_faucet_account_parameter( + storage: &S, +) -> storage_api::Result> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead, { let faucet_account_key = storage::get_faucet_account_key(); - let (value, gas_faucet_account) = storage - .read(&faucet_account_key) - .map_err(ReadError::StorageError)?; - let address: Option
= value - .map(|value| decode(value).map_err(ReadError::StorageTypeError)) - .transpose()?; - Ok((address, gas_faucet_account)) + storage.read(&faucet_account_key) } #[cfg(not(feature = "mainnet"))] /// Read the wrapper tx fees amount, if any -pub fn read_wrapper_tx_fees_parameter( - storage: &Storage, -) -> std::result::Result<(Option, u64), ReadError> +pub fn read_wrapper_tx_fees_parameter( + storage: &S, +) -> storage_api::Result> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead, { let wrapper_tx_fees_key = storage::get_wrapper_tx_fees_key(); - let (value, gas_wrapper_tx_fees) = storage - .read(&wrapper_tx_fees_key) - .map_err(ReadError::StorageError)?; - let fee: Option = value - .map(|value| decode(value).map_err(ReadError::StorageTypeError)) - .transpose()?; - Ok((fee, gas_wrapper_tx_fees)) + storage.read(&wrapper_tx_fees_key) } // Read the all the parameters from storage. Returns the parameters and gas /// cost. -pub fn read( - storage: &Storage, -) -> std::result::Result<(Parameters, u64), ReadError> +pub fn read(storage: &S) -> storage_api::Result where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead, { // read max proposal bytes - let (max_proposal_bytes, gas_proposal_bytes) = { + let max_proposal_bytes: ProposalBytes = { let key = storage::get_max_proposal_bytes_key(); - let (value, gas) = - storage.read(&key).map_err(ReadError::StorageError)?; - let value: ProposalBytes = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; - (value, gas) + let value = storage.read(&key)?; + value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()? }; // read epoch duration - let (epoch_duration, gas_epoch) = read_epoch_duration_parameter(storage) - .expect("Couldn't read epoch duration parameters"); + let epoch_duration = read_epoch_duration_parameter(storage)?; // read vp whitelist let vp_whitelist_key = storage::get_vp_whitelist_storage_key(); - let (value, gas_vp) = storage - .read(&vp_whitelist_key) - .map_err(ReadError::StorageError)?; - let vp_whitelist: Vec = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&vp_whitelist_key)?; + let vp_whitelist: Vec = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read tx whitelist let tx_whitelist_key = storage::get_tx_whitelist_storage_key(); - let (value, gas_tx) = storage - .read(&tx_whitelist_key) - .map_err(ReadError::StorageError)?; - let tx_whitelist: Vec = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&tx_whitelist_key)?; + let tx_whitelist: Vec = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read max expected block time let max_expected_time_per_block_key = storage::get_max_expected_time_per_block_key(); - let (value, gas_time) = storage - .read(&max_expected_time_per_block_key) - .map_err(ReadError::StorageError)?; - let max_expected_time_per_block: DurationSecs = - decode(value.ok_or(ReadError::ParametersMissing)?) 
- .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&max_expected_time_per_block_key)?; + let max_expected_time_per_block: DurationSecs = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; let implicit_vp_key = storage::get_implicit_vp_key(); - let (value, gas_implicit_vp) = storage - .read(&implicit_vp_key) - .map_err(ReadError::StorageError)?; - let implicit_vp = value.ok_or(ReadError::ParametersMissing)?; + let value = storage + .read_bytes(&implicit_vp_key)? + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; + let implicit_vp_code_hash = + Hash::try_from(&value[..]).into_storage_result()?; // read epochs per year let epochs_per_year_key = storage::get_epochs_per_year_key(); - let (value, gas_epy) = storage - .read(&epochs_per_year_key) - .map_err(ReadError::StorageError)?; - let epochs_per_year: u64 = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&epochs_per_year_key)?; + let epochs_per_year: u64 = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read PoS gain P let pos_gain_p_key = storage::get_pos_gain_p_key(); - let (value, gas_gain_p) = storage - .read(&pos_gain_p_key) - .map_err(ReadError::StorageError)?; - let pos_gain_p: Decimal = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&pos_gain_p_key)?; + let pos_gain_p: Decimal = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read PoS gain D let pos_gain_d_key = storage::get_pos_gain_d_key(); - let (value, gas_gain_d) = storage - .read(&pos_gain_d_key) - .map_err(ReadError::StorageError)?; - let pos_gain_d: Decimal = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&pos_gain_d_key)?; + let pos_gain_d: Decimal = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read staked ratio let staked_ratio_key = storage::get_staked_ratio_key(); - let (value, gas_staked) = storage - .read(&staked_ratio_key) - .map_err(ReadError::StorageError)?; - let staked_ratio: Decimal = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&staked_ratio_key)?; + let staked_ratio: Decimal = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read PoS inflation rate let pos_inflation_key = storage::get_pos_inflation_amount_key(); - let (value, gas_reward) = storage - .read(&pos_inflation_key) - .map_err(ReadError::StorageError)?; - let pos_inflation_amount: u64 = - decode(value.ok_or(ReadError::ParametersMissing)?) 
- .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&pos_inflation_key)?; + let pos_inflation_amount: u64 = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read faucet account #[cfg(not(feature = "mainnet"))] - let (faucet_account, gas_faucet_account) = - read_faucet_account_parameter(storage)?; - #[cfg(feature = "mainnet")] - let gas_faucet_account = 0; + let faucet_account = read_faucet_account_parameter(storage)?; // read faucet account #[cfg(not(feature = "mainnet"))] - let (wrapper_tx_fees, gas_wrapper_tx_fees) = - read_wrapper_tx_fees_parameter(storage)?; - #[cfg(feature = "mainnet")] - let gas_wrapper_tx_fees = 0; - - let total_gas_cost = [ - gas_epoch, - gas_tx, - gas_vp, - gas_time, - gas_implicit_vp, - gas_epy, - gas_gain_p, - gas_gain_d, - gas_staked, - gas_reward, - gas_proposal_bytes, - gas_faucet_account, - gas_wrapper_tx_fees, - ] - .into_iter() - .fold(0u64, |accum, gas| { - accum - .checked_add(gas) - .expect("u64 overflow occurred while doing gas arithmetic") - }); - - Ok(( - Parameters { - epoch_duration, - max_expected_time_per_block, - max_proposal_bytes, - vp_whitelist, - tx_whitelist, - implicit_vp, - epochs_per_year, - pos_gain_p, - pos_gain_d, - staked_ratio, - pos_inflation_amount, - #[cfg(not(feature = "mainnet"))] - faucet_account, - #[cfg(not(feature = "mainnet"))] - wrapper_tx_fees, - }, - total_gas_cost, - )) + let wrapper_tx_fees = read_wrapper_tx_fees_parameter(storage)?; + + Ok(Parameters { + epoch_duration, + max_expected_time_per_block, + max_proposal_bytes, + vp_whitelist, + tx_whitelist, + implicit_vp_code_hash, + epochs_per_year, + pos_gain_p, + pos_gain_d, + staked_ratio, + pos_inflation_amount, + #[cfg(not(feature = "mainnet"))] + faucet_account, + #[cfg(not(feature = "mainnet"))] + wrapper_tx_fees, + }) } diff --git a/core/src/ledger/replay_protection.rs b/core/src/ledger/replay_protection.rs new file mode 100644 index 00000000000..cee54ef06fd --- /dev/null +++ b/core/src/ledger/replay_protection.rs @@ -0,0 +1,21 @@ +//! 
Replay protection storage + +use crate::types::address::{Address, InternalAddress}; +use crate::types::hash::Hash; +use crate::types::storage::{DbKeySeg, Key, KeySeg}; + +/// Internal replay protection address +pub const ADDRESS: Address = + Address::Internal(InternalAddress::ReplayProtection); + +/// Check if a key is a replay protection key +pub fn is_tx_hash_key(key: &Key) -> bool { + matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS) +} + +/// Get the transaction hash key +pub fn get_tx_hash_key(hash: &Hash) -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&hash.to_string()) + .expect("Cannot obtain a valid db key") +} diff --git a/core/src/ledger/storage/masp_conversions.rs b/core/src/ledger/storage/masp_conversions.rs index accd472e3fa..acb88a9a4f2 100644 --- a/core/src/ledger/storage/masp_conversions.rs +++ b/core/src/ledger/storage/masp_conversions.rs @@ -31,8 +31,8 @@ fn calculate_masp_rewards( addr: &Address, ) -> crate::ledger::storage_api::Result<(u64, u64)> where - D: super::DB + for<'iter> super::DBIter<'iter>, - H: super::StorageHasher, + D: 'static + super::DB + for<'iter> super::DBIter<'iter>, + H: 'static + super::StorageHasher, { use rust_decimal::Decimal; @@ -99,7 +99,7 @@ where let ValsToUpdate { locked_ratio, inflation, - } = RewardsController::run(&controller); + } = RewardsController::run(controller); // inflation-per-token = inflation / locked tokens = n/100 // ∴ n = (inflation * 100) / locked tokens @@ -165,8 +165,8 @@ pub fn update_allowed_conversions( wl_storage: &mut super::WlStorage, ) -> crate::ledger::storage_api::Result<()> where - D: super::DB + for<'iter> super::DBIter<'iter>, - H: super::StorageHasher, + D: 'static + super::DB + for<'iter> super::DBIter<'iter>, + H: 'static + super::StorageHasher, { use std::cmp::Ordering; diff --git a/core/src/ledger/storage/merkle_tree.rs b/core/src/ledger/storage/merkle_tree.rs index dc65a125401..31fb40b1ebd 100644 --- a/core/src/ledger/storage/merkle_tree.rs +++ b/core/src/ledger/storage/merkle_tree.rs @@ -248,16 +248,38 @@ impl core::fmt::Debug for MerkleTree { impl MerkleTree { /// Restore the tree from the stores - pub fn new(stores: MerkleTreeStoresRead) -> Self { + pub fn new(stores: MerkleTreeStoresRead) -> Result { let base = Smt::new(stores.base.0.into(), stores.base.1); let account = Smt::new(stores.account.0.into(), stores.account.1); let ibc = Amt::new(stores.ibc.0.into(), stores.ibc.1); let pos = Smt::new(stores.pos.0.into(), stores.pos.1); - Self { + let tree = Self { base, account, ibc, pos, + }; + + // validate + let account_key = H::hash(StoreType::Account.to_string()); + let account_root = tree.base.get(&account_key.into())?; + let ibc_key = H::hash(StoreType::Ibc.to_string()); + let ibc_root = tree.base.get(&ibc_key.into())?; + let pos_key = H::hash(StoreType::PoS.to_string()); + let pos_root = tree.base.get(&pos_key.into())?; + if (tree.base.root().is_zero() + && tree.account.root().is_zero() + && tree.ibc.root().is_zero() + && tree.pos.root().is_zero()) + || (account_root == tree.account.root().into() + && ibc_root == tree.ibc.root().into() + && pos_root == tree.pos.root().into()) + { + Ok(tree) + } else { + Err(Error::MerkleTree( + "Invalid MerkleTreeStoresRead".to_string(), + )) } } @@ -592,6 +614,8 @@ impl From for crate::tendermint::merkle::proof::Proof { #[cfg(test)] mod test { + use ics23::HostFunctionsManager; + use super::*; use crate::ledger::storage::ics23_specs::{ibc_proof_specs, proof_specs}; use crate::ledger::storage::traits::Sha256Hasher; @@ -641,9 +665,11 
@@ mod test { _ => unreachable!(), }; let subtree_root = if let Some(left) = &non_existence_proof.left { - ics23::calculate_existence_root(left).unwrap() + ics23::calculate_existence_root::(left) + .unwrap() } else if let Some(right) = &non_existence_proof.right { - ics23::calculate_existence_root(right).unwrap() + ics23::calculate_existence_root::(right) + .unwrap() } else { unreachable!() }; @@ -651,12 +677,13 @@ mod test { StoreType::sub_key(&ibc_non_key).expect("Test failed"); let specs = ibc_proof_specs::(); - let nep_verification_res = ics23::verify_non_membership( - &nep_commitment_proof, - &specs[0], - &subtree_root, - sub_key.to_string().as_bytes(), - ); + let nep_verification_res = + ics23::verify_non_membership::( + &nep_commitment_proof, + &specs[0], + &subtree_root, + sub_key.to_string().as_bytes(), + ); assert!(nep_verification_res); let basetree_ep_commitment_proof = nep.base_proof; let basetree_ics23_ep = @@ -664,15 +691,18 @@ mod test { Ics23Proof::Exist(ep) => ep, _ => unreachable!(), }; - let basetree_root = - ics23::calculate_existence_root(&basetree_ics23_ep).unwrap(); - let basetree_verification_res = ics23::verify_membership( - &basetree_ep_commitment_proof, - &specs[1], - &basetree_root, - store_type.to_string().as_bytes(), - &subtree_root, - ); + let basetree_root = ics23::calculate_existence_root::< + HostFunctionsManager, + >(&basetree_ics23_ep) + .unwrap(); + let basetree_verification_res = + ics23::verify_membership::( + &basetree_ep_commitment_proof, + &specs[1], + &basetree_root, + store_type.to_string().as_bytes(), + &subtree_root, + ); assert!(basetree_verification_res); } @@ -696,7 +726,8 @@ mod test { stores_read.set_root(st, stores_write.root(st).clone()); stores_read.set_store(stores_write.store(st).to_owned()); } - let restored_tree = MerkleTree::::new(stores_read); + let restored_tree = + MerkleTree::::new(stores_read).unwrap(); assert!(restored_tree.has_key(&ibc_key).unwrap()); assert!(restored_tree.has_key(&pos_key).unwrap()); } @@ -742,9 +773,11 @@ mod test { Ics23Proof::Exist(ep) => ep, _ => unreachable!(), }; - sub_root = - ics23::calculate_existence_root(&existence_proof).unwrap(); - assert!(ics23::verify_membership( + sub_root = ics23::calculate_existence_root::( + &existence_proof, + ) + .unwrap(); + assert!(ics23::verify_membership::( &commitment_proof, spec, &sub_root, @@ -799,9 +832,11 @@ mod test { Ics23Proof::Exist(ep) => ep, _ => unreachable!(), }; - sub_root = - ics23::calculate_existence_root(&existence_proof).unwrap(); - assert!(ics23::verify_membership( + sub_root = ics23::calculate_existence_root::( + &existence_proof, + ) + .unwrap(); + assert!(ics23::verify_membership::( &commitment_proof, spec, &sub_root, @@ -840,9 +875,11 @@ mod test { _ => unreachable!(), }; let subtree_root = if let Some(left) = &non_existence_proof.left { - ics23::calculate_existence_root(left).unwrap() + ics23::calculate_existence_root::(left) + .unwrap() } else if let Some(right) = &non_existence_proof.right { - ics23::calculate_existence_root(right).unwrap() + ics23::calculate_existence_root::(right) + .unwrap() } else { unreachable!() }; @@ -850,12 +887,13 @@ mod test { StoreType::sub_key(&ibc_non_key).expect("Test failed"); let specs = ibc_proof_specs::(); - let nep_verification_res = ics23::verify_non_membership( - &nep_commitment_proof, - &specs[0], - &subtree_root, - sub_key.to_string().as_bytes(), - ); + let nep_verification_res = + ics23::verify_non_membership::( + &nep_commitment_proof, + &specs[0], + &subtree_root, + 
sub_key.to_string().as_bytes(), + ); assert!(nep_verification_res); let basetree_ep_commitment_proof = nep.base_proof; let basetree_ics23_ep = @@ -863,15 +901,18 @@ mod test { Ics23Proof::Exist(ep) => ep, _ => unreachable!(), }; - let basetree_root = - ics23::calculate_existence_root(&basetree_ics23_ep).unwrap(); - let basetree_verification_res = ics23::verify_membership( - &basetree_ep_commitment_proof, - &specs[1], - &basetree_root, - store_type.to_string().as_bytes(), - &subtree_root, - ); + let basetree_root = ics23::calculate_existence_root::< + HostFunctionsManager, + >(&basetree_ics23_ep) + .unwrap(); + let basetree_verification_res = + ics23::verify_membership::( + &basetree_ep_commitment_proof, + &specs[1], + &basetree_root, + store_type.to_string().as_bytes(), + &subtree_root, + ); assert!(basetree_verification_res); } } diff --git a/core/src/ledger/storage/mockdb.rs b/core/src/ledger/storage/mockdb.rs index eb8ae04543a..16e28d27591 100644 --- a/core/src/ledger/storage/mockdb.rs +++ b/core/src/ledger/storage/mockdb.rs @@ -16,7 +16,8 @@ use crate::ledger::storage::types::{self, KVBytes, PrefixIterator}; #[cfg(feature = "ferveo-tpke")] use crate::types::internal::TxQueue; use crate::types::storage::{ - BlockHeight, BlockResults, Header, Key, KeySeg, KEY_SEGMENT_SEPARATOR, + BlockHeight, BlockResults, Epoch, Epochs, Header, Key, KeySeg, + KEY_SEGMENT_SEPARATOR, }; use crate::types::time::DateTimeUtc; @@ -172,7 +173,12 @@ impl DB for MockDB { } } - fn write_block(&mut self, state: BlockStateWrite) -> Result<()> { + fn write_block( + &mut self, + state: BlockStateWrite, + _batch: &mut Self::WriteBatch, + _is_full_commit: bool, + ) -> Result<()> { let BlockStateWrite { merkle_tree_stores, header, @@ -310,7 +316,7 @@ impl DB for MockDB { fn read_merkle_tree_stores( &self, height: BlockHeight, - ) -> Result> { + ) -> Result> { let mut merkle_tree_stores = MerkleTreeStoresRead::default(); let height_key = Key::from(height.to_db_key()); let tree_key = height_key @@ -342,7 +348,7 @@ impl DB for MockDB { None => return Ok(None), } } - Ok(Some(merkle_tree_stores)) + Ok(Some((height, merkle_tree_stores))) } fn read_subspace_val(&self, key: &Key) -> Result>> { @@ -437,6 +443,38 @@ impl DB for MockDB { None => 0, }) } + + fn prune_merkle_tree_stores( + &mut self, + _batch: &mut Self::WriteBatch, + epoch: Epoch, + pred_epochs: &Epochs, + ) -> Result<()> { + match pred_epochs.get_start_height_of_epoch(epoch) { + Some(height) => { + let prefix_key = Key::from(height.to_db_key()) + .push(&"tree".to_owned()) + .map_err(Error::KeyError)?; + for st in StoreType::iter() { + if *st != StoreType::Base { + let prefix_key = prefix_key + .push(&st.to_string()) + .map_err(Error::KeyError)?; + let root_key = prefix_key + .push(&"root".to_owned()) + .map_err(Error::KeyError)?; + self.0.borrow_mut().remove(&root_key.to_string()); + let store_key = prefix_key + .push(&"store".to_owned()) + .map_err(Error::KeyError)?; + self.0.borrow_mut().remove(&store_key.to_string()); + } + } + Ok(()) + } + None => Ok(()), + } + } } impl<'iter> DBIter<'iter> for MockDB { @@ -455,6 +493,16 @@ impl<'iter> DBIter<'iter> for MockDB { let iter = self.0.borrow().clone().into_iter(); MockPrefixIterator::new(MockIterator { prefix, iter }, db_prefix) } + + fn iter_old_diffs(&self, _height: BlockHeight) -> MockPrefixIterator { + // Mock DB can read only the latest value for now + unimplemented!() + } + + fn iter_new_diffs(&self, _height: BlockHeight) -> MockPrefixIterator { + // Mock DB can read only the latest value for now + 
unimplemented!() + } } /// A prefix iterator base for the [`MockPrefixIterator`]. @@ -508,21 +556,7 @@ impl Iterator for PrefixIterator { } } -impl DBWriteBatch for MockDBWriteBatch { - fn put<K, V>(&mut self, _key: K, _value: V) - where - K: AsRef<[u8]>, - V: AsRef<[u8]>, - { - // Nothing to do - in MockDB, batch writes are committed directly from - // `batch_write_subspace_val` and `batch_delete_subspace_val`. - } - - fn delete<K: AsRef<[u8]>>(&mut self, _key: K) { - // Nothing to do - in MockDB, batch writes are committed directly from - // `batch_write_subspace_val` and `batch_delete_subspace_val`. - } -} +impl DBWriteBatch for MockDBWriteBatch {} fn unknown_key_error(key: &str) -> Result<()> { Err(Error::UnknownKey { diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 71eade18b3a..e9bdf93de90 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -11,8 +11,8 @@ mod wl_storage; pub mod write_log; use core::fmt::Debug; +use std::cmp::Ordering; -use merkle_tree::StorageBytes; pub use merkle_tree::{ MembershipProof, MerkleTree, MerkleTreeStoresRead, MerkleTreeStoresWrite, StoreType, @@ -20,7 +20,7 @@ pub use merkle_tree::{ use thiserror::Error; pub use traits::{Sha256Hasher, StorageHasher}; pub use wl_storage::{ - iter_prefix_post, iter_prefix_pre, PrefixIter, WlStorage, + iter_prefix_post, iter_prefix_pre, PrefixIter, TempWlStorage, WlStorage, }; #[cfg(feature = "wasm-runtime")] @@ -37,6 +37,7 @@ use crate::types::address::{ masp, Address, EstablishedAddressGen, InternalAddress, }; use crate::types::chain::{ChainId, CHAIN_ID_LENGTH}; +use crate::types::hash::{Error as HashError, Hash}; // TODO #[cfg(feature = "ferveo-tpke")] use crate::types::internal::TxQueue; @@ -99,6 +100,8 @@ where /// Wrapper txs to be decrypted in the next block proposal #[cfg(feature = "ferveo-tpke")] pub tx_queue: TxQueue, + /// How many block heights in the past the storage can be queried + pub storage_read_past_height_limit: Option<u64>, } /// The block storage data @@ -145,6 +148,8 @@ pub enum Error { BorshCodingError(std::io::Error), #[error("Merkle tree at the height {height} is not stored")] NoMerkleTree { height: BlockHeight }, + #[error("Code hash error: {0}")] + InvalidCodeHash(HashError), } /// The block's state as stored in the database. @@ -218,8 +223,14 @@ pub trait DB: std::fmt::Debug { /// Read the last committed block's metadata fn read_last_block(&mut self) -> Result<Option<BlockStateRead>>; - /// Write block's metadata - fn write_block(&mut self, state: BlockStateWrite) -> Result<()>; + /// Write block's metadata. Merkle tree sub-stores are committed only when + /// `is_full_commit` is `true` (typically at the beginning of a new epoch). + fn write_block( + &mut self, + state: BlockStateWrite, + batch: &mut Self::WriteBatch, + is_full_commit: bool, + ) -> Result<()>; /// Read the block header with the given height from the DB fn read_block_header(&self, height: BlockHeight) -> Result<Option<Header>>; @@ -228,7 +239,7 @@ fn read_merkle_tree_stores( &self, height: BlockHeight, - ) -> Result<Option<MerkleTreeStoresRead>>; + ) -> Result<Option<(BlockHeight, MerkleTreeStoresRead)>>; /// Read the latest value for account subspace key from the DB fn read_subspace_val(&self, key: &Key) -> Result<Option<Vec<u8>>>; @@ -289,6 +300,14 @@ pub trait DB: std::fmt::Debug { height: BlockHeight, key: &Key, ) -> Result<i64>; + + /// Prune Merkle tree stores at the given epoch + fn prune_merkle_tree_stores( + &mut self, + batch: &mut Self::WriteBatch, + pruned_epoch: Epoch, + pred_epochs: &Epochs, + ) -> Result<()>; } /// A database prefix iterator. 
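The reworked `DB` trait threads a single `WriteBatch` through the whole commit, so block metadata, subspace diffs, and Merkle-store pruning land in one atomic batch. A minimal sketch of the intended call pattern, using hypothetical stand-in types in place of the real `BlockStateWrite`, `Epoch`, `Epochs`, and the associated `WriteBatch` type:

```rust
// Illustrative stand-ins only; the real types live in namada's storage crate.
struct BlockStateWrite;
struct WriteBatch;
struct Epoch(u64);

trait Db {
    // Mirrors the methods added in this patch, with simplified signatures.
    fn write_block(
        &mut self,
        state: BlockStateWrite,
        batch: &mut WriteBatch,
        is_full_commit: bool,
    ) -> Result<(), String>;
    fn prune_merkle_tree_stores(
        &mut self,
        batch: &mut WriteBatch,
        pruned_epoch: Epoch,
    ) -> Result<(), String>;
    fn exec_batch(&mut self, batch: WriteBatch) -> Result<(), String>;
}

// A hypothetical commit driver: everything is staged into one batch that is
// executed exactly once at the end.
fn commit(
    db: &mut impl Db,
    state: BlockStateWrite,
    is_full_commit: bool,
    old_epoch: Epoch,
) -> Result<(), String> {
    let mut batch = WriteBatch;
    db.write_block(state, &mut batch, is_full_commit)?;
    if is_full_commit {
        db.prune_merkle_tree_stores(&mut batch, old_epoch)?;
    }
    db.exec_batch(batch)
}
```

Keeping the batch caller-owned is what lets `commit_block` in `mod.rs` decide whether the pruning deletes belong in the same atomic write as the block itself.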
@@ -306,20 +325,16 @@ pub trait DBIter<'iter> { /// Read results subspace key value pairs from the DB fn iter_results(&'iter self) -> Self::PrefixIter; + + /// Read subspace old diffs at a given height + fn iter_old_diffs(&'iter self, height: BlockHeight) -> Self::PrefixIter; + + /// Read subspace new diffs at a given height + fn iter_new_diffs(&'iter self, height: BlockHeight) -> Self::PrefixIter; } /// Atomic batch write. -pub trait DBWriteBatch { - /// Insert a value into the database under the given key. - fn put<K, V>(&mut self, key: K, value: V) - where - K: AsRef<[u8]>, - V: AsRef<[u8]>; - - /// Removes the database entry for key. Does nothing if the key was not - /// found. - fn delete<K: AsRef<[u8]>>(&mut self, key: K); -} +pub trait DBWriteBatch {} impl<D, H> Storage<D, H> where @@ -332,6 +347,7 @@ where chain_id: ChainId, native_token: Address, cache: Option<&D::Cache>, + storage_read_past_height_limit: Option<u64>, ) -> Self { let block = BlockStorage { tree: MerkleTree::default(), @@ -359,6 +375,7 @@ where #[cfg(feature = "ferveo-tpke")] tx_queue: TxQueue::default(), native_token, + storage_read_past_height_limit, } } @@ -379,7 +396,6 @@ where tx_queue, }) = self.db.read_last_block()? { - self.block.tree = MerkleTree::new(merkle_tree_stores); self.block.hash = hash; self.block.height = height; self.block.epoch = epoch; @@ -390,7 +406,10 @@ where self.next_epoch_min_start_height = next_epoch_min_start_height; self.next_epoch_min_start_time = next_epoch_min_start_time; self.address_gen = address_gen; - if self.last_epoch.0 > 1 { + // Rebuild Merkle tree + self.block.tree = MerkleTree::new(merkle_tree_stores) + .or_else(|_| self.get_merkle_tree(height))?; + if self.last_epoch.0 > 0 { // The derived conversions will be placed in MASP address space let masp_addr = masp(); let key_prefix: Key = masp_addr.to_db_key().into(); @@ -429,7 +448,10 @@ where } /// Persist the current block's state to the database - pub fn commit_block(&mut self) -> Result<()> { + pub fn commit_block(&mut self, mut batch: D::WriteBatch) -> Result<()> { + // All state is written only at the first height or at the start of a new epoch + let is_full_commit = + self.block.height.0 == 1 || self.last_epoch != self.block.epoch; let state = BlockStateWrite { merkle_tree_stores: self.block.tree.stores(), header: self.header.as_ref(), @@ -444,11 +466,15 @@ where #[cfg(feature = "ferveo-tpke")] tx_queue: &self.tx_queue, }; - self.db.write_block(state)?; + self.db.write_block(state, &mut batch, is_full_commit)?; self.last_height = self.block.height; self.last_epoch = self.block.epoch; self.header = None; - Ok(()) + if is_full_commit { + // prune old merkle tree stores + self.prune_merkle_tree_stores(&mut batch)?; + } + self.db.exec_batch(batch) } /// Find the root hash of the merkle tree @@ -575,18 +601,25 @@ where Ok(()) } - /// Get a validity predicate for the given account address and the gas cost - /// for reading it. + /// Get the hash of a validity predicate for the given account address and + /// the gas cost for reading it. pub fn validity_predicate( &self, addr: &Address, - ) -> Result<(Option<Vec<u8>>, u64)> { + ) -> Result<(Option<Hash>, u64)> { let key = if let Address::Implicit(_) = addr { parameters::storage::get_implicit_vp_key() } else { Key::validity_predicate(addr) }; - self.read(&key) + match self.read(&key)? 
{ + (Some(value), gas) => { + let vp_code_hash = Hash::try_from(&value[..]) + .map_err(Error::InvalidCodeHash)?; + Ok((Some(vp_code_hash), gas)) + } + (None, gas) => Ok((None, gas)), + } } #[allow(dead_code)] @@ -611,43 +644,99 @@ where (self.block.hash.clone(), BLOCK_HASH_LENGTH as _) } + /// Get the Merkle tree with stores and diffs in the DB + /// Use `self.block.tree` if you want that of the current block height + pub fn get_merkle_tree( + &self, + height: BlockHeight, + ) -> Result> { + let (stored_height, stores) = self + .db + .read_merkle_tree_stores(height)? + .ok_or(Error::NoMerkleTree { height })?; + // Restore the tree state with diffs + let mut tree = MerkleTree::::new(stores).expect("invalid stores"); + let mut target_height = stored_height; + while target_height < height { + target_height = target_height.next_height(); + let mut old_diff_iter = self.db.iter_old_diffs(target_height); + let mut new_diff_iter = self.db.iter_new_diffs(target_height); + + let mut old_diff = old_diff_iter.next(); + let mut new_diff = new_diff_iter.next(); + loop { + match (&old_diff, &new_diff) { + (Some(old), Some(new)) => { + let old_key = Key::parse(old.0.clone()) + .expect("the key should be parsable"); + let new_key = Key::parse(new.0.clone()) + .expect("the key should be parsable"); + // compare keys as String + match old.0.cmp(&new.0) { + Ordering::Equal => { + // the value was updated + tree.update(&new_key, new.1.clone())?; + old_diff = old_diff_iter.next(); + new_diff = new_diff_iter.next(); + } + Ordering::Less => { + // the value was deleted + tree.delete(&old_key)?; + old_diff = old_diff_iter.next(); + } + Ordering::Greater => { + // the value was inserted + tree.update(&new_key, new.1.clone())?; + new_diff = new_diff_iter.next(); + } + } + } + (Some(old), None) => { + // the value was deleted + let key = Key::parse(old.0.clone()) + .expect("the key should be parsable"); + tree.delete(&key)?; + old_diff = old_diff_iter.next(); + } + (None, Some(new)) => { + // the value was inserted + let key = Key::parse(new.0.clone()) + .expect("the key should be parsable"); + tree.update(&key, new.1.clone())?; + new_diff = new_diff_iter.next(); + } + (None, None) => break, + } + } + } + Ok(tree) + } + /// Get the existence proof #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] pub fn get_existence_proof( &self, key: &Key, - value: StorageBytes, + value: merkle_tree::StorageBytes, height: BlockHeight, ) -> Result { use std::array; - if height >= self.get_block_height().0 { - let MembershipProof::ICS23(proof) = self - .block - .tree + if height > self.last_height { + Err(Error::Temporary { + error: format!( + "The block at the height {} hasn't committed yet", + height, + ), + }) + } else { + let tree = self.get_merkle_tree(height)?; + let MembershipProof::ICS23(proof) = tree .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) .map_err(Error::MerkleTreeError)?; - self.block - .tree - .get_sub_tree_proof(key, proof) + tree.get_sub_tree_proof(key, proof) .map(Into::into) .map_err(Error::MerkleTreeError) - } else { - match self.db.read_merkle_tree_stores(height)? 
{ - Some(stores) => { - let tree = MerkleTree::::new(stores); - let MembershipProof::ICS23(proof) = tree - .get_sub_tree_existence_proof( - array::from_ref(key), - vec![value], - ) - .map_err(Error::MerkleTreeError)?; - tree.get_sub_tree_proof(key, proof) - .map(Into::into) - .map_err(Error::MerkleTreeError) - } - None => Err(Error::NoMerkleTree { height }), - } } } @@ -658,20 +747,18 @@ where key: &Key, height: BlockHeight, ) -> Result { - if height >= self.last_height { - self.block - .tree + if height > self.last_height { + Err(Error::Temporary { + error: format!( + "The block at the height {} hasn't committed yet", + height, + ), + }) + } else { + self.get_merkle_tree(height)? .get_non_existence_proof(key) .map(Into::into) .map_err(Error::MerkleTreeError) - } else { - match self.db.read_merkle_tree_stores(height)? { - Some(stores) => MerkleTree::::new(stores) - .get_non_existence_proof(key) - .map(Into::into) - .map_err(Error::MerkleTreeError), - None => Err(Error::NoMerkleTree { height }), - } } } @@ -721,60 +808,15 @@ where } } - /// Initialize a new epoch when the current epoch is finished. Returns - /// `true` on a new epoch. - pub fn update_epoch( - &mut self, - height: BlockHeight, - time: DateTimeUtc, - ) -> Result { - let (parameters, _gas) = - parameters::read(self).expect("Couldn't read protocol parameters"); - - match self.update_epoch_blocks_delay.as_mut() { - None => { - // Check if the new epoch minimum start height and start time - // have been fulfilled. If so, queue the next - // epoch to start two blocks into the future so - // as to align validator set updates + etc with - // tendermint. This is because tendermint has a two block delay - // to validator changes. - let current_epoch_duration_satisfied = height - >= self.next_epoch_min_start_height - && time >= self.next_epoch_min_start_time; - if current_epoch_duration_satisfied { - self.update_epoch_blocks_delay = - Some(EPOCH_SWITCH_BLOCKS_DELAY); - } - } - Some(blocks_until_switch) => { - *blocks_until_switch -= 1; - } - }; - let new_epoch = matches!(self.update_epoch_blocks_delay, Some(0)); - - if new_epoch { - // Reset the delay tracker - self.update_epoch_blocks_delay = None; + /// Get the timestamp of the last committed block, or the current timestamp + /// if no blocks have been produced yet + pub fn get_last_block_timestamp(&self) -> Result { + let last_block_height = self.get_block_height().0; - // Begin a new epoch - self.block.epoch = self.block.epoch.next(); - let EpochDuration { - min_num_of_blocks, - min_duration, - } = parameters.epoch_duration; - self.next_epoch_min_start_height = height + min_num_of_blocks; - self.next_epoch_min_start_time = time + min_duration; - // TODO put this into PoS parameters and pass it to tendermint - // `consensus_params` on `InitChain` and `EndBlock` - let evidence_max_age_num_blocks: u64 = 100000; - self.block - .pred_epochs - .new_epoch(height, evidence_max_age_num_blocks); - tracing::info!("Began a new epoch {}", self.block.epoch); - } - self.update_epoch_in_merkle_tree()?; - Ok(new_epoch) + Ok(self + .db + .read_block_header(last_block_height)? + .map_or_else(DateTimeUtc::now, |header| header.time)) } /// Get the current conversions @@ -848,6 +890,38 @@ where self.db .batch_delete_subspace_val(batch, self.block.height, key) } + + // Prune merkle tree stores. Use after updating self.block.height in the + // commit. 
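The replay loop in `get_merkle_tree` above is a two-pointer merge of two key-sorted diff streams: a key present only in the old diffs was deleted, a key present only in the new diffs was inserted, and a key present in both was updated. A self-contained sketch of that classification, using plain vectors in place of the DB iterators (the `Change` enum is a hypothetical stand-in for the `tree.update`/`tree.delete` calls):

```rust
// Illustrative stand-in for the tree mutations applied during replay.
#[derive(Debug, PartialEq)]
enum Change {
    Inserted(Vec<u8>),
    Updated(Vec<u8>),
    Deleted,
}

// Merge two key-sorted diff streams the way the loop in the patch does:
// equal keys mean an update, a key only on the old side means a deletion,
// a key only on the new side means an insertion.
fn merge_diffs(
    old: Vec<(String, Vec<u8>)>,
    new: Vec<(String, Vec<u8>)>,
) -> Vec<(String, Change)> {
    use std::cmp::Ordering;
    let mut out = Vec::new();
    let (mut old_it, mut new_it) = (old.into_iter(), new.into_iter());
    let (mut old_diff, mut new_diff) = (old_it.next(), new_it.next());
    loop {
        match (old_diff.take(), new_diff.take()) {
            (Some(old), Some(new)) => match old.0.cmp(&new.0) {
                Ordering::Equal => {
                    // the value was updated
                    out.push((new.0, Change::Updated(new.1)));
                    old_diff = old_it.next();
                    new_diff = new_it.next();
                }
                Ordering::Less => {
                    // the value was deleted; keep the new side for later
                    out.push((old.0, Change::Deleted));
                    old_diff = old_it.next();
                    new_diff = Some(new);
                }
                Ordering::Greater => {
                    // the value was inserted; keep the old side for later
                    out.push((new.0, Change::Inserted(new.1)));
                    old_diff = Some(old);
                    new_diff = new_it.next();
                }
            },
            (Some(old), None) => {
                out.push((old.0, Change::Deleted));
                old_diff = old_it.next();
            }
            (None, Some(new)) => {
                out.push((new.0, Change::Inserted(new.1)));
                new_diff = new_it.next();
            }
            (None, None) => break,
        }
    }
    out
}
```

Applying the resulting changes height by height, starting from the last fully stored tree, reproduces the tree at the requested height, which is what makes pruning the per-height stores safe.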
+ fn prune_merkle_tree_stores( + &mut self, + batch: &mut D::WriteBatch, + ) -> Result<()> { + if let Some(limit) = self.storage_read_past_height_limit { + if self.last_height.0 <= limit { + return Ok(()); + } + + let min_height = (self.last_height.0 - limit).into(); + if let Some(epoch) = self.block.pred_epochs.get_epoch(min_height) { + if epoch.0 == 0 { + return Ok(()); + } else { + // get the start height of the previous epoch because the + // Merkle tree stores at the starting + // height of the epoch would be used + // to restore stores at a height (> min_height) in the epoch + self.db.prune_merkle_tree_stores( + batch, + epoch.prev(), + &self.block.pred_epochs, + )?; + } + } + } + + Ok(()) + } } impl From for Error { @@ -910,6 +984,7 @@ pub mod testing { #[cfg(feature = "ferveo-tpke")] tx_queue: TxQueue::default(), native_token: address::nam(), + storage_read_past_height_limit: Some(1000), } } } @@ -1036,17 +1111,17 @@ mod tests { min_blocks_delta, min_duration_delta, max_time_per_block_delta) in arb_and_epoch_duration_start_and_block()) { - let storage = TestStorage { - next_epoch_min_start_height: - start_height + epoch_duration.min_num_of_blocks, - next_epoch_min_start_time: - start_time + epoch_duration.min_duration, + let mut wl_storage = + TestWlStorage { + storage: TestStorage { + next_epoch_min_start_height: + start_height + epoch_duration.min_num_of_blocks, + next_epoch_min_start_time: + start_time + epoch_duration.min_duration, + ..Default::default() + }, ..Default::default() }; - let mut wl_storage = TestWlStorage { - write_log: Default::default(), - storage, - }; let mut parameters = Parameters { max_proposal_bytes: Default::default(), @@ -1054,7 +1129,7 @@ mod tests { max_expected_time_per_block: Duration::seconds(max_expected_time_per_block).into(), vp_whitelist: vec![], tx_whitelist: vec![], - implicit_vp: vec![], + implicit_vp_code_hash: Hash::zero(), epochs_per_year: 100, pos_gain_p: dec!(0.1), pos_gain_d: dec!(0.1), @@ -1065,14 +1140,14 @@ mod tests { #[cfg(not(feature = "mainnet"))] wrapper_tx_fees: None, }; - parameters.init_storage(&mut wl_storage.storage); + parameters.init_storage(&mut wl_storage).unwrap(); wl_storage.initalize_tokens(token::Amount::from(1000), token::Amount::from(500)); let epoch_before = wl_storage.storage.last_epoch; assert_eq!(epoch_before, wl_storage.storage.block.epoch); // Try to apply the epoch update - wl_storage.storage.update_epoch(block_height, block_time).unwrap(); + wl_storage.update_epoch(block_height, block_time).unwrap(); // Test for 1. 
if block_height.0 - start_height.0 @@ -1089,13 +1164,13 @@ mod tests { let block_height = block_height + 1; let block_time = block_time + Duration::seconds(1); - wl_storage.storage.update_epoch(block_height, block_time).unwrap(); + wl_storage.update_epoch(block_height, block_time).unwrap(); assert_eq!(wl_storage.storage.block.epoch, epoch_before); assert_eq!(wl_storage.storage.update_epoch_blocks_delay, Some(1)); let block_height = block_height + 1; let block_time = block_time + Duration::seconds(1); - wl_storage.storage.update_epoch(block_height, block_time).unwrap(); + wl_storage.update_epoch(block_height, block_time).unwrap(); assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); @@ -1130,14 +1205,8 @@ mod tests { Duration::seconds(min_duration + min_duration_delta).into(); parameters.max_expected_time_per_block = Duration::seconds(max_expected_time_per_block + max_time_per_block_delta).into(); - parameters::update_max_expected_time_per_block_parameter( - &mut wl_storage.storage, - ¶meters.max_expected_time_per_block - ).unwrap(); - parameters::update_epoch_parameter( - &mut wl_storage.storage, - ¶meters.epoch_duration - ).unwrap(); + parameters::update_max_expected_time_per_block_parameter(&mut wl_storage, ¶meters.max_expected_time_per_block).unwrap(); + parameters::update_epoch_parameter(&mut wl_storage, ¶meters.epoch_duration).unwrap(); // Test for 2. let epoch_before = wl_storage.storage.block.epoch; @@ -1149,31 +1218,31 @@ mod tests { // No update should happen before both epoch duration conditions are // satisfied - wl_storage.storage.update_epoch(height_before_update, time_before_update).unwrap(); + wl_storage.update_epoch(height_before_update, time_before_update).unwrap(); assert_eq!(wl_storage.storage.block.epoch, epoch_before); assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); - wl_storage.storage.update_epoch(height_of_update, time_before_update).unwrap(); + wl_storage.update_epoch(height_of_update, time_before_update).unwrap(); assert_eq!(wl_storage.storage.block.epoch, epoch_before); assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); - wl_storage.storage.update_epoch(height_before_update, time_of_update).unwrap(); + wl_storage.update_epoch(height_before_update, time_of_update).unwrap(); assert_eq!(wl_storage.storage.block.epoch, epoch_before); assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); // Update should be enqueued for 2 blocks in the future starting at or after this height and time - wl_storage.storage.update_epoch(height_of_update, time_of_update).unwrap(); + wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); assert_eq!(wl_storage.storage.block.epoch, epoch_before); assert_eq!(wl_storage.storage.update_epoch_blocks_delay, Some(2)); // Increment the block height and time to simulate new blocks now let height_of_update = height_of_update + 1; let time_of_update = time_of_update + Duration::seconds(1); - wl_storage.storage.update_epoch(height_of_update, time_of_update).unwrap(); + wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); assert_eq!(wl_storage.storage.block.epoch, epoch_before); assert_eq!(wl_storage.storage.update_epoch_blocks_delay, Some(1)); let height_of_update = height_of_update + 1; let time_of_update = time_of_update + Duration::seconds(1); - wl_storage.storage.update_epoch(height_of_update, time_of_update).unwrap(); + wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); 
assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); // The next epoch's minimum duration should change @@ -1185,8 +1254,9 @@ mod tests { // Increment the block height and time once more to make sure things reset let height_of_update = height_of_update + 1; let time_of_update = time_of_update + Duration::seconds(1); - wl_storage.storage.update_epoch(height_of_update, time_of_update).unwrap(); + wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); + assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); } } } diff --git a/core/src/ledger/storage/traits.rs b/core/src/ledger/storage/traits.rs index dc5c18a4a3b..232192fef1f 100644 --- a/core/src/ledger/storage/traits.rs +++ b/core/src/ledger/storage/traits.rs @@ -3,7 +3,7 @@ use std::convert::TryInto; use std::fmt; -use arse_merkle_tree::traits::{Hasher, Value}; +use arse_merkle_tree::traits::Hasher; use arse_merkle_tree::{Key as TreeKey, H256}; use ics23::commitment_proof::Proof as Ics23Proof; use ics23::{CommitmentProof, ExistenceProof}; @@ -155,7 +155,7 @@ impl<'a, H: StorageHasher + Default> SubTreeWrite for &'a mut Amt<H> { } /// The storage hasher used for the merkle tree. -pub trait StorageHasher: Hasher + Default { +pub trait StorageHasher: Hasher + fmt::Debug + Default { /// Hash the value to store fn hash(value: impl AsRef<[u8]>) -> H256; } diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index 1735083174a..82bb799f39e 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -2,12 +2,15 @@ use std::iter::Peekable; +use super::EPOCH_SWITCH_BLOCKS_DELAY; +use crate::ledger::parameters::EpochDuration; use crate::ledger::storage::write_log::{self, WriteLog}; use crate::ledger::storage::{DBIter, Storage, StorageHasher, DB}; use crate::ledger::storage_api::{ResultExt, StorageRead, StorageWrite}; -use crate::ledger::{gas, storage_api}; +use crate::ledger::{gas, parameters, storage_api}; use crate::types::address::Address; -use crate::types::storage; +use crate::types::storage::{self, BlockHeight}; +use crate::types::time::DateTimeUtc; /// Storage with write log that allows to implement prefix iterator that works /// with changes not yet committed to the DB. @@ -23,27 +26,107 @@ where pub storage: Storage<D, H>, } +/// Temporary storage that can be used for changes that will never be committed +/// to the DB. This is useful for the shell `PrepareProposal` and +/// `ProcessProposal` handlers that should not change state, but need to apply +/// storage changes for replay protection to validate the proposal. +#[derive(Debug)] +pub struct TempWlStorage<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + pub write_log: WriteLog, + /// Storage provides access to DB + pub storage: &'a Storage<D, H>, +} + +impl<'a, D, H> TempWlStorage<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Create a temp storage that can be mutated in memory, but never committed + /// to DB. + pub fn new(storage: &'a Storage<D, H>) -> Self { + Self { + write_log: WriteLog::default(), + storage, + } + } +} + +/// Common trait for [`WlStorage`] and [`TempWlStorage`], used to implement +/// storage_api traits. 
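`TempWlStorage` borrows committed storage immutably and keeps all writes in its own in-memory write log, so a proposal handler can record replay-protection entries and then simply drop them; the `WriteLogAndStorage` trait defined next is what lets `StorageRead` and `StorageWrite` be implemented once for both variants. A much simplified sketch of the overlay idea (all types here are hypothetical stand-ins for the real gas-metered ones):

```rust
use std::collections::HashMap;

// Minimal stand-ins; the real `WriteLog` and `Storage` carry gas accounting,
// Merkle trees, and a DB handle.
#[derive(Default)]
struct WriteLog(HashMap<String, Vec<u8>>);
struct Storage(HashMap<String, Vec<u8>>);

// A throwaway overlay in the spirit of `TempWlStorage`: reads fall through
// to committed storage, writes stay in the overlay and die with it.
struct TempOverlay<'a> {
    write_log: WriteLog,
    storage: &'a Storage,
}

impl<'a> TempOverlay<'a> {
    fn new(storage: &'a Storage) -> Self {
        Self { write_log: WriteLog::default(), storage }
    }

    // The write log shadows committed state, as in the real prefix iterator.
    fn read(&self, key: &str) -> Option<&Vec<u8>> {
        self.write_log.0.get(key).or_else(|| self.storage.0.get(key))
    }

    fn write(&mut self, key: &str, value: Vec<u8>) {
        self.write_log.0.insert(key.to_owned(), value);
    }
}

fn process_proposal(committed: &Storage) {
    // E.g. record a tx hash for replay protection while validating a block.
    let mut tmp = TempOverlay::new(committed);
    tmp.write("replay/abc123", vec![1]);
    assert!(tmp.read("replay/abc123").is_some());
    // `tmp` is dropped here; the committed state is untouched.
}
```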
+trait WriteLogAndStorage { + // DB type + type D: DB + for<'iter> DBIter<'iter>; + // DB hasher type + type H: StorageHasher; + + /// Borrow `WriteLog` + fn write_log(&self) -> &WriteLog; + + /// Borrow mutable `WriteLog` + fn write_log_mut(&mut self) -> &mut WriteLog; + + /// Borrow `Storage` + fn storage(&self) -> &Storage; +} + +impl WriteLogAndStorage for WlStorage +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + &self.write_log + } + + fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + fn storage(&self) -> &Storage { + &self.storage + } +} + +impl WriteLogAndStorage for TempWlStorage<'_, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + &self.write_log + } + + fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + fn storage(&self) -> &Storage { + self.storage + } +} + impl WlStorage where D: 'static + DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + H: 'static + StorageHasher, { /// Combine storage with write-log pub fn new(write_log: WriteLog, storage: Storage) -> Self { Self { write_log, storage } } - /// Commit the genesis state to DB. This should only be used before any - /// blocks are produced. - pub fn commit_genesis(&mut self) -> storage_api::Result<()> { - // Because the `impl StorageWrite for WlStorage` writes into block-level - // write log, we just commit the `block_write_log`, but without - // committing an actual block in storage - self.write_log - .commit_block(&mut self.storage) - .into_storage_result() - } - /// Commit the current transaction's write log to the block when it's /// accepted by all the triggered validity predicates. Starts a new /// transaction write log. @@ -60,10 +143,70 @@ where /// Commit the current block's write log to the storage and commit the block /// to DB. Starts a new block write log. pub fn commit_block(&mut self) -> storage_api::Result<()> { + let mut batch = D::batch(); self.write_log - .commit_block(&mut self.storage) + .commit_block(&mut self.storage, &mut batch) .into_storage_result()?; - self.storage.commit_block().into_storage_result() + self.storage.commit_block(batch).into_storage_result() + } + + /// Initialize a new epoch when the current epoch is finished. Returns + /// `true` on a new epoch. + pub fn update_epoch( + &mut self, + height: BlockHeight, + time: DateTimeUtc, + ) -> crate::ledger::storage::Result { + let parameters = + parameters::read(self).expect("Couldn't read protocol parameters"); + + match self.storage.update_epoch_blocks_delay.as_mut() { + None => { + // Check if the new epoch minimum start height and start time + // have been fulfilled. If so, queue the next + // epoch to start two blocks into the future so + // as to align validator set updates + etc with + // tendermint. This is because tendermint has a two block delay + // to validator changes. 
+ let current_epoch_duration_satisfied = height + >= self.storage.next_epoch_min_start_height + && time >= self.storage.next_epoch_min_start_time; + if current_epoch_duration_satisfied { + self.storage.update_epoch_blocks_delay = + Some(EPOCH_SWITCH_BLOCKS_DELAY); + } + } + Some(blocks_until_switch) => { + *blocks_until_switch -= 1; + } + }; + let new_epoch = + matches!(self.storage.update_epoch_blocks_delay, Some(0)); + + if new_epoch { + // Reset the delay tracker + self.storage.update_epoch_blocks_delay = None; + + // Begin a new epoch + self.storage.block.epoch = self.storage.block.epoch.next(); + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.storage.next_epoch_min_start_height = + height + min_num_of_blocks; + self.storage.next_epoch_min_start_time = time + min_duration; + // TODO put this into PoS parameters and pass it to tendermint + // `consensus_params` on `InitChain` and `EndBlock` + let evidence_max_age_num_blocks: u64 = 100000; + self.storage + .block + .pred_epochs + .new_epoch(height, evidence_max_age_num_blocks); + tracing::info!("Began a new epoch {}", self.storage.block.epoch); + } + self.storage.update_epoch_in_merkle_tree()?; + Ok(new_epoch) } } @@ -156,10 +299,9 @@ where what = Next::ReturnStorage; } (Some((storage_key, _, _)), Some((wl_key, _))) => { - let wl_key = wl_key.to_string(); - if &wl_key <= storage_key { + if wl_key <= storage_key { what = Next::ReturnWl { - advance_storage: &wl_key == storage_key, + advance_storage: wl_key == storage_key, }; } else { what = Next::ReturnStorage; @@ -183,10 +325,10 @@ where return Some((key, value, gas)); } write_log::StorageModification::InitAccount { - vp, + vp_code_hash, } => { - let gas = vp.len() as u64; - return Some((key, vp, gas)); + let gas = vp_code_hash.len() as u64; + return Some((key, vp_code_hash.to_vec(), gas)); } write_log::StorageModification::Delete => { continue; @@ -204,10 +346,11 @@ where } } -impl StorageRead for WlStorage +impl StorageRead for T where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + T: WriteLogAndStorage, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, { type PrefixIter<'iter> = PrefixIter<'iter, D> where Self: 'iter; @@ -216,29 +359,31 @@ where key: &storage::Key, ) -> storage_api::Result>> { // try to read from the write log first - let (log_val, _gas) = self.write_log.read(key); + let (log_val, _gas) = self.write_log().read(key); match log_val { - Some(&write_log::StorageModification::Write { ref value }) => { + Some(write_log::StorageModification::Write { ref value }) => { Ok(Some(value.clone())) } - Some(&write_log::StorageModification::Delete) => Ok(None), - Some(&write_log::StorageModification::InitAccount { - ref vp, - .. 
- }) => Ok(Some(vp.clone())), - Some(&write_log::StorageModification::Temp { ref value }) => { + Some(write_log::StorageModification::Delete) => Ok(None), + Some(write_log::StorageModification::InitAccount { + ref vp_code_hash, + }) => Ok(Some(vp_code_hash.to_vec())), + Some(write_log::StorageModification::Temp { ref value }) => { Ok(Some(value.clone())) } None => { // when not found in write log, try to read from the storage - self.storage.db.read_subspace_val(key).into_storage_result() + self.storage() + .db + .read_subspace_val(key) + .into_storage_result() } } } fn has_key(&self, key: &storage::Key) -> storage_api::Result { // try to read from the write log first - let (log_val, _gas) = self.write_log.read(key); + let (log_val, _gas) = self.write_log().read(key); match log_val { Some(&write_log::StorageModification::Write { .. }) | Some(&write_log::StorageModification::InitAccount { .. }) @@ -249,7 +394,7 @@ where } None => { // when not found in write log, try to check the storage - self.storage.block.tree.has_key(key).into_storage_result() + self.storage().block.tree.has_key(key).into_storage_result() } } } @@ -259,7 +404,7 @@ where prefix: &storage::Key, ) -> storage_api::Result> { let (iter, _gas) = - iter_prefix_post(&self.write_log, &self.storage, prefix); + iter_prefix_post(self.write_log(), self.storage(), prefix); Ok(iter) } @@ -271,40 +416,51 @@ where } fn get_chain_id(&self) -> std::result::Result { - Ok(self.storage.chain_id.to_string()) + Ok(self.storage().chain_id.to_string()) } fn get_block_height( &self, ) -> std::result::Result { - Ok(self.storage.block.height) + Ok(self.storage().block.height) + } + + fn get_block_header( + &self, + height: storage::BlockHeight, + ) -> std::result::Result, storage_api::Error> { + self.storage() + .db + .read_block_header(height) + .into_storage_result() } fn get_block_hash( &self, ) -> std::result::Result { - Ok(self.storage.block.hash.clone()) + Ok(self.storage().block.hash.clone()) } fn get_block_epoch( &self, ) -> std::result::Result { - Ok(self.storage.block.epoch) + Ok(self.storage().block.epoch) } fn get_tx_index( &self, ) -> std::result::Result { - Ok(self.storage.tx_index) + Ok(self.storage().tx_index) } fn get_native_token(&self) -> storage_api::Result
{ - Ok(self.storage.native_token.clone()) + Ok(self.storage().native_token.clone()) } } -impl StorageWrite for WlStorage +impl StorageWrite for T where + T: WriteLogAndStorage, D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, { @@ -313,13 +469,19 @@ where key: &storage::Key, val: impl AsRef<[u8]>, ) -> storage_api::Result<()> { - self.write_log + let _ = self + .write_log_mut() .protocol_write(key, val.as_ref().to_vec()) - .into_storage_result() + .into_storage_result(); + Ok(()) } fn delete(&mut self, key: &storage::Key) -> storage_api::Result<()> { - self.write_log.protocol_delete(key).into_storage_result() + let _ = self + .write_log_mut() + .protocol_delete(key) + .into_storage_result(); + Ok(()) } } diff --git a/core/src/ledger/storage/write_log.rs b/core/src/ledger/storage/write_log.rs index 739a5102e16..bf4881aab0f 100644 --- a/core/src/ledger/storage/write_log.rs +++ b/core/src/ledger/storage/write_log.rs @@ -9,6 +9,7 @@ use thiserror::Error; use crate::ledger; use crate::ledger::storage::{Storage, StorageHasher}; use crate::types::address::{Address, EstablishedAddressGen}; +use crate::types::hash::Hash; use crate::types::ibc::IbcEvent; use crate::types::storage; @@ -44,11 +45,11 @@ pub enum StorageModification { /// Delete an existing key-value Delete, /// Initialize a new account with established address and a given validity - /// predicate. The key for `InitAccount` inside the [`WriteLog`] must point - /// to its validity predicate. + /// predicate hash. The key for `InitAccount` inside the [`WriteLog`] must + /// point to its validity predicate. InitAccount { - /// Validity predicate bytes - vp: Vec, + /// Validity predicate hash bytes + vp_code_hash: Hash, }, /// Temporary value. This value will be never written to the storage. After /// writing a temporary value, it can't be mutated with normal write. @@ -68,8 +69,8 @@ pub struct WriteLog { block_write_log: HashMap, /// The storage modifications for the current transaction tx_write_log: HashMap, - /// The IBC event for the current transaction - ibc_event: Option, + /// The IBC events for the current transaction + ibc_events: BTreeSet, } /// Write log prefix iterator @@ -94,7 +95,7 @@ impl Default for WriteLog { address_gen: None, block_write_log: HashMap::with_capacity(100_000), tx_write_log: HashMap::with_capacity(100), - ibc_event: None, + ibc_events: BTreeSet::new(), } } } @@ -117,8 +118,8 @@ impl WriteLog { key.len() + value.len() } StorageModification::Delete => key.len(), - StorageModification::InitAccount { ref vp } => { - key.len() + vp.len() + StorageModification::InitAccount { ref vp_code_hash } => { + key.len() + vp_code_hash.len() } StorageModification::Temp { ref value } => { key.len() + value.len() @@ -145,8 +146,8 @@ impl WriteLog { key.len() + value.len() } StorageModification::Delete => key.len(), - StorageModification::InitAccount { ref vp } => { - key.len() + vp.len() + StorageModification::InitAccount { ref vp_code_hash } => { + key.len() + vp_code_hash.len() } StorageModification::Temp { ref value } => { key.len() + value.len() @@ -313,7 +314,7 @@ impl WriteLog { pub fn init_account( &mut self, storage_address_gen: &EstablishedAddressGen, - vp: Vec, + vp_code_hash: Hash, ) -> (Address, u64) { // If we've previously generated a new account, we use the local copy of // the generator. 
Otherwise, we create a new copy from the storage @@ -322,19 +323,19 @@ impl WriteLog { let addr = address_gen.generate_address("TODO more randomness".as_bytes()); let key = storage::Key::validity_predicate(&addr); - let gas = (key.len() + vp.len()) as _; + let gas = (key.len() + vp_code_hash.len()) as _; self.tx_write_log - .insert(key, StorageModification::InitAccount { vp }); + .insert(key, StorageModification::InitAccount { vp_code_hash }); (addr, gas) } /// Set an IBC event and return the gas cost. - pub fn set_ibc_event(&mut self, event: IbcEvent) -> u64 { + pub fn emit_ibc_event(&mut self, event: IbcEvent) -> u64 { let len = event .attributes .iter() .fold(0, |acc, (k, v)| acc + k.len() + v.len()); - self.ibc_event = Some(event); + self.ibc_events.insert(event); len as _ } @@ -381,13 +382,13 @@ impl WriteLog { } /// Take the IBC event of the current transaction - pub fn take_ibc_event(&mut self) -> Option { - self.ibc_event.take() + pub fn take_ibc_events(&mut self) -> BTreeSet { + std::mem::take(&mut self.ibc_events) } /// Get the IBC event of the current transaction - pub fn get_ibc_event(&self) -> Option<&IbcEvent> { - self.ibc_event.as_ref() + pub fn get_ibc_events(&self) -> &BTreeSet { + &self.ibc_events } /// Commit the current transaction's write log to the block when it's @@ -402,7 +403,7 @@ impl WriteLog { HashMap::with_capacity(100), ); self.block_write_log.extend(tx_write_log); - self.take_ibc_event(); + self.take_ibc_events(); } /// Drop the current transaction's write log when it's declined by any of @@ -416,6 +417,7 @@ impl WriteLog { pub fn commit_block( &mut self, storage: &mut Storage, + batch: &mut DB::WriteBatch, ) -> Result<()> where DB: 'static @@ -423,33 +425,31 @@ impl WriteLog { + for<'iter> ledger::storage::DBIter<'iter>, H: StorageHasher, { - let mut batch = Storage::::batch(); for (key, entry) in self.block_write_log.iter() { match entry { StorageModification::Write { value } => { storage - .batch_write_subspace_val( - &mut batch, - key, - value.clone(), - ) + .batch_write_subspace_val(batch, key, value.clone()) .map_err(Error::StorageError)?; } StorageModification::Delete => { storage - .batch_delete_subspace_val(&mut batch, key) + .batch_delete_subspace_val(batch, key) .map_err(Error::StorageError)?; } - StorageModification::InitAccount { vp } => { + StorageModification::InitAccount { vp_code_hash } => { storage - .batch_write_subspace_val(&mut batch, key, vp.clone()) + .batch_write_subspace_val( + batch, + key, + vp_code_hash.clone(), + ) .map_err(Error::StorageError)?; } // temporary value isn't persisted StorageModification::Temp { .. 
} => {} } } - storage.exec_batch(batch).map_err(Error::StorageError)?; if let Some(address_gen) = self.address_gen.take() { storage.address_gen = address_gen } @@ -535,6 +535,7 @@ mod tests { use proptest::prelude::*; use super::*; + use crate::types::hash::Hash; use crate::types::{address, storage}; #[test] @@ -607,17 +608,20 @@ mod tests { // init let init_vp = "initialized".as_bytes().to_vec(); - let (addr, gas) = write_log.init_account(&address_gen, init_vp.clone()); + let vp_hash = Hash::sha256(init_vp); + let (addr, gas) = write_log.init_account(&address_gen, vp_hash.clone()); let vp_key = storage::Key::validity_predicate(&addr); - assert_eq!(gas, (vp_key.len() + init_vp.len()) as u64); + assert_eq!(gas, (vp_key.len() + vp_hash.len()) as u64); // read let (value, gas) = write_log.read(&vp_key); match value.expect("no read value") { - StorageModification::InitAccount { vp } => assert_eq!(*vp, init_vp), + StorageModification::InitAccount { vp_code_hash } => { + assert_eq!(*vp_code_hash, vp_hash) + } _ => panic!("unexpected result"), } - assert_eq!(gas, (vp_key.len() + init_vp.len()) as u64); + assert_eq!(gas, (vp_key.len() + vp_hash.len()) as u64); // get all let (_changed_keys, init_accounts) = write_log.get_partitioned_keys(); @@ -631,12 +635,16 @@ mod tests { let address_gen = EstablishedAddressGen::new("test"); let init_vp = "initialized".as_bytes().to_vec(); - let (addr, _) = write_log.init_account(&address_gen, init_vp); + let vp_hash = Hash::sha256(init_vp); + let (addr, _) = write_log.init_account(&address_gen, vp_hash); let vp_key = storage::Key::validity_predicate(&addr); // update should fail let updated_vp = "updated".as_bytes().to_vec(); - let result = write_log.write(&vp_key, updated_vp).unwrap_err(); + let updated_vp_hash = Hash::sha256(updated_vp); + let result = write_log + .write(&vp_key, updated_vp_hash.to_vec()) + .unwrap_err(); assert_matches!(result, Error::UpdateVpOfNewAccount); } @@ -646,7 +654,8 @@ mod tests { let address_gen = EstablishedAddressGen::new("test"); let init_vp = "initialized".as_bytes().to_vec(); - let (addr, _) = write_log.init_account(&address_gen, init_vp); + let vp_hash = Hash::sha256(init_vp); + let (addr, _) = write_log.init_account(&address_gen, vp_hash); let vp_key = storage::Key::validity_predicate(&addr); // delete should fail @@ -670,6 +679,7 @@ mod tests { let mut storage = crate::ledger::storage::testing::TestStorage::default(); let mut write_log = WriteLog::default(); + let mut batch = crate::ledger::storage::testing::TestStorage::batch(); let address_gen = EstablishedAddressGen::new("test"); let key1 = @@ -682,7 +692,7 @@ mod tests { storage::Key::parse("key4").expect("cannot parse the key string"); // initialize an account - let vp1 = "vp1".as_bytes().to_vec(); + let vp1 = Hash::sha256("vp1".as_bytes()); let (addr1, _) = write_log.init_account(&address_gen, vp1.clone()); write_log.commit_tx(); @@ -708,11 +718,13 @@ mod tests { write_log.commit_tx(); // commit a block - write_log.commit_block(&mut storage).expect("commit failed"); + write_log + .commit_block(&mut storage, &mut batch) + .expect("commit failed"); - let (vp, _gas) = + let (vp_code_hash, _gas) = storage.validity_predicate(&addr1).expect("vp read failed"); - assert_eq!(vp, Some(vp1)); + assert_eq!(vp_code_hash, Some(vp1)); let (value, _) = storage.read(&key1).expect("read failed"); assert_eq!(value.expect("no read value"), val1); let (value, _) = storage.read(&key2).expect("read failed"); @@ -790,6 +802,7 @@ pub mod testing { use super::*; use 
crate::types::address::testing::arb_address; + use crate::types::hash::HASH_LENGTH; use crate::types::storage::testing::arb_key; /// Generate an arbitrary tx write log of [`HashMap>() .prop_map(|value| StorageModification::Write { value }), Just(StorageModification::Delete), - any::>() - .prop_map(|vp| StorageModification::InitAccount { vp }), + any::<[u8; HASH_LENGTH]>().prop_map(|hash| { + StorageModification::InitAccount { + vp_code_hash: Hash(hash), + } + }), any::>() .prop_map(|value| StorageModification::Temp { value }), ] diff --git a/core/src/ledger/storage_api/collections/lazy_map.rs b/core/src/ledger/storage_api/collections/lazy_map.rs index 80072f24864..c1e8ae6dbf4 100644 --- a/core/src/ledger/storage_api/collections/lazy_map.rs +++ b/core/src/ledger/storage_api/collections/lazy_map.rs @@ -40,7 +40,7 @@ pub struct LazyMap { pub type NestedMap = LazyMap; /// Possible sub-keys of a [`LazyMap`] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub enum SubKey { /// Data sub-key, further sub-keyed by its literal map key Data(K), @@ -81,7 +81,7 @@ pub enum NestedAction { } /// Possible sub-keys of a nested [`LazyMap`] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub enum NestedSubKey { /// Data sub-key Data { @@ -141,33 +141,58 @@ where Some(Some(suffix)) => suffix, }; + // A helper to validate the 2nd key segment + let validate_sub_key = |raw_sub_key| { + if let Ok(key_in_kv) = storage::KeySeg::parse(raw_sub_key) { + let nested = self.at(&key_in_kv).is_valid_sub_key(key)?; + match nested { + Some(nested_sub_key) => Ok(Some(NestedSubKey::Data { + key: key_in_kv, + nested_sub_key, + })), + None => { + Err(ValidationError::InvalidNestedSubKey(key.clone())) + .into_storage_result() + } + } + } else { + Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result() + } + }; + // Match the suffix against expected sub-keys match &suffix.segments[..2] { [DbKeySeg::StringSeg(sub_a), DbKeySeg::StringSeg(sub_b)] if sub_a == DATA_SUBKEY => { - if let Ok(key_in_kv) = storage::KeySeg::parse(sub_b.clone()) { - let nested = self.at(&key_in_kv).is_valid_sub_key(key)?; - match nested { - Some(nested_sub_key) => Ok(Some(NestedSubKey::Data { - key: key_in_kv, - nested_sub_key, - })), - None => Err(ValidationError::InvalidNestedSubKey( - key.clone(), - )) - .into_storage_result(), - } - } else { - Err(ValidationError::InvalidSubKey(key.clone())) - .into_storage_result() - } + validate_sub_key(sub_b.clone()) + } + [DbKeySeg::StringSeg(sub_a), DbKeySeg::AddressSeg(sub_b)] + if sub_a == DATA_SUBKEY => + { + validate_sub_key(sub_b.raw()) } _ => Err(ValidationError::InvalidSubKey(key.clone())) .into_storage_result(), } } + fn is_data_sub_key(&self, key: &storage::Key) -> bool { + let sub_key = self.is_valid_sub_key(key); + match sub_key { + Ok(Some(NestedSubKey::Data { + key: parsed_key, + nested_sub_key: _, + })) => { + let sub = self.at(&parsed_key); + // Check in the nested collection + sub.is_data_sub_key(key) + } + _ => false, + } + } + fn read_sub_key_data( env: &ENV, storage_key: &storage::Key, @@ -266,23 +291,37 @@ where Some(Some(suffix)) => suffix, }; + // A helper to validate the 2nd key segment + let validate_sub_key = |raw_sub_key| { + if let Ok(key_in_kv) = storage::KeySeg::parse(raw_sub_key) { + Ok(Some(SubKey::Data(key_in_kv))) + } else { + Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result() + } + }; + // Match the suffix against expected sub-keys match &suffix.segments[..] 
{ [DbKeySeg::StringSeg(sub_a), DbKeySeg::StringSeg(sub_b)] if sub_a == DATA_SUBKEY => { - if let Ok(key_in_kv) = storage::KeySeg::parse(sub_b.clone()) { - Ok(Some(SubKey::Data(key_in_kv))) - } else { - Err(ValidationError::InvalidSubKey(key.clone())) - .into_storage_result() - } + validate_sub_key(sub_b.clone()) + } + [DbKeySeg::StringSeg(sub_a), DbKeySeg::AddressSeg(sub_b)] + if sub_a == DATA_SUBKEY => + { + validate_sub_key(sub_b.raw()) } _ => Err(ValidationError::InvalidSubKey(key.clone())) .into_storage_result(), } } + fn is_data_sub_key(&self, key: &storage::Key) -> bool { + matches!(self.is_valid_sub_key(key), Ok(Some(_))) + } + fn read_sub_key_data( env: &ENV, storage_key: &storage::Key, @@ -372,7 +411,11 @@ where )>, > + 'iter, > { - let iter = storage_api::iter_prefix(storage, &self.get_data_prefix())?; + let iter = storage_api::iter_prefix_with_filter( + storage, + &self.get_data_prefix(), + |key| self.is_data_sub_key(key), + )?; Ok(iter.map(|key_val_res| { let (key, val) = key_val_res?; let sub_key = LazyCollection::is_valid_sub_key(self, &key)? @@ -523,6 +566,7 @@ where mod test { use super::*; use crate::ledger::storage::testing::TestWlStorage; + use crate::types::address::{self, Address}; #[test] fn test_lazy_map_basics() -> storage_api::Result<()> { @@ -533,7 +577,7 @@ mod test { // The map should be empty at first assert!(lazy_map.is_empty(&storage)?); - assert!(lazy_map.len(&storage)? == 0); + assert_eq!(lazy_map.len(&storage)?, 0); assert!(!lazy_map.contains(&storage, &0)?); assert!(!lazy_map.contains(&storage, &1)?); assert!(lazy_map.iter(&storage)?.next().is_none()); @@ -552,7 +596,7 @@ mod test { assert!(!lazy_map.contains(&storage, &0)?); assert!(lazy_map.contains(&storage, &key)?); assert!(!lazy_map.is_empty(&storage)?); - assert!(lazy_map.len(&storage)? == 2); + assert_eq!(lazy_map.len(&storage)?, 2); let mut map_it = lazy_map.iter(&storage)?; assert_eq!(map_it.next().unwrap()?, (key, val.clone())); assert_eq!(map_it.next().unwrap()?, (key2, val2.clone())); @@ -566,7 +610,7 @@ mod test { let removed = lazy_map.remove(&mut storage, &key)?.unwrap(); assert_eq!(removed, val); assert!(!lazy_map.is_empty(&storage)?); - assert!(lazy_map.len(&storage)? == 1); + assert_eq!(lazy_map.len(&storage)?, 1); assert!(!lazy_map.contains(&storage, &0)?); assert!(!lazy_map.contains(&storage, &1)?); assert!(!lazy_map.contains(&storage, &123)?); @@ -579,7 +623,120 @@ mod test { let removed = lazy_map.remove(&mut storage, &key2)?.unwrap(); assert_eq!(removed, val2); assert!(lazy_map.is_empty(&storage)?); - assert!(lazy_map.len(&storage)? 
== 0); + assert_eq!(lazy_map.len(&storage)?, 0); + + let storage_key = lazy_map.get_data_key(&key); + assert_eq!( + lazy_map.is_valid_sub_key(&storage_key).unwrap(), + Some(SubKey::Data(key)) + ); + + let storage_key2 = lazy_map.get_data_key(&key2); + assert_eq!( + lazy_map.is_valid_sub_key(&storage_key2).unwrap(), + Some(SubKey::Data(key2)) + ); + + Ok(()) + } + + #[test] + fn test_lazy_map_with_addr_key() -> storage_api::Result<()> { + let mut storage = TestWlStorage::default(); + + let key = storage::Key::parse("test").unwrap(); + let lazy_map = LazyMap::::open(key); + + // Insert a new value and check that it's added + let (key, val) = ( + address::testing::established_address_1(), + "Test".to_string(), + ); + lazy_map.insert(&mut storage, key.clone(), val.clone())?; + + assert_eq!(lazy_map.len(&storage)?, 1); + let mut map_it = lazy_map.iter(&storage)?; + assert_eq!(map_it.next().unwrap()?, (key.clone(), val.clone())); + drop(map_it); + + let (key2, val2) = ( + address::testing::established_address_2(), + "Test2".to_string(), + ); + lazy_map.insert(&mut storage, key2.clone(), val2.clone())?; + + assert_eq!(lazy_map.len(&storage)?, 2); + let mut map_it = lazy_map.iter(&storage)?; + assert!(key < key2, "sanity check - this influences the iter order"); + assert_eq!(map_it.next().unwrap()?, (key.clone(), val)); + assert_eq!(map_it.next().unwrap()?, (key2.clone(), val2)); + assert!(map_it.next().is_none()); + drop(map_it); + + let storage_key = lazy_map.get_data_key(&key); + assert_eq!( + lazy_map.is_valid_sub_key(&storage_key).unwrap(), + Some(SubKey::Data(key)) + ); + + let storage_key2 = lazy_map.get_data_key(&key2); + assert_eq!( + lazy_map.is_valid_sub_key(&storage_key2).unwrap(), + Some(SubKey::Data(key2)) + ); + + Ok(()) + } + + #[test] + fn test_nested_lazy_map_with_addr_key() -> storage_api::Result<()> { + let mut storage = TestWlStorage::default(); + + let key = storage::Key::parse("test").unwrap(); + let lazy_map = NestedMap::>::open(key); + + // Insert a new value and check that it's added + let (key, sub_key, val) = ( + address::testing::established_address_1(), + 1_u64, + "Test".to_string(), + ); + lazy_map + .at(&key) + .insert(&mut storage, sub_key, val.clone())?; + + assert_eq!(lazy_map.at(&key).len(&storage)?, 1); + let mut map_it = lazy_map.iter(&storage)?; + let expected_key = NestedSubKey::Data { + key: key.clone(), + nested_sub_key: SubKey::Data(sub_key), + }; + assert_eq!( + map_it.next().unwrap()?, + (expected_key.clone(), val.clone()) + ); + drop(map_it); + + let (key2, sub_key2, val2) = ( + address::testing::established_address_2(), + 2_u64, + "Test2".to_string(), + ); + lazy_map + .at(&key2) + .insert(&mut storage, sub_key2, val2.clone())?; + + assert_eq!(lazy_map.at(&key2).len(&storage)?, 1); + let mut map_it = lazy_map.iter(&storage)?; + assert!(key < key2, "sanity check - this influences the iter order"); + let expected_key2 = NestedSubKey::Data { + key: key2, + nested_sub_key: SubKey::Data(sub_key2), + }; + assert_eq!(map_it.next().unwrap()?, (expected_key, val)); + assert_eq!(map_it.next().unwrap()?, (expected_key2, val2)); + assert!(map_it.next().is_none()); + drop(map_it); Ok(()) } diff --git a/core/src/ledger/storage_api/collections/lazy_set.rs b/core/src/ledger/storage_api/collections/lazy_set.rs new file mode 100644 index 00000000000..038b7a87d05 --- /dev/null +++ b/core/src/ledger/storage_api/collections/lazy_set.rs @@ -0,0 +1,399 @@ +//! Lazy set. 
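The new `lazy_set` module added here completes the lazy collections: each element is stored as its own sub-key under the set's storage key with the unit value `()`, so membership is the only payload. The following usage sketch mirrors the module's own unit tests further down; `TestWlStorage` and all the `LazySet` methods it calls are introduced in this patch:

```rust
fn lazy_set_demo() -> storage_api::Result<()> {
    let mut storage = TestWlStorage::default();
    let set = LazySet::<u64>::open(storage::Key::parse("demo").unwrap());

    set.insert(&mut storage, 7)?; // plain insert; reports prior presence
    set.try_insert(&mut storage, 9)?; // errors if the key is already present
    assert!(set.contains(&storage, &7)?);
    assert_eq!(set.len(&storage)?, 2);

    assert!(set.remove(&mut storage, &7)?);
    assert!(!set.contains(&storage, &7)?);
    Ok(())
}
```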
+ +use std::fmt::Debug; +use std::marker::PhantomData; + +use thiserror::Error; + +use super::super::Result; +use super::{LazyCollection, ReadError}; +use crate::ledger::storage_api::{self, ResultExt, StorageRead, StorageWrite}; +use crate::ledger::vp_env::VpEnv; +use crate::types::storage::{self, DbKeySeg, KeySeg}; + +/// A lazy set. +/// +/// This can be used as an alternative to `std::collections::HashSet` and +/// `BTreeSet`. In the lazy set, the elements do not reside in memory but are +/// instead read and written to storage sub-keys of the storage `key` used to +/// construct the set. +/// +/// In the [`LazySet`], the type of key `K` can be anything that implements +/// [`storage::KeySeg`], and this trait is used to turn the keys into key +/// segments. +#[derive(Debug)] +pub struct LazySet { + key: storage::Key, + phantom_k: PhantomData, +} + +/// Possible sub-keys of a [`LazySet`] +#[derive(Clone, Debug, PartialEq)] +pub enum SubKey { + /// Literal set key + Data(K), +} + +/// Possible actions that can modify a [`LazySet`]. This roughly corresponds to +/// the methods that have `StorageWrite` access. +#[derive(Clone, Debug)] +pub enum Action { + /// Insert a key `K` in a [`LazySet`]. + Insert(K), + /// Remove a key `K` from a [`LazySet`]. + Remove(K), +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ValidationError { + #[error("Invalid storage key {0}")] + InvalidSubKey(storage::Key), +} + +/// [`LazySet`] validation result +pub type ValidationResult = std::result::Result; + +impl LazyCollection for LazySet +where + K: storage::KeySeg + Debug, +{ + type Action = Action; + type SubKey = SubKey; + type SubKeyWithData = Action; + type Value = (); + + /// Create or use an existing map with the given storage `key`. + fn open(key: storage::Key) -> Self { + Self { + key, + phantom_k: PhantomData, + } + } + + fn is_valid_sub_key( + &self, + key: &storage::Key, + ) -> storage_api::Result> { + let suffix = match key.split_prefix(&self.key) { + None => { + // not matching prefix, irrelevant + return Ok(None); + } + Some(None) => { + // no suffix, invalid + return Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result(); + } + Some(Some(suffix)) => suffix, + }; + + // A helper to validate the 2nd key segment + let validate_sub_key = |raw_sub_key| { + if let Ok(key) = storage::KeySeg::parse(raw_sub_key) { + Ok(Some(SubKey::Data(key))) + } else { + Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result() + } + }; + + // Match the suffix against expected sub-keys + match &suffix.segments[..] { + [DbKeySeg::StringSeg(sub)] => validate_sub_key(sub.clone()), + [DbKeySeg::AddressSeg(sub)] => validate_sub_key(sub.raw()), + _ => Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result(), + } + } + + fn is_data_sub_key(&self, key: &storage::Key) -> bool { + matches!(self.is_valid_sub_key(key), Ok(Some(_))) + } + + fn read_sub_key_data( + env: &ENV, + storage_key: &storage::Key, + sub_key: Self::SubKey, + ) -> storage_api::Result> + where + ENV: for<'a> VpEnv<'a>, + { + let SubKey::Data(key) = sub_key; + determine_action(env, storage_key, key) + } + + fn validate_changed_sub_keys( + keys: Vec, + ) -> storage_api::Result> { + Ok(keys) + } +} + +// `LazySet` methods +impl LazySet +where + K: storage::KeySeg, +{ + /// Returns whether the set contains a value. 
+ pub fn contains<S>(&self, storage: &S, key: &K) -> Result<bool> + where + S: StorageRead, + { + storage.has_key(&self.get_key(key)) + } + + /// Get the storage sub-key of a given raw key + pub fn get_key(&self, key: &K) -> storage::Key { + let key_str = key.to_db_key(); + self.key.push(&key_str).unwrap() + } + + /// Inserts a key into the set. + /// + /// If the set did not have this key present, `false` is returned. + /// If the set did have this key present, `true` is returned. Unlike in + /// `std::collections::HashSet`, the key is also updated; this matters + /// for types that can be `==` without being identical. + pub fn insert<S>(&self, storage: &mut S, key: K) -> Result<bool> + where + S: StorageWrite + StorageRead, + { + let present = self.contains(storage, &key)?; + + let key = self.get_key(&key); + storage.write(&key, ())?; + + Ok(present) + } + + /// Tries to insert a key into the set. + /// + /// An error is returned if the key is already present. + pub fn try_insert<S>(&self, storage: &mut S, key: K) -> Result<()> + where + S: StorageWrite + StorageRead, + { + let present = self.contains(storage, &key)?; + if present { + return Err(storage_api::Error::new_const("Occupied")); + } + + let key = self.get_key(&key); + storage.write(&key, ()) + } + + /// Removes a key from the set, returning `true` if the key + /// was in the set. + pub fn remove<S>(&self, storage: &mut S, key: &K) -> Result<bool> + where + S: StorageWrite + StorageRead, + { + let present = self.contains(storage, key)?; + + let key = self.get_key(key); + storage.delete(&key)?; + + Ok(present) + } + + /// Returns whether the set contains no elements. + pub fn is_empty<S>(&self, storage: &S) -> Result<bool> + where + S: StorageRead, + { + let mut iter = storage_api::iter_prefix_bytes(storage, &self.key)?; + Ok(iter.next().is_none()) + } + + /// Reads the number of elements in the set. + /// + /// Note that this function shouldn't be used in transactions and VPs code + /// on unbounded sets to avoid gas usage increasing with the length of the + /// set. + #[allow(clippy::len_without_is_empty)] + pub fn len<S>(&self, storage: &S) -> Result<u64> + where + S: StorageRead, + { + let iter = storage_api::iter_prefix_bytes(storage, &self.key)?; + iter.count().try_into().into_storage_result() + } + + /// An iterator visiting all keys. The iterator element type is `Result<K>`, + /// because the iterator's call to `next` may fail with e.g. out of gas. + /// + /// Note that this function shouldn't be used in transactions and VPs code + /// on unbounded sets to avoid gas usage increasing with the length of the + /// set. 
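Both `len` above and the `iter` method that follows scan every sub-key, so their gas cost grows with the set size, which is exactly why the doc comments warn against calling them from transactions and VPs on unbounded sets. One hypothetical way to keep a scan gas-bounded, assuming the `iter` API shown in this listing:

```rust
// Hypothetical helper, not part of this patch: inspect at most `max`
// elements so the cost stays bounded even if the set can be grown by anyone.
fn first_n<S: StorageRead>(
    set: &LazySet<u64>,
    storage: &S,
    max: usize,
) -> storage_api::Result<Vec<u64>> {
    set.iter(storage)?.take(max).collect()
}
```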
+    pub fn iter<'iter>(
+        &self,
+        storage: &'iter impl StorageRead,
+    ) -> Result<impl Iterator<Item = Result<K>> + 'iter> {
+        let iter = storage_api::iter_prefix(storage, &self.key)?;
+        Ok(iter.map(|key_val_res| {
+            let (key, ()) = key_val_res?;
+            let last_key_seg = key
+                .last()
+                .ok_or(ReadError::UnexpectedlyEmptyStorageKey)
+                .into_storage_result()?;
+            let key = K::parse(last_key_seg.raw()).into_storage_result()?;
+            Ok(key)
+        }))
+    }
+}
+
+/// Determine what action was taken from the pre/post state
+pub fn determine_action<ENV, K>(
+    env: &ENV,
+    storage_key: &storage::Key,
+    parsed_key: K,
+) -> storage_api::Result<Option<Action<K>>>
+where
+    ENV: for<'a> VpEnv<'a>,
+{
+    let pre = env.read_pre(storage_key)?;
+    let post = env.read_post(storage_key)?;
+    Ok(match (pre, post) {
+        (None, None) => {
+            // If the key was inserted and then deleted in the same tx, we
+            // don't need to validate it as it's not visible to any VPs
+            None
+        }
+        (None, Some(())) => Some(Action::Insert(parsed_key)),
+        (Some(()), None) => Some(Action::Remove(parsed_key)),
+        (Some(()), Some(())) => {
+            // Because the value for set is a unit, we can skip this too
+            None
+        }
+    })
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::ledger::storage::testing::TestWlStorage;
+    use crate::types::address::{self, Address};
+
+    #[test]
+    fn test_lazy_set_basics() -> storage_api::Result<()> {
+        let mut storage = TestWlStorage::default();
+
+        let key = storage::Key::parse("test").unwrap();
+        let lazy_set = LazySet::<u64>::open(key);
+
+        // The set should be empty at first
+        assert!(lazy_set.is_empty(&storage)?);
+        assert!(lazy_set.len(&storage)? == 0);
+        assert!(!lazy_set.contains(&storage, &0)?);
+        assert!(!lazy_set.contains(&storage, &1)?);
+        assert!(lazy_set.iter(&storage)?.next().is_none());
+        assert!(!lazy_set.remove(&mut storage, &0)?);
+        assert!(!lazy_set.remove(&mut storage, &1)?);
+
+        // Insert a new value and check that it's added
+        let key = 123;
+        lazy_set.insert(&mut storage, key)?;
+
+        let key2 = 456;
+        lazy_set.insert(&mut storage, key2)?;
+
+        let key3 = 256;
+        lazy_set.try_insert(&mut storage, key3).unwrap();
+
+        assert!(!lazy_set.contains(&storage, &0)?);
+        assert!(lazy_set.contains(&storage, &key)?);
+        assert!(!lazy_set.is_empty(&storage)?);
+        assert!(lazy_set.len(&storage)? == 3);
+        let mut set_it = lazy_set.iter(&storage)?;
+        assert_eq!(set_it.next().unwrap()?, key);
+        assert_eq!(set_it.next().unwrap()?, key3);
+        assert_eq!(set_it.next().unwrap()?, key2);
+        drop(set_it);
+
+        assert!(!lazy_set.contains(&storage, &0)?);
+        assert!(lazy_set.contains(&storage, &key)?);
+        assert!(lazy_set.contains(&storage, &key2)?);
+        assert!(lazy_set.try_insert(&mut storage, key3).is_err());
+
+        // Remove the values and check the set contents
+        let removed = lazy_set.remove(&mut storage, &key)?;
+        assert!(removed);
+        assert!(!lazy_set.is_empty(&storage)?);
+        assert!(lazy_set.len(&storage)? == 2);
+        assert!(!lazy_set.contains(&storage, &0)?);
+        assert!(!lazy_set.contains(&storage, &1)?);
+        assert!(!lazy_set.contains(&storage, &123)?);
+        assert!(lazy_set.contains(&storage, &456)?);
+        assert!(!lazy_set.contains(&storage, &key)?);
+        assert!(lazy_set.contains(&storage, &key2)?);
+        assert!(lazy_set.iter(&storage)?.next().is_some());
+        assert!(!lazy_set.remove(&mut storage, &key)?);
+        let removed = lazy_set.remove(&mut storage, &key2)?;
+        assert!(removed);
+        assert!(lazy_set.len(&storage)? == 1);
+        let removed = lazy_set.remove(&mut storage, &key3)?;
+        assert!(removed);
+        assert!(lazy_set.is_empty(&storage)?);
+        assert!(lazy_set.len(&storage)? == 0);
+
+        assert!(lazy_set.try_insert(&mut storage, key).is_ok());
+        assert!(lazy_set.try_insert(&mut storage, key).is_err());
+
+        let storage_key = lazy_set.get_key(&key);
+        assert_eq!(
+            lazy_set.is_valid_sub_key(&storage_key).unwrap(),
+            Some(SubKey::Data(key))
+        );
+
+        let storage_key2 = lazy_set.get_key(&key2);
+        assert_eq!(
+            lazy_set.is_valid_sub_key(&storage_key2).unwrap(),
+            Some(SubKey::Data(key2))
+        );
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_lazy_set_with_addr_key() -> storage_api::Result<()> {
+        let mut storage = TestWlStorage::default();
+
+        let key = storage::Key::parse("test").unwrap();
+        let lazy_set = LazySet::<Address>
::open(key); + + // Insert a new value and check that it's added + let key = address::testing::established_address_1(); + lazy_set.insert(&mut storage, key.clone())?; + + assert_eq!(lazy_set.len(&storage)?, 1); + let mut map_it = lazy_set.iter(&storage)?; + assert_eq!(map_it.next().unwrap()?, key); + drop(map_it); + + let key2 = address::testing::established_address_2(); + lazy_set.insert(&mut storage, key2.clone())?; + + assert_eq!(lazy_set.len(&storage)?, 2); + let mut iter = lazy_set.iter(&storage)?; + assert!(key < key2, "sanity check - this influences the iter order"); + assert_eq!(iter.next().unwrap()?, key); + assert_eq!(iter.next().unwrap()?, key2); + assert!(iter.next().is_none()); + drop(iter); + + let storage_key = lazy_set.get_key(&key); + assert_eq!( + lazy_set.is_valid_sub_key(&storage_key).unwrap(), + Some(SubKey::Data(key)) + ); + + let storage_key2 = lazy_set.get_key(&key2); + assert_eq!( + lazy_set.is_valid_sub_key(&storage_key2).unwrap(), + Some(SubKey::Data(key2)) + ); + + Ok(()) + } +} diff --git a/core/src/ledger/storage_api/collections/lazy_vec.rs b/core/src/ledger/storage_api/collections/lazy_vec.rs index 47b5c95c754..1e834568144 100644 --- a/core/src/ledger/storage_api/collections/lazy_vec.rs +++ b/core/src/ledger/storage_api/collections/lazy_vec.rs @@ -12,7 +12,7 @@ use super::LazyCollection; use crate::ledger::storage_api::validation::{self, Data}; use crate::ledger::storage_api::{self, ResultExt, StorageRead, StorageWrite}; use crate::ledger::vp_env::VpEnv; -use crate::types::storage::{self, DbKeySeg}; +use crate::types::storage::{self, DbKeySeg, KeySeg}; /// Subkey pointing to the length of the LazyVec pub const LEN_SUBKEY: &str = "len"; @@ -35,7 +35,7 @@ pub struct LazyVec { } /// Possible sub-keys of a [`LazyVec`] -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum SubKey { /// Length sub-key Len, @@ -144,6 +144,16 @@ where Some(Some(suffix)) => suffix, }; + // A helper to validate the 2nd key segment + let validate_sub_key = |raw_sub_key| { + if let Ok(index) = storage::KeySeg::parse(raw_sub_key) { + Ok(Some(SubKey::Data(index))) + } else { + Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result() + } + }; + // Match the suffix against expected sub-keys match &suffix.segments[..] 
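// [Editor's note] Illustrative summary, not part of the patch: the arms that
// follow accept exactly two sub-key shapes under the vector's prefix,
//
//     "<prefix>/len"           -> SubKey::Len
//     "<prefix>/data/<index>"  -> SubKey::Data(index)
//
// where the index segment may arrive as a string or an address segment;
// anything else is rejected as an invalid sub-key.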
{ [DbKeySeg::StringSeg(sub)] if sub == LEN_SUBKEY => { @@ -152,18 +162,24 @@ where [DbKeySeg::StringSeg(sub_a), DbKeySeg::StringSeg(sub_b)] if sub_a == DATA_SUBKEY => { - if let Ok(index) = storage::KeySeg::parse(sub_b.clone()) { - Ok(Some(SubKey::Data(index))) - } else { - Err(ValidationError::InvalidSubKey(key.clone())) - .into_storage_result() - } + validate_sub_key(sub_b.clone()) + } + [DbKeySeg::StringSeg(sub_a), DbKeySeg::AddressSeg(sub_b)] + if sub_a == DATA_SUBKEY => + { + validate_sub_key(sub_b.raw()) } _ => Err(ValidationError::InvalidSubKey(key.clone())) .into_storage_result(), } } + fn is_data_sub_key(&self, key: &storage::Key) -> bool { + let sub_key = self.is_valid_sub_key(key); + // The `SubKey::Len` is not data sub-key + matches!(sub_key, Ok(Some(SubKey::Data(_)))) + } + fn read_sub_key_data( env: &ENV, storage_key: &storage::Key, @@ -477,6 +493,8 @@ where mod test { use super::*; use crate::ledger::storage::testing::TestWlStorage; + use crate::ledger::storage_api::collections::lazy_map::{self, NestedMap}; + use crate::types::address::{self, Address}; #[test] fn test_lazy_vec_basics() -> storage_api::Result<()> { @@ -511,6 +529,123 @@ mod test { assert!(lazy_vec.get(&storage, 0)?.is_none()); assert!(lazy_vec.get(&storage, 1)?.is_none()); + let storage_key = lazy_vec.get_data_key(0); + assert_eq!( + lazy_vec.is_valid_sub_key(&storage_key).unwrap(), + Some(SubKey::Data(0)) + ); + + let storage_key2 = lazy_vec.get_data_key(1); + assert_eq!( + lazy_vec.is_valid_sub_key(&storage_key2).unwrap(), + Some(SubKey::Data(1)) + ); + + Ok(()) + } + + #[test] + fn test_lazy_vec_with_addr() -> storage_api::Result<()> { + let mut storage = TestWlStorage::default(); + + let key = storage::Key::parse("test").unwrap(); + let lazy_vec = LazyVec::
<Address>::open(key);
+
+        // Push a new value and check that it's added
+        let val = address::testing::established_address_1();
+        lazy_vec.push(&mut storage, val.clone())?;
+        assert!(!lazy_vec.is_empty(&storage)?);
+        assert!(lazy_vec.len(&storage)? == 1);
+        assert_eq!(lazy_vec.iter(&storage)?.next().unwrap()?, val);
+        assert_eq!(lazy_vec.get(&storage, 0)?.unwrap(), val);
+        assert!(lazy_vec.get(&storage, 1)?.is_none());
+
+        let val2 = address::testing::established_address_2();
+        lazy_vec.push(&mut storage, val2.clone())?;
+
+        assert_eq!(lazy_vec.len(&storage)?, 2);
+        let mut iter = lazy_vec.iter(&storage)?;
+        // The iterator order follows the indices
+        assert_eq!(iter.next().unwrap()?, val);
+        assert_eq!(iter.next().unwrap()?, val2);
+        assert!(iter.next().is_none());
+        drop(iter);
+
+        let storage_key = lazy_vec.get_data_key(0);
+        assert_eq!(
+            lazy_vec.is_valid_sub_key(&storage_key).unwrap(),
+            Some(SubKey::Data(0))
+        );
+
+        let storage_key2 = lazy_vec.get_data_key(1);
+        assert_eq!(
+            lazy_vec.is_valid_sub_key(&storage_key2).unwrap(),
+            Some(SubKey::Data(1))
+        );
+
+        Ok(())
+    }
+
+    /// Test iterator on a `LazyVec` nested inside a `LazyMap`
+    #[test]
+    fn test_nested_lazy_vec_iter() -> storage_api::Result<()> {
+        let mut storage = TestWlStorage::default();
+
+        let prefix = storage::Key::parse("test").unwrap();
+        let handle = NestedMap::<Address, LazyVec<u64>>::open(prefix);
+
+        let key = address::testing::established_address_1();
+
+        // Push first value and check iterator
+        handle.at(&key).push(&mut storage, 15)?;
+        let expected = (
+            lazy_map::NestedSubKey::Data {
+                key: key.clone(),                // LazyMap key
+                nested_sub_key: SubKey::Data(0), // LazyVec index
+            },
+            15, // the value
+        );
+
+        let mut iter = handle.iter(&storage)?;
+        assert_eq!(iter.next().unwrap()?, expected);
+        assert!(iter.next().is_none());
+        drop(iter);
+
+        // Push second value and check iterator again
+        handle.at(&key).push(&mut storage, 1)?;
+        let expected2 = (
+            lazy_map::NestedSubKey::Data {
+                key: key.clone(),                // LazyMap key
+                nested_sub_key: SubKey::Data(1), // LazyVec index
+            },
+            1, // the value
+        );
+
+        let mut iter = handle.iter(&storage)?;
+        assert_eq!(iter.next().unwrap()?, expected);
+        assert_eq!(iter.next().unwrap()?, expected2);
+        assert!(iter.next().is_none());
+        drop(iter);
+
+        let key2 = address::testing::established_address_2();
+        // Push third value on a different outer key and check iterator again
+        handle.at(&key2).push(&mut storage, 9)?;
+        let expected3 = (
+            lazy_map::NestedSubKey::Data {
+                key: key2.clone(),               // LazyMap key
+                nested_sub_key: SubKey::Data(0), // LazyVec index
+            },
+            9, // the value
+        );
+
+        let mut iter = handle.iter(&storage)?;
+        assert!(key < key2, "sanity check - this influences the iter order");
+        assert_eq!(iter.next().unwrap()?, expected);
+        assert_eq!(iter.next().unwrap()?, expected2);
+        assert_eq!(iter.next().unwrap()?, expected3);
+        assert!(iter.next().is_none());
+        drop(iter);
+
+        Ok(())
+    }
+}
diff --git a/core/src/ledger/storage_api/collections/mod.rs b/core/src/ledger/storage_api/collections/mod.rs
index 688b76bd499..6301d151bea 100644
--- a/core/src/ledger/storage_api/collections/mod.rs
+++ b/core/src/ledger/storage_api/collections/mod.rs
@@ -14,9 +14,11 @@ use derivative::Derivative;
 use thiserror::Error;
 
 pub mod lazy_map;
+pub mod lazy_set;
 pub mod lazy_vec;
 
 pub use lazy_map::LazyMap;
+pub use lazy_set::LazySet;
 pub use lazy_vec::LazyVec;
 
 use crate::ledger::storage_api;
@@ -73,6 +75,13 @@ pub trait LazyCollection {
         key: &storage::Key,
     ) -> storage_api::Result<Option<Self::SubKey>>;
 
+    /// Check if the given storage key is a valid data key.
+    ///
+    /// For most collections, this is the same as `is_valid_sub_key`, but for
+    /// example for `LazyVec`, which has an additional sub-key for length of the
+    /// vec, only the element data sub-keys would return `true`.
+    fn is_data_sub_key(&self, key: &storage::Key) -> bool;
+
     /// Try to read and decode the data for each change storage key in prior and
     /// posterior state. If there is no value in neither prior or posterior
     /// state (which is a possible state when transaction e.g. writes and then
diff --git a/core/src/ledger/storage_api/governance.rs b/core/src/ledger/storage_api/governance.rs
index c6197ebbb3f..b71f4a6e409 100644
--- a/core/src/ledger/storage_api/governance.rs
+++ b/core/src/ledger/storage_api/governance.rs
@@ -4,7 +4,7 @@ use super::token;
 use crate::ledger::governance::{storage, ADDRESS as governance_address};
 use crate::ledger::storage_api::{self, StorageRead, StorageWrite};
 use crate::types::transaction::governance::{
-    InitProposalData, VoteProposalData,
+    InitProposalData, ProposalType, VoteProposalData,
 };
 
 /// A proposal creation transaction.
@@ -28,6 +28,17 @@ where
     let author_key = storage::get_author_key(proposal_id);
     storage.write(&author_key, data.author.clone())?;
 
+    let proposal_type_key = storage::get_proposal_type_key(proposal_id);
+    match data.r#type {
+        ProposalType::Default(Some(ref code)) => {
+            // Remove wasm code and write it under a different subkey
+            storage.write(&proposal_type_key, ProposalType::Default(None))?;
+            let proposal_code_key = storage::get_proposal_code_key(proposal_id);
+            storage.write_bytes(&proposal_code_key, code)?
+        }
+        _ => storage.write(&proposal_type_key, data.r#type.clone())?,
+    }
+
     let voting_start_epoch_key =
         storage::get_voting_start_epoch_key(proposal_id);
     storage.write(&voting_start_epoch_key, data.voting_start_epoch)?;
@@ -38,7 +49,7 @@
     let grace_epoch_key = storage::get_grace_epoch_key(proposal_id);
     storage.write(&grace_epoch_key, data.grace_epoch)?;
 
-    if let Some(proposal_code) = data.proposal_code {
+    if let ProposalType::Default(Some(proposal_code)) = data.r#type {
         let proposal_code_key = storage::get_proposal_code_key(proposal_id);
         storage.write_bytes(&proposal_code_key, proposal_code)?;
     }
diff --git a/core/src/ledger/storage_api/mod.rs b/core/src/ledger/storage_api/mod.rs
index 1a4bcd13da6..2475db3197c 100644
--- a/core/src/ledger/storage_api/mod.rs
+++ b/core/src/ledger/storage_api/mod.rs
@@ -12,7 +12,9 @@ use borsh::{BorshDeserialize, BorshSerialize};
 pub use error::{CustomError, Error, OptionExt, Result, ResultExt};
 
 use crate::types::address::Address;
-use crate::types::storage::{self, BlockHash, BlockHeight, Epoch, TxIndex};
+use crate::types::storage::{
+    self, BlockHash, BlockHeight, Epoch, Header, TxIndex,
+};
 
 /// Common storage read interface
 ///
@@ -79,6 +81,9 @@
     /// current transaction is being applied.
     fn get_block_height(&self) -> Result<BlockHeight>;
 
+    /// Getting the block header.
+    fn get_block_header(&self, height: BlockHeight) -> Result<Option<Header>>;
+
     /// Getting the block hash. The height is that of the block to which the
     /// current transaction is being applied.
     fn get_block_hash(&self) -> Result<BlockHash>;
@@ -183,3 +188,63 @@ where
     });
     Ok(iter)
 }
+
+/// Iterate Borsh encoded items matching the given prefix and passing the given
+/// `filter` predicate, ordered by the storage keys.
+///
+/// The `filter` predicate is a function from a storage key to bool and only
+/// the items that return `true` will be returned from the iterator.
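// [Editor's note] Illustrative sketch, not part of the patch: e.g. decoding
// only the balance values under some prefix key. The `token_prefix` and
// `token_addr` names are assumptions for illustration; `is_balance_key` is
// the helper re-exported in the token module below:
//
//     let iter = iter_prefix_with_filter::<token::Amount, _>(
//         storage,
//         &token_prefix,
//         |key| is_balance_key(&token_addr, key).is_some(),
//     )?;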
+///
+/// Note that this is preferable over the regular `iter_prefix` combined with
+/// the iterator's `filter` function as it avoids trying to decode values that
+/// don't pass the filter. For `iter_prefix_bytes`, `filter` works fine.
+pub fn iter_prefix_with_filter<'a, T, F>(
+    storage: &'a impl StorageRead,
+    prefix: &crate::types::storage::Key,
+    filter: F,
+) -> Result<impl Iterator<Item = Result<(storage::Key, T)>> + 'a>
+where
+    T: BorshDeserialize,
+    F: Fn(&storage::Key) -> bool + 'a,
+{
+    let iter = storage.iter_prefix(prefix)?;
+    let iter = itertools::unfold(iter, move |iter| {
+        // The loop is for applying filter - we `continue` when the current key
+        // doesn't pass the predicate.
+        loop {
+            match storage.iter_next(iter) {
+                Ok(Some((key, val))) => {
+                    let key =
+                        match storage::Key::parse(key).into_storage_result() {
+                            Ok(key) => key,
+                            Err(err) => {
+                                // Propagate key encoding errors into Iterator's
+                                // Item
+                                return Some(Err(err));
+                            }
+                        };
+                    // Check the predicate
+                    if !filter(&key) {
+                        continue;
+                    }
+                    let val =
+                        match T::try_from_slice(&val).into_storage_result() {
+                            Ok(val) => val,
+                            Err(err) => {
+                                // Propagate val encoding errors into Iterator's
+                                // Item
+                                return Some(Err(err));
+                            }
+                        };
+                    return Some(Ok((key, val)));
+                }
+                Ok(None) => return None,
+                Err(err) => {
+                    // Propagate `iter_next` errors into Iterator's Item
+                    return Some(Err(err));
+                }
+            }
+        }
+    });
+    Ok(iter)
+}
diff --git a/core/src/ledger/storage_api/token.rs b/core/src/ledger/storage_api/token.rs
index dcc55b43455..8cccc2d3a6e 100644
--- a/core/src/ledger/storage_api/token.rs
+++ b/core/src/ledger/storage_api/token.rs
@@ -4,7 +4,10 @@ use super::{StorageRead, StorageWrite};
 use crate::ledger::storage_api;
 use crate::types::address::Address;
 use crate::types::token;
-pub use crate::types::token::Amount;
+pub use crate::types::token::{
+    balance_key, is_balance_key, is_total_supply_key, total_supply_key, Amount,
+    Change,
+};
 
 /// Read the balance of a given token and owner.
 pub fn read_balance(
@@ -46,6 +49,9 @@ pub fn transfer(
 where
     S: StorageRead + StorageWrite,
 {
+    if amount.is_zero() {
+        return Ok(());
+    }
     let src_key = token::balance_key(token, src);
     let src_balance = read_balance(storage, token, src)?;
     match src_balance.checked_sub(amount) {
@@ -79,13 +85,20 @@ pub fn credit_tokens(
 where
     S: StorageRead + StorageWrite,
 {
-    let key = token::balance_key(token, dest);
-    let new_balance = read_balance(storage, token, dest)? + amount;
-    storage.write(&key, new_balance)?;
+    let balance_key = token::balance_key(token, dest);
+    let cur_balance = read_balance(storage, token, dest)?;
+    let new_balance = cur_balance.checked_add(amount).ok_or_else(|| {
+        storage_api::Error::new_const("Token balance overflow")
+    })?;
 
     let total_supply_key = token::total_supply_key(token);
-    let current_supply = storage
+    let cur_supply = storage
         .read::<token::Amount>(&total_supply_key)?
.unwrap_or_default(); - storage.write(&total_supply_key, current_supply + amount) + let new_supply = cur_supply.checked_add(amount).ok_or_else(|| { + storage_api::Error::new_const("Token total supply overflow") + })?; + + storage.write(&balance_key, new_balance)?; + storage.write(&total_supply_key, new_supply) } diff --git a/core/src/ledger/testnet_pow.rs b/core/src/ledger/testnet_pow.rs index bed2273a706..601cc0c6390 100644 --- a/core/src/ledger/testnet_pow.rs +++ b/core/src/ledger/testnet_pow.rs @@ -489,116 +489,3 @@ mod test { assert_eq!(bytes.len(), SOLUTION_VAL_BYTES_LEN); } } - -#[cfg(test)] -mod test_with_tx_and_vp_env { - // IMPORTANT: do not import anything directly from this `crate` here, only - // via `namada_tests`. This gets us around the `core -> tests -> core` dep - // cycle, which is okay, because `tests` is only a `dev-dependency` of - // core and allows us to test the code in the same module as its defined. - // - // This imports the same code as `super::*` but from a different version of - // this crate (one that `namada_tests` depends on). It's re-exported - // from `namada_tests` so that we can use it together with - // `namada_tests` modules back in here. - use namada_tests::namada::core::ledger::storage_api; - use namada_tests::namada::core::ledger::testnet_pow::*; - use namada_tests::namada::core::types::{address, token}; - use namada_tests::tx::{self, TestTxEnv}; - use namada_tests::vp; - - #[test] - fn test_challenge_and_solution() -> storage_api::Result<()> { - let faucet_address = address::testing::established_address_1(); - let difficulty = Difficulty::try_new(1).unwrap(); - let withdrawal_limit = token::Amount::whole(1_000); - - let mut tx_env = TestTxEnv::default(); - - // Source address that's using PoW (this would be derived from the tx - // wrapper pk) - let source = address::testing::established_address_2(); - - // Ensure that the addresses exists, so we can use them in a tx - tx_env.spawn_accounts([&faucet_address, &source]); - - init_faucet_storage( - &mut tx_env.wl_storage, - &faucet_address, - difficulty, - withdrawal_limit, - )?; - tx_env.commit_genesis(); - - let challenge = Challenge::new( - &mut tx_env.wl_storage, - &faucet_address, - source.clone(), - )?; - - let solution = challenge.solve(); - - // The solution must be valid - assert!(solution.verify_solution(source.clone())); - - // Changing the solution to `0` invalidates it - { - let mut solution = solution.clone(); - solution.value = 0; - // If you're unlucky and this fails, try changing the solution to - // a different literal. - assert!(!solution.verify_solution(source.clone())); - } - // Changing the counter invalidates it - { - let mut solution = solution.clone(); - solution.params.counter = 10; - // If you're unlucky and this fails, try changing the counter to - // a different literal. 
- assert!(!solution.verify_solution(source.clone())); - } - - // Apply the solution from a tx - vp::vp_host_env::init_from_tx( - faucet_address.clone(), - tx_env, - |_addr| { - solution - .apply_from_tx(tx::ctx(), &faucet_address, &source) - .unwrap(); - }, - ); - - // Check that it's valid - let is_valid = solution.validate( - &vp::ctx().pre(), - &faucet_address, - source.clone(), - )?; - assert!(is_valid); - - // Commit the tx - let vp_env = vp::vp_host_env::take(); - tx::tx_host_env::set_from_vp_env(vp_env); - tx::tx_host_env::commit_tx_and_block(); - let tx_env = tx::tx_host_env::take(); - - // Re-apply the same solution from a tx - vp::vp_host_env::init_from_tx( - faucet_address.clone(), - tx_env, - |_addr| { - solution - .apply_from_tx(tx::ctx(), &faucet_address, &source) - .unwrap(); - }, - ); - - // Check that it's not longer valid - let is_valid = - solution.validate(&vp::ctx().pre(), &faucet_address, source)?; - assert!(!is_valid); - - Ok(()) - } -} diff --git a/core/src/ledger/tx_env.rs b/core/src/ledger/tx_env.rs index 6ca47bb9d9a..c53d38d4b29 100644 --- a/core/src/ledger/tx_env.rs +++ b/core/src/ledger/tx_env.rs @@ -7,7 +7,6 @@ use crate::ledger::storage_api::{self, StorageRead, StorageWrite}; use crate::types::address::Address; use crate::types::ibc::IbcEvent; use crate::types::storage; -use crate::types::time::Rfc3339String; /// Transaction host functions pub trait TxEnv: StorageRead + StorageWrite { @@ -51,13 +50,9 @@ pub trait TxEnv: StorageRead + StorageWrite { code: impl AsRef<[u8]>, ) -> Result<(), storage_api::Error>; - /// Emit an IBC event. There can be only one event per transaction. On - /// multiple calls, only the last emitted event will be used. + /// Emit an IBC event. On multiple calls, these emitted event will be added. fn emit_ibc_event( &mut self, event: &IbcEvent, ) -> Result<(), storage_api::Error>; - - /// Get time of the current block header as rfc 3339 string - fn get_block_time(&self) -> Result; } diff --git a/core/src/ledger/vp_env.rs b/core/src/ledger/vp_env.rs index 43bc7446353..f1d1210e28a 100644 --- a/core/src/ledger/vp_env.rs +++ b/core/src/ledger/vp_env.rs @@ -7,7 +7,9 @@ use super::storage_api::{self, StorageRead}; use crate::types::address::Address; use crate::types::hash::Hash; use crate::types::key::common; -use crate::types::storage::{BlockHash, BlockHeight, Epoch, Key, TxIndex}; +use crate::types::storage::{ + BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, +}; /// Validity predicate's environment is available for native VPs and WASM VPs pub trait VpEnv<'view> @@ -53,6 +55,12 @@ where /// current transaction is being applied. fn get_block_height(&self) -> Result; + /// Getting the block header. + fn get_block_header( + &self, + height: BlockHeight, + ) -> Result, storage_api::Error>; + /// Getting the block hash. The height is that of the block to which the /// current transaction is being applied. fn get_block_hash(&self) -> Result; @@ -82,7 +90,7 @@ where /// Otherwise returns the result of evaluation. 
     fn eval(
         &self,
-        vp_code: Vec<u8>,
+        vp_code: Hash,
         input_data: Vec<u8>,
     ) -> Result<bool, storage_api::Error>;
diff --git a/core/src/lib.rs b/core/src/lib.rs
index c9bd40084e1..44ca4204099 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -7,6 +7,7 @@
 #![deny(rustdoc::private_intra_doc_links)]
 
 pub mod bytes;
+pub mod hints;
 pub mod ledger;
 pub mod proto;
 pub mod types;
diff --git a/core/src/proto/mod.rs b/core/src/proto/mod.rs
index 32710375953..daeb0e9a496 100644
--- a/core/src/proto/mod.rs
+++ b/core/src/proto/mod.rs
@@ -7,18 +7,23 @@ pub use types::{Dkg, Error, Signed, SignedTxData, Tx};
 
 #[cfg(test)]
 mod tests {
+    use std::time::SystemTime;
+
     use data_encoding::HEXLOWER;
     use generated::types::Tx;
     use prost::Message;
 
     use super::*;
+    use crate::types::chain::ChainId;
 
     #[test]
     fn encoding_round_trip() {
         let tx = Tx {
-            code: "wasm code".as_bytes().to_owned(),
+            code_or_hash: "wasm code".as_bytes().to_owned(),
             data: Some("arbitrary data".as_bytes().to_owned()),
-            timestamp: Some(std::time::SystemTime::now().into()),
+            timestamp: Some(SystemTime::now().into()),
+            chain_id: ChainId::default().0,
+            expiration: Some(SystemTime::now().into()),
         };
         let mut tx_bytes = vec![];
         tx.encode(&mut tx_bytes).unwrap();
diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs
index 40e343d1bf0..f82fb7bd8b6 100644
--- a/core/src/proto/types.rs
+++ b/core/src/proto/types.rs
@@ -9,6 +9,7 @@ use thiserror::Error;
 use super::generated::types;
 #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))]
 use crate::tendermint_proto::abci::ResponseDeliverTx;
+use crate::types::chain::ChainId;
 use crate::types::key::*;
 use crate::types::time::DateTimeUtc;
 #[cfg(feature = "ferveo-tpke")]
@@ -32,7 +33,7 @@ pub enum Error {
     #[error("Timestamp is empty")]
     NoTimestampError,
     #[error("Timestamp is invalid: {0}")]
-    InvalidTimestamp(prost_types::TimestampOutOfSystemRangeError),
+    InvalidTimestamp(prost_types::TimestampError),
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -136,16 +137,21 @@ pub struct SigningTx {
     pub code_hash: [u8; 32],
     pub data: Option<Vec<u8>>,
     pub timestamp: DateTimeUtc,
+    pub chain_id: ChainId,
+    pub expiration: Option<DateTimeUtc>,
 }
 
 impl SigningTx {
     pub fn hash(&self) -> [u8; 32] {
         let timestamp = Some(self.timestamp.into());
+        let expiration = self.expiration.map(|e| e.into());
         let mut bytes = vec![];
         types::Tx {
-            code: self.code_hash.to_vec(),
+            code_or_hash: self.code_hash.to_vec(),
             data: self.data.clone(),
             timestamp,
+            chain_id: self.chain_id.as_str().to_owned(),
+            expiration,
         }
         .encode(&mut bytes)
         .expect("encoding a transaction failed");
@@ -166,6 +172,8 @@ impl SigningTx {
             code_hash: self.code_hash,
             data: Some(signed),
             timestamp: self.timestamp,
+            chain_id: self.chain_id,
+            expiration: self.expiration,
         }
     }
 
@@ -185,6 +193,8 @@ impl SigningTx {
             code_hash: self.code_hash,
             data,
             timestamp: self.timestamp,
+            chain_id: self.chain_id.clone(),
+            expiration: self.expiration,
         };
         let signed_data = tx.hash();
         common::SigScheme::verify_signature_raw(pk, &signed_data, sig)
     }
@@ -195,9 +205,11 @@
     pub fn expand(self, code: Vec<u8>) -> Option<Tx> {
         if hash_tx(&code).0 == self.code_hash {
             Some(Tx {
-                code,
+                code_or_hash: code,
                 data: self.data,
                 timestamp: self.timestamp,
+                chain_id: self.chain_id,
+                expiration: self.expiration,
             })
         } else {
            None
        }
    }
}

impl From<Tx> for SigningTx {
    fn from(tx: Tx) -> SigningTx {
        SigningTx {
-            code_hash: hash_tx(&tx.code).0,
+            code_hash: hash_tx(&tx.code_or_hash).0,
            data: tx.data,
            timestamp: tx.timestamp,
+            chain_id: tx.chain_id,
+            expiration: tx.expiration,
        }
    }
}
@@ -222,9 +236,11 @@ impl From<Tx> for SigningTx {
     Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize, BorshSchema, Hash,
 )]
 pub struct Tx {
-    pub code: Vec<u8>,
+    pub code_or_hash: Vec<u8>,
     pub data: Option<Vec<u8>>,
     pub timestamp: DateTimeUtc,
+    pub chain_id: ChainId,
+    pub expiration: Option<DateTimeUtc>,
 }
 
 impl TryFrom<&[u8]> for Tx {
@@ -236,10 +252,18 @@ impl TryFrom<&[u8]> for Tx {
             Some(t) => t.try_into().map_err(Error::InvalidTimestamp)?,
             None => return Err(Error::NoTimestampError),
         };
+        let chain_id = ChainId(tx.chain_id);
+        let expiration = match tx.expiration {
+            Some(e) => Some(e.try_into().map_err(Error::InvalidTimestamp)?),
+            None => None,
+        };
+
         Ok(Tx {
-            code: tx.code,
+            code_or_hash: tx.code_or_hash,
             data: tx.data,
             timestamp,
+            chain_id,
+            expiration,
         })
     }
 }
@@ -247,10 +271,14 @@ impl TryFrom<&[u8]> for Tx {
 impl From<Tx> for types::Tx {
     fn from(tx: Tx) -> Self {
         let timestamp = Some(tx.timestamp.into());
+        let expiration = tx.expiration.map(|e| e.into());
+
         types::Tx {
-            code: tx.code,
+            code_or_hash: tx.code_or_hash,
             data: tx.data,
             timestamp,
+            chain_id: tx.chain_id.as_str().to_owned(),
+            expiration,
         }
     }
 }
@@ -342,11 +370,20 @@ impl From<Tx> for ResponseDeliverTx {
 }
 
 impl Tx {
-    pub fn new(code: Vec<u8>, data: Option<Vec<u8>>) -> Self {
+    /// Create a new transaction. `code_or_hash` should be set as the wasm code
+    /// bytes or hash.
+    pub fn new(
+        code_or_hash: Vec<u8>,
+        data: Option<Vec<u8>>,
+        chain_id: ChainId,
+        expiration: Option<DateTimeUtc>,
+    ) -> Self {
         Tx {
-            code,
+            code_or_hash,
             data,
             timestamp: DateTimeUtc::now(),
+            chain_id,
+            expiration,
         }
     }
 
@@ -362,13 +399,41 @@ impl Tx {
         SigningTx::from(self.clone()).hash()
     }
 
+    pub fn unsigned_hash(&self) -> [u8; 32] {
+        match self.data {
+            Some(ref data) => {
+                match SignedTxData::try_from_slice(data) {
+                    Ok(signed_data) => {
+                        // Reconstruct unsigned tx
+                        let unsigned_tx = Tx {
+                            code_or_hash: self.code_or_hash.clone(),
+                            data: signed_data.data,
+                            timestamp: self.timestamp,
+                            chain_id: self.chain_id.clone(),
+                            expiration: self.expiration,
+                        };
+                        unsigned_tx.hash()
+                    }
+                    Err(_) => {
+                        // Unsigned tx
+                        self.hash()
+                    }
+                }
+            }
+            None => {
+                // Unsigned tx
+                self.hash()
+            }
+        }
+    }
+
     pub fn code_hash(&self) -> [u8; 32] {
         SigningTx::from(self.clone()).code_hash
     }
 
     /// Sign a transaction using [`SignedTxData`].
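// [Editor's note] Illustrative sketch, not part of the patch: constructing
// and signing a tx with the new `chain_id`/`expiration` fields (the keypair
// and code/data values here are assumed for illustration):
//
//     let tx = Tx::new(
//         "wasm code".as_bytes().to_owned(),
//         Some("data".as_bytes().to_owned()),
//         ChainId::default(),
//         None, // no expiration
//     )
//     .sign(&keypair);
//
// Signing wraps `data` in a `SignedTxData`, which is why `unsigned_hash`
// above can recover the commitment to the original unsigned contents.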
pub fn sign(self, keypair: &common::SecretKey) -> Self { - let code = self.code.clone(); + let code = self.code_or_hash.clone(); SigningTx::from(self) .sign(keypair) .expand(code) @@ -468,7 +533,9 @@ mod tests { fn test_tx() { let code = "wasm code".as_bytes().to_owned(); let data = "arbitrary data".as_bytes().to_owned(); - let tx = Tx::new(code.clone(), Some(data.clone())); + let chain_id = ChainId::default(); + let tx = + Tx::new(code.clone(), Some(data.clone()), chain_id.clone(), None); let bytes = tx.to_bytes(); let tx_from_bytes = @@ -476,9 +543,11 @@ mod tests { assert_eq!(tx_from_bytes, tx); let types_tx = types::Tx { - code, + code_or_hash: code, data: Some(data), timestamp: None, + chain_id: chain_id.0, + expiration: None, }; let mut bytes = vec![]; types_tx.encode(&mut bytes).expect("encoding failed"); diff --git a/core/src/types/address.rs b/core/src/types/address.rs index 3543ef133d0..b062a23f54b 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -12,6 +12,7 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; +use crate::ibc::signer::Signer; use crate::types::key; use crate::types::key::PublicKeyHash; @@ -44,6 +45,8 @@ pub const POS: Address = Address::Internal(InternalAddress::PoS); /// Internal PoS slash pool address pub const POS_SLASH_POOL: Address = Address::Internal(InternalAddress::PosSlashPool); +/// Internal Governance address +pub const GOV: Address = Address::Internal(InternalAddress::Governance); /// Raw strings used to produce internal addresses. All the strings must begin /// with `PREFIX_INTERNAL` and be `FIXED_LEN_STRING_BYTES` characters long. @@ -69,6 +72,8 @@ mod internal { "ibc::IBC Mint Address "; pub const ETH_BRIDGE: &str = "ano::ETH Bridge Address "; + pub const REPLAY_PROTECTION: &str = + "ano::Replay Protection "; } /// Fixed-length address strings prefix for established addresses. @@ -100,15 +105,7 @@ pub type Result = std::result::Result; /// An account's address #[derive( - Clone, - BorshSerialize, - BorshDeserialize, - BorshSchema, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, + Clone, BorshSerialize, BorshDeserialize, BorshSchema, PartialEq, Eq, Hash, )] pub enum Address { /// An established address is generated on-chain @@ -119,6 +116,21 @@ pub enum Address { Internal(InternalAddress), } +// We're using the string format of addresses (bech32m) for ordering to ensure +// that addresses as strings, storage keys and storage keys as strings preserve +// the order. 
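// [Editor's note] Illustrative sketch, not part of the patch: the invariant
// these impls establish, for any two addresses `a` and `b`:
//
//     assert_eq!(a.cmp(&b), a.encode().cmp(&b.encode()));
//
// so ordering addresses, ordering their bech32m strings, and ordering storage
// keys containing them all agree (see `test_address_in_storage_key_order`
// further below).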
+impl PartialOrd for Address { + fn partial_cmp(&self, other: &Self) -> Option { + self.encode().partial_cmp(&other.encode()) + } +} + +impl Ord for Address { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.encode().cmp(&other.encode()) + } +} + impl Address { /// Encode an address with Bech32m encoding pub fn encode(&self) -> String { @@ -198,6 +210,9 @@ impl Address { InternalAddress::EthBridge => { internal::ETH_BRIDGE.to_string() } + InternalAddress::ReplayProtection => { + internal::REPLAY_PROTECTION.to_string() + } }; debug_assert_eq!(string.len(), FIXED_LEN_STRING_BYTES); string @@ -251,6 +266,9 @@ impl Address { internal::ETH_BRIDGE => { Ok(Address::Internal(InternalAddress::EthBridge)) } + internal::REPLAY_PROTECTION => { + Ok(Address::Internal(InternalAddress::ReplayProtection)) + } _ => Err(Error::new( ErrorKind::InvalidData, "Invalid internal address", @@ -346,6 +364,15 @@ impl FromStr for Address { } } +/// for IBC signer +impl TryFrom for Address { + type Error = DecodeError; + + fn try_from(signer: Signer) -> Result { + Address::decode(signer.as_ref()) + } +} + /// An established address is generated on-chain #[derive( Debug, @@ -466,6 +493,8 @@ pub enum InternalAddress { SlashFund, /// Bridge to Ethereum EthBridge, + /// Replay protection contains transactions' hash + ReplayProtection, } impl InternalAddress { @@ -500,6 +529,7 @@ impl Display for InternalAddress { Self::IbcBurn => "IbcBurn".to_string(), Self::IbcMint => "IbcMint".to_string(), Self::EthBridge => "EthBridge".to_string(), + Self::ReplayProtection => "ReplayProtection".to_string(), } ) } @@ -556,22 +586,6 @@ pub fn masp_tx_key() -> crate::types::key::common::SecretKey { common::SecretKey::try_from_slice(bytes.as_ref()).unwrap() } -/// Temporary helper for testing, a hash map of tokens addresses with their -/// informal currency codes. -pub fn tokens() -> HashMap { - vec![ - (nam(), "NAM"), - (btc(), "BTC"), - (eth(), "ETH"), - (dot(), "DOT"), - (schnitzel(), "Schnitzel"), - (apfel(), "Apfel"), - (kartoffel(), "Kartoffel"), - ] - .into_iter() - .collect() -} - /// Temporary helper for testing, a hash map of tokens addresses with their /// MASP XAN incentive schedules. If the reward is (a, b) then a rewarded tokens /// are dispensed for every b possessed tokens. @@ -669,6 +683,13 @@ pub fn gen_established_address(seed: impl AsRef) -> Address { key_gen.generate_address(rng_source) } +/// Generate a new established address. Unlike `gen_established_address`, this +/// will give the same address for the same `seed`. +pub fn gen_deterministic_established_address(seed: impl AsRef) -> Address { + let mut key_gen = EstablishedAddressGen::new(seed); + key_gen.generate_address("") +} + /// Helpers for testing with addresses. #[cfg(any(test, feature = "testing"))] pub mod testing { @@ -685,7 +706,7 @@ pub mod testing { /// Derive an established address from a simple seed (`u64`). pub fn address_from_simple_seed(seed: u64) -> Address { - super::gen_established_address(seed.to_string()) + super::gen_deterministic_established_address(seed.to_string()) } /// Generate a new implicit address. @@ -776,8 +797,10 @@ pub mod testing { InternalAddress::IbcEscrow => {} InternalAddress::IbcBurn => {} InternalAddress::IbcMint => {} - InternalAddress::EthBridge => {} /* Add new addresses in the - * `prop_oneof` below. */ + InternalAddress::EthBridge => {} + InternalAddress::ReplayProtection => {} /* Add new addresses in + * the + * `prop_oneof` below. 
*/ }; prop_oneof![ Just(InternalAddress::PoS), @@ -792,6 +815,7 @@ pub mod testing { Just(InternalAddress::Governance), Just(InternalAddress::SlashFund), Just(InternalAddress::EthBridge), + Just(InternalAddress::ReplayProtection) ] } diff --git a/core/src/types/chain.rs b/core/src/types/chain.rs index 7437793cfc0..b14fdbbef2d 100644 --- a/core/src/types/chain.rs +++ b/core/src/types/chain.rs @@ -192,6 +192,7 @@ pub const DEFAULT_CHAIN_ID: &str = "namada-internal.00000000000000"; Deserialize, BorshSerialize, BorshDeserialize, + BorshSchema, PartialOrd, Ord, PartialEq, @@ -199,7 +200,7 @@ pub const DEFAULT_CHAIN_ID: &str = "namada-internal.00000000000000"; Hash, )] #[serde(transparent)] -pub struct ChainId(String); +pub struct ChainId(pub String); impl ChainId { /// Extracts a string slice containing the entire chain ID. diff --git a/core/src/types/governance.rs b/core/src/types/governance.rs index 438017a3709..dc17d07e225 100644 --- a/core/src/types/governance.rs +++ b/core/src/types/governance.rs @@ -1,8 +1,7 @@ //! Files defyining the types used in governance. -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashSet}; use std::fmt::{self, Display}; -use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use rust_decimal::Decimal; @@ -14,11 +13,34 @@ use crate::types::hash::Hash; use crate::types::key::common::{self, Signature}; use crate::types::key::SigScheme; use crate::types::storage::Epoch; -use crate::types::token::SCALE; +use crate::types::token::{Amount, SCALE}; /// Type alias for vote power pub type VotePower = u128; +/// A PGF cocuncil composed of the address and spending cap +pub type Council = (Address, Amount); + +/// The type of a governance vote with the optional associated Memo +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + Serialize, + Deserialize, + Eq, +)] +pub enum VoteType { + /// A default vote without Memo + Default, + /// A vote for the PGF council + PGFCouncil(HashSet), + /// A vote for ETH bridge carrying the signature over the proposed message + ETHBridge(Signature), +} + #[derive( Debug, Clone, @@ -32,7 +54,7 @@ pub type VotePower = u128; /// The vote for a proposal pub enum ProposalVote { /// Yes - Yay, + Yay(VoteType), /// No Nay, } @@ -40,17 +62,40 @@ pub enum ProposalVote { impl ProposalVote { /// Check if a vote is yay pub fn is_yay(&self) -> bool { - match self { - ProposalVote::Yay => true, - ProposalVote::Nay => false, - } + matches!(self, ProposalVote::Yay(_)) + } + + /// Check if vote is of type default + pub fn is_default_vote(&self) -> bool { + matches!( + self, + ProposalVote::Yay(VoteType::Default) | ProposalVote::Nay + ) } } impl Display for ProposalVote { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - ProposalVote::Yay => write!(f, "yay"), + ProposalVote::Yay(vote_type) => match vote_type { + VoteType::Default => write!(f, "yay"), + VoteType::PGFCouncil(councils) => { + writeln!(f, "yay with councils:")?; + for (address, spending_cap) in councils { + writeln!( + f, + "Council: {}, spending cap: {}", + address, spending_cap + )? 
+ } + + Ok(()) + } + VoteType::ETHBridge(sig) => { + write!(f, "yay with signature: {:#?}", sig) + } + }, + ProposalVote::Nay => write!(f, "nay"), } } @@ -63,28 +108,22 @@ pub enum ProposalVoteParseError { InvalidVote, } -impl FromStr for ProposalVote { - type Err = ProposalVoteParseError; - - fn from_str(s: &str) -> Result { - if s.eq("yay") { - Ok(ProposalVote::Yay) - } else if s.eq("nay") { - Ok(ProposalVote::Nay) - } else { - Err(ProposalVoteParseError::InvalidVote) - } - } +/// The type of the tally +pub enum Tally { + /// Default proposal + Default, + /// PGF proposal + PGFCouncil(Council), + /// ETH Bridge proposal + ETHBridge, } /// The result of a proposal pub enum TallyResult { - /// Proposal was accepted - Passed, + /// Proposal was accepted with the associated value + Passed(Tally), /// Proposal was rejected Rejected, - /// A critical error in tally computation - Failed, } /// The result with votes of a proposal @@ -121,13 +160,32 @@ impl Display for ProposalResult { impl Display for TallyResult { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - TallyResult::Passed => write!(f, "passed"), + TallyResult::Passed(vote) => match vote { + Tally::Default | Tally::ETHBridge => write!(f, "passed"), + Tally::PGFCouncil((council, cap)) => write!( + f, + "passed with PGF council address: {}, spending cap: {}", + council, cap + ), + }, TallyResult::Rejected => write!(f, "rejected"), - TallyResult::Failed => write!(f, "failed"), } } } +/// The type of a governance proposal +#[derive( + Debug, Clone, BorshSerialize, BorshDeserialize, Serialize, Deserialize, +)] +pub enum ProposalType { + /// A default proposal with the optional path to wasm code + Default(Option), + /// A PGF council proposal + PGFCouncil, + /// An ETH bridge proposal + ETHBridge, +} + #[derive( Debug, Clone, BorshSerialize, BorshDeserialize, Serialize, Deserialize, )] @@ -139,14 +197,14 @@ pub struct Proposal { pub content: BTreeMap, /// The proposal author address pub author: Address, + /// The proposal type + pub r#type: ProposalType, /// The epoch from which voting is allowed pub voting_start_epoch: Epoch, /// The epoch from which voting is stopped pub voting_end_epoch: Epoch, /// The epoch from which this changes are executed pub grace_epoch: Epoch, - /// The code containing the storage changes - pub proposal_code_path: Option, } impl Display for Proposal { diff --git a/core/src/types/hash.rs b/core/src/types/hash.rs index 74bfe3dd454..080826a415a 100644 --- a/core/src/types/hash.rs +++ b/core/src/types/hash.rs @@ -33,6 +33,8 @@ pub type HashResult = std::result::Result; Clone, Debug, Default, + PartialOrd, + Ord, Hash, PartialEq, Eq, @@ -118,7 +120,8 @@ impl Hash { Self(*digest.as_ref()) } - fn zero() -> Self { + /// Return zeros + pub fn zero() -> Self { Self([0u8; HASH_LENGTH]) } diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs index 3d537cb0259..5e7514aea3d 100644 --- a/core/src/types/ibc.rs +++ b/core/src/types/ibc.rs @@ -1,5 +1,6 @@ //! 
IBC event without IBC-related data types +use std::cmp::Ordering; use std::collections::HashMap; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; @@ -15,6 +16,19 @@ pub struct IbcEvent { pub attributes: HashMap, } +impl std::cmp::PartialOrd for IbcEvent { + fn partial_cmp(&self, other: &Self) -> Option { + self.event_type.partial_cmp(&other.event_type) + } +} + +impl std::cmp::Ord for IbcEvent { + fn cmp(&self, other: &Self) -> Ordering { + // should not compare the same event type + self.event_type.cmp(&other.event_type) + } +} + impl std::fmt::Display for IbcEvent { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let attributes = self @@ -39,7 +53,7 @@ mod ibc_rs_conversion { use super::IbcEvent; use crate::ibc::events::{Error as IbcEventError, IbcEvent as RawIbcEvent}; - use crate::tendermint::abci::Event as AbciEvent; + use crate::tendermint_proto::abci::Event as AbciEvent; #[allow(missing_docs)] #[derive(Error, Debug)] diff --git a/core/src/types/internal.rs b/core/src/types/internal.rs index 8c85a4236e2..d13d3923819 100644 --- a/core/src/types/internal.rs +++ b/core/src/types/internal.rs @@ -89,6 +89,12 @@ mod tx_queue { pub fn is_empty(&self) -> bool { self.0.is_empty() } + + /// Get reference to the element at the given index. + /// Returns [`None`] if index exceeds the queue lenght. + pub fn get(&self, index: usize) -> Option<&WrapperTxInQueue> { + self.0.get(index) + } } } diff --git a/core/src/types/storage.rs b/core/src/types/storage.rs index 9655a7e107f..9ceddecf151 100644 --- a/core/src/types/storage.rs +++ b/core/src/types/storage.rs @@ -14,6 +14,7 @@ use index_set::vec::VecIndexSet; use serde::{Deserialize, Serialize}; use thiserror::Error; +use super::key::common; use crate::bytes::ByteBuf; use crate::types::address::{self, Address}; use crate::types::hash::Hash; @@ -53,6 +54,12 @@ pub const RESERVED_ADDRESS_PREFIX: char = '#'; pub const VP_KEY_PREFIX: char = '?'; /// The reserved storage key for validity predicates pub const RESERVED_VP_KEY: &str = "?"; +/// The reserved storage key prefix for wasm codes +pub const WASM_KEY_PREFIX: &str = "wasm"; +/// The reserved storage key prefix for wasm codes +pub const WASM_CODE_PREFIX: &str = "code"; +/// The reserved storage key prefix for wasm code hashes +pub const WASM_HASH_PREFIX: &str = "hash"; /// Transaction index within block. #[derive( @@ -533,6 +540,24 @@ impl Key { Some((KeyRef { segments: prefix }, last)) } + /// Returns a key of the wasm code of the given hash + pub fn wasm_code(code_hash: &Hash) -> Self { + let mut segments = + Self::from(WASM_KEY_PREFIX.to_owned().to_db_key()).segments; + segments.push(DbKeySeg::StringSeg(WASM_CODE_PREFIX.to_owned())); + segments.push(DbKeySeg::StringSeg(code_hash.to_string())); + Key { segments } + } + + /// Returns a key of the wasm code hash of the given code path + pub fn wasm_hash(code_path: impl AsRef) -> Self { + let mut segments = + Self::from(WASM_KEY_PREFIX.to_owned().to_db_key()).segments; + segments.push(DbKeySeg::StringSeg(WASM_HASH_PREFIX.to_owned())); + segments.push(DbKeySeg::StringSeg(code_path.as_ref().to_string())); + Key { segments } + } + /// Returns a key of the validity predicate of the given address /// Only this function can push "?" 
segment for validity predicate pub fn validity_predicate(addr: &Address) -> Self { @@ -861,6 +886,25 @@ impl KeySeg for Epoch { } } +impl KeySeg for common::PublicKey { + fn parse(string: String) -> Result + where + Self: Sized, + { + let raw = common::PublicKey::from_str(&string) + .map_err(|err| Error::ParseKeySeg(err.to_string()))?; + Ok(raw) + } + + fn raw(&self) -> String { + self.to_string() + } + + fn to_db_key(&self) -> DbKeySeg { + DbKeySeg::StringSeg(self.raw()) + } +} + /// Epoch identifier. Epochs are identified by consecutive numbers. #[derive( Clone, @@ -1106,6 +1150,47 @@ impl Epochs { } None } + + /// Look-up the starting block height of an epoch at or before a given + /// height. + pub fn get_epoch_start_height( + &self, + height: BlockHeight, + ) -> Option { + for start_height in self.first_block_heights.iter().rev() { + if *start_height <= height { + return Some(*start_height); + } + } + None + } + + /// Look-up the starting block height of the given epoch + pub fn get_start_height_of_epoch( + &self, + epoch: Epoch, + ) -> Option { + if epoch < self.first_known_epoch { + return None; + } + + let mut cur_epoch = self.first_known_epoch; + for height in &self.first_block_heights { + if epoch == cur_epoch { + return Some(*height); + } else { + cur_epoch = cur_epoch.next(); + } + } + None + } + + /// Return all starting block heights for each successive Epoch. + /// + /// __INVARIANT:__ The returned values are sorted in ascending order. + pub fn first_block_heights(&self) -> &[BlockHeight] { + &self.first_block_heights + } } /// A value of a storage prefix iterator. @@ -1122,6 +1207,7 @@ mod tests { use proptest::prelude::*; use super::*; + use crate::types::address::testing::arb_address; proptest! { /// Tests that any key that doesn't contain reserved prefixes is valid. 
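// [Editor's note] Illustrative values, not part of the patch: with epoch 0
// starting at height 0 and epoch 1 at height 10, the new look-ups behave as
//
//     epochs.get_epoch_start_height(BlockHeight(9))  == Some(BlockHeight(0))
//     epochs.get_epoch_start_height(BlockHeight(10)) == Some(BlockHeight(10))
//     epochs.get_start_height_of_epoch(Epoch(1))     == Some(BlockHeight(10))
//
// as exercised by the updated test assertions below.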
@@ -1207,10 +1293,30 @@ mod tests { epochs.new_epoch(BlockHeight(10), max_age_num_blocks); println!("epochs {:#?}", epochs); assert_eq!(epochs.get_epoch(BlockHeight(0)), Some(Epoch(0))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(0)), + Some(BlockHeight(0)) + ); assert_eq!(epochs.get_epoch(BlockHeight(9)), Some(Epoch(0))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(9)), + Some(BlockHeight(0)) + ); assert_eq!(epochs.get_epoch(BlockHeight(10)), Some(Epoch(1))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(10)), + Some(BlockHeight(10)) + ); assert_eq!(epochs.get_epoch(BlockHeight(11)), Some(Epoch(1))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(11)), + Some(BlockHeight(10)) + ); assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(1))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(100)), + Some(BlockHeight(10)) + ); // epoch 2 epochs.new_epoch(BlockHeight(20), max_age_num_blocks); @@ -1219,8 +1325,20 @@ mod tests { assert_eq!(epochs.get_epoch(BlockHeight(9)), Some(Epoch(0))); assert_eq!(epochs.get_epoch(BlockHeight(10)), Some(Epoch(1))); assert_eq!(epochs.get_epoch(BlockHeight(11)), Some(Epoch(1))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(11)), + Some(BlockHeight(10)) + ); assert_eq!(epochs.get_epoch(BlockHeight(20)), Some(Epoch(2))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(20)), + Some(BlockHeight(20)) + ); assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(2))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(100)), + Some(BlockHeight(20)) + ); // epoch 3, epoch 0 and 1 should be trimmed epochs.new_epoch(BlockHeight(200), max_age_num_blocks); @@ -1231,7 +1349,15 @@ mod tests { assert_eq!(epochs.get_epoch(BlockHeight(11)), None); assert_eq!(epochs.get_epoch(BlockHeight(20)), Some(Epoch(2))); assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(2))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(100)), + Some(BlockHeight(20)) + ); assert_eq!(epochs.get_epoch(BlockHeight(200)), Some(Epoch(3))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(200)), + Some(BlockHeight(200)) + ); // increase the limit max_age_num_blocks = 200; @@ -1279,6 +1405,48 @@ mod tests { assert_eq!(epochs.get_epoch(BlockHeight(550)), Some(Epoch(7))); assert_eq!(epochs.get_epoch(BlockHeight(600)), Some(Epoch(8))); } + + proptest! { + /// Ensure that addresses in storage keys preserve the order of the + /// addresses. 
+ #[test] + fn test_address_in_storage_key_order( + addr1 in arb_address(), + addr2 in arb_address(), + ) { + test_address_in_storage_key_order_aux(addr1, addr2) + } + } + + fn test_address_in_storage_key_order_aux(addr1: Address, addr2: Address) { + println!("addr1 {addr1}"); + println!("addr2 {addr2}"); + let expected_order = addr1.cmp(&addr2); + + // Turn the addresses into strings + let str1 = addr1.to_string(); + let str2 = addr2.to_string(); + println!("addr1 str {str1}"); + println!("addr1 str {str2}"); + let order = str1.cmp(&str2); + assert_eq!(order, expected_order); + + // Turn the addresses into storage keys + let key1 = Key::from(addr1.to_db_key()); + let key2 = Key::from(addr2.to_db_key()); + println!("addr1 key {key1}"); + println!("addr2 key {key2}"); + let order = key1.cmp(&key2); + assert_eq!(order, expected_order); + + // Turn the addresses into raw storage keys (formatted to strings) + let raw1 = addr1.raw(); + let raw2 = addr2.raw(); + println!("addr 1 raw {raw1}"); + println!("addr 2 raw {raw2}"); + let order = raw1.cmp(&raw2); + assert_eq!(order, expected_order); + } } /// Helpers for testing with storage types. @@ -1308,6 +1476,11 @@ pub mod testing { // a key from key segments collection::vec(arb_key_seg(), 2..5) .prop_map(|segments| Key { segments }) + .prop_filter("Key length must be below IBC limit", |key| { + let key_str = key.to_string(); + let bytes = key_str.as_bytes(); + bytes.len() <= IBC_KEY_LIMIT + }) } /// Generate an arbitrary [`Key`] for a given address storage sub-space. diff --git a/core/src/types/time.rs b/core/src/types/time.rs index 72f7510e0be..af596db545e 100644 --- a/core/src/types/time.rs +++ b/core/src/types/time.rs @@ -194,7 +194,7 @@ impl From> for DateTimeUtc { } impl TryFrom for DateTimeUtc { - type Error = prost_types::TimestampOutOfSystemRangeError; + type Error = prost_types::TimestampError; fn try_from( timestamp: prost_types::Timestamp, @@ -216,7 +216,7 @@ impl From for prost_types::Timestamp { impl TryFrom for DateTimeUtc { - type Error = prost_types::TimestampOutOfSystemRangeError; + type Error = prost_types::TimestampError; fn try_from( timestamp: crate::tendermint_proto::google::protobuf::Timestamp, diff --git a/core/src/types/token.rs b/core/src/types/token.rs index 6a7f4eea8ee..3ee3c63bd37 100644 --- a/core/src/types/token.rs +++ b/core/src/types/token.rs @@ -11,6 +11,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use self::parameters::key_of_token; +use crate::ibc::applications::transfer::Amount as IbcAmount; use crate::types::address::{masp, Address, DecodeError as AddressError}; use crate::types::storage::{DbKeySeg, Key, KeySeg}; @@ -46,6 +47,11 @@ pub const MAX_AMOUNT: Amount = Amount { micro: u64::MAX }; pub type Change = i128; impl Amount { + /// Returns whether an amount is zero. + pub fn is_zero(&self) -> bool { + self.micro == 0 + } + /// Get the amount as a [`Change`] pub fn change(&self) -> Change { self.micro as Change @@ -99,6 +105,23 @@ impl Amount { micro: change as u64, } } + + /// Convert the amount to [`Decimal`] ignoring its scale (i.e. as an integer + /// in micro units). + pub fn as_dec_unscaled(&self) -> Decimal { + Into::::into(self.micro) + } + + /// Convert from a [`Decimal`] that's not scaled (i.e. an integer + /// in micro units). + /// + /// # Panics + /// + /// Panics if the given decimal is not an integer that fits `u64`. 
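// [Editor's note] Illustrative sketch, not part of the patch: the unscaled
// `Decimal` view round-trips with `Amount` in micro units:
//
//     let amount = Amount::from(1_000_000); // micro units
//     let dec = amount.as_dec_unscaled();
//     assert_eq!(Amount::from_dec_unscaled(dec), amount);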
+    pub fn from_dec_unscaled(micro: Decimal) -> Self {
+        let res = micro.to_u64().unwrap();
+        Self { micro: res }
+    }
 }
 
 impl serde::Serialize for Amount {
@@ -288,6 +311,18 @@ impl From<Amount> for Change {
     }
 }
 
+impl TryFrom<IbcAmount> for Amount {
+    type Error = AmountParseError;
+
+    fn try_from(amount: IbcAmount) -> Result<Self, Self::Error> {
+        // TODO: https://github.com/anoma/namada/issues/1089
+        if amount > u64::MAX.into() {
+            return Err(AmountParseError::InvalidRange);
+        }
+        Self::from_str(&amount.to_string())
+    }
+}
+
 /// Key segment for a balance key
 pub const BALANCE_STORAGE_KEY: &str = "balance";
 /// Key segment for head shielded transaction pointer key
@@ -498,35 +533,6 @@ pub enum TransferError {
     NoToken,
 }
 
-#[cfg(any(feature = "abciplus", feature = "abcipp"))]
-impl TryFrom<crate::ledger::ibc::data::FungibleTokenPacketData> for Transfer {
-    type Error = TransferError;
-
-    fn try_from(
-        data: crate::ledger::ibc::data::FungibleTokenPacketData,
-    ) -> Result<Self, Self::Error> {
-        let source =
-            Address::decode(&data.sender).map_err(TransferError::Address)?;
-        let target =
-            Address::decode(&data.receiver).map_err(TransferError::Address)?;
-        let token_str =
-            data.denom.split('/').last().ok_or(TransferError::NoToken)?;
-        let token =
-            Address::decode(token_str).map_err(TransferError::Address)?;
-        let amount =
-            Amount::from_str(&data.amount).map_err(TransferError::Amount)?;
-        Ok(Self {
-            source,
-            target,
-            token,
-            sub_prefix: None,
-            amount,
-            key: None,
-            shielded: None,
-        })
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use proptest::prelude::*;
@@ -591,6 +597,15 @@
         assert_eq!(max.checked_add(one), None);
         assert_eq!(max.checked_add(max), None);
     }
+
+    #[test]
+    fn test_amount_is_zero() {
+        let zero = Amount::from(0);
+        assert!(zero.is_zero());
+
+        let non_zero = Amount::from(1);
+        assert!(!non_zero.is_zero());
+    }
 }
 
 /// Helpers for testing with addresses.
diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs
index 3ac49efc778..34071791682 100644
--- a/core/src/types/transaction/decrypted.rs
+++ b/core/src/types/transaction/decrypted.rs
@@ -11,7 +11,8 @@ pub mod decrypted_tx {
 
     use super::EllipticCurve;
     use crate::proto::Tx;
-    use crate::types::transaction::{hash_tx, Hash, TxType, WrapperTx};
+    use crate::types::chain::ChainId;
+    use crate::types::transaction::{Hash, TxType, WrapperTx};
 
     #[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshSchema)]
     #[allow(clippy::large_enum_variant)]
@@ -56,14 +57,15 @@
         }
 
         /// Return the hash used as a commitment to the tx's contents in the
-        /// wrapper tx that includes this tx as an encrypted payload.
+        /// wrapper tx that includes this tx as an encrypted payload. The
+        /// commitment is computed on the unsigned tx if the tx is signed.
         pub fn hash_commitment(&self) -> Hash {
             match self {
                 DecryptedTx::Decrypted {
                     tx,
                     #[cfg(not(feature = "mainnet"))]
                     has_valid_pow: _,
-                } => hash_tx(&tx.to_bytes()),
+                } => Hash(tx.unsigned_hash()),
                 DecryptedTx::Undecryptable(wrapper) => wrapper.tx_hash.clone(),
             }
         }
@@ -91,6 +93,14 @@
                     .try_to_vec()
                     .expect("Encrypting transaction should not fail"),
                 ),
+                // If undecryptable we cannot extract the ChainId and
+                // expiration.
+                // If instead the tx gets decrypted successfully, the correct
+                // chain id and expiration are serialized inside the data field
+                // of the Tx, while the ones available in the chain_id and
+                // expiration fields are just placeholders
+                ChainId(String::new()),
+                None,
             )
         }
     }
diff --git a/core/src/types/transaction/governance.rs b/core/src/types/transaction/governance.rs
index ba2bd5f9336..3b1f183eaaa 100644
--- a/core/src/types/transaction/governance.rs
+++ b/core/src/types/transaction/governance.rs
@@ -1,10 +1,80 @@
+use std::fmt::Display;
+
 use borsh::{BorshDeserialize, BorshSerialize};
 use serde::{Deserialize, Serialize};
 
 use crate::types::address::Address;
-use crate::types::governance::{Proposal, ProposalError, ProposalVote};
+use crate::types::governance::{
+    self, Proposal, ProposalError, ProposalVote, VoteType,
+};
 use crate::types::storage::Epoch;
 
+/// The type of a Proposal
+#[derive(
+    Debug,
+    Clone,
+    PartialEq,
+    BorshSerialize,
+    BorshDeserialize,
+    Serialize,
+    Deserialize,
+)]
+pub enum ProposalType {
+    /// Default governance proposal with the optional wasm code
+    Default(Option<Vec<u8>>),
+    /// PGF council proposal
+    PGFCouncil,
+    /// ETH proposal
+    ETHBridge,
+}
+
+impl Display for ProposalType {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        match self {
+            ProposalType::Default(_) => write!(f, "Default"),
+            ProposalType::PGFCouncil => write!(f, "PGF Council"),
+            ProposalType::ETHBridge => write!(f, "ETH Bridge"),
+        }
+    }
+}
+
+impl PartialEq<VoteType> for ProposalType {
+    fn eq(&self, other: &VoteType) -> bool {
+        match self {
+            Self::Default(_) => {
+                matches!(other, VoteType::Default)
+            }
+            Self::PGFCouncil => {
+                matches!(other, VoteType::PGFCouncil(..))
+            }
+            Self::ETHBridge => {
+                matches!(other, VoteType::ETHBridge(_))
+            }
+        }
+    }
+}
+
+impl TryFrom<governance::ProposalType> for ProposalType {
+    type Error = ProposalError;
+
+    fn try_from(value: governance::ProposalType) -> Result<Self, Self::Error> {
+        match value {
+            governance::ProposalType::Default(path) => {
+                if let Some(p) = path {
+                    match std::fs::read(p) {
+                        Ok(code) => Ok(Self::Default(Some(code))),
+                        Err(_) => Err(Self::Error::InvalidProposalData),
+                    }
+                } else {
+                    Ok(Self::Default(None))
+                }
+            }
+            governance::ProposalType::PGFCouncil => Ok(Self::PGFCouncil),
+            governance::ProposalType::ETHBridge => Ok(Self::ETHBridge),
+        }
+    }
+}
+
 /// A tx data type to hold proposal data
 #[derive(
     Debug,
@@ -22,14 +92,14 @@ pub struct InitProposalData {
     pub content: Vec<u8>,
     /// The proposal author address
     pub author: Address,
+    /// The proposal type
+    pub r#type: ProposalType,
     /// The epoch from which voting is allowed
     pub voting_start_epoch: Epoch,
     /// The epoch from which voting is stopped
     pub voting_end_epoch: Epoch,
     /// The epoch from which this changes are executed
     pub grace_epoch: Epoch,
-    /// The code containing the storage changes
-    pub proposal_code: Option<Vec<u8>>,
 }
 
 /// A tx data type to hold vote proposal data
@@ -57,23 +127,14 @@ impl TryFrom<Proposal> for InitProposalData {
     type Error = ProposalError;
 
     fn try_from(proposal: Proposal) -> Result<Self, Self::Error> {
-        let proposal_code = if let Some(path) = proposal.proposal_code_path {
-            match std::fs::read(path) {
-                Ok(bytes) => Some(bytes),
-                Err(_) => return Err(Self::Error::InvalidProposalData),
-            }
-        } else {
-            None
-        };
-
         Ok(InitProposalData {
             id: proposal.id,
             content: proposal.content.try_to_vec().unwrap(),
             author: proposal.author,
+            r#type: proposal.r#type.try_into()?,
             voting_start_epoch: proposal.voting_start_epoch,
             voting_end_epoch: proposal.voting_end_epoch,
             grace_epoch: proposal.grace_epoch,
-            proposal_code,
         })
     }
}
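[Editor's note] Illustrative sketch, not part of the patch: the
`PartialEq<VoteType>` impl above lets tally code check a ballot against the
proposal's type by variant alone, ignoring the payload:

    let proposal = ProposalType::PGFCouncil;
    let vote = VoteType::PGFCouncil(HashSet::new());
    assert!(proposal == vote); // variants match, payload is not compared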
--git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 0e0a5e980e5..e643a526628 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -51,8 +51,8 @@ pub struct TxResult { pub vps_result: VpsResult, /// New established addresses created by the transaction pub initialized_accounts: Vec
, - /// Optional IBC event emitted by the transaction - pub ibc_event: Option, + /// IBC events emitted by the transaction + pub ibc_events: BTreeSet, } impl TxResult { @@ -143,8 +143,8 @@ fn iterable_to_string( pub struct UpdateVp { /// An address of the account pub addr: Address, - /// The new VP code - pub vp_code: Vec, + /// The new VP code hash + pub vp_code_hash: Hash, } /// A tx data type to initialize a new established account @@ -163,8 +163,8 @@ pub struct InitAccount { /// for signature verification of transactions for the newly created /// account. pub public_key: common::PublicKey, - /// The VP code - pub vp_code: Vec, + /// The VP code hash + pub vp_code_hash: Hash, } /// A tx data type to initialize a new validator account. @@ -195,7 +195,7 @@ pub struct InitValidator { /// immutable once set here. pub max_commission_rate_change: Decimal, /// The VP code for validator account - pub validator_vp_code: Vec, + pub validator_vp_code_hash: Hash, } /// Module that includes helper functions for classifying @@ -210,6 +210,7 @@ pub mod tx_types { use super::*; use crate::proto::{SignedTxData, Tx}; + use crate::types::chain::ChainId; use crate::types::transaction::protocol::ProtocolTx; /// Errors relating to decrypting a wrapper tx and its @@ -241,7 +242,15 @@ pub mod tx_types { impl From for Tx { fn from(ty: TxType) -> Self { - Tx::new(vec![], Some(ty.try_to_vec().unwrap())) + Tx::new( + vec![], + Some(ty.try_to_vec().unwrap()), + ChainId(String::new()), /* No need to provide a valid + * ChainId or expiration when + * casting back from + * TxType */ + None, + ) } } @@ -293,15 +302,19 @@ pub mod tx_types { .map(|data| SignedTxData::try_from_slice(&data[..])) { let signed_hash = Tx { - code: tx.code, + code_or_hash: tx.code_or_hash, data: Some(data.clone()), timestamp: tx.timestamp, + chain_id: tx.chain_id.clone(), + expiration: tx.expiration, } .hash(); match TxType::try_from(Tx { - code: vec![], + code_or_hash: vec![], data: Some(data), timestamp: tx.timestamp, + chain_id: tx.chain_id, + expiration: tx.expiration, }) .map_err(|err| TxError::Deserialization(err.to_string()))? 
{ @@ -342,6 +355,7 @@ pub mod tx_types { use super::*; use crate::types::address::nam; use crate::types::storage::Epoch; + use crate::types::time::DateTimeUtc; fn gen_keypair() -> common::SecretKey { use rand::prelude::ThreadRng; @@ -355,7 +369,12 @@ pub mod tx_types { /// data and returns an identical copy #[test] fn test_process_tx_raw_tx_no_data() { - let tx = Tx::new("wasm code".as_bytes().to_owned(), None); + let tx = Tx::new( + "wasm code".as_bytes().to_owned(), + None, + ChainId::default(), + None, + ); match process_tx(tx.clone()).expect("Test failed") { TxType::Raw(raw) => assert_eq!(tx, raw), @@ -371,6 +390,8 @@ pub mod tx_types { let inner = Tx::new( "code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId::default(), + None, ); let tx = Tx::new( "wasm code".as_bytes().to_owned(), @@ -379,6 +400,8 @@ pub mod tx_types { .try_to_vec() .expect("Test failed"), ), + inner.chain_id.clone(), + None, ); match process_tx(tx).expect("Test failed") { @@ -394,6 +417,8 @@ pub mod tx_types { let inner = Tx::new( "code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId::default(), + None, ); let tx = Tx::new( "wasm code".as_bytes().to_owned(), @@ -402,6 +427,8 @@ pub mod tx_types { .try_to_vec() .expect("Test failed"), ), + inner.chain_id.clone(), + None, ) .sign(&gen_keypair()); @@ -419,6 +446,8 @@ pub mod tx_types { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId::default(), + None, ); // the signed tx let wrapper = WrapperTx::new( @@ -434,7 +463,7 @@ pub mod tx_types { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, tx.chain_id.clone(), Some(DateTimeUtc::now())) .expect("Test failed"); match process_tx(wrapper).expect("Test failed") { @@ -456,6 +485,8 @@ pub mod tx_types { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId::default(), + None, ); // the signed tx let wrapper = WrapperTx::new( @@ -477,6 +508,8 @@ pub mod tx_types { Some( TxType::Wrapper(wrapper).try_to_vec().expect("Test failed"), ), + ChainId::default(), + None, ); let result = process_tx(tx).expect_err("Test failed"); assert_matches!(result, TxError::Unsigned(_)); @@ -490,6 +523,8 @@ pub mod tx_types { let payload = Tx::new( "transaction data".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId::default(), + None, ); let decrypted = DecryptedTx::Decrypted { tx: payload.clone(), @@ -517,6 +552,8 @@ pub mod tx_types { let payload = Tx::new( "transaction data".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId::default(), + None, ); let decrypted = DecryptedTx::Decrypted { tx: payload.clone(), @@ -535,8 +572,12 @@ pub mod tx_types { sig: common::Signature::try_from_sig(&ed_sig).unwrap(), }; // create the tx with signed decrypted data - let tx = - Tx::new(vec![], Some(signed.try_to_vec().expect("Test failed"))); + let tx = Tx::new( + vec![], + Some(signed.try_to_vec().expect("Test failed")), + ChainId::default(), + None, + ); match process_tx(tx).expect("Test failed") { TxType::Decrypted(DecryptedTx::Decrypted { tx: processed, diff --git a/core/src/types/transaction/protocol.rs b/core/src/types/transaction/protocol.rs index becc17941f0..a0aee560eda 100644 --- a/core/src/types/transaction/protocol.rs +++ b/core/src/types/transaction/protocol.rs @@ -33,6 +33,7 @@ mod protocol_txs { use super::*; use crate::proto::Tx; + use crate::types::chain::ChainId; use 
crate::types::key::*; use crate::types::transaction::{EllipticCurve, TxError, TxType}; @@ -87,6 +88,7 @@ mod protocol_txs { self, pk: &common::PublicKey, signing_key: &common::SecretKey, + chain_id: ChainId, ) -> Tx { Tx::new( vec![], @@ -98,6 +100,8 @@ mod protocol_txs { .try_to_vec() .expect("Could not serialize ProtocolTx"), ), + chain_id, + None, ) .sign(signing_key) } @@ -108,6 +112,7 @@ mod protocol_txs { signing_key: &common::SecretKey, wasm_dir: &'a Path, wasm_loader: F, + chain_id: ChainId, ) -> Self where F: FnOnce(&'a str, &'static str) -> Vec, @@ -125,6 +130,8 @@ mod protocol_txs { data.try_to_vec() .expect("Serializing request should not fail"), ), + chain_id, + None, ) .sign(signing_key), ) diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index 70ef2827bc7..5de138bacde 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -13,13 +13,13 @@ pub mod wrapper_tx { use crate::proto::Tx; use crate::types::address::Address; + use crate::types::chain::ChainId; use crate::types::key::*; use crate::types::storage::Epoch; + use crate::types::time::DateTimeUtc; use crate::types::token::Amount; use crate::types::transaction::encrypted::EncryptedTx; - use crate::types::transaction::{ - hash_tx, EncryptionKey, Hash, TxError, TxType, - }; + use crate::types::transaction::{EncryptionKey, Hash, TxError, TxType}; /// Minimum fee amount in micro NAMs pub const MIN_FEE: u64 = 100; @@ -206,7 +206,7 @@ pub mod wrapper_tx { epoch, gas_limit, inner_tx, - tx_hash: hash_tx(&tx.to_bytes()), + tx_hash: Hash(tx.unsigned_hash()), #[cfg(not(feature = "mainnet"))] pow_solution, } @@ -227,7 +227,7 @@ pub mod wrapper_tx { /// Decrypt the wrapped transaction. /// - /// Will fail if the inner transaction does match the + /// Will fail if the inner transaction doesn't match the /// hash commitment or we are unable to recover a /// valid Tx from the decoded byte stream. 
pub fn decrypt( @@ -236,20 +236,23 @@ pub mod wrapper_tx { ) -> Result { // decrypt the inner tx let decrypted = self.inner_tx.decrypt(privkey); + let decrypted_tx = Tx::try_from(decrypted.as_ref()) + .map_err(|_| WrapperTxErr::InvalidTx)?; + // check that the hash equals commitment - if hash_tx(&decrypted) != self.tx_hash { - Err(WrapperTxErr::DecryptedHash) - } else { - // convert back to Tx type - Tx::try_from(decrypted.as_ref()) - .map_err(|_| WrapperTxErr::InvalidTx) + if decrypted_tx.unsigned_hash() != self.tx_hash.0 { + return Err(WrapperTxErr::DecryptedHash); } + + Ok(decrypted_tx) } /// Sign the wrapper transaction and convert to a normal Tx type pub fn sign( &self, keypair: &common::SecretKey, + chain_id: ChainId, + expiration: Option, ) -> Result { if self.pk != keypair.ref_to() { return Err(WrapperTxErr::InvalidKeyPair); @@ -261,6 +264,8 @@ pub mod wrapper_tx { .try_to_vec() .expect("Could not serialize WrapperTx"), ), + chain_id, + expiration, ) .sign(keypair)) } @@ -365,6 +370,8 @@ pub mod wrapper_tx { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId::default(), + Some(DateTimeUtc::now()), ); let wrapper = WrapperTx::new( @@ -393,6 +400,8 @@ pub mod wrapper_tx { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId::default(), + Some(DateTimeUtc::now()), ); let mut wrapper = WrapperTx::new( @@ -416,7 +425,7 @@ pub mod wrapper_tx { assert_matches!(err, WrapperTxErr::DecryptedHash); } - /// We check that even if the encrypted payload and has of its + /// We check that even if the encrypted payload and hash of its /// contents are correctly changed, we detect fraudulent activity /// via the signature. #[test] @@ -427,6 +436,8 @@ pub mod wrapper_tx { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId::default(), + Some(DateTimeUtc::now()), ); // the signed tx let mut tx = WrapperTx::new( @@ -442,7 +453,7 @@ pub mod wrapper_tx { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, ChainId::default(), None) .expect("Test failed"); // we now try to alter the inner tx maliciously @@ -460,8 +471,12 @@ pub mod wrapper_tx { .expect("Test failed"); // malicious transaction - let malicious = - Tx::new("Give me all the money".as_bytes().to_owned(), None); + let malicious = Tx::new( + "Give me all the money".as_bytes().to_owned(), + None, + ChainId::default(), + None, + ); // We replace the inner tx with a malicious one wrapper.inner_tx = EncryptedTx::encrypt( @@ -470,7 +485,7 @@ pub mod wrapper_tx { ); // We change the commitment appropriately - wrapper.tx_hash = hash_tx(&malicious.to_bytes()); + wrapper.tx_hash = Hash(malicious.unsigned_hash()); // we check ciphertext validity still passes assert!(wrapper.validate_ciphertext()); diff --git a/core/src/types/validity_predicate.rs b/core/src/types/validity_predicate.rs index 618dc53db34..a9bfb81168a 100644 --- a/core/src/types/validity_predicate.rs +++ b/core/src/types/validity_predicate.rs @@ -3,6 +3,8 @@ use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Serialize}; +use crate::types::hash::Hash; + /// A validity predicate with an input that is intended to be invoked via `eval` /// host function. 
#[derive( @@ -15,8 +17,8 @@ use serde::{Deserialize, Serialize}; Deserialize, )] pub struct EvalVp { - /// The VP code to `eval` - pub vp_code: Vec, + /// The VP code hash to `eval` + pub vp_code_hash: Hash, /// The input for the `eval`ed VP pub input: Vec, } diff --git a/docker/namada-wasm/Dockerfile b/docker/namada-wasm/Dockerfile index 2b4a55a46cc..fc55044e7a4 100644 --- a/docker/namada-wasm/Dockerfile +++ b/docker/namada-wasm/Dockerfile @@ -9,8 +9,12 @@ WORKDIR /__w/namada/namada RUN rustup toolchain install 1.65.0 --profile minimal RUN rustup target add wasm32-unknown-unknown +RUN apt-get update && apt-get install -y \ + protobuf-compiler \ + && apt-get clean + # Download binaryen and extract wasm-opt ADD https://github.com/WebAssembly/binaryen/releases/download/version_110/binaryen-version_110-x86_64-linux.tar.gz /tmp/binaryen.tar.gz RUN tar -xf /tmp/binaryen.tar.gz RUN mv binaryen-version_*/bin/wasm-opt /usr/local/bin -RUN rm -rf binaryen-version_*/ /tmp/binaryen.tar.gz \ No newline at end of file +RUN rm -rf binaryen-version_*/ /tmp/binaryen.tar.gz diff --git a/docker/namada/Dockerfile b/docker/namada/Dockerfile index 703fa0f949c..bc1df48e937 100644 --- a/docker/namada/Dockerfile +++ b/docker/namada/Dockerfile @@ -14,6 +14,7 @@ RUN apt-get update && apt-get install -y \ git \ libssl-dev \ pkg-config \ + protobuf-compiler \ && apt-get clean COPY --from=planner /app/recipe.json recipe.json @@ -48,4 +49,4 @@ EXPOSE 26659 EXPOSE 26657 ENTRYPOINT ["/usr/local/bin/namada"] -CMD ["--help"] \ No newline at end of file +CMD ["--help"] diff --git a/documentation/dev/.gitignore b/documentation/dev/.gitignore index 3006b271da6..6abfa10e30a 100644 --- a/documentation/dev/.gitignore +++ b/documentation/dev/.gitignore @@ -1 +1,6 @@ book/ + +# pagetoc generated (from https://github.com/slowsage/mdbook-pagetoc#configuration) +theme/index.hbs +theme/pagetoc.css +theme/pagetoc.js \ No newline at end of file diff --git a/documentation/dev/Makefile b/documentation/dev/Makefile index 804a2f00d8f..75fc5b40b57 100644 --- a/documentation/dev/Makefile +++ b/documentation/dev/Makefile @@ -12,5 +12,6 @@ dev-deps: $(cargo) install mdbook-linkcheck $(cargo) install mdbook-open-on-gh $(cargo) install mdbook-admonish + $(cargo) install mdbook-pagetoc .PHONY: build serve diff --git a/documentation/dev/assets/custom.css b/documentation/dev/assets/custom.css index cf7a00c8701..20386001e19 100644 --- a/documentation/dev/assets/custom.css +++ b/documentation/dev/assets/custom.css @@ -7,4 +7,10 @@ footer { text-align: center; border-top: 1px solid black; padding: 10px 0; +} + +/* Hide page table-of-contents when there's only one heading + https://github.com/slowsage/mdbook-pagetoc#configuration */ +a[class^='pagetoc-H']:only-child { + display: none; } \ No newline at end of file diff --git a/documentation/dev/book.toml b/documentation/dev/book.toml index f9ac4cebb28..2b3ee5ef03b 100644 --- a/documentation/dev/book.toml +++ b/documentation/dev/book.toml @@ -10,8 +10,8 @@ title = "Anoma - DOCS" [output.html] edit-url-template = "https://github.com/anoma/namada/edit/main/documentation/dev/{path}" git-repository-url = "https://github.com/anoma/namada" -additional-css = ["assets/custom.css", "assets/mdbook-admonish.css"] -additional-js = ["assets/mermaid.min.js", "assets/mermaid-init.js"] +additional-css = ["assets/custom.css", "assets/mdbook-admonish.css", "theme/pagetoc.css"] +additional-js = ["assets/mermaid.min.js", "assets/mermaid-init.js", "theme/pagetoc.js"] mathjax-support = true git-branch = "main" @@ -31,3 +31,5 
@@ renderer = ["html"] [preprocessor.admonish] command = "mdbook-admonish" assets_version = "2.0.0" # do not edit: managed by `mdbook-admonish install` + +[preprocessor.pagetoc] \ No newline at end of file diff --git a/documentation/dev/src/README.md b/documentation/dev/src/README.md index 5eb79763402..aa66afe4843 100644 --- a/documentation/dev/src/README.md +++ b/documentation/dev/src/README.md @@ -6,7 +6,7 @@ Welcome to Namada's docs! Namada is a sovereign, proof-of-stake blockchain protocol that enables private, asset-agnostic cash and private bartering among any number of parties. To learn more about the protocol, we recommend the following resources: -- [Introduction to Namada Medium article](https://medium.com/namadanetwork/introducing-namada-a-blockchain-for-private-asset-agnostic-bartering-dcc47ac42d9f) +- [Introducing Namada: Interchain Asset-agnostic Privacy](https://blog.namada.net/introducing-namada-interchain-asset-agnostic-privacy/) - [Namada's Whitepaper](https://namada.network/papers/whitepaper.pdf) - [Namada's Vision paper](https://namada.network/papers/vision-paper.pdf) diff --git a/documentation/dev/src/SUMMARY.md b/documentation/dev/src/SUMMARY.md index 0eced4ff2a8..18e7281e052 100644 --- a/documentation/dev/src/SUMMARY.md +++ b/documentation/dev/src/SUMMARY.md @@ -22,6 +22,9 @@ - [Actors](./explore/design/actors.md) - [Testnet setup](./explore/design/testnet-setup.md) - [Testnet launch procedure](./explore/design/testnet-launch-procedure/README.md) + - [Dev](./explore/dev/README.md) + - [Development considerations](./explore/dev/development-considerations.md) + - [Storage API](./explore/dev/storage_api.md) - [Libraries & Tools](./explore/libraries/README.md) - [Cryptography]() - [network](./explore/libraries/network.md) diff --git a/documentation/dev/src/explore/design/ledger/accounts.md b/documentation/dev/src/explore/design/ledger/accounts.md index 0b2b5284259..cb8846a856d 100644 --- a/documentation/dev/src/explore/design/ledger/accounts.md +++ b/documentation/dev/src/explore/design/ledger/accounts.md @@ -14,7 +14,7 @@ There's only a single account type. Each account is associated with: Similar to [Zcash Sapling protocol payment addresses and keys (section 3.1)](https://raw.githubusercontent.com/zcash/zips/master/protocol/protocol.pdf), users can generate spending keys for private payments. A shielded payment address, incoming viewing key and full viewing key are derived from a spending key. In a private payment, a shielded payment address is hashed with a diversifier into a diversified transmission key. When a different diversifier function is chosen for different transactions, it prevents the transmission key from being matched across the transactions. -The encoding of the shielded addresses, spending and viewing keys is not yet decided, but for consistency we'll probably use a the same schema with different prefixes for anything that can use an identifier. +The encoding of the shielded addresses, spending and viewing keys is not yet decided, but for consistency we'll probably use the same schema with different prefixes for anything that can use an identifier. - TODO consider using a schema similar to the [unified addresses proposed in Zcash](https://github.com/zcash/zips/issues/482), that are designed to unify the payment addresses across different versions by encoding a typecode and the length of the payment address together with it. This may be especially useful for the protocol upgrade system and fractal scaling system. 
@@ -25,7 +25,7 @@ state may be comprised of keys of the built-in supported types and values of arb The dynamic storage sub-space could be a unix filesystem-like tree under the account's address key-space with `read, write, delete, has_key, iter_prefix` -(and maybe a few other convenience functions for hash-maps, hash-sets, optional values, etc.) functions parameterized with the the account's address. +(and maybe a few other convenience functions for hash-maps, hash-sets, optional values, etc.) functions parameterized with the account's address. In addition, the storage sub-space would provide: diff --git a/documentation/dev/src/explore/dev/README.md b/documentation/dev/src/explore/dev/README.md new file mode 100644 index 00000000000..be3a80a9398 --- /dev/null +++ b/documentation/dev/src/explore/dev/README.md @@ -0,0 +1,3 @@ +# Dev + +This section contains developer knowledge sharing about implementation details, considerations and recommendations. diff --git a/documentation/dev/src/explore/dev/development-considerations.md b/documentation/dev/src/explore/dev/development-considerations.md new file mode 100644 index 00000000000..42457d03b19 --- /dev/null +++ b/documentation/dev/src/explore/dev/development-considerations.md @@ -0,0 +1,41 @@ +# Development considerations + +Given our setting, the number one consideration for development is correctness. To that end, striving to write clean code with small, reusable and well-tested units is very helpful. Less code means fewer opportunities for defects. First and foremost, optimize code for readability. Common approaches to managing complexity, like separation of concerns and separation of effectful code from pure code, are always a good idea, as they make the code easier to test. On a hot path, it's good to avoid allocations when possible. + +For safety-critical parts it is good to add redundancy in safety checks, especially for checks that can prevent accidental loss of assets. As an example, the node should try to prevent validators from double signing, which could weaken the security of the PoS system and lead to a punitive loss of tokens in slashing for the validator. The term "belt and braces" is appropriate for such measures. + +## Error handling + +A concern closely related to correctness is error handling. Whenever possible, it is best to rule out errors using the type system, i.e. make invalid states impossible to represent. However, there are many places where that is not practical or possible (for example, when we consume some values from Tendermint, in complex logic or in IO operations like reading and writing from/to storage). How errors should be handled depends on the context. + +When you're not sure which context some piece of code falls into, or if you want to make it re-usable in different settings, the default should be the "defensive coding" approach, with any possible issues captured in `Result`'s errors and propagated up to the caller. The caller can then decide how to handle errors. + +### Native code that doesn't depend on interactions + +In the ledger's shell and the protocol code that's compiled to a native binary, in logic that does not depend on user interactions like transactions and queries, it is preferable to fail early on an error in functionality that is critical to the overall operation of the ledger (systems without which the ledger cannot continue to operate). *Panics* are preferable when an error from which there is no reasonable way to recover occurs in this context.
The emphasis on panics is perhaps somewhat counter-intuitive. It makes for easy diagnostics and prevents the error from propagating into a deeper issue that might even go unnoticed. To counter the possible issues, this code must be tested incredibly well to ensure that the panics cannot actually occur during regular operation and to maintain the ledger's stability. Property-based testing is a good fit, but it is essential that the inputs generated for these tests cover as much of the real-world scenarios as possible. + +### Interaction-sensitive code + +A place where "defensive coding" comes into play is logic that depends on user input (typically transaction and query handling in the native ledger code and native VPs; the P2P layer is abstracted away by Tendermint). We must ensure that a malicious input cannot trigger unexpected behavior or cause a panic. In practical terms, this means avoiding assumptions about the user input and handling any possible issues (like data decoding failures, fallible conversions and integer overflows) gracefully. Fuzz testing can help in finding these issues. + +### Sandboxed code + +In the WASM transactions and validity predicates, we have the safety net of a sandboxed environment, so it is totally fine to *panic* on unexpected inputs. However, it doesn't provide a very good experience, as any panic that occurs in the WASM is turned into a generic WASM runtime error message. That takes us to the next point. + +### The client + +In the context of the client, we should do as much checking as possible before submitting a transaction to the ledger (e.g. before a transfer, we check that the balance of the source is sufficient to execute the transaction) to prevent possible issues early, before any gas is spent, and to provide a nice experience with user-friendly messages, where we can explain what went wrong. + +## Practical guidelines + +In practical terms this means: + +- Avoid using `unwrap`, `expect` and `panic!`/`unreachable!`. Instead, turn error conditions into `Error` types. Using `unwrap_or_default` can often be a sensible choice, but it should be well reasoned about - for example, when reading a token balance, the default value `0` is fine in most settings. +- Avoid the default arithmetic operators, use checked versions instead (e.g. `checked_add` or `checked_div`); see the sketch after this list. +- Avoid using `as` for conversions, use `TryFrom`/`TryInto` instead. +- Avoid `unsafe` code - this is typically only needed at FFI boundaries (e.g. WASM) and should be well tested and abstracted away from a public API. +- Avoid indexing operators and slicing without bounds checks (e.g. `[0]` or `[0..2]`), prefer to use calls that cannot panic or guard them with bounds checks. +- Don't use `assert!` in non-test code, but use `debug_assert!` generously. +- The type system doesn't make up for a lack of tests. Specified behavior should always be covered by tests to avoid regressions. +- If some code is hard to test, take it as a hint that it could use refactoring. If it's hard to test, it's most likely easy for it to break in unexpected ways. +- If something breaks past the development stage (i.e. in devnets or testnets), it's a hint of a lack of testing. You should write a test that reproduces the issue before fixing it.
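To make these guidelines concrete, here is a minimal, self-contained sketch (an editor's illustration, not part of the patch above); the `FeeError` type and both helper functions are hypothetical names invented for this example:

```rust
/// Hypothetical error type for the sketches below.
#[derive(Debug, PartialEq)]
enum FeeError {
    /// The fee computation overflowed `u64`
    Overflow,
    /// The height does not fit into `usize` on this platform
    HeightOutOfRange,
}

/// Checked arithmetic instead of `gas_limit * gas_price`, which would
/// panic in debug builds or silently wrap in release builds.
fn total_fee(gas_limit: u64, gas_price: u64) -> Result<u64, FeeError> {
    gas_limit.checked_mul(gas_price).ok_or(FeeError::Overflow)
}

/// `TryFrom` instead of an `as` cast, which would silently truncate.
fn height_to_index(height: u64) -> Result<usize, FeeError> {
    usize::try_from(height).map_err(|_| FeeError::HeightOutOfRange)
}

fn main() {
    assert_eq!(total_fee(100, 2), Ok(200));
    assert_eq!(total_fee(u64::MAX, 2), Err(FeeError::Overflow));
    assert!(height_to_index(42).is_ok());
}
```

Note how each error condition becomes a value the caller must handle, rather than a panic buried in library code.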
diff --git a/documentation/dev/src/explore/dev/storage_api.md b/documentation/dev/src/explore/dev/storage_api.md new file mode 100644 index 00000000000..ba0e34e0228 --- /dev/null +++ b/documentation/dev/src/explore/dev/storage_api.md @@ -0,0 +1,96 @@ +# Storage API + +To facilitate code reuse, the core crate's `storage_api` module was designed to unify the interface for reading from and writing to the storage from: + +1. Protocol (typically `InitChain` and `FinalizeBlock` handlers; and read-only `Query` handler) +2. Transactions +3. Validity predicates (read-only - there are actually two instances of `StorageRead` in VPs, more on this below) + +This module comes with two main traits, `StorageRead` and `StorageWrite`, together with `storage_api::Result` and `storage_api::Error` types that you can use to implement your custom logic. + +~~~admonish example title="Token balance example" +Token balance read and write may look something like this (the [real thing is here](https://github.com/anoma/namada/blob/main/core/src/ledger/storage_api/token.rs)): +```rust +fn read_balance<S>( + s: &S, + token: &Address, + owner: &Address + ) -> storage_api::Result<token::Amount> + where S: StorageRead; + +fn write_balance<S>( + s: &mut S, + token: &Address, + owner: &Address, + balance: token::Amount + ) -> storage_api::Result<()> + where S: StorageRead + StorageWrite; +``` +~~~ + +```admonish info title="Data encoding" +Note that the `StorageRead::read` and `StorageWrite::write` methods use Borsh encoding. If you want custom encoding, use `read_bytes` and `write_bytes`. +``` + +## Error handling + +All the methods in `StorageRead` and `StorageWrite` return `storage_api::Result`, so you can simply use the try operator `?` in your implementation to handle any potential errors. + +A custom `storage_api::Error` can be constructed from a static str with `new_const`, or from another Error type with `new`. Furthermore, you can wrap your custom `Result` with `into_storage_result` using the `trait ResultExt`. + +```admonish warning
In library code written over `storage_api`, it is critical to propagate errors correctly (no `unwrap/expect`) to be able to re-use it in the native environment. +``` + +In native VPs the `storage_api` methods may return an error when we run out of gas in the current execution, and a panic would crash the node. This is a good motivation to document the error conditions of your functions. Furthermore, adding new error conditions to existing functions should be considered a breaking change and reviewed carefully! + +In protocol code, the traits' methods will never fail under normal operation, and so if you're absolutely sure that there are no other error conditions, you're safe to call `expect` on these. + +We don't yet have a good story for error matching and, on a related note, we should consider using `std::io::Error` in place of `storage_api::Error`. + +## Transactions + +For transaction-specific functionality, you can use `trait TxEnv`, which inherits both `StorageRead` and `StorageWrite`. + +## Validity predicates + +Similarly, for VP-specific functionality, there's `trait VpEnv`, which is implemented for both the native and WASM VPs. + +To access `StorageRead` from a VP, you can pick between the `pre` and `post` view functions to read the state prior and posterior to the transaction execution, respectively, as sketched in the example below.
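The following is a minimal editor's sketch (not part of the patch) of what a check over the `pre` and `post` state might look like. It reuses the `read_balance` signature from the token balance example above and assumes only that both views implement `StorageRead`; how a VP obtains its `pre` and `post` instances is elided, and the function name is invented for illustration:

```rust
/// A sketch of a VP-style check: the owner's balance of the given token
/// must not decrease as a result of the transaction.
fn balance_did_not_decrease<Pre, Post>(
    pre: &Pre,
    post: &Post,
    token: &Address,
    owner: &Address,
) -> storage_api::Result<bool>
where
    Pre: StorageRead,
    Post: StorageRead,
{
    // Balance before the transaction was applied...
    let before = read_balance(pre, token, owner)?;
    // ...and after all of its storage changes.
    let after = read_balance(post, token, owner)?;
    Ok(after >= before)
}
```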
+ +```admonish warning +If you expect that the value you're reading must not change, prefer to use the `pre` view function so that the validation cannot be affected by any storage change applied in the transaction. +``` + +## Testing + +To test code written over the `storage_api` traits, look for `TestWlStorage`, which you can instantiate with `default()` and you're good to go. + +For transactions and VPs, there are `TestTxEnv` and `TestVpEnv` in the `tests` crate together with respective `Ctx` types that implement the `storage_api` traits. You can find examples of how these are used across the codebase. + +## Lazy collections + +For dynamically sized collections, there are `LazyVec`, `LazyMap` and `LazySet`, with APIs similar to those of the standard in-memory collections. The data for these can be read on demand and they don't need to be fully read to write into or delete from them, which is also useful for validation. + +~~~admonish example title="LazyMap usage example" +To use lazy collections, call `open` on them with some storage key prefix, typically starting with the address that will store the data. This will give you a "handle" that you can use to access and manipulate the data. Keys in a `LazyMap` and values in a `LazySet` are turned into storage key segments via `impl KeySeg`: + +```rust +let mut storage = TestWlStorage::default(); +let address = todo!(); + +// Storage prefix "/#{address}/map" +let prefix = Key::from(address.to_db_key()) + .push(&"map".to_owned()) + .expect("Cannot obtain a storage key"); + +let handle = LazyMap::<u32, String>::open(prefix); + +// Storage key "/#{address}/map/data/0000000" will point to value "zero" +handle.insert(&mut storage, 0_u32, "zero".to_owned())?; +assert_eq!(handle.get(&storage, &0)?, Some("zero".to_owned())); + +handle.remove(&mut storage, &0)?; +assert!(handle.get(&storage, &0)?.is_none()); +``` +~~~ diff --git a/documentation/dev/src/specs/ledger.md b/documentation/dev/src/specs/ledger.md index 20169171c0b..6767a4d5ae0 100644 --- a/documentation/dev/src/specs/ledger.md +++ b/documentation/dev/src/specs/ledger.md @@ -8,7 +8,7 @@ The ledger is backed by an account-based system. Each account has a unique [addr ### Addresses -There are two main types of address: transparent and shielded. +There are two main types of addresses: transparent and shielded. The transparent addresses are the addresses of accounts associated with dynamic storage sub-spaces, where the address of the account is the prefix key segment of its sub-space. @@ -39,7 +39,7 @@ The SHA-256 hash of this data [encoded with Borsh](encoding.html#borsh-binary-en The fields of a `WrapperTx` are: -- `fee`: Fee to be payed by the source implicit account for including the tx in a block. +- `fee`: Fee to be paid by the source implicit account for including the tx in a block. - `pk`: [Public key](crypto.md#public-keys) of the source implicit account. - `epoch`: The [epoch](#epochs) in which the transaction is being included. This should be queried from a synchronized ledger node before the transaction is fully constructed. @@ -52,7 +52,7 @@ The fields of a `WrapperTx` are: Please refer to the [signing of the default transactions](ledger/default-transactions.md#signing-transactions) to learn how to construct inner transaction's signatures which will be accepted by the [default validity predicates](ledger/default-validity-predicates.md).
- Note that currently the key doesn't change and so it stay constant for the duration of a chain and `::G1Affine::prime_subgroup_generator()` may be used to encrypt the inner transaction for now as done by the the [`WrapperTx::new` method](https://dev.namada.net/master/rustdoc/namada/types/transaction/wrapper/wrapper_tx/struct.WrapperTx.html#method.new) (depends on ). + Note that currently the key doesn't change and so it stays constant for the duration of a chain and `::G1Affine::prime_subgroup_generator()` may be used to encrypt the inner transaction for now as done by the [`WrapperTx::new` method](https://dev.namada.net/master/rustdoc/namada/types/transaction/wrapper/wrapper_tx/struct.WrapperTx.html#method.new) (depends on ). - `tx_hash`: A SHA-256 hash of the inner transaction. This MUST match the hash of decrypted `inner_tx`. @@ -86,7 +86,7 @@ The parameters for [epoch](#epochs) duration are: ### Mempool -When a request to add a transaction to the mempool is received, it will only be added it's a [`Tx` encoded with proto3](./encoding.md#transactions). +When a request to add a transaction to the mempool is received, it will only be added if it's a [`Tx` encoded with proto3](./encoding.md#transactions). ### Outer transaction processing @@ -211,7 +211,7 @@ cargo test test_vp_stack_limiter #### Transaction host environment functions -The following functions from the host ledger are made available in transaction's WASM code. They MAY be imported in the WASM module as shown bellow and MUST be provided by the ledger's WASM runtime: +The following functions from the host ledger are made available in transaction's WASM code. They MAY be imported in the WASM module as shown below and MUST be provided by the ledger's WASM runtime: ```wat (import "env" "gas" (func (param i32))) @@ -237,12 +237,12 @@ Additionally, the WASM module MUST export its memory as shown: (export "memory" (memory 0)) ``` -- `namada_tx_init_account` TODO newly created accounts' validity predicates aren't used until the block is committed (i.e. only the transaction that created the account may write into its storage in the block in which its being applied). +- `namada_tx_init_account` TODO newly created accounts' validity predicates aren't used until the block is committed (i.e. only the transaction that created the account may write into its storage in the block in which it's being applied). - TODO describe functions in detail #### Validity predicate host environment functions -The following functions from the host ledger are made available in validity predicate's WASM code. They MAY be imported in the WASM module as shown bellow and MUST be provided by the ledger's WASM runtime. +The following functions from the host ledger are made available in validity predicate's WASM code. They MAY be imported in the WASM module as shown below and MUST be provided by the ledger's WASM runtime. ```wat (import "env" "gas" (func (param i32))) diff --git a/documentation/docs/src/README.md b/documentation/docs/src/README.md index ff2269fd44c..4b9af2b17ee 100644 --- a/documentation/docs/src/README.md +++ b/documentation/docs/src/README.md @@ -7,12 +7,14 @@ Welcome to Namada's docs! [Namada](https://namada.net/) is a Proof-of-Stake layer 1 protocol for asset-agnostic, interchain privacy. Namada is Anoma's first fractal instance and is currently being developed by [Heliax](https://heliax.dev), a public goods lab. 
Key innovations include: -- ZCash-like transfers for any assets (fungible and non-fungible) + +- Zcash-like transfers for any assets (fungible and non-fungible) - Rewarded usage of privacy as a public good - Interoperability with Ethereum via a custom bridge with trust-minimisation - Vertically integrated user interfaces ## Overview of features + - Proof-of-Stake with governance to secure and evolve Namada - Fast-finality BFT with 4-second blocks - Near-zero fees @@ -24,11 +26,13 @@ Key innovations include: - Ledger application For high-level introductions, we recommend: -- Article: [Introducing Namada: Shielded Transfers with Any Assets](https://medium.com/namadanetwork/introducing-namada-shielded-transfers-with-any-assets-dce2e579384c) + +- Article: [Introducing Namada: Interchain Asset-agnostic Privacy](https://blog.namada.net/introducing-namada-interchain-asset-agnostic-privacy/) - Article: [What is Namada?](https://blog.namada.net/what-is-namada/) - [Talks & Podcasts](https://namada.net/talks) To learn more about the protocol, we recommend the following in-depth resources: + - Talk at ZK8 [Namada: asset-agnostic interchain privacy](https://youtu.be/5K6YxmZPFkE) - [Namada's specifications](https://specs.namada.net) - [Codebase](https://github.com/anoma/namada) diff --git a/documentation/docs/src/SUMMARY.md b/documentation/docs/src/SUMMARY.md index 0b690d15e8b..fea36d84dbd 100644 --- a/documentation/docs/src/SUMMARY.md +++ b/documentation/docs/src/SUMMARY.md @@ -29,7 +29,8 @@ - [IBC](./user-guide/ibc.md) - [Testnets](./testnets/README.md) - [Environment setup](./testnets/environment-setup.md) - - [Pre-genesis validator](./testnets/pre-genesis-validator.md) + - [Genesis validator setup](./testnets/genesis-validator-setup.md) + - [Applying as a genesis validator](./testnets/genesis-validator-apply.md) - [Running your genesis validator](./testnets/run-your-genesis-validator.md) - [Running a full node](./testnets/running-a-full-node.md) - [Becoming a validator post-genesis](./testnets/post-genesis-validator.md) diff --git a/documentation/docs/src/testnets/README.md b/documentation/docs/src/testnets/README.md index 41e4b17a4ed..11e19546d6a 100644 --- a/documentation/docs/src/testnets/README.md +++ b/documentation/docs/src/testnets/README.md @@ -13,22 +13,63 @@ If you find a bug, please submit an issue with the `bug` [issue template](https: ## How to join a Namada testnet 1. [Environment setup](./environment-setup.md) - 2. [Pre-genesis validator](./pre-genesis-validator.md) + 2. [Pre-genesis validator setup](./genesis-validator-setup.md) + 3. [Pre-genesis validator apply](./genesis-validator-apply.md) 3. [Running your genesis validator](./run-your-genesis-validator.md) 4. [Running a full node](./running-a-full-node.md) 5. [Becoming a validator post-genesis](./post-genesis-validator.md) ![testnet_flowchart](../images/testnet_flowchart.png) -## Latest Testnet The Namada public testnet is permissionless, anyone can join without the authorisation of a centralised party. Expect frequent upgrades (every two weeks). 
+## Latest Upgrade + + +## Latest Testnet +- Namada public testnet 8: + - From date: 17th of May 2023 17.00 UTC + - Namada protocol version: `v0.15.3` + - Tendermint (Core) version: `v0.1.4-abciplus` + - CHAIN_ID: `public-testnet-8.0.b92ef72b820` + + +## Testnet History Timeline +- Namada public testnet 7: + - From date: 24th of April 2023 17.00 UTC + - Namada protocol version: `v0.15.1` + - Tendermint (Core) version: `v0.1.4-abciplus` + - CHAIN_ID: `public-testnet-7.0.3c5a38dc983` + +- Namada public testnet 6: + - From date: 29th of March 2023 17.00 UTC + - Namada protocol version: `v0.14.3` + - Tendermint (Core) version: `v0.1.4-abciplus` + - CHAIN_ID: `public-testnet-6.0.a0266444b06` + + +- Namada public testnet 5: + - From date: 15th of March 2023 + - Namada protocol version: `v0.14.2` + - Tendermint version: `v0.1.4-abciplus` + - CHAIN_ID: `public-testnet-5.0.d25aa64ace6` + +- Namada public testnet 4: + - From date: 22nd of February 2023 + - Namada protocol version: `v0.14.1` + - Tendermint version: `v0.1.4-abciplus` + - CHAIN_ID: `public-testnet-4.0.16a35d789f4` + +- Namada public testnet 3 hotfix (did not suffice): + - From date: 13th of February 2023 + - Namada protocol version: `v0.13.4` + - Tendermint version: `v0.1.4-abciplus` + - CHAIN_ID: `public-testnet-3.0.81edd4d6eb6` + - Namada public testnet 3: - From date: 9th of February 2023 - Namada protocol version: `v0.13.3` - Tendermint version: `v0.1.4-abciplus` - - CHAIN_ID: `TBD` - -## Testnet History Timeline + - CHAIN_ID: `public-testnet-3.0.81edd4d6eb6` - Namada public testnet 2.1.2 hotfix: - From date: 25th of January 2023 diff --git a/documentation/docs/src/testnets/environment-setup.md b/documentation/docs/src/testnets/environment-setup.md index 32b4ddeab89..8d6c7b1174d 100644 --- a/documentation/docs/src/testnets/environment-setup.md +++ b/documentation/docs/src/testnets/environment-setup.md @@ -6,53 +6,60 @@ If you don't want to build Namada from source you can [install Namada from binar Export the following variables: ```bash -export NAMADA_TAG=v0.13.3 +export NAMADA_TAG=v0.15.3 export TM_HASH=v0.1.4-abciplus ``` ## Installing Namada -- Clone namada repository and build binaries - ```bash - git clone https://github.com/anoma/namada && cd namada && git checkout $NAMADA_TAG - ``` -- Build binaries - - `make build-release` - - There may be some additional requirements you may have to install (linux): - ```bash - sudo apt-get update -y - sudo apt-get install build-essential make pkg-config libssl-dev libclang-dev -y - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh - ``` +1. Clone namada repository and checkout the correct versions + +```bash +git clone https://github.com/anoma/namada && cd namada && git checkout $NAMADA_TAG +``` +2. 
Build binaries +```bash +make build-release +``` +- There may be some additional requirements you may have to install (linux): +```bash +sudo apt-get update -y +sudo apt-get install build-essential make pkg-config libssl-dev libclang-dev -y +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` ## Installing Tendermint -- Install the heliaxdev/tendermint fork - ```bash - git clone https://github.com/heliaxdev/tendermint && cd tendermint && git checkout $TM_HASH - make build - ``` - The above requires that golang is correctly installed with the correct $PATH setup - - In linux, this can be resolved by - - `sudo snap install go --channel=1.18/stable --classic` -- Copy both the namada and tendermint binaries to somewhere on $PATH (or uselol the relative paths) - - This step may or may not be necessary - - namada binaries can be found in `/target/release` - - tendermint is in `build/tendermint` +1. Install the heliaxdev/tendermint fork +```bash +git clone https://github.com/heliaxdev/tendermint && cd tendermint && git checkout $TM_HASH +make build +``` +The above requires that golang is correctly installed with the correct $PATH setup +```admonish note +In linux, this can be resolved by +`sudo snap install go --channel=1.18/stable --classic` +``` +2. Copy both the namada and tendermint binaries to somewhere on $PATH (or use the relative paths). This step may or may not be necessary. + +- namada binaries can be found in `/target/release` +- tendermint is in `build/tendermint` + ## Check ports -- Open ports on your machine: +1. Open ports on your machine: - 26656 - 26657 - - To check if ports are open you can setup a simple server and curl the port from another host - - Inside the namada folder, run - ``` bash - { printf 'HTTP/1.0 200 OK\r\nContent-Length: %d\r\n\r\n' "$(wc -c < namada)"; cat namada; } | nc -l $PORT` - ``` - - From another host run one of the two commands: - - `nmap $IP -p$PORT` - - `curl $IP:$PORT >/dev/null` +2. To check if ports are open you can setup a simple server and curl the port from another host + +- Inside the namada folder, run +``` bash +{ printf 'HTTP/1.0 200 OK\r\nContent-Length: %d\r\n\r\n' "$(wc -c < namada)"; cat namada; } | nc -l $PORT` +``` +- From another host run one of the two commands: + - `nmap $IP -p$PORT` + - `curl $IP:$PORT >/dev/null` ## Verifying your installation - Make sure you are using the correct tendermint version - `tendermint version` should output `0.1.4-abciplus` - Make sure you are using the correct Namada version - - `namada --version` should output `Namada v0.13.3` + - `namada --version` should output `Namada v0.15.3` diff --git a/documentation/docs/src/user-guide/genesis-validator-apply.md b/documentation/docs/src/testnets/genesis-validator-apply.md similarity index 61% rename from documentation/docs/src/user-guide/genesis-validator-apply.md rename to documentation/docs/src/testnets/genesis-validator-apply.md index c4bf6bdd1f7..1415ae37db7 100644 --- a/documentation/docs/src/user-guide/genesis-validator-apply.md +++ b/documentation/docs/src/testnets/genesis-validator-apply.md @@ -1,8 +1,8 @@ -## Applying to be a genesis validator +# Applying to be a genesis validator Before a testnet launches, you can apply to be a genesis validator. -### Set up +## 1) Set up Follow [this guide](./genesis-validator-setup.md#pre-genesis) on how to generate your "pre-genesis" validator files. 
@@ -15,11 +15,18 @@ account_public_key = "00f1bd321be2e23b9503653dd50fcd5177ca43a0ade6da60108eaecde0 staking_reward_public_key = "005725f952115838590fc7c5dd9590bc054ac4bd5af55672a40df4ac7dca50ce97" protocol_public_key = "0054c213d2f8fe2dd3fc5a41a52fd2839cb49643d960d7f75e993202692c5d8783" dkg_public_key = "6000000054eafa7320ddebf00c9487e5f7ea5107a8444f042b74caf9ed5679163f854577bf4d0992a8fd301ec4f3438c9934c617a2c71649178e536f7e2a8cdc1f8331139b7fd9b4d36861f0a9915d83f61d7f969219f0eba95bb6fa45595425923d4c0e" +commission_rate = "0.05" +max_commission_rate_change = "0.01" net_address = "1.2.3.4:26656" tendermint_node_key = "00e1a8fe1abceb700063ab4558baec680b64247e2fd9891962af552b9e49318d8d" ``` This file contains only public information and is safe to share publicly. -### Submitting the config -If you want to be a genesis validator for a testnet, please make a pull request to [https://github.com/anoma/namada-testnets](https://github.com/namada/namada-testnets) adding your `validator.toml` file to the relevant directory (e.g. `namada-close-quarters-testnet-1/` for the `namada-cq-1` testnet), renaming it to `$alias.toml`. e.g. if you chose your alias to be "bertha", submit the file with the name `bertha.toml`. You can see what an example PR looks like [here](https://github.com/namada/namada-testnets/pull/1). +## 2.1) Submitting the config +If you want to be a genesis validator for the testnet, please make a pull request to https://github.com/anoma/namada-testnets adding your validator.toml file to the relevant directory (e.g. `namada-public-testnet-2` for the second public testnet), renaming it to `$alias.toml`. + +e.g. if you chose your alias to be "bertha", submit the file with the name `bertha.toml`. You can see what an example PR looks like [here](https://github.com/anoma/namada-testnets/pull/29). + +## 2.2) Wait for the CHAIN_ID +Wait until corresponding `CHAIN_ID` has been distributed. \ No newline at end of file diff --git a/documentation/docs/src/user-guide/genesis-validator-setup.md b/documentation/docs/src/testnets/genesis-validator-setup.md similarity index 57% rename from documentation/docs/src/user-guide/genesis-validator-setup.md rename to documentation/docs/src/testnets/genesis-validator-setup.md index b23ec0657ae..6d338ab7cd7 100644 --- a/documentation/docs/src/user-guide/genesis-validator-setup.md +++ b/documentation/docs/src/testnets/genesis-validator-setup.md @@ -4,7 +4,7 @@ A genesis validator is one which is a validator right from the first block of th ### Prerequisites -- a machine that meets the [requirements](./install.md#hardware-requirements) for running a validator node +- a machine that meets the [requirements](../user-guide/install/hardware.md) for running a validator node - an associated public IPv4 address with ports 26656 reachable from anywhere for P2P connections ## Pre-genesis @@ -13,50 +13,52 @@ To setup all the [required keys](#required-keys) for a genesis validator for an You must also provide a static `{IP:port}` to the `--net-address` argument of your future node's P2P address. -```shell -export ALIAS="1337-validator" -namada client utils init-genesis-validator \ - --alias $ALIAS \ - --net-address 1.2.3.4:26656 +### 1. 
Create your validator keys: +``` bash +export ALIAS="CHOOSE_A_NAME_FOR_YOUR_VALIDATOR" +export PUBLIC_IP="LAPTOP_OR_SERVER_IP" +namada client utils init-genesis-validator --alias $ALIAS \ +--max-commission-rate-change 0.01 --commission-rate 0.05 \ +--net-address $PUBLIC_IP:26656 ``` -After generating your keys, the command will print something like this: +### 2. After generating your keys, the command will print something like this: -```shell -Pre-genesis TOML written to .namada/pre-genesis/1337-validator/validator.toml +```admonish note +If you have set the variable $XDG_DATA_HOME this is where the pre-genesis TOML will be written to. Otherwise see below for the default locations. ``` -This file is the public configuration of your validator. You can safely share this file with the network's organizer, who is responsible for setting up and publishing the finalized genesis file and Namada configuration for the chain. - -Note that the wallet containing your private keys will also be written into this directory. - -## After network config release - -Once the network is finalized, a new chain ID will be created and released on [anoma-network-config/releases](https://github.com/heliaxdev/namada-network-config/releases) (a custom configs URL can be used instead with `NAMADA_NETWORK_CONFIGS_SERVER` env var). You can use it to setup your genesis validator node for the `--chain-id` argument in the command below. - +#### Linux ```shell -export CHAIN_ID="TBD" -namada client utils join-network \ - --chain-id $CHAIN_ID \ - --genesis-validator $ALIAS +Pre-genesis TOML written to $HOME/.local/share/namada ``` - -This command will use your pre-genesis wallet for the given chain and take care of setting up Namada with Tendermint. - -If you run this command in the same directory that you ran `namada client utils init-genesis-validator`, it should find the pre-genesis wallet for you, otherwise you can pass the path to the pre-genesis directory using `--pre-genesis-path`. e.g. - +#### MacOS ```shell -namada client utils join-network \ - --chain-id $CHAIN_ID \ - --pre-genesis-path workspace/.namada/pre-genesis/$ALIAS +Pre-genesis TOML written to /Users/$USER/Library/Application\ Support/Namada ``` -Once setup, you can start the ledger as usual with e.g.: +### 3. Save this directory as an environment variable for later use: +#### Linux +```shell +export BASE_DIR="$HOME/.local/share/namada" +``` +#### MacOS ```shell -namada ledger +export BASE_DIR="/Users/$USER/Library/Application\ Support/Namada" ``` +This file is the public configuration of your validator. You can safely share this file with the network's organizer, who is responsible for setting up and publishing the finalized genesis file and Namada configuration for the chain. + +Note that the wallet containing your private keys will also be written into this directory. + +### 4. You can print the validator.toml by running: + +### Linux +`cat $HOME/.local/share/namada/pre-genesis/$ALIAS/validator.toml` +### MacOS +`cat $HOME/Library/Application\ Support/Namada/pre-genesis/$ALIAS/validator.toml` + ## Required keys - Account key: Can be used to sign transactions that require authorization in the default validator validity predicate, such as a balance transfer. 
diff --git a/documentation/docs/src/testnets/pre-genesis-validator.md b/documentation/docs/src/testnets/pre-genesis-validator.md deleted file mode 100644 index 71f0b6b0830..00000000000 --- a/documentation/docs/src/testnets/pre-genesis-validator.md +++ /dev/null @@ -1,19 +0,0 @@ -# 2) Generate pre-genesis validator setup - -- Create a pre-genesis file inside the `namada` repository. - - - ``` bash - cd namada - export ALIAS="CHOOSE_A_NAME_FOR_YOUR_VALIDATOR" - export PUBLIC_IP="LAPTOP_OR_SERVER_IP" - namada client utils init-genesis-validator --alias $ALIAS --max-commission-rate-change 0.01 --commission-rate 0.05 --net-address $PUBLIC_IP:26656 - ``` - - Expect the message `Pre-genesis TOML written to .namada/pre-genesis/[your-alias]/validator.toml` -- This will generate a folder inside `namada/.namada`. - - `cat namada/.namada/pre-genesis/$ALIAS/validator.toml` - -## 2.1) Submitting the config -If you want to be a genesis validator for the testnet, please make a pull request to https://github.com/anoma/namada-testnets adding your validator.toml file to the relevant directory (e.g. `namada-public-testnet-2` for the second public testnet), renaming it to `$alias.toml`. e.g. if you chose your alias to be "bertha", submit the file with the name `bertha.toml`. You can see what an example PR looks like [here](https://github.com/anoma/namada-testnets/pull/29). - -## 2.2) Wait for the CHAIN_ID -Wait until corresponding `CHAIN_ID` has been distributed. diff --git a/documentation/docs/src/testnets/run-your-genesis-validator.md b/documentation/docs/src/testnets/run-your-genesis-validator.md index 9f54a582cca..042c73724d2 100644 --- a/documentation/docs/src/testnets/run-your-genesis-validator.md +++ b/documentation/docs/src/testnets/run-your-genesis-validator.md @@ -1,44 +1,82 @@ # 3) (OPTIONAL) Reset your validator node **You can skip to 3.1 if you don't need to reset the ledger state (most can skip to 3.1)** -- This is the right time to save any logs file you want to share with us! -**IMPORTANT STEP** +```admonish note +With the release of `v0.15.3` we have introduced a new base directory. This means that you will need to reset your validator node to use the new base directory. This is a one time operation. +The base directory has been moved from `.namada` to `.local/share/namada` on Linux and `Library/Application Support/Namada` on MacOS. +``` -- Save your `pre-genesis` folder in the ledger base directory - - `mkdir backup-pregenesis && cp -r .namada/pre-genesis backup-pregenesis/` -**Ensure keys are saved** -- `ls backup-pregenesis` should output a saved `wallet.toml`. + + +This is the right time to save any logs file you want to share with us! + +### 1. IMPORTANT! Save your `pre-genesis` folder in the ledger base directory + +```bash +mkdir backup-pregenesis && cp -r .namada/pre-genesis backup-pregenesis/ +``` + +### 2. **Ensure keys are saved** + +`ls backup-pregenesis` should output a saved `wallet.toml`. **DELETING THE OLD DIRECTORY** *(WARNING: THIS WILL ALSO DELETE YOUR VALIDATOR KEYS, DO NOT RUN UNLESS YOU'VE BACKED IT UP)* -- Delete ledger base directory - - `rm -rf .namada` -- Check that namada and tendermint binaries are correct (see step 1) -- Create a `.namada` folder - - `mkdir .namada` - - `mkdir .namada/pre-genesis` -- Copy the backuped file back to `.namada/pre-genesis` folder - - `cp -r backup-pregenesis/* .namada/pre-genesis/` +### 3. Delete ledger base directory by running `rm -rf .namada` + +### 4. Check that namada and tendermint binaries are correct. 
`namada --version` should give `v0.15.3` and `tendermint version` should give `0.1.4-abciplus` +### 5. Create a base directory for the ledger +#### Linux +`mkdir $HOME/.local/share/namada` +#### MacOS +`mkdir $HOME/Library/Application\ Support/Namada` + +### 6. Save the base directory path to a variable +#### Linux: +```bash +export BASE_DIR=$HOME/.local/share/namada +``` +#### MacOS: +```bash +export BASE_DIR=$HOME/Library/Application\ Support/Namada +``` +### 7. Create a pre-genesis directory +#### Linux: +`mkdir $HOME/.local/share/namada/pre-genesis` +#### MacOS: +`mkdir $HOME/Library/Application\ Support/Namada/pre-genesis` + +### 8. Copy the backed-up files back to the `$BASE_DIR/pre-genesis` folder +```bash +cp -r backup-pregenesis/* $BASE_DIR/pre-genesis/ +``` ## 3.1) Run your node as a genesis validator -- Wait for the genesis file to be ready, `CHAIN_ID`. -- Join the network with the `CHAIN_ID` - ``` bash - export CHAIN_ID="TBD" - namada client utils join-network \ - --chain-id $CHAIN_ID --genesis-validator $ALIAS - ``` -- Start your node and sync - - `NAMADA_TM_STDOUT=true namada node ledger run` - - if you want more logs - - `NAMADA_LOG=debug NAMADA_TM_STDOUT=true namada node ledger run` - - if you want to save logs to a file - - `TIMESTAMP=$(date +%s)` - - `NAMADA_LOG=debug NAMADA_TM_STDOUT=true namada node ledger run &> logs-${TIMESTAMP}.txt` - - `tail -f -n 20 logs-${TIMESTAMP}.txt` (in another shell) -- If started correctly you should see a the following log: - - `[] This node is a validator ...` - \ No newline at end of file +#### 1. Wait for the genesis file to be ready; you will receive a `CHAIN_ID`. +#### 2. Join the network with the `CHAIN_ID` +``` bash +export CHAIN_ID="public-testnet-8.0.b92ef72b820" +namada client utils join-network \ +--chain-id $CHAIN_ID --genesis-validator $ALIAS +``` + +#### 3. Start your node and sync +```bash +NAMADA_TM_STDOUT=true namada node ledger run +``` +Optional: If you want more logs, you can instead run +```bash +NAMADA_LOG=debug NAMADA_TM_STDOUT=true namada node ledger run +``` +And if you want to save your logs to a file, you can instead run: +```bash +TIMESTAMP=$(date +%s) +NAMADA_LOG=debug NAMADA_TM_STDOUT=true namada node ledger run &> logs-${TIMESTAMP}.txt +tail -f -n 20 logs-${TIMESTAMP}.txt ## (in another shell) +``` +#### 4. If started correctly you should see the following log: +`[] This node is a validator ...` + diff --git a/documentation/docs/src/testnets/running-a-full-node.md b/documentation/docs/src/testnets/running-a-full-node.md index b8948f922ae..b185c3d47df 100644 --- a/documentation/docs/src/testnets/running-a-full-node.md +++ b/documentation/docs/src/testnets/running-a-full-node.md @@ -1,15 +1,21 @@ # 4) Run your full node as a user -- Wait for the genesis file to be ready, you will receive a `$CHAIN_ID`. -- Join the network with the `CHAIN_ID` +1. Wait for the genesis file to be ready, you will receive a `$CHAIN_ID`. +2. Join the network with the `CHAIN_ID` ```bash - export CHAIN_ID="TBD" + export CHAIN_ID="public-testnet-8.0.b92ef72b820" namada client utils join-network --chain-id $CHAIN_ID ``` -- Start your node and sync - - `NAMADA_TM_STDOUT=true namada node ledger run` - - if you want more logs - - `NAMADA_LOG=debug ANOMA_TM_STDOUT=true namada node ledger run` - - if you want to save logs to a file - - `TIMESTAMP=$(date +%s)` - - `ANOMA_LOG=debug NAMADA_TM_STDOUT=true namada node ledger run &> logs-${TIMESTAMP}.txt` - - `tail -f -n 20 logs-${TIMESTAMP}.txt` (in another shell) \ No newline at end of file +3.
Start your node and sync
+```bash
+ NAMADA_TM_STDOUT=true namada node ledger run
+ ```
+Optional: If you want more logs, you can instead run
+```bash
+NAMADA_LOG=debug NAMADA_TM_STDOUT=true namada node ledger run
+```
+And if you want to save your logs to a file, you can instead run:
+```bash
+TIMESTAMP=$(date +%s)
+NAMADA_LOG=debug NAMADA_TM_STDOUT=true namada node ledger run &> logs-${TIMESTAMP}.txt
+tail -f -n 20 logs-${TIMESTAMP}.txt ## (in another shell)
+``` \ No newline at end of file diff --git a/documentation/docs/src/testnets/upgrades.md b/documentation/docs/src/testnets/upgrades.md index dbc92c97e3a..0a33d4f802d 100644 --- a/documentation/docs/src/testnets/upgrades.md +++ b/documentation/docs/src/testnets/upgrades.md @@ -1,17 +1,78 @@ # Upgrades This page covers all installation steps required by various upgrades to testnets.
+
+## Latest Upgrade
+
+TBD
+
+
## Latest Testnet -***06/02/2023*** `public-testnet-3`
+***17/05/2023*** `public-testnet-8`
-The testnet launches on 09/02/2023 at 17:00 UTC with the genesis validators from `public-testnet-3`. It launches with [version v0.13.3](https://github.com/anoma/namada/releases/tag/v0.13.3) and chain-id `TBD`. -If your genesis transaction is contained in [this folder](https://github.com/anoma/namada-testnets/tree/main/namada-public-testnet-3), you are one of the genesis validators. In order for the testnet to come online at least 2/3 of those validators need to be online.
+The testnet launches on 17/05/2023 at 17:00 UTC with the genesis validators from `public-testnet-8`. It launches with [version v0.15.3](https://github.com/anoma/namada/releases/tag/v0.15.3) and chain-id `public-testnet-8.0.b92ef72b820`.
+If your genesis transaction is contained in [this folder](https://github.com/anoma/namada-testnets/tree/main/namada-public-testnet-8), you are one of the genesis validators. In order for the testnet to come online, at least 2/3 of those validators need to be online.
The installation docs are updated and can be found [here](./environment-setup.md). The running docs for validators/fullnodes can be found [here](./running-a-full-node.md).
## Previous upgrades:
+***24/04/2023*** `public-testnet-7` (offline)
+
+The testnet launches on 24/04/2023 at 17:00 UTC with the genesis validators from `public-testnet-7`. It launches with [version v0.15.1](https://github.com/anoma/namada/releases/tag/v0.15.1)
+
+The fix intended to solve the storage issue only partially solved it. This led to `v0.15.3`, which addresses the remaining issues.
+
+***29/03/2023*** `public-testnet-6` (offline)
+
+The testnet launches on 29/03/2023 at 17:00 UTC with the genesis validators from `public-testnet-6`. It launches with [version v0.14.3](https://github.com/anoma/namada/releases/tag/v0.14.3) and chain-id `public-testnet-6.0.a0266444b06`.
+If your genesis transaction is contained in [this folder](https://github.com/anoma/namada-testnets/tree/main/namada-public-testnet-5), you are one of the genesis validators. In order for the testnet to come online, at least 2/3 of those validators need to be online.
+
+The installation docs are updated and can be found [here](./environment-setup.md). The running docs for validators/fullnodes can be found [here](./running-a-full-node.md).
+
+***13/02/2023*** `public-testnet-3`
+
+On *09/02/2023* the Namada chain `public-testnet-3` halted due to a bug in the Proof of Stake implementation when handling an edge case. Over the weekend, the team was able to fix and test a new patch that resolves the issue at hand.
On *13/02/2023 11:30 UTC*, we were able to recover the network by having internal validators upgrade to the new patch. We are now calling on validators to upgrade to the new testnet as well, which will allow you to interact with the recovered chain.
+
+**Upgrading**
+1. Begin by stopping all instances of the namada node
+```bash
+killall namadan
+```
+2. Build the new tag (or download the binaries [here](https://github.com/anoma/namada/releases/tag/v0.13.4))
+```bash
+cd namada
+export NAMADA_TAG=v0.13.4
+make build-release
+```
+3. Copy the new binaries to your path. More in-depth instructions can be found [here](./environment-setup.md)
+4. Once this has been completed, **the node must resync from genesis** (see below)
+
+**How to resync from genesis:**
+1. As a precautionary measure, make a backup of your pregenesis keys
+```bash
+mkdir backup-pregenesis && cp -r .namada/pre-genesis backup-pregenesis/
+```
+2. Delete the relevant folder in .namada
+```bash
+rm -r .namada/public-testnet-3.0.81edd4d6eb6
+rm .namada/public-testnet-3.0.81edd4d6eb6.toml
+```
+WARNING: Do not delete the entire `.namada` folder, as it contains your pre-genesis keys. If this is accidentally done, you will have to copy over the backup-pregenesis folder. See [these instructions](./run-your-genesis-validator.md) for more details.
+3. Rejoin the network
+```bash
+export CHAIN_ID="public-testnet-3.0.81edd4d6eb6"
+namada client utils join-network \
+--chain-id $CHAIN_ID --genesis-validator $ALIAS
+```
+4. Run the node. One can simply run the ledger again using the familiar command
+```bash
+ NAMADA_TM_STDOUT=true namada node ledger run
+ ```
+
+Please reach out if you have any questions. This upgrade can be done asynchronously, but if you wish to continue validating the chain and testing our features, you must execute the above steps.
+ ### Hotfix for Testnet `public-testnet-2.1.4014f207f6d` ***27/01/2023*** diff --git a/documentation/docs/src/user-guide/FAQ.md b/documentation/docs/src/user-guide/FAQ.md index b1f5c453225..183ea62837f 100644 @@ -27,7 +27,7 @@ HINT: `namadac balance` ### **Q: How do I use the Ethereum Bridge?** -**A:** The Ethereum Bridge is not yet implemented as of 0.12.0. Keep an eye on the [Changelog](https://github.com/anoma/namada/tree/main/.changelog) :eyes: to see when it will be officially released. +**A:** The Ethereum Bridge is not yet implemented as of 0.12.0. Keep an eye on the [Changelog](https://github.com/anoma/namada/tree/main/.changelog) 👀 to see when it will be officially released. ### **Q: How can I make an IBC transfer?** diff --git a/documentation/docs/src/user-guide/ibc.md b/documentation/docs/src/user-guide/ibc.md index 90cb175c328..ebc1c5218b8 100644 --- a/documentation/docs/src/user-guide/ibc.md +++ b/documentation/docs/src/user-guide/ibc.md @@ -42,7 +42,7 @@ The path to the config file, which is saved in the variable `$HERMES_CONFIG` Each chain configuration is specified under the `[[chains]]` object. - These are the pieces of this puzzle you want to keep your :eyes: on: + These are the pieces of this puzzle you want to keep your 👀 on: - `chains.id` is the name of the chain - `chains.rpc_address` specifies the port that the channel is communicating through, and will be the argument for the `ledger_address` of Namada when interacting with the ledger (will become clearer later) - Make sure to change the IP address to the IP address of your local machine that is running this node!
@@ -256,9 +256,9 @@ killall namadan ## Transferring assets over IBC This will make transfers across chains by Namada CLI. This assumes that a channel has been created and Hermes is running with the proper config. -In order to do this by Namada's `ibc-transfer` command, we will need to know the `base-dir` and `ledger-address` of each instance (and other transfer parameters). +In order to do this with Namada's `ibc-transfer` command, we will need to know the `base-dir` and `node` of each instance (and other transfer parameters). `base-dir` is the base directory of each node. If you have used the script, the directory is `${IBC_RS}/data/namada-*/.namada`. -`ledger-address` is `rpc_addr` in the relevant hermes' config files. +`node` is `rpc_addr` in the relevant Hermes config file. One can run `grep "rpc_addr" ${HERMES_CONFIG}`. @@ -289,7 +289,7 @@ namadac --base-dir ${BASE_DIR_A} --receiver ${RECEIVER_RAW_ADDRESS} \ --token ${TOKEN_ALIAS} \ --channel-id ${CHANNEL_ID} \ - --ledger-address ${LEDGER_ADDRESS_A} + --node ${LEDGER_ADDRESS_A} ``` Where the above variables in `${VARIABLE}` must be substituted with appropriate values. The raw address of the receiver can be found by `namadaw --base-dir ${BASE_DIR_B} address find --alias ${RECEIVER}`. @@ -303,5 +303,5 @@ namadac --base-dir ${BASE_DIR_A} --receiver atest1d9khqw36g56nqwpkgezrvvejg3p5xv2z8y6nydehxprygvp5g4znj3phxfpyv3pcgcunws2x0wwa76 \ --token nam \ --channel-id channel-0 \ - --ledger-address 127.0.0.1:27657 + --node 127.0.0.1:27657 ``` diff --git a/documentation/docs/src/user-guide/install/from-source.md b/documentation/docs/src/user-guide/install/from-source.md index 9a0672aec3a..ed9fa2619f9 100644 --- a/documentation/docs/src/user-guide/install/from-source.md +++ b/documentation/docs/src/user-guide/install/from-source.md @@ -17,7 +17,7 @@ Then, install the remaining dependencies. **Ubuntu:** running the following command should install everything needed: ```shell -sudo apt-get install -y make git-core libssl-dev pkg-config libclang-12-dev build-essential +sudo apt-get install -y make git-core libssl-dev pkg-config libclang-12-dev build-essential protobuf-compiler ``` **Mac:** installing the Xcode command line tools should provide you with almost everything you need: @@ -26,6 +26,14 @@ sudo apt-get install -y make git-core libssl-dev pkg-config libclang-12-dev buil xcode-select --install ```
+
+`protoc` is also required. On Mac, you can install it with `Homebrew`:
+
+```shell
+brew install protobuf
+```
+
+Please refer to the [protoc installation docs](https://grpc.io/docs/protoc-installation/) for other installation options.
+
Now that you have all the required dependencies installed, you can clone the source code from the [Namada repository](https://github.com/anoma/namada) and build it with: ```shell @@ -36,4 +44,4 @@ make install ``` ```admonish warning During internal and private testnets, checkout the latest testnet branch using `git checkout $NAMADA_TESTNET_BRANCH`. -``` \ No newline at end of file +``` diff --git a/documentation/docs/src/user-guide/ledger.md b/documentation/docs/src/user-guide/ledger.md index bd02ff6e77d..e70d4949977 100644 --- a/documentation/docs/src/user-guide/ledger.md +++ b/documentation/docs/src/user-guide/ledger.md @@ -10,11 +10,20 @@ namada ledger The node will attempt to connect to the persistent validator nodes and other peers in the network, and synchronize to the latest block. -By default, the ledger will store its configuration and state in the `.namada` directory relative to the current working directory.
You can use the `--base-dir` CLI global argument or `NAMADA_BASE_DIR` environment variable to change it. +By default, the ledger will store its configuration and state in either `$HOME/.local/share/namada` (Linux) or `$HOME/Library/Application\ Support/Namada` (MacOS). You can use the `--base-dir` CLI global argument or `NAMADA_BASE_DIR` environment variable to change it.
-The ledger also needs access to the built WASM files that are used in the genesis block. These files are included in release and shouldn't be modified, otherwise your node will fail with a consensus error on the genesis block. By default, these are expected to be in the `wasm` directory, relative to the current working directory. This can also be set with the `--wasm-dir` CLI global argument, `NAMADA_WASM_DIR` environment variable or the configuration file.
+- Linux:
+```bash
+export BASE_DIR=$HOME/.local/share/namada
+```
+- MacOS:
+```bash
+export BASE_DIR=$HOME/Library/Application\ Support/Namada
+```
+
+The ledger also needs access to the built WASM files that are used in the genesis block. These files are included in the release and shouldn't be modified, otherwise your node will fail with a consensus error on the genesis block. By default, these are expected to be in the `wasm` directory inside the chain directory that's in the base directory. This can also be set with the `--wasm-dir` CLI global argument, `NAMADA_WASM_DIR` environment variable or the configuration file.
-The ledger configuration is stored in `.namada/{chain_id}/config.toml` (with +The ledger configuration is stored in `$BASE_DIR/{chain_id}/config.toml` (with default `--base-dir`). It is created when you join the network. You can modify that file to change the configuration of your node. All values can also be set via environment variables.
Names of the recognized environment variables are diff --git a/documentation/docs/src/user-guide/ledger/on-chain-governance.md b/documentation/docs/src/user-guide/ledger/on-chain-governance.md index 9223bf19ea2..7cae5acd583 100644 --- a/documentation/docs/src/user-guide/ledger/on-chain-governance.md +++ b/documentation/docs/src/user-guide/ledger/on-chain-governance.md @@ -27,7 +27,9 @@ Now, we need to create a json file `proposal.json` holding the content of our pr "voting_start_epoch": 3, "voting_end_epoch": 6, "grace_epoch": 12, - "proposal_code_path": "./wasm_for_tests/tx_no_op.wasm"
+ "type": {
+ "Default":null
+ }
} ``` @@ -37,7 +39,11 @@ You should change the value of: - `voting_start_epoch` with a future epoch (must be a multiple of 3) for which you want the voting to begin - `voting_end_epoch` with an epoch greater than `voting_start_epoch`, a multiple of 3, and by which no further votes will be accepted - `grace_epoch` with an epoch greater than `voting_end_epoch` + 6, in which the proposal, if passed, will come into effect -- `proposal_code_path` with the absolute path of the wasm file to execute (or remove the field completely)
+- `type` with the correct type for your proposal, which can be one of the following:
+ - `"type": {"Default":null}` for a default proposal without wasm code
+ - `"type": {"Default":"$PATH_TO_WASM_CODE"}` for a default proposal with an associated wasm code
+ - `"type": "PGFCouncil"` to initiate a proposal for a new council
+ - `"type": "ETHBridge"` for an Ethereum bridge-related proposal
As soon as your `proposal.json` file is ready, you can submit the proposal with (making sure to be in the same directory as the `proposal.json` file): @@ -70,7 +76,7 @@ namada client vote-proposal \ --signer validator ``` -where `--vote` can be either `yay` or `nay`. +where `--vote` can be either `yay` or `nay`. An optional `memo` field can be attached to the vote for PGF and ETH bridge proposals. ## Check the result diff --git a/documentation/docs/src/user-guide/ledger/staking.md b/documentation/docs/src/user-guide/ledger/staking.md index 517b2745d22..18168d22b87 100644 --- a/documentation/docs/src/user-guide/ledger/staking.md +++ b/documentation/docs/src/user-guide/ledger/staking.md @@ -19,7 +19,7 @@ namada client bonds --owner my-new-acc The result of this query will inform the epoch from which your delegations will be active. -Because the PoS system is just an account, you can query its balance, which is the sum of all staked tokens: +Because the PoS system is just an account, you can query its balance, which is the sum of all staked tokens and unbonded tokens that have not yet been withdrawn: ```shell namada client balance --owner PoS diff --git a/documentation/docs/src/user-guide/troubleshooting.md b/documentation/docs/src/user-guide/troubleshooting.md index ad519f1b52b..32cb584bcad 100644 --- a/documentation/docs/src/user-guide/troubleshooting.md +++ b/documentation/docs/src/user-guide/troubleshooting.md @@ -68,6 +68,32 @@ rustup target add wasm32-unknown-unknown ``` (Yes the name of the target is `wasm32-unknown-unknown`. This is not the compiler unable to tell which version/release it is).
+#### OpenSSL
+
+If you run into the error
+
+```bash
+Could not find directory of OpenSSL installation, and this `-sys` crate cannot
+ proceed without this knowledge. If OpenSSL is installed and this crate had
+ trouble finding it, you can set the `OPENSSL_DIR` environment variable for the
+ compilation process.
+
+ Make sure you also have the development packages of openssl installed.
+ For example, `libssl-dev` on Ubuntu or `openssl-devel` on Fedora.
+
+ If you're in a situation where you think the directory *should* be found
+ automatically, please open a bug at https://github.com/sfackler/rust-openssl
+ and include information about your system as well as this message.
+```
+
+Then the solution is spelled out for you. You need to install the development packages of OpenSSL. For Ubuntu, this is `libssl-dev`. For Fedora, this is `openssl-devel`. For other distributions, please refer to the [OpenSSL website](https://www.openssl.org/).
+
+For Ubuntu, this can be achieved with
+
+```bash
+sudo apt-get install libssl-dev
+```
+ ## Validator Troubleshooting ### Missed pre-genesis diff --git a/documentation/specs/book.toml b/documentation/specs/book.toml index 18b3e24ec9a..2431a7bf679 100644 @@ -15,8 +15,6 @@ git-branch = "main" [output.html.search] expand = true -[output.katex] - [output.linkcheck] [preprocessor.katex] diff --git a/documentation/specs/src/base-ledger.md b/documentation/specs/src/base-ledger.md index 749b91ce1f7..b76ae448098 100644 --- a/documentation/specs/src/base-ledger.md +++ b/documentation/specs/src/base-ledger.md @@ -1,3 +1,3 @@ ## Base ledger -The base ledger of Namada includes a [consensus system](./base-ledger/consensus.md), validity predicate-based [execution system](./base-ledger/execution.md), and signalling-based [governance mechanism](./base-ledger/governance.md). Namada's ledger also includes proof-of-stake, slashing, fees, and inflation funding for staking rewards, shielded pool incentives, and public goods — these are specified in the [economics section](./economics.md). \ No newline at end of file +The base ledger of Namada includes a [consensus system](./base-ledger/consensus.md), validity predicate-based [execution system](./base-ledger/execution.md), and signalling-based [governance mechanism](./base-ledger/governance.md). Namada's ledger also includes proof-of-stake, slashing, fees, and inflation funding for staking rewards, shielded pool incentives, and public goods — these are specified in the [economics section](./economics.md). This section also documents Namada's [multisignature VP](./base-ledger/multisignature.md), [fungible token VP](./base-ledger/fungible-token.md), and [replay protection system](./base-ledger/replay-protection.md). \ No newline at end of file diff --git a/documentation/specs/src/base-ledger/consensus.md b/documentation/specs/src/base-ledger/consensus.md index cf9ce284cee..de45d842cff 100644 --- a/documentation/specs/src/base-ledger/consensus.md +++ b/documentation/specs/src/base-ledger/consensus.md @@ -1,3 +1,3 @@ # Consensus -Namada uses [Tendermint Go](https://github.com/tendermint/tendermint) through the [tendermint-rs](https://github.com/heliaxdev/tendermint-rs) bindings in order to provide peer-to-peer transaction gossip, BFT consensus, and state machine replication for Namada's custom state machine. Tendermint Go implements the Tendermint BFT consensus algorithm, which you can read more about [here](https://arxiv.org/abs/1807.04938).
\ No newline at end of file +Namada uses [CometBFT](https://github.com/cometbft/cometbft/) (nee Tendermint Go) through the [cometbft-rs](https://github.com/heliaxdev/tendermint-rs) (nee tendermint-rs) bindings in order to provide peer-to-peer transaction gossip, BFT consensus, and state machine replication for Namada's custom state machine. CometBFT implements the Tendermint consensus algorithm, which you can read more about [here](https://arxiv.org/abs/1807.04938). \ No newline at end of file diff --git a/documentation/specs/src/base-ledger/execution.md b/documentation/specs/src/base-ledger/execution.md index b8e55cb6474..93fcaa3e798 100644 --- a/documentation/specs/src/base-ledger/execution.md +++ b/documentation/specs/src/base-ledger/execution.md @@ -10,9 +10,9 @@ Conceptually, a validity predicate (VP) is a function from the transaction's dat The Namada ledger is built on top of [Tendermint](https://docs.tendermint.com/master/spec/)'s [ABCI](https://docs.tendermint.com/master/spec/abci/) interface with a slight deviation from the ABCI convention: in Namada, the transactions are currently *not* being executed in ABCI's [`DeliverTx` method](https://docs.tendermint.com/master/spec/abci/abci.html), but rather in the [`EndBlock` method](https://docs.tendermint.com/master/spec/abci/abci.html). The reason for this is to prepare for future DKG and threshold decryption integration. -The ledger features an account-based system (in which UTXO-based systems such as the MASP can be internally implemented as specific accounts), where each account has a unique address and a dynamic key-value storage sub-space. Every account in Namada is associated with exactly one validity predicate. Fungible tokens, for example, are accounts, whose rules are governed by their validity predicates. Many of the base ledger subsystems specified here are themselves just special Namada accounts too (e.g. PoS, IBC and MASP). +The ledger features an account-based system (in which UTXO-based systems such as the MASP can be internally implemented as specific accounts), where each account has a unique address and a dynamic key-value storage sub-space. Every account in Namada is associated with exactly one validity predicate. Fungible tokens, for example, are accounts, whose rules are governed by their validity predicates. Many of the base ledger subsystems specified here are themselves just special Namada accounts too (e.g. PoS, IBC and MASP). This model is broadly similar to that of Ethereum, where each account is associated with contract code, but differs in the execution model. -Interaction with the Namada ledger are made possible via transactions (note transaction whitelist below). In Namada, transactions are allowed to perform arbitrary modifications to the storage of any account, but the transaction will be accepted and state changes applied only if all the validity predicates that were triggered by the transaction accept it. That is, the accounts whose storage sub-spaces were touched by the transaction and/or an account that was explicitly elected by the transaction as the verifier will all have their validity predicates verifying the transaction. A transaction can add any number of additional verifiers, but cannot remove the ones determined by the protocol. For example, a transparent fungible token transfer would typically trigger 3 validity predicates - those of the token, source and target addresses. +Interactions with the Namada ledger are made possible via transactions. 
In Namada, transactions are allowed to perform arbitrary modifications to the storage of any account, but the transaction will be accepted and state changes applied only if all the validity predicates that were triggered by the transaction accept it. That is, the accounts whose storage sub-spaces were touched by the transaction and/or an account that was explicitly elected by the transaction as the verifier will all have their validity predicates verifying the transaction. A transaction can add any number of additional verifiers, but cannot remove the ones determined by the protocol. For example, a transparent fungible token transfer would typically trigger 3 validity predicates - those of the token, source and target addresses. ## Supported validity predicates @@ -25,9 +25,8 @@ Supported validity predicates for Namada: - Proof-of-stake (see [spec](../economics/proof-of-stake.md)) - IBC & IbcToken (see [spec](../interoperability/ibc.md)) - Governance (see [spec](./governance.md)) - - SlashFund (see [spec](./governance.md#SlashFundAddress)) - - Protocol parameters + - Protocol parameters (part of governance) - WASM - Fungible token (see [spec](./fungible-token.md)) - MASP (see [spec](../masp.md)) - - k-of-n multisignature VP (see [spec](./multisignature.md)) + - k-of-n multisignature VP (see [spec](./multisignature.md)) \ No newline at end of file diff --git a/documentation/specs/src/base-ledger/fungible-token.md b/documentation/specs/src/base-ledger/fungible-token.md index 7b9b630b5f0..d11ff4e2e45 100644 --- a/documentation/specs/src/base-ledger/fungible-token.md +++ b/documentation/specs/src/base-ledger/fungible-token.md @@ -1,8 +1,7 @@ # Fungible token -The fungible token validity predicate authorises token balance changes on the basis of conservation-of-supply and approval-by-sender. +The fungible token validity predicate authorises token balance changes on the basis of conservation-of-supply and approval-by-sender. Namada implements a "multitoken" validity predicate, in that all tokens have the same logic and can share one VP (with appropriate storage distinctions). -## Multitoken A token balance is stored with a storage key. The token balance key should be `{token_addr}/balance/{owner_addr}` or `{token_addr}/{sub_prefix}/balance/{owner_addr}`. `{sub_prefix}` can have multiple key segments. These keys can be made with [token functions](https://github.com/anoma/namada/blob/5da82f093f10c0381865accba99f60c557360c51/core/src/types/token.rs). We can have multitoken balances with the same token and the same owner by `{sub_prefix}`, e.g. a token balance received over IBC is managed in `{token_addr}/ibc/{ibc_token_hash}/balance/{receiver_addr}`. It is distinguished from the receiver's original balance in `{token_addr}/balance/{receiver_addr}` to know which chain the token was transferred from. @@ -27,4 +26,4 @@ Some special transactions can transfer to another balance with the different `{s | Receive (as the source) | `{token_addr}/ibc/{port_id}/{channel_id}/balance/IBC_ESCROW` | `{token_addr}/balance/{receiver_addr}` | | Receive (from the source) | `{token_addr}/ibc/{port_id}/{channel_id}/balance/IBC_MINT` | `{token_addr}/ibc/{ibc_token_hash}/balance/{receiver_addr}` | -[IBC token validity predicate](https://github.com/anoma/namada/blob/5da82f093f10c0381865accba99f60c557360c51/shared/src/ledger/ibc/vp/token.rs) should validate these transfers. These special transfers like IBC should be validated by not only the fungible token validity predicate but also other validity predicates. 
+[IBC token validity predicate](https://github.com/anoma/namada/blob/5da82f093f10c0381865accba99f60c557360c51/shared/src/ledger/ibc/vp/token.rs) should validate these transfers. These special transfers like IBC should be validated by not only the fungible token validity predicate but also other validity predicates. \ No newline at end of file diff --git a/documentation/specs/src/base-ledger/governance.md b/documentation/specs/src/base-ledger/governance.md index bc9dc3c4f03..4ce75ccbe1d 100644 --- a/documentation/specs/src/base-ledger/governance.md +++ b/documentation/specs/src/base-ledger/governance.md @@ -105,13 +105,13 @@ At the moment, Namada supports 3 types of governance proposals: ```rust
pub enum ProposalType {
 /// Carries the optional proposal code path
- Custom(Option<String>),
+ Default(Option<String>),
 PGFCouncil,
 ETHBridge,
}
``` -`Custom` represents a generic proposal with the following properties: +`Default` represents a generic proposal with the following properties: - Can carry a wasm code to be executed in case the proposal passes - Allows both validators and delegators to vote @@ -122,15 +122,15 @@ pub enum ProposalType { - Doesn't carry any wasm code - Allows both validators and delegators to vote -- Requires 1/3 of the total voting power to vote for the same council -- Expect every vote to carry a memo in the form of a tuple `Set<(Set<Address>, BudgetCap)>`
+- Requires 1/3 of the total voting power to vote `Yay`
+- Expect every vote to carry a memo in the form of `Set<(Address, BudgetCap)>`
`ETHBridge` is aimed at regulating actions on the bridge like the update of the Ethereum smart contracts or the withdrawing of all the funds from the `Vault` : - Doesn't carry any wasm code - Allows only validators to vote -- Requires 2/3 of the validators' total voting power to succeed -- Expect every vote to carry a memo in the form of a tuple `(Action, Signature)` +- Requires 2/3 of the total voting power to succeed +- Expect every vote to carry a memo in the form of a `Signature` over some bytes provided in the proposal ### GovernanceAddress VP @@ -207,7 +207,8 @@ where `ProposalVote` is an enum representing a `Yay` or `Nay` vote: the yay vari The storage key will only be created if the transaction is signed either by a validator or a delegator. In case a vote misses a required memo or carries a memo with an invalid format, the vote will be discarded at validation time (VP) and it won't be written to storage. -If delegators are allowed to vote, validators will be able to vote only for 2/3 of the total voting period, while delegators can vote until the end of the voting period. +If delegators are allowed to vote, validators will be able to vote only for 2/3 of the total voting period, while delegators can vote until the end of the voting period. If only validators are allowed to vote +for the `ProposalType` in question, they are allowed to vote for the entire voting window. If a delegator votes differently than its validator, this will *override* the corresponding vote of this validator (e.g. if a delegator has a voting power of 200 and votes opposite to the validator holding these tokens, then 200 will be subtracted from the voting power of the involved validator). diff --git a/documentation/specs/src/base-ledger/images/block-space-allocator-bins.svg b/documentation/specs/src/base-ledger/images/block-space-allocator-bins.svg new file mode 100644 index 00000000000..f9d7209da1e --- /dev/null +++ b/documentation/specs/src/base-ledger/images/block-space-allocator-bins.svg @@ -0,0 +1,4 @@
[Embedded SVG text omitted. The diagram shows the `BlockSpaceAllocator` filling DECRYPTED, PROTOCOL and ENCRYPTED bins from the set M of mempool transactions via `bin.try_dump(tx)` to produce the set P of proposed transactions.]
diff --git a/documentation/specs/src/base-ledger/images/block-space-allocator-example.svg b/documentation/specs/src/base-ledger/images/block-space-allocator-example.svg new file mode 100644 index 00000000000..b19ad90ce79 --- /dev/null +++ b/documentation/specs/src/base-ledger/images/block-space-allocator-example.svg @@ -0,0 +1,4 @@
[Embedded SVG text omitted. The diagram shows example block space allocations at heights H through H+4, with each block's space divided among DECRYPTED (D), PROTOCOL (P) and ENCRYPTED (E) transactions.]
\ No newline at end of file diff --git a/documentation/specs/src/base-ledger/multisignature.md b/documentation/specs/src/base-ledger/multisignature.md index e169841ed42..c865c3a36e6 100644 --- a/documentation/specs/src/base-ledger/multisignature.md +++ b/documentation/specs/src/base-ledger/multisignature.md @@ -1,6 +1,6 @@ # k-of-n multisignature -The k-of-n multisignature validity predicate authorizes transactions on the basis of k out of n parties approving them. This document targets the encrypted wasm transactions: there won't be support for multisignature on wrapper or protocol transactions. +The k-of-n multisignature validity predicate authorizes transactions on the basis of k out of n parties approving them. This document targets the encrypted (inner) WASM transactions. Namada does not support multiple signers on wrapper or protocol transactions. ## Protocol @@ -16,9 +16,9 @@ pub struct SignedTxData { } ``` -The `sig` field now holds a vector of tuples where the first element is an 8-bit integer and the second one is a signature. The integer serves as an index to match a specific signature to one of the public keys in the list of accepted ones. This way, we can improve the verification algorithm and check each signature only against the public key at the provided index (linear in time complexity), without the need to cycle on all of them which would be $\mathcal{O}(n^2)$. +The `sig` field now holds a vector of tuples where the first element is an 8-bit integer and the second one is a signature. The integer serves as an index to match a specific signature to one of the public keys in the list of accepted ones. This way, we can improve the verification algorithm and check each signature only against the public key at the provided index ($\mathcal{O}(n)$), without the need to cycle on all of them which would be $\mathcal{O}(n^2)$. -This means that non-multisig addresses will now be seen as 1-of-1 multisig accounts. +This means that non-multisig addresses will now be implemented as 1-of-1 multisig accounts (but this difference is transparent to the user). ## VPs @@ -68,12 +68,7 @@ Finally, the tx performs the following writes to storage: - The threshold - The list of public keys of the signers -`Internal` addresses may want a multi-signature scheme on top of their validation process as well. Among the internal ones, `PGF` will require multisignature for its council (see the [relative](../economics/public-goods-funding.md) spec). The storage data necessary for the correct working of the multisig for an internal address are written in the genesis file: these keys can be later modified through governance. - -`Implicit` addresses are not generated by a transaction and, therefore, are not suitable for a multisignature scheme since there would be no way to properly construct them. More specifically, an implicit address doesn't allow for: - -- A custom, modifiable VP -- An initial transaction to be used as an initializer for the relevant data +Multisignature accounts can also be initialised at genesis time - in this case, the requisite parameters are kept in the genesis file and written to storage during initialisation. ## Multisig account init validation @@ -99,4 +94,4 @@ In the end, we don't implement any of these checks and will leave the responsibi To craft a multisigned transaction, the involved parties will need to coordinate. 
More specifically, the transaction will be constructed by one entity which will then distribute it to the signers and collect their signatures: note that the constructing party doesn't necessarily need to be one of the signers. Finally, these signatures will be inserted in the `SignedTxData` struct so that it can be encrypted, wrapped and submitted to the network. -Namada does not provide a layer to support this process, so the involved parties will need to rely on an external communication mechanism. +Namada does not provide a layer to support this process, so the involved parties will need to rely on an external communication mechanism. \ No newline at end of file diff --git a/documentation/specs/src/base-ledger/replay-protection.md b/documentation/specs/src/base-ledger/replay-protection.md index 1094460cad8..5cbefdd7bd0 100644 --- a/documentation/specs/src/base-ledger/replay-protection.md +++ b/documentation/specs/src/base-ledger/replay-protection.md @@ -1,232 +1,488 @@ # Replay Protection -Replay protection is a mechanism to prevent _replay attacks_, which consist of a malicious user resubmitting an already executed transaction (also mentioned as tx in this document) to the ledger. +Replay protection is a mechanism to prevent _replay attacks_, which consist of a malicious user resubmitting an already executed transaction (often shortened to "tx" in this document) to the ledger. -A replay attack causes the state of the machine to deviate from the intended one (from the perspective of the parties involved in the original transaction) and causes economic damage to the fee payer of the original transaction, who finds himself paying more than once. Further economic damage is caused if the transaction involved the moving of value in some form (e.g. a transfer of tokens) with the sender being deprived of more value than intended. +A replay attack causes the state of the machine to deviate from the intended one +(from the perspective of the parties involved in the original transaction) and +causes economic damage to the fee payer of the original transaction, who finds +himself paying more than once. Further economic damage is caused if the +transaction involved the moving of value in some form (e.g. a transfer of +tokens) with the sender being deprived of more value than intended. -Since the original transaction was already well formatted for the protocol's rules, the attacker doesn't need to rework it, making this attack relatively easy. +Since the original transaction was already well formatted for the protocol's +rules, the attacker doesn't need to rework it, making this attack relatively +easy. -Of course, a replay attack makes sense only if the attacker differs from the _source_ of the original transaction, as a user will always be able to generate another semantically identical transaction to submit without the need to replay the same one. +Of course, a replay attack makes sense only if the attacker differs from the +_source_ of the original transaction, as a user will always be able to generate +another semantically identical transaction to submit without the need to replay +the same one. + +To prevent this scenario, Namada supports a replay protection mechanism to +prevent the execution of already processed transactions. -To prevent this scenario, Namada supports a replay protection mechanism to prevent the execution of already processed transactions. - ## Context -This section will illustrate the pre-existing context in which we are going to implement the replay protection mechanism. 
+This section will illustrate the pre-existing context in which we are going to
+implement the replay protection mechanism.
### Encryption-Authentication -The current implementation of Namada is built on top of Tendermint which provides an encrypted and authenticated communication channel between every two nodes to prevent a _man-in-the-middle_ attack (see the detailed [spec](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/p2p/peer.md)).
+The current implementation of Namada is built on top of Tendermint which
+provides an encrypted and authenticated communication channel between every two
+nodes to prevent a _man-in-the-middle_ attack (see the detailed
+[spec](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/p2p/peer.md)).
-The Namada protocol relies on this substrate to exchange transactions (messages) that will define the state transition of the ledger. More specifically, a transaction is composed of two parts: a `WrapperTx` and an inner `Tx`
+The Namada protocol relies on this substrate to exchange transactions (messages)
+that will define the state transition of the ledger. More specifically, a
+transaction is composed of two parts: a `WrapperTx` and an inner `Tx`
```rust
pub struct WrapperTx {
- /// The fee to be paid for including the tx
- pub fee: Fee,
- /// Used to determine an implicit account of the fee payer
- pub pk: common::PublicKey,
- /// The epoch in which the tx is to be submitted. This determines
- /// which decryption key will be used
- pub epoch: Epoch,
- /// Max amount of gas that can be used when executing the inner tx
- pub gas_limit: GasLimit,
- /// the encrypted payload
- pub inner_tx: EncryptedTx,
- /// sha-2 hash of the inner transaction acting as a commitment
- /// the contents of the encrypted payload
- pub tx_hash: Hash,
+ /// The fee to be paid for including the tx
+ pub fee: Fee,
+ /// Used to determine an implicit account of the fee payer
+ pub pk: common::PublicKey,
+ /// The epoch in which the tx is to be submitted. This determines
+ /// which decryption key will be used
+ pub epoch: Epoch,
+ /// Max amount of gas that can be used when executing the inner tx
+ pub gas_limit: GasLimit,
+ /// The optional unshielding tx for fee payment
+ pub unshield: Option<Tx>,
+ /// the encrypted payload
+ pub inner_tx: EncryptedTx,
+ /// sha-2 hash of the inner transaction acting as a commitment
+ /// the contents of the encrypted payload
+ pub tx_hash: Hash,
}

pub struct Tx {
- pub code: Vec<u8>,
- pub data: Option<Vec<u8>>,
- pub timestamp: DateTimeUtc,
+ pub code: Vec<u8>,
+ pub data: Option<Vec<u8>>,
+ pub timestamp: DateTimeUtc,
}
-``` +``` -The wrapper transaction is composed of some metadata, the encrypted inner transaction itself and the hash of this. The inner `Tx` transaction carries the Wasm code to be executed and the associated data.
+The wrapper transaction is composed of some metadata, an optional unshielding tx
+for fee payment (see [fee specs](../economics/fee-system.md)), the encrypted
+inner transaction itself and the hash of this. The inner `Tx` transaction
+carries the Wasm code to be executed and the associated data.
A transaction is constructed as follows: 1. The struct `Tx` is produced 2. The hash of this transaction gets signed by the author, producing another `Tx` where the data field holds the concatenation of the original data and the signature (`SignedTxData`) -3. The produced transaction is encrypted and embedded in a `WrapperTx`.
The encryption step is there for a future implementation of DKG (see [Ferveo](https://github.com/anoma/ferveo)) +3. The produced transaction is encrypted and embedded in a `WrapperTx`. The encryption step is there for a future implementation of threshold transaction decryption (see [Ferveo](https://github.com/anoma/ferveo)) 4. Finally, the `WrapperTx` gets converted to a `Tx` struct, signed over its hash (same as step 2, relying on `SignedTxData`), and submitted to the network -Note that the signer of the `WrapperTx` and that of the inner one don't need to coincide, but the signer of the wrapper will be charged with gas and fees. -In the execution steps: +Note that the signer of the `WrapperTx` and that of the inner one don't need to +coincide, but the signer of the wrapper will be charged with gas and fees. In +the execution steps: 1. The `WrapperTx` signature is verified and, only if valid, the tx is processed 2. In the following height the proposer decrypts the inner tx, checks that the hash matches that of the `tx_hash` field and, if everything went well, includes the decrypted tx in the proposed block -3. The inner tx will then be executed by the Wasm runtime -4. After the execution, the affected validity predicates (also mentioned as VP in this document) will check the storage changes and (if relevant) the signature of the transaction: if the signature is not valid, the VP will deem the transaction invalid and the changes won't be applied to the storage +3. The inner tx will then be executed by the WASM runtime +4. After the execution, the affected validity predicates (also mentioned as VPs in this document) will check the storage changes and (if relevant) the signature of the transaction: if the signature is not valid, the VP will deem the transaction invalid and the changes won't be applied to the storage -The signature checks effectively prevent any tampering with the transaction data because that would cause the checks to fail and the transaction to be rejected. -For a more in-depth view, please refer to the [Namada execution spec](./execution.md). +The signature checks effectively prevent any tampering with the transaction data +because that would cause the checks to fail and the transaction to be rejected. +For a more in-depth view, please refer to the +[Namada execution spec](./execution.md). ### Tendermint replay protection -The underlying consensus engine, [Tendermint](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/abci/apps.md), provides a first layer of protection in its mempool which is based on a cache of previously seen transactions. This mechanism is actually aimed at preventing a block proposer from including an already processed transaction in the next block, which can happen when the transaction has been received late. Of course, this also acts as a countermeasure against intentional replay attacks. This check though, like all the checks performed in `CheckTx`, is weak, since a malicious validator could always propose a block containing invalid transactions. There's therefore the need for a more robust replay protection mechanism implemented directly in the application. +The underlying consensus engine, +[Tendermint](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/abci/apps.md), +provides a first layer of protection in its mempool which is based on a cache of +previously seen transactions. 
This mechanism is actually aimed at preventing a
+block proposer from including an already processed transaction in the next
+block, which can happen when the transaction has been received late. Of course,
+this also acts as a countermeasure against intentional replay attacks. This
+check though, like all the checks performed in `CheckTx`, is weak, since a
+malicious validator could always propose a block containing invalid
+transactions. There's therefore the need for a more robust replay protection
+mechanism implemented directly in the application.
## Implementation -Namada replay protection consists of three parts: the hash-based solution for both `EncryptedTx` (also called the `InnerTx`) and `WrapperTx`, a way to mitigate replay attacks in case of a fork and a concept of a lifetime for the transactions.
+Namada replay protection consists of three parts: the hash-based solution for
+both `EncryptedTx` (also called the `InnerTx`) and `WrapperTx`, a way to
+mitigate replay attacks in case of a fork and a concept of a lifetime for the
+transactions.
### Hash register -The actual Wasm code and data for the transaction are encapsulated inside a struct `Tx`, which gets encrypted as an `EncryptedTx` and wrapped inside a `WrapperTx` (see the [relative](#encryption-authentication) section). This inner transaction must be protected from replay attacks because it carries the actual semantics of the state transition. Moreover, even if the wrapper transaction was protected from replay attacks, an attacker could extract the inner transaction, rewrap it, and replay it. Note that for this attack to work, the attacker will need to sign the outer transaction himself and pay gas and fees for that, but this could still cause much greater damage to the parties involved in the inner transaction.
+The actual Wasm code and data for the transaction are encapsulated inside a
+struct `Tx`, which gets encrypted as an `EncryptedTx` and wrapped inside a
+`WrapperTx` (see the [relevant](#encryption-authentication) section). This inner
+transaction must be protected from replay attacks because it carries the actual
+semantics of the state transition. Moreover, even if the wrapper transaction was
+protected from replay attacks, an attacker could extract the inner transaction,
+rewrap it, and replay it. Note that for this attack to work, the attacker will
+need to sign the outer transaction himself and pay gas and fees for that, but
+this could still cause much greater damage to the parties involved in the inner
+transaction.
+ +`WrapperTx` is the only type of transaction currently accepted by the ledger. It +must be protected from replay attacks because, if it wasn't, a malicious user +could replay the transaction as is. Even if the inner transaction implemented +replay protection or, for any reason, wasn't accepted, the signer of the wrapper +would still pay for gas and fees, effectively suffering economic damage. + +To prevent the replay of both these transactions we will rely on a set of +already processed transactions' digests that will be kept in storage. These +digests will be computed on the **unsigned** transactions, to support replay +protection even for [multisigned](multisignature.md) transactions: in this case, +if hashes were taken from the signed transactions, a different set of signatures +on the same tx would produce a different hash, effectively allowing for a +replay. To support this, we'll first need to update the `WrapperTx` hash field +to contain the hash of the unsigned inner tx, instead of the signed one: this +doesn't affect the overall safety of Namada (since the wrapper is still signed +over all of its bytes, including the inner signature) and allows for early +replay attack checks in mempool and at wrapper block-inclusion time. +Additionally, we need a subspace in storage headed by a `ReplayProtection` +internal address: ``` -/$ReplayProtectionAddress/$tx0_hash: None -/$ReplayProtectionAddress/$tx1_hash: None -/$ReplayProtectionAddress/$tx2_hash: None +/\$ReplayProtectionAddress/\$tx0_hash: None +/\$ReplayProtectionAddress/\$tx1_hash: None +/\$ReplayProtectionAddress/\$tx2_hash: None ... ``` -The hashes will form the last part of the path to allow for a fast storage lookup. - -The consistency of the storage subspace is of critical importance for the correct working of the replay protection mechanism. To protect it, a validity predicate will check that no changes to this subspace are applied by any wasm transaction, as those should only be available from protocol. - -Both in `mempool_validation` and `process_proposal` we will perform a check (together with others, see the [relative](#wrapper-checks) section) on both the digests against the storage to check that neither of the transactions has already been executed: if this doesn't hold, the `WrapperTx` will not be included into the mempool/block respectively. If both checks pass then the transaction is included in the block and executed. In the `finalize_block` function we will add the transaction's hash to storage to prevent re-executions. We will first add the hash of the wrapper transaction. After that, in the following block, we deserialize the inner transaction, check the correct order of the transactions in the block and execute the tx: if it runs out of gas then we'll avoid storing its hash to allow rewrapping and executing the transaction, otherwise we'll add the hash in storage (both in case of success or failure of the tx). +The hashes will form the last part of the path to allow for a fast storage +lookup. + +The consistency of the storage subspace is of critical importance for the +correct working of the replay protection mechanism. To protect it, a validity +predicate will check that no changes to this subspace are applied by any wasm +transaction, as those should only be available from protocol. 
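To make the mechanics above concrete, the following is a minimal, illustrative sketch of registering and probing digests computed over the unsigned transaction bytes. It is not Namada's actual storage API: the in-memory `HashSet` store, the `sha2`/`hex` crates and the helper names are all assumptions made for this example.

```rust
use std::collections::HashSet;

use sha2::{Digest, Sha256}; // assumption: any collision-resistant hash works here

/// Key under the ReplayProtection internal address; the hash forms the
/// last path segment, so a lookup is a single key probe.
fn replay_protection_key(tx_hash: &str) -> String {
    format!("/$ReplayProtectionAddress/{tx_hash}")
}

/// Digest over the *unsigned* tx bytes: attaching a different set of
/// (multisig) signatures cannot change it, so the same tx cannot be
/// replayed under a fresh hash.
fn unsigned_tx_hash(unsigned_tx_bytes: &[u8]) -> String {
    hex::encode(Sha256::digest(unsigned_tx_bytes))
}

/// Mempool/process_proposal-style check; in the protocol it is
/// finalize_block that actually commits the key to storage.
fn check_and_register(
    store: &mut HashSet<String>,
    unsigned_tx_bytes: &[u8],
) -> Result<(), String> {
    let key = replay_protection_key(&unsigned_tx_hash(unsigned_tx_bytes));
    if store.contains(&key) {
        return Err(format!("replay detected: {key}"));
    }
    store.insert(key);
    Ok(())
}
```

In the real protocol the store is the `ReplayProtection` storage subspace described above, and the probe runs both against committed state and against an in-block cache, as detailed next.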
+
+Both in `mempool_validation` and `process_proposal` we will perform a check
+(together with others, see the [relevant](#wrapper-checks) section) on both the
+digests against the storage to check that neither of the transactions has
+already been executed: if this doesn't hold, the `WrapperTx` will not be
+included in the mempool/block respectively. In `process_proposal` we'll use a
+temporary cache to prevent a replay of a transaction in the same block. If both
+checks pass then the transaction is included in the block. The hashes are
+committed to storage in `finalize_block` and the transaction is executed.
+
+In the next block we deserialize the inner transaction, check the validity of
+the decrypted txs and their correct order: if the order is off a new round of
+Tendermint will start. If instead an error is found in any single decrypted tx,
+we remove from storage the previously inserted hash of the inner tx to allow it
+to be rewrapped, and discard the tx itself. Finally, in `finalize_block` we
+execute the tx: if it runs out of gas then we'll remove its hash from storage,
+again to allow rewrapping and executing the transaction, otherwise we'll keep
+the hash in storage (both in case of success or failure of the tx).
+
+#### Optional unshielding
+
+The optional `unshield` field is supposed to carry an unshielding masp
+`Transfer`. Given this assumption, there's no need to manage it since masp has
+an internal replay protection mechanism.
+
+Still, since this field represents a valid, signed `Tx`, there are three
+possible attacks that can be run by leveraging this field:
+
+1. If the wrapper signer constructs an `unshield` tx that actually encodes
+   another type of transaction, then this one can be extracted and executed
+   separately
+2. A malicious user could extract this tx before it makes it to a block and play
+   it in advance
+3. A combination of the previous two
+
+In the first case, the unshielding operation would fail because of the checks
+run in protocol, but the tx itself could be extracted, wrapped and submitted to
+the network. This issue could be solved with the mechanism explained in the
+previous section.
+
+The second attack, instead, is performed before the original tx is placed in a
+block and, therefore, cannot be prevented with a replay protection mechanism.
+The only result of this attack would be that the original wrapper transaction
+would fail since it would attempt to replay a masp transfer: in this case, the
+submitter of the original tx can recreate it without the need for the
+unshielding operation since the attacker has already performed it.
+
+In the last case the unshielding transaction (which is not a masp transfer)
+could be encrypted, wrapped and executed before the original transaction is
+inserted in a block. When the latter gets executed the protocol checks detect
+that this is not a masp unshielding transfer and reject it.
+
+Given that saving the hash of the unshielding transaction is redundant in the
+case of a proper masp transfer and it doesn't prevent the second scenario in the
+case of a non-masp transaction, Namada does not implement the replay protection
+mechanism on the unshielding transaction, whose correctness is left to the
+wrapper signer and the masp validity predicate (in case the unshielding tx was
+indeed a correct masp unshield transfer).
The combination of the fee system, the validity
+predicate set and the protocol checks on the unshielding operation guarantees
+that even if one of the attacks explained in this section is performed:
+
+- The original wrapper signer doesn't suffer economic damage (the wrapper
+  containing the invalid unshielding forces the block rejection without fee
+  collection)
+- The attacker has to pay fees on the rewrapped tx, preventing him from
+  submitting these transactions for free
+- The invalid unshielding transaction must still be a valid transaction per the
+  VPs triggered
+
+#### Governance proposals
+
+Governance [proposals](../base-ledger/governance.md) may carry some wasm code to
+be executed in case the proposal passed. This code is embedded into a
+`DecryptedTx` directly by the validators at block processing time and is not
+inserted into the block itself.
+
+Given that the wasm code is attached to the transaction initiating the proposal,
+it could be extracted from here and inserted in a transaction before the
+proposal is executed. Therefore, replay protection is not a solution to prevent
+attacks on governance proposals' code. Instead, to protect these transactions,
+Namada relies on its proposal id mechanism in conjunction with the VP set.
+
+#### Protocol transactions
+
+At the moment, protocol transactions are only used for ETH bridge related
+operations. The current implementation already takes care of replay attempts by
+keeping track of the validators' signature on the events: this also includes
+replay attacks in the same block.
+
+In the future, new types of protocol transactions may be supported: in this
+case, a review of the replay protection mechanism might be required.
### Forks -In the case of a fork, the transaction hash is not enough to prevent replay attacks. Transactions, in fact, could still be replayed on the other branch as long as their format is kept unchanged and the counters in storage match.
+In the case of a fork, the transaction hash is not enough to prevent replay
+attacks. Transactions, in fact, could still be replayed on the other branch as
+long as their format is kept unchanged and the counters in storage match.
-To mitigate this problem, transactions will need to carry a `ChainId` identifier to tie them to a specific fork. This field needs to be added to the `Tx` struct so that it applies to both `WrapperTx` and `EncryptedTx`:
+To mitigate this problem, transactions will need to carry a `ChainId` identifier
+to tie them to a specific fork. This field needs to be added to the `Tx` struct
+so that it applies to both `WrapperTx` and `EncryptedTx`:
```rust
pub struct Tx {
- pub code: Vec<u8>,
- pub data: Option<Vec<u8>>,
- pub timestamp: DateTimeUtc,
- pub chain_id: ChainId
+ pub code: Vec<u8>,
+ pub data: Option<Vec<u8>>,
+ pub timestamp: DateTimeUtc,
+ pub chain_id: ChainId
}
```
-This new field will be signed just like the other ones and is therefore subject to the same guarantees explained in the [initial](#encryption-authentication) section.
+This new field will be signed just like the other ones and is therefore subject
+to the same guarantees explained in the [initial](#encryption-authentication)
+section.
+The validity of this identifier will be checked in `process_proposal` for both
+the outer and inner tx: if a transaction carries an unexpected chain id, it
+won't be applied, meaning that no modifications will be applied to storage.

### Transaction lifetime

-In general, a transaction is valid at the moment of submission, but after that, a series of external factors (ledger state, etc.) might change the mind of the submitter who's now not interested in the execution of the transaction anymore.
-
-We have to introduce the concept of a lifetime (or timeout) for the transactions: basically, the `Tx` struct will hold an extra field called `expiration` stating the maximum `DateTimeUtc` up until which the submitter is willing to see the transaction executed. After the specified time, the transaction will be considered invalid and discarded regardless of all the other checks.
-
-By introducing this new field we are setting a new constraint in the transaction's contract, where the ledger will make sure to prevent the execution of the transaction after the deadline and, on the other side, the submitter commits himself to the result of the execution at least until its expiration. If the expiration is reached and the transaction has not been executed the submitter can decide to submit a new transaction if he's still interested in the changes carried by it.
-
-In our design, the `expiration` will hold until the transaction is executed: once it's executed, either in case of success or failure, the tx hash will be written to storage and the transaction will not be replayable. In essence, the transaction submitter commits himself to one of these three conditions:
+In general, a transaction is valid at the moment of submission, but after that,
+a series of external factors (ledger state, etc.) might make the submitter lose
+interest in the execution of the transaction.
+
+We have to introduce the concept of a lifetime (or timeout) for the
+transactions: basically, the `Tx` struct will hold an optional extra field
+called `expiration` stating the maximum `DateTimeUtc` up until which the
+submitter is willing to see the transaction executed. After the specified time,
+the transaction will be considered invalid and discarded regardless of all the
+other checks.
+
+By introducing this new field we are setting a new constraint in the
+transaction's contract, where the ledger will make sure to prevent the execution
+of the transaction after the deadline and, on the other side, the submitter
+commits himself to the result of the execution at least until its expiration. If
+the expiration is reached and the transaction has not been executed, the
+submitter can decide to submit a new transaction if he's still interested in the
+changes carried by it.
+
+In our design, the `expiration` will hold until the transaction is executed:
+once it's executed, either in case of success or failure, the tx hash will be
+written to storage and the transaction will not be replayable. In essence, the
+transaction submitter commits himself to one of these three conditions:

- Transaction is invalid regardless of the specific state
-- Transaction is executed (either with success or not) and the transaction hash is saved in the storage
+- Transaction is executed (either with success or not) and the transaction hash
+  is saved in the storage
- Expiration time has passed

The first condition satisfied will invalidate further executions of the same tx.
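+
+A minimal, hypothetical sketch of the expiration check (in Namada, `DateTimeUtc`
+wraps a `chrono` UTC timestamp; the helper name and using the block time as the
+reference clock are assumptions):
+
+```rust
+use chrono::{DateTime, Utc};
+
+// A tx is discarded once the block time has passed its (optional) deadline
+fn is_expired(
+    expiration: Option<DateTime<Utc>>,
+    block_time: DateTime<Utc>,
+) -> bool {
+    match expiration {
+        Some(deadline) => block_time > deadline,
+        // No expiration set: the tx never expires
+        None => false,
+    }
+}
+```
+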
-In anticipation of DKG implementation, the current struct `WrapperTx` holds a field `epoch` stating the epoch in which the tx should be executed. This is because Ferveo will produce a new public key each epoch, effectively limiting the lifetime of the transaction (see section 2.2.2 of the [documentation](https://eprint.iacr.org/2022/898.pdf)). Unfortunately, for replay protection, a resolution of 1 epoch (~ 1 day) is too low for the possible needs of the submitters, therefore we need the `expiration` field to hold a maximum `DateTimeUtc` to increase resolution down to a single block (~ 10 seconds).
-
```rust
pub struct Tx {
-  pub code: Vec<u8>,
-  pub data: Option<Vec<u8>>,
-  pub timestamp: DateTimeUtc,
-  pub chain_id: ChainId,
-  /// Lifetime of the transaction, also determines which decryption key will be used
-  pub expiration: DateTimeUtc,
-}
-
-pub struct WrapperTx {
-  /// The fee to be payed for including the tx
-  pub fee: Fee,
-  /// Used to determine an implicit account of the fee payer
-  pub pk: common::PublicKey,
-  /// Max amount of gas that can be used when executing the inner tx
-  pub gas_limit: GasLimit,
-  /// the encrypted payload
-  pub inner_tx: EncryptedTx,
-  /// sha-2 hash of the inner transaction acting as a commitment
-  /// the contents of the encrypted payload
-  pub tx_hash: Hash,
+    pub code: Vec<u8>,
+    pub data: Option<Vec<u8>>,
+    pub timestamp: DateTimeUtc,
+    pub chain_id: ChainId,
+    /// Optional lifetime of the transaction
+    pub expiration: Option<DateTimeUtc>,
}
```

-Since we now have more detailed information about the desired lifetime of the transaction, we can remove the `epoch` field and rely solely on `expiration`. Now, the producer of the inner transaction should make sure to set a sensible value for this field, in the sense that it should not span more than one epoch. If this happens, then the transaction will be correctly decrypted only in a subset of the desired lifetime (the one expecting the actual key used for the encryption), while, in the following epochs, the transaction will fail decryption and won't be executed. In essence, the `expiration` parameter can only restrict the implicit lifetime within the current epoch, it can not surpass it as that would make the transaction fail in the decryption phase.
-
-The subject encrypting the inner transaction will also be responsible for using the appropriate public key for encryption relative to the targeted time.
-
-The wrapper transaction will match the `expiration` of the inner for correct execution. Note that we need this field also for the wrapper to anticipate the check at mempool/proposal evaluation time, but also to prevent someone from inserting a wrapper transaction after the corresponding inner has expired forcing the wrapper signer to pay for the fees.
+The wrapper transaction will match the `expiration` of the inner (if any) for
+correct execution. Note that the wrapper also needs this field, both to
+anticipate the check at mempool/proposal evaluation time and to prevent someone
+from inserting a wrapper transaction after the corresponding inner has expired,
+forcing the wrapper signer to pay fees.

### Wrapper checks

-In `mempool_validation` and `process_proposal` we will perform some checks on the wrapper tx to validate it. These will involve:
-
-- Valid signature
-- Enough funds to pay the fee
-- Valid chainId
-- Valid transaction hash
-- Valid expiration
-
-These checks can all be done before executing the transactions themselves (the check on the gas cannot be done ahead of time). If any of these fails, the transaction should be considered invalid and the action to take will be one of the followings:
-
-1. If the checks fail on the signature, chainId, expiration or transaction hash, then this transaction will be forever invalid, regardless of the possible evolution of the ledger's state. There's no need to include the transaction in the block. Moreover, we **cannot** include this transaction in the block to charge a fee (as a sort of punishment) because these errors may not depend on the signer of the tx (could be due to malicious users or simply a delay in the tx inclusion in the block)
-2. If the checks fail _only_ because of an insufficient balance, the wrapper should be kept in mempool for a future play in case the funds should become available
-3. If all the checks pass validation we will include the transaction in the block to store the hash and charge the fee
-
-The `expiration` parameter also justifies step 2 of the previous bullet points which states that if the validity checks fail only because of an insufficient balance to pay for fees then the transaction should be kept in mempool for future execution. Without it, the transaction could be potentially executed at any future moment, possibly going against the mutated interests of the submitter. With the expiration parameter, now, the submitter commits himself to accept the execution of the transaction up to the specified time: it's going to be his responsibility to provide a sensible value for this parameter. Given this constraint the transaction will be kept in memepool up until the expiration (since it would become invalid after that in any case), to prevent the mempool from increasing too much in size.
-
-This mechanism can also be applied to another scenario. Suppose a transaction was not propagated to the network by a node (or a group of colluding nodes). Now, this tx might be valid, but it doesn't get inserted into a block. Without an expiration, this tx can be replayed (better, applied, since it was never executed in the first place) at a future moment in time when the submitter might not be willing to execute it anymore.
+In `mempool_validation` we will perform some checks on the wrapper tx to
+validate it. These will involve:
+
+- Signature
+- `GasLimit` is below the block gas limit
+- `Fees` are paid with an accepted token and match the minimum amount required
+- `ChainId`
+- Transaction hash
+- Expiration
+- Wrapper signer has enough funds to pay the fee
+- The unshielding tx (if present) is indeed a masp unshielding transfer
+- The unshielding tx (if present) releases the minimum amount of tokens required
+  to pay fees
+- The unshielding tx (if present) runs successfully
+
+More details on gas, fees and the unshielding tx can be found in the
+[fee specs](../economics/fee-system.md).
+
+These checks can all be done before executing the transactions themselves. If
+any of these fails, the transaction should be considered invalid and the action
+to take will be one of the following:
+
+1. If the checks fail on the signature, chainId, expiration, transaction hash,
+   balance or the unshielding tx, then this transaction will be forever invalid,
+   regardless of the possible evolution of the ledger's state. There's no need
+   to include the transaction in the block.
+   Moreover, we **cannot** include this transaction in the block to charge a
+   fee (as a sort of punishment) because these errors may not depend on the
+   signer of the tx (could be due to malicious users or simply a delay in the
+   tx inclusion in the block)
+2. If the checks fail on `Fee` or `GasLimit`, the transaction should be
+   discarded. In theory the gas limit of a block is a Namada parameter
+   controlled by governance, so there's a chance that the transaction could
+   become valid in the future should this limit be raised. The same applies to
+   the token whitelist and the minimum fee required. However, we can expect a
+   slow rate of change of these parameters, so we can reject the tx (the
+   submitter can always resubmit it at a future time)
+
+If instead all the checks pass validation, we will include the transaction in
+the block to store the hash and charge the fee.
+
+All these checks are also run in `process_proposal`.
+
+This mechanism can also be applied to another scenario. Suppose a transaction
+was not propagated to the network by a node (or a group of colluding nodes).
+Now, this tx might be valid, but it doesn't get inserted into a block. Without
+an expiration, this tx can be replayed (better, applied, since it was never
+executed in the first place) at a future moment in time when the submitter might
+not be willing to execute it any more.
+
+### Block rejection
+
+To prevent a block proposer from including invalid transactions in a block, the
+validators will reject the entire block in case they find a single invalid
+wrapper transaction.
+
+Rejecting the single invalid transaction while still accepting the block is not
+a valid solution. In this case, in fact, the block proposer has no incentive to
+include invalid transactions in the block because these would gain him no fees
+but, at the same time, he doesn't really have a disincentive against including
+them, since in this case the validators will simply discard the invalid tx but
+accept the rest of the block, granting the proposer his fees on all the other
+transactions. This, of course, applies in case the proposer has no other valid
+tx to include. A malicious proposer could act like this to spam the block
+without suffering any penalty.
+
+To recap, a block is rejected when at least one of the following conditions is
+met:
+
+- At least one `WrapperTx` is invalid with respect to the checks listed in the
+  [relative section](#wrapper-checks)
+- The order/number of decrypted txs differs from the order/number committed in
+  the previous block

## Possible optimizations

-In this section we describe two alternative solutions that come with some optimizations.
+In this section we describe two alternative solutions that come with some
+optimizations.

### Transaction counter

-Instead of relying on a hash (32 bytes) we could use a 64 bits (8 bytes) transaction counter as nonce for the wrapper and inner transactions. The advantage is that the space required would be much less since we only need two 8 bytes values in storage for every address which is signing transactions. On the other hand, the handling of the counter for the inner transaction will be performed entirely in wasm (transactions and VPs) making it a bit less efficient. This solution also imposes a strict ordering on the transactions issued by a same address.
+Instead of relying on a hash (32 bytes) we could use a 64-bit (8 bytes)
+transaction counter as a nonce for the wrapper and inner transactions.
+The advantage is that the space required would be much less since we only need
+two 8-byte values in storage for every address which is signing transactions. On
+the other hand, the handling of the counter for the inner transaction will be
+performed entirely in wasm (transactions and VPs) making it a bit less
+efficient. This solution also imposes a strict ordering on the transactions
+issued by the same address.

-**NOTE**: this solution requires the ability to [yield](https://github.com/wasmerio/wasmer/issues/1127) execution from Wasmer which is not implemented yet.
+**NOTE**: this solution requires the ability to
+[yield](https://github.com/wasmerio/wasmer/issues/1127) execution from Wasmer
+which is not implemented yet.

#### InnerTx

-We will implement the protection entirely in Wasm: the check of the counter will be carried out by the validity predicates while the actual writing of the counter in storage will be done by the transactions themselves.
+We will implement the protection entirely in Wasm: the check of the counter will
+be carried out by the validity predicates while the actual writing of the
+counter in storage will be done by the transactions themselves.

-To do so, the `SignedTxData` attached to the transaction will hold the current value of the counter in storage:
+To do so, the `SignedTxData` attached to the transaction will hold the current
+value of the counter in storage:

```rust
pub struct SignedTxData {
-  /// The original tx data bytes, if any
-  pub data: Option<Vec<u8>>,
-  /// The optional transaction counter for replay protection
-  pub tx_counter: Option<u64>,
-  /// The signature is produced on the tx data concatenated with the tx code
-  /// and the timestamp.
-  pub sig: common::Signature,
+    /// The original tx data bytes, if any
+    pub data: Option<Vec<u8>>,
+    /// The optional transaction counter for replay protection
+    pub tx_counter: Option<u64>,
+    /// The signature is produced on the tx data concatenated with the tx code
+    /// and the timestamp.
+    pub sig: common::Signature,
}
```

-The counter must reside in `SignedTxData` and not in the data itself because this must be checked by the validity predicate which is not aware of the specific transaction that took place but only of the changes in the storage; therefore, the VP is not able to correctly deserialize the data of the transactions since it doesn't know what type of data the bytes represent.
+The counter must reside in `SignedTxData` and not in the data itself because
+this must be checked by the validity predicate which is not aware of the
+specific transaction that took place but only of the changes in the storage;
+therefore, the VP is not able to correctly deserialize the data of the
+transactions since it doesn't know what type of data the bytes represent.

-The counter will be signed as well to protect it from tampering and grant it the same guarantees explained at the [beginning](#encryption-authentication) of this document.
+The counter will be signed as well to protect it from tampering and grant it the
+same guarantees explained at the [beginning](#encryption-authentication) of this
+document.

-The wasm transaction will simply read the value from storage and increase its value by one. The target key in storage will be the following:
+The wasm transaction will simply read the value from storage and increase its
+value by one.
+The target key in storage will be the following:

```
/$Address/inner_tx_counter: u64
```

-The VP of the _source_ address will then check the validity of the signature and, if it's deemed valid, will proceed to check if the pre-value of the counter in storage was equal to the one contained in the `SignedTxData` struct and if the post-value of the key in storage has been incremented by one: if any of these conditions doesn't hold the VP will discard the transactions and prevent the changes from being applied to the storage.
+The VP of the _source_ address will then check the validity of the signature
+and, if it's deemed valid, will proceed to check if the pre-value of the counter
+in storage was equal to the one contained in the `SignedTxData` struct and if
+the post-value of the key in storage has been incremented by one: if any of
+these conditions doesn't hold, the VP will discard the transaction and prevent
+the changes from being applied to the storage.

-In the specific case of a shielded transfer, since MASP already comes with replay protection as part of the Zcash design (see the [MASP specs](../masp.md) and [Zcash protocol specs](https://zips.z.cash/protocol/protocol.pdf)), the counter in `SignedTxData` is not required and therefore should be optional.
+In the specific case of a shielded transfer, since MASP already comes with
+replay protection as part of the Zcash design (see the [MASP specs](../masp.md)
+and [Zcash protocol specs](https://zips.z.cash/protocol/protocol.pdf)), the
+counter in `SignedTxData` is not required and therefore should be optional.

-To implement replay protection for the inner transaction we will need to update all the VPs checking the transaction's signature to include the check on the transaction counter: at the moment the `vp_user` validity predicate is the only one to update. In addition, all the transactions involving `SignedTxData` should increment the counter.
+To implement replay protection for the inner transaction we will need to update
+all the VPs checking the transaction's signature to include the check on the
+transaction counter: at the moment the `vp_user` validity predicate is the only
+one to update. In addition, all the transactions involving `SignedTxData` should
+increment the counter.

#### WrapperTx

-To protect this transaction we can implement an in-protocol mechanism. Since the wrapper transaction gets signed before being submitted to the network, we can leverage the `tx_counter` field of the `SignedTxData` already introduced for the inner tx.
+To protect this transaction we can implement an in-protocol mechanism. Since the
+wrapper transaction gets signed before being submitted to the network, we can
+leverage the `tx_counter` field of the `SignedTxData` already introduced for the
+inner tx.

In addition, we need another counter in the storage subspace of every address:
@@ -234,109 +490,229 @@ In addition, we need another counter in the storage subspace of every address:
```
/$Address/wrapper_tx_counter: u64
```

-where `$Address` is the one signing the transaction (the same implied by the `pk` field of the `WrapperTx` struct).
+where `$Address` is the one signing the transaction (the same implied by the
+`pk` field of the `WrapperTx` struct).
-The check will consist of a signature check first followed by a check on the counter that will make sure that the counter attached to the transaction matches the one in storage for the signing address. This will be done in the `process_proposal` function so that validators can decide whether the transaction is valid or not; if it's not, then they will discard the transaction and skip to the following one.
+The check will consist of a signature check first followed by a check on the
+counter that will make sure that the counter attached to the transaction matches
+the one in storage for the signing address. This will be done in the
+`process_proposal` function so that validators can decide whether the
+transaction is valid or not; if it's not, then they will discard the transaction
+and skip to the following one.

-At last, in `finalize_block`, the ledger will update the counter key in storage, increasing its value by one. This will happen when the following conditions are met:
+Finally, in `finalize_block`, the ledger will update the counter key in storage,
+increasing its value by one. This will happen when the following conditions are
+met:

-- `process_proposal` has accepted the tx by validating its signature and transaction counter
-- The tx was correctly applied in `finalize_block` (for `WrapperTx` this simply means inclusion in the block and gas accounting)
+- `process_proposal` has accepted the tx by validating its signature and
+  transaction counter
+- The tx was correctly applied in `finalize_block` (for `WrapperTx` this simply
+  means inclusion in the block and gas accounting)

-Now, if a malicious user tried to replay this transaction, the `tx_counter` in the struct would no longer be equal to the one in storage and the transaction would be deemed invalid.
+Now, if a malicious user tried to replay this transaction, the `tx_counter` in
+the struct would no longer be equal to the one in storage and the transaction
+would be deemed invalid.

#### Implementation details

-In this section we'll talk about some details of the replay protection mechanism that derive from the solution proposed in this section.
+In this section we'll talk about some details of the replay protection mechanism
+that derive from the proposed solution.

##### Storage counters

-Replay protection will require interaction with the storage from both the protocol and Wasm. To do so we can take advantage of the `StorageRead` and `StorageWrite` traits to work with a single interface.
+Replay protection will require interaction with the storage from both the
+protocol and Wasm. To do so we can take advantage of the `StorageRead` and
+`StorageWrite` traits to work with a single interface.

-This implementation requires two transaction counters in storage for every address, so that the storage subspace of a given address looks like the following:
+This implementation requires two transaction counters in storage for every
+address, so that the storage subspace of a given address looks like the
+following:

```
/$Address/wrapper_tx_counter: u64
/$Address/inner_tx_counter: u64
```

-An implementation requiring a single counter in storage has been taken into consideration and discarded because that would not support batching; see the [relative section](#single-counter-in-storage) for a more in-depth explanation.
+An implementation requiring a single counter in storage has been taken into
+consideration and discarded because that would not support batching; see the
+[relative section](#single-counter-in-storage) for a more in-depth explanation.

-For both the wrapper and inner transaction, the increase of the counter in storage is an important step that must be correctly executed. First, the implementation will return an error in case of a counter overflow to prevent wrapping, since this would allow for the replay of previous transactions. Also, we want to increase the counter as soon as we verify that the signature, the chain id and the passed-in transaction counter are valid. The increase should happen immediately after the checks because of two reasons:
+For both the wrapper and inner transaction, the increase of the counter in
+storage is an important step that must be correctly executed. First, the
+implementation will return an error in case of a counter overflow to prevent
+wrapping, since this would allow for the replay of previous transactions. Also,
+we want to increase the counter as soon as we verify that the signature, the
+chain id and the passed-in transaction counter are valid. The increase should
+happen immediately after the checks because of two reasons:

- Prevent replay attack of a transaction in the same block
-- Update the transaction counter even in case the transaction fails, to prevent a possible replay attack in the future (since a transaction invalid at state Sx could become valid at state Sn where `n > x`)
-
-For `WrapperTx`, the counter increase and fee accounting will per performed in `finalize_block` (as stated in the [relative](#wrappertx) section).
-
-For `InnerTx`, instead, the logic is not straightforward. The transaction code will be executed in a Wasm environment ([Wasmer](https://wasmer.io)) till it eventually completes or raises an exception. In case of success, the counter in storage will be updated correctly but, in case of failure, the protocol will discard all of the changes brought by the transactions to the write-ahead-log, including the updated transaction counter. This is a problem because the transaction could be successfully replayed in the future if it will become valid.
-
-The ideal solution would be to interrupt the execution of the Wasm code after the transaction counter (if any) has been increased. This would allow performing a first run of the involved VPs and, if all of them accept the changes, let the protocol commit these changes before any possible failure. After that, the protocol would resume the execution of the transaction from the previous interrupt point until completion or failure, after which a second pass of the VPs is initiated to validate the remaining state modifications. In case of a VP rejection after the counter increase there would be no need to resume execution and the transaction could be immediately deemed invalid so that the protocol could skip to the next tx to be executed. With this solution, the counter update would be committed to storage regardless of a failure of the transaction itself.
-
-Unfortunately, at the moment, Wasmer doesn't allow [yielding](https://github.com/wasmerio/wasmer/issues/1127) from the execution.
-
-In case the transaction went out of gas (given the `gas_limit` field of the wrapper), all the changes applied will be discarded from the WAL and will not affect the state of the storage. The inner transaction could then be rewrapped with a correct gas limit and replayed until the `expiration` time has been reached.
+- Update the transaction counter even in case the transaction fails, to prevent
+  a possible replay attack in the future (since a transaction invalid at state
+  Sx could become valid at state Sn where `n > x`)
+
+For `WrapperTx`, the counter increase and fee accounting will be performed in
+`finalize_block` (as stated in the [relative](#wrappertx) section).
+
+For `InnerTx`, instead, the logic is not straightforward. The transaction code
+will be executed in a Wasm environment ([Wasmer](https://wasmer.io)) till it
+eventually completes or raises an exception. In case of success, the counter in
+storage will be updated correctly but, in case of failure, the protocol will
+discard all of the changes brought by the transactions to the write-ahead-log,
+including the updated transaction counter. This is a problem because the
+transaction could be successfully replayed in the future if it becomes valid.
+
+The ideal solution would be to interrupt the execution of the Wasm code after
+the transaction counter (if any) has been increased. This would allow performing
+a first run of the involved VPs and, if all of them accept the changes, let the
+protocol commit these changes before any possible failure. After that, the
+protocol would resume the execution of the transaction from the previous
+interrupt point until completion or failure, after which a second pass of the
+VPs is initiated to validate the remaining state modifications. In case of a VP
+rejection after the counter increase there would be no need to resume execution
+and the transaction could be immediately deemed invalid so that the protocol
+could skip to the next tx to be executed. With this solution, the counter update
+would be committed to storage regardless of a failure of the transaction itself.
+
+Unfortunately, at the moment, Wasmer doesn't allow
+[yielding](https://github.com/wasmerio/wasmer/issues/1127) from the execution.
+
+In case the transaction went out of gas (given the `gas_limit` field of the
+wrapper), all the changes applied will be discarded from the WAL and will not
+affect the state of the storage. The inner transaction could then be rewrapped
+with a correct gas limit and replayed until the `expiration` time has been
+reached.

##### Batching and transaction ordering

-This replay protection technique supports the execution of multiple transactions with the same address as _source_ in a single block. Actually, the presence of the transaction counters and the checks performed on them now impose a strict ordering on the execution sequence (which can be an added value for some use cases). The correct execution of more than one transaction per source address in the same block is preserved as long as:
+This replay protection technique supports the execution of multiple transactions
+with the same address as _source_ in a single block. Actually, the presence of
+the transaction counters and the checks performed on them now impose a strict
+ordering on the execution sequence (which can be an added value for some use
+cases). The correct execution of more than one transaction per source address in
+the same block is preserved as long as:

-1. The wrapper transactions are inserted in the block with the correct ascending order
+1. The wrapper transactions are inserted in the block with the correct ascending
+   order
2. No hole is present in the counters' sequence
-3. The counter of the first transaction included in the block matches the expected one in storage
+3. The counter of the first transaction included in the block matches the
+   expected one in storage

-The conditions are enforced by the block proposer who has an interest in maximizing the amount of fees extracted by the proposed block. To support this incentive, we will charge gas and fees at the same moment in which we perform the counter increase explained in the [storage counters](#storage-counters) section: this way we can avoid charging fees and gas if the transaction is invalid (invalid signature, wrong counter or wrong chain id), effectively incentivizing the block proposer to include only valid transactions and correctly reorder them to maximize the fees (see the [block rejection](#block-rejection) section for an alternative solution that was discarded in favor of this).
-
-In case of a missing transaction causes a hole in the sequence of transaction counters, the block proposer will include in the block all the transactions up to the missing one and discard all the ones following that one, effectively preserving the correct ordering.
-
-Correctly ordering the transactions is not enough to guarantee the correct execution. As already mentioned in the [WrapperTx](#wrappertx) section, the block proposer and the validators also need to access the storage to check that the first transaction counter of a sequence is actually the expected one.
-
-The entire counter ordering is only done on the `WrapperTx`: if the inner counter is wrong then the inner transaction will fail and the signer of the corresponding wrapper will be charged with fees. This incentivizes submitters to produce valid transactions and discourages malicious user from rewrapping and resubmitting old transactions.
+The conditions are enforced by the block proposer who has an interest in
+maximizing the amount of fees extracted by the proposed block. To support this
+incentive, validators will reject the proposed block if any of the included
+wrapper transactions are invalid, effectively incentivizing the block proposer
+to include only valid transactions and correctly reorder them to gain the fees.
+
+In case a missing transaction causes a hole in the sequence of transaction
+counters, the block proposer will include in the block all the transactions up
+to the missing one and discard all the ones following that one, effectively
+preserving the correct ordering.
+
+Correctly ordering the transactions is not enough to guarantee the correct
+execution. As already mentioned in the [WrapperTx](#wrappertx) section, the
+block proposer and the validators also need to access the storage to check that
+the first transaction counter of a sequence is actually the expected one.
+
+The entire counter ordering is only done on the `WrapperTx`: if the inner
+counter is wrong then the inner transaction will fail and the signer of the
+corresponding wrapper will be charged with fees. This incentivizes submitters to
+produce valid transactions and discourages malicious users from rewrapping and
+resubmitting old transactions.

##### Mempool checks

-As a form of optimization to prevent mempool spamming, some of the checks that have been introduced in this document will also be brought to the `mempool_validate` function. Of course, we always refer to checks on the `WrapperTx` only. More specifically:
+As a form of optimization to prevent mempool spamming, some of the checks that
+have been introduced in this document will also be brought to the
+`mempool_validate` function. Of course, we always refer to checks on the
+`WrapperTx` only.
+More specifically:

- Check the `ChainId` field
-- Check the signature of the transaction against the `pk` field of the `WrapperTx`
+- Check the signature of the transaction against the `pk` field of the
+  `WrapperTx`
- Perform a limited check on the transaction counter

-Regarding the last point, `mempool_validate` will check if the counter in the transaction is `>=` than the one in storage for the address signing the `WrapperTx`. A complete check (checking for strict equality) is not feasible, as described in the [relative](#mempool-counter-validation) section.
+Regarding the last point, `mempool_validate` will check if the counter in the
+transaction is `>=` the one in storage for the address signing the `WrapperTx`.
+A complete check (checking for strict equality) is not feasible, as described in
+the [relative](#mempool-counter-validation) section.

#### Alternatives considered

-In this section we list some possible solutions that were taken into consideration during the writing of this solution but were eventually discarded.
+In this section we list some possible solutions that were taken into
+consideration during the design of this mechanism but were eventually discarded.

##### Mempool counter validation

-The idea of performing a complete validation of the transaction counters in the `mempool_validate` function was discarded because of a possible flaw.
-
-Suppose a client sends five transactions (counters from 1 to 5). The mempool of the next block proposer is not guaranteed to receive them in order: something on the network could shuffle the transactions up so that they arrive in the following order: 2-3-4-5-1. Now, since we validate every single transaction to be included in the mempool in the exact order in which we receive them, we would discard the first four transactions and only accept the last one, that with counter 1. Now the next block proposer might have the four discarded transactions in its mempool (since those were not added to the previous block and therefore not evicted from the other mempools, at least they shouldn't, see [block rejection](#block-rejection)) and could therefore include them in the following block. But still, a process that could have ended in a single block actually took two blocks. Moreover, there are two more issues:
-
-- The next block proposer might have the remaining transactions out of order in his mempool as well, effectively propagating the same issue down to the next block proposer
-- The next block proposer might not have these transactions in his mempool at all
-
-Finally, transactions that are not allowed into the mempool don't get propagated to the other peers, making their inclusion in a block even harder.
-It is instead better to avoid a complete filter on the transactions based on their order in the mempool: instead we are going to perform a simpler check and then let the block proposer rearrange them correctly when proposing the block.
+The idea of performing a complete validation of the transaction counters in the
+`mempool_validate` function was discarded because of a possible flaw.
+
+Suppose a client sends five transactions (counters from 1 to 5).
+The mempool of the next block proposer is not guaranteed to receive them in
+order: something on the network could shuffle the transactions up so that they
+arrive in the following order: 2-3-4-5-1. Now, since we validate every single
+transaction to be included in the mempool in the exact order in which we receive
+them, we would discard the first four transactions and only accept the last one,
+the one with counter 1. Now the next block proposer might have the four
+discarded transactions in its mempool (since those were not added to the
+previous block and therefore not evicted from the other mempools, at least they
+shouldn't, see [block rejection](#block-rejection)) and could therefore include
+them in the following block. But still, a process that could have ended in a
+single block actually took two blocks. Moreover, there are two more issues:
+
+- The next block proposer might have the remaining transactions out of order in
+  his mempool as well, effectively propagating the same issue down to the next
+  block proposer
+- The next block proposer might not have these transactions in his mempool at
+  all
+
+Finally, transactions that are not allowed into the mempool don't get propagated
+to the other peers, making their inclusion in a block even harder. It is instead
+better to avoid a complete filter on the transactions based on their order in
+the mempool: instead we are going to perform a simpler check and then let the
+block proposer rearrange them correctly when proposing the block.

##### In-protocol protection for InnerTx

-An alternative implementation could place the protection for the inner tx in protocol, just like the wrapper one, based on the transaction counter inside `SignedTxData`. The check would run in `process_proposal` and the update in `finalize_block`, just like for the wrapper transaction. This implementation, though, shows two drawbacks:
-
-- it implies the need for an hard fork in case of a modification of the replay protection mechanism
-- it's not clear who's the source of the inner transaction from the outside, as that depends on the specific code of the transaction itself. We could use specific whitelisted txs set to define when it requires a counter (would not work for future programmable transactions), but still, we have no way to define which address should be targeted for replay protection (**blocking issue**)
+An alternative implementation could place the protection for the inner tx in
+protocol, just like the wrapper one, based on the transaction counter inside
+`SignedTxData`. The check would run in `process_proposal` and the update in
+`finalize_block`, just like for the wrapper transaction. This implementation,
+though, shows two drawbacks:
+
+- it implies the need for a hard fork in case of a modification of the replay
+  protection mechanism
+- it's not clear from the outside who the source of the inner transaction is, as
+  that depends on the specific code of the transaction itself. We could use a
+  specific whitelisted txs set to define when a counter is required (would not
+  work for future programmable transactions), but still, we have no way to
+  define which address should be targeted for replay protection (**blocking
+  issue**)

##### In-protocol counter increase for InnerTx

-In the [storage counter](#storage-counters) section we mentioned the issue of increasing the transaction counter for an inner tx even in case of failure. A possible solution that we took in consideration and discarded was to increase the counter from protocol in case of a failure.
+In the [storage counter](#storage-counters) section we mentioned the issue of
+increasing the transaction counter for an inner tx even in case of failure. A
+possible solution that we took into consideration and discarded was to increase
+the counter from protocol in case of a failure.

-This is technically feasible since the protocol is aware of the keys modified by the transaction and also of the results of the validity predicates (useful in case the transaction updated more than one counter in storage). It is then possible to recover the value and reapply the change directly from protocol. This logic though, is quite dispersive, since it effectively splits the management of the counter for the `InnerTx` among Wasm and protocol, while our initial intent was to keep it completely in Wasm.
+This is technically feasible since the protocol is aware of the keys modified by
+the transaction and also of the results of the validity predicates (useful in
+case the transaction updated more than one counter in storage). It is then
+possible to recover the value and reapply the change directly from protocol.
+This logic, though, is quite scattered, since it effectively splits the
+management of the counter for the `InnerTx` between Wasm and protocol, while our
+initial intent was to keep it completely in Wasm.

##### Single counter in storage

-We can't use a single transaction counter in storage because this would prevent batching.
+We can't use a single transaction counter in storage because this would prevent
+batching.

-As an example, if a client (with a current counter in storage holding value 5) generates two transactions to be included in the same block, signing both the outer and the inner (default behavior of the client), it would need to generate the following transaction counters:
+As an example, if a client (with a current counter in storage holding value 5)
+generates two transactions to be included in the same block, signing both the
+outer and the inner (default behavior of the client), it would need to generate
+the following transaction counters:

```
[
@@ -345,9 +721,15 @@ As an example, if a client (with a current counter in storage holding value 5) g
]
```

-Now, the current execution model of Namada includes the `WrapperTx` in a block first to then decrypt and execute the inner tx in the following block (respecting the committed order of the transactions). That would mean that the outer tx of `T1` would pass validation and immediately increase the counter to 6 to prevent a replay attack in the same block. Now, the outer tx of `T2` will be processed but it won't pass validation because it carries a counter with value 7 while the ledger expects 6.
+Now, the current execution model of Namada includes the `WrapperTx` in a block
+first to then decrypt and execute the inner tx in the following block
+(respecting the committed order of the transactions). That would mean that the
+outer tx of `T1` would pass validation and immediately increase the counter to 6
+to prevent a replay attack in the same block. Now, the outer tx of `T2` will be
+processed but it won't pass validation because it carries a counter with value 7
+while the ledger expects 6.
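+
+For concreteness, here is a minimal sketch of the strict check-and-bump rule
+that causes this failure (the helper name is hypothetical):
+
+```rust
+// A tx is valid only if it carries exactly the stored counter, which is then
+// bumped immediately to prevent a replay within the same block
+fn apply_counter(stored: &mut u64, tx_counter: u64) -> bool {
+    if tx_counter == *stored {
+        *stored += 1;
+        true
+    } else {
+        false
+    }
+}
+
+// With storage at 5: T1's wrapper (counter 5) passes and bumps storage to 6,
+// so T2's wrapper (counter 7) is rejected because 7 != 6
+fn example() {
+    let mut stored = 5;
+    assert!(apply_counter(&mut stored, 5));
+    assert!(!apply_counter(&mut stored, 7));
+}
+```
+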
-To fix this, one could think to set the counters as follows:
+To fix this, one could think of setting the counters as follows:

```
[
@@ -356,11 +738,23 @@ To fix this, one could think to set the counters as follows:
]
```

-This way both the transactions will be considered valid and executed. The issue is that, if the second transaction is not included in the block (for any reason), than the first transaction (the only one remaining at this point) will fail. In fact, after the outer tx has correctly increased the counter in storage to value 6 the block will be accepted. In the next block the inner transaction will be decrypted and executed but this last step will fail since the counter in `SignedTxData` carries a value of 7 and the counter in storage has a value of 6.
+This way both the transactions will be considered valid and executed. The issue
+is that, if the second transaction is not included in the block (for any
+reason), then the first transaction (the only one remaining at this point) will
+fail. In fact, after the outer tx has correctly increased the counter in storage
+to value 6, the block will be accepted. In the next block the inner transaction
+will be decrypted and executed but this last step will fail since the counter in
+`SignedTxData` carries a value of 7 and the counter in storage has a value of 6.

-To cope with this there are two possible ways. The first one is that, instead of checking the exact value of the counter in storage and increasing its value by one, we could check that the transaction carries a counter `>=` than the one in storage and write this one (not increase) to storage. The problem with this is that it the lack of support for strict ordering of execution.
+To cope with this, there are two possible ways. The first one is that, instead
+of checking the exact value of the counter in storage and increasing its value
+by one, we could check that the transaction carries a counter `>=` the one in
+storage and write this one (not increase) to storage. The problem with this is
+the lack of support for strict ordering of execution.

-The second option is to keep the usual increase strategy of the counter (increase by one and check for strict equality) and simply use two different counters in storage for each address. The transaction will then look like this:
+The second option is to keep the usual increase strategy of the counter
+(increase by one and check for strict equality) and simply use two different
+counters in storage for each address. The transaction will then look like this:

```
[
@@ -369,135 +763,282 @@ The second option is to keep the usual increase strategy of the counter (increas
]
```

-Since the order of inclusion of the `WrapperTxs` forces the same order of the execution for the inner ones, both transactions can be correctly executed and the correctness will be maintained even in case `T2` didn't make it to the block (note that the counter for an inner tx and the corresponding wrapper one don't need to coincide).
-
-##### Block rejection
-
-The implementation proposed in this document has one flaw when it comes to discontinuous transactions. If, for example, for a given address, the counter in storage for the `WrapperTx` is 5 and the block proposer receives, in order, transactions 6, 5 and 8, the proposer will have an incentive to correctly order transactions 5 and 6 to gain the fees that he would otherwise lose. Transaction 8 will never be accepted by the validators no matter the ordering (since they will expect tx 7 which got lost): this effectively means that the block proposer has no incentive to include this transaction in the block because it would gain him no fees but, at the same time, he doesn't really have a disincentive to not include it, since in this case the validators will simply discard the invalid tx but accept the rest of the block granting the proposer his fees on all the other transactions.
-
-A similar scenario happens in the case of a single transaction that is not the expected one (e.g. tx 5 when 4 is expected), or for a different type of inconsistencies, like a wrong `ChainId` or an invalid signature.
-
-It is up to the block proposer then, whether to include or not these kinds of transactions: a malicious proposer could do so to spam the block without suffering any penalty. The lack of fees could be a strong enough measure to prevent proposers from applying this behavior, together with the fact that the only damage caused to the chain would be spamming the blocks.
-
-If one wanted to completely prevent this scenario, the solution would be to reject the entire block: this way the proposer would have an incentive to behave correctly (by not including these transactions into the block) to gain the block fees. This would allow to shrink the size of the blocks in case of unfair block proposers but it would also cause the slow down of the block creation process, since after a block rejection a new Tendermint round has to be initiated.
+Since the order of inclusion of the `WrapperTxs` forces the same order of the
+execution for the inner ones, both transactions can be correctly executed and
+the correctness will be maintained even in case `T2` didn't make it to the block
+(note that the counter for an inner tx and the corresponding wrapper one don't
+need to coincide).

### Wrapper-bound InnerTx

-The solution is to tie an `InnerTx` to the corresponding `WrapperTx`. By doing so, it becomes impossible to rewrap an inner transaction and, therefore, all the attacks related to this practice would be unfeasible. This mechanism requires even less space in storage (only a 64 bit counter for every address signing wrapper transactions) and only one check on the wrapper counter in protocol. As a con, it requires communication between the signer of the inner transaction and that of the wrapper during the transaction construction. This solution also imposes a strict ordering on the wrapper transactions issued by a same address.
+The solution is to tie an `InnerTx` to the corresponding `WrapperTx`. By doing
+so, it becomes impossible to rewrap an inner transaction and, therefore, all the
+attacks related to this practice would be unfeasible. This mechanism requires
+even less space in storage (only a 64-bit counter for every address signing
+wrapper transactions) and only one check on the wrapper counter in protocol. As
+a con, it requires communication between the signer of the inner transaction and
+that of the wrapper during the transaction construction. This solution also
+imposes a strict ordering on the wrapper transactions issued by the same
+address.
-To do so we will have to change the current definition of the two tx structs to the following:
+To do so we will have to change the current definition of the two tx structs to
+the following:

```rust
pub struct WrapperTx {
-  /// The fee to be payed for including the tx
-  pub fee: Fee,
-  /// Used to determine an implicit account of the fee payer
-  pub pk: common::PublicKey,
-  /// Max amount of gas that can be used when executing the inner tx
-  pub gas_limit: GasLimit,
-  /// Lifetime of the transaction, also determines which decryption key will be used
-  pub expiration: DateTimeUtc,
-  /// Chain identifier for replay protection
-  pub chain_id: ChainId,
-  /// Transaction counter for replay protection
-  pub tx_counter: u64,
-  /// the encrypted payload
-  pub inner_tx: EncryptedTx,
+    /// The fee to be paid for including the tx
+    pub fee: Fee,
+    /// Used to determine an implicit account of the fee payer
+    pub pk: common::PublicKey,
+    /// Max amount of gas that can be used when executing the inner tx
+    pub gas_limit: GasLimit,
+    /// Lifetime of the transaction, also determines which decryption key will be used
+    pub expiration: DateTimeUtc,
+    /// Chain identifier for replay protection
+    pub chain_id: ChainId,
+    /// Transaction counter for replay protection
+    pub tx_counter: u64,
+    /// the encrypted payload
+    pub inner_tx: EncryptedTx,
}

pub struct Tx {
-  pub code: Vec<u8>,
-  pub data: Option<Vec<u8>>,
-  pub timestamp: DateTimeUtc,
-  pub wrapper_commit: Option<Hash>,
+    pub code: Vec<u8>,
+    pub data: Option<Vec<u8>>,
+    pub timestamp: DateTimeUtc,
+    pub wrapper_commit: Option<Hash>,
}
-```
+```

-The Wrapper transaction no longer holds the inner transaction hash while the inner one now holds a commit to the corresponding wrapper tx in the form of the hash of a `WrapperCommit` struct, defined as:
+The Wrapper transaction no longer holds the inner transaction hash while the
+inner one now holds a commit to the corresponding wrapper tx in the form of the
+hash of a `WrapperCommit` struct, defined as:

```rust
pub struct WrapperCommit {
-  pub pk: common::PublicKey,
-  pub tx_counter: u64,
-  pub expiration: DateTimeUtc,
-  pub chain_id: ChainId,
+    pub pk: common::PublicKey,
+    pub tx_counter: u64,
+    pub expiration: DateTimeUtc,
+    pub chain_id: ChainId,
}
```

-The `pk-tx_counter` couple contained in this struct, uniquely identifies a single `WrapperTx` (since a valid tx_counter is unique given the address) so that the inner one is now bound to this specific wrapper. The remaining fields, `expiration` and `chain_id`, will tie these two values given their importance in terms of safety (see the [relative](#wrappertx-checks) section). Note that the `wrapper_commit` field must be optional because the `WrapperTx` struct itself gets converted to a `Tx` struct before submission but it doesn't need any commitment.
-
-Both the inner and wrapper tx get signed on their hash, as usual, to prevent tampering with data. When a wrapper gets processed by the ledger, we first check the validity of the signature, checking that none of the fields were modified: this means that the inner tx embedded within the wrapper is, in fact, the intended one. This last statement means that no external attacker has tampered data, but the tampering could still have been performed by the signer of the wrapper before signing the wrapper transaction.
-
-If this check (and others, explained later in the [checks](#wrappertx-checks) section) passes, then the inner tx gets decrypted in the following block proposal process. At this time we check that the order in which the inner txs are inserted in the block matches that of the corresponding wrapper txs in the previous block. To do so, we rely on an in-storage queue holding the hash of the `WrapperCommit` struct computed from the wrapper tx. From the inner tx we extract the `WrapperCommit` hash and check that it matches that in the queue: if they don't it means that the inner tx has been reordered or rewrapped and we reject the block. Note that, since we have already checked the wrapper at this point, the only way to rewrap the inner tx would be to also modify its commitment (need to change at least the `tx_counter` field), otherwise the checks on the wrapper would have spotted the inconsistency and rejected the tx.
-
-If this check passes then we can send the inner transaction to the wasm environment for execution: if the transaction is signed, then at least one VP will check its signature to spot possible tampering of the data (especially by the wrapper signer, since this specific case cannot be checked before this step) and, if this is the case, will reject this transaction and no storage modifications will be applied.
+The `pk-tx_counter` couple contained in this struct uniquely identifies a single
+`WrapperTx` (since a valid tx_counter is unique given the address) so that the
+inner one is now bound to this specific wrapper. The remaining fields,
+`expiration` and `chain_id`, bind the inner tx to these two values, given their
+importance in terms of safety (see the [relative](#wrappertx-checks) section).
+Note that the `wrapper_commit` field must be optional because the `WrapperTx`
+struct itself gets converted to a `Tx` struct before submission but it doesn't
+need any commitment.
+
+Both the inner and wrapper tx get signed on their hash, as usual, to prevent
+tampering with data. When a wrapper gets processed by the ledger, we first check
+the validity of the signature, checking that none of the fields were modified:
+this means that the inner tx embedded within the wrapper is, in fact, the
+intended one. This last statement means that no external attacker has tampered
+with the data, but the tampering could still have been performed by the signer
+of the wrapper before signing the wrapper transaction.
+
+If this check (and others, explained later in the [checks](#wrappertx-checks)
+section) passes, then the inner tx gets decrypted in the following block
+proposal process. At this time we check that the order in which the inner txs
+are inserted in the block matches that of the corresponding wrapper txs in the
+previous block. To do so, we rely on an in-storage queue holding the hash of the
+`WrapperCommit` struct computed from the wrapper tx. From the inner tx we
+extract the `WrapperCommit` hash and check that it matches that in the queue: if
+they don't match, it means that the inner tx has been reordered and we reject
+the block.
+
+If this check passes then we can send the inner transaction to the wasm
+environment for execution: if the transaction is signed, then at least one VP
+will check its signature to spot possible tampering of the data (especially by
+the wrapper signer, since this specific case cannot be checked before this step)
+and, if this is the case, will reject this transaction and no storage
+modifications will be applied.
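+
+A minimal, hypothetical sketch of the ordering check described above (the queue
+type and helper name are assumptions; `Hash` is the document's hash type):
+
+```rust
+use std::collections::VecDeque;
+
+// The hashes of the `WrapperCommit` structs are queued in storage when the
+// wrappers are committed; each decrypted inner tx must match the hash at the
+// front of the queue, otherwise the block is rejected
+fn check_inner_order(
+    queue: &mut VecDeque<Hash>,
+    inner_commit_hash: &Hash,
+) -> bool {
+    match queue.pop_front() {
+        // The inner tx is bound to the expected wrapper
+        Some(expected) => expected == *inner_commit_hash,
+        // More inner txs than committed wrappers: reject
+        None => false,
+    }
+}
+```
+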
In summary: - The `InnerTx` carries a unique identifier of the `WrapperTx` embedding it - Both the inner and wrapper txs are signed on all of their data -- The signature check on the wrapper tx ensures that the inner transaction is the intended one and that this wrapper has not been used to wrap a different inner tx. It also verifies that no tampering happened with the inner transaction by a third party. Finally, it ensures that the public key is the one of the signer -- The check on the `WrapperCommit` ensures that the inner tx has not been reordered nor rewrapped (this last one is a non-exhaustive check, inner tx data could have been tampered with by the wrapper signer) -- The signature check of the inner tx performed in Vp grants that no data of the inner tx has been tampered with, effectively verifying the correctness of the previous check (`WrapperCommit`) - -This sequence of controls makes it no longer possible to rewrap an `InnerTx` which is now bound to its wrapper. This implies that replay protection is only needed on the `WrapperTx` since there's no way to extract the inner one, rewrap it and replay it. +- The signature check on the wrapper tx ensures that the inner transaction is + the intended one and that this wrapper has not been used to wrap a different + inner tx. It also verifies that no tampering happened with the inner + transaction by a third party. Finally, it ensures that the public key is + that of the signer +- The check on the `WrapperCommit` ensures that the inner tx has not been + reordered nor rewrapped (this last one is a non-exhaustive check, inner tx + data could have been tampered with by the wrapper signer) +- The signature check of the inner tx performed in the VP guarantees that no + data of the inner tx has been tampered with, effectively verifying the + correctness of the previous check (`WrapperCommit`) + +This sequence of controls makes it no longer possible to rewrap an `InnerTx`, +which is now bound to its wrapper. This implies that replay protection is only +needed on the `WrapperTx` since there's no way to extract the inner one, rewrap +it and replay it. #### WrapperTx checks -In `mempool_validation` and `process_proposal` we will perform some checks on the wrapper tx to validate it. These will involve: +In `mempool_validation` we will perform some checks on the wrapper tx to +validate it. These will involve: - Valid signature -- Enough funds to pay for the fee +- `GasLimit` is below the block gas limit (see the + [fee specs](../economics/fee-system.md) for more details) +- `Fees` are paid with an accepted token and match the minimum amount required + (see the [fee specs](../economics/fee-system.md) for more details) - Valid chainId - Valid transaction counter - Valid expiration -These checks can all be done before executing the transactions themselves. The check on the gas cannot be done ahead of time and we'll deal with it later. If any of these fails, the transaction should be considered invalid and the action to take will be one of the followings: - -1. If the checks fail on the signature, chainId, expiration or transaction counter, then this transaction will be forever invalid, regardless of the possible evolution of the ledger's state. There's no need to include the transaction in the block nor to increase the transaction counter.
Moreover, we **cannot** include this transaction in the block to charge a fee (as a sort of punishment) because these errors may not depend on the signer of the tx (could be due to malicious users or simply a delay in the tx inclusion in the block) -2. If the checks fail _only_ because of an insufficient balance, the wrapper should be kept in mempool for a future play in case the funds should become available -3. If all the checks pass validation we will include the transaction in the block to increase the counter and charge the fee - -Note that, regarding point one, there's a distinction to be made about an invalid `tx_counter` which could be invalid because of being old or being in advance. To solve this last issue (counter greater than the expected one), we have to introduce the concept of a lifetime (or timeout) for the transactions: basically, the `WrapperTx` will hold an extra field called `expiration` stating the maximum time up until which the submitter is willing to see the transaction executed. After the specified time the transaction will be considered invalid and discarded regardless of all the other checks. This way, in case of a transaction with a counter greater than expected, it is sufficient to wait till after the expiration to submit more transactions, so that the counter in storage is not modified (kept invalid for the transaction under observation) and replaying that tx would result in a rejection. - -This actually generalizes to a more broad concept. In general, a transaction is valid at the moment of submission, but after that, a series of external factors (ledger state, etc.) might change the mind of the submitter who's now not interested in the execution of the transaction anymore. By introducing this new field we are introducing a new constraint in the transaction's contract, where the ledger will make sure to prevent the execution of the transaction after the deadline and, on the other side, the submitter commits himself to the result of the execution at least until its expiration. If the expiration is reached and the transaction has not been executed the submitter can decide to submit a new, identical transaction if he's still interested in the changes carried by it. - -In our design, the `expiration` will hold until the transaction is executed, once it's executed, either in case of success or failure, the `tx_counter` will be increased and the transaction will not be replayable. In essence, the transaction submitter commits himself to one of these three conditions: +These checks can all be done before executing the transactions themselves. If +any of these fails, the transaction should be considered invalid and the action +to take will be one of the following: + +1. If the checks fail on the signature, chainId, expiration or transaction + counter, then this transaction will be forever invalid, regardless of the + possible evolution of the ledger's state. There's no need to include the + transaction in the block nor to increase the transaction counter. Moreover, + we **cannot** include this transaction in the block to charge a fee (as a + sort of punishment) because these errors may not depend on the signer of the + tx (could be due to malicious users or simply a delay in the tx inclusion in + the block) +2. If the checks fail on `Fee` or `GasLimit`, the transaction should be + discarded.
In theory, the gas limit of a block is a Namada parameter + controlled by governance, so there's a chance that the transaction could + become valid in the future should this limit be raised. The same applies to + the token whitelist and the minimum fee required. However, we can expect a + slow rate of change of these parameters, so we can reject the tx (the + submitter can always resubmit it at a future time) +3. If all the checks pass validation, we will include the transaction in the + block to increase the counter and charge the fee + +Note that, regarding point one, there's a distinction to be made between a +`tx_counter` that is invalid because it is old and one that is invalid because +it is in advance. To solve this last issue (counter greater than the expected +one), we have to introduce the concept of a lifetime (or timeout) for the +transactions: basically, the `WrapperTx` will hold an extra field called +`expiration` stating the maximum time up until which the submitter is willing +to see the transaction executed. After the specified time the transaction will +be considered invalid and discarded regardless of all the other checks. This +way, in case of a transaction with a counter greater than expected, it is +sufficient to wait until after the expiration to submit more transactions, so +that the counter in storage is not modified (kept invalid for the transaction +under observation) and replaying that tx would result in a rejection. + +This actually generalizes to a broader concept. In general, a transaction is +valid at the moment of submission, but after that, a series of external factors +(ledger state, etc.) might change the mind of the submitter, who may no longer +be interested in the execution of the transaction. By introducing this new +field we are introducing a new constraint in the transaction's contract, where +the ledger will make sure to prevent the execution of the transaction after the +deadline and, on the other side, the submitter commits himself to the result of +the execution at least until its expiration. If the expiration is reached and +the transaction has not been executed, the submitter can decide to submit a +new, identical transaction if he's still interested in the changes carried by +it. + +In our design, the `expiration` will hold until the transaction is executed; +once it's executed, whether it succeeds or fails, the `tx_counter` will be +increased and the transaction will not be replayable. In essence, the +transaction submitter commits himself to one of these three conditions: - Transaction is invalid regardless of the specific state -- Transaction is executed (either with success or not) and the transaction counter is increased +- Transaction is executed (either with success or not) and the transaction + counter is increased - Expiration time has passed The first condition satisfied will invalidate further executions of the same tx. -The `expiration` parameter also justifies step 2 of the previous bullet points which states that if the validity checks fail only because of an insufficient balance to pay for fees than the transaction should be kept in mempool for a future execution. Without it, the transaction could be potentially executed at any future moment (provided that the counter is still valid), possibily going against the mutated interests of the submitter.
With the expiration parameter, now, the submitter commits himself to accepting the execution of the transaction up to the specified time: it's going to be his responsibility to provide a sensible value for this parameter. Given this constraint the transaction will be kept in memepool up until the expiration (since it would become invalid after that in any case), to prevent the mempool from increasing too much in size. - -This mechanism can also be applied to another scenario. Suppose a transaction was not propagated to the network by a node (or a group of colluding nodes). Now, this tx might be valid, but it doesn't get inserted into a block. Without an expiration, if the submitter doesn't submit any other transaction (which gets included in a block to increase the transaction counter), this tx can be replayed (better, applied, since it was never executed in the first place) at a future moment in time when the submitter might not be willing to execute it any more. - -Since the signer of the wrapper may be different from the one of the inner we also need to include this `expiration` field in the `WrapperCommit` struct, to prevent the signer of the wrapper from setting a lifetime which is in conflict with the interests of the inner signer. Note that adding a separate lifetime for the wrapper alone (which would require two separate checks) doesn't carry any benefit: a wrapper with a lifetime greater than the inner would have no sense since the inner would fail. Restricting the lifetime would work but it also means that the wrapper could prevent a valid inner transaction from being executed. We will then keep a single `expiration` field specifying the wrapper tx max time (the inner one will actually be executed one block later because of the execution mechanism of Namada). - -To prevent the signer of the wrapper from submitting the transaction to a different chain, the `ChainId` field should also be included in the commit. - -Finally, in case the transaction run out of gas (based on the provided `gas_limit` field of the wrapper) we don't need to take any action: by this time the transaction counter will have already been incremented and the tx is not replayable anymore. In theory, we don't even need to increment the counter since the only way this transaction could become valid is a change in the way gas is accounted, which might require a fork anyway, and consequently a change in the required `ChainId`. However, since we can't tell the gas consumption before the inner tx has been executed, we cannot anticipate this check. +Since the signer of the wrapper may be different from the one of the inner, we +also need to include this `expiration` field in the `WrapperCommit` struct, to +prevent the signer of the wrapper from setting a lifetime which is in conflict +with the interests of the inner signer. Note that adding a separate lifetime for +the wrapper alone (which would require two separate checks) doesn't carry any +benefit: a wrapper with a lifetime greater than the inner would make no sense +since the inner would fail. Restricting the lifetime would work, but it also +means that the wrapper could prevent a valid inner transaction from being +executed. We will then keep a single `expiration` field specifying the wrapper +tx's max time (the inner one will actually be executed one block later because +of the execution mechanism of Namada). + +To prevent the signer of the wrapper from submitting the transaction to a +different chain, the `ChainId` field should also be included in the commit.
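+
+Reusing the struct definitions above, the consistency rule that binds a
+wrapper to the commit carried by its inner tx can be sketched as follows (a
+simplified illustration which assumes the fields implement `PartialEq`; the
+real check compares the hash of the reconstructed `WrapperCommit` against the
+one carried by the inner tx):
+
+```rust
+/// Returns true if the wrapper honours the constraints committed to by the
+/// inner tx: same fee payer, transaction counter, expiration and chain id.
+fn wrapper_matches_commit(wrapper: &WrapperTx, commit: &WrapperCommit) -> bool {
+    wrapper.pk == commit.pk
+        && wrapper.tx_counter == commit.tx_counter
+        && wrapper.expiration == commit.expiration
+        && wrapper.chain_id == commit.chain_id
+}
+```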
+ +Finally, in case the transaction runs out of gas (based on the provided +`GasLimit` field of the wrapper), we don't need to take any action: by this time +the transaction counter will have already been incremented and the tx is not +replayable anymore. In theory, we don't even need to increment the counter since +the only way this transaction could become valid is a change in the way gas is +accounted, which might require a fork anyway, and consequently a change in the +required `ChainId`. However, since we can't tell the gas consumption before the +inner tx has been executed, we cannot anticipate this check. + +All these checks are also run in `process_proposal` with an addition: validators +also check that the wrapper signer has enough funds to pay the fee. This check +should not be done in mempool because the funds available for a certain address +are variable in time and should only be checked at block inclusion time. If any +of the checks fail here, the entire block is rejected, forcing a new Tendermint +round to begin (see a better explanation of this choice in the +[related](#block-rejection) section). + +The `expiration` parameter also justifies performing the check on funds only +in `process_proposal` and not in mempool. Without it, the transaction could be +potentially executed at any future moment, possibly going against the mutated +interests of the submitter. With the expiration parameter, now, the submitter +commits himself to accepting the execution of the transaction up to the +specified time: it's going to be his responsibility to provide a sensible value +for this parameter. Given this constraint, the transaction will be kept in +mempool up until the expiration (since it would become invalid after that in +any case), to prevent the mempool from increasing too much in size. + +This mechanism can also be applied to another scenario. Suppose a transaction +was not propagated to the network by a node (or a group of colluding nodes). +Now, this tx might be valid, but it doesn't get inserted into a block. Without +an expiration, if the submitter doesn't submit any other transaction (which gets +included in a block to increase the transaction counter), this tx can be +replayed (or rather, applied, since it was never executed in the first place) +at a future moment in time when the submitter might not be willing to execute +it any more. #### WrapperCommit -The fields of `WrapperTx` not included in `WrapperCommit` are at the discretion of the `WrapperTx` producer. +The fields of `WrapperTx` not included in `WrapperCommit` are at the discretion +of the `WrapperTx` producer.
These fields are not included in the commit because +of one of these two reasons: -- They depend on the specific state of the wrapper signer and cannot be forced (like `fee`, since the wrapper signer must have enough funds to pay for those) -- They are not a threat (in terms of replay attacks) to the signer of the inner transaction in case of failure of the transaction +- They depend on the specific state of the wrapper signer and cannot be forced + (like `fee`, since the wrapper signer must have enough funds to pay for those) +- They are not a threat (in terms of replay attacks) to the signer of the inner + transaction in case of failure of the transaction -In a certain way, the `WrapperCommit` not only binds an `InnerTx` no a wrapper, but effectively allows the inner to control the wrapper by requesting some specific parameters for its creation and bind these parameters among the two transactions: this allows us to apply the same constraints to both txs while performing the checks on the wrapper only. +In a certain way, the `WrapperCommit` not only binds an `InnerTx` to a wrapper, +but effectively allows the inner to control the wrapper by requesting some +specific parameters for its creation, binding these parameters across the two +transactions: this allows us to apply the same constraints to both txs while +performing the checks on the wrapper only. #### Transaction creation process -To craft a transaction, the process will now be the following (optional steps are only required if the signer of the inner differs from that of the wrapper): - -- (**Optional**) the `InnerTx` constructor request, to the wrapper signer, his public key and the `tx_counter` to be used -- The `InnerTx` is constructed in its entirety with also the `wrapper_commit` field to define the constraints of the future wrapper -- The produced `Tx` struct get signed over all of its data (with `SignedTxData`) producing a new struct `Tx` -- (**Optional**) The inner tx produced is sent to the `WrapperTx` producer together with the `WrapperCommit` struct (required since the inner tx only holds the hash of it) -- The signer of the wrapper constructs a `WrapperTx` compliant with the `WrapperCommit` fields +To craft a transaction, the process will now be the following (optional steps +are only required if the signer of the inner differs from that of the wrapper): + +- (**Optional**) the `InnerTx` constructor requests, from the wrapper signer, + his public key and the `tx_counter` to be used +- The `InnerTx` is constructed in its entirety, including the `wrapper_commit` + field to define the constraints of the future wrapper +- The produced `Tx` struct gets signed over all of its data (with + `SignedTxData`), producing a new `Tx` struct +- (**Optional**) The inner tx produced is sent to the `WrapperTx` producer + together with the `WrapperCommit` struct (required since the inner tx only + holds the hash of it) +- The signer of the wrapper constructs a `WrapperTx` compliant with the + `WrapperCommit` fields - The produced `WrapperTx` gets signed over all of its fields -Compared to a solution not binding the inner tx to the wrapper one, this solution requires the exchange of 3 messages (request `tx_counter`, receive `tx_counter`, send `InnerTx`) between the two signers (in case they differ), instead of one.
However, it allows the signer of the inner to send the `InnerTx` to the wrapper signer already encrypted, guaranteeing a higher level of safety: only the `WrapperCommit` struct should be sent clear, but this doesn't reveal any sensitive information about the inner transaction itself. +Compared to a solution not binding the inner tx to the wrapper one, this +solution requires the exchange of 3 messages (request `tx_counter`, receive +`tx_counter`, send `InnerTx`) between the two signers (in case they differ), +instead of one. However, it allows the signer of the inner to send the `InnerTx` +to the wrapper signer already encrypted, guaranteeing a higher level of safety: +only the `WrapperCommit` struct should be sent in the clear, but this doesn't +reveal any sensitive information about the inner transaction itself. diff --git a/documentation/specs/src/economics/inflation-system.md b/documentation/specs/src/economics/inflation-system.md index 921e78d4e51..931beab75ad 100644 --- a/documentation/specs/src/economics/inflation-system.md +++ b/documentation/specs/src/economics/inflation-system.md @@ -35,17 +35,17 @@ Second, we take as input the following state values: - $S_{NAM}$ is the current supply of NAM - $L_{PoS}$ is the current amount of NAM locked in proof-of-stake -- $I_{PoS}$ is the current proof-of-stake inflation amount, in units of tokens per epoch +- $I_{PoS-last}$ is the proof-of-stake inflation amount from the previous epoch, in units of tokens per epoch - $R_{PoS-last}$ is the proof-of-stake locked token ratio from the previous epoch - $L_{SP_A}$ is the current amount of asset $A$ locked in the shielded pool (separate value for each asset $A$) -- $I_{SP_A}$ is the current shielded pool inflation amount for asset $A$, in units of tokens per epoch +- $I_{SP_A-last}$ is the shielded pool inflation amount for asset $A$ from the previous epoch, in units of tokens per epoch - $R_{SP_A-last}$ is the shielded pool locked token ratio for asset $A$ from the previous epoch (separate value for each asset $A$) Public goods funding inflation can be calculated and paid immediately (in terms of total tokens per epoch): - $I_{PGF} = \lambda_{PGF} * S_{NAM} / EpochsPerYear$ -These tokens are distributed to the public goods funding validity predicate. +These tokens ($I_{PGF}$) are distributed to the public goods funding validity predicate. To run the PD-controllers for proof-of-stake and shielded pool rewards, we first calculate some intermediate values: @@ -64,17 +64,17 @@ Then, for proof-of-stake first, run the PD-controller: - Calculate the error $E_{PoS} = R_{PoS-target} - R_{PoS}$ - Calculate the error derivative $E'_{PoS} = E_{PoS} - E_{PoS-last} = R_{PoS-last} - R_{PoS}$ - Calculate the control value $C_{PoS} = (KP_{PoS} * E_{PoS}) - (KD_{PoS} * E'_{PoS})$ -- Calculate the new $I'_{PoS} = max(0, min(I_{PoS} + C_{PoS}, Cap_{PoS-Epoch}))$ +- Calculate the new $I_{PoS} = max(0, min(I_{PoS-last} + C_{PoS}, Cap_{PoS-Epoch}))$ -These tokens are distributed to the proof-of-stake reward distribution validity predicate. +These tokens ($I_{PoS}$) are distributed to the proof-of-stake reward distribution validity predicate.
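+
+As a minimal sketch of the controller step above (illustrative only: plain
+`f64` is used for readability, whereas the protocol uses decimal arithmetic;
+the names mirror the symbols in the formulas):
+
+```rust
+/// One PD-controller update for PoS inflation, in tokens per epoch.
+fn pos_inflation_update(
+    r_target: f64,  // R_{PoS-target}
+    r: f64,         // R_{PoS}, the current locked token ratio
+    r_last: f64,    // R_{PoS-last}, the previous epoch's locked token ratio
+    i_last: f64,    // I_{PoS-last}, the previous inflation amount
+    kp: f64,        // KP_{PoS}
+    kd: f64,        // KD_{PoS}
+    cap_epoch: f64, // Cap_{PoS-Epoch}
+) -> f64 {
+    let error = r_target - r; // E_{PoS}
+    let error_derivative = r_last - r; // E'_{PoS} = E_{PoS} - E_{PoS-last}
+    let control = kp * error - kd * error_derivative; // C_{PoS}
+    // I_{PoS} = max(0, min(I_{PoS-last} + C_{PoS}, Cap_{PoS-Epoch}))
+    (i_last + control).clamp(0.0, cap_epoch)
+}
+```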
Similarly, for each asset $A$ for which shielded pool rewards are being paid: - Calculate the error $E_{SP_A} = R_{SP_A-target} - R_{SP_A}$ - Calculate the error derivative $E'_{SP_A} = E_{SP_A} - E_{SP_A-last} = R_{SP_A-last} - R_{SP_A}$ - Calculate the control value $C_{SP_A} = (KP_{SP_A} * E_{SP_A}) - (KD_{SP_A} * E'_{SP_A})$ -- Calculate the new $I'_{SP_A} = max(0, min(I_{SP_A} + C_{SP_A}, Cap_{SP_A-Epoch}))$ +- Calculate the new $I_{SP_A} = max(0, min(I_{SP_A-last} + C_{SP_A}, Cap_{SP_A-Epoch}))$ -These tokens are distributed to the shielded pool reward distribution validity predicate. +These tokens ($I_{SP_A}$) are distributed to the shielded pool reward distribution validity predicate. -Finally, we store the latest inflation and locked token ratio values for the next epoch's controller round. \ No newline at end of file +Finally, we store the latest inflation and locked token ratio values for the next epoch's controller round. diff --git a/documentation/specs/src/economics/proof-of-stake/bonding-mechanism.md b/documentation/specs/src/economics/proof-of-stake/bonding-mechanism.md index c9da0ec91b6..b73dacf5593 100644 --- a/documentation/specs/src/economics/proof-of-stake/bonding-mechanism.md +++ b/documentation/specs/src/economics/proof-of-stake/bonding-mechanism.md @@ -11,7 +11,7 @@ The data relevant to the PoS system in the ledger's state are epoched. Each data - [Validators' consensus key, state and total bonded tokens](#validator). Identified by the validator's address. - [Bonds](#bonds) are created by self-bonding and delegations. They are identified by the pair of source address and the validator's address. -Changes to the epoched data do not take effect immediately. Instead, changes in epoch `n` are queued to take effect in the epoch `n + pipeline_length` for most cases and `n + pipeline_length + unboding_length` for [unbonding](#unbond) actions. Should the same validator's data or same bonds (i.e. with the same identity) be updated more than once in the same epoch, the later update overrides the previously queued-up update. For bonds, the token amounts are added up. Once the epoch `n` has ended, the queued-up updates for epoch `n + pipeline_length` are final and the values become immutable. +Changes to the epoched data do not take effect immediately. Instead, changes in epoch `n` are queued to take effect in the epoch `n + pipeline_length` for most cases and `n + pipeline_length + unbonding_length` for [unbonding](#unbond) actions. Should the same validator's data or same bonds (i.e. with the same identity) be updated more than once in the same epoch, the later update overrides the previously queued-up update. For bonds, the token amounts are added up. Once the epoch `n` has ended, the queued-up updates for epoch `n + pipeline_length` are final and the values become immutable. Additionally, any account may submit evidence for [a slashable misbehaviour](#slashing). @@ -115,7 +115,7 @@ Once an offense has been reported: - Individual: Once someone has reported an offense it is reviewed by validators and if confirmed the offender is slashed. - [cubic slashing](./cubic-slashing.md): escalated slashing -Instead of absolute values, validators' total bonded token amounts and bonds' and unbonds' token amounts are stored as their deltas (i.e. the change of quantity from a previous epoch) to allow distinguishing changes for different epoch, which is essential for determining whether tokens should be slashed. 
Slashes for a fault that occurred in epoch `n` may only be applied before the beginning of epoch `n + unbonding_length`. For this reason, in epoch `m` we can sum all the deltas of total bonded token amounts and bonds and unbond with the same source and validator for epoch equal or less than `m - unboding_length` into a single total bonded token amount, single bond and single unbond record. This is to keep the total number of total bonded token amounts for a unique validator and bonds and unbonds for a unique pair of source and validator bound to a maximum number (equal to `unbonding_length`). +Instead of absolute values, validators' total bonded token amounts and bonds' and unbonds' token amounts are stored as their deltas (i.e. the change of quantity from a previous epoch) to allow distinguishing changes for different epochs, which is essential for determining whether tokens should be slashed. Slashes for a fault that occurred in epoch `n` may only be applied before the beginning of epoch `n + unbonding_length`. For this reason, in epoch `m` we can sum all the deltas of total bonded token amounts and bonds and unbond with the same source and validator for epoch equal or less than `m - unbonding_length` into a single total bonded token amount, single bond and single unbond record. This is to keep the total number of total bonded token amounts for a unique validator and bonds and unbonds for a unique pair of source and validator bound to a maximum number (equal to `unbonding_length`). To disincentivize validators misbehaviour in the PoS system a validator may be slashed for any fault that it has done. An evidence of misbehaviour may be submitted by any account for a fault that occurred in epoch `n` anytime before the beginning of epoch `n + unbonding_length`. diff --git a/documentation/specs/src/further-reading.md b/documentation/specs/src/further-reading.md index 7ec6a90ebba..464b51d55a9 100644 --- a/documentation/specs/src/further-reading.md +++ b/documentation/specs/src/further-reading.md @@ -5,6 +5,6 @@ Thanks for reading! You can find further information about the project below: - [Namada website](https://namada.net) - [Namada source code](https://github.com/anoma/namada) - [Namada community links](https://namada.net/community) -- [Namada Medium page](https://medium.com/namadanetwork) +- [Namada blog](https://blog.namada.net) - [Namada Docs](https://docs.namada.net/) -- [Namada Twitter](https://twitter.com/namadanetwork) \ No newline at end of file +- [Namada Twitter](https://twitter.com/namadanetwork) diff --git a/documentation/specs/src/introduction.md b/documentation/specs/src/introduction.md index ae847705bce..55aeba84f0b 100644 --- a/documentation/specs/src/introduction.md +++ b/documentation/specs/src/introduction.md @@ -2,7 +2,7 @@ Welcome to the Namada specification! -## What is Namada? +## What is Namada? Namada is a sovereign proof-of-stake blockchain, using Tendermint BFT consensus, which enables multi-asset private transfers for any native or non-native asset
+You can learn more about Namada [here](https://blog.namada.net/introducing-namada-interchain-asset-agnostic-privacy/). ### What is Anoma? -The Anoma protocol is designed to facilitate the operation of networked fractal instances, which intercommunicate but can utilise varied state machines and security models. +The Anoma protocol is designed to facilitate the operation of networked fractal instances, which intercommunicate but can utilise varied state machines and security models. A fractal instance is an instance of the Anoma consensus and execution protocols operated by a set of networked validators. Anoma’s fractal instance architecture is an attempt to build a platform which is architecturally homogeneous but with a heterogeneous security model. Thus, different fractal instances may specialise in different tasks and serve different communities. -### How does Namada relate to Anoma? +### How does Namada relate to Anoma? The Namada instance is the first such fractal instance, focused exclusively on the use-case of private asset transfers. Namada is also a helpful stepping stone to finalise, test, and launch a protocol version that is simpler than the full -Anoma protocol but still encapsulates a unified and useful set of features. +Anoma protocol but still encapsulates a unified and useful set of features. ### Raison d'être @@ -41,7 +41,7 @@ and fungible or non-fungible assets (such as ERC20 tokens) sent over a custom Et reduces transfer costs and streamlines UX as much as possible. Once assets are on Namada, shielded transfers are cheap and all assets contribute to the same anonymity set. -Users on Namada can earn rewards, retain privacy of assets, and contribute to shared privacy. +Users on Namada can earn rewards, retain privacy of assets, and contribute to shared privacy. ### Layout of this specification @@ -54,4 +54,4 @@ The Namada specification documents are organised into four sub-sections: This book is written using [mdBook](https://rust-lang.github.io/mdBook/). The source can be found in the [Namada repository](https://github.com/anoma/namada/tree/main/documentation/specs). -[Contributions](https://github.com/anoma/namada/blob/main/CONTRIBUTING.md) to the contents and the structure of this book should be made via pull requests. \ No newline at end of file +[Contributions](https://github.com/anoma/namada/blob/main/CONTRIBUTING.md) to the contents and the structure of this book should be made via pull requests. 
diff --git a/documentation/specs/src/masp/ledger-integration.md b/documentation/specs/src/masp/ledger-integration.md index 0f4f0cabd84..fc785b44541 100644 --- a/documentation/specs/src/masp/ledger-integration.md +++ b/documentation/specs/src/masp/ledger-integration.md @@ -266,13 +266,7 @@ Below, the conditions necessary to maintain consistency between the MASP validit * the transparent transaction value pool's amount must equal the containing wrapper transaction's fee amount * the transparent transaction value pool's asset type must be derived from the containing wrapper transaction's fee token * the derivation must be done as specified in `0.3 Derivation of Asset Generator from Asset Identifer` -* If the source address is not the MASP validity predicate, then: - * there must be exactly one transparent input in the shielded transaction and: - * its value must equal that of amount in the containing transfer - this prevents stealing/losing funds from/to the pool - * its asset type must be derived from the token address raw bytes and the current epoch once Borsh serialized from the type `(Address, Epoch)`: - * the address dependency prevents stealing/losing funds from/to the pool - * the current epoch requirement ensures that withdrawers receive their full reward when leaving the shielded pool - * the derivation must be done as specified in `0.3 Derivation of Asset Generator from Asset Identifer` +* If the source address is not the MASP validity predicate, then the transparent transaction value pool's amount must equal zero ## Remarks Below are miscellaneous remarks on the capabilities and limitations of the current MASP implementation: diff --git a/encoding_spec/Cargo.toml b/encoding_spec/Cargo.toml index 3a0b35e39c9..a2aba121a87 100644 --- a/encoding_spec/Cargo.toml +++ b/encoding_spec/Cargo.toml @@ -6,7 +6,7 @@ license = "GPL-3.0" name = "namada_encoding_spec" readme = "../README.md" resolver = "2" -version = "0.14.0" +version = "0.16.0" [features] default = ["abciplus"] diff --git a/genesis/e2e-tests-single-node.toml b/genesis/e2e-tests-single-node.toml index 0abc4e32fc7..65a3e9af366 100644 --- a/genesis/e2e-tests-single-node.toml +++ b/genesis/e2e-tests-single-node.toml @@ -214,7 +214,7 @@ pipeline_len = 2 # for a fault in epoch 'n' up through epoch 'n + unbonding_len'. unbonding_len = 3 # Votes per fundamental staking token (namnam) -tm_votes_per_token = 1 +tm_votes_per_token = 0.1 # Reward for proposing a block. block_proposer_reward = 0.125 # Reward for voting on a block. 
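The `tm_votes_per_token` change above (from 1 to 0.1) scales how staked fundamental token units translate into Tendermint voting power. As an illustrative sketch under that reading (an assumption, not taken from the node's code; the real implementation uses decimal arithmetic rather than `f64`):

```rust
/// Illustrative only: Tendermint votes granted for a given amount of staked
/// fundamental token units (namnam), given the `tm_votes_per_token` parameter.
fn tendermint_voting_power(staked_namnam: u64, tm_votes_per_token: f64) -> u64 {
    // With tm_votes_per_token = 0.1, ten staked namnam yield one vote.
    (staked_namnam as f64 * tm_votes_per_token) as u64
}
```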
diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 14854c9655c..1030215fb64 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_macros" resolver = "2" -version = "0.14.0" +version = "0.16.0" [lib] proc-macro = true diff --git a/proof_of_stake/Cargo.toml b/proof_of_stake/Cargo.toml index 0f4a0f14d62..7b6033121c8 100644 --- a/proof_of_stake/Cargo.toml +++ b/proof_of_stake/Cargo.toml @@ -6,7 +6,7 @@ license = "GPL-3.0" name = "namada_proof_of_stake" readme = "../README.md" resolver = "2" -version = "0.14.0" +version = "0.16.0" [features] default = ["abciplus"] @@ -23,18 +23,18 @@ derivative = "2.2.0" hex = "0.4.3" once_cell = "1.8.0" # A fork with state machine testing -proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} -rust_decimal = { version = "1.26.1", features = ["borsh"] } -rust_decimal_macros = "1.26.1" -tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9"} -thiserror = "1.0.30" +proptest = {git = "https://github.com/heliaxdev/proptest", rev = "8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1", optional = true} +rust_decimal = { version = "=1.26.1", features = ["borsh"] } +rust_decimal_macros = "=1.26.1" +thiserror = "1.0.38" tracing = "0.1.30" data-encoding = "2.3.2" [dev-dependencies] +itertools = "0.10.5" namada_core = {path = "../core", features = ["testing"]} # A fork with state machine testing -proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} +proptest = {git = "https://github.com/heliaxdev/proptest", rev = "8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1"} test-log = {version = "0.2.7", default-features = false, features = ["trace"]} tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} diff --git a/proof_of_stake/proptest-regressions/tests.txt b/proof_of_stake/proptest-regressions/tests.txt index e39d11a921d..5b0ce654572 100644 --- a/proof_of_stake/proptest-regressions/tests.txt +++ b/proof_of_stake/proptest-regressions/tests.txt @@ -5,3 +5,6 @@ # It is recommended to check this file in to source control so that # everyone who runs the test benefits from these saved cases. 
cc b18600cd21cdcbb0ff9ecf81f0479a1181586b37e4bd584457b5a19f4d87c060 # shrinks to pos_params = PosParams { max_validator_slots: 1, pipeline_len: 4, unbonding_len: 5, tm_votes_per_token: 0.2304, block_proposer_reward: 0.125, block_vote_reward: 0.1, max_inflation_rate: 0.1, target_staked_ratio: 0.6667, duplicate_vote_min_slash_rate: 0.001, light_client_attack_min_slash_rate: 0.001 }, genesis_validators = [GenesisValidator { address: Established: atest1v4ehgw36g4pyxdjrg5e5gsfexaq5vsfegvurvv69xqenqdp3xdrr2dzzg5engdfjgeqnzdf3ql3mlz, tokens: Amount { micro: 990878946896 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }] +cc 4ca8db5e4bf570ce34d2811f42e757396bbd37a9afc636b496d0b77dcb5b7586 # shrinks to pos_params = PosParams { max_validator_slots: 1, pipeline_len: 3, unbonding_len: 5, tm_votes_per_token: 0.3531, block_proposer_reward: 0.125, block_vote_reward: 0.1, max_inflation_rate: 0.1, target_staked_ratio: 0.6667, duplicate_vote_min_slash_rate: 0.001, light_client_attack_min_slash_rate: 0.001 }, genesis_validators = [GenesisValidator { address: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, tokens: Amount { micro: 3 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, tokens: Amount { micro: 6 }, consensus_key: Ed25519(PublicKey(VerificationKey("ff87a0b0a3c7c0ce827e9cada5ff79e75a44a0633bfcb5b50f99307ddb26b337"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }] +cc 35e6eb2f6c1c5115484352d1ae883d32e6b382ed9adb00de8f2ae028d12e183e # shrinks to pos_params = PosParams { max_validator_slots: 1, pipeline_len: 3, unbonding_len: 5, tm_votes_per_token: 0.8613, block_proposer_reward: 0.125, block_vote_reward: 0.1, max_inflation_rate: 0.1, target_staked_ratio: 0.6667, duplicate_vote_min_slash_rate: 0.001, light_client_attack_min_slash_rate: 0.001 }, genesis_validators = [GenesisValidator { address: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, tokens: Amount { micro: 1 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }] +cc eb5440930dc754ae0328a6c06239b4cac4c0ae9dff5cdbdf3cfb2ab0900d1fd1 # shrinks to pos_params = PosParams { max_validator_slots: 1, pipeline_len: 3, unbonding_len: 5, tm_votes_per_token: 0.217, block_proposer_reward: 0.125, block_vote_reward: 0.1, max_inflation_rate: 0.1, target_staked_ratio: 0.6667, duplicate_vote_min_slash_rate: 0.001, light_client_attack_min_slash_rate: 0.001 }, start_epoch = Epoch(472), genesis_validators = [GenesisValidator { address: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, tokens: Amount { micro: 8 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, tokens: Amount { micro: 6 }, consensus_key: 
Ed25519(PublicKey(VerificationKey("ff87a0b0a3c7c0ce827e9cada5ff79e75a44a0633bfcb5b50f99307ddb26b337"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, tokens: Amount { micro: 6 }, consensus_key: Ed25519(PublicKey(VerificationKey("191fc38f134aaf1b7fdb1f86330b9d03e94bd4ba884f490389de964448e89b3f"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, tokens: Amount { micro: 8 }, consensus_key: Ed25519(PublicKey(VerificationKey("c5bbbb60e412879bbec7bb769804fa8e36e68af10d5477280b63deeaca931bed"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, tokens: Amount { micro: 8 }, consensus_key: Ed25519(PublicKey(VerificationKey("4f44e6c7bdfed3d9f48d86149ee3d29382cae8c83ca253e06a70be54a301828b"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }] diff --git a/proof_of_stake/proptest-regressions/tests/state_machine.txt b/proof_of_stake/proptest-regressions/tests/state_machine.txt new file mode 100644 index 00000000000..0022fdc698f --- /dev/null +++ b/proof_of_stake/proptest-regressions/tests/state_machine.txt @@ -0,0 +1,11 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc d96c87f575b0ded4d16fb2ccb9496cb70688e80965289b15f4289b27f74936e0 # shrinks to (initial_state, transitions) = (AbstractPosState { epoch: Epoch(0), params: PosParams { max_validator_slots: 2, pipeline_len: 7, unbonding_len: 10, tm_votes_per_token: 0.3869, block_proposer_reward: 0.125, block_vote_reward: 0.1, max_inflation_rate: 0.1, target_staked_ratio: 0.6667, duplicate_vote_min_slash_rate: 0.001, light_client_attack_min_slash_rate: 0.001 }, genesis_validators: [GenesisValidator { address: Established: atest1v4ehgw36x5cngdejg9pyyd3egscnwvzxgsenjvjpxaq5zvpkxccrxsejxv6y2d6xgerrsv3cjrfert, tokens: Amount { micro: 188390939637 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw368yeyydzpxqmnyv29xfq523p3gve5zse5g9zyy329gfqnzwfcggmnys3sgve52se4v5em0f, tokens: Amount { micro: 465797340965 }, consensus_key: Ed25519(PublicKey(VerificationKey("ff87a0b0a3c7c0ce827e9cada5ff79e75a44a0633bfcb5b50f99307ddb26b337"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36xqe5y3fn8yenzd3egezrgs3cxuurwd3hxgmr2wf4gcmrjv2rg56yy33cxfprz3f5yefak4, tokens: Amount { micro: 954894516994 }, consensus_key: Ed25519(PublicKey(VerificationKey("191fc38f134aaf1b7fdb1f86330b9d03e94bd4ba884f490389de964448e89b3f"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }], bonds: {Epoch(0): {BondId { source: Established: atest1v4ehgw36x5cngdejg9pyyd3egscnwvzxgsenjvjpxaq5zvpkxccrxsejxv6y2d6xgerrsv3cjrfert, validator: Established: atest1v4ehgw36x5cngdejg9pyyd3egscnwvzxgsenjvjpxaq5zvpkxccrxsejxv6y2d6xgerrsv3cjrfert }: 188390939637, BondId { source: 
Established: atest1v4ehgw368yeyydzpxqmnyv29xfq523p3gve5zse5g9zyy329gfqnzwfcggmnys3sgve52se4v5em0f, validator: Established: atest1v4ehgw368yeyydzpxqmnyv29xfq523p3gve5zse5g9zyy329gfqnzwfcggmnys3sgve52se4v5em0f }: 465797340965, BondId { source: Established: atest1v4ehgw36xqe5y3fn8yenzd3egezrgs3cxuurwd3hxgmr2wf4gcmrjv2rg56yy33cxfprz3f5yefak4, validator: Established: atest1v4ehgw36xqe5y3fn8yenzd3egezrgs3cxuurwd3hxgmr2wf4gcmrjv2rg56yy33cxfprz3f5yefak4 }: 954894516994}}, total_stakes: {Epoch(0): {Established: atest1v4ehgw36x5cngdejg9pyyd3egscnwvzxgsenjvjpxaq5zvpkxccrxsejxv6y2d6xgerrsv3cjrfert: Amount { micro: 188390939637 }, Established: atest1v4ehgw368yeyydzpxqmnyv29xfq523p3gve5zse5g9zyy329gfqnzwfcggmnys3sgve52se4v5em0f: Amount { micro: 465797340965 }, Established: atest1v4ehgw36xqe5y3fn8yenzd3egezrgs3cxuurwd3hxgmr2wf4gcmrjv2rg56yy33cxfprz3f5yefak4: Amount { micro: 954894516994 }}}, consensus_set: {Epoch(0): {Amount { micro: 188390939637 }: [Established: atest1v4ehgw36x5cngdejg9pyyd3egscnwvzxgsenjvjpxaq5zvpkxccrxsejxv6y2d6xgerrsv3cjrfert], Amount { micro: 465797340965 }: [Established: atest1v4ehgw368yeyydzpxqmnyv29xfq523p3gve5zse5g9zyy329gfqnzwfcggmnys3sgve52se4v5em0f]}}, below_capacity_set: {Epoch(0): {ReverseOrdTokenAmount(Amount { micro: 954894516994 }): [Established: atest1v4ehgw36xqe5y3fn8yenzd3egezrgs3cxuurwd3hxgmr2wf4gcmrjv2rg56yy33cxfprz3f5yefak4]}} }, [Bond { id: BondId { source: Established: atest1v4ehgw36g4zrs3fcxfpnw3pjxucrzv6pg3pyx3pex3py23pexscnzwz9xdzngvjxxfry2dzycuunza, validator: Established: atest1v4ehgw36g4zrs3fcxfpnw3pjxucrzv6pg3pyx3pex3py23pexscnzwz9xdzngvjxxfry2dzycuunza }, amount: Amount { micro: 1238 } }]) +cc 4633c576fa7c7e292a1902de35c186e539bd1fe37c4a23e9b3982e91ade7b2ca # shrinks to (initial_state, transitions) = (AbstractPosState { epoch: Epoch(0), params: PosParams { max_validator_slots: 4, pipeline_len: 7, unbonding_len: 9, tm_votes_per_token: 0.6158, block_proposer_reward: 0.125, block_vote_reward: 0.1, max_inflation_rate: 0.1, target_staked_ratio: 0.6667, duplicate_vote_min_slash_rate: 0.001, light_client_attack_min_slash_rate: 0.001 }, genesis_validators: [GenesisValidator { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, tokens: Amount { micro: 27248298187 }, consensus_key: Ed25519(PublicKey(VerificationKey("c5bbbb60e412879bbec7bb769804fa8e36e68af10d5477280b63deeaca931bed"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, tokens: Amount { micro: 372197384649 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, tokens: Amount { micro: 599772865740 }, consensus_key: Ed25519(PublicKey(VerificationKey("ff87a0b0a3c7c0ce827e9cada5ff79e75a44a0633bfcb5b50f99307ddb26b337"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, tokens: Amount { micro: 695837066404 }, consensus_key: Ed25519(PublicKey(VerificationKey("191fc38f134aaf1b7fdb1f86330b9d03e94bd4ba884f490389de964448e89b3f"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }], bonds: {Epoch(0): {BondId { source: 
Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }: 599772865740, BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }: 695837066404, BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }: 27248298187, BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }: 372197384649}}, total_stakes: {Epoch(0): {Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 695837066404, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 27248298187, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 372197384649, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 599772865740}}, consensus_set: {Epoch(0): {Amount { micro: 27248298187 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 372197384649 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6], Amount { micro: 599772865740 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { micro: 695837066404 }: [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv]}, Epoch(1): {Amount { micro: 27248298187 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 372197384649 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6], Amount { micro: 599772865740 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { micro: 695837066404 }: [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv]}, Epoch(2): {Amount { micro: 27248298187 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 372197384649 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6], Amount { micro: 599772865740 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { micro: 695837066404 }: [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv]}, Epoch(3): {Amount { micro: 27248298187 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 372197384649 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6], Amount { micro: 599772865740 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { micro: 695837066404 }: [Established: 
atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv]}, Epoch(4): {Amount { micro: 27248298187 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 372197384649 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6], Amount { micro: 599772865740 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { micro: 695837066404 }: [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv]}, Epoch(5): {Amount { micro: 27248298187 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 372197384649 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6], Amount { micro: 599772865740 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { micro: 695837066404 }: [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv]}, Epoch(6): {Amount { micro: 27248298187 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 372197384649 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6], Amount { micro: 599772865740 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { micro: 695837066404 }: [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv]}, Epoch(7): {Amount { micro: 27248298187 }: [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], Amount { micro: 372197384649 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6], Amount { micro: 599772865740 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk], Amount { micro: 695837066404 }: [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv]}}, below_capacity_set: {Epoch(0): {}, Epoch(1): {}, Epoch(2): {}, Epoch(3): {}, Epoch(4): {}, Epoch(5): {}, Epoch(6): {}, Epoch(7): {}}, validator_states: {Epoch(0): {Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus}, Epoch(1): {Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus}, Epoch(2): {Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, 
Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus}, Epoch(3): {Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus}, Epoch(4): {Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus}, Epoch(5): {Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus}, Epoch(6): {Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus}, Epoch(7): {Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: Consensus, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: Consensus}} }, [NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { micro: 8782278 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { micro: 1190208 } }, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { micro: 7795726 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: 
atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }, amount: Amount { micro: 6827686 } }, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { micro: 8183306 } }, NextEpoch, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { micro: 9082723 } }, NextEpoch, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { micro: 162577 } }, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { micro: 5422009 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { micro: 9752213 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { micro: 143033 } }, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { micro: 2918291 } }, Bond { id: BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }, amount: Amount { micro: 3686768 } }, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { micro: 6956073 } }, NextEpoch, NextEpoch, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { micro: 6091560 } }, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { micro: 5082475 } }, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: 
atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { micro: 1116228 } }, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { micro: 2420024 } }, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { micro: 4430691 } }, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }, amount: Amount { micro: 1521967 } }]) +cc cbb985b391e16cb35fb2279ed2530c431e894f44fd269fe6cf76a8cdf118f1a0 # shrinks to (initial_state, transitions) = (AbstractPosState { epoch: Epoch(0), params: PosParams { max_validator_slots: 1, pipeline_len: 2, unbonding_len: 3, tm_votes_per_token: 0.0001, block_proposer_reward: 0.125, block_vote_reward: 0.1, max_inflation_rate: 0.1, target_staked_ratio: 0.6667, duplicate_vote_min_slash_rate: 0.001, light_client_attack_min_slash_rate: 0.001 }, genesis_validators: [GenesisValidator { address: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, tokens: Amount { micro: 781732759169 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }], bonds: {Epoch(0): {BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }: 781732759169}}, total_stakes: {Epoch(0): {Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 781732759169}}, consensus_set: {Epoch(0): {Amount { micro: 781732759169 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(1): {Amount { micro: 781732759169 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(2): {Amount { micro: 781732759169 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}}, below_capacity_set: {Epoch(0): {}, Epoch(1): {}, Epoch(2): {}}, validator_states: {Epoch(0): {Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus}, Epoch(1): {Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus}, Epoch(2): {Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus}} }, [NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: 
atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { micro: 1 } }, NextEpoch, NextEpoch, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { micro: 1 } }]) +cc cdf16c113fb6313f325503cf9101b8b5c23ff820bd8952d82ffb82c4eebebdbc # shrinks to (initial_state, transitions) = (AbstractPosState { epoch: Epoch(0), params: PosParams { max_validator_slots: 1, pipeline_len: 2, unbonding_len: 3, tm_votes_per_token: 0.0001, block_proposer_reward: 0.125, block_vote_reward: 0.1, max_inflation_rate: 0.1, target_staked_ratio: 0.6667, duplicate_vote_min_slash_rate: 0.001, light_client_attack_min_slash_rate: 0.001 }, genesis_validators: [GenesisValidator { address: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, tokens: Amount { micro: 139124683733 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }], bonds: {Epoch(0): {BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }: 139124683733}}, total_stakes: {Epoch(0): {Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 139124683733}}, consensus_set: {Epoch(0): {Amount { micro: 139124683733 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(1): {Amount { micro: 139124683733 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}, Epoch(2): {Amount { micro: 139124683733 }: [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6]}}, below_capacity_set: {Epoch(0): {}, Epoch(1): {}, Epoch(2): {}}, validator_states: {Epoch(0): {Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus}, Epoch(1): {Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus}, Epoch(2): {Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: Consensus}} }, [NextEpoch, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { micro: 1 } }, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, NextEpoch, Bond { id: BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }, amount: Amount { micro: 1 } }]) +cc fda96bfcdb63767251702535cfb4fd995d1fdda7d671fd085e2a536f00f2f6dd # shrinks to (initial_state, transitions) = (AbstractPosState { epoch: Epoch(0), params: PosParams { 
max_validator_slots: 1, pipeline_len: 2, unbonding_len: 3, tm_votes_per_token: 0.0001, block_proposer_reward: 0.125, block_vote_reward: 0.1, max_inflation_rate: 0.1, target_staked_ratio: 0.6667, duplicate_vote_min_slash_rate: 0.001, light_client_attack_min_slash_rate: 0.001 }, genesis_validators: [GenesisValidator { address: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, tokens: Amount { micro: 2 }, consensus_key: Ed25519(PublicKey(VerificationKey("ff87a0b0a3c7c0ce827e9cada5ff79e75a44a0633bfcb5b50f99307ddb26b337"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, tokens: Amount { micro: 4 }, consensus_key: Ed25519(PublicKey(VerificationKey("191fc38f134aaf1b7fdb1f86330b9d03e94bd4ba884f490389de964448e89b3f"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, tokens: Amount { micro: 5 }, consensus_key: Ed25519(PublicKey(VerificationKey("ee1aa49a4459dfe813a3cf6eb882041230c7b2558469de81f87c9bf23bf10a03"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, tokens: Amount { micro: 6 }, consensus_key: Ed25519(PublicKey(VerificationKey("c5bbbb60e412879bbec7bb769804fa8e36e68af10d5477280b63deeaca931bed"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }, GenesisValidator { address: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, tokens: Amount { micro: 7 }, consensus_key: Ed25519(PublicKey(VerificationKey("4f44e6c7bdfed3d9f48d86149ee3d29382cae8c83ca253e06a70be54a301828b"))), commission_rate: 0.05, max_commission_rate_change: 0.001 }], bonds: {Epoch(0): {BondId { source: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk, validator: Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk }: 2, BondId { source: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6, validator: Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6 }: 5, BondId { source: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd, validator: Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd }: 7, BondId { source: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv, validator: Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv }: 4, BondId { source: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3, validator: Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3 }: 6}}, total_stakes: {Epoch(0): {Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: 4, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: 6, Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: 7, Established: 
atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: 2, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: 5}}, consensus_set: {Epoch(0): {Amount { micro: 2 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk]}, Epoch(1): {Amount { micro: 2 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk]}, Epoch(2): {Amount { micro: 2 }: [Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk]}}, below_capacity_set: {Epoch(0): {ReverseOrdTokenAmount(Amount { micro: 4 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { micro: 5 }): [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6], ReverseOrdTokenAmount(Amount { micro: 6 }): [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], ReverseOrdTokenAmount(Amount { micro: 7 }): [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd]}, Epoch(1): {ReverseOrdTokenAmount(Amount { micro: 4 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { micro: 5 }): [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6], ReverseOrdTokenAmount(Amount { micro: 6 }): [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], ReverseOrdTokenAmount(Amount { micro: 7 }): [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd]}, Epoch(2): {ReverseOrdTokenAmount(Amount { micro: 4 }): [Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv], ReverseOrdTokenAmount(Amount { micro: 5 }): [Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6], ReverseOrdTokenAmount(Amount { micro: 6 }): [Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3], ReverseOrdTokenAmount(Amount { micro: 7 }): [Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd]}}, validator_states: {Epoch(0): {Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: BelowCapacity, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: BelowCapacity, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: BelowCapacity, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus}, Epoch(1): {Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: BelowCapacity, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: BelowCapacity, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: BelowCapacity, Established: 
atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus}, Epoch(2): {Established: atest1v4ehgw36xgm5ydpkxq6nxdzxxveyg3jygceyzwpnx4prvwpnx5ey2wpnx9zrj3phxvcnjwzpn29wcd: BelowCapacity, Established: atest1v4ehgw36x5unyvphgc6yx32rgvcyvd35g3p5y3zx89znzd6zxgerqsjp89qnqvzyxsenyvehtufkzv: BelowCapacity, Established: atest1v4ehgw36g3qnv3fnxvu5z3jpx5urjsesxs6ny3pcgs652333x3pn2wzyx4rrqwpngveny32p9qxcv3: BelowCapacity, Established: atest1v4ehgw36gcur2v2p89z5ys6xgdqngvjxxuu52v3excm52sejx9znwdpjgfq5vv6rxgurwvzxn85ca6: BelowCapacity, Established: atest1v4ehgw36gsm5xvzygg65zvjpxpprw32z89q5y334gvenzdf5x5e5zsjpgfrygwpc8qcnswf32ad0uk: Consensus}}, unbonds: {} }, [InitValidator { address: Established: atest1v4ehgw36xgunxvj9xqmny3jyxycnzdzxxqeng33ngvunqsfsx5mnwdfjgvenvwfk89prwdpjd0cjrk, consensus_key: Ed25519(PublicKey(VerificationKey("b9c6ee1630ef3e711144a648db06bbb2284f7274cfbee53ffcee503cc1a49200"))), commission_rate: 0, max_commission_rate_change: 0 }, Bond { id: BondId { source: Established: atest1v4ehgw36xgunxvj9xqmny3jyxycnzdzxxqeng33ngvunqsfsx5mnwdfjgvenvwfk89prwdpjd0cjrk, validator: Established: atest1v4ehgw36g3pyvvekx3q52dzr8q6ngvee8pzrzv2xgscr2sfh8ymyzwfjxdzrwv3jxuur2s2ydfjhs6 }, amount: Amount { micro: 1 } }]) diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 9a6fecccd31..4899ae1e1db 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -21,6 +21,8 @@ use crate::parameters::PosParams; pub const LAZY_MAP_SUB_KEY: &str = "lazy_map"; /// Sub-key for an epoched data structure's last (most recent) epoch of update pub const LAST_UPDATE_SUB_KEY: &str = "last_update"; +/// Sub-key for an epoched data structure's oldest epoch with some data +pub const OLDEST_EPOCH_SUB_KEY: &str = "oldest_epoch"; /// Discrete epoched data handle pub struct Epoched< @@ -81,27 +83,13 @@ where value: Data, current_epoch: Epoch, ) -> storage_api::Result<()> - where - S: StorageWrite + StorageRead, - { - self.init(storage, value, current_epoch, 0) - } - - /// Initialize new data at the given epoch offset. - pub fn init( - &self, - storage: &mut S, - value: Data, - current_epoch: Epoch, - offset: u64, - ) -> storage_api::Result<()> where S: StorageWrite + StorageRead, { let key = self.get_last_update_storage_key(); storage.write(&key, current_epoch)?; - - self.set_at_epoch(storage, value, current_epoch, offset) + self.set_oldest_epoch(storage, current_epoch)?; + self.set_at_epoch(storage, value, current_epoch, 0) } /// Find the value for the given epoch or a nearest epoch before it. @@ -144,7 +132,7 @@ where } } - /// Set the value at the given epoch offset. + /// Initialize or set the value at the given epoch offset. pub fn set( &self, storage: &mut S, @@ -175,10 +163,11 @@ where Ok(()) } - /// Update the data associated with epochs, if needed. Any key-value with - /// epoch before the oldest stored epoch is dropped. If the oldest - /// stored epoch is not already associated with some value, the latest - /// value from the dropped values, if any, is associated with it. + /// Update the data associated with epochs to trim historical data, if + /// needed. Any value with epoch before the oldest stored epoch to be + /// kept is dropped. If the oldest stored epoch is not already + /// associated with some value, the latest value from the dropped + /// values, if any, is associated with it. 
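+    // Illustrative sketch of the trimming behaviour (mirrors the unit tests + // added below): with NUM_PAST_EPOCHS = 2, a call at current_epoch = + // Epoch(10) drops epochs 0..=7, re-inserts the most recent dropped value + // (if any) at the new oldest kept epoch 8, and moves the stored oldest + // epoch up to Epoch(8).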
fn update_data<S>( &self, storage: &mut S, @@ -188,34 +177,63 @@ where S: StorageWrite + StorageRead, { let last_update = self.get_last_update(storage)?; - if let Some(last_update) = last_update { - let expected_epoch = Self::sub_past_epochs(current_epoch); - if expected_epoch == last_update { - return Ok(()); - } else { - let diff = expected_epoch.0 - last_update.0; + let oldest_epoch = self.get_oldest_epoch(storage)?; + if let (Some(last_update), Some(oldest_epoch)) = + (last_update, oldest_epoch) + { + let oldest_to_keep = current_epoch + .0 + .checked_sub(NUM_PAST_EPOCHS) + .unwrap_or_default(); + if oldest_epoch.0 < oldest_to_keep { + let diff = oldest_to_keep - oldest_epoch.0; + // Go through the epochs before the expected oldest epoch and + // keep the latest one + tracing::debug!( + "Trimming data for epoched data in epoch {current_epoch}, \ + last updated at {last_update}." + ); let data_handler = self.get_data_handler(); let mut latest_value: Option<Data> = None; - for offset in 1..diff + 1 { - let old = data_handler - .remove(storage, &Epoch(expected_epoch.0 - offset))?; - if old.is_some() && latest_value.is_none() { - latest_value = old; + // Remove data before the new oldest epoch, keep the latest + // value + for epoch in oldest_epoch.iter_range(diff) { + let removed = data_handler.remove(storage, &epoch)?; + if removed.is_some() { + tracing::debug!("Removed value at epoch {epoch}"); + latest_value = removed; } } if let Some(latest_value) = latest_value { + let new_oldest_epoch = Self::sub_past_epochs(current_epoch); // TODO we can add `contains_key` to LazyMap - if data_handler.get(storage, &expected_epoch)?.is_none() { + if data_handler.get(storage, &new_oldest_epoch)?.is_none() { + tracing::debug!( + "Setting latest value at epoch \ + {new_oldest_epoch}: {latest_value:?}" + ); data_handler.insert( storage, - expected_epoch, + new_oldest_epoch, latest_value, )?; } + self.set_oldest_epoch(storage, new_oldest_epoch)?; } + // Update the epoch of the last update to the current epoch + let key = self.get_last_update_storage_key(); + storage.write(&key, current_epoch)?; + return Ok(()); } - let key = self.get_last_update_storage_key(); - storage.write(&key, current_epoch)?; + } + + // Set the epoch of the last update to the current epoch + let key = self.get_last_update_storage_key(); + storage.write(&key, current_epoch)?; + + // If there's no oldest epoch written yet, set it to the current one + if oldest_epoch.is_none() { + self.set_oldest_epoch(storage, current_epoch)?; } Ok(()) } @@ -248,6 +266,35 @@ where fn sub_past_epochs(epoch: Epoch) -> Epoch { Epoch(epoch.0.checked_sub(NUM_PAST_EPOCHS).unwrap_or_default()) } + + fn get_oldest_epoch_storage_key(&self) -> storage::Key { + self.storage_prefix + .push(&OLDEST_EPOCH_SUB_KEY.to_owned()) + .unwrap() + } + + fn get_oldest_epoch<S>( + &self, + storage: &S, + ) -> storage_api::Result<Option<Epoch>> + where + S: StorageRead, + { + let key = self.get_oldest_epoch_storage_key(); + storage.read(&key) + } + + fn set_oldest_epoch<S>( + &self, + storage: &mut S, + new_oldest_epoch: Epoch, + ) -> storage_api::Result<()> + where + S: StorageRead + StorageWrite, + { + let key = self.get_oldest_epoch_storage_key(); + storage.write(&key, new_oldest_epoch) + } } impl @@ -347,6 +394,7 @@ where Data: BorshSerialize + BorshDeserialize + ops::Add<Output = Data> + + ops::AddAssign + 'static + Debug, { @@ -366,27 +414,13 @@ where value: Data, current_epoch: Epoch, ) -> storage_api::Result<()> - where - S: StorageWrite + StorageRead, - { - self.init(storage, value, current_epoch, 0) - } - -
/// Initialize new data at the given epoch offset. - pub fn init<S>( - &self, - storage: &mut S, - value: Data, - current_epoch: Epoch, - offset: u64, - ) -> storage_api::Result<()> where S: StorageWrite + StorageRead, { let key = self.get_last_update_storage_key(); storage.write(&key, current_epoch)?; - - self.set_at_epoch(storage, value, current_epoch, offset) + self.set_oldest_epoch(storage, current_epoch)?; + self.set_at_epoch(storage, value, current_epoch, 0) } /// Get the delta value at the given epoch @@ -400,34 +434,6 @@ where S: StorageRead, { self.get_data_handler().get(storage, &epoch) - // let last_update = self.get_last_update(storage)?; - // match last_update { - // None => Ok(None), - // Some(last_update) => { - // let data_handler = self.get_data_handler(); - // let future_most_epoch = - // last_update + FutureEpochs::value(params); - // // Epoch can be a lot greater than the epoch where - // // a value is recorded, we check the upper bound - // // epoch of the LazyMap data - // let mut epoch = std::cmp::min(epoch, future_most_epoch); - // loop { - // let res = data_handler.get(storage, &epoch)?; - // match res { - // Some(_) => return Ok(res), - // None => { - // if epoch.0 > 0 - // && epoch > Self::sub_past_epochs(last_update) - // { - // epoch = Epoch(epoch.0 - 1); - // } else { - // return Ok(None); - // } - // } - // } - // } - // } - // } } /// Get the sum of the delta values up through the given epoch @@ -440,18 +446,12 @@ where S: StorageRead, { - // TODO: oddly failing to do correctly with iter over - // self.get_data_handler() for some reason (it only finds the - // first entry in iteration then None afterward). Figure - // this out!!! - - // println!("GET_SUM AT EPOCH {}", epoch.clone()); let last_update = self.get_last_update(storage)?; match last_update { None => Ok(None), Some(last_update) => { let data_handler = self.get_data_handler(); - + let start_epoch = Self::sub_past_epochs(last_update); let future_most_epoch = last_update + FutureEpochs::value(params); @@ -459,53 +459,24 @@ where // a value is recorded, we check the upper bound // epoch of the LazyMap data let epoch = std::cmp::min(epoch, future_most_epoch); - let mut sum: Option<Data> = None; - // ! BELOW IS WHAT IS DESIRED IF ITERATION IS WORKING ! - // for next in data_handler.iter(storage).unwrap() { - // match dbg!((&mut sum, next)) { - // (Some(_), Ok((next_epoch, next_val))) => { - // if next_epoch > epoch { - // return Ok(sum); - // } else { - // sum = sum.map(|cur_sum| cur_sum + next_val) - // } - // } - // (None, Ok((next_epoch, next_val))) => { - // if epoch < next_epoch { - // return Ok(None); - // } else { - // sum = Some(next_val) - // } - // } - // (Some(_), Err(_)) => return Ok(sum), - // // perhaps elaborate with an error - // _ => return Ok(None), - // }; - // } - - // THIS IS THE HACKY METHOD UNTIL I FIGURE OUT WTF GOING ON WITH - // THE ITER - let start_epoch = Self::sub_past_epochs(last_update); - // println!("GETTING SUM OF DELTAS"); + let mut sum: Option<Data> = None; for ep in (start_epoch.0)..=(epoch.0) { - // println!("epoch {}", ep); - - if let Some(val) = data_handler.get(storage, &Epoch(ep))? { - if sum.is_none() { - sum = Some(val); - } else { - sum = sum.map(|cur_sum| cur_sum + val); + if let Some(delta) = + data_handler.get(storage, &Epoch(ep))? + { + match sum.as_mut() { + Some(sum) => *sum += delta, + None => sum = Some(delta), } } - // dbg!(&sum); } Ok(sum) } } } - /// Set the value at the given epoch offset. + /// Initialize or set the value at the given epoch offset.
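+    // For EpochedDelta the stored values are deltas rather than totals, so + // get_sum above folds them up to the queried epoch; e.g. (illustrative + // values) deltas of 1 at Epoch(0) and 2 at Epoch(1) sum to 3 at Epoch(1).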
pub fn set<S>( &self, storage: &mut S, @@ -536,12 +507,9 @@ where Ok(()) } - /// TODO: maybe better description - /// Update the data associated with epochs, if needed. Any key-value with - /// epoch before the oldest stored epoch is added to the key-value with the - /// oldest stored epoch that is kept. If the oldest stored epoch is not - /// already associated with some value, the latest value from the - /// dropped values, if any, is associated with it. + /// Update the data associated with epochs to trim historical data, if + /// needed. Any value with epoch before the oldest epoch to be kept is + /// added to the value at the oldest stored epoch that is kept. fn update_data<S>( &self, storage: &mut S, @@ -551,47 +519,70 @@ where S: StorageWrite + StorageRead, { let last_update = self.get_last_update(storage)?; - if let Some(last_update) = last_update { - let expected_oldest_epoch = Self::sub_past_epochs(current_epoch); - if expected_oldest_epoch != last_update { - // dbg!(last_update, expected_oldest_epoch, current_epoch); - let diff = expected_oldest_epoch - .0 - .checked_sub(last_update.0) - .unwrap_or_default(); + let oldest_epoch = self.get_oldest_epoch(storage)?; + if let (Some(last_update), Some(oldest_epoch)) = + (last_update, oldest_epoch) + { + let oldest_to_keep = current_epoch + .0 + .checked_sub(NUM_PAST_EPOCHS) + .unwrap_or_default(); + if oldest_epoch.0 < oldest_to_keep { + let diff = oldest_to_keep - oldest_epoch.0; + // Go through the epochs before the expected oldest epoch and + // sum them into it + tracing::debug!( + "Trimming data for epoched delta data in epoch \ + {current_epoch}, last updated at {last_update}." + ); let data_handler = self.get_data_handler(); - let mut new_oldest_value: Option<Data> = None; - for offset in 1..diff + 1 { - let old = data_handler.remove( - storage, - &Epoch(expected_oldest_epoch.0 - offset), - )?; - if let Some(old) = old { - match new_oldest_value { - Some(latest) => { - new_oldest_value = Some(latest + old) - } - None => new_oldest_value = Some(old), + // Find the sum of values before the new oldest epoch to be kept + let mut sum: Option<Data> = None; + for epoch in oldest_epoch.iter_range(diff) { + let removed = data_handler.remove(storage, &epoch)?; + if let Some(removed) = removed { + tracing::debug!( + "Removed delta value at epoch {epoch}: {removed:?}" + ); + match sum.as_mut() { + Some(sum) => *sum += removed, + None => sum = Some(removed), } } } - if let Some(new_oldest_value) = new_oldest_value { - // TODO we can add `contains_key` to LazyMap - if data_handler - .get(storage, &expected_oldest_epoch)? - .is_none() - { - data_handler.insert( - storage, - expected_oldest_epoch, - new_oldest_value, - )?; - } + if let Some(sum) = sum { + let new_oldest_epoch = Self::sub_past_epochs(current_epoch); + let new_oldest_epoch_data = + match data_handler.get(storage, &new_oldest_epoch)?
{ + Some(oldest_epoch_data) => oldest_epoch_data + sum, + None => sum, + }; + tracing::debug!( + "Adding new sum at epoch {new_oldest_epoch}: \ + {new_oldest_epoch_data:?}" + ); + data_handler.insert( + storage, + new_oldest_epoch, + new_oldest_epoch_data, + )?; + self.set_oldest_epoch(storage, new_oldest_epoch)?; } + // Update the epoch of the last update to the current epoch + let key = self.get_last_update_storage_key(); + storage.write(&key, current_epoch)?; + return Ok(()); } } + + // Set the epoch of the last update to the current epoch let key = self.get_last_update_storage_key(); storage.write(&key, current_epoch)?; + + // If there's no oldest epoch written yet, set it to the current one + if oldest_epoch.is_none() { + self.set_oldest_epoch(storage, current_epoch)?; } Ok(()) } @@ -637,6 +628,35 @@ where fn sub_past_epochs(epoch: Epoch) -> Epoch { Epoch(epoch.0.checked_sub(NUM_PAST_EPOCHS).unwrap_or_default()) } + + fn get_oldest_epoch_storage_key(&self) -> storage::Key { + self.storage_prefix + .push(&OLDEST_EPOCH_SUB_KEY.to_owned()) + .unwrap() + } + + fn get_oldest_epoch<S>( + &self, + storage: &S, + ) -> storage_api::Result<Option<Epoch>> + where + S: StorageRead, + { + let key = self.get_oldest_epoch_storage_key(); + storage.read(&key) + } + + fn set_oldest_epoch<S>( + &self, + storage: &mut S, + new_oldest_epoch: Epoch, + ) -> storage_api::Result<()> + where + S: StorageRead + StorageWrite, + { + let key = self.get_oldest_epoch_storage_key(); + storage.write(&key, new_oldest_epoch) + } } /// Offset at pipeline length. @@ -732,89 +752,382 @@ pub trait EpochOffset: fn dyn_offset() -> DynEpochOffset; } -// mod test { -// use namada_core::ledger::storage::testing::TestStorage; -// use namada_core::types::address::{self, Address}; -// use namada_core::types::storage::Key; -// -// use super::{ -// storage, storage_api, Epoch, LazyMap, NestedEpoched, NestedMap, -// OffsetPipelineLen, -// }; -// -// #[test] -// fn testing_epoched_new() -> storage_api::Result<()> { -// let mut storage = TestStorage::default(); -// -// let key1 = storage::Key::parse("test_nested1").unwrap(); -// let nested1 = -// NestedEpoched::, OffsetPipelineLen>::open( -// key1, -// ); -// nested1.init(&mut storage, Epoch(0))?; -// -// let key2 = storage::Key::parse("test_nested2").unwrap(); -// let nested2 = NestedEpoched::< -// NestedMap>, -// OffsetPipelineLen, -// >::open(key2); -// nested2.init(&mut storage, Epoch(0))?; -// -// dbg!(&nested1.get_last_update_storage_key()); -// dbg!(&nested1.get_last_update(&storage)); -// -// nested1.at(&Epoch(0)).insert( -// &mut storage, -// address::testing::established_address_1(), -// 1432, -// )?; -// dbg!(&nested1.at(&Epoch(0)).iter(&mut storage)?.next()); -// dbg!(&nested1.at(&Epoch(1)).iter(&mut storage)?.next()); -// -// nested2.at(&Epoch(0)).at(&100).insert( -// &mut storage, -// 1, -// address::testing::established_address_2(), -// )?; -// dbg!(&nested2.at(&Epoch(0)).iter(&mut storage)?.next()); -// dbg!(&nested2.at(&Epoch(1)).iter(&mut storage)?.next()); -// -// dbg!(&nested_epoched.get_epoch_key(&Epoch::from(0))); -// -// let epoch = Epoch::from(0); -// let addr = address::testing::established_address_1(); -// let amount: u64 = 234235; -// -// nested_epoched -// .at(&epoch) -// .insert(&mut storage, addr.clone(), amount)?; -// -// let epoch = epoch + 3_u64; -// nested_epoched.at(&epoch).insert( -// &mut storage, -// addr.clone(), -// 999_u64, -// )?; -// -// dbg!(nested_epoched.contains_epoch(&storage, &Epoch::from(0))?); -// dbg!( -// nested_epoched -// .get_data_handler() -//
.get_data_key(&Epoch::from(3)) -// ); -// dbg!(nested_epoched.contains_epoch(&storage, &Epoch::from(3))?); -// dbg!( -// nested_epoched -// .at(&Epoch::from(0)) -// .get(&storage, &addr.clone())? -// ); -// dbg!( -// nested_epoched -// .at(&Epoch::from(3)) -// .get(&storage, &addr.clone())? -// ); -// dbg!(nested_epoched.at(&Epoch::from(3)).get_data_key(&addr)); -// -// Ok(()) -// } -// } +#[cfg(test)] +mod test { + use namada_core::ledger::storage::testing::TestWlStorage; + use test_log::test; + + use super::*; + + #[test] + fn test_epoched_data_trimming() -> storage_api::Result<()> { + let mut s = TestWlStorage::default(); + + const NUM_PAST_EPOCHS: u64 = 2; + let key_prefix = storage::Key::parse("test").unwrap(); + let epoched = Epoched::<u64, OffsetPipelineLen, NUM_PAST_EPOCHS>::open( + key_prefix, + ); + let data_handler = epoched.get_data_handler(); + assert!(epoched.get_last_update(&s)?.is_none()); + assert!(epoched.get_oldest_epoch(&s)?.is_none()); + + epoched.init_at_genesis(&mut s, 0, Epoch(0))?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(0))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(0)); + + epoched.set(&mut s, 1, Epoch(0), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(0))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + + epoched.set(&mut s, 2, Epoch(1), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(1))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + + // Nothing is trimmed yet, oldest kept epoch is 0 + epoched.set(&mut s, 3, Epoch(2), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(2))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + + // Epoch 0 should be trimmed now, oldest kept epoch is 1 + epoched.set(&mut s, 4, Epoch(3), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(3))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(1))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, None); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(4)); + + // Anything before epoch 3 should be trimmed + epoched.set(&mut s, 5, Epoch(5), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(5))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(3))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, None); + assert_eq!(data_handler.get(&s, &Epoch(1))?, None); + assert_eq!(data_handler.get(&s, &Epoch(2))?, None); + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(4)); + assert_eq!(data_handler.get(&s, &Epoch(5))?, Some(5)); + + // Anything before epoch 8 should be trimmed + epoched.set(&mut s, 6, Epoch(10), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(10))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(8))); + for epoch in Epoch(0).iter_range(7) { + assert_eq!(data_handler.get(&s, &epoch)?, None); + } + // The value from the latest epoch 5 is assigned to epoch 8 + assert_eq!(data_handler.get(&s, &Epoch(8))?, Some(5)); + assert_eq!(data_handler.get(&s, &Epoch(9))?, None); + assert_eq!(data_handler.get(&s, &Epoch(10))?, Some(6)); + + Ok(()) + } + + #[test] + fn
test_epoched_without_data_trimming() -> storage_api::Result<()> { + let mut s = TestWlStorage::default(); + + const NUM_PAST_EPOCHS: u64 = u64::MAX; + let key_prefix = storage::Key::parse("test").unwrap(); + let epoched = Epoched::<u64, OffsetPipelineLen, NUM_PAST_EPOCHS>::open( + key_prefix, + ); + let data_handler = epoched.get_data_handler(); + assert!(epoched.get_last_update(&s)?.is_none()); + assert!(epoched.get_oldest_epoch(&s)?.is_none()); + + epoched.init_at_genesis(&mut s, 0, Epoch(0))?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(0))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(0)); + + epoched.set(&mut s, 1, Epoch(0), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(0))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + + epoched.set(&mut s, 2, Epoch(1), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(1))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + + epoched.set(&mut s, 3, Epoch(2), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(2))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + + epoched.set(&mut s, 4, Epoch(3), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(3))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(4)); + + epoched.set(&mut s, 5, Epoch(5), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(5))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(4)); + assert_eq!(data_handler.get(&s, &Epoch(5))?, Some(5)); + + epoched.set(&mut s, 6, Epoch(10), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(10))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(4)); + assert_eq!(data_handler.get(&s, &Epoch(5))?, Some(5)); + assert_eq!(data_handler.get(&s, &Epoch(6))?, None); + assert_eq!(data_handler.get(&s, &Epoch(7))?, None); + assert_eq!(data_handler.get(&s, &Epoch(8))?, None); + assert_eq!(data_handler.get(&s, &Epoch(9))?, None); + assert_eq!(data_handler.get(&s, &Epoch(10))?, Some(6)); + + Ok(()) + } + + #[test] + fn test_epoched_delta_data_trimming() -> storage_api::Result<()> { + let mut s = TestWlStorage::default(); + + const NUM_PAST_EPOCHS: u64 = 2; + let key_prefix = storage::Key::parse("test").unwrap(); + let epoched = + EpochedDelta::<u64, OffsetPipelineLen, NUM_PAST_EPOCHS>::open( + key_prefix, + ); + let data_handler = epoched.get_data_handler(); + assert!(epoched.get_last_update(&s)?.is_none()); + assert!(epoched.get_oldest_epoch(&s)?.is_none()); + + epoched.init_at_genesis(&mut s, 0, Epoch(0))?; +
assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(0))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(0)); + + epoched.set(&mut s, 1, Epoch(0), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(0))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + + epoched.set(&mut s, 2, Epoch(1), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(1))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + + // Nothing is trimmed yet, oldest kept epoch is 0 + epoched.set(&mut s, 3, Epoch(2), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(2))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + + // Epoch 0 should be trimmed now, oldest kept epoch is 1 + epoched.set(&mut s, 4, Epoch(3), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(3))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(1))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, None); + // The value from epoch 0 should be added to epoch 1 + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(3)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(4)); + + // Anything before epoch 3 should be trimmed + epoched.set(&mut s, 5, Epoch(5), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(5))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(3))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, None); + assert_eq!(data_handler.get(&s, &Epoch(1))?, None); + assert_eq!(data_handler.get(&s, &Epoch(2))?, None); + // The values from epoch 1 and 2 should be added to epoch 3 + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(10)); + assert_eq!(data_handler.get(&s, &Epoch(5))?, Some(5)); + + // Anything before epoch 8 should be trimmed + epoched.set(&mut s, 6, Epoch(10), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(10))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(8))); + for epoch in Epoch(0).iter_range(7) { + assert_eq!(data_handler.get(&s, &epoch)?, None); + } + // The values from epoch 3 and 5 should be added to epoch 8 + assert_eq!(data_handler.get(&s, &Epoch(8))?, Some(15)); + assert_eq!(data_handler.get(&s, &Epoch(9))?, None); + assert_eq!(data_handler.get(&s, &Epoch(10))?, Some(6)); + + Ok(()) + } + + #[test] + fn test_epoched_delta_without_data_trimming() -> storage_api::Result<()> { + let mut s = TestWlStorage::default(); + + // Nothing should ever get trimmed + const NUM_PAST_EPOCHS: u64 = u64::MAX; + let key_prefix = storage::Key::parse("test").unwrap(); + let epoched = + EpochedDelta::<u64, OffsetPipelineLen, NUM_PAST_EPOCHS>::open( + key_prefix, + ); + let data_handler = epoched.get_data_handler(); + assert!(epoched.get_last_update(&s)?.is_none()); + assert!(epoched.get_oldest_epoch(&s)?.is_none()); + + epoched.init_at_genesis(&mut s, 0, Epoch(0))?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(0))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(0)); + + epoched.set(&mut s, 1, Epoch(0), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(0))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); +
assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + + epoched.set(&mut s, 2, Epoch(1), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(1))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + + epoched.set(&mut s, 3, Epoch(2), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(2))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + + epoched.set(&mut s, 4, Epoch(3), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(3))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(4)); + + epoched.set(&mut s, 5, Epoch(5), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(5))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(4)); + assert_eq!(data_handler.get(&s, &Epoch(5))?, Some(5)); + + epoched.set(&mut s, 6, Epoch(10), 0)?; + assert_eq!(epoched.get_last_update(&s)?, Some(Epoch(10))); + assert_eq!(epoched.get_oldest_epoch(&s)?, Some(Epoch(0))); + assert_eq!(data_handler.get(&s, &Epoch(0))?, Some(1)); + assert_eq!(data_handler.get(&s, &Epoch(1))?, Some(2)); + assert_eq!(data_handler.get(&s, &Epoch(2))?, Some(3)); + assert_eq!(data_handler.get(&s, &Epoch(3))?, Some(4)); + assert_eq!(data_handler.get(&s, &Epoch(5))?, Some(5)); + assert_eq!(data_handler.get(&s, &Epoch(6))?, None); + assert_eq!(data_handler.get(&s, &Epoch(7))?, None); + assert_eq!(data_handler.get(&s, &Epoch(8))?, None); + assert_eq!(data_handler.get(&s, &Epoch(9))?, None); + assert_eq!(data_handler.get(&s, &Epoch(10))?, Some(6)); + + Ok(()) + } + + // use namada_core::ledger::storage::testing::TestStorage; + // use namada_core::types::address::{self, Address}; + // use namada_core::types::storage::Key; + // + // use super::{ + // storage, storage_api, Epoch, LazyMap, NestedEpoched, NestedMap, + // OffsetPipelineLen, + // }; + // + // #[test] + // fn testing_epoched_new() -> storage_api::Result<()> { + // let mut storage = TestStorage::default(); + // + // let key1 = storage::Key::parse("test_nested1").unwrap(); + // let nested1 = + // NestedEpoched::, OffsetPipelineLen>::open( + // key1, + // ); + // nested1.init(&mut storage, Epoch(0))?; + // + // let key2 = storage::Key::parse("test_nested2").unwrap(); + // let nested2 = NestedEpoched::< + // NestedMap>, + // OffsetPipelineLen, + // >::open(key2); + // nested2.init(&mut storage, Epoch(0))?; + // + // dbg!(&nested1.get_last_update_storage_key()); + // dbg!(&nested1.get_last_update(&storage)); + // + // nested1.at(&Epoch(0)).insert( + // &mut storage, + // address::testing::established_address_1(), + // 1432, + // )?; + // dbg!(&nested1.at(&Epoch(0)).iter(&mut storage)?.next()); + // dbg!(&nested1.at(&Epoch(1)).iter(&mut storage)?.next()); + // + // nested2.at(&Epoch(0)).at(&100).insert( + // &mut storage, + // 1, + // address::testing::established_address_2(), + // )?; + // 
dbg!(&nested2.at(&Epoch(0)).iter(&mut storage)?.next()); + // dbg!(&nested2.at(&Epoch(1)).iter(&mut storage)?.next()); + // + // dbg!(&nested_epoched.get_epoch_key(&Epoch::from(0))); + // + // let epoch = Epoch::from(0); + // let addr = address::testing::established_address_1(); + // let amount: u64 = 234235; + // + // nested_epoched + // .at(&epoch) + // .insert(&mut storage, addr.clone(), amount)?; + // + // let epoch = epoch + 3_u64; + // nested_epoched.at(&epoch).insert( + // &mut storage, + // addr.clone(), + // 999_u64, + // )?; + // + // dbg!(nested_epoched.contains_epoch(&storage, &Epoch::from(0))?); + // dbg!( + // nested_epoched + // .get_data_handler() + // .get_data_key(&Epoch::from(3)) + // ); + // dbg!(nested_epoched.contains_epoch(&storage, &Epoch::from(3))?); + // dbg!( + // nested_epoched + // .at(&Epoch::from(0)) + // .get(&storage, &addr.clone())? + // ); + // dbg!( + // nested_epoched + // .at(&Epoch::from(3)) + // .get(&storage, &addr.clone())? + // ); + // dbg!(nested_epoched.at(&Epoch::from(3)).get_data_key(&addr)); + // + // Ok(()) + // } +} diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index fed35606283..01c5f277a18 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -15,6 +15,7 @@ pub mod btree_set; pub mod epoched; pub mod parameters; +pub mod pos_queries; pub mod rewards; pub mod storage; pub mod types; @@ -32,13 +33,15 @@ use epoched::{EpochOffset, OffsetPipelineLen}; use namada_core::ledger::storage_api::collections::lazy_map::{ NestedSubKey, SubKey, }; -use namada_core::ledger::storage_api::collections::LazyCollection; +use namada_core::ledger::storage_api::collections::{LazyCollection, LazySet}; use namada_core::ledger::storage_api::token::credit_tokens; use namada_core::ledger::storage_api::{ - self, OptionExt, StorageRead, StorageWrite, + self, OptionExt, ResultExt, StorageRead, StorageWrite, +}; +use namada_core::types::address::{Address, InternalAddress}; +use namada_core::types::key::{ + common, tm_consensus_key_raw_hash, PublicKeyTmRawHash, }; -use namada_core::types::address::{self, Address, InternalAddress}; -use namada_core::types::key::{common, tm_consensus_key_raw_hash}; pub use namada_core::types::storage::Epoch; use namada_core::types::token; use once_cell::unsync::Lazy; @@ -46,14 +49,14 @@ use parameters::PosParams; use rewards::PosRewardsCalculator; use rust_decimal::Decimal; use storage::{ - bonds_for_source_prefix, bonds_prefix, get_validator_address_from_bond, - into_tm_voting_power, is_bond_key, is_unbond_key, is_validator_slashes_key, - last_block_proposer_key, mult_amount, mult_change_to_amount, - num_consensus_validators_key, params_key, slashes_prefix, - unbonds_for_source_prefix, unbonds_prefix, validator_address_raw_hash_key, - validator_max_commission_rate_change_key, BondDetails, - BondsAndUnbondsDetail, BondsAndUnbondsDetails, ReverseOrdTokenAmount, - RewardsAccumulator, UnbondDetails, + bonds_for_source_prefix, bonds_prefix, consensus_keys_key, + get_validator_address_from_bond, into_tm_voting_power, is_bond_key, + is_unbond_key, is_validator_slashes_key, last_block_proposer_key, + mult_amount, mult_change_to_amount, num_consensus_validators_key, + params_key, slashes_prefix, unbonds_for_source_prefix, unbonds_prefix, + validator_address_raw_hash_key, validator_max_commission_rate_change_key, + BondDetails, BondsAndUnbondsDetail, BondsAndUnbondsDetails, + ReverseOrdTokenAmount, RewardsAccumulator, UnbondDetails, }; use thiserror::Error; use types::{ @@ -73,9 +76,11 @@ pub const ADDRESS: 
Address = Address::Internal(InternalAddress::PoS); pub const SLASH_POOL_ADDRESS: Address = Address::Internal(InternalAddress::PosSlashPool); -/// Address of the staking token (NAM) -pub fn staking_token_address() -> Address { - address::nam() +/// Address of the staking token (i.e. the native token) +pub fn staking_token_address(storage: &impl StorageRead) -> Address { + storage + .get_native_token() + .expect("Must be able to read native token address") } #[allow(missing_docs)] @@ -88,8 +93,8 @@ pub enum GenesisError { #[allow(missing_docs)] #[derive(Error, Debug)] pub enum InflationError { - #[error("Error")] - Error, + #[error("Error in calculating rewards: {0}")] + Rewards(rewards::RewardsError), } #[allow(missing_docs)] @@ -319,7 +324,7 @@ pub fn delegator_rewards_products_handle( RewardsProducts::open(key) } -/// new init genesis +/// Init genesis pub fn init_genesis( storage: &mut S, params: &PosParams, @@ -345,11 +350,14 @@ where max_commission_rate_change, } in validators { + // This will fail if the key is already being used - the uniqueness must + // be enforced in the genesis configuration to prevent it + try_insert_consensus_key(storage, &consensus_key)?; + total_bonded += tokens; // Insert the validator into a validator set and write its epoched // validator data - // TODO: ValidatorState inside of here insert_validator_into_validator_set( storage, params, @@ -396,7 +404,8 @@ where current_epoch, )?; // Credit bonded token amount to the PoS account - credit_tokens(storage, &staking_token_address(), &ADDRESS, total_bonded)?; + let staking_token = staking_token_address(storage); + credit_tokens(storage, &staking_token, &ADDRESS, total_bonded)?; // Copy the genesis validator set into the pipeline epoch as well for epoch in (current_epoch.next()).iter_range(params.pipeline_len) { copy_validator_sets_and_positions( @@ -408,7 +417,7 @@ where )?; } - println!("FINISHED GENESIS\n"); + tracing::debug!("FINISHED GENESIS"); Ok(()) } @@ -418,8 +427,6 @@ pub fn read_pos_params(storage: &S) -> storage_api::Result where S: StorageRead, { - // let value = storage.read_bytes(¶ms_key())?.unwrap(); - // Ok(decode(value).unwrap()) storage .read(¶ms_key()) .transpose() @@ -559,6 +566,7 @@ pub fn read_validator_stake( where S: StorageRead, { + tracing::debug!("Read validator stake at epoch {}", epoch); let handle = validator_deltas_handle(validator); let amount = handle .get_sum(storage, epoch, params)? @@ -566,8 +574,7 @@ where Ok(amount) } -/// Write PoS validator's consensus key (used for signing block votes). -/// Note: for EpochedDelta, write the value to change storage by +/// Add or remove PoS validator's stake delta value pub fn update_validator_deltas( storage: &mut S, params: &PosParams, @@ -773,7 +780,7 @@ where } } -/// NEW: Self-bond tokens to a validator when `source` is `None` or equal to +/// Self-bond tokens to a validator when `source` is `None` or equal to /// the `validator` address, or delegate tokens from the `source` to the /// `validator`. pub fn bond_tokens( @@ -787,6 +794,7 @@ where S: StorageRead + StorageWrite, { let amount = amount.change(); + tracing::debug!("Bonding token amount {amount} at epoch {current_epoch}"); let params = read_pos_params(storage)?; let pipeline_epoch = current_epoch + params.pipeline_len; if let Some(source) = source { @@ -798,8 +806,6 @@ where ); } } - // TODO: what happens if an address used to be a validator but no longer is? - // Think if the 'get' here needs to be amended. 
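+    // Check the validator state at the pipeline offset, where a new bond + // takes effect.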
let state = validator_state_handle(validator).get( storage, pipeline_epoch, @@ -825,16 +831,15 @@ where // Initialize or update the bond at the pipeline offset let offset = params.pipeline_len; - // TODO: ensure that this method of checking if the bond exists works - - if !bond_handle.get_data_handler().is_empty(storage)? { - let cur_remain = bond_handle - .get_delta_val(storage, current_epoch + offset, ¶ms)? - .unwrap_or_default(); - bond_handle.set(storage, cur_remain + amount, current_epoch, offset)?; - } else { - bond_handle.init(storage, amount, current_epoch, offset)?; - } + let cur_remain = bond_handle + .get_delta_val(storage, current_epoch + offset, ¶ms)? + .unwrap_or_default(); + tracing::debug!( + "Bond remain at offset epoch {}: {}", + current_epoch + offset, + cur_remain + ); + bond_handle.set(storage, cur_remain + amount, current_epoch, offset)?; // Update the validator set update_validator_set(storage, ¶ms, validator, amount, current_epoch)?; @@ -851,9 +856,10 @@ where update_total_deltas(storage, ¶ms, amount, current_epoch)?; // Transfer the bonded tokens from the source to PoS + let staking_token = staking_token_address(storage); transfer_tokens( storage, - &staking_token_address(), + &staking_token, token::Amount::from_change(amount), source, &ADDRESS, @@ -888,7 +894,7 @@ where &target_epoch, address, )?; - validator_state_handle(address).init( + validator_state_handle(address).set( storage, ValidatorState::Consensus, current_epoch, @@ -934,7 +940,7 @@ where &target_epoch, address, )?; - validator_state_handle(address).init( + validator_state_handle(address).set( storage, ValidatorState::Consensus, current_epoch, @@ -949,7 +955,7 @@ where &target_epoch, address, )?; - validator_state_handle(address).init( + validator_state_handle(address).set( storage, ValidatorState::BelowCapacity, current_epoch, @@ -960,7 +966,7 @@ where Ok(()) } -/// NEW: Update validator set when a validator receives a new bond and when +/// Update validator set when a validator receives a new bond and when /// its bond is unbonded (self-bond or delegation). fn update_validator_set( storage: &mut S, @@ -976,6 +982,9 @@ where return Ok(()); } let epoch = current_epoch + params.pipeline_len; + tracing::debug!( + "Update epoch for validator set: {epoch}, validator: {validator}" + ); let consensus_validator_set = consensus_validator_set_handle(); let below_capacity_validator_set = below_capacity_validator_set_handle(); @@ -987,14 +996,12 @@ where let tokens_pre = read_validator_stake(storage, params, validator, epoch)? .unwrap_or_default(); + // tracing::debug!("VALIDATOR STAKE BEFORE UPDATE: {}", tokens_pre); + let tokens_post = tokens_pre.change() + token_change; // TODO: handle overflow or negative vals perhaps with TryFrom let tokens_post = token::Amount::from_change(tokens_post); - // let position = - // validator_set_positions_handle().at(¤t_epoch).get(storage, - // validator) - // TODO: The position is only set when the validator is in consensus or // below_capacity set (not in below_threshold set) let position = @@ -1002,13 +1009,19 @@ where .ok_or_err_msg( "Validator must have a stored validator set position", )?; - let consensus_vals_pre = consensus_val_handle.at(&tokens_pre); - if consensus_vals_pre.contains(storage, &position)? { - // It's initially consensus + let in_consensus = if consensus_vals_pre.contains(storage, &position)? 
{ let val_address = consensus_vals_pre.get(storage, &position)?; - assert!(val_address.is_some()); + debug_assert!(val_address.is_some()); + val_address == Some(validator.clone()) + } else { + false + }; + + if in_consensus { + // It's initially consensus + tracing::debug!("Target validator is consensus"); consensus_vals_pre.remove(storage, &position)?; @@ -1019,6 +1032,7 @@ where )?; if tokens_post < max_below_capacity_validator_amount { + tracing::debug!("Need to swap validators"); // Place the validator into the below-capacity set and promote the // lowest position max below-capacity validator. @@ -1028,9 +1042,9 @@ where let lowest_position = find_first_position(&below_capacity_vals_max, storage)? .unwrap(); - let removed_max_below_capacity = - below_capacity_vals_max.remove(storage, &lowest_position)?; - debug_assert!(removed_max_below_capacity.is_some()); + let removed_max_below_capacity = below_capacity_vals_max + .remove(storage, &lowest_position)? + .expect("Must have been removed"); // Insert the previous max below-capacity validator into the // consensus set @@ -1038,8 +1052,15 @@ where &consensus_val_handle.at(&max_below_capacity_validator_amount), storage, &epoch, - &removed_max_below_capacity.unwrap(), + &removed_max_below_capacity, + )?; + validator_state_handle(&removed_max_below_capacity).set( + storage, + ValidatorState::Consensus, + current_epoch, + params.pipeline_len, )?; + // Insert the current validator into the below-capacity set insert_validator_into_set( &below_capacity_val_handle.at(&tokens_post.into()), @@ -1047,7 +1068,14 @@ where &epoch, validator, )?; + validator_state_handle(validator).set( + storage, + ValidatorState::BelowCapacity, + current_epoch, + params.pipeline_len, + )?; } else { + tracing::debug!("Validator remains in consensus set"); // The current validator should remain in the consensus set - place // it into a new position insert_validator_into_set( @@ -1095,6 +1123,12 @@ where &epoch, &removed_min_consensus, )?; + validator_state_handle(&removed_min_consensus).set( + storage, + ValidatorState::BelowCapacity, + current_epoch, + params.pipeline_len, + )?; // Insert the current validator into the consensus set insert_validator_into_set( @@ -1103,6 +1137,12 @@ where &epoch, validator, )?; + validator_state_handle(validator).set( + storage, + ValidatorState::Consensus, + current_epoch, + params.pipeline_len, + )?; } else { // The current validator should remain in the below-capacity set insert_validator_into_set( @@ -1111,13 +1151,18 @@ where &epoch, validator, )?; + validator_state_handle(validator).set( + storage, + ValidatorState::BelowCapacity, + current_epoch, + params.pipeline_len, + )?; } } Ok(()) } /// Validator sets and positions copying into a future epoch -/// TODO: do we need to copy positions? 
pub fn copy_validator_sets_and_positions( storage: &mut S, current_epoch: Epoch, @@ -1128,10 +1173,6 @@ pub fn copy_validator_sets_and_positions( where S: StorageRead + StorageWrite, { - // TODO: need some logic to determine if the below-capacity validator set - // even needs to be copied (it may truly be empty after having one time - // contained validators in the past) - let prev_epoch = target_epoch - 1; let (consensus, below_capacity) = ( @@ -1170,6 +1211,8 @@ where below_cap_in_mem.insert((stake, position), address); } + tracing::debug!("{consensus_in_mem:?}"); + for ((val_stake, val_position), val_address) in consensus_in_mem.into_iter() { consensus_validator_set @@ -1177,6 +1220,11 @@ where .at(&val_stake) .insert(storage, val_position, val_address)?; } + tracing::debug!("New validator set should be inserted:"); + tracing::debug!( + "{:?}", + read_consensus_validator_set_addresses(storage, target_epoch)? + ); for ((val_stake, val_position), val_address) in below_cap_in_mem.into_iter() { @@ -1320,6 +1368,12 @@ where S: StorageRead + StorageWrite, { let next_position = find_next_position(handle, storage)?; + tracing::debug!( + "Inserting validator {} into position {:?} at epoch {}", + address.clone(), + next_position.clone(), + epoch.clone() + ); handle.insert(storage, next_position, address.clone())?; validator_set_positions_handle().at(epoch).insert( storage, @@ -1329,7 +1383,7 @@ where Ok(()) } -/// NEW: Unbond. +/// Unbond. pub fn unbond_tokens( storage: &mut S, source: Option<&Address>, @@ -1341,8 +1395,14 @@ where S: StorageRead + StorageWrite, { let amount = amount.change(); + tracing::debug!("Unbonding token amount {amount} at epoch {current_epoch}"); let params = read_pos_params(storage)?; let pipeline_epoch = current_epoch + params.pipeline_len; + tracing::debug!( + "Current validator stake at pipeline: {}", + read_validator_stake(storage, ¶ms, validator, pipeline_epoch)? + .unwrap_or_default() + ); if let Some(source) = source { if source != validator @@ -1357,11 +1417,8 @@ where return Err(BondError::NotAValidator(validator.clone()).into()); } - // Should be able to unbond inactive validators, but we'll need to prevent - // jailed unbonding with slashing - - // Check that validator is not inactive at anywhere between the current - // epoch and pipeline offset + // TODO: Should be able to unbond inactive validators, but we'll need to + // prevent jailed unbonding with slashing // let validator_state_handle = validator_state_handle(validator); // for epoch in current_epoch.iter_range(params.pipeline_len) { // if let Some(ValidatorState::Inactive) = @@ -1406,9 +1463,9 @@ where .get_data_handler() .iter(storage)? 
.collect(); - // println!("\nBonds before decrementing:"); + // tracing::debug!("Bonds before decrementing:"); // for ep in Epoch::default().iter_range(params.unbonding_len * 3) { - // println!( + // tracing::debug!( // "bond delta at epoch {}: {}", // ep, // bond_remain_handle @@ -1460,9 +1517,9 @@ where )?; } - // println!("\nBonds after decrementing:"); + // tracing::debug!("Bonds after decrementing:"); // for ep in Epoch::default().iter_range(params.unbonding_len * 3) { - // println!( + // tracing::debug!( // "bond delta at epoch {}: {}", // ep, // bond_remain_handle @@ -1471,6 +1528,7 @@ where // ) // } + tracing::debug!("Updating validator set for unbonding"); // Update the validator set at the pipeline offset update_validator_set(storage, ¶ms, validator, -amount, current_epoch)?; @@ -1509,9 +1567,7 @@ where Ok(()) } -/// NEW: Initialize data for a new validator. -/// TODO: should this still happen at pipeline if it is occurring with 0 bonded -/// stake +/// Initialize data for a new validator. pub fn become_validator( storage: &mut S, params: &PosParams, @@ -1524,6 +1580,9 @@ pub fn become_validator( where S: StorageRead + StorageWrite, { + // This will fail if the key is already being used + try_insert_consensus_key(storage, consensus_key)?; + // Non-epoched validator data write_validator_address_raw_hash(storage, address, consensus_key)?; write_validator_max_commission_rate_change( @@ -1533,19 +1592,19 @@ where )?; // Epoched validator data - validator_consensus_key_handle(address).init( + validator_consensus_key_handle(address).set( storage, consensus_key.clone(), current_epoch, params.pipeline_len, )?; - validator_commission_rate_handle(address).init( + validator_commission_rate_handle(address).set( storage, commission_rate, current_epoch, params.pipeline_len, )?; - validator_deltas_handle(address).init( + validator_deltas_handle(address).set( storage, token::Change::default(), current_epoch, @@ -1554,7 +1613,6 @@ where let stake = token::Amount::default(); - // TODO: need to set the validator state inside of this function insert_validator_into_validator_set( storage, params, @@ -1566,7 +1624,7 @@ where Ok(()) } -/// NEW: Withdraw. +/// Withdraw. pub fn withdraw_tokens( storage: &mut S, source: Option<&Address>, @@ -1576,12 +1634,11 @@ pub fn withdraw_tokens( where S: StorageRead + StorageWrite, { + tracing::debug!("Withdrawing tokens in epoch {current_epoch}"); let params = read_pos_params(storage)?; let source = source.unwrap_or(validator); let slashes = validator_slashes_handle(validator); - // TODO: need some error handling to determine if this unbond even exists? - // A handle to an empty location is valid - we just won't see any data let unbond_handle = unbond_handle(source, validator); let mut slashed = token::Amount::default(); @@ -1598,13 +1655,16 @@ where amount, ) = unbond?; - // dbg!(&end_epoch, &start_epoch, amount); + tracing::debug!( + "Unbond delta ({start_epoch}..{withdraw_epoch}), amount {amount}", + ); // TODO: worry about updating this later after PR 740 perhaps // 1. cubic slashing // 2. adding slash rates in same epoch, applying cumulatively in dif // epochs if withdraw_epoch > current_epoch { + tracing::debug!("Not yet withdrawable"); continue; } for slash in slashes.iter(storage)? 
{
@@ -1631,9 +1691,11 @@ where
         unbonds_to_remove.push((withdraw_epoch, start_epoch));
     }
     withdrawable_amount -= slashed;
+    tracing::debug!("Withdrawing total {withdrawable_amount}");
 
     // Remove the unbond data from storage
     for (withdraw_epoch, start_epoch) in unbonds_to_remove {
+        tracing::debug!("Remove ({start_epoch}..{withdraw_epoch}) from unbond");
         unbond_handle
             .at(&withdraw_epoch)
             .remove(storage, &start_epoch)?;
@@ -1642,9 +1704,10 @@ where
     }
 
     // Transfer the tokens from the PoS address back to the source
+    let staking_token = staking_token_address(storage);
     transfer_tokens(
         storage,
-        &staking_token_address(),
+        &staking_token,
         withdrawable_amount,
         &ADDRESS,
         source,
@@ -1705,7 +1768,7 @@ where
     commission_handle.set(storage, new_rate, current_epoch, params.pipeline_len)
}

-/// NEW: apply a slash and write it to storage
+/// Apply a slash and write it to storage
 pub fn slash<S>(
     storage: &mut S,
     params: &PosParams,
@@ -1752,9 +1815,10 @@ where
     validator_slashes_handle(validator).push(storage, slash)?;
 
     // Transfer the slashed tokens from PoS account to Slash Fund address
+    let staking_token = staking_token_address(storage);
     transfer_tokens(
         storage,
-        &staking_token_address(),
+        &staking_token,
         token::Amount::from(slashed_amount),
         &ADDRESS,
         &SLASH_POOL_ADDRESS,
@@ -1810,7 +1874,35 @@ where
     Ok(())
 }
 
-/// NEW: Get the total bond amount for a given bond ID at a given epoch
+/// Check if the given consensus key is already being used, to ensure
+/// uniqueness.
+///
+/// If it's not being used, it will be inserted into the set of used consensus
+/// keys. If it's already in use, this will return an error.
+pub fn try_insert_consensus_key<S>(
+    storage: &mut S,
+    consensus_key: &common::PublicKey,
+) -> storage_api::Result<()>
+where
+    S: StorageRead + StorageWrite,
+{
+    let key = consensus_keys_key();
+    LazySet::open(key).try_insert(storage, consensus_key.clone())
+}
+
+/// Check if the given consensus key is already being used, to ensure
+/// uniqueness.
+pub fn is_consensus_key_used<S>(
+    storage: &S,
+    consensus_key: &common::PublicKey,
+) -> storage_api::Result<bool>
+where
+    S: StorageRead,
+{
+    let key = consensus_keys_key();
+    let handle = LazySet::open(key);
+    handle.contains(storage, consensus_key)
+}
+
+/// Get the total bond amount for a given bond ID at a given epoch
 pub fn bond_amount<S>(
     storage: &S,
     params: &PosParams,
@@ -1853,16 +1945,19 @@ where
 }
 
 /// Communicate imminent validator set updates to Tendermint. This function is
-/// called two blocks before the start of a new epoch becuase Tendermint
+/// called two blocks before the start of a new epoch because Tendermint
 /// validator updates become active two blocks after the updates are submitted.
-pub fn validator_set_update_tendermint<S>(
+pub fn validator_set_update_tendermint<S, T>(
     storage: &S,
     params: &PosParams,
     current_epoch: Epoch,
-    f: impl FnMut(ValidatorSetUpdate),
-) where
+    f: impl FnMut(ValidatorSetUpdate) -> T,
+) -> storage_api::Result<Vec<T>>
+where
     S: StorageRead,
 {
+    // Because this is called 2 blocks before the start of an epoch, we give
+    // Tendermint updates for the next epoch
     let next_epoch: Epoch = current_epoch.next();
 
     let cur_consensus_validators =
@@ -1871,8 +1966,7 @@ pub fn validator_set_update_tendermint(
         consensus_validator_set_handle().at(&current_epoch);
 
     let consensus_validators = cur_consensus_validators
-        .iter(storage)
-        .unwrap()
+        .iter(storage)?
.filter_map(|validator| { let ( NestedSubKey::Data { @@ -1882,6 +1976,10 @@ pub fn validator_set_update_tendermint( address, ) = validator.unwrap(); + tracing::debug!( + "Consensus validator address {address}, stake {cur_stake}" + ); + // Check if the validator was consensus in the previous epoch with // the same stake // Look up previous state and prev and current voting powers @@ -1927,11 +2025,14 @@ pub fn validator_set_update_tendermint( return None; } } - let consensus_key = validator_consensus_key_handle(&address) .get(storage, next_epoch, params) .unwrap() .unwrap(); + tracing::debug!( + "{address} consensus key {}", + consensus_key.tm_raw_hash() + ); Some(ValidatorSetUpdate::Consensus(ConsensusValidator { consensus_key, bonded_stake: cur_stake.into(), @@ -1954,6 +2055,31 @@ pub fn validator_set_update_tendermint( address, ) = validator.unwrap(); + + let prev_validator_stake = validator_deltas_handle(&address) + .get_sum(storage, current_epoch, params) + .unwrap() + .map(token::Amount::from_change) + .unwrap_or_default(); + tracing::debug!( + "Below-capacity validator address {address}, stake {prev_validator_stake}" + ); + + let prev_tm_voting_power = into_tm_voting_power( + params.tm_votes_per_token, + prev_validator_stake, + ); + + // If the validator previously had no voting power, it wasn't in + // tendermint set and we have to skip it. + if prev_tm_voting_power == 0 { + tracing::debug!( + "skipping validator update {address}, it's inactive and \ + previously had no voting power" + ); + return None; + } + if !prev_below_capacity_vals.is_empty(storage).unwrap() { // Look up the previous state let prev_state = validator_state_handle(&address) @@ -1976,11 +2102,16 @@ pub fn validator_set_update_tendermint( .get(storage, next_epoch, params) .unwrap() .unwrap(); + tracing::debug!( + "{address} consensus key {}", + consensus_key.tm_raw_hash() + ); Some(ValidatorSetUpdate::Deactivated(consensus_key)) }); - consensus_validators + Ok(consensus_validators .chain(below_capacity_validators) - .for_each(f) + .map(f) + .collect()) } /// Find all validators to which a given bond `owner` (or source) has a @@ -2190,6 +2321,9 @@ where } let change: token::Change = BorshDeserialize::try_from_slice(&val_bytes).ok()?; + if change == 0 { + return None; + } return Some((bond_id, start, change)); } } @@ -2203,24 +2337,33 @@ where let mut raw_unbonds = storage_api::iter_prefix_bytes(storage, &prefix)? 
        .filter_map(|result| {
            if let Ok((key, val_bytes)) = result {
-                if let Some((_bond_id, _start, withdraw)) = is_unbond_key(&key)
-                {
-                    if let Some((bond_id, start)) = is_bond_key(&key) {
-                        if source.is_some()
-                            && source.as_ref().unwrap() != &bond_id.source
-                        {
-                            return None;
+                if let Some((bond_id, start, withdraw)) = is_unbond_key(&key) {
+                    if source.is_some()
+                        && source.as_ref().unwrap() != &bond_id.source
+                    {
+                        return None;
+                    }
+                    if validator.is_some()
+                        && validator.as_ref().unwrap() != &bond_id.validator
+                    {
+                        return None;
+                    }
+                    match (source.clone(), validator.clone()) {
+                        (None, Some(validator)) => {
+                            if bond_id.validator != validator {
+                                return None;
+                            }
                        }
-                        if validator.is_some()
-                            && validator.as_ref().unwrap() != &bond_id.validator
-                        {
-                            return None;
+                        (Some(owner), None) => {
+                            if owner != bond_id.source {
+                                return None;
+                            }
                        }
-                        let amount: token::Amount =
-                            BorshDeserialize::try_from_slice(&val_bytes)
-                                .ok()?;
-                        return Some((bond_id, start, withdraw, amount));
+                        _ => {}
                    }
+                    let amount: token::Amount =
+                        BorshDeserialize::try_from_slice(&val_bytes).ok()?;
+                    return Some((bond_id, start, withdraw, amount));
                }
            }
            None
@@ -2303,6 +2446,7 @@ where
    let bonds = find_bonds(storage, &source, &validator)?
        .into_iter()
+        .filter(|(_start, change)| *change > token::Change::default())
        .map(|(start, change)| {
            make_bond_details(
                storage,
@@ -2426,15 +2570,11 @@ pub fn log_block_rewards(
    storage: &mut S,
    epoch: impl Into<Epoch>,
    proposer_address: &Address,
-    votes: &[VoteInfo],
+    votes: Vec<VoteInfo>,
) -> storage_api::Result<()>
where
    S: StorageRead + StorageWrite,
{
-    // TODO: all values collected here need to be consistent with the same
-    // block that the voting info corresponds to, which is the
-    // previous block from the current one we are in.
-
    // The votes correspond to the last committed block (n-1 if we are
    // finalizing block n)
@@ -2443,8 +2583,7 @@ where
    let consensus_validators = consensus_validator_set_handle().at(&epoch);

    // Get total stake of the consensus validator set
-    // TODO: this will need to account for rewards products?
-    let mut total_consensus_stake = 0_u64;
+    let mut total_consensus_stake = token::Amount::default();
    for validator in consensus_validators.iter(storage)? {
        let (
            NestedSubKey::Data {
@@ -2453,52 +2592,56 @@ where
            },
            _address,
        ) = validator?;
-        total_consensus_stake += u64::from(amount);
+        total_consensus_stake += amount;
    }

    // Get set of signing validator addresses and the combined stake of
    // these signers
    let mut signer_set: HashSet<Address> = HashSet::new();
-    let mut total_signing_stake: u64 = 0;
-    for vote in votes.iter() {
-        if !vote.signed_last_block || vote.validator_vp == 0 {
+    let mut total_signing_stake = token::Amount::default();
+    for VoteInfo {
+        validator_address,
+        validator_vp,
+    } in votes
+    {
+        if validator_vp == 0 {
            continue;
        }

-        let tm_raw_hash_string =
-            hex::encode_upper(vote.validator_address.clone());
-        let native_address =
-            find_validator_by_raw_hash(storage, tm_raw_hash_string)?.expect(
-                "Unable to read native address of validator from tendermint \
-                 raw hash",
-            );
-        signer_set.insert(native_address.clone());
-        total_signing_stake += vote.validator_vp;
-
-        // Ensure TM stake updates properly with a debug_assert
        let stake_from_deltas =
-            read_validator_stake(storage, &params, &native_address, epoch)?
+            read_validator_stake(storage, &params, &validator_address, epoch)?
                .unwrap_or_default();
-        debug_assert_eq!(
-            stake_from_deltas,
-            token::Amount::from(vote.validator_vp)
-        );
+
+        // Ensure TM stake updates properly with a debug_assert
+        if cfg!(debug_assertions) {
+            debug_assert_eq!(
+                into_tm_voting_power(
+                    params.tm_votes_per_token,
+                    stake_from_deltas
+                ),
+                i64::try_from(validator_vp).unwrap_or_default(),
+            );
+        }
+
+        signer_set.insert(validator_address);
+        total_signing_stake += stake_from_deltas;
    }

    // Get the block rewards coefficients (proposing, signing/voting,
    // consensus set status)
-    let consensus_stake: Decimal = total_consensus_stake.into();
-    let signing_stake: Decimal = total_signing_stake.into();
-    let rewards_calculator = PosRewardsCalculator::new(
-        params.block_proposer_reward,
-        params.block_vote_reward,
-        total_signing_stake,
-        total_consensus_stake,
-    );
-    let coeffs = match rewards_calculator.get_reward_coeffs() {
-        Ok(coeffs) => coeffs,
-        Err(_) => return Err(InflationError::Error.into()),
+    let rewards_calculator = PosRewardsCalculator {
+        proposer_reward: params.block_proposer_reward,
+        signer_reward: params.block_vote_reward,
+        signing_stake: u64::from(total_signing_stake),
+        total_stake: u64::from(total_consensus_stake),
    };
+    let coeffs = rewards_calculator
+        .get_reward_coeffs()
+        .map_err(InflationError::Rewards)
+        .into_storage_result()?;
+    tracing::debug!(
+        "PoS rewards coefficients {coeffs:?}, inputs: {rewards_calculator:?}."
+    );

    // println!(
    //     "TOTAL SIGNING STAKE (LOGGING BLOCK REWARDS) = {}",
@@ -2507,6 +2650,9 @@ where

    // Compute the fractional block rewards for each consensus validator and
    // update the reward accumulators
+    let consensus_stake_unscaled: Decimal =
+        total_consensus_stake.as_dec_unscaled();
+    let signing_stake_unscaled: Decimal = total_signing_stake.as_dec_unscaled();
    let mut values: HashMap<Address, Decimal> = HashMap::new();
    for validator in consensus_validators.iter(storage)? {
        let (
@@ -2526,7 +2672,7 @@ where
        }

        let mut rewards_frac = Decimal::default();
-        let stake: Decimal = u64::from(stake).into();
+        let stake_unscaled: Decimal = stake.as_dec_unscaled();
        // println!(
        //     "NAMADA VALIDATOR STAKE (LOGGING BLOCK REWARDS) OF EPOCH {} =
        //      {}", epoch, stake
@@ -2538,11 +2684,12 @@ where
        }
        // Signer reward
        if signer_set.contains(&address) {
-            let signing_frac = stake / signing_stake;
+            let signing_frac = stake_unscaled / signing_stake_unscaled;
            rewards_frac += coeffs.signer_coeff * signing_frac;
        }
        // Consensus validator reward
-        rewards_frac += coeffs.active_val_coeff * (stake / consensus_stake);
+        rewards_frac += coeffs.active_val_coeff
+            * (stake_unscaled / consensus_stake_unscaled);

        // Update the rewards accumulator
        let prev = rewards_accumulator_handle()
diff --git a/proof_of_stake/src/pos_queries.rs b/proof_of_stake/src/pos_queries.rs
new file mode 100644
index 00000000000..a92cd14ebe8
--- /dev/null
+++ b/proof_of_stake/src/pos_queries.rs
@@ -0,0 +1,314 @@
+//! Storage API for querying Proof-of-Stake related data, such as
+//! validator and epoch related data.
+use borsh::BorshDeserialize;
+use namada_core::ledger::parameters::storage::get_max_proposal_bytes_key;
+use namada_core::ledger::parameters::EpochDuration;
+use namada_core::ledger::storage::WlStorage;
+use namada_core::ledger::storage_api::collections::lazy_map::NestedSubKey;
+use namada_core::ledger::{storage, storage_api};
+use namada_core::tendermint_proto::google::protobuf;
+use namada_core::tendermint_proto::types::EvidenceParams;
+use namada_core::types::address::Address;
+use namada_core::types::chain::ProposalBytes;
+use namada_core::types::storage::{BlockHeight, Epoch};
+use namada_core::types::{key, token};
+use thiserror::Error;
+
+use crate::types::{ConsensusValidatorSet, WeightedValidator};
+use crate::{consensus_validator_set_handle, PosParams};
+
+/// Errors returned by [`PosQueries`] operations.
+#[derive(Error, Debug)]
+pub enum Error {
+    /// The given address is not among the set of consensus validators for
+    /// the corresponding epoch.
+    #[error(
+        "The address '{0:?}' is not among the consensus validator set for \
+         epoch {1}"
+    )]
+    NotValidatorAddress(Address, Epoch),
+    /// The given public key does not correspond to any consensus validator's
+    /// key at the provided epoch.
+    #[error(
+        "The public key '{0}' is not among the consensus validator set for \
+         epoch {1}"
+    )]
+    NotValidatorKey(String, Epoch),
+    /// The given public key hash does not correspond to any consensus
+    /// validator's key at the provided epoch.
+    #[error(
+        "The public key hash '{0}' is not among the consensus validator set \
+         for epoch {1}"
+    )]
+    NotValidatorKeyHash(String, Epoch),
+    /// An invalid Tendermint validator address was detected.
+    #[error("Invalid validator tendermint address")]
+    InvalidTMAddress,
+}
+
+/// Result type returned by [`PosQueries`] operations.
+pub type Result<T> = ::std::result::Result<T, Error>;
+
+/// Methods used to query blockchain proof-of-stake related state,
+/// such as the current set of consensus validators.
+pub trait PosQueries {
+    /// The underlying storage type.
+    type Storage;
+
+    /// Return a handle to [`PosQueries`].
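+    ///
+    /// A rough usage sketch (assuming a `wl_storage: WlStorage<D, H>` value
+    /// in scope; illustrative only):
+    ///
+    /// ```ignore
+    /// let pos = wl_storage.pos_queries();
+    /// let total_stake = pos.get_total_voting_power(None);
+    /// ```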
+    fn pos_queries(&self) -> PosQueriesHook<'_, Self::Storage>;
+}
+
+impl<D, H> PosQueries for WlStorage<D, H>
+where
+    D: storage::DB + for<'iter> storage::DBIter<'iter>,
+    H: storage::StorageHasher,
+{
+    type Storage = Self;
+
+    #[inline]
+    fn pos_queries(&self) -> PosQueriesHook<'_, Self> {
+        PosQueriesHook { wl_storage: self }
+    }
+}
+
+/// A handle to [`PosQueries`].
+///
+/// This type is a wrapper around a pointer to a
+/// [`WlStorage`].
+#[derive(Debug)]
+#[repr(transparent)]
+pub struct PosQueriesHook<'db, DB> {
+    wl_storage: &'db DB,
+}
+
+impl<'db, DB> Clone for PosQueriesHook<'db, DB> {
+    fn clone(&self) -> Self {
+        Self {
+            wl_storage: self.wl_storage,
+        }
+    }
+}
+
+impl<'db, DB> Copy for PosQueriesHook<'db, DB> {}
+
+impl<'db, D, H> PosQueriesHook<'db, WlStorage<D, H>>
+where
+    D: 'static + storage::DB + for<'iter> storage::DBIter<'iter>,
+    H: 'static + storage::StorageHasher,
+{
+    /// Return a handle to the inner [`WlStorage`].
+    #[inline]
+    pub fn storage(self) -> &'db WlStorage<D, H> {
+        self.wl_storage
+    }
+
+    /// Get the set of consensus validators for a given epoch (defaulting to
+    /// the epoch of the current yet-to-be-committed block).
+    #[inline]
+    pub fn get_consensus_validators(
+        self,
+        epoch: Option<Epoch>,
+    ) -> ConsensusValidators<'db, D, H> {
+        let epoch = epoch
+            .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0);
+        ConsensusValidators {
+            wl_storage: self.wl_storage,
+            validator_set: consensus_validator_set_handle().at(&epoch),
+        }
+    }
+
+    /// Lookup the total voting power for an epoch (defaulting to the
+    /// epoch of the current yet-to-be-committed block).
+    pub fn get_total_voting_power(self, epoch: Option<Epoch>) -> token::Amount {
+        self.get_consensus_validators(epoch)
+            .iter()
+            .map(|validator| u64::from(validator.bonded_stake))
+            .sum::<u64>()
+            .into()
+    }
+
+    /// Simple helper function for the ledger to get balances
+    /// of the specified token at the specified address.
+    pub fn get_balance(
+        self,
+        token: &Address,
+        owner: &Address,
+    ) -> token::Amount {
+        storage_api::token::read_balance(self.wl_storage, token, owner)
+            .expect("Storage read in the protocol must not fail")
+    }
+
+    /// Return evidence parameters.
+    // TODO: improve this docstring
+    pub fn get_evidence_params(
+        self,
+        epoch_duration: &EpochDuration,
+        pos_params: &PosParams,
+    ) -> EvidenceParams {
+        // Minimum number of epochs before tokens are unbonded and can be
+        // withdrawn
+        let len_before_unbonded =
+            std::cmp::max(pos_params.unbonding_len as i64 - 1, 0);
+        let max_age_num_blocks: i64 =
+            epoch_duration.min_num_of_blocks as i64 * len_before_unbonded;
+        let min_duration_secs = epoch_duration.min_duration.0 as i64;
+        let max_age_duration = Some(protobuf::Duration {
+            seconds: min_duration_secs * len_before_unbonded,
+            nanos: 0,
+        });
+        EvidenceParams {
+            max_age_num_blocks,
+            max_age_duration,
+            ..EvidenceParams::default()
+        }
+    }
+
+    /// Lookup data about a validator from their address.
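+    ///
+    /// For example (a sketch; `None` defaults to the current epoch and
+    /// `validator_address` is a hypothetical `Address`):
+    ///
+    /// ```ignore
+    /// let (bonded_stake, protocol_pk) =
+    ///     pos.get_validator_from_address(&validator_address, None)?;
+    /// ```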
+    pub fn get_validator_from_address(
+        self,
+        address: &Address,
+        epoch: Option<Epoch>,
+    ) -> Result<(token::Amount, key::common::PublicKey)> {
+        let epoch = epoch
+            .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0);
+        self.get_consensus_validators(Some(epoch))
+            .iter()
+            .find(|validator| address == &validator.address)
+            .map(|validator| {
+                let protocol_pk_key = key::protocol_pk_key(&validator.address);
+                // TODO: rewrite this, to use `StorageRead::read`
+                let bytes = self
+                    .wl_storage
+                    .storage
+                    .read(&protocol_pk_key)
+                    .expect("Validator should have public protocol key")
+                    .0
+                    .expect("Validator should have public protocol key");
+                let protocol_pk: key::common::PublicKey =
+                    BorshDeserialize::deserialize(&mut bytes.as_ref()).expect(
+                        "Protocol public key in storage should be \
+                         deserializable",
+                    );
+                (validator.bonded_stake, protocol_pk)
+            })
+            .ok_or_else(|| Error::NotValidatorAddress(address.clone(), epoch))
+    }
+
+    /// Given a Tendermint validator, the address is the hash of the
+    /// validator's public key. We look up the native address from storage
+    /// using this hash.
+    // TODO: We may change how this lookup is done, see
+    // https://github.com/anoma/namada/issues/200
+    pub fn get_validator_from_tm_address(
+        self,
+        _tm_address: &[u8],
+        _epoch: Option<Epoch>,
+    ) -> Result<Address> {
+        // let epoch = epoch.unwrap_or_else(|| self.get_current_epoch().0);
+        // let validator_raw_hash = core::str::from_utf8(tm_address)
+        //     .map_err(|_| Error::InvalidTMAddress)?;
+        // self.read_validator_address_raw_hash(validator_raw_hash)
+        //     .ok_or_else(|| {
+        //         Error::NotValidatorKeyHash(
+        //             validator_raw_hash.to_string(),
+        //             epoch,
+        //         )
+        //     })
+        todo!()
+    }
+
+    /// Check if we are at a given [`BlockHeight`] offset, `height_offset`,
+    /// within the current [`Epoch`].
+    pub fn is_deciding_offset_within_epoch(self, height_offset: u64) -> bool {
+        let current_decision_height = self.get_current_decision_height();
+
+        // NOTE: the first stored height in `fst_block_heights_of_each_epoch`
+        // is 0, because of a bug (should be 1), so this code needs to
+        // handle that case
+        //
+        // we can remove this check once that's fixed
+        if self.wl_storage.storage.get_current_epoch().0 == Epoch(0) {
+            let height_offset_within_epoch = BlockHeight(1 + height_offset);
+            return current_decision_height == height_offset_within_epoch;
+        }
+
+        let fst_heights_of_each_epoch = self
+            .wl_storage
+            .storage
+            .block
+            .pred_epochs
+            .first_block_heights();
+
+        fst_heights_of_each_epoch
+            .last()
+            .map(|&h| {
+                let height_offset_within_epoch = h + height_offset;
+                current_decision_height == height_offset_within_epoch
+            })
+            .unwrap_or(false)
+    }
+
+    #[inline]
+    /// Given some [`BlockHeight`], return the corresponding [`Epoch`].
+    pub fn get_epoch(self, height: BlockHeight) -> Option<Epoch> {
+        self.wl_storage.storage.block.pred_epochs.get_epoch(height)
+    }
+
+    #[inline]
+    /// Retrieves the [`BlockHeight`] that is currently being decided.
+    pub fn get_current_decision_height(self) -> BlockHeight {
+        self.wl_storage.storage.last_height + 1
+    }
+
+    /// Retrieve the `max_proposal_bytes` consensus parameter from storage.
+    pub fn get_max_proposal_bytes(self) -> ProposalBytes {
+        storage_api::StorageRead::read(
+            self.wl_storage,
+            &get_max_proposal_bytes_key(),
+        )
+        .expect("Must be able to read ProposalBytes from storage")
+        .expect("ProposalBytes must be present in storage")
+    }
+}
+
+/// A handle to the set of consensus validators in Namada,
+/// at some given epoch.
+pub struct ConsensusValidators<'db, D, H>
+where
+    D: storage::DB + for<'iter> storage::DBIter<'iter>,
+    H: storage::StorageHasher,
+{
+    wl_storage: &'db WlStorage<D, H>,
+    validator_set: ConsensusValidatorSet,
+}
+
+impl<'db, D, H> ConsensusValidators<'db, D, H>
+where
+    D: 'static + storage::DB + for<'iter> storage::DBIter<'iter>,
+    H: 'static + storage::StorageHasher,
+{
+    /// Iterate over the set of consensus validators in Namada, at some given
+    /// epoch.
+    pub fn iter<'this: 'db>(
+        &'this self,
+    ) -> impl Iterator<Item = WeightedValidator> + 'db {
+        self.validator_set
+            .iter(self.wl_storage)
+            .expect("Must be able to iterate over consensus validators")
+            .map(|res| {
+                let (
+                    NestedSubKey::Data {
+                        key: bonded_stake, ..
+                    },
+                    address,
+                ) = res.expect(
+                    "We should be able to decode validators in storage",
+                );
+                WeightedValidator {
+                    address,
+                    bonded_stake,
+                }
+            })
+    }
+}
diff --git a/proof_of_stake/src/rewards.rs b/proof_of_stake/src/rewards.rs
index 9b021af6130..6f830d9c524 100644
--- a/proof_of_stake/src/rewards.rs
+++ b/proof_of_stake/src/rewards.rs
@@ -1,17 +1,24 @@
-//! PoS rewards
+//! PoS rewards distribution.
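+//!
+//! A rough sketch of using the `PosRewardsCalculator` defined below
+//! (illustrative values, not taken from the protocol parameters):
+//!
+//! ```ignore
+//! use rust_decimal_macros::dec;
+//!
+//! let calc = PosRewardsCalculator {
+//!     proposer_reward: dec!(0.125),
+//!     signer_reward: dec!(0.1),
+//!     signing_stake: 90,
+//!     total_stake: 100,
+//! };
+//! let coeffs = calc.get_reward_coeffs().unwrap();
+//! ```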
 use rust_decimal::Decimal;
 use rust_decimal_macros::dec;
 use thiserror::Error;
 
+const MIN_PROPOSER_REWARD: Decimal = dec!(0.01);
+
 /// Errors during rewards calculation
 #[derive(Debug, Error)]
+#[allow(missing_docs)]
 pub enum RewardsError {
     /// number of votes is less than the threshold of 2/3
     #[error(
-        "Insufficient votes, needed at least 2/3 of the total bonded stake"
+        "Insufficient votes. Got {signing_stake}, needed {votes_needed} (at \
+         least 2/3 of the total bonded stake)."
     )]
-    InsufficentVotes,
+    InsufficientVotes {
+        votes_needed: u64,
+        signing_stake: u64,
+    },
     /// rewards coefficients are not set
     #[error("Rewards coefficients are not properly set.")]
     CoeffsNotSet,
 }
@@ -30,43 +37,44 @@ pub struct PosRewards {
 /// the rewards
 #[derive(Debug, Copy, Clone)]
 pub struct PosRewardsCalculator {
-    proposer_param: Decimal,
-    signer_param: Decimal,
-    signing_stake: u64,
-    total_stake: u64,
+    /// Rewards fraction that goes to the block proposer
+    pub proposer_reward: Decimal,
+    /// Rewards fraction that goes to the block signers
+    pub signer_reward: Decimal,
+    /// Total stake of validators who signed the block
+    pub signing_stake: u64,
+    /// Total stake of the whole consensus set
+    pub total_stake: u64,
 }
 
 impl PosRewardsCalculator {
-    /// Instantiate a new PosRewardsCalculator
-    pub fn new(
-        proposer_param: Decimal,
-        signer_param: Decimal,
-        signing_stake: u64,
-        total_stake: u64,
-    ) -> Self {
-        Self {
-            proposer_param,
-            signer_param,
-            signing_stake,
-            total_stake,
-        }
-    }
-
-    /// Calculate the reward coefficients
+    /// Calculate the rewards coefficients. These are used in combination with
+    /// the validator's signing behavior and stake to determine the fraction of
+    /// the block rewards earned.
     pub fn get_reward_coeffs(&self) -> Result<PosRewards, RewardsError> {
         // TODO: think about possibility of u64 overflow
         let votes_needed = self.get_min_required_votes();
-        if self.signing_stake < votes_needed {
-            return Err(RewardsError::InsufficentVotes);
+
+        let Self {
+            proposer_reward,
+            signer_reward,
+            signing_stake,
+            total_stake,
+        } = *self;
+
+        if signing_stake < votes_needed {
+            return Err(RewardsError::InsufficientVotes {
+                votes_needed,
+                signing_stake,
+            });
         }
 
-        // Logic for determining the coefficients
-        // TODO: error handling to ensure proposer_coeff is > 0?
-        let proposer_coeff = self.proposer_param
-            * Decimal::from(self.signing_stake - votes_needed)
-            / Decimal::from(self.total_stake)
-            + dec!(0.01);
-        let signer_coeff = self.signer_param;
+        // Logic for determining the coefficients.
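+        // For example (illustrative values): with `total_stake = 100` the
+        // minimum required signing stake is ceil(2/3 * 100) = 67; with
+        // `signing_stake = 90` and `proposer_reward = 0.125`, the proposer
+        // coefficient comes out to 0.125 * (90 - 67) / 100 + 0.01 = 0.03875.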
+ let proposer_coeff = proposer_reward + * Decimal::from(signing_stake - votes_needed) + / Decimal::from(total_stake) + + MIN_PROPOSER_REWARD; + let signer_coeff = signer_reward; let active_val_coeff = dec!(1.0) - proposer_coeff - signer_coeff; let coeffs = PosRewards { @@ -78,7 +86,7 @@ impl PosRewardsCalculator { Ok(coeffs) } - /// Implement as ceiling (2/3) * validator set stake + /// Implement as ceiling of (2/3) * validator set stake fn get_min_required_votes(&self) -> u64 { ((2 * self.total_stake) + 3 - 1) / 3 } diff --git a/proof_of_stake/src/storage.rs b/proof_of_stake/src/storage.rs index 87644772c99..6b7a9add987 100644 --- a/proof_of_stake/src/storage.rs +++ b/proof_of_stake/src/storage.rs @@ -31,6 +31,7 @@ const NUM_CONSENSUS_VALIDATORS_STORAGE_KEY: &str = "num_consensus"; const BELOW_CAPACITY_VALIDATOR_SET_STORAGE_KEY: &str = "below_capacity"; const TOTAL_DELTAS_STORAGE_KEY: &str = "total_deltas"; const VALIDATOR_SET_POSITIONS_KEY: &str = "validator_set_positions"; +const CONSENSUS_KEYS: &str = "consensus_keys"; const LAST_BLOCK_PROPOSER_STORAGE_KEY: &str = "last_block_proposer"; const CONSENSUS_VALIDATOR_SET_ACCUMULATOR_STORAGE_KEY: &str = "validator_rewards_accumulator"; @@ -559,3 +560,15 @@ pub fn validator_set_positions_key() -> Key { .push(&VALIDATOR_SET_POSITIONS_KEY.to_owned()) .expect("Cannot obtain a storage key") } + +/// Storage key for consensus keys set. +pub fn consensus_keys_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&CONSENSUS_KEYS.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for consensus keys set? +pub fn is_consensus_keys_key(key: &Key) -> bool { + matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key)] if addr == &ADDRESS && key == CONSENSUS_KEYS) +} diff --git a/proof_of_stake/src/tests.rs b/proof_of_stake/src/tests.rs index 7de2d0727a9..84bb8ebe53f 100644 --- a/proof_of_stake/src/tests.rs +++ b/proof_of_stake/src/tests.rs @@ -1,5 +1,7 @@ //! 
PoS system tests +mod state_machine; + use std::cmp::min; use std::ops::Range; @@ -11,7 +13,7 @@ use namada_core::types::address::testing::{ address_from_simple_seed, arb_established_address, }; use namada_core::types::address::{Address, EstablishedAddressGen}; -use namada_core::types::key::common::SecretKey; +use namada_core::types::key::common::{PublicKey, SecretKey}; use namada_core::types::key::testing::{ arb_common_keypair, common_sk_from_simple_seed, }; @@ -20,6 +22,7 @@ use namada_core::types::{address, key, token}; use proptest::prelude::*; use proptest::test_runner::Config; use rust_decimal::Decimal; +use rust_decimal_macros::dec; // Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to see // `tracing` logs from tests use test_log::test; @@ -27,8 +30,9 @@ use test_log::test; use crate::parameters::testing::arb_pos_params; use crate::parameters::PosParams; use crate::types::{ - BondDetails, BondId, BondsAndUnbondsDetails, GenesisValidator, Position, - ReverseOrdTokenAmount, ValidatorState, WeightedValidator, + into_tm_voting_power, BondDetails, BondId, BondsAndUnbondsDetails, + ConsensusValidator, GenesisValidator, Position, ReverseOrdTokenAmount, + UnbondDetails, ValidatorSetUpdate, ValidatorState, WeightedValidator, }; use crate::{ become_validator, below_capacity_validator_set_handle, bond_handle, @@ -40,7 +44,8 @@ use crate::{ read_num_consensus_validators, read_total_stake, read_validator_delta_value, read_validator_stake, staking_token_address, total_deltas_handle, unbond_handle, unbond_tokens, update_validator_deltas, - update_validator_set, validator_state_handle, withdraw_tokens, + update_validator_set, validator_consensus_key_handle, + validator_set_update_tendermint, validator_state_handle, withdraw_tokens, write_validator_address_raw_hash, }; @@ -112,6 +117,7 @@ fn test_init_genesis_aux( let mut s = TestWlStorage::default(); s.storage.block.epoch = start_epoch; + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); init_genesis(&mut s, ¶ms, validators.clone().into_iter(), start_epoch) .unwrap(); @@ -120,10 +126,7 @@ fn test_init_genesis_aux( details.unbonds.is_empty() && details.slashes.is_empty() })); - validators.sort_by(|a, b| a.tokens.cmp(&b.tokens)); - for (i, validator) in validators.into_iter().rev().enumerate() { - println!("Validator {validator:?}"); - + for (i, validator) in validators.into_iter().enumerate() { let addr = &validator.address; let self_bonds = bond_details .remove(&BondId { @@ -189,7 +192,7 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { current_epoch, ) .unwrap(); - s.commit_genesis().unwrap(); + s.commit_block().unwrap(); // Advance to epoch 1 current_epoch = advance_epoch(&mut s, ¶ms); @@ -199,9 +202,10 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { // Read some data before submitting bond let pipeline_epoch = current_epoch + params.pipeline_len; + let staking_token = staking_token_address(&s); let pos_balance_pre = s .read::(&token::balance_key( - &staking_token_address(), + &staking_token, &super::ADDRESS, )) .unwrap() @@ -211,13 +215,8 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { // Self-bond let amount_self_bond = token::Amount::from(100_500_000); - credit_tokens( - &mut s, - &staking_token_address(), - &validator.address, - amount_self_bond, - ) - .unwrap(); + credit_tokens(&mut s, &staking_token, &validator.address, amount_self_bond) + .unwrap(); bond_tokens( &mut s, None, @@ -273,7 +272,6 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { }; let check_bond_details = |ix, 
bond_details: BondsAndUnbondsDetails| { println!("Check index {ix}"); - assert_eq!(bond_details.len(), 1); let details = bond_details.get(&self_bond_id).unwrap(); assert_eq!( details.bonds.len(), @@ -321,9 +319,8 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { // Get a non-validating account with tokens let delegator = address::testing::gen_implicit_address(); let amount_del = token::Amount::from(201_000_000); - credit_tokens(&mut s, &staking_token_address(), &delegator, amount_del) - .unwrap(); - let balance_key = token::balance_key(&staking_token_address(), &delegator); + credit_tokens(&mut s, &staking_token, &delegator, amount_del).unwrap(); + let balance_key = token::balance_key(&staking_token, &delegator); let balance = s .read::(&balance_key) .unwrap() @@ -416,7 +413,6 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { // Check all bond details (self-bonds and delegation) let check_bond_details = |ix, bond_details: BondsAndUnbondsDetails| { println!("Check index {ix}"); - assert_eq!(bond_details.len(), 2); let self_bond_details = bond_details.get(&self_bond_id).unwrap(); let delegation_details = bond_details.get(&delegation_bond_id).unwrap(); assert_eq!( @@ -455,9 +451,28 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { } let pipeline_epoch = current_epoch + params.pipeline_len; - // Unbond the self-bond - unbond_tokens(&mut s, None, &validator.address, amount_del, current_epoch) - .unwrap(); + // Unbond the self-bond with an amount that will remove all of the self-bond + // executed after genesis and some of the genesis bond + let amount_self_unbond: token::Amount = + amount_self_bond + (u64::from(validator.tokens) / 2).into(); + // When the difference is 0, only the non-genesis self-bond is unbonded + let unbonded_genesis_self_bond = + amount_self_unbond - amount_self_bond != token::Amount::default(); + dbg!( + amount_self_unbond, + amount_self_bond, + unbonded_genesis_self_bond + ); + let self_unbond_epoch = s.storage.block.epoch; + + unbond_tokens( + &mut s, + None, + &validator.address, + amount_self_unbond, + current_epoch, + ) + .unwrap(); let val_stake_pre = read_validator_stake( &s, @@ -466,9 +481,11 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { pipeline_epoch - 1, ) .unwrap(); + let val_stake_post = read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) .unwrap(); + let val_delta = read_validator_delta_value( &s, ¶ms, @@ -478,33 +495,109 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { .unwrap(); let unbond = unbond_handle(&validator.address, &validator.address); - assert_eq!(val_delta, Some(-amount_del.change())); + assert_eq!(val_delta, Some(-amount_self_unbond.change())); assert_eq!( unbond .at(&(pipeline_epoch + params.unbonding_len)) - .get(&s, &(self_bond_epoch + params.pipeline_len)) + .get(&s, &Epoch::default()) .unwrap(), - Some(amount_self_bond) + if unbonded_genesis_self_bond { + Some(amount_self_unbond - amount_self_bond) + } else { + None + } ); assert_eq!( unbond .at(&(pipeline_epoch + params.unbonding_len)) - .get(&s, &Epoch::default()) + .get(&s, &(self_bond_epoch + params.pipeline_len)) .unwrap(), - Some(amount_del - amount_self_bond) + Some(amount_self_bond) ); assert_eq!( val_stake_pre, Some(validator.tokens + amount_self_bond + amount_del) ); - assert_eq!(val_stake_post, Some(validator.tokens + amount_self_bond)); + assert_eq!( + val_stake_post, + Some( + validator.tokens + amount_self_bond + amount_del + - amount_self_unbond + ) + ); + + // Check all bond and unbond details (self-bonds and 
delegation) + let check_bond_details = |ix, bond_details: BondsAndUnbondsDetails| { + println!("Check index {ix}"); + dbg!(&bond_details); + assert_eq!(bond_details.len(), 2); + let self_bond_details = bond_details.get(&self_bond_id).unwrap(); + let delegation_details = bond_details.get(&delegation_bond_id).unwrap(); + assert_eq!( + self_bond_details.bonds.len(), + 1, + "Contains only part of the genesis bond now" + ); + assert_eq!( + self_bond_details.bonds[0], + BondDetails { + start: start_epoch, + amount: validator.tokens + amount_self_bond + - amount_self_unbond, + slashed_amount: None + }, + ); + assert_eq!( + delegation_details.bonds[0], + BondDetails { + start: delegation_epoch + params.pipeline_len, + amount: amount_del, + slashed_amount: None + }, + ); + assert_eq!( + self_bond_details.unbonds.len(), + if unbonded_genesis_self_bond { 2 } else { 1 }, + "Contains a full unbond of the last self-bond and an unbond from \ + the genesis bond" + ); + if unbonded_genesis_self_bond { + assert_eq!( + self_bond_details.unbonds[0], + UnbondDetails { + start: start_epoch, + withdraw: self_unbond_epoch + + params.pipeline_len + + params.unbonding_len, + amount: amount_self_unbond - amount_self_bond, + slashed_amount: None + } + ); + } + assert_eq!( + self_bond_details.unbonds[usize::from(unbonded_genesis_self_bond)], + UnbondDetails { + start: self_bond_epoch + params.pipeline_len, + withdraw: self_unbond_epoch + + params.pipeline_len + + params.unbonding_len, + amount: amount_self_bond, + slashed_amount: None + } + ); + }; + check_bond_details( + 0, + bonds_and_unbonds(&s, None, Some(validator.address.clone())).unwrap(), + ); // Unbond delegation + let amount_undel = token::Amount::from(1_000_000); unbond_tokens( &mut s, Some(&delegator), &validator.address, - amount_self_bond, + amount_undel, current_epoch, ) .unwrap(); @@ -528,19 +621,29 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { .unwrap(); let unbond = unbond_handle(&delegator, &validator.address); - assert_eq!(val_delta, Some(-(amount_self_bond + amount_del).change())); + assert_eq!( + val_delta, + Some(-(amount_self_unbond + amount_undel).change()) + ); assert_eq!( unbond .at(&(pipeline_epoch + params.unbonding_len)) .get(&s, &(delegation_epoch + params.pipeline_len)) .unwrap(), - Some(amount_self_bond) + Some(amount_undel) ); assert_eq!( val_stake_pre, Some(validator.tokens + amount_self_bond + amount_del) ); - assert_eq!(val_stake_post, Some(validator.tokens)); + assert_eq!( + val_stake_post, + Some( + validator.tokens + amount_self_bond - amount_self_unbond + + amount_del + - amount_undel + ) + ); let withdrawable_offset = params.unbonding_len + params.pipeline_len; @@ -553,7 +656,7 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { let pos_balance = s .read::(&token::balance_key( - &staking_token_address(), + &staking_token, &super::ADDRESS, )) .unwrap(); @@ -571,11 +674,17 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { let pos_balance = s .read::(&token::balance_key( - &staking_token_address(), + &staking_token, &super::ADDRESS, )) .unwrap(); - assert_eq!(Some(pos_balance_pre + amount_self_bond), pos_balance); + assert_eq!( + Some( + pos_balance_pre + amount_self_bond - amount_self_unbond + + amount_del + ), + pos_balance + ); // Withdraw the delegation unbond withdraw_tokens( @@ -591,11 +700,18 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { let pos_balance = s .read::(&token::balance_key( - &staking_token_address(), + &staking_token, &super::ADDRESS, )) .unwrap(); - 
assert_eq!(Some(pos_balance_pre), pos_balance); + assert_eq!( + Some( + pos_balance_pre + amount_self_bond - amount_self_unbond + + amount_del + - amount_undel + ), + pos_balance + ); } /// Test validator initialization. @@ -621,7 +737,7 @@ fn test_become_validator_aux( current_epoch, ) .unwrap(); - s.commit_genesis().unwrap(); + s.commit_block().unwrap(); // Advance to epoch 1 current_epoch = advance_epoch(&mut s, ¶ms); @@ -659,9 +775,9 @@ fn test_become_validator_aux( current_epoch = advance_epoch(&mut s, ¶ms); // Self-bond to the new validator + let staking_token = staking_token_address(&s); let amount = token::Amount::from(100_500_000); - credit_tokens(&mut s, &staking_token_address(), &new_validator, amount) - .unwrap(); + credit_tokens(&mut s, &staking_token, &new_validator, amount).unwrap(); bond_tokens(&mut s, None, &new_validator, amount, current_epoch).unwrap(); // Check the bond delta @@ -755,27 +871,61 @@ fn test_validator_sets() { max_validator_slots: 3, ..Default::default() }; - let seed = "seed"; - let mut address_gen = EstablishedAddressGen::new(seed); - let mut gen_validator = || address_gen.generate_address(seed); + let addr_seed = "seed"; + let mut address_gen = EstablishedAddressGen::new(addr_seed); + let mut sk_seed = 0; + let mut gen_validator = || { + let res = ( + address_gen.generate_address(addr_seed), + key::testing::common_sk_from_simple_seed(sk_seed).to_public(), + ); + // bump the sk seed + sk_seed += 1; + res + }; + + // A helper to insert a non-genesis validator + let insert_validator = |s: &mut TestWlStorage, + addr, + pk: &PublicKey, + stake: token::Amount, + epoch: Epoch| { + insert_validator_into_validator_set( + s, + ¶ms, + addr, + stake, + epoch, + params.pipeline_len, + ) + .unwrap(); + + update_validator_deltas(s, ¶ms, addr, stake.change(), epoch) + .unwrap(); + + // Set their consensus key (needed for + // `validator_set_update_tendermint` fn) + validator_consensus_key_handle(addr) + .set(s, pk.clone(), epoch, params.pipeline_len) + .unwrap(); + }; // Start with two genesis validators with 1 NAM stake let epoch = Epoch::default(); - let pipeline_epoch = epoch + params.pipeline_len; - let pk1 = key::testing::keypair_1().to_public(); - let pk2 = key::testing::keypair_2().to_public(); - let (val1, stake1) = (gen_validator(), token::Amount::whole(1)); - let (val2, stake2) = (gen_validator(), token::Amount::whole(1)); - let (val3, stake3) = (gen_validator(), token::Amount::whole(10)); - let (val4, stake4) = (gen_validator(), token::Amount::whole(100)); - let (val5, stake5) = (gen_validator(), token::Amount::whole(1)); - let (val6, stake6) = (gen_validator(), token::Amount::whole(1)); - println!("val1: {val1}, {stake1}"); - println!("val2: {val2}, {stake2}"); - println!("val3: {val3}, {stake3}"); - println!("val4: {val4}, {stake4}"); - println!("val5: {val5}, {stake5}"); - println!("val6: {val6}, {stake6}"); + let ((val1, pk1), stake1) = (gen_validator(), token::Amount::whole(1)); + let ((val2, pk2), stake2) = (gen_validator(), token::Amount::whole(1)); + let ((val3, pk3), stake3) = (gen_validator(), token::Amount::whole(10)); + let ((val4, pk4), stake4) = (gen_validator(), token::Amount::whole(1)); + let ((val5, pk5), stake5) = (gen_validator(), token::Amount::whole(100)); + let ((val6, pk6), stake6) = (gen_validator(), token::Amount::whole(1)); + let ((val7, pk7), stake7) = (gen_validator(), token::Amount::whole(1)); + println!("val1: {val1}, {pk1}, {stake1}"); + println!("val2: {val2}, {pk2}, {stake2}"); + println!("val3: {val3}, {pk3}, {stake3}"); + 
println!("val4: {val4}, {pk4}, {stake4}"); + println!("val5: {val5}, {pk5}, {stake5}"); + println!("val6: {val6}, {pk6}, {stake6}"); + println!("val7: {val7}, {pk7}, {stake7}"); init_genesis( &mut s, @@ -784,14 +934,14 @@ fn test_validator_sets() { GenesisValidator { address: val1.clone(), tokens: stake1, - consensus_key: pk1, + consensus_key: pk1.clone(), commission_rate: Decimal::new(1, 1), max_commission_rate_change: Decimal::new(1, 1), }, GenesisValidator { address: val2.clone(), tokens: stake2, - consensus_key: pk2, + consensus_key: pk2.clone(), commission_rate: Decimal::new(1, 1), max_commission_rate_change: Decimal::new(1, 1), }, @@ -801,19 +951,26 @@ fn test_validator_sets() { ) .unwrap(); + // Advance to EPOCH 1 + // + // We cannot call `get_tendermint_set_updates` for the genesis state as + // `validator_set_update_tendermint` is only called 2 blocks before the + // start of an epoch and so we need to give it a predecessor epoch (see + // `get_tendermint_set_updates`), which we cannot have on the first + // epoch. In any way, the initial validator set is given to Tendermint + // from InitChain, so `validator_set_update_tendermint` is + // not being used for it. + let epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = epoch + params.pipeline_len; + // Insert another validator with the greater stake 10 NAM - insert_validator_into_validator_set( - &mut s, - ¶ms, - &val3, - stake3, - epoch, - params.pipeline_len, - ) - .unwrap(); - // Update deltas as they are needed for validator set updates - update_validator_deltas(&mut s, ¶ms, &val3, stake3.change(), epoch) - .unwrap(); + insert_validator(&mut s, &val3, &pk3, stake3, epoch); + // Insert validator with stake 1 NAM + insert_validator(&mut s, &val4, &pk4, stake4, epoch); + + // Validator `val3` and `val4` will be added at pipeline offset (2) - epoch + // 3 + let val3_and_4_epoch = pipeline_epoch; let consensus_vals: Vec<_> = consensus_validator_set_handle() .at(&pipeline_epoch) @@ -848,20 +1005,20 @@ fn test_validator_sets() { if address == &val3 && stake == &stake3 && *position == Position(0) )); + // Check tendermint validator set updates - there should be none + let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); + assert!(tm_updates.is_empty()); + + // Advance to EPOCH 2 + let epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = epoch + params.pipeline_len; + // Insert another validator with a greater stake still 1000 NAM. 
-    insert_validator_into_validator_set(
-        &mut s,
-        &params,
-        &val4,
-        stake4,
-        epoch,
-        params.pipeline_len,
-    )
-    .unwrap();
-    update_validator_deltas(&mut s, &params, &val4, stake4.change(), epoch)
-        .unwrap();
+    insert_validator(&mut s, &val5, &pk5, stake5, epoch);
+    // Validator `val5` will be added at pipeline offset (2) - epoch 4
+    let val5_epoch = pipeline_epoch;
 
     let consensus_vals: Vec<_> = consensus_validator_set_handle()
         .at(&pipeline_epoch)
@@ -893,7 +1050,7 @@ fn test_validator_sets() {
             key: stake,
             nested_sub_key: lazy_map::SubKey::Data(position),
         }, address)
-        if address == &val4 && stake == &stake4 && *position == Position(0)
+        if address == &val5 && stake == &stake5 && *position == Position(0)
     ));
 
     let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle()
@@ -903,29 +1060,49 @@
         .map(Result::unwrap)
         .collect();
 
-    assert_eq!(below_capacity_vals.len(), 1);
+    assert_eq!(below_capacity_vals.len(), 2);
     assert!(matches!(
         &below_capacity_vals[0],
         (lazy_map::NestedSubKey::Data {
             key: ReverseOrdTokenAmount(stake),
             nested_sub_key: lazy_map::SubKey::Data(position),
         }, address)
-        if address == &val2 && stake == &stake2 && *position == Position(0)
+        if address == &val4 && stake == &stake4 && *position == Position(0)
     ));
+    assert!(matches!(
+        &below_capacity_vals[1],
+        (lazy_map::NestedSubKey::Data {
+            key: ReverseOrdTokenAmount(stake),
+            nested_sub_key: lazy_map::SubKey::Data(position),
+        }, address)
+        if address == &val2 && stake == &stake2 && *position == Position(1)
+    ));
+
+    // Advance to EPOCH 3
+    let epoch = advance_epoch(&mut s, &params);
+    let pipeline_epoch = epoch + params.pipeline_len;
+
+    // Check tendermint validator set updates
+    assert_eq!(
+        val3_and_4_epoch, epoch,
+        "val3 and val4 are in the validator sets now"
+    );
+    let tm_updates = get_tendermint_set_updates(&s, &params, epoch);
+    // `val4` is newly added below-capacity, so it must be skipped in the
+    // updates sent to TM
+    assert_eq!(tm_updates.len(), 1);
+    assert_eq!(
+        tm_updates[0],
+        ValidatorSetUpdate::Consensus(ConsensusValidator {
+            consensus_key: pk3,
+            bonded_stake: stake3.into(),
+        })
+    );
+
+    // Insert another validator with a stake of 1 NAM.
It should be added to the // below-capacity set - insert_validator_into_validator_set( - &mut s, - ¶ms, - &val5, - stake5, - epoch, - params.pipeline_len, - ) - .unwrap(); - update_validator_deltas(&mut s, ¶ms, &val5, stake5.change(), epoch) - .unwrap(); + insert_validator(&mut s, &val6, &pk6, stake6, epoch); + // Validator `val6` will be added at pipeline offset (2) - epoch 5 + let val6_epoch = pipeline_epoch; let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() .at(&pipeline_epoch) @@ -934,14 +1111,14 @@ fn test_validator_sets() { .map(Result::unwrap) .collect(); - assert_eq!(below_capacity_vals.len(), 2); + assert_eq!(below_capacity_vals.len(), 3); assert!(matches!( &below_capacity_vals[0], (lazy_map::NestedSubKey::Data { key: ReverseOrdTokenAmount(stake), nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == &val2 && stake == &stake2 && *position == Position(0) + if address == &val4 && stake == &stake4 && *position == Position(0) )); assert!(matches!( &below_capacity_vals[1], @@ -949,8 +1126,33 @@ fn test_validator_sets() { key: ReverseOrdTokenAmount(stake), nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == &val5 && stake == &stake5 && *position == Position(1) + if address == &val2 && stake == &stake2 && *position == Position(1) )); + assert!(matches!( + &below_capacity_vals[2], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val6 && stake == &stake6 && *position == Position(2) + )); + + // Advance to EPOCH 4 + let epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = epoch + params.pipeline_len; + + // Check tendermint validator set updates + assert_eq!(val5_epoch, epoch, "val5 is in the validator sets now"); + let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); + assert_eq!(tm_updates.len(), 2); + assert_eq!( + tm_updates[0], + ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: pk5, + bonded_stake: stake5.into(), + }) + ); + assert_eq!(tm_updates[1], ValidatorSetUpdate::Deactivated(pk2)); // Unbond some stake from val1, it should be be swapped with the greatest // below-capacity validator val2 into the below-capacity set @@ -964,6 +1166,8 @@ fn test_validator_sets() { .unwrap(); update_validator_deltas(&mut s, ¶ms, &val1, -unbond.change(), epoch) .unwrap(); + // Epoch 6 + let val1_unbond_epoch = pipeline_epoch; let consensus_vals: Vec<_> = consensus_validator_set_handle() .at(&pipeline_epoch) @@ -979,7 +1183,7 @@ fn test_validator_sets() { key: stake, nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == &val2 && stake == &stake2 && *position == Position(0) + if address == &val4 && stake == &stake4 && *position == Position(0) )); assert!(matches!( &consensus_vals[1], @@ -995,7 +1199,7 @@ fn test_validator_sets() { key: stake, nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == &val4 && stake == &stake4 && *position == Position(0) + if address == &val5 && stake == &stake5 && *position == Position(0) )); let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() @@ -1005,17 +1209,25 @@ fn test_validator_sets() { .map(Result::unwrap) .collect(); - assert_eq!(below_capacity_vals.len(), 2); + assert_eq!(below_capacity_vals.len(), 3); assert!(matches!( &below_capacity_vals[0], (lazy_map::NestedSubKey::Data { key: ReverseOrdTokenAmount(stake), nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == 
&val5 && stake == &stake5 && *position == Position(1) + if address == &val2 && stake == &stake2 && *position == Position(1) )); assert!(matches!( &below_capacity_vals[1], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val6 && stake == &stake6 && *position == Position(2) + )); + assert!(matches!( + &below_capacity_vals[2], ( lazy_map::NestedSubKey::Data { key: ReverseOrdTokenAmount(stake), nested_sub_key: lazy_map::SubKey::Data(position), @@ -1026,18 +1238,20 @@ if address == &val1 && stake == &stake1 && *position == Position(0) )); - // Insert another validator with stake 1 - it should be added after val1 - insert_validator_into_validator_set( - &mut s, - &params, - &val6, - stake6, - epoch, - params.pipeline_len, - ) - .unwrap(); - update_validator_deltas(&mut s, &params, &val6, stake6.change(), epoch) - .unwrap(); + // Advance to EPOCH 5 + let epoch = advance_epoch(&mut s, &params); + let pipeline_epoch = epoch + params.pipeline_len; + + // Check tendermint validator set updates + assert_eq!(val6_epoch, epoch, "val6 is in the validator sets now"); + let tm_updates = get_tendermint_set_updates(&s, &params, epoch); + assert!(tm_updates.is_empty()); + + // Insert another validator with stake 1 - it should be added to below + // capacity set after val1 + insert_validator(&mut s, &val7, &pk7, stake7, epoch); + // Epoch 7 + let val7_epoch = pipeline_epoch; let consensus_vals: Vec<_> = consensus_validator_set_handle() .at(&pipeline_epoch) @@ -1053,7 +1267,7 @@ fn test_validator_sets() { key: stake, nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == &val2 && stake == &stake2 && *position == Position(0) + if address == &val4 && stake == &stake4 && *position == Position(0) )); assert!(matches!( &consensus_vals[1], @@ -1069,7 +1283,7 @@ key: stake, nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == &val4 && stake == &stake4 && *position == Position(0) + if address == &val5 && stake == &stake5 && *position == Position(0) )); let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() .at(&pipeline_epoch) @@ -1079,14 +1293,14 @@ .map(Result::unwrap) .collect(); - assert_eq!(below_capacity_vals.len(), 3); + assert_eq!(below_capacity_vals.len(), 4); assert!(matches!( &below_capacity_vals[0], (lazy_map::NestedSubKey::Data { key: ReverseOrdTokenAmount(stake), nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == &val5 && stake == &stake5 && *position == Position(1) + if address == &val2 && stake == &stake2 && *position == Position(1) )); assert!(matches!( &below_capacity_vals[1], @@ -1105,17 +1319,46 @@ }, address ) + if address == &val7 && stake == &stake7 && *position == Position(3) + )); + assert!(matches!( + &below_capacity_vals[3], + ( + lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, + address + ) if address == &val1 && stake == &stake1 && *position == Position(0) )); - // Bond some stake to val5, it should be swapped with the lowest + // Advance to EPOCH 6 + let epoch = advance_epoch(&mut s, &params); + let pipeline_epoch = epoch + params.pipeline_len; + + // Check tendermint validator set updates + assert_eq!(val1_unbond_epoch, epoch, "val1's unbond is applied now"); + let tm_updates = get_tendermint_set_updates(&s, &params, epoch); + assert_eq!(tm_updates.len(), 2); + assert_eq!( + tm_updates[0], +
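// val1's unbond reaches its pipeline epoch here: val4 moves up into the + // consensus set while val1 drops out, so tendermint gets exactly one + // `Consensus` update (pk4) and one `Deactivated` update (pk1): +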
ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: pk4.clone(), + bonded_stake: stake4.into(), + }) + ); + assert_eq!(tm_updates[1], ValidatorSetUpdate::Deactivated(pk1)); + + // Bond some stake to val6, it should be swapped with the lowest // consensus validator val2 into the consensus set let bond = token::Amount::from(500_000); - let stake5 = stake5 + bond; - println!("val5 {val5} new stake {stake5}"); - update_validator_set(&mut s, &params, &val5, bond.change(), epoch).unwrap(); - update_validator_deltas(&mut s, &params, &val5, bond.change(), epoch) + let stake6 = stake6 + bond; + println!("val6 {val6} new stake {stake6}"); + update_validator_set(&mut s, &params, &val6, bond.change(), epoch).unwrap(); + update_validator_deltas(&mut s, &params, &val6, bond.change(), epoch) .unwrap(); + let val6_bond_epoch = pipeline_epoch; let consensus_vals: Vec<_> = consensus_validator_set_handle() .at(&pipeline_epoch) @@ -1131,7 +1374,7 @@ fn test_validator_sets() { key: stake, nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == &val5 && stake == &stake5 && *position == Position(0) + if address == &val6 && stake == &stake6 && *position == Position(0) )); assert!(matches!( &consensus_vals[1], @@ -1147,7 +1390,7 @@ key: stake, nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == &val4 && stake == &stake4 && *position == Position(0) + if address == &val5 && stake == &stake5 && *position == Position(0) )); let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() @@ -1157,14 +1400,15 @@ .map(Result::unwrap) .collect(); - assert_eq!(below_capacity_vals.len(), 3); + assert_eq!(below_capacity_vals.len(), 4); + dbg!(&below_capacity_vals); assert!(matches!( &below_capacity_vals[0], (lazy_map::NestedSubKey::Data { key: ReverseOrdTokenAmount(stake), nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == &val6 && stake == &stake6 && *position == Position(2) + if address == &val2 && stake == &stake2 && *position == Position(1) )); assert!(matches!( &below_capacity_vals[1], @@ -1172,7 +1416,7 @@ key: ReverseOrdTokenAmount(stake), nested_sub_key: lazy_map::SubKey::Data(position), }, address) - if address == &val2 && stake == &stake2 && *position == Position(3) + if address == &val7 && stake == &stake7 && *position == Position(3) )); assert!(matches!( &below_capacity_vals[2], @@ -1183,8 +1427,233 @@ }, address ) + if address == &val4 && stake == &stake4 && *position == Position(4) + )); + assert!(matches!( + &below_capacity_vals[3], + ( + lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, + address + ) if address == &val1 && stake == &stake1 && *position == Position(0) )); + + // Advance to EPOCH 7 + let epoch = advance_epoch(&mut s, &params); + assert_eq!(val7_epoch, epoch, "val7 is in the validator sets now"); + + // Check tendermint validator set updates + let tm_updates = get_tendermint_set_updates(&s, &params, epoch); + assert!(tm_updates.is_empty()); + + // Advance to EPOCH 8 + let epoch = advance_epoch(&mut s, &params); + + // Check tendermint validator set updates + assert_eq!(val6_bond_epoch, epoch, "val6's bond is applied now"); + let tm_updates = get_tendermint_set_updates(&s, &params, epoch); + dbg!(&tm_updates); + assert_eq!(tm_updates.len(), 2); + assert_eq!( + tm_updates[0], + ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: pk6,
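+ // stake6 includes the 500_000 bond from epoch 6, which becomes + // visible to tendermint at its pipeline epoch (8)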
+ bonded_stake: stake6.into(), + }) + ); + assert_eq!(tm_updates[1], ValidatorSetUpdate::Deactivated(pk4)); +} + +/// When a consensus set validator with 0 voting power adds a bond in the same +/// epoch as a below-capacity set validator with 0 power that adds a larger +/// bond, the two validators are swapped in the sets. But if both of their new +/// voting powers are still 0 after bonding, the newly below-capacity validator +/// must not be given to tendermint with 0 voting power, because it wasn't in +/// tendermint's set before +#[test] +fn test_validator_sets_swap() { + let mut s = TestWlStorage::default(); + // Only 2 consensus validator slots + let params = PosParams { + max_validator_slots: 2, + // Set 0.1 votes per token + tm_votes_per_token: dec!(0.1), + ..Default::default() + }; + let addr_seed = "seed"; + let mut address_gen = EstablishedAddressGen::new(addr_seed); + let mut sk_seed = 0; + let mut gen_validator = || { + let res = ( + address_gen.generate_address(addr_seed), + key::testing::common_sk_from_simple_seed(sk_seed).to_public(), + ); + // bump the sk seed + sk_seed += 1; + res + }; + + // A helper to insert a non-genesis validator + let insert_validator = |s: &mut TestWlStorage, + addr, + pk: &PublicKey, + stake: token::Amount, + epoch: Epoch| { + insert_validator_into_validator_set( + s, + &params, + addr, + stake, + epoch, + params.pipeline_len, + ) + .unwrap(); + + update_validator_deltas(s, &params, addr, stake.change(), epoch) + .unwrap(); + + // Set their consensus key (needed for + // `validator_set_update_tendermint` fn) + validator_consensus_key_handle(addr) + .set(s, pk.clone(), epoch, params.pipeline_len) + .unwrap(); + }; + + // Start with two genesis validators, one with 1M voting power and the + // other with 0 + let epoch = Epoch::default(); + // 1M voting power + let ((val1, pk1), stake1) = (gen_validator(), token::Amount::whole(10)); + // 0 voting power + let ((val2, pk2), stake2) = (gen_validator(), token::Amount::from(5)); + // 0 voting power + let ((val3, pk3), stake3) = (gen_validator(), token::Amount::from(5)); + println!("val1: {val1}, {pk1}, {stake1}"); + println!("val2: {val2}, {pk2}, {stake2}"); + println!("val3: {val3}, {pk3}, {stake3}"); + + init_genesis( + &mut s, + &params, + [ + GenesisValidator { + address: val1, + tokens: stake1, + consensus_key: pk1, + commission_rate: Decimal::new(1, 1), + max_commission_rate_change: Decimal::new(1, 1), + }, + GenesisValidator { + address: val2.clone(), + tokens: stake2, + consensus_key: pk2, + commission_rate: Decimal::new(1, 1), + max_commission_rate_change: Decimal::new(1, 1), + }, + ] + .into_iter(), + epoch, + ) + .unwrap(); + + // Advance to EPOCH 1 + let epoch = advance_epoch(&mut s, &params); + let pipeline_epoch = epoch + params.pipeline_len; + + // Insert another validator with 0 voting power + insert_validator(&mut s, &val3, &pk3, stake3, epoch); + + assert_eq!(stake2, stake3); + + // Add 2 bonds, one for val2 and a greater one for val3 + let bonds_epoch_1 = pipeline_epoch; + let bond2 = token::Amount::from(1); + let stake2 = stake2 + bond2; + let bond3 = token::Amount::from(4); + let stake3 = stake3 + bond3; + + assert!(stake2 < stake3); + assert_eq!(into_tm_voting_power(params.tm_votes_per_token, stake2), 0); + assert_eq!(into_tm_voting_power(params.tm_votes_per_token, stake3), 0); + + update_validator_set(&mut s, &params, &val2, bond2.change(), epoch) + .unwrap(); + update_validator_deltas(&mut s, &params, &val2, bond2.change(), epoch) + .unwrap(); + + update_validator_set(&mut s, &params, &val3,
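+ // Both stakes remain below one vote even after these bonds: with + // `tm_votes_per_token = 0.1`, stake2 = 6 and stake3 = 9 micro units give + // `into_tm_voting_power` of 0 (asserted above), so the swap stays + // invisible to tendermint. +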
bond3.change(), epoch) + .unwrap(); + update_validator_deltas(&mut s, &params, &val3, bond3.change(), epoch) + .unwrap(); + + // Advance to EPOCH 2 + let epoch = advance_epoch(&mut s, &params); + let pipeline_epoch = epoch + params.pipeline_len; + + // Add 2 more bonds of the same amount for `val2` and `val3` + let bonds_epoch_2 = pipeline_epoch; + let bonds = token::Amount::whole(1); + let stake2 = stake2 + bonds; + let stake3 = stake3 + bonds; + assert!(stake2 < stake3); + assert_eq!( + into_tm_voting_power(params.tm_votes_per_token, stake2), + into_tm_voting_power(params.tm_votes_per_token, stake3) + ); + + update_validator_set(&mut s, &params, &val2, bonds.change(), epoch) + .unwrap(); + update_validator_deltas(&mut s, &params, &val2, bonds.change(), epoch) + .unwrap(); + + update_validator_set(&mut s, &params, &val3, bonds.change(), epoch) + .unwrap(); + update_validator_deltas(&mut s, &params, &val3, bonds.change(), epoch) + .unwrap(); + + // Advance to EPOCH 3 + let epoch = advance_epoch(&mut s, &params); + + // Check tendermint validator set updates + assert_eq!(bonds_epoch_1, epoch); + let tm_updates = get_tendermint_set_updates(&s, &params, epoch); + // `val2` must not be given to tendermint - even though it was in the + // consensus set, its voting power was 0, so it wasn't in the TM set before + // the bond + assert!(tm_updates.is_empty()); + + // Advance to EPOCH 4 + let epoch = advance_epoch(&mut s, &params); + + // Check tendermint validator set updates + assert_eq!(bonds_epoch_2, epoch); + let tm_updates = get_tendermint_set_updates(&s, &params, epoch); + dbg!(&tm_updates); + assert_eq!(tm_updates.len(), 1); + // `val2` must not be given to tendermint as it was and still is below + // capacity + assert_eq!( + tm_updates[0], + ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: pk3, + bonded_stake: stake3.into(), + }) + ); +} + +fn get_tendermint_set_updates( + s: &TestWlStorage, + params: &PosParams, + Epoch(epoch): Epoch, +) -> Vec<ValidatorSetUpdate> { + // Because `validator_set_update_tendermint` is called 2 blocks before the + // start of a new epoch, it expects to receive the epoch that precedes the + // one being updated, so we pass the predecessor of the current epoch here + // to get the update for the current epoch. + let epoch = Epoch(epoch - 1); + validator_set_update_tendermint(s, params, epoch, |update| update).unwrap() } /// Advance to the next epoch. Returns the new epoch. @@ -1206,7 +1675,7 @@ fn arb_genesis_validators( size: Range<usize>, ) -> impl Strategy<Value = Vec<GenesisValidator>> { let tokens: Vec<_> = (0..size.end) - .map(|_| (1..=1_000_000_000_000_u64).prop_map(token::Amount::from)) + .map(|_| (1..=10_u64).prop_map(token::Amount::from)) .collect(); (size, tokens).prop_map(|(size, token_amounts)| { // use unique seeds to generate validators' address and consensus key diff --git a/proof_of_stake/src/tests/state_machine.rs b/proof_of_stake/src/tests/state_machine.rs new file mode 100644 index 00000000000..9d55c3004c6 --- /dev/null +++ b/proof_of_stake/src/tests/state_machine.rs @@ -0,0 +1,1189 @@ +//!
Test PoS transitions with a state machine + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; + +use itertools::Itertools; +use namada_core::ledger::storage::testing::TestWlStorage; +use namada_core::ledger::storage_api::{token, StorageRead}; +use namada_core::types::address::{self, Address}; +use namada_core::types::key; +use namada_core::types::key::common::PublicKey; +use namada_core::types::storage::Epoch; +use proptest::prelude::*; +use proptest::prop_state_machine; +use proptest::state_machine::{ReferenceStateMachine, StateMachineTest}; +use proptest::test_runner::Config; +use rust_decimal::Decimal; +// Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to see +// `tracing` logs from tests +use test_log::test; + +use super::arb_genesis_validators; +use crate::parameters::testing::{arb_pos_params, arb_rate}; +use crate::parameters::PosParams; +use crate::types::{ + BondId, GenesisValidator, ReverseOrdTokenAmount, ValidatorState, + WeightedValidator, +}; + +prop_state_machine! { + #![proptest_config(Config { + cases: 5, + verbose: 1, + .. Config::default() + })] + #[test] + /// A `StateMachineTest` implemented on `PosState` + fn pos_state_machine_test(sequential 1..200 => ConcretePosState); +} + +/// Abstract representation of a state of the PoS system +#[derive(Clone, Debug)] +struct AbstractPosState { + /// Current epoch + epoch: Epoch, + /// Parameters + params: PosParams, + /// Genesis validators + genesis_validators: Vec<GenesisValidator>, + /// Bonds delta values. The outer `Epoch` key is at the pipeline offset + /// from the epoch in which the bond is applied + bonds: BTreeMap<Epoch, BTreeMap<BondId, token::Change>>, + /// Validator stakes delta values (sum of all their bonds deltas). + /// Pipelined. + total_stakes: BTreeMap<Epoch, BTreeMap<Address, token::Change>>, + /// Consensus validator set. Pipelined. + consensus_set: BTreeMap<Epoch, BTreeMap<token::Amount, VecDeque<Address>>>, + /// Below-capacity validator set. Pipelined. + below_capacity_set: + BTreeMap<Epoch, BTreeMap<ReverseOrdTokenAmount, VecDeque<Address>>>, + /// Validator states. Pipelined. + validator_states: BTreeMap<Epoch, BTreeMap<Address, ValidatorState>>, + /// Unbonded bonds. The outer `Epoch` key is at the pipeline + unbonding + /// offset from the epoch in which the unbond is applied.
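+ /// For example, with `pipeline_len = 2` and `unbonding_len = 3`, an unbond + /// applied at epoch 5 is keyed here under epoch 5 + 2 + 3 + 1 = 11 (see the + /// `Transition::Unbond` arm of `apply` below).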
+ unbonds: BTreeMap<Epoch, BTreeMap<BondId, token::Amount>>, +} + +/// The PoS system under test +#[derive(Debug)] +struct ConcretePosState { + /// Storage - contains all the PoS state + s: TestWlStorage, +} + +/// State machine transitions +#[allow(clippy::large_enum_variant)] +// TODO: remove once all the transitions are being covered +#[allow(dead_code)] +#[derive(Clone, Debug)] +enum Transition { + NextEpoch, + InitValidator { + address: Address, + consensus_key: PublicKey, + commission_rate: Decimal, + max_commission_rate_change: Decimal, + }, + Bond { + id: BondId, + amount: token::Amount, + }, + Unbond { + id: BondId, + amount: token::Amount, + }, + Withdraw { + id: BondId, + }, +} + +impl StateMachineTest for ConcretePosState { + type Reference = AbstractPosState; + type SystemUnderTest = Self; + + fn init_test( + initial_state: &<Self::Reference as ReferenceStateMachine>::State, + ) -> Self::SystemUnderTest { + println!(); + println!("New test case"); + println!( + "Genesis validators: {:#?}", + initial_state + .genesis_validators + .iter() + .map(|val| &val.address) + .collect::<Vec<_>>() + ); + let mut s = TestWlStorage::default(); + crate::init_genesis( + &mut s, + &initial_state.params, + initial_state.genesis_validators.clone().into_iter(), + initial_state.epoch, + ) + .unwrap(); + Self { s } + } + + fn apply( + mut state: Self::SystemUnderTest, + _ref_state: &<Self::Reference as ReferenceStateMachine>::State, + transition: <Self::Reference as ReferenceStateMachine>::Transition, + ) -> Self::SystemUnderTest { + let params = crate::read_pos_params(&state.s).unwrap(); + match transition { + Transition::NextEpoch => { + super::advance_epoch(&mut state.s, &params); + + state.check_next_epoch_post_conditions(&params); + } + Transition::InitValidator { + address, + consensus_key, + commission_rate, + max_commission_rate_change, + } => { + let epoch = state.current_epoch(); + + super::become_validator( + &mut state.s, + &params, + &address, + &consensus_key, + epoch, + commission_rate, + max_commission_rate_change, + ) + .unwrap(); + + state.check_init_validator_post_conditions( + epoch, &params, &address, + ) + } + Transition::Bond { id, amount } => { + let epoch = state.current_epoch(); + let pipeline = epoch + params.pipeline_len; + let validator_stake_before_bond_cur = + crate::read_validator_stake( + &state.s, + &params, + &id.validator, + epoch, + ) + .unwrap() + .unwrap_or_default(); + let validator_stake_before_bond_pipeline = + crate::read_validator_stake( + &state.s, + &params, + &id.validator, + pipeline, + ) + .unwrap() + .unwrap_or_default(); + + // Credit tokens to ensure we can apply the bond + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + token::credit_tokens( + &mut state.s, + &native_token, + &id.source, + amount, + ) + .unwrap(); + + let src_balance_pre = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // This must be ensured by both transitions generator and + // pre-conditions!
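+ // (The `Bond` transition is only generated for an id whose + // validator passes `is_validator` at the pipeline epoch - see + // `preconditions` in the reference state machine below; this + // assert re-checks the same against the concrete state.)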
+ assert!( + crate::is_validator( + &state.s, + &id.validator, + &params, + pipeline, + ) + .unwrap(), + "{} is not a validator", + id.validator + ); + + // Apply the bond + super::bond_tokens( + &mut state.s, + Some(&id.source), + &id.validator, + amount, + epoch, + ) + .unwrap(); + + state.check_bond_post_conditions( + epoch, + &params, + id.clone(), + amount, + validator_stake_before_bond_cur, + validator_stake_before_bond_pipeline, + ); + + let src_balance_post = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // Post-condition: PoS balance should increase + assert!(pos_balance_pre < pos_balance_post); + // Post-condition: The difference in PoS balance should be the + // same as in the source + assert_eq!( + pos_balance_post - pos_balance_pre, + src_balance_pre - src_balance_post + ); + } + Transition::Unbond { id, amount } => { + let epoch = state.current_epoch(); + let pipeline = epoch + params.pipeline_len; + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + let src_balance_pre = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + let validator_stake_before_bond_cur = + crate::read_validator_stake( + &state.s, + &params, + &id.validator, + epoch, + ) + .unwrap() + .unwrap_or_default(); + let validator_stake_before_bond_pipeline = + crate::read_validator_stake( + &state.s, + &params, + &id.validator, + pipeline, + ) + .unwrap() + .unwrap_or_default(); + + // Apply the unbond + super::unbond_tokens( + &mut state.s, + Some(&id.source), + &id.validator, + amount, + epoch, + ) + .unwrap(); + + state.check_unbond_post_conditions( + epoch, + &params, + id.clone(), + amount, + validator_stake_before_bond_cur, + validator_stake_before_bond_pipeline, + ); + + let src_balance_post = + token::read_balance(&state.s, &native_token, &id.source) + .unwrap(); + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // Post-condition: PoS balance should not change + assert_eq!(pos_balance_pre, pos_balance_post); + // Post-condition: Source balance should not change + assert_eq!(src_balance_post, src_balance_pre); + } + Transition::Withdraw { + id: BondId { source, validator }, + } => { + let epoch = state.current_epoch(); + let native_token = state.s.get_native_token().unwrap(); + let pos = address::POS; + let src_balance_pre = + token::read_balance(&state.s, &native_token, &source) + .unwrap(); + let pos_balance_pre = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // Apply the withdrawal + let withdrawn = super::withdraw_tokens( + &mut state.s, + Some(&source), + &validator, + epoch, + ) + .unwrap(); + + let src_balance_post = + token::read_balance(&state.s, &native_token, &source) + .unwrap(); + let pos_balance_post = + token::read_balance(&state.s, &native_token, &pos).unwrap(); + + // Post-condition: PoS balance should decrease or not change if + // nothing was withdrawn + assert!(pos_balance_pre >= pos_balance_post); + // Post-condition: The difference in PoS balance should be the + // same as in the source + assert_eq!( + pos_balance_pre - pos_balance_post, + src_balance_post - src_balance_pre + ); + // Post-condition: The increment in source balance should be + // equal to the withdrawn amount + assert_eq!(src_balance_post - src_balance_pre, withdrawn); + } + } + state + } + + fn check_invariants(
_state: &Self::SystemUnderTest, + _ref_state: &<Self::Reference as ReferenceStateMachine>::State, + ) { + } +} + +impl ConcretePosState { + fn current_epoch(&self) -> Epoch { + self.s.storage.block.epoch + } + + fn check_next_epoch_post_conditions(&self, params: &PosParams) { + let pipeline = self.current_epoch() + params.pipeline_len; + let before_pipeline = pipeline - 1; + + // Post-condition: Consensus validator sets at pipeline offset + // must be the same as at the epoch before it. + let consensus_set_before_pipeline = + crate::read_consensus_validator_set_addresses_with_stake( + &self.s, + before_pipeline, + ) + .unwrap(); + let consensus_set_at_pipeline = + crate::read_consensus_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); + itertools::assert_equal( + consensus_set_before_pipeline.into_iter().sorted(), + consensus_set_at_pipeline.into_iter().sorted(), + ); + + // Post-condition: Below-capacity validator sets at pipeline + // offset must be the same as at the epoch before it. + let below_cap_before_pipeline = + crate::read_below_capacity_validator_set_addresses_with_stake( + &self.s, + before_pipeline, + ) + .unwrap(); + let below_cap_at_pipeline = + crate::read_below_capacity_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); + itertools::assert_equal( + below_cap_before_pipeline.into_iter().sorted(), + below_cap_at_pipeline.into_iter().sorted(), + ); + } + + fn check_bond_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + id: BondId, + amount: token::Amount, + validator_stake_before_bond_cur: token::Amount, + validator_stake_before_bond_pipeline: token::Amount, + ) { + let pipeline = submit_epoch + params.pipeline_len; + + let cur_stake = super::read_validator_stake( + &self.s, + params, + &id.validator, + submit_epoch, + ) + .unwrap() + .unwrap_or_default(); + + // Post-condition: the validator stake at the current epoch should not + // change + assert_eq!(cur_stake, validator_stake_before_bond_cur); + + let stake_at_pipeline = super::read_validator_stake( + &self.s, + params, + &id.validator, + pipeline, + ) + .unwrap() + .unwrap_or_default(); + + // Post-condition: the validator stake at the pipeline should be + // incremented by the bond amount + assert_eq!( + stake_at_pipeline, + validator_stake_before_bond_pipeline + amount + ); + + self.check_bond_and_unbond_post_conditions( + submit_epoch, + params, + id, + stake_at_pipeline, + ); + } + + fn check_unbond_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + id: BondId, + amount: token::Amount, + validator_stake_before_bond_cur: token::Amount, + validator_stake_before_bond_pipeline: token::Amount, + ) { + let pipeline = submit_epoch + params.pipeline_len; + + let cur_stake = super::read_validator_stake( + &self.s, + params, + &id.validator, + submit_epoch, + ) + .unwrap() + .unwrap_or_default(); + + // Post-condition: the validator stake at the current epoch should not + // change + assert_eq!(cur_stake, validator_stake_before_bond_cur); + + let stake_at_pipeline = super::read_validator_stake( + &self.s, + params, + &id.validator, + pipeline, + ) + .unwrap() + .unwrap_or_default(); + + // Post-condition: the validator stake at the pipeline should be + // decremented by the unbond amount + assert_eq!( + stake_at_pipeline, + validator_stake_before_bond_pipeline - amount + ); + + self.check_bond_and_unbond_post_conditions( + submit_epoch, + params, + id, + stake_at_pipeline, + ); + } + + /// These post-conditions apply to bonding and unbonding + fn
check_bond_and_unbond_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + id: BondId, + stake_at_pipeline: token::Amount, + ) { + let pipeline = submit_epoch + params.pipeline_len; + // Read the consensus sets data using iterator + let consensus_set = crate::consensus_validator_set_handle() + .at(&pipeline) + .iter(&self.s) + .unwrap() + .map(|res| res.unwrap()) + .collect::<Vec<_>>(); + let below_cap_set = crate::below_capacity_validator_set_handle() + .at(&pipeline) + .iter(&self.s) + .unwrap() + .map(|res| res.unwrap()) + .collect::<Vec<_>>(); + let num_occurrences = consensus_set + .iter() + .filter(|(_keys, addr)| addr == &id.validator) + .count() + + below_cap_set + .iter() + .filter(|(_keys, addr)| addr == &id.validator) + .count(); + + // Post-condition: There must only be one instance of this validator + // with some stake across all validator sets + assert!(num_occurrences == 1); + + let consensus_set = + crate::read_consensus_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); + let below_cap_set = + crate::read_below_capacity_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); + let weighted = WeightedValidator { + bonded_stake: stake_at_pipeline, + address: id.validator, + }; + let consensus_val = consensus_set.get(&weighted); + let below_cap_val = below_cap_set.get(&weighted); + + // Post-condition: The validator should be updated in exactly one of + // the validator sets + assert!(consensus_val.is_some() ^ below_cap_val.is_some()); + + // Post-condition: The stake of the validators in the consensus set is + // greater than or equal to that of the below-capacity validators + for WeightedValidator { + bonded_stake: consensus_stake, + address: consensus_addr, + } in consensus_set.iter() + { + for WeightedValidator { + bonded_stake: below_cap_stake, + address: below_cap_addr, + } in below_cap_set.iter() + { + assert!( + consensus_stake >= below_cap_stake, + "Consensus validator {consensus_addr} with stake \ + {consensus_stake} and below-capacity {below_cap_addr} \ + with stake {below_cap_stake} should be swapped."
+ ); + } + } + } + + fn check_init_validator_post_conditions( + &self, + submit_epoch: Epoch, + params: &PosParams, + address: &Address, + ) { + let pipeline = submit_epoch + params.pipeline_len; + + // Post-condition: the validator should not be in the validator set + // until the pipeline epoch + for epoch in submit_epoch.iter_range(params.pipeline_len) { + assert!( + !crate::read_all_validator_addresses(&self.s, epoch) + .unwrap() + .contains(address) + ); + } + let weighted = WeightedValidator { + bonded_stake: Default::default(), + address: address.clone(), + }; + let in_consensus = + crate::read_consensus_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap() + .contains(&weighted); + let in_bc = + crate::read_below_capacity_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap() + .contains(&weighted); + assert!(in_consensus ^ in_bc); + } +} + +impl ReferenceStateMachine for AbstractPosState { + type State = Self; + type Transition = Transition; + + fn init_state() -> BoxedStrategy<Self::State> { + (arb_pos_params(Some(5)), arb_genesis_validators(1..10)) + .prop_map(|(params, genesis_validators)| { + let epoch = Epoch::default(); + let mut state = Self { + epoch, + params, + genesis_validators: genesis_validators + .into_iter() + // Sorted by stake to fill in the consensus set first + .sorted_by(|a, b| Ord::cmp(&a.tokens, &b.tokens)) + .collect(), + bonds: Default::default(), + unbonds: Default::default(), + total_stakes: Default::default(), + consensus_set: Default::default(), + below_capacity_set: Default::default(), + validator_states: Default::default(), + }; + + for GenesisValidator { + address, + tokens, + consensus_key: _, + commission_rate: _, + max_commission_rate_change: _, + } in state.genesis_validators.clone() + { + let bonds = state.bonds.entry(epoch).or_default(); + bonds.insert( + BondId { + source: address.clone(), + validator: address.clone(), + }, + token::Change::from(tokens), + ); + + let total_stakes = + state.total_stakes.entry(epoch).or_default(); + total_stakes + .insert(address.clone(), token::Change::from(tokens)); + + let consensus_set = + state.consensus_set.entry(epoch).or_default(); + let consensus_vals_len = consensus_set + .iter() + .map(|(_stake, validators)| validators.len() as u64) + .sum(); + let deque = if state.params.max_validator_slots + > consensus_vals_len + { + state + .validator_states + .entry(epoch) + .or_default() + .insert(address.clone(), ValidatorState::Consensus); + consensus_set.entry(tokens).or_default() + } else { + state + .validator_states + .entry(epoch) + .or_default() + .insert( + address.clone(), + ValidatorState::BelowCapacity, + ); + let below_cap_set = + state.below_capacity_set.entry(epoch).or_default(); + below_cap_set + .entry(ReverseOrdTokenAmount(tokens)) + .or_default() + }; + deque.push_back(address) + } + // Ensure that below-capacity set is initialized even if empty + state.below_capacity_set.entry(epoch).or_default(); + + // Copy validator sets up to pipeline epoch + for epoch in epoch.next().iter_range(state.params.pipeline_len) + { + state.copy_discrete_epoched_data(epoch) + } + + state + }) + .boxed() + } + + fn transitions(state: &Self::State) -> BoxedStrategy<Self::Transition> { + let unbondable = state.bond_sums().into_iter().collect::<Vec<_>>(); + let withdrawable = + state.withdrawable_unbonds().into_iter().collect::<Vec<_>>(); + + // Transitions that can be applied if there are no bonds and unbonds + let basic = prop_oneof![ + Just(Transition::NextEpoch), + add_arb_bond_amount(state), + arb_delegation(state), +
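// Generate an `InitValidator` transition from an arbitrary established + // address, a fresh consensus key and arbitrary commission rates: +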
( + address::testing::arb_established_address(), + key::testing::arb_common_keypair(), + arb_rate(), + arb_rate(), + ) + .prop_map( + |( + addr, + consensus_key, + commission_rate, + max_commission_rate_change, + )| { + Transition::InitValidator { + address: Address::Established(addr), + consensus_key: consensus_key.to_public(), + commission_rate, + max_commission_rate_change, + } + }, + ), + ]; + + if unbondable.is_empty() { + basic.boxed() + } else { + let arb_unbondable = prop::sample::select(unbondable); + let arb_unbond = + arb_unbondable.prop_flat_map(|(id, deltas_sum)| { + // Generate an amount to unbond, up to the sum + assert!(deltas_sum > 0); + (0..deltas_sum).prop_map(move |to_unbond| { + let id = id.clone(); + let amount = token::Amount::from_change(to_unbond); + Transition::Unbond { id, amount } + }) + }); + + if withdrawable.is_empty() { + prop_oneof![basic, arb_unbond].boxed() + } else { + let arb_withdrawable = prop::sample::select(withdrawable); + let arb_withdrawal = arb_withdrawable + .prop_map(|(id, _)| Transition::Withdraw { id }); + + prop_oneof![basic, arb_unbond, arb_withdrawal].boxed() + } + } + } + + fn apply( + mut state: Self::State, + transition: &Self::Transition, + ) -> Self::State { + match transition { + Transition::NextEpoch => { + state.epoch = state.epoch.next(); + + // Copy the non-delta data into pipeline epoch from its pred. + state.copy_discrete_epoched_data( + state.epoch + state.params.pipeline_len, + ); + } + Transition::InitValidator { + address, + consensus_key: _, + commission_rate: _, + max_commission_rate_change: _, + } => { + // Insert into validator set at pipeline + let pipeline = state.pipeline(); + let consensus_set = + state.consensus_set.entry(pipeline).or_default(); + + let consensus_vals_len = consensus_set + .iter() + .map(|(_stake, validators)| validators.len() as u64) + .sum(); + + let deque = if state.params.max_validator_slots + > consensus_vals_len + { + state + .validator_states + .entry(pipeline) + .or_default() + .insert(address.clone(), ValidatorState::Consensus); + consensus_set.entry(token::Amount::default()).or_default() + } else { + state + .validator_states + .entry(pipeline) + .or_default() + .insert(address.clone(), ValidatorState::BelowCapacity); + let below_cap_set = + state.below_capacity_set.entry(pipeline).or_default(); + below_cap_set + .entry(ReverseOrdTokenAmount(token::Amount::default())) + .or_default() + }; + deque.push_back(address.clone()); + } + Transition::Bond { id, amount } => { + let change = token::Change::from(*amount); + state.update_bond(id, change); + state.update_validator_total_stake(&id.validator, change); + state.update_validator_sets(&id.validator, change); + } + Transition::Unbond { id, amount } => { + let change = -token::Change::from(*amount); + state.update_bond(id, change); + state.update_validator_total_stake(&id.validator, change); + state.update_validator_sets(&id.validator, change); + + let withdrawal_epoch = state.epoch + + state.params.pipeline_len + + state.params.unbonding_len + + 1_u64; + let unbonds = + state.unbonds.entry(withdrawal_epoch).or_default(); + let unbond = unbonds.entry(id.clone()).or_default(); + *unbond += *amount; + } + Transition::Withdraw { id } => { + // Remove all withdrawable unbonds with this bond ID + for (epoch, unbonds) in state.unbonds.iter_mut() { + if *epoch <= state.epoch { + unbonds.remove(id); + } + } + // Remove any epochs that have no unbonds left + state.unbonds.retain(|_epoch, unbonds| !unbonds.is_empty()); + } + } + state + } + + fn 
preconditions( + state: &Self::State, + transition: &Self::Transition, + ) -> bool { + match transition { + Transition::NextEpoch => true, + Transition::InitValidator { + address, + consensus_key: _, + commission_rate: _, + max_commission_rate_change: _, + } => { + let pipeline = state.epoch + state.params.pipeline_len; + // The address must not belong to an existing validator + !state.is_validator(address, pipeline) && + // There must be no delegations from this address + !state.bond_sums().into_iter().any(|(id, _sum)| + &id.source == address) + } + Transition::Bond { id, amount: _ } => { + let pipeline = state.epoch + state.params.pipeline_len; + // The validator must be known + state.is_validator(&id.validator, pipeline) + && (id.validator == id.source + // If it's not a self-bond, the source must not be a validator + || !state.is_validator(&id.source, pipeline)) + } + Transition::Unbond { id, amount } => { + let pipeline = state.epoch + state.params.pipeline_len; + + let is_unbondable = state + .bond_sums() + .get(id) + .map(|sum| *sum >= token::Change::from(*amount)) + .unwrap_or_default(); + + // The validator must be known + state.is_validator(&id.validator, pipeline) + // The amount must be available to unbond + && is_unbondable + } + Transition::Withdraw { id } => { + let pipeline = state.epoch + state.params.pipeline_len; + + let is_withdrawable = state + .withdrawable_unbonds() + .get(id) + .map(|amount| *amount >= token::Amount::default()) + .unwrap_or_default(); + + // The validator must be known + state.is_validator(&id.validator, pipeline) + // The amount must be available to withdraw + && is_withdrawable + } + } + } +} + +impl AbstractPosState { + /// Copy validator sets and validator states at the given epoch from its + /// predecessor + fn copy_discrete_epoched_data(&mut self, epoch: Epoch) { + let prev_epoch = Epoch(epoch.0 - 1); + // Copy the non-delta data from the last epoch into the new one + self.consensus_set.insert( + epoch, + self.consensus_set.get(&prev_epoch).unwrap().clone(), + ); + self.below_capacity_set.insert( + epoch, + self.below_capacity_set.get(&prev_epoch).unwrap().clone(), + ); + self.validator_states.insert( + epoch, + self.validator_states.get(&prev_epoch).unwrap().clone(), + ); + } + + /// Update a bond with bonded or unbonded change + fn update_bond(&mut self, id: &BondId, change: token::Change) { + let bonds = self.bonds.entry(self.pipeline()).or_default(); + let bond = bonds.entry(id.clone()).or_default(); + *bond += change; + // Remove fully unbonded entries + if *bond == 0 { + bonds.remove(id); + } + } + + /// Update validator's total stake with bonded or unbonded change + fn update_validator_total_stake( + &mut self, + validator: &Address, + change: token::Change, + ) { + let total_stakes = self + .total_stakes + .entry(self.pipeline()) + .or_default() + .entry(validator.clone()) + .or_default(); + *total_stakes += change; + } + + /// Update validator in sets with bonded or unbonded change + fn update_validator_sets( + &mut self, + validator: &Address, + change: token::Change, + ) { + let pipeline = self.pipeline(); + let consensus_set = self.consensus_set.entry(pipeline).or_default(); + let below_cap_set = + self.below_capacity_set.entry(pipeline).or_default(); + let total_stakes = self.total_stakes.get(&pipeline).unwrap(); + let state = self + .validator_states + .get(&pipeline) + .unwrap() + .get(validator) + .unwrap(); + + let this_val_stake_pre = *total_stakes.get(validator).unwrap(); + let this_val_stake_post =
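// The posterior stake is the prior stake plus the signed change; the + // change is negative when unbonding +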
token::Amount::from_change(this_val_stake_pre + change); + let this_val_stake_pre = + token::Amount::from_change(*total_stakes.get(validator).unwrap()); + + match state { + ValidatorState::Consensus => { + // Remove from the prior stake + let vals = consensus_set.entry(this_val_stake_pre).or_default(); + vals.retain(|addr| addr != validator); + if vals.is_empty() { + consensus_set.remove(&this_val_stake_pre); + } + + // If unbonding, check the max below-cap validator's state if we + // need to do a swap + if change < token::Change::default() { + if let Some(mut max_below_cap) = below_cap_set.last_entry() + { + let max_below_cap_stake = *max_below_cap.key(); + if max_below_cap_stake.0 > this_val_stake_post { + // Swap this validator with the max below-cap + let vals = max_below_cap.get_mut(); + let first_val = vals.pop_front().unwrap(); + // Remove the key if there's nothing left + if vals.is_empty() { + below_cap_set.remove(&max_below_cap_stake); + } + // Do the swap + consensus_set + .entry(max_below_cap_stake.0) + .or_default() + .push_back(first_val); + below_cap_set + .entry(this_val_stake_post.into()) + .or_default() + .push_back(validator.clone()); + // And we're done here + return; + } + } + } + + // Insert with the posterior stake + consensus_set + .entry(this_val_stake_post) + .or_default() + .push_back(validator.clone()); + } + ValidatorState::BelowCapacity => { + // Remove from the prior stake + let vals = + below_cap_set.entry(this_val_stake_pre.into()).or_default(); + vals.retain(|addr| addr != validator); + if vals.is_empty() { + below_cap_set.remove(&this_val_stake_pre.into()); + } + + // If bonding, check the min consensus validator's state if we + // need to do a swap + if change >= token::Change::default() { + if let Some(mut min_below_cap) = consensus_set.last_entry() + { + let min_consensus_stake = *min_below_cap.key(); + if min_consensus_stake > this_val_stake_post { + // Swap this validator with the min consensus + let vals = min_below_cap.get_mut(); + let last_val = vals.pop_back().unwrap(); + // Remove the key if there's nothing left + if vals.is_empty() { + consensus_set.remove(&min_consensus_stake); + } + // Do the swap + below_cap_set + .entry(min_consensus_stake.into()) + .or_default() + .push_back(last_val); + consensus_set + .entry(this_val_stake_post) + .or_default() + .push_back(validator.clone()); + // And we're done here + return; + } + } + } + + // Insert with the posterior stake + below_cap_set + .entry(this_val_stake_post.into()) + .or_default() + .push_back(validator.clone()); + } + ValidatorState::Inactive => { + panic!("unexpected state") + } + } + } + + /// Get the pipeline epoch + fn pipeline(&self) -> Epoch { + self.epoch + self.params.pipeline_len + } + + /// Check if the given address is of a known validator + fn is_validator(&self, validator: &Address, epoch: Epoch) -> bool { + let is_in_consensus = self + .consensus_set + .get(&epoch) + .unwrap() + .iter() + .any(|(_stake, vals)| vals.iter().any(|val| val == validator)); + if is_in_consensus { + return true; + } + self.below_capacity_set + .get(&epoch) + .unwrap() + .iter() + .any(|(_stake, vals)| vals.iter().any(|val| val == validator)) + } + + /// Find the sums of the bonds across all epochs + fn bond_sums(&self) -> HashMap<BondId, token::Change> { + self.bonds.iter().fold( + HashMap::<BondId, token::Change>::new(), + |mut acc, (_epoch, bonds)| { + for (id, delta) in bonds { + let entry = acc.entry(id.clone()).or_default(); + *entry += delta; + // Remove entries that are fully unbonded + if *entry == 0 { + acc.remove(id); + } + } + acc
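+ // `acc` now maps each bond ID to the sum of its deltas across + // epochs, with fully unbonded IDs removed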
+ }, + ) + } + + /// Find the sums of withdrawable unbonds + fn withdrawable_unbonds(&self) -> HashMap<BondId, token::Amount> { + self.unbonds.iter().fold( + HashMap::<BondId, token::Amount>::new(), + |mut acc, (epoch, unbonds)| { + if *epoch <= self.epoch { + for (id, amount) in unbonds { + if *amount > token::Amount::default() { + *acc.entry(id.clone()).or_default() += *amount; + } + } + } + acc + }, + ) + } +} + +/// Arbitrary bond transition that adds tokens to an existing bond +fn add_arb_bond_amount( + state: &AbstractPosState, +) -> impl Strategy<Value = Transition> { + let bond_ids = state + .bonds + .iter() + .flat_map(|(_epoch, bonds)| { + bonds.keys().cloned().collect::<Vec<_>>() + }) + .collect::<HashSet<_>>() + .into_iter() + .collect::<Vec<_>>(); + let arb_bond_id = prop::sample::select(bond_ids); + (arb_bond_id, arb_bond_amount()) + .prop_map(|(id, amount)| Transition::Bond { id, amount }) +} + +/// Arbitrary delegation to one of the validators +fn arb_delegation( + state: &AbstractPosState, +) -> impl Strategy<Value = Transition> { + let validators = state.consensus_set.iter().fold( + HashSet::new(), + |mut acc, (_epoch, vals)| { + for vals in vals.values() { + for validator in vals { + acc.insert(validator.clone()); + } + } + acc + }, + ); + let validator_vec = validators.clone().into_iter().collect::<Vec<_>>(); + let arb_source = address::testing::arb_non_internal_address() + .prop_filter("Must be a non-validator address", move |addr| { + !validators.contains(addr) + }); + let arb_validator = prop::sample::select(validator_vec); + (arb_source, arb_validator, arb_bond_amount()).prop_map( + |(source, validator, amount)| Transition::Bond { + id: BondId { source, validator }, + amount, + }, + ) +} + +// Bond tiny amounts (1..10 micro units) to avoid overflows +pub fn arb_bond_amount() -> impl Strategy<Value = token::Amount> { + (1_u64..10).prop_map(token::Amount::from) +} diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index ed3c61eedae..3d546397bc4 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -11,7 +11,9 @@ use std::ops::Sub; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use namada_core::ledger::storage_api::collections::lazy_map::NestedMap; -use namada_core::ledger::storage_api::collections::{LazyMap, LazyVec}; +use namada_core::ledger::storage_api::collections::{ + LazyMap, LazySet, LazyVec, +}; use namada_core::ledger::storage_api::{self, StorageRead}; use namada_core::types::address::Address; use namada_core::types::key::common; @@ -132,6 +134,9 @@ pub type Bonds = crate::epoched::EpochedDelta< /// Epochs validator's unbonds pub type Unbonds = NestedMap<Epoch, LazyMap<Epoch, token::Amount>>; +/// Consensus keys set, used to ensure uniqueness +pub type ConsensusKeys = LazySet<common::PublicKey>; + #[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] /// Commission rate and max commission rate change per epoch for a validator pub struct CommissionPair { @@ -176,7 +181,7 @@ pub struct GenesisValidator { } /// An update of the consensus and below-capacity validator set. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum ValidatorSetUpdate { /// A validator is consensus-participating Consensus(ConsensusValidator), @@ -186,7 +191,7 @@ pub enum ValidatorSetUpdate { } /// Consensus validator's consensus key and its bonded stake.
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct ConsensusValidator { /// A public key used for signing validator's consensus actions pub consensus_key: common::PublicKey, @@ -369,16 +374,14 @@ pub enum SlashType { LightClientAttack, } -/// VoteInfo inspired from tendermint +/// VoteInfo inspired by tendermint, for validators whose signature was +/// included in the last block #[derive(Debug, Clone, BorshDeserialize, BorshSerialize)] pub struct VoteInfo { - /// the first 20 bytes of the validator public key hash (SHA-256) taken - /// from tendermint - pub validator_address: Vec<u8>, + /// Validator address + pub validator_address: Address, /// validator voting power pub validator_vp: u64, - /// was the validator signature was included in the last block? - pub signed_last_block: bool, } /// Bonds and unbonds with all details (slashes and rewards, if any) @@ -410,7 +413,9 @@ pub struct BondDetails { } /// Unbond with all its details -#[derive(Debug, Clone, BorshDeserialize, BorshSerialize, BorshSchema)] +#[derive( + Debug, Clone, BorshDeserialize, BorshSerialize, BorshSchema, PartialEq, +)] pub struct UnbondDetails { /// The first epoch in which the source bond of this unbond contributed to /// a stake diff --git a/proto/types.proto b/proto/types.proto index 58494ec8248..710cfcd2baa 100644 --- a/proto/types.proto +++ b/proto/types.proto @@ -5,10 +5,12 @@ import "google/protobuf/timestamp.proto"; package types; message Tx { - bytes code = 1; + bytes code_or_hash = 1; // TODO this optional is useless because it's default on proto3 optional bytes data = 2; google.protobuf.Timestamp timestamp = 3; + string chain_id = 4; + optional google.protobuf.Timestamp expiration = 5; } message Dkg { string data = 1; } diff --git a/scripts/release.sh b/scripts/release.sh index 71f99dd22a9..de38343ee0c 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -1,5 +1,5 @@ #!/bin/sh -# depends on cargo-release 0.21.4, git 2.24.0 or later, unclog 0.5.0 +# depends on cargo-release 0.24.4, git 2.24.0 or later, unclog 0.5.0 set -e if [ -z "$1" ]; then diff --git a/scripts/repeat-e2e-test.sh b/scripts/repeat-e2e-test.sh new file mode 100755 index 00000000000..3257e631daa --- /dev/null +++ b/scripts/repeat-e2e-test.sh @@ -0,0 +1,29 @@ +#!/bin/sh +# Run an e2e test at most n times, exit at first failure. +# This can be handy for testing non-deterministic issues that are tricky to +# reproduce. +# +# The first arg is the max number of repetitions and the second is the exact +# name of the test. +# +# Usage example: +# $ scripts/repeat-e2e-test.sh 10 e2e::ledger_tests::run_ledger +# +# Adapted from https://gitlab.com/tezos/tezos/-/blob/master/tests_python/scripts/repeat_test.sh + +NUM=$1 +TEST=$2 +# Thanks internet https://stackoverflow.com/a/4774063/3210255 +SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +NIGHTLY=$(cat "$SCRIPTPATH"/../rust-nightly-version) + +for i in $(seq 1 "$NUM") do + echo "Execution $i/$NUM" + if !
RUST_BACKTRACE=1 NAMADA_E2E_KEEP_TEMP=true NAMADA_E2E_DEBUG=true cargo "+$NIGHTLY" test "$TEST" -Z unstable-options -- --exact --test-threads=1 --nocapture; then + exit 1 + fi +done +exit 0 + + diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 4a5ee3a1e76..277034ae9f5 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -4,12 +4,12 @@ edition = "2021" license = "GPL-3.0" name = "namada" resolver = "2" -version = "0.14.0" +version = "0.16.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -default = ["abciplus"] +default = ["abciplus", "namada-sdk"] mainnet = [ "namada_core/mainnet", ] @@ -70,6 +70,11 @@ ibc-mocks-abcipp = [ "namada_core/ibc-mocks-abcipp", ] +masp-tx-gen = [ + "rand", + "rand_core", +] + # for integration tests and test utilies testing = [ "namada_core/testing", @@ -79,7 +84,15 @@ testing = [ "tempfile", ] +namada-sdk = [ + "tendermint-rpc", + "masp-tx-gen", + "ferveo-tpke", + "masp_primitives/transparent-inputs", +] + [dependencies] +async-std = "1.11.0" namada_core = {path = "../core", default-features = false, features = ["secp256k1-sign-verify"]} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} async-trait = {version = "0.1.51", optional = true} @@ -92,32 +105,33 @@ clru = {git = "https://github.com/marmeladema/clru-rs.git", rev = "71ca566"} data-encoding = "2.3.2" derivative = "2.2.0" # TODO using the same version of tendermint-rs as we do here. -ibc-abcipp = {package = "ibc", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} -ibc-proto-abcipp = {package = "ibc-proto", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} -ibc = {version = "0.14.0", default-features = false, optional = true} -ibc-proto = {version = "0.17.1", default-features = false, optional = true} +ibc-abcipp = {package = "ibc", git = "https://github.com/heliaxdev/cosmos-ibc-rs", rev = "db14744bfba6239cc5f58345ff90f8b7d42637d6", features = ["serde"], optional = true} +ibc-proto-abcipp = {package = "ibc-proto", git = "https://github.com/heliaxdev/ibc-proto-rs", rev = "dd8ba23110a144ffe2074a0b889676468266435a", default-features = false, optional = true} +ibc = {version = "0.36.0", default-features = false, features = ["serde"], optional = true} +ibc-proto = {version = "0.26.0", default-features = false, optional = true} itertools = "0.10.0" loupe = {version = "0.1.3", optional = true} parity-wasm = {version = "0.45.0", features = ["sign_ext"], optional = true} paste = "1.0.9" # A fork with state machine testing -proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} -prost = "0.9.0" +proptest = {git = "https://github.com/heliaxdev/proptest", rev = "8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1", optional = true} +prost = "0.11.6" pwasm-utils = {git = "https://github.com/heliaxdev/wasm-utils", tag = "v0.20.0", features = ["sign_ext"], optional = true} rayon = {version = "=1.5.3", optional = true} -rust_decimal = "1.26.1" -rust_decimal_macros = "1.26.1" +rust_decimal = "=1.26.1" +rust_decimal_macros = "=1.26.1" +serde = {version = "1.0.125", features = ["derive"]} serde_json = "1.0.62" sha2 = "0.9.3" # We switch off "blake2b" because it cannot be compiled to wasm tempfile = {version = "3.2.0", optional = true} -tendermint-abcipp = {package = "tendermint", git = 
"https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} -tendermint-rpc-abcipp = {package = "tendermint-rpc", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", features = ["http-client"], optional = true} -tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} +tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35", optional = true} +tendermint-rpc-abcipp = {package = "tendermint-rpc", git = "https://github.com/heliaxdev/tendermint-rs", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35", features = ["http-client"], optional = true} +tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35", optional = true} tendermint = {version = "0.23.6", optional = true} -tendermint-rpc = {version = "0.23.6", features = ["http-client"], optional = true} +tendermint-rpc = {version = "0.23.6", default-features = false, features = ["trait-client"], optional = true} tendermint-proto = {version = "0.23.6", optional = true} -thiserror = "1.0.30" +thiserror = "1.0.38" tracing = "0.1.30" wasmer = {version = "=2.2.0", optional = true} wasmer-cache = {version = "=2.2.0", optional = true} @@ -129,16 +143,23 @@ wasmparser = "0.83.0" #libmasp = { git = "https://github.com/anoma/masp", branch = "murisi/masp-incentive" } masp_primitives = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c" } masp_proofs = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c" } +rand = {version = "0.8", default-features = false, optional = true} +rand_core = {version = "0.6", default-features = false, optional = true} zeroize = "1.5.5" +toml = "0.5.8" +bimap = {version = "0.6.2", features = ["serde"]} +orion = "0.16.0" +tokio = {version = "1.8.2", default-features = false} [dev-dependencies] assert_matches = "1.5.0" async-trait = {version = "0.1.51"} byte-unit = "4.0.13" libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9"} +namada_test_utils = {path = "../test_utils"} pretty_assertions = "0.7.2" # A fork with state machine testing -proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} +proptest = {git = "https://github.com/heliaxdev/proptest", rev = "8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1"} test-log = {version = "0.2.7", default-features = false, features = ["trace"]} tokio = {version = "1.8.2", default-features = false, features = ["rt", "macros"]} tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} diff --git a/shared/src/ledger/args.rs b/shared/src/ledger/args.rs new file mode 100644 index 00000000000..e3da98e61c1 --- /dev/null +++ b/shared/src/ledger/args.rs @@ -0,0 +1,523 @@ +//! 
Structures encapsulating SDK arguments +use namada_core::types::chain::ChainId; +use namada_core::types::time::DateTimeUtc; +use rust_decimal::Decimal; + +use crate::ibc::core::ics24_host::identifier::{ChannelId, PortId}; +use crate::types::address::Address; +use crate::types::key::{common, SchemeType}; +use crate::types::masp::MaspValue; +use crate::types::storage::Epoch; +use crate::types::transaction::GasLimit; +use crate::types::{storage, token}; + +/// Abstraction of types being used in Namada +pub trait NamadaTypes: Clone + std::fmt::Debug { + /// Represents an address on the ledger + type Address: Clone + std::fmt::Debug; + /// Represents the address of a native token + type NativeAddress: Clone + std::fmt::Debug; + /// Represents a key pair + type Keypair: Clone + std::fmt::Debug; + /// Represents the address of a Tendermint endpoint + type TendermintAddress: Clone + std::fmt::Debug; + /// Represents a viewing key + type ViewingKey: Clone + std::fmt::Debug; + /// Represents the owner of a balance + type BalanceOwner: Clone + std::fmt::Debug; + /// Represents a public key + type PublicKey: Clone + std::fmt::Debug; + /// Represents the source of a Transfer + type TransferSource: Clone + std::fmt::Debug; + /// Represents the target of a Transfer + type TransferTarget: Clone + std::fmt::Debug; + /// Represents some data that is used in a transaction + type Data: Clone + std::fmt::Debug; +} + +/// The concrete types being used in Namada SDK +#[derive(Clone, Debug)] +pub struct SdkTypes; + +impl NamadaTypes for SdkTypes { + type Address = Address; + type BalanceOwner = namada_core::types::masp::BalanceOwner; + type Data = Vec<u8>; + type Keypair = namada_core::types::key::common::SecretKey; + type NativeAddress = Address; + type PublicKey = namada_core::types::key::common::PublicKey; + type TendermintAddress = (); + type TransferSource = namada_core::types::masp::TransferSource; + type TransferTarget = namada_core::types::masp::TransferTarget; + type ViewingKey = namada_core::types::masp::ExtendedViewingKey; +} + +/// Common query arguments +#[derive(Clone, Debug)] +pub struct Query<C: NamadaTypes = SdkTypes> { + /// The address of the ledger node as host:port + pub ledger_address: C::TendermintAddress, +} + +/// Transaction associated results arguments +#[derive(Clone, Debug)] +pub struct QueryResult<C: NamadaTypes = SdkTypes> { + /// Common query args + pub query: Query<C>, + /// Hash of transaction to lookup + pub tx_hash: String, +} + +/// Custom transaction arguments +#[derive(Clone, Debug)] +pub struct TxCustom<C: NamadaTypes = SdkTypes> { + /// Common tx arguments + pub tx: Tx<C>, + /// Path to the tx WASM code file + pub code_path: C::Data, + /// Path to the data file + pub data_path: Option<C::Data>, +} + +/// Transfer transaction arguments +#[derive(Clone, Debug)] +pub struct TxTransfer<C: NamadaTypes = SdkTypes> { + /// Common tx arguments + pub tx: Tx<C>, + /// Transfer source address + pub source: C::TransferSource, + /// Transfer target address + pub target: C::TransferTarget, + /// Transferred token address + pub token: C::Address, + /// Sub prefix of the transferred token + pub sub_prefix: Option<String>, + /// Transferred token amount + pub amount: token::Amount, + /// Native token address + pub native_token: C::NativeAddress, + /// Path to the TX WASM code file + pub tx_code_path: C::Data, +} + +/// IBC transfer transaction arguments +#[derive(Clone, Debug)] +pub struct TxIbcTransfer<C: NamadaTypes = SdkTypes> { + /// Common tx arguments + pub tx: Tx<C>, + /// Transfer source address + pub source: C::Address, + /// Transfer target address + pub receiver: String, + /// Transferred token address + pub token: C::Address, + /// Sub prefix of the transferred
+
+/// Common query arguments
+#[derive(Clone, Debug)]
+pub struct Query<C: NamadaTypes> {
+    /// The address of the ledger node as host:port
+    pub ledger_address: C::TendermintAddress,
+}
+
+/// Transaction associated results arguments
+#[derive(Clone, Debug)]
+pub struct QueryResult<C: NamadaTypes> {
+    /// Common query args
+    pub query: Query<C>,
+    /// Hash of transaction to lookup
+    pub tx_hash: String,
+}
+
+/// Custom transaction arguments
+#[derive(Clone, Debug)]
+pub struct TxCustom<C: NamadaTypes> {
+    /// Common tx arguments
+    pub tx: Tx<C>,
+    /// Path to the tx WASM code file
+    pub code_path: C::Data,
+    /// Path to the data file
+    pub data_path: Option<C::Data>,
+}
+
+/// Transfer transaction arguments
+#[derive(Clone, Debug)]
+pub struct TxTransfer<C: NamadaTypes> {
+    /// Common tx arguments
+    pub tx: Tx<C>,
+    /// Transfer source address
+    pub source: C::TransferSource,
+    /// Transfer target address
+    pub target: C::TransferTarget,
+    /// Transferred token address
+    pub token: C::Address,
+    /// Transferred token's sub-prefix, if any
+    pub sub_prefix: Option<String>,
+    /// Transferred token amount
+    pub amount: token::Amount,
+    /// Native token address
+    pub native_token: C::NativeAddress,
+    /// Path to the TX WASM code file
+    pub tx_code_path: C::Data,
+}
+
+/// IBC transfer transaction arguments
+#[derive(Clone, Debug)]
+pub struct TxIbcTransfer<C: NamadaTypes> {
+    /// Common tx arguments
+    pub tx: Tx<C>,
+    /// Transfer source address
+    pub source: C::Address,
+    /// Transfer target address
+    pub receiver: String,
+    /// Transferred token address
+    pub token: C::Address,
+    /// Transferred token's sub-prefix, if any
+    pub sub_prefix: Option<String>,
+    /// Transferred token amount
+    pub amount: token::Amount,
+    /// Port ID
+    pub port_id: PortId,
+    /// Channel ID
+    pub channel_id: ChannelId,
+    /// Timeout height of the destination chain
+    pub timeout_height: Option<u64>,
+    /// Timeout timestamp offset
+    pub timeout_sec_offset: Option<u64>,
+    /// Path to the TX WASM code file
+    pub tx_code_path: C::Data,
+}
+
+/// Transaction to initialize a new account
+#[derive(Clone, Debug)]
+pub struct TxInitAccount<C: NamadaTypes> {
+    /// Common tx arguments
+    pub tx: Tx<C>,
+    /// Address of the source account
+    pub source: C::Address,
+    /// Wasm VP for the new account
+    pub vp_code: C::Data,
+    /// Path to the VP WASM code file for the new account
+    pub vp_code_path: C::Data,
+    /// Path to the TX WASM code file
+    pub tx_code_path: C::Data,
+    /// Public key for the new account
+    pub public_key: C::PublicKey,
+}
+
+/// Transaction to initialize a new validator account
+#[derive(Clone, Debug)]
+pub struct TxInitValidator<C: NamadaTypes> {
+    /// Common tx arguments
+    pub tx: Tx<C>,
+    /// Source
+    pub source: C::Address,
+    /// Signature scheme
+    pub scheme: SchemeType,
+    /// Account key
+    pub account_key: Option<C::PublicKey>,
+    /// Consensus key
+    pub consensus_key: Option<C::Keypair>,
+    /// Protocol key
+    pub protocol_key: Option<C::PublicKey>,
+    /// Commission rate
+    pub commission_rate: Decimal,
+    /// Maximum commission rate change
+    pub max_commission_rate_change: Decimal,
+    /// Path to the VP WASM code file
+    pub validator_vp_code_path: C::Data,
+    /// Path to the TX WASM code file
+    pub tx_code_path: C::Data,
+    /// Don't encrypt the keypair
+    pub unsafe_dont_encrypt: bool,
+}
+
+/// Transaction to update a VP arguments
+#[derive(Clone, Debug)]
+pub struct TxUpdateVp<C: NamadaTypes> {
+    /// Common tx arguments
+    pub tx: Tx<C>,
+    /// Path to the VP WASM code file
+    pub vp_code_path: C::Data,
+    /// Path to the TX WASM code file
+    pub tx_code_path: C::Data,
+    /// Address of the account whose VP is to be updated
+    pub addr: C::Address,
+}
+
+/// Bond arguments
+#[derive(Clone, Debug)]
+pub struct Bond<C: NamadaTypes> {
+    /// Common tx arguments
+    pub tx: Tx<C>,
+    /// Validator address
+    pub validator: C::Address,
+    /// Amount of tokens to stake in a bond
+    pub amount: token::Amount,
+    /// Source address for delegations. For self-bonds, the validator is
+    /// also the source.
+    pub source: Option<C::Address>,
+    /// Native token address
+    pub native_token: C::NativeAddress,
+    /// Path to the TX WASM code file
+    pub tx_code_path: C::Data,
+}
+
+/// Unbond arguments
+#[derive(Clone, Debug)]
+pub struct Unbond<C: NamadaTypes> {
+    /// Common tx arguments
+    pub tx: Tx<C>,
+    /// Validator address
+    pub validator: C::Address,
+    /// Amount of tokens to unbond from a bond
+    pub amount: token::Amount,
+    /// Source address for unbonding from delegations. For unbonding from
+    /// self-bonds, the validator is also the source
+    pub source: Option<C::Address>,
+    /// Path to the TX WASM code file
+    pub tx_code_path: C::Data,
+}
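Because every struct above is generic over `NamadaTypes` and every associated type carries a `Debug` bound, helpers can be written once for all front-ends. A minimal illustrative sketch (the helper is not part of this change):

```rust
// Illustrative only: format any front-end's transfer arguments for logging.
// This compiles for any `C` because `NamadaTypes` requires `Debug` on all
// of its associated types.
fn describe_transfer<C: NamadaTypes>(args: &TxTransfer<C>) -> String {
    format!(
        "transfer {:?} of token {:?}: {:?} -> {:?}",
        args.amount, args.token, args.source, args.target,
    )
}
```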
+
+/// Reveal public key
+#[derive(Clone, Debug)]
+pub struct RevealPk<C: NamadaTypes> {
+    /// Common tx arguments
+    pub tx: Tx<C>,
+    /// A public key to be revealed on-chain
+    pub public_key: C::PublicKey,
+}
+
+/// Query proposal
+#[derive(Clone, Debug)]
+pub struct QueryProposal<C: NamadaTypes> {
+    /// Common query args
+    pub query: Query<C>,
+    /// Proposal id
+    pub proposal_id: Option<u64>,
+}
+
+/// Query protocol parameters
+#[derive(Clone, Debug)]
+pub struct QueryProtocolParameters<C: NamadaTypes> {
+    /// Common query args
+    pub query: Query<C>,
+}
+
+/// Withdraw arguments
+#[derive(Clone, Debug)]
+pub struct Withdraw<C: NamadaTypes> {
+    /// Common tx arguments
+    pub tx: Tx<C>,
+    /// Validator address
+    pub validator: C::Address,
+    /// Source address for withdrawing from delegations. For withdrawing
+    /// from self-bonds, the validator is also the source
+    pub source: Option<C::Address>,
+    /// Path to the TX WASM code file
+    pub tx_code_path: C::Data,
+}
+
+/// Query asset conversions
+#[derive(Clone, Debug)]
+pub struct QueryConversions<C: NamadaTypes> {
+    /// Common query args
+    pub query: Query<C>,
+    /// Address of a token
+    pub token: Option<C::Address>,
+    /// Epoch of the asset
+    pub epoch: Option<Epoch>,
+}
+
+/// Query token balance(s)
+#[derive(Clone, Debug)]
+pub struct QueryBalance<C: NamadaTypes> {
+    /// Common query args
+    pub query: Query<C>,
+    /// Address of an owner
+    pub owner: Option<C::BalanceOwner>,
+    /// Address of a token
+    pub token: Option<C::Address>,
+    /// Whether not to convert balances
+    pub no_conversions: bool,
+    /// Sub prefix of an account
+    pub sub_prefix: Option<String>,
+}
+
+/// Query historical transfer(s)
+#[derive(Clone, Debug)]
+pub struct QueryTransfers<C: NamadaTypes> {
+    /// Common query args
+    pub query: Query<C>,
+    /// Address of an owner
+    pub owner: Option<C::BalanceOwner>,
+    /// Address of a token
+    pub token: Option<C::Address>,
+}
+
+/// Query PoS bond(s)
+#[derive(Clone, Debug)]
+pub struct QueryBonds<C: NamadaTypes> {
+    /// Common query args
+    pub query: Query<C>,
+    /// Address of an owner
+    pub owner: Option<C::Address>,
+    /// Address of a validator
+    pub validator: Option<C::Address>,
+}
+
+/// Query PoS bonded stake
+#[derive(Clone, Debug)]
+pub struct QueryBondedStake<C: NamadaTypes> {
+    /// Common query args
+    pub query: Query<C>,
+    /// Address of a validator
+    pub validator: Option<C::Address>,
+    /// Epoch in which to find bonded stake
+    pub epoch: Option<Epoch>,
+}
+
+#[derive(Clone, Debug)]
+/// Commission rate change args
+pub struct TxCommissionRateChange<C: NamadaTypes> {
+    /// Common tx arguments
+    pub tx: Tx<C>,
+    /// Validator address (should be self)
+    pub validator: C::Address,
+    /// Value to which the tx changes the commission rate
+    pub rate: Decimal,
+    /// Path to the TX WASM code file
+    pub tx_code_path: C::Data,
+}
+
+/// Query PoS commission rate
+#[derive(Clone, Debug)]
+pub struct QueryCommissionRate<C: NamadaTypes> {
+    /// Common query args
+    pub query: Query<C>,
+    /// Address of a validator
+    pub validator: C::Address,
+    /// Epoch in which to find commission rate
+    pub epoch: Option<Epoch>,
+}
+
+/// Query PoS slashes
+#[derive(Clone, Debug)]
+pub struct QuerySlashes<C: NamadaTypes> {
+    /// Common query args
+    pub query: Query<C>,
+    /// Address of a validator
+    pub validator: Option<C::Address>,
+}
+
+/// Query PoS delegations
+#[derive(Clone, Debug)]
+pub struct QueryDelegations<C: NamadaTypes> {
+    /// Common query args
+    pub query: Query<C>,
+    /// Address of an owner
+    pub owner: C::Address,
+}
+
+/// Query the raw bytes of given storage key
+#[derive(Clone, Debug)]
+pub struct QueryRawBytes<C: NamadaTypes> {
+    /// The storage key to query
+    pub storage_key: storage::Key,
+    /// Common query args
+    pub query: Query<C>,
+}
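All of these query structs embed the shared `Query<C>`; under `SdkTypes` the `ledger_address` degenerates to `()` because an SDK caller supplies its own RPC client rather than a host:port string. A hedged construction sketch (the helper function is illustrative only, not part of this change):

```rust
// Illustrative only: request all balances of one owner, with conversions
// applied (no_conversions = false) and no sub-prefix filter. Under
// `SdkTypes`, `TendermintAddress = ()`, so `Query` carries no endpoint.
fn all_balances_query(
    owner: namada_core::types::masp::BalanceOwner,
) -> QueryBalance<SdkTypes> {
    QueryBalance {
        query: Query { ledger_address: () },
        owner: Some(owner),
        token: None,
        no_conversions: false,
        sub_prefix: None,
    }
}
```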
+
+/// Common transaction arguments
+#[derive(Clone, Debug)]
+pub struct Tx<C: NamadaTypes> {
+    /// Simulate applying the transaction
+    pub dry_run: bool,
+    /// Dump the transaction bytes
+    pub dump_tx: bool,
+    /// Submit the transaction even if it doesn't pass client checks
+    pub force: bool,
+    /// Do not wait for the transaction to be added to the blockchain
+    pub broadcast_only: bool,
+    /// The address of the ledger node as host:port
+    pub ledger_address: C::TendermintAddress,
+    /// If any new account is initialized by the tx, use the given alias to
+    /// save it in the wallet.
+    pub initialized_account_alias: Option<String>,
+    /// Whether to force overwrite the above alias, if it is provided, in the
+    /// wallet.
+    pub wallet_alias_force: bool,
+    /// The amount being paid to include the transaction
+    pub fee_amount: token::Amount,
+    /// The token in which the fee is being paid
+    pub fee_token: C::Address,
+    /// The max amount of gas used to process the tx
+    pub gas_limit: GasLimit,
+    /// The optional expiration of the transaction
+    pub expiration: Option<DateTimeUtc>,
+    /// The chain id for which the transaction is intended
+    pub chain_id: Option<ChainId>,
+    /// Sign the tx with the key for the given alias from your wallet
+    pub signing_key: Option<C::Keypair>,
+    /// Sign the tx with the keypair of the public key of the given address
+    pub signer: Option<C::Address>,
+    /// Path to the TX WASM code file
+    pub tx_code_path: C::Data,
+    /// Password to decrypt key
+    pub password: Option<String>,
+}
+
+/// MASP add key or address arguments
+#[derive(Clone, Debug)]
+pub struct MaspAddrKeyAdd {
+    /// Key alias
+    pub alias: String,
+    /// Whether to force overwrite the alias
+    pub alias_force: bool,
+    /// Any MASP value
+    pub value: MaspValue,
+    /// Don't encrypt the keypair
+    pub unsafe_dont_encrypt: bool,
+}
+
+/// MASP generate spending key arguments
+#[derive(Clone, Debug)]
+pub struct MaspSpendKeyGen {
+    /// Key alias
+    pub alias: String,
+    /// Whether to force overwrite the alias
+    pub alias_force: bool,
+    /// Don't encrypt the keypair
+    pub unsafe_dont_encrypt: bool,
+}
+
+/// MASP generate payment address arguments
+#[derive(Clone, Debug)]
+pub struct MaspPayAddrGen<C: NamadaTypes> {
+    /// Key alias
+    pub alias: String,
+    /// Whether to force overwrite the alias
+    pub alias_force: bool,
+    /// Viewing key
+    pub viewing_key: C::ViewingKey,
+    /// Pin
+    pub pin: bool,
+}
+
+/// Wallet generate key and implicit address arguments
+#[derive(Clone, Debug)]
+pub struct KeyAndAddressGen {
+    /// Scheme type
+    pub scheme: SchemeType,
+    /// Key alias
+    pub alias: Option<String>,
+    /// Whether to force overwrite the alias, if provided
+    pub alias_force: bool,
+    /// Don't encrypt the keypair
+    pub unsafe_dont_encrypt: bool,
+}
+
+/// Wallet key lookup arguments
+#[derive(Clone, Debug)]
+pub struct KeyFind {
+    /// Public key to lookup keypair with
+    pub public_key: Option<common::PublicKey>,
+    /// Key alias to lookup keypair with
+    pub alias: Option<String>,
+    /// Public key hash to lookup keypair with
+    pub value: Option<String>,
+    /// Show secret keys to user
+    pub unsafe_show_secret: bool,
+}
+
+/// Wallet find shielded address or key arguments
+#[derive(Clone, Debug)]
+pub struct AddrKeyFind {
+    /// Address/key alias
+    pub alias: String,
+    /// Show secret keys to user
+    pub unsafe_show_secret: bool,
+}
+
+/// Wallet list shielded keys arguments
+#[derive(Clone, Debug)]
+pub struct MaspKeysList {
+    /// Don't decrypt spending keys
+    pub decrypt: bool,
+    /// Show secret keys to user
+    pub unsafe_show_secret: bool,
+}
+
+/// Wallet list keys arguments
+#[derive(Clone, Debug)]
+pub struct KeyList {
+    /// Don't decrypt keypairs
+    pub decrypt: bool,
+    /// Show secret keys to user
+    pub unsafe_show_secret: bool,
+}
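Every transaction argument struct embeds the common `Tx<C>` defined above. As an illustrative baseline (the helper and its parameters are hypothetical; only the struct and its fields come from this file), a dry-run configuration under `SdkTypes` could look like:

```rust
// Illustrative only: a conservative baseline for simulating a transaction.
// All concrete values are passed in by the caller; under `SdkTypes`,
// `ledger_address` is `()` and `tx_code_path` holds the raw WASM bytes.
fn dry_run_tx(
    fee_amount: token::Amount,
    fee_token: Address,
    gas_limit: GasLimit,
    tx_code: Vec<u8>,
) -> Tx<SdkTypes> {
    Tx {
        dry_run: true, // simulate instead of submitting
        dump_tx: false,
        force: false,
        broadcast_only: false,
        ledger_address: (),
        initialized_account_alias: None,
        wallet_alias_force: false,
        fee_amount,
        fee_token,
        gas_limit,
        expiration: None, // no expiry needed for a simulation
        chain_id: None,
        signing_key: None,
        signer: None,
        tx_code_path: tx_code,
        password: None,
    }
}
```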
+
+/// Wallet key export arguments
+#[derive(Clone, Debug)]
+pub struct KeyExport {
+    /// Key alias
+    pub alias: String,
+}
+
+/// Wallet address lookup arguments
+#[derive(Clone, Debug)]
+pub struct AddressOrAliasFind {
+    /// Alias to find
+    pub alias: Option<String>,
+    /// Address to find
+    pub address: Option<Address>,
+}
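The two optional fields of `AddressOrAliasFind` are an either-or pair: a caller is expected to set exactly one. A hedged sketch of how a front-end might enforce that (the check itself is illustrative, not part of this change):

```rust
// Illustrative validation: exactly one of alias/address must be given.
fn validate(args: &AddressOrAliasFind) -> Result<(), &'static str> {
    match (&args.alias, &args.address) {
        (Some(_), None) | (None, Some(_)) => Ok(()),
        _ => Err("specify either an alias or an address, not both"),
    }
}
```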
+
+/// Wallet address add arguments
+#[derive(Clone, Debug)]
+pub struct AddressAdd {
+    /// Address alias
+    pub alias: String,
+    /// Whether to force overwrite the alias
+    pub alias_force: bool,
+    /// Address to add
+    pub address: Address,
+}
diff --git a/shared/src/ledger/eth_bridge/mod.rs b/shared/src/ledger/eth_bridge/mod.rs
index ff8505b08e9..817623e54cc 100644
--- a/shared/src/ledger/eth_bridge/mod.rs
+++ b/shared/src/ledger/eth_bridge/mod.rs
@@ -1,4 +1,3 @@
 //! Bridge from Ethereum
 
-pub mod storage;
 pub mod vp;
diff --git a/shared/src/ledger/eth_bridge/storage.rs b/shared/src/ledger/eth_bridge/storage.rs
deleted file mode 100644
index e67abf921ce..00000000000
--- a/shared/src/ledger/eth_bridge/storage.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-//! storage helpers
-use super::vp::ADDRESS;
-use crate::types::storage::{Key, KeySeg};
-
-const QUEUE_STORAGE_KEY: &str = "queue";
-
-/// Get the key corresponding to @EthBridge/queue
-pub fn queue_key() -> Key {
-    Key::from(ADDRESS.to_db_key())
-        .push(&QUEUE_STORAGE_KEY.to_owned())
-        .expect("Cannot obtain a storage key")
-}
diff --git a/shared/src/ledger/ibc/mod.rs b/shared/src/ledger/ibc/mod.rs
index 6cf1d6c9f13..1aa292ec070 100644
--- a/shared/src/ledger/ibc/mod.rs
+++ b/shared/src/ledger/ibc/mod.rs
@@ -1,17 +1,18 @@
 //! IBC integration
 
-pub use namada_core::ledger::ibc::{actions as handler, storage};
+pub use namada_core::ledger::ibc::storage;
 pub mod vp;
 
 use namada_core::ledger::ibc::storage::{
-    capability_index_key, channel_counter_key, client_counter_key,
-    connection_counter_key,
+    channel_counter_key, client_counter_key, connection_counter_key,
 };
+use namada_core::ledger::storage::WlStorage;
+use namada_core::ledger::storage_api::StorageWrite;
 
-use crate::ledger::storage::{self as ledger_storage, Storage, StorageHasher};
+use crate::ledger::storage::{self as ledger_storage, StorageHasher};
 
 /// Initialize storage in the genesis block.
-pub fn init_genesis_storage<DB, H>(storage: &mut Storage<DB, H>)
+pub fn init_genesis_storage<DB, H>(storage: &mut WlStorage<DB, H>)
 where
     DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>,
     H: StorageHasher,
@@ -23,27 +24,20 @@ where
     let key = client_counter_key();
     let value = 0_u64.to_be_bytes().to_vec();
     storage
-        .write(&key, value)
+        .write_bytes(&key, value)
         .expect("Unable to write the initial client counter");
 
     // the connection counter
     let key = connection_counter_key();
     let value = 0_u64.to_be_bytes().to_vec();
     storage
-        .write(&key, value)
+        .write_bytes(&key, value)
         .expect("Unable to write the initial connection counter");
 
     // the channel counter
     let key = channel_counter_key();
     let value = 0_u64.to_be_bytes().to_vec();
     storage
-        .write(&key, value)
+        .write_bytes(&key, value)
         .expect("Unable to write the initial channel counter");
-
-    // the capability index
-    let key = capability_index_key();
-    let value = 0_u64.to_be_bytes().to_vec();
-    storage
-        .write(&key, value)
-        .expect("Unable to write the initial capability index");
 }
diff --git a/shared/src/ledger/ibc/vp/channel.rs b/shared/src/ledger/ibc/vp/channel.rs
deleted file mode 100644
index e85a2212123..00000000000
--- a/shared/src/ledger/ibc/vp/channel.rs
+++ /dev/null
@@ -1,1003 +0,0 @@
-//!
IBC validity predicate for channel module - -use core::time::Duration; - -use namada_core::ledger::ibc::actions::{ - make_close_confirm_channel_event, make_close_init_channel_event, - make_open_ack_channel_event, make_open_confirm_channel_event, - make_open_init_channel_event, make_open_try_channel_event, - make_timeout_event, -}; -use namada_core::ledger::ibc::data::{ - Error as IbcDataError, IbcMessage, PacketReceipt, -}; -use namada_core::ledger::ibc::storage::{ - ack_key, channel_counter_key, channel_key, client_update_height_key, - client_update_timestamp_key, commitment_key, is_channel_counter_key, - next_sequence_ack_key, next_sequence_recv_key, next_sequence_send_key, - port_channel_id, receipt_key, Error as IbcStorageError, -}; -use namada_core::ledger::parameters; -use namada_core::ledger::storage::{self as ledger_storage, StorageHasher}; -use namada_core::ledger::storage_api::StorageRead; -use namada_core::types::storage::Key; -use sha2::Digest; -use thiserror::Error; - -use super::{Ibc, StateChange}; -use crate::ibc::core::ics02_client::client_consensus::AnyConsensusState; -use crate::ibc::core::ics02_client::client_state::AnyClientState; -use crate::ibc::core::ics02_client::context::ClientReader; -use crate::ibc::core::ics02_client::height::Height; -use crate::ibc::core::ics03_connection::connection::ConnectionEnd; -use crate::ibc::core::ics03_connection::context::ConnectionReader; -use crate::ibc::core::ics03_connection::error::Error as Ics03Error; -use crate::ibc::core::ics04_channel::channel::{ - ChannelEnd, Counterparty, State, -}; -use crate::ibc::core::ics04_channel::commitment::{ - AcknowledgementCommitment, PacketCommitment, -}; -use crate::ibc::core::ics04_channel::context::ChannelReader; -use crate::ibc::core::ics04_channel::error::Error as Ics04Error; -use crate::ibc::core::ics04_channel::handler::verify::verify_channel_proofs; -use crate::ibc::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; -use crate::ibc::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; -use crate::ibc::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; -use crate::ibc::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; -use crate::ibc::core::ics04_channel::msgs::{ChannelMsg, PacketMsg}; -use crate::ibc::core::ics04_channel::packet::{Receipt, Sequence}; -use crate::ibc::core::ics05_port::capabilities::{ - Capability, ChannelCapability, -}; -use crate::ibc::core::ics05_port::context::PortReader; -use crate::ibc::core::ics24_host::identifier::{ - ChannelId, ClientId, ConnectionId, PortChannelId, PortId, -}; -use crate::ibc::core::ics26_routing::context::ModuleId; -use crate::ibc::core::ics26_routing::msgs::Ics26Envelope; -use crate::ibc::proofs::Proofs; -use crate::ibc::timestamp::Timestamp; -use crate::ledger::native_vp::{Error as NativeVpError, VpEnv}; -use crate::tendermint::Time; -use crate::tendermint_proto::Protobuf; -use crate::vm::WasmCacheAccess; - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("Native VP error: {0}")] - NativeVp(NativeVpError), - #[error("State change error: {0}")] - InvalidStateChange(String), - #[error("Connection error: {0}")] - InvalidConnection(String), - #[error("Channel error: {0}")] - InvalidChannel(String), - #[error("Port error: {0}")] - InvalidPort(String), - #[error("Version error: {0}")] - InvalidVersion(String), - #[error("Sequence error: {0}")] - InvalidSequence(String), - #[error("Packet info error: {0}")] - InvalidPacketInfo(String), - #[error("Client update 
timestamp error: {0}")] - InvalidTimestamp(String), - #[error("Client update hight error: {0}")] - InvalidHeight(String), - #[error("Proof verification error: {0}")] - ProofVerificationFailure(Ics04Error), - #[error("Decoding TX data error: {0}")] - DecodingTxData(std::io::Error), - #[error("IBC data error: {0}")] - InvalidIbcData(IbcDataError), - #[error("IBC storage error: {0}")] - IbcStorage(IbcStorageError), - #[error("IBC event error: {0}")] - IbcEvent(String), -} - -/// IBC channel functions result -pub type Result = std::result::Result; -/// ChannelReader result -type Ics04Result = core::result::Result; - -impl<'a, DB, H, CA> Ibc<'a, DB, H, CA> -where - DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - pub(super) fn validate_channel( - &self, - key: &Key, - tx_data: &[u8], - ) -> Result<()> { - if is_channel_counter_key(key) { - let counter = self.channel_counter().map_err(|_| { - Error::InvalidChannel( - "The channel counter doesn't exist".to_owned(), - ) - })?; - if self.channel_counter_pre()? < counter { - return Ok(()); - } else { - return Err(Error::InvalidChannel( - "The channel counter is invalid".to_owned(), - )); - } - } - - let port_channel_id = port_channel_id(key)?; - self.authenticated_capability(&port_channel_id.port_id) - .map_err(|e| { - Error::InvalidPort(format!( - "The port is not authenticated: ID {}, {}", - port_channel_id.port_id, e - )) - })?; - - let channel = self - .channel_end(&( - port_channel_id.port_id.clone(), - port_channel_id.channel_id, - )) - .map_err(|_| { - Error::InvalidChannel(format!( - "The channel doesn't exist: Port/Channel {}", - port_channel_id - )) - })?; - // check the number of hops and empty version in the channel end - channel.validate_basic().map_err(|e| { - Error::InvalidChannel(format!( - "The channel is invalid: Port/Channel {}, {}", - port_channel_id, e - )) - })?; - - self.validate_version(&channel)?; - - match self.get_channel_state_change(&port_channel_id)? 
{ - StateChange::Created => match channel.state() { - State::Init => { - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_channel_open_init()?; - let event = make_open_init_channel_event( - &port_channel_id.channel_id, - &msg, - ); - self.check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string())) - } - State::TryOpen => { - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_channel_open_try()?; - self.verify_channel_try_proof( - &port_channel_id, - &channel, - &msg, - )?; - let event = make_open_try_channel_event( - &port_channel_id.channel_id, - &msg, - ); - self.check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string())) - } - _ => Err(Error::InvalidChannel(format!( - "The channel state is invalid: Port/Channel {}, State {}", - port_channel_id, - channel.state() - ))), - }, - StateChange::Updated => self.validate_updated_channel( - &port_channel_id, - &channel, - tx_data, - ), - _ => Err(Error::InvalidStateChange(format!( - "The state change of the channel: Port/Channel {}", - port_channel_id - ))), - } - } - - fn get_channel_state_change( - &self, - port_channel_id: &PortChannelId, - ) -> Result { - let key = channel_key(port_channel_id); - self.get_state_change(&key) - .map_err(|e| Error::InvalidStateChange(e.to_string())) - } - - fn validate_version(&self, channel: &ChannelEnd) -> Result<()> { - let connection = self.connection_from_channel(channel)?; - let versions = connection.versions(); - let version = match versions { - [version] => version, - _ => { - return Err(Error::InvalidVersion( - "Multiple versions are specified or no version".to_owned(), - )); - } - }; - - let feature = channel.ordering().to_string(); - if version.is_supported_feature(feature.clone()) { - Ok(()) - } else { - Err(Error::InvalidVersion(format!( - "The version is unsupported: Feature {}", - feature - ))) - } - } - - fn validate_updated_channel( - &self, - port_channel_id: &PortChannelId, - channel: &ChannelEnd, - tx_data: &[u8], - ) -> Result<()> { - let prev_channel = self.channel_end_pre(port_channel_id)?; - - let ibc_msg = IbcMessage::decode(tx_data)?; - let event = match ibc_msg.0 { - Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenAck(msg)) => { - if !channel.state().is_open() - || !prev_channel.state_matches(&State::Init) - { - return Err(Error::InvalidStateChange(format!( - "The state change of the channel is invalid for \ - ChannelOpenAck: Port/Channel {}", - port_channel_id, - ))); - } - self.verify_channel_ack_proof(port_channel_id, channel, &msg)?; - Some(make_open_ack_channel_event(&msg, channel).map_err( - |_| { - Error::InvalidChannel(format!( - "No connection for the channel: Port/Channel {}", - port_channel_id - )) - }, - )?) - } - Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenConfirm( - msg, - )) => { - if !channel.state().is_open() - || !prev_channel.state_matches(&State::TryOpen) - { - return Err(Error::InvalidStateChange(format!( - "The state change of the channel is invalid for \ - ChannelOpenConfirm: Port/Channel {}", - port_channel_id, - ))); - } - self.verify_channel_confirm_proof( - port_channel_id, - channel, - &msg, - )?; - Some(make_open_confirm_channel_event(&msg, channel).map_err( - |_| { - Error::InvalidChannel(format!( - "No connection for the channel: Port/Channel {}", - port_channel_id - )) - }, - )?) 
- } - Ics26Envelope::Ics4PacketMsg(PacketMsg::ToPacket(msg)) => { - if !channel.state_matches(&State::Closed) - || !prev_channel.state().is_open() - { - return Err(Error::InvalidStateChange(format!( - "The state change of the channel is invalid for \ - Timeout: Port/Channel {}", - port_channel_id, - ))); - } - let commitment_key = ( - msg.packet.source_port.clone(), - msg.packet.source_channel, - msg.packet.sequence, - ); - self.validate_commitment_absence(commitment_key)?; - Some(make_timeout_event(msg.packet)) - } - Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket(msg)) => { - let commitment_key = ( - msg.packet.source_port, - msg.packet.source_channel, - msg.packet.sequence, - ); - self.validate_commitment_absence(commitment_key)?; - None - } - Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseInit( - msg, - )) => Some(make_close_init_channel_event(&msg, channel).map_err( - |_| { - Error::InvalidChannel(format!( - "No connection for the channel: Port/Channel {}", - port_channel_id - )) - }, - )?), - Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseConfirm( - msg, - )) => { - self.verify_channel_close_proof( - port_channel_id, - channel, - &msg, - )?; - Some(make_close_confirm_channel_event(&msg, channel).map_err( - |_| { - Error::InvalidChannel(format!( - "No connection for the channel: Port/Channel {}", - port_channel_id - )) - }, - )?) - } - _ => { - return Err(Error::InvalidStateChange(format!( - "The state change of the channel happened with unexpected \ - IBC message: Port/Channel {}", - port_channel_id, - ))); - } - }; - - match event { - Some(event) => self - .check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string()))?, - None => { - if self.ctx.write_log.get_ibc_event().is_some() { - return Err(Error::InvalidStateChange(format!( - "The state change of the channel happened with an \ - unexpected IBC event: Port/Channel {}, event {:?}", - port_channel_id, - self.ctx.write_log.get_ibc_event(), - ))); - } - } - } - - Ok(()) - } - - fn validate_commitment_absence( - &self, - port_channel_sequence_id: (PortId, ChannelId, Sequence), - ) -> Result<()> { - // check if the commitment has been deleted - let key = commitment_key( - &port_channel_sequence_id.0, - &port_channel_sequence_id.1, - port_channel_sequence_id.2, - ); - let state_change = self - .get_state_change(&key) - .map_err(|e| Error::InvalidStateChange(e.to_string()))?; - match state_change { - // the deleted commitment is validated in validate_commitment() - StateChange::Deleted => Ok(()), - _ => Err(Error::InvalidStateChange(format!( - "The commitment hasn't been deleted yet: Port {}, Channel {}, \ - Sequence {}", - port_channel_sequence_id.0, - port_channel_sequence_id.1, - port_channel_sequence_id.2, - ))), - } - } - - fn verify_channel_try_proof( - &self, - port_channel_id: &PortChannelId, - channel: &ChannelEnd, - msg: &MsgChannelOpenTry, - ) -> Result<()> { - let expected_my_side = - Counterparty::new(port_channel_id.port_id.clone(), None); - self.verify_proofs( - msg.proofs.height(), - channel, - expected_my_side, - State::Init, - &msg.proofs, - ) - } - - fn verify_channel_ack_proof( - &self, - port_channel_id: &PortChannelId, - channel: &ChannelEnd, - msg: &MsgChannelOpenAck, - ) -> Result<()> { - match channel.counterparty().channel_id() { - Some(counterpart_channel_id) => { - if *counterpart_channel_id != msg.counterparty_channel_id { - return Err(Error::InvalidChannel(format!( - "The counterpart channel ID mismatched: ID {}", - counterpart_channel_id - ))); - } - } - None => { - return 
Err(Error::InvalidChannel(format!( - "The channel doesn't have the counterpart channel ID: ID \ - {}", - port_channel_id - ))); - } - } - let expected_my_side = Counterparty::new( - port_channel_id.port_id.clone(), - Some(port_channel_id.channel_id), - ); - self.verify_proofs( - msg.proofs.height(), - channel, - expected_my_side, - State::TryOpen, - &msg.proofs, - ) - } - - fn verify_channel_confirm_proof( - &self, - port_channel_id: &PortChannelId, - channel: &ChannelEnd, - msg: &MsgChannelOpenConfirm, - ) -> Result<()> { - let expected_my_side = Counterparty::new( - port_channel_id.port_id.clone(), - Some(port_channel_id.channel_id), - ); - self.verify_proofs( - msg.proofs.height(), - channel, - expected_my_side, - State::Open, - &msg.proofs, - ) - } - - fn verify_channel_close_proof( - &self, - port_channel_id: &PortChannelId, - channel: &ChannelEnd, - msg: &MsgChannelCloseConfirm, - ) -> Result<()> { - let expected_my_side = Counterparty::new( - port_channel_id.port_id.clone(), - Some(port_channel_id.channel_id), - ); - self.verify_proofs( - msg.proofs.height(), - channel, - expected_my_side, - State::Closed, - &msg.proofs, - ) - } - - fn verify_proofs( - &self, - height: Height, - channel: &ChannelEnd, - expected_my_side: Counterparty, - expected_state: State, - proofs: &Proofs, - ) -> Result<()> { - let connection = self.connection_from_channel(channel)?; - let counterpart_conn_id = - match connection.counterparty().connection_id() { - Some(id) => id.clone(), - None => { - return Err(Error::InvalidConnection( - "The counterpart connection ID doesn't exist" - .to_owned(), - )); - } - }; - let expected_connection_hops = vec![counterpart_conn_id]; - let expected_channel = ChannelEnd::new( - expected_state, - *channel.ordering(), - expected_my_side, - expected_connection_hops, - channel.version().clone(), - ); - - match verify_channel_proofs( - self, - height, - channel, - &connection, - &expected_channel, - proofs, - ) { - Ok(_) => Ok(()), - Err(e) => Err(Error::ProofVerificationFailure(e)), - } - } - - fn get_sequence_pre(&self, key: &Key) -> Result { - match self.ctx.read_bytes_pre(key)? { - Some(value) => { - // As ibc-go, u64 like a counter is encoded with big-endian - let index: [u8; 8] = value.try_into().map_err(|_| { - Error::InvalidSequence( - "Encoding the prior sequence index failed".to_string(), - ) - })?; - let index = u64::from_be_bytes(index); - Ok(Sequence::from(index)) - } - // The sequence is updated for the first time. The previous sequence - // is the initial number. - None => Ok(Sequence::from(1)), - } - } - - fn get_sequence(&self, key: &Key) -> Result { - match self.ctx.read_bytes_post(key)? 
{ - Some(value) => { - // As ibc-go, u64 like a counter is encoded with big-endian - let index: [u8; 8] = value.try_into().map_err(|_| { - Error::InvalidSequence( - "Encoding the sequence index failed".to_string(), - ) - })?; - let index = u64::from_be_bytes(index); - Ok(Sequence::from(index)) - } - // The sequence has not been used yet - None => Ok(Sequence::from(1)), - } - } - - pub(super) fn connection_from_channel( - &self, - channel: &ChannelEnd, - ) -> Result { - match channel.connection_hops().get(0) { - Some(conn_id) => ChannelReader::connection_end(self, conn_id) - .map_err(|_| { - Error::InvalidConnection(format!( - "The connection doesn't exist: ID {}", - conn_id - )) - }), - _ => Err(Error::InvalidConnection( - "the corresponding connection ID doesn't exist".to_owned(), - )), - } - } - - pub(super) fn channel_end_pre( - &self, - port_channel_id: &PortChannelId, - ) -> Result { - let key = channel_key(port_channel_id); - match self.ctx.read_bytes_pre(&key) { - Ok(Some(value)) => ChannelEnd::decode_vec(&value).map_err(|e| { - Error::InvalidChannel(format!( - "Decoding the channel failed: Port/Channel {}, {}", - port_channel_id, e - )) - }), - Ok(None) => Err(Error::InvalidChannel(format!( - "The prior channel doesn't exist: Port/Channel {}", - port_channel_id - ))), - Err(e) => Err(Error::InvalidChannel(format!( - "Reading the prior channel failed: {}", - e - ))), - } - } - - pub(super) fn get_next_sequence_send_pre( - &self, - port_channel_id: &PortChannelId, - ) -> Result { - let key = next_sequence_send_key(port_channel_id); - self.get_sequence_pre(&key) - } - - pub(super) fn get_next_sequence_recv_pre( - &self, - port_channel_id: &PortChannelId, - ) -> Result { - let key = next_sequence_recv_key(port_channel_id); - self.get_sequence_pre(&key) - } - - pub(super) fn get_next_sequence_ack_pre( - &self, - port_channel_id: &PortChannelId, - ) -> Result { - let key = next_sequence_ack_key(port_channel_id); - self.get_sequence_pre(&key) - } - - pub(super) fn get_packet_commitment_pre( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Result { - let key = commitment_key(&key.0, &key.1, key.2); - match self.ctx.read_bytes_pre(&key)? { - Some(value) => Ok(value.into()), - None => Err(Error::InvalidPacketInfo(format!( - "The prior commitment doesn't exist: Key {}", - key - ))), - } - } - - pub(super) fn client_update_time_pre( - &self, - client_id: &ClientId, - ) -> Result { - let key = client_update_timestamp_key(client_id); - match self.ctx.read_bytes_pre(&key)? { - Some(value) => { - let time = Time::decode_vec(&value).map_err(|_| { - Error::InvalidTimestamp(format!( - "Timestamp conversion failed: ID {}", - client_id - )) - })?; - Ok(time.into()) - } - None => Err(Error::InvalidTimestamp(format!( - "Timestamp doesn't exist: ID {}", - client_id - ))), - } - } - - pub(super) fn client_update_height_pre( - &self, - client_id: &ClientId, - ) -> Result { - let key = client_update_height_key(client_id); - match self.ctx.read_bytes_pre(&key)? 
{ - Some(value) => Height::decode_vec(&value).map_err(|_| { - Error::InvalidHeight(format!( - "Height conversion failed: ID {}", - client_id - )) - }), - None => Err(Error::InvalidHeight(format!( - "Client update height doesn't exist: ID {}", - client_id - ))), - } - } - - fn channel_counter_pre(&self) -> Result { - let key = channel_counter_key(); - self.read_counter_pre(&key) - .map_err(|e| Error::InvalidChannel(e.to_string())) - } -} - -impl<'a, DB, H, CA> ChannelReader for Ibc<'a, DB, H, CA> -where - DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - fn channel_end( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Ics04Result { - let port_channel_id = PortChannelId { - port_id: port_channel_id.0.clone(), - channel_id: port_channel_id.1, - }; - let key = channel_key(&port_channel_id); - match self.ctx.read_bytes_post(&key) { - Ok(Some(value)) => ChannelEnd::decode_vec(&value) - .map_err(|_| Ics04Error::implementation_specific()), - Ok(None) => Err(Ics04Error::channel_not_found( - port_channel_id.port_id, - port_channel_id.channel_id, - )), - Err(_) => Err(Ics04Error::implementation_specific()), - } - } - - fn connection_end( - &self, - conn_id: &ConnectionId, - ) -> Ics04Result { - ConnectionReader::connection_end(self, conn_id) - .map_err(Ics04Error::ics03_connection) - } - - fn connection_channels( - &self, - conn_id: &ConnectionId, - ) -> Ics04Result> { - let mut channels = vec![]; - let prefix = Key::parse("channelEnds/ports") - .expect("Creating a key for the prefix shouldn't fail"); - let post = self.ctx.post(); - let mut iter = post - .iter_prefix(&prefix) - .map_err(|_| Ics04Error::implementation_specific())?; - loop { - let next = post - .iter_next(&mut iter) - .map_err(|_| Ics04Error::implementation_specific())?; - if let Some((key, value)) = next { - let channel = ChannelEnd::decode_vec(&value) - .map_err(|_| Ics04Error::implementation_specific())?; - if let Some(id) = channel.connection_hops().get(0) { - if id == conn_id { - let key = Key::parse(key).map_err(|_| { - Ics04Error::implementation_specific() - })?; - let port_channel_id = - port_channel_id(&key).map_err(|_| { - Ics04Error::implementation_specific() - })?; - channels.push(( - port_channel_id.port_id, - port_channel_id.channel_id, - )); - } - } - } else { - break; - } - } - Ok(channels) - } - - fn client_state( - &self, - client_id: &ClientId, - ) -> Ics04Result { - ConnectionReader::client_state(self, client_id) - .map_err(Ics04Error::ics03_connection) - } - - fn client_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Ics04Result { - ConnectionReader::client_consensus_state(self, client_id, height) - .map_err(Ics04Error::ics03_connection) - } - - fn authenticated_capability( - &self, - port_id: &PortId, - ) -> Ics04Result { - let (_, port_cap) = self - .lookup_module_by_port(port_id) - .map_err(|_| Ics04Error::no_port_capability(port_id.clone()))?; - if self.authenticate(port_id.clone(), &port_cap) { - let cap: Capability = port_cap.into(); - Ok(cap.into()) - } else { - Err(Ics04Error::invalid_port_capability()) - } - } - - fn get_next_sequence_send( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Ics04Result { - let port_channel_id = PortChannelId { - port_id: port_channel_id.0.clone(), - channel_id: port_channel_id.1, - }; - let key = next_sequence_send_key(&port_channel_id); - self.get_sequence(&key).map_err(|_| { - Ics04Error::missing_next_send_seq(( - 
port_channel_id.port_id, - port_channel_id.channel_id, - )) - }) - } - - fn get_next_sequence_recv( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Ics04Result { - let port_channel_id = PortChannelId { - port_id: port_channel_id.0.clone(), - channel_id: port_channel_id.1, - }; - let key = next_sequence_recv_key(&port_channel_id); - self.get_sequence(&key).map_err(|_| { - Ics04Error::missing_next_recv_seq(( - port_channel_id.port_id, - port_channel_id.channel_id, - )) - }) - } - - fn get_next_sequence_ack( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Ics04Result { - let port_channel_id = PortChannelId { - port_id: port_channel_id.0.clone(), - channel_id: port_channel_id.1, - }; - let key = next_sequence_ack_key(&port_channel_id); - self.get_sequence(&key).map_err(|_| { - Ics04Error::missing_next_ack_seq(( - port_channel_id.port_id, - port_channel_id.channel_id, - )) - }) - } - - fn get_packet_commitment( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Ics04Result { - let commitment_key = commitment_key(&key.0, &key.1, key.2); - match self.ctx.read_bytes_post(&commitment_key) { - Ok(Some(value)) => Ok(value.into()), - Ok(None) => Err(Ics04Error::packet_commitment_not_found(key.2)), - Err(_) => Err(Ics04Error::implementation_specific()), - } - } - - fn get_packet_receipt( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Ics04Result { - let receipt_key = receipt_key(&key.0, &key.1, key.2); - let expect = PacketReceipt::default().as_bytes().to_vec(); - match self.ctx.read_bytes_post(&receipt_key) { - Ok(Some(v)) if v == expect => Ok(Receipt::Ok), - _ => Err(Ics04Error::packet_receipt_not_found(key.2)), - } - } - - fn get_packet_acknowledgement( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Ics04Result { - let ack_key = ack_key(&key.0, &key.1, key.2); - match self.ctx.read_bytes_post(&ack_key) { - Ok(Some(value)) => Ok(value.into()), - Ok(None) => Err(Ics04Error::packet_commitment_not_found(key.2)), - Err(_) => Err(Ics04Error::implementation_specific()), - } - } - - fn hash(&self, value: Vec) -> Vec { - sha2::Sha256::digest(&value).to_vec() - } - - fn host_height(&self) -> Height { - ClientReader::host_height(self) - } - - fn host_consensus_state( - &self, - height: Height, - ) -> Ics04Result { - ClientReader::host_consensus_state(self, height).map_err(|e| { - Ics04Error::ics03_connection(Ics03Error::ics02_client(e)) - }) - } - - fn pending_host_consensus_state(&self) -> Ics04Result { - ClientReader::pending_host_consensus_state(self).map_err(|e| { - Ics04Error::ics03_connection(Ics03Error::ics02_client(e)) - }) - } - - fn client_update_time( - &self, - client_id: &ClientId, - height: Height, - ) -> Ics04Result { - let key = client_update_timestamp_key(client_id); - match self.ctx.read_bytes_post(&key) { - Ok(Some(value)) => { - let time = Time::decode_vec(&value) - .map_err(|_| Ics04Error::implementation_specific())?; - Ok(time.into()) - } - Ok(None) => Err(Ics04Error::processed_time_not_found( - client_id.clone(), - height, - )), - Err(_) => Err(Ics04Error::implementation_specific()), - } - } - - fn client_update_height( - &self, - client_id: &ClientId, - height: Height, - ) -> Ics04Result { - let key = client_update_height_key(client_id); - match self.ctx.read_bytes_post(&key) { - Ok(Some(value)) => Height::decode_vec(&value) - .map_err(|_| Ics04Error::implementation_specific()), - Ok(None) => Err(Ics04Error::processed_height_not_found( - client_id.clone(), - height, - )), - Err(_) => Err(Ics04Error::implementation_specific()), - } - } - - fn 
channel_counter(&self) -> Ics04Result { - let key = channel_counter_key(); - self.read_counter(&key) - .map_err(|_| Ics04Error::implementation_specific()) - } - - fn max_expected_time_per_block(&self) -> Duration { - match parameters::read(self.ctx.storage) { - Ok((parameters, gas)) => { - match self.ctx.gas_meter.borrow_mut().add(gas) { - Ok(_) => parameters.max_expected_time_per_block.into(), - Err(_) => Duration::default(), - } - } - Err(_) => Duration::default(), - } - } - - fn lookup_module_by_channel( - &self, - _channel_id: &ChannelId, - port_id: &PortId, - ) -> Ics04Result<(ModuleId, ChannelCapability)> { - let (module_id, port_cap) = self - .lookup_module_by_port(port_id) - .map_err(Ics04Error::ics05_port)?; - let cap: Capability = port_cap.into(); - Ok((module_id, cap.into())) - } -} - -impl From for Error { - fn from(err: NativeVpError) -> Self { - Self::NativeVp(err) - } -} - -impl From for Error { - fn from(err: IbcStorageError) -> Self { - Self::IbcStorage(err) - } -} - -impl From for Error { - fn from(err: IbcDataError) -> Self { - Self::InvalidIbcData(err) - } -} - -impl From for Error { - fn from(err: std::io::Error) -> Self { - Self::DecodingTxData(err) - } -} diff --git a/shared/src/ledger/ibc/vp/client.rs b/shared/src/ledger/ibc/vp/client.rs deleted file mode 100644 index 9bc5d20efbc..00000000000 --- a/shared/src/ledger/ibc/vp/client.rs +++ /dev/null @@ -1,614 +0,0 @@ -//! IBC validity predicate for client module -use std::convert::TryInto; -use std::str::FromStr; - -use namada_core::ledger::ibc::actions::{ - make_create_client_event, make_update_client_event, - make_upgrade_client_event, -}; -use namada_core::ledger::storage_api::StorageRead; -use thiserror::Error; - -use super::super::storage::{ - client_counter_key, client_state_key, client_type_key, - client_update_height_key, client_update_timestamp_key, consensus_height, - consensus_state_key, consensus_state_prefix, -}; -use super::{Ibc, StateChange}; -use crate::ibc::clients::ics07_tendermint::consensus_state::ConsensusState as TmConsensusState; -use crate::ibc::core::ics02_client::client_consensus::{ - AnyConsensusState, ConsensusState, -}; -use crate::ibc::core::ics02_client::client_def::{AnyClient, ClientDef}; -use crate::ibc::core::ics02_client::client_state::AnyClientState; -use crate::ibc::core::ics02_client::client_type::ClientType; -use crate::ibc::core::ics02_client::context::ClientReader; -use crate::ibc::core::ics02_client::error::Error as Ics02Error; -use crate::ibc::core::ics02_client::height::Height; -use crate::ibc::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; -use crate::ibc::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; -use crate::ibc::core::ics02_client::msgs::ClientMsg; -use crate::ibc::core::ics04_channel::context::ChannelReader; -use crate::ibc::core::ics23_commitment::commitment::CommitmentRoot; -use crate::ibc::core::ics24_host::identifier::ClientId; -use crate::ibc::core::ics26_routing::msgs::Ics26Envelope; -use crate::ledger::native_vp::VpEnv; -use crate::ledger::storage::{self, StorageHasher}; -use crate::tendermint_proto::Protobuf; -use crate::types::ibc::data::{Error as IbcDataError, IbcMessage}; -use crate::types::storage::{BlockHeight, Key}; -use crate::vm::WasmCacheAccess; - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("State change error: {0}")] - InvalidStateChange(String), - #[error("Client error: {0}")] - InvalidClient(String), - #[error("Header error: {0}")] - InvalidHeader(String), - #[error("Client update time 
error: {0}")] - InvalidTimestamp(String), - #[error("Client update height error: {0}")] - InvalidHeight(String), - #[error("Proof verification error: {0}")] - ProofVerificationFailure(String), - #[error("Decoding TX data error: {0}")] - DecodingTxData(std::io::Error), - #[error("Decoding client data error: {0}")] - DecodingClientData(std::io::Error), - #[error("IBC data error: {0}")] - InvalidIbcData(IbcDataError), - #[error("IBC event error: {0}")] - IbcEvent(String), -} - -/// IBC client functions result -pub type Result = std::result::Result; -/// ClientReader result -type Ics02Result = core::result::Result; - -impl<'a, DB, H, CA> Ibc<'a, DB, H, CA> -where - DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - pub(super) fn validate_client( - &self, - client_id: &ClientId, - tx_data: &[u8], - ) -> Result<()> { - match self.get_client_state_change(client_id)? { - StateChange::Created => { - self.validate_created_client(client_id, tx_data) - } - StateChange::Updated => { - self.validate_updated_client(client_id, tx_data) - } - _ => Err(Error::InvalidStateChange(format!( - "The state change of the client is invalid: ID {}", - client_id - ))), - } - } - - fn get_client_state_change( - &self, - client_id: &ClientId, - ) -> Result { - let key = client_state_key(client_id); - self.get_state_change(&key) - .map_err(|e| Error::InvalidStateChange(e.to_string())) - } - - fn get_client_update_time_change( - &self, - client_id: &ClientId, - ) -> Result { - let key = client_update_timestamp_key(client_id); - let timestamp_change = self - .get_state_change(&key) - .map_err(|e| Error::InvalidStateChange(e.to_string()))?; - let key = client_update_height_key(client_id); - let height_change = self - .get_state_change(&key) - .map_err(|e| Error::InvalidStateChange(e.to_string()))?; - // the time and height should be updated at once - match (timestamp_change, height_change) { - (StateChange::Created, StateChange::Created) => { - Ok(StateChange::Created) - } - (StateChange::Updated, StateChange::Updated) => { - let timestamp_pre = - self.client_update_time_pre(client_id).map_err(|e| { - Error::InvalidTimestamp(format!( - "Reading the prior client update time failed: {}", - e - )) - })?; - let timestamp_post = self - .client_update_time(client_id, Height::default()) - .map_err(|e| { - Error::InvalidTimestamp(format!( - "Reading the posterior client update time failed: \ - {}", - e - )) - })?; - if timestamp_post.nanoseconds() <= timestamp_pre.nanoseconds() { - return Err(Error::InvalidTimestamp(format!( - "The state change of the client update time is \ - invalid: ID {}", - client_id - ))); - } - let height_pre = - self.client_update_height_pre(client_id).map_err(|e| { - Error::InvalidHeight(format!( - "Reading the prior client update height failed: {}", - e - )) - })?; - let height_post = self - .client_update_height(client_id, Height::default()) - .map_err(|e| { - Error::InvalidTimestamp(format!( - "Reading the posterior client update height \ - failed: {}", - e - )) - })?; - if height_post <= height_pre { - return Err(Error::InvalidHeight(format!( - "The state change of the client update height is \ - invalid: ID {}", - client_id - ))); - } - Ok(StateChange::Updated) - } - _ => Err(Error::InvalidStateChange(format!( - "The state change of the client update time and height are \ - invalid: ID {}", - client_id - ))), - } - } - - fn validate_created_client( - &self, - client_id: &ClientId, - tx_data: &[u8], - ) -> Result<()> { - 
let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_create_any_client()?; - let client_type = self.client_type(client_id).map_err(|_| { - Error::InvalidClient(format!( - "The client type doesn't exist: ID {}", - client_id - )) - })?; - let client_state = ClientReader::client_state(self, client_id) - .map_err(|_| { - Error::InvalidClient(format!( - "The client state doesn't exist: ID {}", - client_id - )) - })?; - let height = client_state.latest_height(); - let consensus_state = - self.consensus_state(client_id, height).map_err(|_| { - Error::InvalidClient(format!( - "The consensus state doesn't exist: ID {}, Height {}", - client_id, height - )) - })?; - if client_type != client_state.client_type() - || client_type != consensus_state.client_type() - { - return Err(Error::InvalidClient( - "The client type is mismatched".to_owned(), - )); - } - if self.get_client_update_time_change(client_id)? - != StateChange::Created - { - return Err(Error::InvalidClient(format!( - "The client update time or height are invalid: ID {}", - client_id, - ))); - } - - let event = make_create_client_event(client_id, &msg); - self.check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string())) - } - - fn validate_updated_client( - &self, - client_id: &ClientId, - tx_data: &[u8], - ) -> Result<()> { - if self.get_client_update_time_change(client_id)? - != StateChange::Updated - { - return Err(Error::InvalidClient(format!( - "The client update time and height are invalid: ID {}", - client_id, - ))); - } - // check the type of data in tx_data - let ibc_msg = IbcMessage::decode(tx_data)?; - match ibc_msg.0 { - Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(msg)) => { - self.verify_update_client(client_id, msg) - } - Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(msg)) => { - self.verify_upgrade_client(client_id, msg) - } - _ => Err(Error::InvalidStateChange(format!( - "The state change of the client is invalid: ID {}", - client_id - ))), - } - } - - fn verify_update_client( - &self, - client_id: &ClientId, - msg: MsgUpdateAnyClient, - ) -> Result<()> { - if msg.client_id != *client_id { - return Err(Error::InvalidClient(format!( - "The client ID is mismatched: {} in the tx data, {} in the key", - msg.client_id, client_id, - ))); - } - - // check the posterior states - let client_state = ClientReader::client_state(self, client_id) - .map_err(|_| { - Error::InvalidClient(format!( - "The client state doesn't exist: ID {}", - client_id - )) - })?; - let height = client_state.latest_height(); - let consensus_state = - self.consensus_state(client_id, height).map_err(|_| { - Error::InvalidClient(format!( - "The consensus state doesn't exist: ID {}, Height {}", - client_id, height - )) - })?; - // check the prior states - let prev_client_state = self.client_state_pre(client_id)?; - - let client = AnyClient::from_client_type(client_state.client_type()); - let (new_client_state, new_consensus_state) = client - .check_header_and_update_state( - self, - client_id.clone(), - prev_client_state, - msg.header.clone(), - ) - .map_err(|e| { - Error::InvalidHeader(format!( - "The header is invalid: ID {}, {}", - client_id, e, - )) - })?; - if new_client_state != client_state - || new_consensus_state != consensus_state - { - return Err(Error::InvalidClient( - "The updated client state or consensus state is unexpected" - .to_owned(), - )); - } - - let event = make_update_client_event(client_id, &msg); - self.check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string())) - } - - fn 
verify_upgrade_client( - &self, - client_id: &ClientId, - msg: MsgUpgradeAnyClient, - ) -> Result<()> { - if msg.client_id != *client_id { - return Err(Error::InvalidClient(format!( - "The client ID is mismatched: {} in the tx data, {} in the key", - msg.client_id, client_id, - ))); - } - - // check the posterior states - let client_state_post = ClientReader::client_state(self, client_id) - .map_err(|_| { - Error::InvalidClient(format!( - "The client state doesn't exist: ID {}", - client_id - )) - })?; - let height = client_state_post.latest_height(); - let consensus_state_post = - self.consensus_state(client_id, height).map_err(|_| { - Error::InvalidClient(format!( - "The consensus state doesn't exist: ID {}, Height {}", - client_id, height - )) - })?; - - // verify the given states - let client_type = self.client_type(client_id).map_err(|_| { - Error::InvalidClient(format!( - "The client type doesn't exist: ID {}", - client_id - )) - })?; - let client = AnyClient::from_client_type(client_type); - match client.verify_upgrade_and_update_state( - &msg.client_state, - &msg.consensus_state, - msg.proof_upgrade_client.clone(), - msg.proof_upgrade_consensus_state.clone(), - ) { - Ok((new_client_state, new_consensus_state)) => { - if new_client_state != client_state_post - || new_consensus_state != consensus_state_post - { - return Err(Error::InvalidClient( - "The updated client state or consensus state is \ - unexpected" - .to_owned(), - )); - } - } - Err(e) => { - return Err(Error::ProofVerificationFailure(e.to_string())); - } - } - - let event = make_upgrade_client_event(client_id, &msg); - self.check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string())) - } - - fn client_state_pre(&self, client_id: &ClientId) -> Result { - let key = client_state_key(client_id); - match self.ctx.read_bytes_pre(&key) { - Ok(Some(value)) => { - AnyClientState::decode_vec(&value).map_err(|e| { - Error::InvalidClient(format!( - "Decoding the client state failed: ID {}, {}", - client_id, e - )) - }) - } - _ => Err(Error::InvalidClient(format!( - "The prior client state doesn't exist: ID {}", - client_id - ))), - } - } - - pub(super) fn client_counter_pre(&self) -> Result { - let key = client_counter_key(); - self.read_counter_pre(&key) - .map_err(|e| Error::InvalidClient(e.to_string())) - } -} - -/// Load the posterior client state -impl<'a, DB, H, CA> ClientReader for Ibc<'a, DB, H, CA> -where - DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - fn client_type(&self, client_id: &ClientId) -> Ics02Result { - let key = client_type_key(client_id); - match self.ctx.read_bytes_post(&key) { - Ok(Some(value)) => { - let type_str = std::str::from_utf8(&value) - .map_err(|_| Ics02Error::implementation_specific())?; - ClientType::from_str(type_str) - .map_err(|_| Ics02Error::implementation_specific()) - } - Ok(None) => Err(Ics02Error::client_not_found(client_id.clone())), - Err(_) => Err(Ics02Error::implementation_specific()), - } - } - - fn client_state( - &self, - client_id: &ClientId, - ) -> Ics02Result { - let key = client_state_key(client_id); - match self.ctx.read_bytes_post(&key) { - Ok(Some(value)) => AnyClientState::decode_vec(&value) - .map_err(|_| Ics02Error::implementation_specific()), - Ok(None) => Err(Ics02Error::client_not_found(client_id.clone())), - Err(_) => Err(Ics02Error::implementation_specific()), - } - } - - fn consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Ics02Result { - let key 
= consensus_state_key(client_id, height); - match self.ctx.read_bytes_post(&key) { - Ok(Some(value)) => AnyConsensusState::decode_vec(&value) - .map_err(|_| Ics02Error::implementation_specific()), - Ok(None) => Err(Ics02Error::consensus_state_not_found( - client_id.clone(), - height, - )), - Err(_) => Err(Ics02Error::implementation_specific()), - } - } - - // Reimplement to avoid reading the posterior state - fn maybe_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Ics02Result> { - let key = consensus_state_key(client_id, height); - match self.ctx.read_bytes_pre(&key) { - Ok(Some(value)) => { - let cs = AnyConsensusState::decode_vec(&value) - .map_err(|_| Ics02Error::implementation_specific())?; - Ok(Some(cs)) - } - Ok(None) => Ok(None), - Err(_) => Err(Ics02Error::implementation_specific()), - } - } - - /// Search for the lowest consensus state higher than `height`. - fn next_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Ics02Result> { - let prefix = consensus_state_prefix(client_id); - let pre = self.ctx.pre(); - let mut iter = pre - .iter_prefix(&prefix) - .map_err(|_| Ics02Error::implementation_specific())?; - let mut lowest_height_value = None; - while let Some((key, value)) = pre - .iter_next(&mut iter) - .map_err(|_| Ics02Error::implementation_specific())? - { - let key = Key::parse(key) - .map_err(|_| Ics02Error::implementation_specific())?; - let consensus_height = consensus_height(&key) - .map_err(|_| Ics02Error::implementation_specific())?; - if consensus_height > height { - lowest_height_value = match lowest_height_value { - Some((lowest, _)) if consensus_height < lowest => { - Some((consensus_height, value)) - } - Some(_) => continue, - None => Some((consensus_height, value)), - }; - } - } - match lowest_height_value { - Some((_, value)) => { - let cs = AnyConsensusState::decode_vec(&value) - .map_err(|_| Ics02Error::implementation_specific())?; - Ok(Some(cs)) - } - None => Ok(None), - } - } - - /// Search for the highest consensus state lower than `height`. - fn prev_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Ics02Result> { - let prefix = consensus_state_prefix(client_id); - let pre = self.ctx.pre(); - let mut iter = pre - .iter_prefix(&prefix) - .map_err(|_| Ics02Error::implementation_specific())?; - let mut highest_height_value = None; - while let Some((key, value)) = pre - .iter_next(&mut iter) - .map_err(|_| Ics02Error::implementation_specific())? 
- { - let key = Key::parse(key) - .map_err(|_| Ics02Error::implementation_specific())?; - let consensus_height = consensus_height(&key) - .map_err(|_| Ics02Error::implementation_specific())?; - if consensus_height < height { - highest_height_value = match highest_height_value { - Some((highest, _)) if consensus_height > highest => { - Some((consensus_height, value)) - } - Some(_) => continue, - None => Some((consensus_height, value)), - }; - } - } - match highest_height_value { - Some((_, value)) => { - let cs = AnyConsensusState::decode_vec(&value) - .map_err(|_| Ics02Error::implementation_specific())?; - Ok(Some(cs)) - } - None => Ok(None), - } - } - - fn host_height(&self) -> Height { - let height = self.ctx.storage.get_block_height().0.0; - // the revision number is always 0 - Height::new(0, height) - } - - fn host_consensus_state( - &self, - height: Height, - ) -> Ics02Result { - let (header, gas) = self - .ctx - .storage - .get_block_header(Some(BlockHeight(height.revision_height))) - .map_err(|_| Ics02Error::implementation_specific())?; - self.ctx - .gas_meter - .borrow_mut() - .add(gas) - .map_err(|_| Ics02Error::implementation_specific())?; - match header { - Some(h) => Ok(TmConsensusState { - root: CommitmentRoot::from_bytes(h.hash.as_slice()), - timestamp: h.time.try_into().unwrap(), - next_validators_hash: h.next_validators_hash.into(), - } - .wrap_any()), - None => Err(Ics02Error::missing_raw_header()), - } - } - - fn pending_host_consensus_state(&self) -> Ics02Result { - let (block_height, gas) = self.ctx.storage.get_block_height(); - self.ctx - .gas_meter - .borrow_mut() - .add(gas) - .map_err(|_| Ics02Error::implementation_specific())?; - let height = Height::new(0, block_height.0); - ClientReader::host_consensus_state(self, height) - } - - fn client_counter(&self) -> Ics02Result { - let key = client_counter_key(); - self.read_counter(&key) - .map_err(|_| Ics02Error::implementation_specific()) - } -} - -impl From for Error { - fn from(err: IbcDataError) -> Self { - Self::InvalidIbcData(err) - } -} - -impl From for Error { - fn from(err: std::io::Error) -> Self { - Self::DecodingTxData(err) - } -} diff --git a/shared/src/ledger/ibc/vp/connection.rs b/shared/src/ledger/ibc/vp/connection.rs deleted file mode 100644 index 5f8df9a8a87..00000000000 --- a/shared/src/ledger/ibc/vp/connection.rs +++ /dev/null @@ -1,430 +0,0 @@ -//! 
IBC validity predicate for connection module - -use namada_core::ledger::ibc::actions::{ - commitment_prefix, make_open_ack_connection_event, - make_open_confirm_connection_event, make_open_init_connection_event, - make_open_try_connection_event, -}; -use thiserror::Error; - -use super::super::storage::{ - connection_counter_key, connection_id, connection_key, - is_connection_counter_key, Error as IbcStorageError, -}; -use super::{Ibc, StateChange}; -use crate::ibc::core::ics02_client::client_consensus::AnyConsensusState; -use crate::ibc::core::ics02_client::client_state::AnyClientState; -use crate::ibc::core::ics02_client::context::ClientReader; -use crate::ibc::core::ics02_client::height::Height; -use crate::ibc::core::ics03_connection::connection::{ - ConnectionEnd, Counterparty, State, -}; -use crate::ibc::core::ics03_connection::context::ConnectionReader; -use crate::ibc::core::ics03_connection::error::Error as Ics03Error; -use crate::ibc::core::ics03_connection::handler::verify::verify_proofs; -use crate::ibc::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; -use crate::ibc::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; -use crate::ibc::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; -use crate::ibc::core::ics23_commitment::commitment::CommitmentPrefix; -use crate::ibc::core::ics24_host::identifier::{ClientId, ConnectionId}; -use crate::ledger::native_vp::VpEnv; -use crate::ledger::storage::{self, StorageHasher}; -use crate::tendermint_proto::Protobuf; -use crate::types::ibc::data::{Error as IbcDataError, IbcMessage}; -use crate::types::storage::{BlockHeight, Epoch, Key}; -use crate::vm::WasmCacheAccess; - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("State change error: {0}")] - InvalidStateChange(String), - #[error("Client error: {0}")] - InvalidClient(String), - #[error("Connection error: {0}")] - InvalidConnection(String), - #[error("Version error: {0}")] - InvalidVersion(String), - #[error("Proof verification error: {0}")] - ProofVerificationFailure(Ics03Error), - #[error("Decoding TX data error: {0}")] - DecodingTxData(std::io::Error), - #[error("IBC data error: {0}")] - InvalidIbcData(IbcDataError), - #[error("IBC storage error: {0}")] - IbcStorage(IbcStorageError), - #[error("IBC event error: {0}")] - IbcEvent(String), -} - -/// IBC connection functions result -pub type Result = std::result::Result; -/// ConnectionReader result -type Ics03Result = core::result::Result; - -impl<'a, DB, H, CA> Ibc<'a, DB, H, CA> -where - DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - pub(super) fn validate_connection( - &self, - key: &Key, - tx_data: &[u8], - ) -> Result<()> { - if is_connection_counter_key(key) { - // the counter should be increased - let counter = self.connection_counter().map_err(|e| { - Error::InvalidConnection(format!( - "The connection counter doesn't exist: {}", - e - )) - })?; - if self.connection_counter_pre()? < counter { - return Ok(()); - } else { - return Err(Error::InvalidConnection( - "The connection counter is invalid".to_owned(), - )); - } - } - - let conn_id = connection_id(key)?; - let conn = self.connection_end(&conn_id).map_err(|_| { - Error::InvalidConnection(format!( - "The connection doesn't exist: ID {}", - conn_id - )) - })?; - - match self.get_connection_state_change(&conn_id)? 
{ - StateChange::Created => { - self.validate_created_connection(&conn_id, conn, tx_data) - } - StateChange::Updated => { - self.validate_updated_connection(&conn_id, conn, tx_data) - } - _ => Err(Error::InvalidStateChange(format!( - "The state change of the connection is invalid: ID {}", - conn_id - ))), - } - } - - fn get_connection_state_change( - &self, - conn_id: &ConnectionId, - ) -> Result { - let key = connection_key(conn_id); - self.get_state_change(&key) - .map_err(|e| Error::InvalidStateChange(e.to_string())) - } - - fn validate_created_connection( - &self, - conn_id: &ConnectionId, - conn: ConnectionEnd, - tx_data: &[u8], - ) -> Result<()> { - match conn.state() { - State::Init => { - let client_id = conn.client_id(); - ConnectionReader::client_state(self, client_id).map_err( - |_| { - Error::InvalidClient(format!( - "The client state for the connection doesn't \ - exist: ID {}", - conn_id, - )) - }, - )?; - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_connection_open_init()?; - let event = make_open_init_connection_event(conn_id, &msg); - self.check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string())) - } - State::TryOpen => { - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_connection_open_try()?; - self.verify_connection_try_proof(conn, &msg)?; - let event = make_open_try_connection_event(conn_id, &msg); - self.check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string())) - } - _ => Err(Error::InvalidConnection(format!( - "The connection state is invalid: ID {}", - conn_id - ))), - } - } - - fn validate_updated_connection( - &self, - conn_id: &ConnectionId, - conn: ConnectionEnd, - tx_data: &[u8], - ) -> Result<()> { - match conn.state() { - State::Open => { - let prev_conn = self.connection_end_pre(conn_id)?; - match prev_conn.state() { - State::Init => { - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_connection_open_ack()?; - self.verify_connection_ack_proof(conn_id, conn, &msg)?; - let event = make_open_ack_connection_event(&msg); - self.check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string())) - } - State::TryOpen => { - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_connection_open_confirm()?; - self.verify_connection_confirm_proof( - conn_id, conn, &msg, - )?; - let event = make_open_confirm_connection_event(&msg); - self.check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string())) - } - _ => Err(Error::InvalidStateChange(format!( - "The state change of connection is invalid: ID {}", - conn_id - ))), - } - } - _ => Err(Error::InvalidConnection(format!( - "The state of the connection is invalid: ID {}", - conn_id - ))), - } - } - - fn verify_connection_try_proof( - &self, - conn: ConnectionEnd, - msg: &MsgConnectionOpenTry, - ) -> Result<()> { - let client_id = conn.client_id().clone(); - let counterpart_client_id = conn.counterparty().client_id().clone(); - // expected connection end - let expected_conn = ConnectionEnd::new( - State::Init, - counterpart_client_id, - Counterparty::new(client_id, None, self.commitment_prefix()), - conn.versions().to_vec(), - conn.delay_period(), - ); - - match verify_proofs( - self, - msg.client_state.clone(), - msg.proofs.height(), - &conn, - &expected_conn, - &msg.proofs, - ) { - Ok(_) => Ok(()), - Err(e) => Err(Error::ProofVerificationFailure(e)), - } - } - - fn verify_connection_ack_proof( - &self, - conn_id: &ConnectionId, - conn: ConnectionEnd, - msg: &MsgConnectionOpenAck, - ) -> 
Result<()> { - // version check - if !conn.versions().contains(&msg.version) { - return Err(Error::InvalidVersion( - "The version is unsupported".to_owned(), - )); - } - - // counterpart connection ID check - match conn.counterparty().connection_id() { - Some(counterpart_conn_id) => { - if *counterpart_conn_id != msg.counterparty_connection_id { - return Err(Error::InvalidConnection(format!( - "The counterpart connection ID mismatched: ID {}", - counterpart_conn_id - ))); - } - } - None => { - return Err(Error::InvalidConnection(format!( - "The connection doesn't have the counterpart connection \ - ID: ID {}", - conn_id - ))); - } - } - - // expected counterpart connection - let expected_conn = ConnectionEnd::new( - State::TryOpen, - conn.counterparty().client_id().clone(), - Counterparty::new( - conn.client_id().clone(), - Some(conn_id.clone()), - self.commitment_prefix(), - ), - conn.versions().to_vec(), - conn.delay_period(), - ); - - match verify_proofs( - self, - msg.client_state.clone(), - msg.proofs.height(), - &conn, - &expected_conn, - &msg.proofs, - ) { - Ok(_) => Ok(()), - Err(e) => Err(Error::ProofVerificationFailure(e)), - } - } - - fn verify_connection_confirm_proof( - &self, - conn_id: &ConnectionId, - conn: ConnectionEnd, - msg: &MsgConnectionOpenConfirm, - ) -> Result<()> { - // expected counterpart connection - let expected_conn = ConnectionEnd::new( - State::Open, - conn.counterparty().client_id().clone(), - Counterparty::new( - conn.client_id().clone(), - Some(conn_id.clone()), - self.commitment_prefix(), - ), - conn.versions().to_vec(), - conn.delay_period(), - ); - - match verify_proofs( - self, - None, - msg.proofs.height(), - &conn, - &expected_conn, - &msg.proofs, - ) { - Ok(_) => Ok(()), - Err(e) => Err(Error::ProofVerificationFailure(e)), - } - } - - fn connection_end_pre( - &self, - conn_id: &ConnectionId, - ) -> Result { - let key = connection_key(conn_id); - match self.ctx.read_bytes_pre(&key) { - Ok(Some(value)) => ConnectionEnd::decode_vec(&value).map_err(|e| { - Error::InvalidConnection(format!( - "Decoding the connection failed: {}", - e - )) - }), - _ => Err(Error::InvalidConnection(format!( - "Unable to get the previous connection: ID {}", - conn_id - ))), - } - } - - fn connection_counter_pre(&self) -> Result { - let key = connection_counter_key(); - self.read_counter_pre(&key) - .map_err(|e| Error::InvalidConnection(e.to_string())) - } -} - -impl<'a, DB, H, CA> ConnectionReader for Ibc<'a, DB, H, CA> -where - DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - fn connection_end( - &self, - conn_id: &ConnectionId, - ) -> Ics03Result { - let key = connection_key(conn_id); - match self.ctx.read_bytes_post(&key) { - Ok(Some(value)) => ConnectionEnd::decode_vec(&value) - .map_err(|_| Ics03Error::implementation_specific()), - Ok(None) => Err(Ics03Error::connection_not_found(conn_id.clone())), - Err(_) => Err(Ics03Error::implementation_specific()), - } - } - - fn client_state( - &self, - client_id: &ClientId, - ) -> Ics03Result { - ClientReader::client_state(self, client_id) - .map_err(Ics03Error::ics02_client) - } - - fn host_current_height(&self) -> Height { - self.host_height() - } - - fn host_oldest_height(&self) -> Height { - let epoch = Epoch::default().0; - let height = BlockHeight::default().0; - Height::new(epoch, height) - } - - fn commitment_prefix(&self) -> CommitmentPrefix { - commitment_prefix() - } - - fn client_consensus_state( - &self, - client_id: &ClientId, - 
height: Height, - ) -> Ics03Result { - self.consensus_state(client_id, height) - .map_err(Ics03Error::ics02_client) - } - - fn host_consensus_state( - &self, - height: Height, - ) -> Ics03Result { - ClientReader::host_consensus_state(self, height) - .map_err(Ics03Error::ics02_client) - } - - fn connection_counter(&self) -> Ics03Result { - let key = connection_counter_key(); - self.read_counter(&key) - .map_err(|_| Ics03Error::implementation_specific()) - } -} - -impl From for Error { - fn from(err: IbcStorageError) -> Self { - Self::IbcStorage(err) - } -} - -impl From for Error { - fn from(err: IbcDataError) -> Self { - Self::InvalidIbcData(err) - } -} - -impl From for Error { - fn from(err: std::io::Error) -> Self { - Self::DecodingTxData(err) - } -} diff --git a/shared/src/ledger/ibc/vp/context.rs b/shared/src/ledger/ibc/vp/context.rs new file mode 100644 index 00000000000..c8e74979bdd --- /dev/null +++ b/shared/src/ledger/ibc/vp/context.rs @@ -0,0 +1,289 @@ +//! Contexts for IBC validity predicate + +use std::collections::{BTreeSet, HashMap, HashSet}; + +use borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::ledger::ibc::storage::is_ibc_key; +use namada_core::ledger::ibc::{IbcCommonContext, IbcStorageContext}; +use namada_core::ledger::storage::write_log::StorageModification; +use namada_core::ledger::storage::{self as ledger_storage, StorageHasher}; +use namada_core::ledger::storage_api::StorageRead; +use namada_core::types::address::{Address, InternalAddress}; +use namada_core::types::ibc::IbcEvent; +use namada_core::types::storage::{BlockHeight, Header, Key}; +use namada_core::types::token::{is_any_token_balance_key, Amount}; + +use super::Error; +use crate::ledger::native_vp::CtxPreStorageRead; +use crate::vm::WasmCacheAccess; + +#[derive(Debug)] +pub struct PseudoExecutionContext<'view, 'a, DB, H, CA> +where + DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + /// Temporary store for pseudo execution + store: HashMap, + /// Context to read the previous value + ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>, + /// IBC event + pub event: BTreeSet, +} + +impl<'view, 'a, DB, H, CA> PseudoExecutionContext<'view, 'a, DB, H, CA> +where + DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + pub fn new(ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>) -> Self { + Self { + store: HashMap::new(), + ctx, + event: BTreeSet::new(), + } + } + + pub fn get_changed_keys(&self) -> HashSet<&Key> { + self.store.keys().filter(|k| is_ibc_key(k)).collect() + } + + pub fn get_changed_value(&self, key: &Key) -> Option<&StorageModification> { + self.store.get(key) + } +} + +impl<'view, 'a, DB, H, CA> IbcStorageContext + for PseudoExecutionContext<'view, 'a, DB, H, CA> +where + DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + type Error = Error; + type PrefixIter<'iter> = ledger_storage::PrefixIter<'iter, DB> where Self: 'iter; + + fn read(&self, key: &Key) -> Result>, Self::Error> { + match self.store.get(key) { + Some(StorageModification::Write { ref value }) => { + Ok(Some(value.clone())) + } + Some(StorageModification::Delete) => Ok(None), + Some(StorageModification::Temp { .. }) => { + unreachable!("Temp shouldn't be inserted") + } + Some(StorageModification::InitAccount { .. 
}) => { + unreachable!("InitAccount shouldn't be inserted") + } + None => self.ctx.read_bytes(key).map_err(Error::NativeVpError), + } + } + + fn iter_prefix<'iter>( + &'iter self, + prefix: &Key, + ) -> Result, Self::Error> { + // NOTE: Read only the previous state since the updated state isn't + // needed for the caller + self.ctx.iter_prefix(prefix).map_err(Error::NativeVpError) + } + + fn iter_next<'iter>( + &'iter self, + iter: &mut Self::PrefixIter<'iter>, + ) -> Result)>, Self::Error> { + self.ctx.iter_next(iter).map_err(Error::NativeVpError) + } + + fn write(&mut self, key: &Key, value: Vec) -> Result<(), Self::Error> { + self.store + .insert(key.clone(), StorageModification::Write { value }); + Ok(()) + } + + fn delete(&mut self, key: &Key) -> Result<(), Self::Error> { + self.store.insert(key.clone(), StorageModification::Delete); + Ok(()) + } + + fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), Self::Error> { + self.event.insert(event); + Ok(()) + } + + fn transfer_token( + &mut self, + src: &Key, + dest: &Key, + amount: Amount, + ) -> Result<(), Self::Error> { + let src_owner = is_any_token_balance_key(src); + let mut src_bal = match src_owner { + Some(Address::Internal(InternalAddress::IbcMint)) => Amount::max(), + Some(Address::Internal(InternalAddress::IbcBurn)) => { + unreachable!("Invalid transfer from IBC burn address") + } + _ => match self.read(src)? { + Some(v) => { + Amount::try_from_slice(&v[..]).map_err(Error::Decoding)? + } + None => unreachable!("The source has no balance"), + }, + }; + src_bal.spend(&amount); + let dest_owner = is_any_token_balance_key(dest); + let mut dest_bal = match dest_owner { + Some(Address::Internal(InternalAddress::IbcMint)) => { + unreachable!("Invalid transfer to IBC mint address") + } + _ => match self.read(dest)? { + Some(v) => { + Amount::try_from_slice(&v[..]).map_err(Error::Decoding)? 
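+ // NOTE: the internal sentinel addresses keep this arithmetic uniform: + // a transfer sourced from the IBC mint address is given `Amount::max()` + // above so that any minted amount can be spent, and a destination with + // no stored balance falls back to zero below.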
+ } + None => Amount::default(), + }, + }; + dest_bal.receive(&amount); + + self.write( + src, + src_bal.try_to_vec().expect("encoding shouldn't fail"), + )?; + self.write( + dest, + dest_bal.try_to_vec().expect("encoding shouldn't fail"), + )?; + + Ok(()) + } + + /// Get the current height of this chain + fn get_height(&self) -> Result { + self.ctx.get_block_height().map_err(Error::NativeVpError) + } + + /// Get the block header of this chain + fn get_header( + &self, + height: BlockHeight, + ) -> Result, Self::Error> { + self.ctx + .get_block_header(height) + .map_err(Error::NativeVpError) + } + + fn log_string(&self, message: String) { + tracing::debug!("{} in the pseudo execution for IBC VP", message); + } +} + +impl<'view, 'a, DB, H, CA> IbcCommonContext + for PseudoExecutionContext<'view, 'a, DB, H, CA> +where + DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ +} + +#[derive(Debug)] +pub struct VpValidationContext<'view, 'a, DB, H, CA> +where + DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + /// Context to read the previous value + ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>, +} + +impl<'view, 'a, DB, H, CA> VpValidationContext<'view, 'a, DB, H, CA> +where + DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + pub fn new(ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>) -> Self { + Self { ctx } + } +} + +impl<'view, 'a, DB, H, CA> IbcStorageContext + for VpValidationContext<'view, 'a, DB, H, CA> +where + DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + type Error = Error; + type PrefixIter<'iter> = ledger_storage::PrefixIter<'iter, DB> where Self: 'iter; + + fn read(&self, key: &Key) -> Result>, Self::Error> { + self.ctx.read_bytes(key).map_err(Error::NativeVpError) + } + + fn iter_prefix<'iter>( + &'iter self, + prefix: &Key, + ) -> Result, Self::Error> { + self.ctx.iter_prefix(prefix).map_err(Error::NativeVpError) + } + + fn iter_next<'iter>( + &'iter self, + iter: &mut Self::PrefixIter<'iter>, + ) -> Result)>, Self::Error> { + self.ctx.iter_next(iter).map_err(Error::NativeVpError) + } + + fn write(&mut self, _key: &Key, _data: Vec) -> Result<(), Self::Error> { + unimplemented!("Validation doesn't write any data") + } + + fn delete(&mut self, _key: &Key) -> Result<(), Self::Error> { + unimplemented!("Validation doesn't delete any data") + } + + fn emit_ibc_event(&mut self, _event: IbcEvent) -> Result<(), Self::Error> { + unimplemented!("Validation doesn't emit an event") + } + + /// Transfer token + fn transfer_token( + &mut self, + _src: &Key, + _dest: &Key, + _amount: Amount, + ) -> Result<(), Self::Error> { + unimplemented!("Validation doesn't transfer") + } + + fn get_height(&self) -> Result { + self.ctx.get_block_height().map_err(Error::NativeVpError) + } + + fn get_header( + &self, + height: BlockHeight, + ) -> Result, Self::Error> { + self.ctx + .get_block_header(height) + .map_err(Error::NativeVpError) + } + + /// Logging + fn log_string(&self, message: String) { + tracing::debug!("{} for validation in IBC VP", message); + } +} + +impl<'view, 'a, DB, H, CA> IbcCommonContext + for VpValidationContext<'view, 'a, DB, H, CA> +where + DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, 
+ H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ +} diff --git a/shared/src/ledger/ibc/vp/denom.rs b/shared/src/ledger/ibc/vp/denom.rs index 18923f00506..d58edfdc336 100644 --- a/shared/src/ledger/ibc/vp/denom.rs +++ b/shared/src/ledger/ibc/vp/denom.rs @@ -1,26 +1,28 @@ //! IBC validity predicate for denom +use prost::Message; use thiserror::Error; use super::Ibc; +use crate::ibc::applications::transfer::packet::PacketData; +use crate::ibc::core::ics04_channel::msgs::PacketMsg; +use crate::ibc::core::ics26_routing::msgs::MsgEnvelope; +use crate::ibc_proto::google::protobuf::Any; use crate::ledger::ibc::storage; use crate::ledger::native_vp::VpEnv; use crate::ledger::storage::{self as ledger_storage, StorageHasher}; -use crate::types::ibc::data::{ - Error as IbcDataError, FungibleTokenPacketData, IbcMessage, -}; use crate::types::storage::KeySeg; use crate::vm::WasmCacheAccess; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { - #[error("Decoding TX data error: {0}")] - DecodingTxData(std::io::Error), - #[error("IBC data error: {0}")] - InvalidIbcData(IbcDataError), - #[error("Invalid packet data: {0}")] - PacketData(String), + #[error("Decoding IBC data error: {0}")] + DecodingData(prost::DecodeError), + #[error("Invalid message: {0}")] + IbcMessage(String), + #[error("Decoding PacketData error: {0}")] + DecodingPacketData(serde_json::Error), #[error("Denom error: {0}")] Denom(String), } @@ -35,54 +37,49 @@ where CA: 'static + WasmCacheAccess, { pub(super) fn validate_denom(&self, tx_data: &[u8]) -> Result<()> { - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_recv_packet()?; - match serde_json::from_slice::( - &msg.packet.data, - ) { - Ok(data) => { - let denom = format!( - "{}/{}/{}", - &msg.packet.destination_port, - &msg.packet.destination_channel, - &data.denom - ); - let token_hash = storage::calc_hash(&denom); - let denom_key = storage::ibc_denom_key(token_hash.raw()); - match self.ctx.read_bytes_post(&denom_key) { - Ok(Some(v)) => match std::str::from_utf8(&v) { - Ok(d) if d == denom => Ok(()), - Ok(d) => Err(Error::Denom(format!( - "Mismatch the denom: original {}, denom {}", - denom, d - ))), - Err(e) => Err(Error::Denom(format!( - "Decoding the denom failed: key {}, error {}", - denom_key, e - ))), - }, - _ => Err(Error::Denom(format!( - "Looking up the denom failed: Key {}", - denom_key - ))), - } - } - Err(e) => Err(Error::PacketData(format!( - "unknown packet data: error {}", + let ibc_msg = Any::decode(tx_data).map_err(Error::DecodingData)?; + let envelope: MsgEnvelope = ibc_msg.try_into().map_err(|e| { + Error::IbcMessage(format!( + "Decoding a MsgRecvPacket failed: Error {}", e + )) + })?; + // Only a transaction with MsgRecvPacket can update the denom store + let msg = match envelope { + MsgEnvelope::Packet(PacketMsg::Recv(msg)) => msg, + _ => { + return Err(Error::IbcMessage( + "Non-MsgRecvPacket message updated the denom store" + .to_string(), + )); + } + }; + let data = serde_json::from_slice::(&msg.packet.data) + .map_err(Error::DecodingPacketData)?; + let denom = format!( + "{}/{}/{}", + &msg.packet.port_id_on_b, + &msg.packet.chan_id_on_b, + &data.token.denom, + ); + let token_hash = storage::calc_hash(&denom); + let denom_key = storage::ibc_denom_key(token_hash.raw()); + match self.ctx.read_bytes_post(&denom_key) { + Ok(Some(v)) => match std::str::from_utf8(&v) { + Ok(d) if d == denom => Ok(()), + Ok(d) => Err(Error::Denom(format!( + "Denom mismatch: original {}, stored {}", + denom, d + ))), + Err(e) => 
Err(Error::Denom(format!( + "Decoding the denom failed: key {}, error {}", + denom_key, e + ))), + }, + _ => Err(Error::Denom(format!( + "Looking up the denom failed: Key {}", + denom_key ))), } } } - -impl From for Error { - fn from(err: IbcDataError) -> Self { - Self::InvalidIbcData(err) - } -} - -impl From for Error { - fn from(err: std::io::Error) -> Self { - Self::DecodingTxData(err) - } -} diff --git a/shared/src/ledger/ibc/vp/mod.rs b/shared/src/ledger/ibc/vp/mod.rs index 108942b548f..dc0a021b6f1 100644 --- a/shared/src/ledger/ibc/vp/mod.rs +++ b/shared/src/ledger/ibc/vp/mod.rs @@ -1,31 +1,31 @@ //! IBC integration as a native validity predicate -mod channel; -mod client; -mod connection; +mod context; mod denom; -mod packet; -mod port; -mod sequence; mod token; +use std::cell::RefCell; use std::collections::{BTreeSet, HashSet}; +use std::rc::Rc; +use std::time::Duration; use borsh::BorshDeserialize; -use namada_core::ledger::ibc::storage::{ - client_id, ibc_prefix, is_client_counter_key, IbcPrefix, +use context::{PseudoExecutionContext, VpValidationContext}; +use namada_core::ledger::ibc::storage::{is_ibc_denom_key, is_ibc_key}; +use namada_core::ledger::ibc::{ + Error as ActionError, IbcActions, TransferModule, ValidationParams, }; +use namada_core::ledger::storage::write_log::StorageModification; use namada_core::ledger::storage::{self as ledger_storage, StorageHasher}; use namada_core::proto::SignedTxData; use namada_core::types::address::{Address, InternalAddress}; -use namada_core::types::ibc::IbcEvent as WrappedIbcEvent; use namada_core::types::storage::Key; +use namada_proof_of_stake::read_pos_params; use thiserror::Error; pub use token::{Error as IbcTokenError, IbcToken}; -use crate::ibc::core::ics02_client::context::ClientReader; -use crate::ibc::events::IbcEvent; use crate::ledger::native_vp::{self, Ctx, NativeVp, VpEnv}; +use crate::ledger::parameters::read_epoch_duration_parameter; use crate::vm::WasmCacheAccess; #[allow(missing_docs)] @@ -33,34 +33,22 @@ use crate::vm::WasmCacheAccess; pub enum Error { #[error("Native VP error: {0}")] NativeVpError(native_vp::Error), - #[error("Key error: {0}")] - KeyError(String), - #[error("Counter error: {0}")] - CounterError(String), - #[error("Client validation error: {0}")] - ClientError(client::Error), - #[error("Connection validation error: {0}")] - ConnectionError(connection::Error), - #[error("Channel validation error: {0}")] - ChannelError(channel::Error), - #[error("Port validation error: {0}")] - PortError(port::Error), - #[error("Packet validation error: {0}")] - PacketError(packet::Error), - #[error("Sequence validation error: {0}")] - SequenceError(sequence::Error), - #[error("Denom validation error: {0}")] - DenomError(denom::Error), - #[error("IBC event error: {0}")] - IbcEvent(String), - #[error("Decoding transaction data error: {0}")] - TxDataDecoding(std::io::Error), + #[error("Decoding error: {0}")] + Decoding(std::io::Error), #[error("IBC message is required as transaction data")] NoTxData, + #[error("IBC action error: {0}")] + IbcAction(ActionError), + #[error("State change error: {0}")] + StateChange(String), + #[error("Denom store error: {0}")] + Denom(denom::Error), + #[error("IBC event error: {0}")] + IbcEvent(String), } /// IBC functions result -pub type Result = std::result::Result; +pub type VpResult = std::result::Result; /// IBC VP pub struct Ibc<'a, DB, H, CA> @@ -88,215 +76,138 @@ where tx_data: &[u8], keys_changed: &BTreeSet, _verifiers: &BTreeSet
, - ) -> Result { - let signed = SignedTxData::try_from_slice(tx_data) - .map_err(Error::TxDataDecoding)?; + ) -> VpResult { + let signed = + SignedTxData::try_from_slice(tx_data).map_err(Error::Decoding)?; let tx_data = &signed.data.ok_or(Error::NoTxData)?; - let mut clients = HashSet::new(); - - for key in keys_changed { - if let Some(ibc_prefix) = ibc_prefix(key) { - match ibc_prefix { - IbcPrefix::Client => { - if is_client_counter_key(key) { - let counter = - self.client_counter().map_err(|_| { - Error::CounterError( - "The client counter doesn't exist" - .to_owned(), - ) - })?; - if self.client_counter_pre()? >= counter { - return Err(Error::CounterError( - "The client counter is invalid".to_owned(), - )); - } - } else { - let client_id = client_id(key) - .map_err(|e| Error::KeyError(e.to_string()))?; - if !clients.insert(client_id.clone()) { - // this client has been checked - continue; - } - self.validate_client(&client_id, tx_data)? - } - } - IbcPrefix::Connection => { - self.validate_connection(key, tx_data)? - } - IbcPrefix::Channel => { - self.validate_channel(key, tx_data)? - } - IbcPrefix::Port => self.validate_port(key)?, - IbcPrefix::Capability => self.validate_capability(key)?, - IbcPrefix::SeqSend => { - self.validate_sequence_send(key, tx_data)? - } - IbcPrefix::SeqRecv => { - self.validate_sequence_recv(key, tx_data)? - } - IbcPrefix::SeqAck => { - self.validate_sequence_ack(key, tx_data)? - } - IbcPrefix::Commitment => { - self.validate_commitment(key, tx_data)? - } - IbcPrefix::Receipt => { - self.validate_receipt(key, tx_data)? - } - IbcPrefix::Ack => self.validate_ack(key)?, - IbcPrefix::Event => {} - IbcPrefix::Denom => self.validate_denom(tx_data)?, - IbcPrefix::Unknown => { - return Err(Error::KeyError(format!( - "Invalid IBC-related key: {}", - key - ))); - } - }; - } + + // Pseudo-execute the IBC message and compare the results with the + // actual state changes + self.validate_state(tx_data, keys_changed)?; + + // Validate the state according to the given IBC message + self.validate_with_msg(tx_data)?; + + // Validate the denom store if a denom key has been changed + if keys_changed.iter().any(is_ibc_denom_key) { + self.validate_denom(tx_data).map_err(Error::Denom)?; } Ok(true) } } -#[derive(Debug, PartialEq, Eq)] -enum StateChange { - Created, - Updated, - Deleted, - NotExists, -} - impl<'a, DB, H, CA> Ibc<'a, DB, H, CA> where DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, H: 'static + StorageHasher, CA: 'static + WasmCacheAccess, { - fn get_state_change(&self, key: &Key) -> Result { - if self.ctx.has_key_pre(key)? { - if self.ctx.has_key_post(key)? { - Ok(StateChange::Updated) - } else { - Ok(StateChange::Deleted) - } - } else if self.ctx.has_key_post(key)? 
{ - Ok(StateChange::Created) - } else { - Ok(StateChange::NotExists) - } - } - - fn read_counter_pre(&self, key: &Key) -> Result { - match self.ctx.read_bytes_pre(key) { - Ok(Some(value)) => { - // As ibc-go, u64 like a counter is encoded with big-endian - let counter: [u8; 8] = value.try_into().map_err(|_| { - Error::CounterError( - "Encoding the counter failed".to_string(), - ) - })?; - Ok(u64::from_be_bytes(counter)) - } - Ok(None) => { - Err(Error::CounterError("The counter doesn't exist".to_owned())) - } - Err(e) => Err(Error::CounterError(format!( - "Reading the counter failed: {}", - e - ))), + fn validate_state( + &self, + tx_data: &[u8], + keys_changed: &BTreeSet, + ) -> VpResult<()> { + let exec_ctx = PseudoExecutionContext::new(self.ctx.pre()); + let ctx = Rc::new(RefCell::new(exec_ctx)); + + let mut actions = IbcActions::new(ctx.clone()); + let module = TransferModule::new(ctx.clone()); + actions.add_transfer_route(module.module_id(), module); + actions.execute(tx_data)?; + + let changed_ibc_keys: HashSet<&Key> = + keys_changed.iter().filter(|k| is_ibc_key(k)).collect(); + if changed_ibc_keys.len() != ctx.borrow().get_changed_keys().len() { + return Err(Error::StateChange(format!( + "The changed keys mismatched: Actual {:?}, Expected {:?}", + changed_ibc_keys, + ctx.borrow().get_changed_keys(), + ))); } - } - fn read_counter(&self, key: &Key) -> Result { - match self.ctx.read_bytes_post(key) { - Ok(Some(value)) => { - // As ibc-go, u64 like a counter is encoded with big-endian - let counter: [u8; 8] = value.try_into().map_err(|_| { - Error::CounterError( - "Encoding the counter failed".to_string(), - ) - })?; - Ok(u64::from_be_bytes(counter)) - } - Ok(None) => { - Err(Error::CounterError("The counter doesn't exist".to_owned())) - } - Err(e) => Err(Error::CounterError(format!( - "Reading the counter failed: {}", - e - ))), + for key in changed_ibc_keys { + let actual = self + .ctx + .read_bytes_post(key) + .map_err(Error::NativeVpError)?; + match_value(key, actual, ctx.borrow().get_changed_value(key))?; } - } - fn check_emitted_event(&self, expected_event: IbcEvent) -> Result<()> { - match self.ctx.write_log.get_ibc_event() { - Some(event) => { - let expected = WrappedIbcEvent::try_from(expected_event) - .map_err(|e| Error::IbcEvent(e.to_string()))?; - if *event == expected { - Ok(()) - } else { - Err(Error::IbcEvent(format!( - "The IBC event is invalid: Event {}", - event - ))) - } - } - None => { - Err(Error::IbcEvent("No event has been emitted".to_owned())) - } + // check the event + let actual = self.ctx.write_log.get_ibc_events(); + if *actual != ctx.borrow().event { + return Err(Error::IbcEvent(format!( + "The IBC event is invalid: Actual {:?}, Expected {:?}", + actual, + ctx.borrow().event + ))); } - } -} -impl From for Error { - fn from(err: native_vp::Error) -> Self { - Self::NativeVpError(err) + Ok(()) } -} - -impl From for Error { - fn from(err: client::Error) -> Self { - Self::ClientError(err) - } -} -impl From for Error { - fn from(err: connection::Error) -> Self { - Self::ConnectionError(err) - } -} + fn validate_with_msg(&self, tx_data: &[u8]) -> VpResult<()> { + let validation_ctx = VpValidationContext::new(self.ctx.pre()); + let ctx = Rc::new(RefCell::new(validation_ctx)); -impl From for Error { - fn from(err: channel::Error) -> Self { - Self::ChannelError(err) - } -} + let mut actions = IbcActions::new(ctx.clone()); + actions.set_validation_params(self.validation_params()?); -impl From for Error { - fn from(err: port::Error) -> Self { - Self::PortError(err) + 
let module = TransferModule::new(ctx); + actions.add_transfer_route(module.module_id(), module); + actions.validate(tx_data).map_err(Error::IbcAction) } -} -impl From for Error { - fn from(err: packet::Error) -> Self { - Self::PacketError(err) + fn validation_params(&self) -> VpResult { + let chain_id = self.ctx.get_chain_id().map_err(Error::NativeVpError)?; + let proof_specs = ledger_storage::ics23_specs::ibc_proof_specs::(); + let pos_params = + read_pos_params(&self.ctx.post()).map_err(Error::NativeVpError)?; + let pipeline_len = pos_params.pipeline_len; + let epoch_duration = read_epoch_duration_parameter(&self.ctx.post()) + .map_err(Error::NativeVpError)?; + let unbonding_period_secs = + pipeline_len * epoch_duration.min_duration.0; + Ok(ValidationParams { + chain_id: chain_id.into(), + proof_specs: proof_specs.into(), + unbonding_period: Duration::from_secs(unbonding_period_secs), + upgrade_path: Vec::new(), + }) } } -impl From for Error { - fn from(err: sequence::Error) -> Self { - Self::SequenceError(err) +fn match_value( + key: &Key, + actual: Option>, + expected: Option<&StorageModification>, +) -> VpResult<()> { + match (actual, expected) { + (Some(v), Some(StorageModification::Write { value })) => { + if v == *value { + Ok(()) + } else { + Err(Error::StateChange(format!( + "The value mismatched: Key {} actual {:?}, expected {:?}", + key, v, value + ))) + } + } + (Some(_), _) => Err(Error::StateChange(format!( + "The value was invalid: Key {}", + key + ))), + (None, Some(StorageModification::Delete)) => Ok(()), + (None, _) => Err(Error::StateChange(format!( + "The key was deleted unexpectedly: Key {}", + key + ))), } } -impl From for Error { - fn from(err: denom::Error) -> Self { - Self::DenomError(err) +impl From for Error { + fn from(err: ActionError) -> Self { + Self::IbcAction(err) } } @@ -311,145 +222,251 @@ pub fn get_dummy_header() -> crate::types::storage::Header { } } +/// A dummy validator used for testing +#[cfg(any(feature = "test", feature = "testing"))] +pub fn get_dummy_genesis_validator() +-> namada_proof_of_stake::types::GenesisValidator { + use rust_decimal::prelude::Decimal; + + use crate::core::types::address::testing::established_address_1; + use crate::types::key::testing::common_sk_from_simple_seed; + use crate::types::token::Amount; + + let address = established_address_1(); + let tokens = Amount::whole(1); + let consensus_sk = common_sk_from_simple_seed(0); + let consensus_key = consensus_sk.to_public(); + + let commission_rate = Decimal::new(1, 1); + let max_commission_rate_change = Decimal::new(1, 1); + namada_proof_of_stake::types::GenesisValidator { + address, + tokens, + consensus_key, + commission_rate, + max_commission_rate_change, + } +} + #[cfg(test)] mod tests { use core::time::Duration; use std::convert::TryFrom; use std::str::FromStr; - use crate::ibc::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; - use crate::ibc::core::ics02_client::client_consensus::ConsensusState; + use borsh::BorshSerialize; + use prost::Message; + use sha2::Digest; + + use super::super::storage::{ + ack_key, calc_hash, channel_counter_key, channel_key, + client_connections_key, client_counter_key, client_state_key, + client_type_key, client_update_height_key, client_update_timestamp_key, + commitment_key, connection_counter_key, connection_key, + consensus_state_key, ibc_denom_key, next_sequence_ack_key, + next_sequence_recv_key, next_sequence_send_key, receipt_key, + }; + use super::{get_dummy_header, *}; + use 
crate::core::ledger::storage::testing::TestWlStorage; + use crate::core::types::address::nam; + use crate::core::types::address::testing::established_address_1; + use crate::core::types::storage::Epoch; + use crate::ibc::applications::transfer::acknowledgement::TokenTransferAcknowledgement; + use crate::ibc::applications::transfer::coin::PrefixedCoin; + use crate::ibc::applications::transfer::denom::TracePrefix; + use crate::ibc::applications::transfer::events::{ + AckEvent, DenomTraceEvent, TimeoutEvent, TransferEvent, + }; + use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; + use crate::ibc::applications::transfer::packet::PacketData; + use crate::ibc::applications::transfer::VERSION; use crate::ibc::core::ics02_client::client_state::ClientState; - use crate::ibc::core::ics02_client::client_type::ClientType; - use crate::ibc::core::ics02_client::header::Header; - use crate::ibc::core::ics02_client::msgs::create_client::MsgCreateAnyClient; - use crate::ibc::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; + use crate::ibc::core::ics02_client::events::{CreateClient, UpdateClient}; + use crate::ibc::core::ics02_client::msgs::create_client::MsgCreateClient; + use crate::ibc::core::ics02_client::msgs::update_client::MsgUpdateClient; use crate::ibc::core::ics03_connection::connection::{ ConnectionEnd, Counterparty as ConnCounterparty, State as ConnState, }; + use crate::ibc::core::ics03_connection::events::{ + OpenAck as ConnOpenAck, OpenConfirm as ConnOpenConfirm, + OpenInit as ConnOpenInit, OpenTry as ConnOpenTry, + }; use crate::ibc::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; use crate::ibc::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; use crate::ibc::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; use crate::ibc::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; - use crate::ibc::core::ics03_connection::version::Version as ConnVersion; + use crate::ibc::core::ics03_connection::version::{ + get_compatible_versions, Version as ConnVersion, + }; use crate::ibc::core::ics04_channel::channel::{ ChannelEnd, Counterparty as ChanCounterparty, Order, State as ChanState, }; - use crate::ibc::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; + use crate::ibc::core::ics04_channel::commitment::PacketCommitment; + use crate::ibc::core::ics04_channel::events::{ + AcknowledgePacket, OpenAck as ChanOpenAck, + OpenConfirm as ChanOpenConfirm, OpenInit as ChanOpenInit, + OpenTry as ChanOpenTry, ReceivePacket, SendPacket, TimeoutPacket, + WriteAcknowledgement, + }; + use crate::ibc::core::ics04_channel::msgs::acknowledgement::{ + Acknowledgement, MsgAcknowledgement, + }; use crate::ibc::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; use crate::ibc::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; use crate::ibc::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; use crate::ibc::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; use crate::ibc::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; + use crate::ibc::core::ics04_channel::msgs::timeout::MsgTimeout; + use crate::ibc::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; use crate::ibc::core::ics04_channel::packet::{Packet, Sequence}; + use crate::ibc::core::ics04_channel::timeout::TimeoutHeight; use crate::ibc::core::ics04_channel::Version as ChanVersion; - use crate::ibc::core::ics23_commitment::commitment::CommitmentProofBytes; + use 
crate::ibc::core::ics23_commitment::commitment::{ + CommitmentPrefix, CommitmentProofBytes, + }; use crate::ibc::core::ics24_host::identifier::{ ChannelId, ClientId, ConnectionId, PortChannelId, PortId, }; - use crate::ibc::mock::client_state::{MockClientState, MockConsensusState}; + use crate::ibc::events::{IbcEvent as RawIbcEvent, ModuleEvent}; + use crate::ibc::mock::client_state::{ + client_type, MockClientState, MOCK_CLIENT_TYPE, + }; + use crate::ibc::mock::consensus_state::MockConsensusState; use crate::ibc::mock::header::MockHeader; - use crate::ibc::proofs::{ConsensusProof, Proofs}; use crate::ibc::signer::Signer; use crate::ibc::timestamp::Timestamp; use crate::ibc::tx_msg::Msg; use crate::ibc::Height; use crate::ibc_proto::cosmos::base::v1beta1::Coin; - use prost::Message; - use crate::tendermint::time::Time as TmTime; - use crate::tendermint_proto::Protobuf; - - use super::get_dummy_header; - use namada_core::ledger::ibc::actions::{ - self, commitment_prefix, init_connection, make_create_client_event, - make_open_ack_channel_event, make_open_ack_connection_event, - make_open_confirm_channel_event, make_open_confirm_connection_event, - make_open_init_channel_event, make_open_init_connection_event, - make_open_try_channel_event, make_open_try_connection_event, - make_send_packet_event, make_update_client_event, packet_from_message, - try_connection, - }; - use super::super::storage::{ - ack_key, capability_key, channel_key, client_state_key, - client_type_key, client_update_height_key, client_update_timestamp_key, - commitment_key, connection_key, consensus_state_key, - next_sequence_ack_key, next_sequence_recv_key, next_sequence_send_key, - port_key, receipt_key, - }; - use super::*; - use crate::types::key::testing::keypair_1; + use crate::ibc_proto::google::protobuf::Any; + use crate::ibc_proto::ibc::core::connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry; + use crate::ibc_proto::protobuf::Protobuf; use crate::ledger::gas::VpGasMeter; - use crate::ledger::storage::testing::TestStorage; - use crate::ledger::storage::write_log::WriteLog; + use crate::ledger::parameters::storage::{ + get_epoch_duration_storage_key, get_max_expected_time_per_block_key, + }; + use crate::ledger::parameters::EpochDuration; + use crate::ledger::{ibc, pos}; + use crate::proof_of_stake::parameters::PosParams; use crate::proto::Tx; - use crate::types::ibc::data::{PacketAck, PacketReceipt}; + use crate::tendermint::time::Time as TmTime; + use crate::tendermint_proto::Protobuf as TmProtobuf; + use crate::types::key::testing::keypair_1; + use crate::types::storage::{BlockHash, BlockHeight, TxIndex}; + use crate::types::time::DurationSecs; + use crate::types::token::{balance_key, Amount}; use crate::vm::wasm; - use crate::types::storage::TxIndex; - use crate::types::storage::{BlockHash, BlockHeight}; const ADDRESS: Address = Address::Internal(InternalAddress::Ibc); + const COMMITMENT_PREFIX: &[u8] = b"ibc"; fn get_client_id() -> ClientId { - ClientId::from_str("test_client").expect("Creating a client ID failed") + let id = format!("{}-0", MOCK_CLIENT_TYPE); + ClientId::from_str(&id).expect("Creating a client ID failed") } - fn insert_init_states() -> (TestStorage, WriteLog) { - let mut storage = TestStorage::default(); - let mut write_log = WriteLog::default(); + fn init_storage() -> TestWlStorage { + let mut wl_storage = TestWlStorage::default(); // initialize the storage - super::super::init_genesis_storage(&mut storage); + ibc::init_genesis_storage(&mut wl_storage); + 
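+ // NOTE: PoS genesis and the parameters written below are required + // because `validation_params` derives the client unbonding period + // from `pipeline_len * min_duration`.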
pos::init_genesis_storage( + &mut wl_storage, + &PosParams::default(), + vec![get_dummy_genesis_validator()].into_iter(), + Epoch(1), + ); + // epoch duration + let epoch_duration_key = get_epoch_duration_storage_key(); + let epoch_duration = EpochDuration { + min_num_of_blocks: 10, + min_duration: DurationSecs(100), + }; + wl_storage + .write_log + .write(&epoch_duration_key, epoch_duration.try_to_vec().unwrap()) + .expect("write failed"); + // max_expected_time_per_block + let time = DurationSecs::from(Duration::new(60, 0)); + let time_key = get_max_expected_time_per_block_key(); + wl_storage + .write_log + .write(&time_key, crate::ledger::storage::types::encode(&time)) + .expect("write failed"); // set a dummy header - storage + wl_storage + .storage .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - storage + wl_storage + .storage .begin_block(BlockHash::default(), BlockHeight(1)) .unwrap(); + wl_storage + } + + fn insert_init_client(wl_storage: &mut TestWlStorage) { // insert a mock client type let client_id = get_client_id(); let client_type_key = client_type_key(&client_id); - let client_type = ClientType::Mock.as_str().as_bytes().to_vec(); - write_log + let client_type = client_type().as_str().as_bytes().to_vec(); + wl_storage + .write_log .write(&client_type_key, client_type) .expect("write failed"); // insert a mock client state let client_state_key = client_state_key(&get_client_id()); - let height = Height::new(0, 1); + let height = Height::new(0, 1).unwrap(); let header = MockHeader { height, timestamp: Timestamp::now(), }; - let client_state = MockClientState::new(header).wrap_any(); - let bytes = client_state.encode_vec().expect("encoding failed"); - write_log + let client_state = MockClientState::new(header); + let bytes = Protobuf::::encode_vec(&client_state) + .expect("encoding failed"); + wl_storage + .write_log .write(&client_state_key, bytes) .expect("write failed"); // insert a mock consensus state let consensus_key = consensus_state_key(&client_id, height); - let consensus_state = MockConsensusState::new(header).wrap_any(); - let bytes = consensus_state.encode_vec().expect("encoding failed"); - write_log + let consensus_state = MockConsensusState::new(header); + let bytes = Protobuf::::encode_vec(&consensus_state) + .expect("encoding failed"); + wl_storage + .write_log .write(&consensus_key, bytes) .expect("write failed"); // insert update time and height let client_update_time_key = client_update_timestamp_key(&client_id); - let bytes = TmTime::now().encode_vec().expect("encoding failed"); - write_log + let time = wl_storage + .storage + .get_block_header(None) + .unwrap() + .0 + .unwrap() + .time; + let bytes = TmTime::try_from(time) + .unwrap() + .encode_vec() + .expect("encoding failed"); + wl_storage + .write_log .write(&client_update_time_key, bytes) .expect("write failed"); let client_update_height_key = client_update_height_key(&client_id); - let host_height = Height::new(10, 100); - write_log + let host_height = wl_storage.storage.get_block_height().0; + let host_height = + Height::new(0, host_height.0).expect("invalid height"); + wl_storage + .write_log .write( &client_update_height_key, host_height.encode_vec().expect("encoding failed"), ) .expect("write failed"); - write_log.commit_tx(); - - (storage, write_log) + wl_storage.write_log.commit_tx(); } fn get_connection_id() -> ConnectionId { @@ -464,11 +481,11 @@ mod tests { } fn get_port_id() -> PortId { - PortId::from_str("test_port").unwrap() + PortId::transfer() } fn 
get_channel_id() -> ChannelId { - ChannelId::from_str("channel-42").unwrap() + ChannelId::new(0) } fn get_connection(conn_state: ConnState) -> ConnectionEnd { @@ -477,21 +494,20 @@ mod tests { get_client_id(), get_conn_counterparty(), vec![ConnVersion::default()], - Duration::new(100, 0), + Duration::new(0, 0), ) } fn get_conn_counterparty() -> ConnCounterparty { - let counterpart_client_id = - ClientId::from_str("counterpart_test_client") - .expect("Creating a client ID failed"); - let counterpart_conn_id = - ConnectionId::from_str("counterpart_test_connection") - .expect("Creating a connection ID failed"); + let counterpart_client_id = ClientId::new(client_type(), 22).unwrap(); + let counterpart_conn_id = ConnectionId::new(32); + let commitment_prefix = + CommitmentPrefix::try_from(COMMITMENT_PREFIX.to_vec()) + .expect("the prefix should be parsable"); ConnCounterparty::new( counterpart_client_id, Some(counterpart_conn_id), - commitment_prefix(), + commitment_prefix, ) } @@ -501,32 +517,18 @@ mod tests { order, get_channel_counterparty(), vec![get_connection_id()], - ChanVersion::ics20(), + ChanVersion::new(VERSION.to_string()), ) } fn get_channel_counterparty() -> ChanCounterparty { - let counterpart_port_id = PortId::from_str("counterpart_test_port") - .expect("Creating a port ID failed"); - let counterpart_channel_id = ChannelId::from_str("channel-0") - .expect("Creating a channel ID failed"); + let counterpart_port_id = PortId::transfer(); + let counterpart_channel_id = ChannelId::new(0); ChanCounterparty::new(counterpart_port_id, Some(counterpart_channel_id)) } - fn set_port(write_log: &mut WriteLog, index: u64) { - let port_key = port_key(&get_port_id()); - write_log - .write(&port_key, index.to_be_bytes().to_vec()) - .expect("write failed"); - // insert to the reverse map - let cap_key = capability_key(index); - let port_id = get_port_id(); - let bytes = port_id.as_str().as_bytes().to_vec(); - write_log.write(&cap_key, bytes).expect("write failed"); - } - - fn get_next_seq(storage: &TestStorage, key: &Key) -> Sequence { - let (val, _) = storage.read(key).expect("read failed"); + fn get_next_seq(wl_storage: &TestWlStorage, key: &Key) -> Sequence { + let (val, _) = wl_storage.storage.read(key).expect("read failed"); match val { Some(v) => { // IBC related data is encoded without borsh @@ -539,81 +541,191 @@ mod tests { } } - fn increment_seq(write_log: &mut WriteLog, key: &Key, seq: Sequence) { - let seq_num = u64::from(seq.increment()); - write_log - .write(key, seq_num.to_be_bytes().to_vec()) + fn increment_counter(wl_storage: &mut TestWlStorage, key: &Key) { + let count = match wl_storage.storage.read(key).expect("read failed").0 { + Some(value) => { + let count: [u8; 8] = + value.try_into().expect("decoding a count failed"); + u64::from_be_bytes(count) + } + None => 0, + }; + wl_storage + .write_log + .write(key, (count + 1).to_be_bytes().to_vec()) .expect("write failed"); } + fn dummy_proof() -> CommitmentProofBytes { + CommitmentProofBytes::try_from(vec![0]).unwrap() + } + + fn packet_from_message( + msg: &MsgTransfer, + sequence: Sequence, + counterparty: &ChanCounterparty, + ) -> Packet { + let coin: PrefixedCoin = + msg.token.clone().try_into().expect("invalid token"); + let packet_data = PacketData { + token: coin, + sender: msg.sender.clone(), + receiver: msg.receiver.clone(), + }; + let data = serde_json::to_vec(&packet_data) + .expect("Encoding PacketData failed"); + + Packet { + seq_on_a: sequence, + port_id_on_a: msg.port_id_on_a.clone(), + chan_id_on_a: 
msg.chan_id_on_a.clone(), + port_id_on_b: counterparty.port_id.clone(), + chan_id_on_b: counterparty + .channel_id() + .expect("the counterparty channel should exist") + .clone(), + data, + timeout_height_on_b: msg.timeout_height_on_b, + timeout_timestamp_on_b: msg.timeout_timestamp_on_b, + } + } + + fn commitment(packet: &Packet) -> PacketCommitment { + let timeout = packet.timeout_timestamp_on_b.nanoseconds().to_be_bytes(); + let revision_number = packet + .timeout_height_on_b + .commitment_revision_number() + .to_be_bytes(); + let revision_height = packet + .timeout_height_on_b + .commitment_revision_height() + .to_be_bytes(); + let data = sha2::Sha256::digest(&packet.data); + let input = [ + &timeout, + &revision_number, + &revision_height, + data.as_slice(), + ] + .concat(); + sha2::Sha256::digest(&input).to_vec().into() + } + #[test] fn test_create_client() { - let storage = TestStorage::default(); - let mut write_log = WriteLog::default(); + let mut wl_storage = init_storage(); + let mut keys_changed = BTreeSet::new(); - let height = Height::new(0, 1); + let height = Height::new(0, 1).unwrap(); let header = MockHeader { height, timestamp: Timestamp::now(), }; let client_id = get_client_id(); - // insert client type, state, and consensus state + // client type let client_type_key = client_type_key(&client_id); - let client_type = ClientType::Mock.as_str().as_bytes().to_vec(); - write_log - .write(&client_type_key, client_type) + let client_type = client_type(); + let bytes = client_type.as_str().as_bytes().to_vec(); + wl_storage + .write_log + .write(&client_type_key, bytes) .expect("write failed"); - let client_state = MockClientState::new(header).wrap_any(); - let consensus_state = MockConsensusState::new(header).wrap_any(); - let msg = MsgCreateAnyClient { - client_state: client_state.clone(), - consensus_state: consensus_state.clone(), - signer: Signer::new("account0"), + keys_changed.insert(client_type_key); + // message + let client_state = MockClientState::new(header); + let consensus_state = MockConsensusState::new(header); + let msg = MsgCreateClient { + client_state: client_state.into(), + consensus_state: consensus_state.clone().into(), + signer: Signer::from_str("account0").expect("invalid signer"), }; + // client state let client_state_key = client_state_key(&get_client_id()); - let bytes = client_state.encode_vec().expect("encoding failed"); - write_log + let bytes = Protobuf::::encode_vec(&client_state) + .expect("encoding failed"); + wl_storage + .write_log .write(&client_state_key, bytes) .expect("write failed"); + keys_changed.insert(client_state_key); + // client consensus let consensus_key = consensus_state_key(&client_id, height); - let bytes = consensus_state.encode_vec().expect("encoding failed"); - write_log + let bytes = Protobuf::::encode_vec(&consensus_state) + .expect("encoding failed"); + wl_storage + .write_log .write(&consensus_key, bytes) .expect("write failed"); - // insert update time and height + keys_changed.insert(consensus_key); + // client update time let client_update_time_key = client_update_timestamp_key(&client_id); - let bytes = TmTime::now().encode_vec().expect("encoding failed"); - write_log + let time = wl_storage + .storage + .get_block_header(None) + .unwrap() + .0 + .unwrap() + .time; + let bytes = TmTime::try_from(time) + .unwrap() + .encode_vec() + .expect("encoding failed"); + wl_storage + .write_log .write(&client_update_time_key, bytes) .expect("write failed"); + keys_changed.insert(client_update_time_key); + // client update 
height let client_update_height_key = client_update_height_key(&client_id); - let host_height = Height::new(10, 100); - write_log + let host_height = wl_storage.storage.get_block_height().0; + let host_height = + Height::new(0, host_height.0).expect("invalid height"); + wl_storage + .write_log .write( &client_update_height_key, host_height.encode_vec().expect("encoding failed"), ) .expect("write failed"); - - let event = make_create_client_event(&get_client_id(), &msg); - write_log.set_ibc_event(event.try_into().unwrap()); + keys_changed.insert(client_update_height_key); + // client counter + let client_counter_key = client_counter_key(); + increment_counter(&mut wl_storage, &client_counter_key); + keys_changed.insert(client_counter_key); + + let event = RawIbcEvent::CreateClient(CreateClient::new( + client_id, + client_type, + client_state.latest_height(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(client_state_key); let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -636,25 +748,65 @@ mod tests { #[test] fn test_create_client_fail() { - let storage = TestStorage::default(); - let write_log = WriteLog::default(); + let mut wl_storage = TestWlStorage::default(); + let mut keys_changed = BTreeSet::new(); + + // initialize the storage + ibc::init_genesis_storage(&mut wl_storage); + // set a dummy header + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(1)) + .unwrap(); + + let height = Height::new(0, 1).unwrap(); + let header = MockHeader { + height, + timestamp: Timestamp::now(), + }; + let client_id = get_client_id(); + // insert only client type + let client_type_key = client_type_key(&client_id); + let client_type = client_type(); + let bytes = client_type.as_str().as_bytes().to_vec(); + wl_storage + .write_log + .write(&client_type_key, bytes) + .expect("write failed"); + keys_changed.insert(client_type_key); + let client_state = MockClientState::new(header); + let consensus_state = MockConsensusState::new(header); + // make a correct message + let msg = MsgCreateClient { + client_state: client_state.into(), + consensus_state: consensus_state.into(), + signer: Signer::from_str("account0").expect("invalid signer"), + }; + let tx_index = TxIndex::default(); let tx_code = vec![]; - let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let mut tx_data = vec![]; + msg.to_any().encode(&mut tx_data).expect("encoding failed"); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = 
wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - let client_state_key = client_state_key(&get_client_id()); - keys_changed.insert(client_state_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -668,73 +820,129 @@ mod tests { let result = ibc .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) .unwrap_err(); - assert_matches!( - result, - Error::ClientError(client::Error::InvalidStateChange(_)) - ); + assert_matches!(result, Error::StateChange(_)); } #[test] fn test_update_client() { - let (mut storage, mut write_log) = insert_init_states(); - write_log.commit_block(&mut storage).expect("commit failed"); + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); // update the client let client_id = get_client_id(); let client_state_key = client_state_key(&get_client_id()); - let height = Height::new(1, 11); + let height = Height::new(0, 11).unwrap(); + // the header should be created before the host's current time + let time = (TmTime::now() - std::time::Duration::new(100, 0)).unwrap(); let header = MockHeader { height, - timestamp: Timestamp::now(), + timestamp: time.into(), }; - let msg = MsgUpdateAnyClient { + let msg = MsgUpdateClient { client_id: client_id.clone(), - header: header.wrap_any(), - signer: Signer::new("account0"), + header: header.into(), + signer: Signer::from_str("account0").expect("invalid signer"), }; - let client_state = MockClientState::new(header).wrap_any(); - let bytes = client_state.encode_vec().expect("encoding failed"); - write_log + // client state + let client_state = MockClientState::new(header); + let bytes = Protobuf::<Any>::encode_vec(&client_state) + .expect("encoding failed"); + wl_storage + .write_log .write(&client_state_key, bytes) .expect("write failed"); + keys_changed.insert(client_state_key); + // consensus state let consensus_key = consensus_state_key(&client_id, height); - let consensus_state = MockConsensusState::new(header).wrap_any(); - let bytes = consensus_state.encode_vec().expect("encoding failed"); - write_log + let consensus_state = MockConsensusState::new(header); + let bytes = Protobuf::<Any>::encode_vec(&consensus_state) + .expect("encoding failed"); + wl_storage + .write_log .write(&consensus_key, bytes) .expect("write failed"); - let event = make_update_client_event(&client_id, &msg); - write_log.set_ibc_event(event.try_into().unwrap()); - // update time and height for this updating - let key = client_update_timestamp_key(&client_id); - write_log - .write(&key, TmTime::now().encode_vec().expect("encoding failed")) + keys_changed.insert(consensus_key); + // client update time + let client_update_time_key = client_update_timestamp_key(&client_id); + let time = wl_storage + .storage + .get_block_header(None) + .unwrap() + .0 + .unwrap() + .time; + let bytes = TmTime::try_from(time) + .unwrap() + .encode_vec() + .expect("encoding failed"); + wl_storage + .write_log + .write(&client_update_time_key, bytes) .expect("write failed"); - let key = client_update_height_key(&client_id); - write_log +
keys_changed.insert(client_update_time_key); + // client update height + let client_update_height_key = client_update_height_key(&client_id); + let host_height = wl_storage.storage.get_block_height().0; + let host_height = + Height::new(0, host_height.0).expect("invalid height"); + wl_storage + .write_log .write( - &key, - Height::new(10, 101).encode_vec().expect("encoding failed"), + &client_update_height_key, + host_height.encode_vec().expect("encoding failed"), ) .expect("write failed"); + keys_changed.insert(client_update_height_key); + // event + let consensus_height = client_state.latest_height(); + let event = RawIbcEvent::UpdateClient(UpdateClient::new( + client_id, + client_state.client_type(), + consensus_height, + vec![consensus_height], + header.into(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(client_state_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -756,44 +964,95 @@ mod tests { #[test] fn test_init_connection() { - let (mut storage, mut write_log) = insert_init_states(); - write_log.commit_block(&mut storage).expect("commit failed"); + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); // prepare a message + let mut counterparty = get_conn_counterparty(); + counterparty.connection_id = None; let msg = MsgConnectionOpenInit { - client_id: get_client_id(), - counterparty: get_conn_counterparty(), - version: None, + client_id_on_a: get_client_id(), + counterparty, + version: Some(ConnVersion::default()), delay_period: Duration::new(100, 0), - signer: Signer::new("account0"), + signer: Signer::from_str("account0").expect("invalid signer"), }; // insert an INIT connection let conn_id = get_connection_id(); let conn_key = connection_key(&conn_id); - let conn = init_connection(&msg); + let conn = ConnectionEnd::new( + ConnState::Init, + msg.client_id_on_a.clone(), + msg.counterparty.clone(), + vec![msg.version.clone().unwrap()], + msg.delay_period, + ); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - let event = make_open_init_connection_event(&conn_id, &msg); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + keys_changed.insert(conn_key); + // client connection list + let client_conn_key = client_connections_key(&msg.client_id_on_a); + let 
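conn_list = conn_id.to_string();
+ // Review note (sketch, assuming increment_counter is the test helper defined
+ // earlier in this file): it bumps a u64 counter key the way the handler
+ // would, roughly:
+ //   let count: u64 = ...; // current big-endian value at the key, or 0 if unset
+ //   wl_storage.write_log.write(&key, (count + 1).to_be_bytes().to_vec());
+ // The VP expects the connection counter to advance together with the new
+ // connection object, so the key is also added to keys_changed.
+ let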
conn_list = conn_id.to_string(); + let bytes = conn_list.as_bytes().to_vec(); + wl_storage + .write_log + .write(&client_conn_key, bytes) + .expect("write failed"); + keys_changed.insert(client_conn_key); + // connection counter + let conn_counter_key = connection_counter_key(); + increment_counter(&mut wl_storage, &conn_counter_key); + keys_changed.insert(conn_counter_key); + // event + let event = RawIbcEvent::OpenInitConnection(ConnOpenInit::new( + conn_id, + msg.client_id_on_a.clone(), + msg.counterparty.client_id().clone(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(conn_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -815,41 +1074,83 @@ mod tests { #[test] fn test_init_connection_fail() { - let storage = TestStorage::default(); - let mut write_log = WriteLog::default(); + let mut wl_storage = TestWlStorage::default(); + let mut keys_changed = BTreeSet::new(); + + // initialize the storage + ibc::init_genesis_storage(&mut wl_storage); + // set a dummy header + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(1)) + .unwrap(); // prepare data + let mut counterparty = get_conn_counterparty(); + counterparty.connection_id = None; let msg = MsgConnectionOpenInit { - client_id: get_client_id(), - counterparty: get_conn_counterparty(), - version: None, + client_id_on_a: get_client_id(), + counterparty, + version: Some(ConnVersion::default()), delay_period: Duration::new(100, 0), - signer: Signer::new("account0"), + signer: Signer::from_str("account0").expect("invalid signer"), }; // insert an Init connection - let conn_key = connection_key(&get_connection_id()); - let conn = init_connection(&msg); + let conn_id = get_connection_id(); + let conn_key = connection_key(&conn_id); + let conn = ConnectionEnd::new( + ConnState::Init, + msg.client_id_on_a.clone(), + msg.counterparty.clone(), + vec![msg.version.clone().unwrap()], + msg.delay_period, + ); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + keys_changed.insert(conn_key); + // client connection list + let client_conn_key = client_connections_key(&msg.client_id_on_a); + let conn_list = conn_id.to_string(); + let bytes = conn_list.as_bytes().to_vec(); + wl_storage + .write_log + .write(&client_conn_key, bytes) + .expect("write failed"); + keys_changed.insert(client_conn_key); + // connection counter + let conn_counter_key = connection_counter_key(); + increment_counter(&mut wl_storage, &conn_counter_key); + keys_changed.insert(conn_counter_key); + // No 
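event is emitted in this test, deliberately
+ // Review note (not part of the patch itself): the VP regenerates the events
+ // the IBC handler would emit for this message and compares them with the
+ // events recorded in the write log. Because this test writes the connection
+ // state without calling emit_ibc_event, validation fails with
+ // Error::IbcEvent(_) rather than a state-change error, as asserted below.
+ // No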
event let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(conn_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -858,80 +1159,125 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - // this should fail because no client exists + // this should fail because no event let result = ibc .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) .unwrap_err(); - assert_matches!( - result, - Error::ConnectionError(connection::Error::InvalidClient(_)) - ); + assert_matches!(result, Error::IbcEvent(_)); } #[test] fn test_try_connection() { - let (mut storage, mut write_log) = insert_init_states(); - write_log.commit_block(&mut storage).expect("commit failed"); + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); // prepare data - let height = Height::new(0, 1); + let height = Height::new(0, 1).unwrap(); let header = MockHeader { height, timestamp: Timestamp::now(), }; - let client_state = MockClientState::new(header).wrap_any(); - let proof_conn = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_client = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_consensus = ConsensusProof::new( - CommitmentProofBytes::try_from(vec![0]).unwrap(), - height, - ) - .unwrap(); - let proofs = Proofs::new( - proof_conn, - Some(proof_client), - Some(proof_consensus), - None, - Height::new(0, 1), - ) - .unwrap(); - let msg = MsgConnectionOpenTry { - previous_connection_id: None, - client_id: get_client_id(), - client_state: Some(client_state), - counterparty: get_conn_counterparty(), - counterparty_versions: vec![ConnVersion::default()], - proofs, - delay_period: Duration::new(100, 0), - signer: Signer::new("account0"), - }; + let client_state = MockClientState::new(header); + let proof_height = Height::new(0, 1).unwrap(); + // Convert a message from RawMsgConnectionOpenTry + // because MsgConnectionOpenTry cannot be created directly + #[allow(deprecated)] + let msg: MsgConnectionOpenTry = RawMsgConnectionOpenTry { + client_id: get_client_id().as_str().to_string(), + client_state: Some(client_state.into()), + counterparty: Some(get_conn_counterparty().into()), + delay_period: 0, + counterparty_versions: get_compatible_versions() + .iter() + .map(|v| v.clone().into()) + .collect(), + proof_init: dummy_proof().into(), + proof_height: Some(proof_height.into()), + proof_consensus: dummy_proof().into(), + consensus_height: Some(client_state.latest_height().into()), + proof_client: dummy_proof().into(), + signer: "account0".to_string(), + previous_connection_id: ConnectionId::default().to_string(), + } + .try_into() + .expect("invalid 
message"); // insert a TryOpen connection let conn_id = get_connection_id(); let conn_key = connection_key(&conn_id); - let conn = try_connection(&msg); + let conn = ConnectionEnd::new( + ConnState::TryOpen, + msg.client_id_on_b.clone(), + msg.counterparty.clone(), + msg.versions_on_a.clone(), + msg.delay_period, + ); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - let event = make_open_try_connection_event(&conn_id, &msg); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + keys_changed.insert(conn_key); + // client connection list + let client_conn_key = client_connections_key(&msg.client_id_on_b); + let conn_list = conn_id.to_string(); + let bytes = conn_list.as_bytes().to_vec(); + wl_storage + .write_log + .write(&client_conn_key, bytes) + .expect("write failed"); + keys_changed.insert(client_conn_key); + // connection counter + let conn_counter_key = connection_counter_key(); + increment_counter(&mut wl_storage, &conn_counter_key); + keys_changed.insert(conn_counter_key); + // event + let event = RawIbcEvent::OpenTryConnection(ConnOpenTry::new( + conn_id, + msg.client_id_on_b.clone(), + msg.counterparty.connection_id().cloned().unwrap(), + msg.counterparty.client_id().clone(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(conn_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -953,73 +1299,96 @@ mod tests { #[test] fn test_ack_connection() { - let (mut storage, mut write_log) = insert_init_states(); + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + // insert an Init connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Init); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); + // update the connection to Open let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + keys_changed.insert(conn_key); 
// prepare data - let height = Height::new(0, 1); + let height = Height::new(0, 1).unwrap(); let header = MockHeader { height, timestamp: Timestamp::now(), }; - let client_state = MockClientState::new(header).wrap_any(); + let client_state = MockClientState::new(header); let counterparty = get_conn_counterparty(); - let proof_conn = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_client = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_consensus = ConsensusProof::new( - CommitmentProofBytes::try_from(vec![0]).unwrap(), - height, - ) - .unwrap(); - let proofs = Proofs::new( - proof_conn, - Some(proof_client), - Some(proof_consensus), - None, - Height::new(0, 1), - ) - .unwrap(); - let tx_code = vec![]; + let proof_height = Height::new(0, 1).unwrap(); + let msg = MsgConnectionOpenAck { - connection_id: get_connection_id(), - counterparty_connection_id: counterparty - .connection_id() - .unwrap() - .clone(), - client_state: Some(client_state), - proofs, + conn_id_on_a: get_connection_id(), + conn_id_on_b: counterparty.connection_id().cloned().unwrap(), + client_state_of_a_on_b: client_state.into(), + proof_conn_end_on_b: dummy_proof(), + proof_client_state_of_a_on_b: dummy_proof(), + proof_consensus_state_of_a_on_b: dummy_proof(), + proofs_height_on_b: proof_height, + consensus_height_of_a_on_b: client_state.latest_height(), version: ConnVersion::default(), - signer: Signer::new("account0"), + signer: Signer::from_str("account0").expect("invalid signer"), }; - let event = make_open_ack_connection_event(&msg); - write_log.set_ibc_event(event.try_into().unwrap()); + // event + let event = RawIbcEvent::OpenAckConnection(ConnOpenAck::new( + msg.conn_id_on_a.clone(), + get_client_id(), + msg.conn_id_on_b.clone(), + counterparty.client_id().clone(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); + let tx_code = vec![]; let tx_index = TxIndex::default(); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(conn_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1040,61 +1409,74 @@ mod tests { #[test] fn test_confirm_connection() { - let (mut storage, mut write_log) = insert_init_states(); + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + // insert a TryOpen connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::TryOpen); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // update the connection to Open let conn = 
get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + keys_changed.insert(conn_key); // prepare data - let height = Height::new(0, 1); - let proof_conn = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_client = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_consensus = ConsensusProof::new( - CommitmentProofBytes::try_from(vec![0]).unwrap(), - height, - ) - .unwrap(); - let proofs = Proofs::new( - proof_conn, - Some(proof_client), - Some(proof_consensus), - None, - height, - ) - .unwrap(); - let tx_code = vec![]; + let proof_height = Height::new(0, 1).unwrap(); let msg = MsgConnectionOpenConfirm { - connection_id: get_connection_id(), - proofs, - signer: Signer::new("account0"), + conn_id_on_b: get_connection_id(), + proof_conn_end_on_a: dummy_proof(), + proof_height_on_a: proof_height, + signer: Signer::from_str("account0").expect("invalid signer"), }; - let event = make_open_confirm_connection_event(&msg); - write_log.set_ibc_event(event.try_into().unwrap()); + // event + let counterparty = get_conn_counterparty(); + let event = RawIbcEvent::OpenConfirmConnection(ConnOpenConfirm::new( + get_connection_id(), + get_client_id(), + counterparty.connection_id().cloned().unwrap(), + counterparty.client_id().clone(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); + let tx_code = vec![]; let tx_index = TxIndex::default(); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(conn_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1115,47 +1497,110 @@ mod tests { #[test] fn test_init_channel() { - let (mut storage, mut write_log) = insert_init_states(); + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + // insert an opened connection - let conn_key = connection_key(&get_connection_id()); + let conn_id = get_connection_id(); + let conn_key = connection_key(&conn_id); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); // prepare data - let channel = get_channel(ChanState::Init, Order::Ordered); let msg = MsgChannelOpenInit { - port_id: get_port_id(), - channel: channel.clone(), - 
signer: Signer::new("account0"), + port_id_on_a: get_port_id(), + connection_hops_on_a: vec![conn_id.clone()], + port_id_on_b: get_port_id(), + ordering: Order::Unordered, + signer: Signer::from_str("account0").expect("invalid signer"), + version_proposal: ChanVersion::new(VERSION.to_string()), }; // insert an Init channel - set_port(&mut write_log, 0); let channel_key = channel_key(&get_port_channel_id()); + let mut counterparty = get_channel_counterparty(); + counterparty.channel_id = None; + let channel = ChannelEnd::new( + ChanState::Init, + msg.ordering, + counterparty.clone(), + msg.connection_hops_on_a.clone(), + msg.version_proposal.clone(), + ); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - let event = make_open_init_channel_event(&get_channel_id(), &msg); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + keys_changed.insert(channel_key); + // channel counter + let chan_counter_key = channel_counter_key(); + increment_counter(&mut wl_storage, &chan_counter_key); + keys_changed.insert(chan_counter_key); + // sequences + let port_channel_id = + PortChannelId::new(get_channel_id(), msg.port_id_on_a.clone()); + let send_key = next_sequence_send_key(&port_channel_id); + increment_counter(&mut wl_storage, &send_key); + keys_changed.insert(send_key); + let recv_key = next_sequence_recv_key(&port_channel_id); + increment_counter(&mut wl_storage, &recv_key); + keys_changed.insert(recv_key); + let ack_key = next_sequence_ack_key(&port_channel_id); + increment_counter(&mut wl_storage, &ack_key); + keys_changed.insert(ack_key); + // event + let event = RawIbcEvent::OpenInitChannel(ChanOpenInit::new( + msg.port_id_on_a.clone(), + get_channel_id(), + counterparty.port_id().clone(), + conn_id, + msg.version_proposal.clone(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(channel_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1176,66 +1621,111 @@ mod tests { #[test] fn test_try_channel() { - let (mut storage, mut write_log) = insert_init_states(); - // insert an opend connection + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + + // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + 
wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); // prepare data - let height = Height::new(0, 1); - let proof_channel = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_client = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_consensus = ConsensusProof::new( - CommitmentProofBytes::try_from(vec![0]).unwrap(), - height, - ) - .unwrap(); - let proofs = Proofs::new( - proof_channel, - Some(proof_client), - Some(proof_consensus), - None, - height, - ) - .unwrap(); - let channel = get_channel(ChanState::TryOpen, Order::Ordered); + let proof_height = Height::new(0, 1).unwrap(); + let conn_id = get_connection_id(); + let counterparty = get_channel_counterparty(); + #[allow(deprecated)] let msg = MsgChannelOpenTry { - port_id: get_port_id(), - previous_channel_id: None, - channel: channel.clone(), - counterparty_version: ChanVersion::ics20(), - proofs, - signer: Signer::new("account0"), + port_id_on_b: get_port_id(), + connection_hops_on_b: vec![conn_id.clone()], + port_id_on_a: counterparty.port_id().clone(), + chan_id_on_a: counterparty.channel_id().cloned().unwrap(), + version_supported_on_a: ChanVersion::new(VERSION.to_string()), + proof_chan_end_on_a: dummy_proof(), + proof_height_on_a: proof_height, + ordering: Order::Unordered, + signer: Signer::from_str("account0").expect("invalid signer"), + previous_channel_id: ChannelId::default().to_string(), + version_proposal: ChanVersion::default(), }; // insert a TryOpen channel - set_port(&mut write_log, 0); let channel_key = channel_key(&get_port_channel_id()); + let channel = get_channel(ChanState::TryOpen, Order::Unordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - let event = make_open_try_channel_event(&get_channel_id(), &msg); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + keys_changed.insert(channel_key); + // channel counter + let chan_counter_key = channel_counter_key(); + increment_counter(&mut wl_storage, &chan_counter_key); + keys_changed.insert(chan_counter_key); + // sequences + let port_channel_id = + PortChannelId::new(get_channel_id(), msg.port_id_on_a.clone()); + let send_key = next_sequence_send_key(&port_channel_id); + increment_counter(&mut wl_storage, &send_key); + keys_changed.insert(send_key); + let recv_key = next_sequence_recv_key(&port_channel_id); + increment_counter(&mut wl_storage, &recv_key); + keys_changed.insert(recv_key); + let ack_key = next_sequence_ack_key(&port_channel_id); + increment_counter(&mut wl_storage, &ack_key); + keys_changed.insert(ack_key); + // event + let event = RawIbcEvent::OpenTryChannel(ChanOpenTry::new( + msg.port_id_on_a.clone(), + get_channel_id(), + counterparty.port_id().clone(), + counterparty.channel_id().cloned().unwrap(), + conn_id, + msg.version_supported_on_a.clone(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, 
Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(channel_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1256,74 +1746,95 @@ mod tests { #[test] fn test_ack_channel() { - let (mut storage, mut write_log) = insert_init_states(); - // insert an opend connection + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + + // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); // insert an Init channel - set_port(&mut write_log, 0); let channel_key = channel_key(&get_port_channel_id()); - let channel = get_channel(ChanState::Init, Order::Ordered); + let channel = get_channel(ChanState::Init, Order::Unordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); // prepare data - let height = Height::new(0, 1); - let proof_channel = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_client = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_consensus = ConsensusProof::new( - CommitmentProofBytes::try_from(vec![0]).unwrap(), - height, - ) - .unwrap(); - let proofs = Proofs::new( - proof_channel, - Some(proof_client), - Some(proof_consensus), - None, - height, - ) - .unwrap(); + let proof_height = Height::new(0, 1).unwrap(); + let counterparty = get_channel_counterparty(); let msg = MsgChannelOpenAck { - port_id: get_port_id(), - channel_id: get_channel_id(), - counterparty_channel_id: *get_channel_counterparty() - .channel_id() - .unwrap(), - counterparty_version: ChanVersion::ics20(), - proofs, - signer: Signer::new("account0"), + port_id_on_a: get_port_id(), + chan_id_on_a: get_channel_id(), + chan_id_on_b: counterparty.channel_id().cloned().unwrap(), + version_on_b: ChanVersion::new(VERSION.to_string()), + proof_chan_end_on_b: dummy_proof(), + proof_height_on_b: proof_height, + signer: Signer::from_str("account0").expect("invalid signer"), }; // update the channel to Open - let channel = get_channel(ChanState::Open, Order::Ordered); + let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - let event = - make_open_ack_channel_event(&msg, &channel).expect("no connection"); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + 
.write(&channel_key, bytes) + .expect("write failed"); + keys_changed.insert(channel_key); + // event + let event = RawIbcEvent::OpenAckChannel(ChanOpenAck::new( + msg.port_id_on_a.clone(), + msg.chan_id_on_a.clone(), + counterparty.port_id().clone(), + counterparty.channel_id().cloned().unwrap(), + get_connection_id(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(channel_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1344,71 +1855,93 @@ mod tests { #[test] fn test_confirm_channel() { - let (mut storage, mut write_log) = insert_init_states(); - // insert an opend connection + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + + // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); // insert a TryOpen channel - set_port(&mut write_log, 0); let channel_key = channel_key(&get_port_channel_id()); let channel = get_channel(ChanState::TryOpen, Order::Ordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); // prepare data - let height = Height::new(0, 1); - let proof_channel = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_client = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proof_consensus = ConsensusProof::new( - CommitmentProofBytes::try_from(vec![0]).unwrap(), - height, - ) - .unwrap(); - let proofs = Proofs::new( - proof_channel, - Some(proof_client), - Some(proof_consensus), - None, - height, - ) - .unwrap(); + let proof_height = Height::new(0, 1).unwrap(); let msg = MsgChannelOpenConfirm { - port_id: get_port_id(), - channel_id: get_channel_id(), - proofs, - signer: Signer::new("account0"), + port_id_on_b: get_port_id(), + chan_id_on_b: get_channel_id(), + proof_chan_end_on_a: dummy_proof(), + proof_height_on_a: proof_height, + signer: Signer::from_str("account0").expect("invalid signer"), }; // update the channel to Open let channel = get_channel(ChanState::Open, 
Order::Ordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - - let event = make_open_confirm_channel_event(&msg, &channel) - .expect("no connection"); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + keys_changed.insert(channel_key); + // event + let counterparty = get_channel_counterparty(); + let event = RawIbcEvent::OpenConfirmChannel(ChanOpenConfirm::new( + msg.port_id_on_b.clone(), + msg.chan_id_on_b.clone(), + counterparty.port_id().clone(), + counterparty.channel_id().cloned().unwrap(), + get_connection_id(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(channel_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1427,153 +1960,128 @@ mod tests { ); } - #[test] - fn test_validate_port() { - let (storage, mut write_log) = insert_init_states(); - // insert a port - set_port(&mut write_log, 0); - - let tx_index = TxIndex::default(); - let tx_code = vec![]; - let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); - let gas_meter = VpGasMeter::new(0); - let (vp_wasm_cache, _vp_cache_dir) = - wasm::compilation_cache::common::testing::cache(); - - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(port_key(&get_port_id())); - - let verifiers = BTreeSet::new(); - let ctx = Ctx::new( - &ADDRESS, - &storage, - &write_log, - &tx, - &tx_index, - gas_meter, - &keys_changed, - &verifiers, - vp_wasm_cache, - ); - let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); - } + // skip test_close_init_channel() and test_close_confirm_channel() since it + // is not allowed to close the transfer channel #[test] - fn test_validate_capability() { - let (storage, mut write_log) = insert_init_states(); - // insert a port - let index = 0; - set_port(&mut write_log, index); - - let tx_index = TxIndex::default(); - let tx_code = vec![]; - let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); - let gas_meter = VpGasMeter::new(0); - let (vp_wasm_cache, _vp_cache_dir) = - wasm::compilation_cache::common::testing::cache(); - + fn test_send_packet() { let mut keys_changed = BTreeSet::new(); - let cap_key = capability_key(index); - keys_changed.insert(cap_key); - - let verifiers = BTreeSet::new(); - let ctx = Ctx::new( - &ADDRESS, - &storage, - &write_log, - &tx, - &tx_index, - gas_meter, - &keys_changed, - &verifiers, - vp_wasm_cache, - ); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); - let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - 
tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); - } - - #[test] - fn test_validate_seq_send() { - let (mut storage, mut write_log) = insert_init_states(); - // insert an opened connection + // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - // insert an opened channel - set_port(&mut write_log, 0); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + // insert an Open channel let channel_key = channel_key(&get_port_channel_id()); - let channel = get_channel(ChanState::Open, Order::Ordered); + let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + // init balance + let sender = established_address_1(); + let balance_key = balance_key(&nam(), &sender); + let amount = Amount::whole(100); + wl_storage + .write_log + .write(&balance_key, amount.try_to_vec().unwrap()) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); - // prepare a message - let timeout_timestamp = - (Timestamp::now() + Duration::from_secs(100)).unwrap(); + // prepare data let msg = MsgTransfer { - source_port: get_port_id(), - source_channel: get_channel_id(), - token: Some(Coin { - denom: "NAM".to_string(), + port_id_on_a: get_port_id(), + chan_id_on_a: get_channel_id(), + token: Coin { + denom: nam().to_string(), amount: 100u64.to_string(), - }), - sender: Signer::new("sender"), - receiver: Signer::new("receiver"), - timeout_height: Height::new(0, 100), - timeout_timestamp, + }, + sender: Signer::from_str(&sender.to_string()) + .expect("invalid signer"), + receiver: Signer::from_str("receiver").expect("invalid signer"), + timeout_height_on_b: TimeoutHeight::Never, + timeout_timestamp_on_b: Timestamp::none(), }; - // get and increment the nextSequenceSend + // the sequence send let seq_key = next_sequence_send_key(&get_port_channel_id()); - let sequence = get_next_seq(&storage, &seq_key); - increment_seq(&mut write_log, &seq_key, sequence); - // make a packet - let counterparty = get_channel_counterparty(); - let packet = packet_from_message(&msg, sequence, &counterparty); - // insert a commitment - let commitment = actions::commitment(&packet); - let key = commitment_key(&get_port_id(), &get_channel_id(), sequence); - write_log - .write(&key, commitment.into_vec()) + let sequence = get_next_seq(&wl_storage, &seq_key); + wl_storage + .write_log + .write(&seq_key, (u64::from(sequence) + 1).to_be_bytes().to_vec()) + .expect("write failed"); + keys_changed.insert(seq_key); + // packet commitment + let packet = + packet_from_message(&msg, sequence, &get_channel_counterparty()); + let commitment_key = + commitment_key(&msg.port_id_on_a, &msg.chan_id_on_a, sequence); + let commitment = commitment(&packet); + let bytes = commitment.into_vec(); + wl_storage + .write_log + 
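.write(&commitment_key, bytes)
+ .expect("write failed");
+ // Review note (sketch, not part of the patch itself): the commitment helper
+ // near the top of this module hashes the packet as ICS-04 prescribes:
+ //   sha256(timeout_timestamp_ns || revision_number || revision_height
+ //          || sha256(packet.data))
+ // with all integers big-endian; the VP is expected to recompute this value
+ // for the packet in the message and match it against the bytes written here.
+ wl_storage
+ .write_log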
.write(&commitment_key, bytes) .expect("write failed"); + keys_changed.insert(commitment_key); + // event + let transfer_event = TransferEvent { + sender: msg.sender.clone(), + receiver: msg.receiver.clone(), + }; + let event = RawIbcEvent::AppModule(ModuleEvent::from(transfer_event)); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); + let event = RawIbcEvent::SendPacket(SendPacket::new( + packet, + Order::Unordered, + get_connection_id(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(seq_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1593,76 +2101,165 @@ mod tests { } #[test] - fn test_validate_seq_recv() { - let (mut storage, mut write_log) = insert_init_states(); - // insert an opened connection + fn test_recv_packet() { + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + + // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - // insert an opened channel - set_port(&mut write_log, 0); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + // insert an open channel let channel_key = channel_key(&get_port_channel_id()); - let channel = get_channel(ChanState::Open, Order::Ordered); + let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); - - // get and increment the nextSequenceRecv - let seq_key = next_sequence_recv_key(&get_port_channel_id()); - let sequence = get_next_seq(&storage, &seq_key); - increment_seq(&mut write_log, &seq_key, sequence); - // make a packet and data - let counterparty = get_channel_counterparty(); - let timeout_timestamp = - (Timestamp::now() + Duration::from_secs(100)).unwrap(); - let packet = Packet { - sequence, - source_port: counterparty.port_id().clone(), - source_channel: *counterparty.channel_id().unwrap(), - destination_port: get_port_id(), - destination_channel: get_channel_id(), - data: vec![0], - timeout_height: Height::new(0, 100), - timeout_timestamp, + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + 
.unwrap(); + + // prepare data + let receiver = established_address_1(); + let transfer_msg = MsgTransfer { + port_id_on_a: get_port_id(), + chan_id_on_a: get_channel_id(), + token: Coin { + denom: nam().to_string(), + amount: 100u64.to_string(), + }, + sender: Signer::from_str("sender").expect("invalid signer"), + receiver: Signer::from_str(&receiver.to_string()) + .expect("invalid signer"), + timeout_height_on_b: TimeoutHeight::Never, + timeout_timestamp_on_b: Timestamp::none(), }; - let proof_packet = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proofs = - Proofs::new(proof_packet, None, None, None, Height::new(0, 1)) - .unwrap(); + let counterparty = get_channel_counterparty(); + let mut packet = + packet_from_message(&transfer_msg, 1.into(), &counterparty); + packet.port_id_on_a = counterparty.port_id().clone(); + packet.chan_id_on_a = counterparty.channel_id().cloned().unwrap(); + packet.port_id_on_b = get_port_id(); + packet.chan_id_on_b = get_channel_id(); let msg = MsgRecvPacket { - packet, - proofs, - signer: Signer::new("account0"), + packet: packet.clone(), + proof_commitment_on_a: dummy_proof(), + proof_height_on_a: Height::new(0, 1).unwrap(), + signer: Signer::from_str("account0").expect("invalid signer"), }; - // insert a receipt and an ack - let key = receipt_key(&get_port_id(), &get_channel_id(), sequence); - write_log - .write(&key, PacketReceipt::default().as_bytes().to_vec()) + // packet receipt + let receipt_key = receipt_key( + &msg.packet.port_id_on_b, + &msg.packet.chan_id_on_b, + msg.packet.seq_on_a, + ); + let bytes = [1_u8].to_vec(); + wl_storage + .write_log + .write(&receipt_key, bytes) + .expect("write failed"); + keys_changed.insert(receipt_key); + // packet acknowledgement + let ack_key = ack_key( + &packet.port_id_on_b, + &packet.chan_id_on_b, + msg.packet.seq_on_a, + ); + let transfer_ack = TokenTransferAcknowledgement::success(); + let acknowledgement = Acknowledgement::from(transfer_ack); + let bytes = sha2::Sha256::digest(acknowledgement.as_ref()).to_vec(); + wl_storage + .write_log + .write(&ack_key, bytes) .expect("write failed"); - let key = ack_key(&get_port_id(), &get_channel_id(), sequence); - let ack = PacketAck::result_success().encode_to_vec(); - write_log.write(&key, ack).expect("write failed"); + keys_changed.insert(ack_key); + // denom + let mut coin: PrefixedCoin = + transfer_msg.token.try_into().expect("invalid token"); + coin.denom.add_trace_prefix(TracePrefix::new( + packet.port_id_on_b.clone(), + packet.chan_id_on_b.clone(), + )); + let trace_hash = calc_hash(coin.denom.to_string()); + let denom_key = ibc_denom_key(&trace_hash); + let bytes = coin.denom.to_string().as_bytes().to_vec(); + wl_storage + .write_log + .write(&denom_key, bytes) + .expect("write failed"); + keys_changed.insert(denom_key); + // event + let denom_trace_event = DenomTraceEvent { + trace_hash: Some(trace_hash), + denom: coin.denom, + }; + let event = + RawIbcEvent::AppModule(ModuleEvent::from(denom_trace_event)); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); + let event = RawIbcEvent::ReceivePacket(ReceivePacket::new( + msg.packet.clone(), + Order::Unordered, + get_connection_id(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); + let event = + RawIbcEvent::WriteAcknowledgement(WriteAcknowledgement::new( + packet, + acknowledgement, + get_connection_id(), +
)); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(seq_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1682,81 +2279,134 @@ mod tests { } #[test] - fn test_validate_seq_ack() { - let (mut storage, mut write_log) = insert_init_states(); - // get the nextSequenceAck - let seq_key = next_sequence_ack_key(&get_port_channel_id()); - let sequence = get_next_seq(&storage, &seq_key); - // make a packet - let counterparty = get_channel_counterparty(); - let timeout_timestamp = - (Timestamp::now() + core::time::Duration::from_secs(100)).unwrap(); - let packet = Packet { - sequence, - source_port: get_port_id(), - source_channel: get_channel_id(), - destination_port: counterparty.port_id().clone(), - destination_channel: *counterparty.channel_id().unwrap(), - data: vec![0], - timeout_height: Height::new(0, 100), - timeout_timestamp, - }; - // insert an opened connection + fn test_ack_packet() { + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + + // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - // insert an opened channel - set_port(&mut write_log, 0); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + // insert an Open channel let channel_key = channel_key(&get_port_channel_id()); - let channel = get_channel(ChanState::Open, Order::Ordered); + let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - // insert a commitment - let commitment = actions::commitment(&packet); - let commitment_key = - commitment_key(&get_port_id(), &get_channel_id(), sequence); - write_log - .write(&commitment_key, commitment.into_vec()) + wl_storage + .write_log + .write(&channel_key, bytes) .expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + // commitment + let sender = established_address_1(); + let transfer_msg = MsgTransfer { + port_id_on_a: get_port_id(), + chan_id_on_a: get_channel_id(), + token: Coin { + denom: nam().to_string(), + amount: 100u64.to_string(), + }, + sender: Signer::from_str(&sender.to_string()) + .expect("invalid signer"), + receiver: Signer::from_str("receiver").expect("invalid signer"), + timeout_height_on_b: TimeoutHeight::Never, + timeout_timestamp_on_b: Timestamp::none(), + }; + let sequence = 1.into(); + let packet = packet_from_message( + &transfer_msg, + sequence, + &get_channel_counterparty(), + ); + let 
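commitment_key = commitment_key(
+ &transfer_msg.port_id_on_a,
+ &transfer_msg.chan_id_on_a,
+ sequence,
+ );
+ // Review note (not part of the patch itself): for MsgAcknowledgement the
+ // decisive state change is a deletion: once the ack is processed, the packet
+ // commitment staged here must be removed again (see write_log.delete below),
+ // and the VP checks that deletion together with the AcknowledgePacket event.
+ let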
commitment_key = commitment_key( + &transfer_msg.port_id_on_a, + &transfer_msg.chan_id_on_a, + sequence, + ); + let commitment = commitment(&packet); + let bytes = commitment.into_vec(); + wl_storage + .write_log + .write(&commitment_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); // prepare data - let ack = PacketAck::result_success().encode_to_vec(); - let proof_packet = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proofs = - Proofs::new(proof_packet, None, None, None, Height::new(0, 1)) - .unwrap(); + let transfer_ack = TokenTransferAcknowledgement::success(); + let acknowledgement = Acknowledgement::from(transfer_ack.clone()); let msg = MsgAcknowledgement { - packet, - acknowledgement: ack.into(), - proofs, - signer: Signer::new("account0"), + packet: packet.clone(), + acknowledgement, + proof_acked_on_b: dummy_proof(), + proof_height_on_b: Height::new(0, 1).unwrap(), + signer: Signer::from_str("account0").expect("invalid signer"), }; - // increment the nextSequenceAck - increment_seq(&mut write_log, &seq_key, sequence); // delete the commitment - write_log.delete(&commitment_key).expect("delete failed"); + wl_storage + .write_log + .delete(&commitment_key) + .expect("delete failed"); + keys_changed.insert(commitment_key); + // event + let data = serde_json::from_slice::<PacketData>(&packet.data) + .expect("decoding packet data failed"); + let ack_event = AckEvent { + receiver: data.receiver, + denom: data.token.denom, + amount: data.token.amount, + acknowledgement: transfer_ack, + }; + let event = RawIbcEvent::AppModule(ModuleEvent::from(ack_event)); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); + let event = RawIbcEvent::AcknowledgePacket(AcknowledgePacket::new( + packet, + Order::Unordered, + get_connection_id(), + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(seq_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1776,73 +2426,139 @@ mod tests { } #[test] - fn test_validate_commitment() { + fn test_timeout_packet() { + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + + // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write
failed"); - // insert an opened channel - set_port(&mut write_log, 0); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + // insert an Open channel let channel_key = channel_key(&get_port_channel_id()); - let channel = get_channel(ChanState::Open, Order::Ordered); + let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); - - // prepare a message - let timeout_timestamp = - (Timestamp::now() + Duration::from_secs(100)).unwrap(); - let msg = MsgTransfer { - source_port: get_port_id(), - source_channel: get_channel_id(), - token: Some(Coin { - denom: "NAM".to_string(), + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + // init the escrow balance + let balance_key = + balance_key(&nam(), &Address::Internal(InternalAddress::IbcEscrow)); + let amount = Amount::whole(100); + wl_storage + .write_log + .write(&balance_key, amount.try_to_vec().unwrap()) + .expect("write failed"); + // commitment + let sender = established_address_1(); + let transfer_msg = MsgTransfer { + port_id_on_a: get_port_id(), + chan_id_on_a: get_channel_id(), + token: Coin { + denom: nam().to_string(), amount: 100u64.to_string(), - }), - sender: Signer::new("sender"), - receiver: Signer::new("receiver"), - timeout_height: Height::new(0, 100), - timeout_timestamp, + }, + sender: Signer::from_str(&sender.to_string()) + .expect("invalid signer"), + receiver: Signer::from_str("receiver").expect("invalid signer"), + timeout_height_on_b: TimeoutHeight::Never, + timeout_timestamp_on_b: (Timestamp::now() - Duration::new(10, 0)) + .unwrap(), }; - - // make a packet - let seq_key = next_sequence_send_key(&get_port_channel_id()); - let sequence = get_next_seq(&storage, &seq_key); - let counterparty = get_channel_counterparty(); - let packet = packet_from_message(&msg, sequence, &counterparty); - // insert a commitment - let commitment = actions::commitment(&packet); + let sequence = 1.into(); + let packet = packet_from_message( + &transfer_msg, + sequence, + &get_channel_counterparty(), ); let commitment_key = commitment_key( - &packet.source_port, - &packet.source_channel, + &transfer_msg.port_id_on_a, + &transfer_msg.chan_id_on_a, sequence, ); - write_log - .write(&commitment_key, commitment.into_vec()) + let commitment = commitment(&packet); + let bytes = commitment.into_vec(); + wl_storage + .write_log + .write(&commitment_key, bytes) .expect("write failed"); - let event = make_send_packet_event(packet); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); + + // prepare data + let msg = MsgTimeout { + packet: packet.clone(), + next_seq_recv_on_b: sequence, + proof_unreceived_on_b: dummy_proof(), + proof_height_on_b: Height::new(0, 1).unwrap(), + signer: Signer::from_str("account0").expect("invalid signer"), }; + + // delete the commitment + wl_storage + .write_log + .delete(&commitment_key) + .expect("delete failed"); + keys_changed.insert(commitment_key); + // event + let data = serde_json::from_slice::<PacketData>(&packet.data) + .expect("decoding packet 
data failed"); + let timeout_event = TimeoutEvent { + refund_receiver: data.sender, + refund_denom: data.token.denom, + refund_amount: data.token.amount, + }; + let event = RawIbcEvent::AppModule(ModuleEvent::from(timeout_event)); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); + let event = RawIbcEvent::TimeoutPacket(TimeoutPacket::new( + packet, + Order::Unordered, + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(commitment_key); - let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1862,128 +2578,139 @@ mod tests { } #[test] - fn test_validate_receipt() { - let (mut storage, mut write_log) = insert_init_states(); - // insert an opened connection + fn test_timeout_on_close_packet() { + let mut keys_changed = BTreeSet::new(); + let mut wl_storage = init_storage(); + insert_init_client(&mut wl_storage); + + // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - // insert an opened channel - set_port(&mut write_log, 0); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + // insert an Open channel let channel_key = channel_key(&get_port_channel_id()); - let channel = get_channel(ChanState::Open, Order::Ordered); + let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); - - // make a packet and data - let counterparty = get_channel_counterparty(); - let timeout_timestamp = - (Timestamp::now() + Duration::from_secs(100)).unwrap(); - let packet = Packet { - sequence: Sequence::from(1), - source_port: counterparty.port_id().clone(), - source_channel: *counterparty.channel_id().unwrap(), - destination_port: get_port_id(), - destination_channel: get_channel_id(), - data: vec![0], - timeout_height: Height::new(0, 100), - timeout_timestamp, - }; - let proof_packet = CommitmentProofBytes::try_from(vec![0]).unwrap(); - let proofs = - Proofs::new(proof_packet, None, None, None, Height::new(0, 1)) - .unwrap(); - let msg = MsgRecvPacket { - packet, - proofs, - signer: Signer::new("account0"), - }; - - // insert a receipt and an ack - let receipt_key = receipt_key( - &msg.packet.destination_port, - &msg.packet.destination_channel, - msg.packet.sequence, - ); - write_log - .write(&receipt_key, PacketReceipt::default().as_bytes().to_vec()) + wl_storage + .write_log + .write(&channel_key, bytes) .expect("write failed"); - let ack_key = ack_key( - 
&msg.packet.destination_port, - &msg.packet.destination_channel, - msg.packet.sequence, + // init the escrow balance + let balance_key = + balance_key(&nam(), &Address::Internal(InternalAddress::IbcEscrow)); + let amount = Amount::whole(100); + wl_storage + .write_log + .write(&balance_key, amount.try_to_vec().unwrap()) + .expect("write failed"); + // commitment + let sender = established_address_1(); + let transfer_msg = MsgTransfer { + port_id_on_a: get_port_id(), + chan_id_on_a: get_channel_id(), + token: Coin { + denom: nam().to_string(), + amount: 100u64.to_string(), + }, + sender: Signer::from_str(&sender.to_string()) + .expect("invalid signer"), + receiver: Signer::from_str("receiver").expect("invalid signer"), + timeout_height_on_b: TimeoutHeight::Never, + timeout_timestamp_on_b: Timestamp::none(), + }; + let sequence = 1.into(); + let packet = packet_from_message( + &transfer_msg, + sequence, + &get_channel_counterparty(), ); - let ack = PacketAck::result_success().encode_to_vec(); - write_log.write(&ack_key, ack).expect("write failed"); - - let tx_index = TxIndex::default(); - let tx_code = vec![]; - let mut tx_data = vec![]; - msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); - let gas_meter = VpGasMeter::new(0); - let (vp_wasm_cache, _vp_cache_dir) = - wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(receipt_key); - - let verifiers = BTreeSet::new(); - let ctx = Ctx::new( - &ADDRESS, - &storage, - &write_log, - &tx, - &tx_index, - gas_meter, - &keys_changed, - &verifiers, - vp_wasm_cache, + let commitment_key = commitment_key( + &transfer_msg.port_id_on_a, + &transfer_msg.chan_id_on_a, + sequence, ); + let commitment = commitment(&packet); + let bytes = commitment.into_vec(); + wl_storage + .write_log + .write(&commitment_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); + // for next block + wl_storage + .storage + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); - let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); - } + // prepare data + let msg = MsgTimeoutOnClose { + packet: packet.clone(), + next_seq_recv_on_b: sequence, + proof_unreceived_on_b: dummy_proof(), + proof_close_on_b: dummy_proof(), + proof_height_on_b: Height::new(0, 1).unwrap(), + signer: Signer::from_str("account0").expect("invalid signer"), }; - #[test] - fn test_validate_ack() { - let (storage, mut write_log) = insert_init_states(); - - // insert a receipt and an ack - let receipt_key = - receipt_key(&get_port_id(), &get_channel_id(), Sequence::from(1)); - write_log - .write(&receipt_key, PacketReceipt::default().as_bytes().to_vec()) - .expect("write failed"); - let ack_key = - ack_key(&get_port_id(), &get_channel_id(), Sequence::from(1)); - let ack = PacketAck::result_success().encode_to_vec(); - write_log.write(&ack_key, ack).expect("write failed"); + // delete the commitment + wl_storage + .write_log + .delete(&commitment_key) + .expect("delete failed"); + keys_changed.insert(commitment_key); + // event + let data = serde_json::from_slice::<PacketData>(&packet.data) + .expect("decoding packet data failed"); + let timeout_event = TimeoutEvent { + refund_receiver: data.sender, 
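// The remaining fields mirror the original transfer: on timeout the escrowed
// tokens flow back to the sender, so the event's refund denom and amount must
// echo the packet data for the VP to accept the emitted event.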
refund_denom: data.token.denom, + refund_amount: data.token.amount, + }; + let event = RawIbcEvent::AppModule(ModuleEvent::from(timeout_event)); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); + let event = RawIbcEvent::TimeoutPacket(TimeoutPacket::new( + packet, + Order::Unordered, + )); + let message_event = RawIbcEvent::Message(event.event_type()); + wl_storage + .write_log + .emit_ibc_event(message_event.try_into().unwrap()); + wl_storage + .write_log + .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; - let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let mut tx_data = vec![]; + msg.to_any().encode(&mut tx_data).expect("encoding failed"); + let tx = Tx::new( + tx_code, + Some(tx_data), + wl_storage.storage.chain_id.clone(), + None, + ) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let mut keys_changed = BTreeSet::new(); - keys_changed.insert(ack_key); let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1991,7 +2718,6 @@ mod tests { &verifiers, vp_wasm_cache, ); - let ibc = Ibc { ctx }; assert!( ibc.validate_tx( diff --git a/shared/src/ledger/ibc/vp/packet.rs b/shared/src/ledger/ibc/vp/packet.rs deleted file mode 100644 index 09346daf06c..00000000000 --- a/shared/src/ledger/ibc/vp/packet.rs +++ /dev/null @@ -1,774 +0,0 @@ -//! IBC validity predicate for packets - -use namada_core::ledger::ibc::actions::{ - self, make_send_packet_event, make_timeout_event, packet_from_message, -}; -use namada_core::ledger::ibc::data::{ - Error as IbcDataError, FungibleTokenPacketData, IbcMessage, -}; -use namada_core::ledger::ibc::storage::{ - ibc_denom_key, port_channel_sequence_id, token_hash_from_denom, - Error as IbcStorageError, -}; -use thiserror::Error; - -use super::{Ibc, StateChange}; -use crate::ibc::core::ics02_client::height::Height; -use crate::ibc::core::ics04_channel::channel::{ - ChannelEnd, Counterparty, Order, State, -}; -use crate::ibc::core::ics04_channel::commitment::PacketCommitment; -use crate::ibc::core::ics04_channel::context::ChannelReader; -use crate::ibc::core::ics04_channel::error::Error as Ics04Error; -use crate::ibc::core::ics04_channel::handler::verify::{ - verify_channel_proofs, verify_next_sequence_recv, - verify_packet_acknowledgement_proofs, verify_packet_receipt_absence, - verify_packet_recv_proofs, -}; -use crate::ibc::core::ics04_channel::msgs::acknowledgement::{ - Acknowledgement, MsgAcknowledgement, -}; -use crate::ibc::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; -use crate::ibc::core::ics04_channel::msgs::PacketMsg; -use crate::ibc::core::ics04_channel::packet::{Packet, Sequence}; -use crate::ibc::core::ics24_host::identifier::{ - ChannelId, ClientId, PortChannelId, PortId, -}; -use crate::ibc::core::ics26_routing::msgs::Ics26Envelope; -use crate::ibc::proofs::Proofs; -use crate::ledger::native_vp::VpEnv; -use crate::ledger::storage::{self, StorageHasher}; -use crate::types::storage::Key; -use crate::vm::WasmCacheAccess; - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("State change error: {0}")] - InvalidStateChange(String), - #[error("Client error: {0}")] - InvalidClient(String), - #[error("Connection error: {0}")] - InvalidConnection(String), - #[error("Channel error: {0}")] - 
InvalidChannel(String), - #[error("Port error: {0}")] - InvalidPort(String), - #[error("Packet error: {0}")] - InvalidPacket(String), - #[error("Proof verification error: {0}")] - ProofVerificationFailure(Ics04Error), - #[error("Decoding TX data error: {0}")] - DecodingTxData(std::io::Error), - #[error("IBC data error: {0}")] - InvalidIbcData(IbcDataError), - #[error("IBC storage error: {0}")] - IbcStorage(IbcStorageError), - #[error("IBC event error: {0}")] - IbcEvent(String), - #[error("IBC proof error: {0}")] - Proof(String), - #[error("IBC denom error: {0}")] - Denom(String), -} - -/// IBC packet functions result -pub type Result<T> = std::result::Result<T, Error>; - -enum Phase { - Send, - Recv, - Ack, -} - -impl<'a, DB, H, CA> Ibc<'a, DB, H, CA> -where - DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - pub(super) fn validate_commitment( - &self, - key: &Key, - tx_data: &[u8], - ) -> Result<()> { - let commitment_key = port_channel_sequence_id(key)?; - match self - .get_state_change(key) - .map_err(|e| Error::InvalidStateChange(e.to_string()))? - { - StateChange::Created => { - // sending a packet - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_transfer()?; - // make a packet - let channel = self - .channel_end(&(commitment_key.0.clone(), commitment_key.1)) - .map_err(|e| Error::InvalidChannel(e.to_string()))?; - let mut packet = packet_from_message( - &msg, - commitment_key.2, - channel.counterparty(), - ); - self.update_denom(&mut packet)?; - let commitment = self - .get_packet_commitment(&commitment_key) - .map_err(|_| { - Error::InvalidPacket(format!( - "The commitement doesn't exist: Port {}, Channel \ - {}, Sequence {}", - commitment_key.0, - commitment_key.1, - commitment_key.2, - )) - })?; - self.validate_packet_commitment(&packet, commitment) - .map_err(|e| Error::InvalidPacket(e.to_string()))?; - - self.validate_send_packet(&commitment_key, &packet)?; - - let event = make_send_packet_event(packet); - self.check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string())) - } - StateChange::Deleted => { - // check the channel state - let channel = self - .channel_end(&(commitment_key.0.clone(), commitment_key.1)) - .map_err(|_| { - Error::InvalidChannel(format!( - "The channel doesn't exist: Port {}, Channel {}", - commitment_key.0, commitment_key.1, - )) - })?; - let ibc_msg = IbcMessage::decode(tx_data)?; - match channel.state() { - State::Open => { - // "PacketAcknowledgement" or timeout for the unordered - // channel - match &ibc_msg.0 { - Ics26Envelope::Ics4PacketMsg( - PacketMsg::AckPacket(msg), - ) => self.validate_ack_packet(&commitment_key, msg), - Ics26Envelope::Ics4PacketMsg( - PacketMsg::ToPacket(_), - ) - | Ics26Envelope::Ics4PacketMsg( - PacketMsg::ToClosePacket(_), - ) => { - self.validate_timeout(&commitment_key, &ibc_msg) - } - _ => Err(Error::InvalidChannel(format!( - "The channel state is invalid: Port {}, \ - Channel {}", - commitment_key.0, commitment_key.1 - ))), - } - } - State::Closed => { - self.validate_timeout(&commitment_key, &ibc_msg) - } - _ => Err(Error::InvalidChannel(format!( - "The channel state is invalid: Port {}, Channel {}", - commitment_key.0, commitment_key.1 - ))), - } - } - _ => Err(Error::InvalidStateChange(format!( - "The state change of the commitment is invalid: Key {}", - key - ))), - } - } - - pub(super) fn validate_receipt( - &self, - key: &Key, - tx_data: &[u8], - ) -> Result<()> { - match self - .get_state_change(key) - 
.map_err(|e| Error::InvalidStateChange(e.to_string()))? - { - StateChange::Created => { - let receipt_key = port_channel_sequence_id(key)?; - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_recv_packet()?; - self.validate_recv_packet(&receipt_key, &msg) - } - _ => Err(Error::InvalidStateChange( - "The state change of the receipt is invalid".to_owned(), - )), - } - } - - pub(super) fn validate_ack(&self, key: &Key) -> Result<()> { - match self - .get_state_change(key) - .map_err(|e| Error::InvalidStateChange(e.to_string()))? - { - StateChange::Created => { - let ack_key = port_channel_sequence_id(key)?; - // The receipt should have been stored - self.get_packet_receipt(&( - ack_key.0.clone(), - ack_key.1, - ack_key.2, - )) - .map_err(|_| { - Error::InvalidPacket(format!( - "The receipt doesn't exist: Port {}, Channel {}, \ - Sequence {}", - ack_key.0, ack_key.1, ack_key.2, - )) - })?; - // The packet is validated in the receipt validation - Ok(()) - } - _ => Err(Error::InvalidStateChange( - "The state change of the acknowledgment is invalid".to_owned(), - )), - } - } - - fn validate_send_packet( - &self, - port_channel_seq_id: &(PortId, ChannelId, Sequence), - packet: &Packet, - ) -> Result<()> { - self.validate_packet(port_channel_seq_id, packet, Phase::Send)?; - - self.get_packet_commitment(port_channel_seq_id) - .map_err(|_| { - Error::InvalidPacket(format!( - "The commitment doesn't exist: Port {}, Channel {}, \ - Sequence {}", - port_channel_seq_id.0, - port_channel_seq_id.1, - port_channel_seq_id.2 - )) - })?; - - Ok(()) - } - - fn validate_recv_packet( - &self, - port_channel_seq_id: &(PortId, ChannelId, Sequence), - msg: &MsgRecvPacket, - ) -> Result<()> { - self.validate_packet(port_channel_seq_id, &msg.packet, Phase::Recv)?; - - self.get_packet_receipt(port_channel_seq_id).map_err(|_| { - Error::InvalidPacket(format!( - "The receipt doesn't exist: Port {}, Channel {}, Sequence {}", - port_channel_seq_id.0, - port_channel_seq_id.1, - port_channel_seq_id.2 - )) - })?; - self.get_packet_acknowledgement(port_channel_seq_id) - .map_err(|_| { - Error::InvalidPacket(format!( - "The acknowledgement doesn't exist: Port {}, Channel {}, \ - Sequence {}", - port_channel_seq_id.0, - port_channel_seq_id.1, - port_channel_seq_id.2 - )) - })?; - let port_channel_id = PortChannelId { - port_id: port_channel_seq_id.0.clone(), - channel_id: port_channel_seq_id.1, - }; - self.verify_recv_proof( - &port_channel_id, - msg.proofs.height(), - &msg.packet, - &msg.proofs, - ) - } - - fn validate_ack_packet( - &self, - port_channel_seq_id: &(PortId, ChannelId, Sequence), - msg: &MsgAcknowledgement, - ) -> Result<()> { - self.validate_packet(port_channel_seq_id, &msg.packet, Phase::Ack)?; - - let prev_commitment = self - .get_packet_commitment_pre(port_channel_seq_id) - .map_err(|e| Error::InvalidPacket(e.to_string()))?; - self.validate_packet_commitment(&msg.packet, prev_commitment)?; - - if self.get_packet_commitment(port_channel_seq_id).is_ok() { - return Err(Error::InvalidPacket( - "The commitment hasn't been deleted yet".to_owned(), - )); - } - - let port_channel_id = PortChannelId { - port_id: port_channel_seq_id.0.clone(), - channel_id: port_channel_seq_id.1, - }; - self.verify_ack_proof( - &port_channel_id, - msg.proofs.height(), - &msg.packet, - msg.acknowledgement.clone(), - &msg.proofs, - ) - } - - fn validate_packet( - &self, - port_channel_seq_id: &(PortId, ChannelId, Sequence), - packet: &Packet, - phase: Phase, - ) -> Result<()> { - let (port_id, channel_id, sequence) = 
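// The phase decides which side of the packet the changed key must match:
// Send and Ack validate against (source_port, source_channel, sequence),
// Recv against the destination triple. Hypothetical example: acking packet
// #5 on transfer/channel-0 requires the deleted commitment key to be exactly
// ("transfer", "channel-0", 5).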
port_channel_seq_id; - let port_channel_id = match phase { - Phase::Send | Phase::Ack => { - if *port_id != packet.source_port - || *channel_id != packet.source_channel - || *sequence != packet.sequence - { - return Err(Error::InvalidPacket( - "The packet info invalid".to_owned(), - )); - } - PortChannelId { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - } - } - Phase::Recv => { - if *port_id != packet.destination_port - || *channel_id != packet.destination_channel - || *sequence != packet.sequence - { - return Err(Error::InvalidPacket( - "The packet info invalid".to_owned(), - )); - } - PortChannelId { - port_id: packet.destination_port.clone(), - channel_id: packet.destination_channel, - } - } - }; - - // port authentication - self.authenticated_capability(&port_channel_id.port_id) - .map_err(|e| { - Error::InvalidPort(format!( - "The port is not owned: Port {}, {}", - port_channel_id.port_id, e - )) - })?; - - let channel = self - .channel_end(&( - port_channel_id.port_id.clone(), - port_channel_id.channel_id, - )) - .map_err(|_| { - Error::InvalidChannel(format!( - "The channel doesn't exist: Port/Channel {}", - port_channel_id, - )) - })?; - if !channel.is_open() { - return Err(Error::InvalidChannel(format!( - "The channel isn't open: Port/Channel {}", - port_channel_id - ))); - } - - let connection = self - .connection_from_channel(&channel) - .map_err(|e| Error::InvalidConnection(e.to_string()))?; - if !connection.is_open() { - return Err(Error::InvalidConnection( - "The connection isn't open".to_owned(), - )); - } - - // counterparty consistency - let counterparty = match phase { - Phase::Send | Phase::Ack => Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ), - Phase::Recv => Counterparty::new( - packet.source_port.clone(), - Some(packet.source_channel), - ), - }; - if !channel.counterparty_matches(&counterparty) { - return Err(Error::InvalidPacket( - "The counterpart port or channel is mismatched".to_owned(), - )); - } - - // check timeout - match phase { - Phase::Send => { - let client_id = connection.client_id(); - let height = match self.client_state(client_id) { - Ok(s) => s.latest_height(), - Err(_) => { - return Err(Error::InvalidClient(format!( - "The client state doesn't exist: ID {}", - client_id - ))); - } - }; - self.check_timeout(client_id, height, packet) - .map_err(|e| Error::InvalidPacket(e.to_string()))?; - } - Phase::Recv => { - if packet.timed_out(&self.host_timestamp(), self.host_height()) - { - return Err(Error::InvalidPacket( - "The packet has timed out".to_owned(), - )); - } - } - Phase::Ack => (), - } - - Ok(()) - } - - fn validate_packet_commitment( - &self, - packet: &Packet, - commitment: PacketCommitment, - ) -> Result<()> { - if commitment == actions::commitment(packet) { - Ok(()) - } else { - Err(Error::InvalidPacket( - "The commitment and the packet are mismatched".to_owned(), - )) - } - } - - fn verify_recv_proof( - &self, - port_channel_id: &PortChannelId, - height: Height, - packet: &Packet, - proofs: &Proofs, - ) -> Result<()> { - let channel = self - .channel_end(&( - port_channel_id.port_id.clone(), - port_channel_id.channel_id, - )) - .map_err(|_| { - Error::InvalidChannel(format!( - "The channel doesn't exist: Port/Channel {}", - port_channel_id, - )) - })?; - let connection = self - .connection_from_channel(&channel) - .map_err(|e| Error::InvalidConnection(e.to_string()))?; - - verify_packet_recv_proofs(self, height, packet, &connection, proofs) - 
.map_err(Error::ProofVerificationFailure) - } - - fn verify_ack_proof( - &self, - port_channel_id: &PortChannelId, - height: Height, - packet: &Packet, - ack: Acknowledgement, - proofs: &Proofs, - ) -> Result<()> { - let channel = self - .channel_end(&( - port_channel_id.port_id.clone(), - port_channel_id.channel_id, - )) - .map_err(|_| { - Error::InvalidChannel(format!( - "The channel doesn't exist: Port/Channel {}", - port_channel_id, - )) - })?; - let connection = self - .connection_from_channel(&channel) - .map_err(|e| Error::InvalidConnection(e.to_string()))?; - - verify_packet_acknowledgement_proofs( - self, - height, - packet, - ack, - &connection, - proofs, - ) - .map_err(Error::ProofVerificationFailure) - } - - fn validate_timeout( - &self, - commitment_key: &(PortId, ChannelId, Sequence), - ibc_msg: &IbcMessage, - ) -> Result<()> { - let (height, proofs, packet, next_sequence_recv) = match &ibc_msg.0 { - Ics26Envelope::Ics4PacketMsg(PacketMsg::ToPacket(msg)) => ( - msg.proofs.height(), - msg.proofs.clone(), - msg.packet.clone(), - msg.next_sequence_recv, - ), - Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket(msg)) => ( - msg.proofs.height(), - msg.proofs.clone(), - msg.packet.clone(), - msg.next_sequence_recv, - ), - _ => { - return Err(Error::InvalidChannel(format!( - "Unexpected message was given for timeout: Port/Channel \ - {}/{}", - commitment_key.0, commitment_key.1, - ))); - } - }; - // deleted commitment should be for the packet sent from this channel - let commitment = self - .get_packet_commitment_pre(commitment_key) - .map_err(|e| Error::InvalidPacket(e.to_string()))?; - self.validate_packet_commitment(&packet, commitment) - .map_err(|e| Error::InvalidPacket(e.to_string()))?; - - self.authenticated_capability(&packet.source_port) - .map_err(|e| Error::InvalidPort(e.to_string()))?; - - // the counterparty should be equal to that of the channel - let port_channel_id = PortChannelId { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - }; - let channel = self - .channel_end(&( - port_channel_id.port_id.clone(), - port_channel_id.channel_id, - )) - .map_err(|_| { - Error::InvalidChannel(format!( - "The channel doesn't exist: Port/Channel {}", - port_channel_id - )) - })?; - let counterparty = Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ); - if !channel.counterparty_matches(&counterparty) { - return Err(Error::InvalidPacket(format!( - "The packet is invalid for the counterparty: Port/Channel \ - {}/{}", - packet.destination_port, packet.destination_channel - ))); - } - - let connection = self - .connection_from_channel(&channel) - .map_err(|e| Error::InvalidConnection(e.to_string()))?; - let client_id = connection.client_id().clone(); - - // check if the packet actually timed out - match self.check_timeout(&client_id, proofs.height(), &packet) { - Ok(()) => { - // "TimedoutOnClose" because the packet didn't time out - // check that the counterpart channel has been closed - let expected_my_side = Counterparty::new( - packet.source_port.clone(), - Some(packet.source_channel), - ); - let counterparty = connection.counterparty(); - let conn_id = - counterparty.connection_id().ok_or_else(|| { - Error::InvalidConnection( - "The counterparty doesn't have a connection ID" - .to_owned(), - ) - })?; - let expected_conn_hops = vec![conn_id.clone()]; - let expected_channel = ChannelEnd::new( - State::Closed, - *channel.ordering(), - expected_my_side, - expected_conn_hops, - channel.version().clone(), - ); - 
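// Sketch of the step below: when the packet has not actually timed out, a
// timeout-on-close is only valid if the counterparty channel can be proven
// Closed, i.e. the expected ChannelEnd built above must verify against the
// closed-channel proof carried in proofs.other_proof().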
- let proofs_closed = make_proofs_for_channel(&proofs)?; - verify_channel_proofs( - self, - height, - &channel, - &connection, - &expected_channel, - &proofs_closed, - ) - .map_err(Error::ProofVerificationFailure)?; - } - Err(_) => { - // the packet timed out - let event = make_timeout_event(packet.clone()); - self.check_emitted_event(event) - .map_err(|e| Error::IbcEvent(e.to_string()))?; - } - } - - if channel.order_matches(&Order::Ordered) { - if !channel.state_matches(&State::Closed) { - return Err(Error::InvalidChannel(format!( - "The channel hasn't been closed yet: Port/Channel {}", - port_channel_id - ))); - } - if packet.sequence < next_sequence_recv { - return Err(Error::InvalidPacket( - "The sequence is invalid. The packet might have been \ - already received" - .to_owned(), - )); - } - match verify_next_sequence_recv( - self, - height, - &connection, - packet, - next_sequence_recv, - &proofs, - ) { - Ok(_) => Ok(()), - Err(e) => Err(Error::ProofVerificationFailure(e)), - } - } else { - match verify_packet_receipt_absence( - self, - height, - &connection, - packet, - &proofs, - ) { - Ok(_) => Ok(()), - Err(e) => Err(Error::ProofVerificationFailure(e)), - } - } - } - - pub(super) fn check_timeout( - &self, - client_id: &ClientId, - current_height: Height, - packet: &Packet, - ) -> Result<()> { - // timeout timestamp - let consensus_state = - match self.client_consensus_state(client_id, current_height) { - Ok(c) => c, - Err(_) => { - return Err(Error::InvalidClient(format!( - "The client consensus state doesn't exist: ID {}, \ - Height {}", - client_id, current_height - ))); - } - }; - let current_timestamp = consensus_state.timestamp(); - - if packet.timed_out(&current_timestamp, current_height) { - Err(Error::InvalidPacket(format!( - "The packet has timed out: Timeout height {}, Timeout \ - timestamp {}, Current height {}, Current timestamp {}", - packet.timeout_height, - packet.timeout_timestamp, - current_height, - current_timestamp - ))) - } else { - Ok(()) - } - } - - fn update_denom(&self, packet: &mut Packet) -> Result<()> { - if let Ok(mut data) = - serde_json::from_slice::<FungibleTokenPacketData>(&packet.data) - { - if let Some(token_hash) = token_hash_from_denom(&data.denom) - .map_err(|e| { - Error::Denom(format!("Invalid denom: error {}", e)) - })? 
- { - let denom_key = ibc_denom_key(token_hash); - let denom_bytes = match self.ctx.read_bytes_pre(&denom_key) { - Ok(Some(v)) => v, - _ => { - return Err(Error::Denom(format!( - "No original denom: denom_key {}", - denom_key - ))); - } - }; - let denom = std::str::from_utf8(&denom_bytes).map_err(|e| { - Error::Denom(format!( - "Decoding the denom failed: denom_key {}, error {}", - denom_key, e - )) - })?; - data.denom = denom.to_string(); - packet.data = serde_json::to_vec(&data) - .expect("encoding the packet data shouldn't fail"); - } - } - Ok(()) - } -} - -/// The proof for the counterpart channel should be in proofs.other_proof -/// `verify_channel_proofs()` requires the proof is in proofs.object_proof -fn make_proofs_for_channel(proofs: &Proofs) -> Result<Proofs> { - let proof_closed = match proofs.other_proof() { - Some(p) => p.clone(), - None => { - return Err(Error::Proof( - "No proof for the counterpart channel".to_string(), - )); - } - }; - Proofs::new(proof_closed, None, None, None, proofs.height()).map_err(|e| { - Error::Proof(format!( - "Creating Proofs for the counterpart channel failed: error {}", - e - )) - }) -} - -impl From<IbcStorageError> for Error { - fn from(err: IbcStorageError) -> Self { - Self::IbcStorage(err) - } -} - -impl From<IbcDataError> for Error { - fn from(err: IbcDataError) -> Self { - Self::InvalidIbcData(err) - } -} - -impl From<std::io::Error> for Error { - fn from(err: std::io::Error) -> Self { - Self::DecodingTxData(err) - } -} diff --git a/shared/src/ledger/ibc/vp/port.rs b/shared/src/ledger/ibc/vp/port.rs deleted file mode 100644 index 94aa82405f6..00000000000 --- a/shared/src/ledger/ibc/vp/port.rs +++ /dev/null @@ -1,227 +0,0 @@ -//! IBC validity predicate for port module -use std::str::FromStr; - -use thiserror::Error; - -use super::super::storage::{ - capability, capability_index_key, capability_key, is_capability_index_key, - port_id, port_key, Error as IbcStorageError, -}; -use super::{Ibc, StateChange}; -use crate::ibc::core::ics04_channel::context::ChannelReader; -use crate::ibc::core::ics05_port::capabilities::{ - Capability, CapabilityName, PortCapability, -}; -use crate::ibc::core::ics05_port::context::{CapabilityReader, PortReader}; -use crate::ibc::core::ics05_port::error::Error as Ics05Error; -use crate::ibc::core::ics24_host::identifier::PortId; -use crate::ibc::core::ics26_routing::context::ModuleId; -use crate::ledger::native_vp::VpEnv; -use crate::ledger::storage::{self as ledger_storage, StorageHasher}; -use crate::types::storage::Key; -use crate::vm::WasmCacheAccess; - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("State change error: {0}")] - InvalidStateChange(String), - #[error("Port error: {0}")] - InvalidPort(String), - #[error("Capability error: {0}")] - NoCapability(String), - #[error("IBC storage error: {0}")] - IbcStorage(IbcStorageError), -} - -/// IBC port functions result -pub type Result<T> = std::result::Result<T, Error>; -/// ConnectionReader result -type Ics05Result<T> = core::result::Result<T, Ics05Error>; - -const MODULE_ID: &str = "ledger"; - -impl<'a, DB, H, CA> Ibc<'a, DB, H, CA> -where - DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - pub(super) fn validate_port(&self, key: &Key) -> Result<()> { - let port_id = port_id(key)?; - match self.get_port_state_change(&port_id)? 
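// A port key is only acceptable when newly created and backed by an
// authenticated capability. Capability names follow the "ports/{port_id}"
// convention (e.g. "ports/transfer"), which get_port_id() strips off again
// near the end of this module.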
{ - StateChange::Created => { - match self.authenticated_capability(&port_id) { - Ok(_) => Ok(()), - Err(e) => Err(Error::InvalidPort(format!( - "The port is not authenticated: ID {}, {}", - port_id, e - ))), - } - } - _ => Err(Error::InvalidPort(format!( - "The state change of the port is invalid: Port {}", - port_id - ))), - } - } - - fn get_port_state_change(&self, port_id: &PortId) -> Result<StateChange> { - let key = port_key(port_id); - self.get_state_change(&key) - .map_err(|e| Error::InvalidStateChange(e.to_string())) - } - - pub(super) fn validate_capability(&self, key: &Key) -> Result<()> { - if is_capability_index_key(key) { - if self.capability_index_pre()? < self.capability_index()? { - Ok(()) - } else { - Err(Error::InvalidPort( - "The capability index is invalid".to_owned(), - )) - } - } else { - match self - .get_state_change(key) - .map_err(|e| Error::InvalidStateChange(e.to_string()))? - { - StateChange::Created => { - let cap = capability(key)?; - let port_id = self.get_port_by_capability(&cap)?; - match self.lookup_module_by_port(&port_id) { - Ok((_, c)) if c == cap.into() => Ok(()), - Ok(_) => Err(Error::InvalidPort(format!( - "The port is invalid: ID {}", - port_id - ))), - Err(_) => Err(Error::NoCapability(format!( - "The capability is not mapped: Port {}", - port_id - ))), - } - } - _ => Err(Error::InvalidStateChange(format!( - "The state change of the capability is invalid: key {}", - key - ))), - } - } - } - - fn capability_index_pre(&self) -> Result<u64> { - let key = capability_index_key(); - self.read_counter_pre(&key) - .map_err(|e| Error::NoCapability(e.to_string())) - } - - fn capability_index(&self) -> Result<u64> { - let key = capability_index_key(); - self.read_counter(&key).map_err(|e| { - Error::InvalidPort(format!( - "The capability index doesn't exist: {}", - e - )) - }) - } - - fn get_port_by_capability(&self, cap: &Capability) -> Result<PortId> { - let key = capability_key(cap.index()); - match self.ctx.read_bytes_post(&key) { - Ok(Some(value)) => { - let id = std::str::from_utf8(&value).map_err(|e| { - Error::InvalidPort(format!( - "Decoding the port ID failed: {}", - e - )) - })?; - PortId::from_str(id).map_err(|e| { - Error::InvalidPort(format!( - "Decoding the port ID failed: {}", - e - )) - }) - } - Ok(None) => Err(Error::InvalidPort( - "The capability is not mapped to any port".to_owned(), - )), - Err(e) => Err(Error::InvalidPort(format!( - "Reading the port failed {}", - e - ))), - } - } -} - -impl<'a, DB, H, CA> PortReader for Ibc<'a, DB, H, CA> -where - DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - fn lookup_module_by_port( - &self, - port_id: &PortId, - ) -> Ics05Result<(ModuleId, PortCapability)> { - let key = port_key(port_id); - match self.ctx.read_bytes_post(&key) { - Ok(Some(value)) => { - let index: [u8; 8] = value - .try_into() - .map_err(|_| Ics05Error::implementation_specific())?; - let index = u64::from_be_bytes(index); - let module_id = ModuleId::new(MODULE_ID.into()) - .expect("Creating the module ID shouldn't fail"); - Ok((module_id, Capability::from(index).into())) - } - Ok(None) => Err(Ics05Error::unknown_port(port_id.clone())), - Err(_) => Err(Ics05Error::implementation_specific()), - } - } -} - -impl<'a, DB, H, CA> CapabilityReader for Ibc<'a, DB, H, CA> -where - DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - fn get_capability(&self, name: &CapabilityName) -> 
Ics05Result<Capability> { - let port_id = get_port_id(name)?; - let (_, capability) = self.lookup_module_by_port(&port_id)?; - Ok(capability.into()) - } - - fn authenticate_capability( - &self, - name: &CapabilityName, - capability: &Capability, - ) -> Ics05Result<()> { - // check if the capability can be read by the name and the port ID is - // read by the capability - if *capability == self.get_capability(name)? - && self - .get_port_by_capability(capability) - .map_err(|_| Ics05Error::implementation_specific())? - == get_port_id(name)? - { - Ok(()) - } else { - Err(Ics05Error::unknown_port(get_port_id(name)?)) - } - } -} - -fn get_port_id(name: &CapabilityName) -> Ics05Result<PortId> { - match name.to_string().strip_prefix("ports/") { - Some(s) => PortId::from_str(s) - .map_err(|_| Ics05Error::implementation_specific()), - None => Err(Ics05Error::implementation_specific()), - } -} - -impl From<IbcStorageError> for Error { - fn from(err: IbcStorageError) -> Self { - Self::IbcStorage(err) - } -} diff --git a/shared/src/ledger/ibc/vp/sequence.rs b/shared/src/ledger/ibc/vp/sequence.rs deleted file mode 100644 index dfefaab5b9a..00000000000 --- a/shared/src/ledger/ibc/vp/sequence.rs +++ /dev/null @@ -1,249 +0,0 @@ -//! IBC validity predicate for sequences - -use namada_core::ledger::ibc::actions::packet_from_message; -use thiserror::Error; - -use super::super::storage::{port_channel_id, Error as IbcStorageError}; -use super::Ibc; -use crate::ibc::core::ics04_channel::channel::Order; -use crate::ibc::core::ics04_channel::context::ChannelReader; -use crate::ibc::core::ics24_host::identifier::PortChannelId; -use crate::ledger::storage::{self as ledger_storage, StorageHasher}; -use crate::types::ibc::data::{Error as IbcDataError, IbcMessage}; -use crate::types::storage::Key; -use crate::vm::WasmCacheAccess; - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("Key error: {0}")] - InvalidKey(String), - #[error("Channel error: {0}")] - InvalidChannel(String), - #[error("Sequence error: {0}")] - InvalidSequence(String), - #[error("Packet error: {0}")] - InvalidPacket(String), - #[error("Decoding TX data error: {0}")] - DecodingTxData(std::io::Error), - #[error("IBC data error: {0}")] - InvalidIbcData(IbcDataError), - #[error("IBC storage error: {0}")] - IbcStorage(IbcStorageError), -} - -/// IBC packet functions result -pub type Result<T> = std::result::Result<T, Error>; - -impl<'a, DB, H, CA> Ibc<'a, DB, H, CA> -where - DB: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - pub(super) fn validate_sequence_send( - &self, - key: &Key, - tx_data: &[u8], - ) -> Result<()> { - let port_channel_id = port_channel_id(key)?; - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_transfer()?; - // make a packet - let channel = self - .channel_end(&( - port_channel_id.port_id.clone(), - port_channel_id.channel_id, - )) - .map_err(|e| Error::InvalidChannel(e.to_string()))?; - let next_seq_pre = self - .get_next_sequence_send_pre(&port_channel_id) - .map_err(|e| Error::InvalidSequence(e.to_string()))?; - let packet = - packet_from_message(&msg, next_seq_pre, channel.counterparty()); - let next_seq = self - .get_next_sequence_send(&( - port_channel_id.port_id.clone(), - port_channel_id.channel_id, - )) - .map_err(|_| { - Error::InvalidSequence( - "The nextSequenceSend doesn't exit".to_owned(), - ) - })?; - if u64::from(next_seq_pre) + 1 != u64::from(next_seq) { - return Err(Error::InvalidSequence( - "The nextSequenceSend is 
invalid".to_owned(), - )); - } - // when the ordered channel, the sequence number should be equal to - // nextSequenceSend - if self.is_ordered_channel(&port_channel_id)? - && packet.sequence != next_seq_pre - { - return Err(Error::InvalidPacket( - "The packet sequence is invalid".to_owned(), - )); - } - // The commitment should have been stored - let commitment_key = ( - port_channel_id.port_id, - port_channel_id.channel_id, - packet.sequence, - ); - self.get_packet_commitment(&commitment_key).map_err(|_| { - Error::InvalidSequence(format!( - "The commitement doesn't exist: Port/Channel {}/{}, Sequence \ - {}", - commitment_key.0, commitment_key.1, commitment_key.2, - )) - })?; - Ok(()) - } - - pub(super) fn validate_sequence_recv( - &self, - key: &Key, - tx_data: &[u8], - ) -> Result<()> { - let port_channel_id = port_channel_id(key)?; - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_recv_packet()?; - let packet = &msg.packet; - let next_seq_pre = self - .get_next_sequence_recv_pre(&port_channel_id) - .map_err(|e| Error::InvalidSequence(e.to_string()))?; - let next_seq = self - .get_next_sequence_recv(&( - port_channel_id.port_id.clone(), - port_channel_id.channel_id, - )) - .map_err(|_| { - Error::InvalidSequence( - "The nextSequenceRecv doesn't exist".to_owned(), - ) - })?; - if u64::from(next_seq_pre) + 1 != u64::from(next_seq) { - return Err(Error::InvalidSequence( - "The nextSequenceRecv is invalid".to_owned(), - )); - } - // when the ordered channel, the sequence number should be equal to - // nextSequenceRecv - if self.is_ordered_channel(&port_channel_id)? - && packet.sequence != next_seq_pre - { - return Err(Error::InvalidPacket( - "The packet sequence is invalid".to_owned(), - )); - } - // The receipt and the receipt should have been stored - let key = ( - port_channel_id.port_id, - port_channel_id.channel_id, - packet.sequence, - ); - self.get_packet_receipt(&key).map_err(|_| { - Error::InvalidSequence(format!( - "The receipt doesn't exist: Port/Channel {}/{}, Sequence {}", - key.0, key.1, key.2, - )) - })?; - self.get_packet_acknowledgement(&key).map_err(|_| { - Error::InvalidSequence(format!( - "The acknowledgment doesn't exist: Port/Channel {}/{}, \ - Sequence {}", - key.0, key.1, key.2, - )) - })?; - Ok(()) - } - - pub(super) fn validate_sequence_ack( - &self, - key: &Key, - tx_data: &[u8], - ) -> Result<()> { - let port_channel_id = port_channel_id(key)?; - let ibc_msg = IbcMessage::decode(tx_data)?; - let msg = ibc_msg.msg_acknowledgement()?; - let packet = &msg.packet; - let next_seq_pre = self - .get_next_sequence_ack_pre(&port_channel_id) - .map_err(|e| Error::InvalidSequence(e.to_string()))?; - let next_seq = self - .get_next_sequence_ack(&( - port_channel_id.port_id.clone(), - port_channel_id.channel_id, - )) - .map_err(|_| { - Error::InvalidSequence( - "The nextSequenceAck doesn't exist".to_owned(), - ) - })?; - if u64::from(next_seq_pre) + 1 != u64::from(next_seq) { - return Err(Error::InvalidSequence( - "The sequence number is invalid".to_owned(), - )); - } - // when the ordered channel, the sequence number should be equal to - // nextSequenceAck - if self.is_ordered_channel(&port_channel_id)? 
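// Worked example for the ordered-channel check below: if nextSequenceAck was
// 5 before this tx and 6 after, only an ack for packet.sequence == 5 is
// acceptable; anything else indicates a skipped or replayed packet.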
- && packet.sequence != next_seq_pre - { - return Err(Error::InvalidPacket( - "The packet sequence is invalid".to_owned(), - )); - } - // The commitment should have been deleted - let commitment_key = ( - port_channel_id.port_id, - port_channel_id.channel_id, - packet.sequence, - ); - if self.get_packet_commitment(&commitment_key).is_ok() { - return Err(Error::InvalidSequence(format!( - "The commitement hasn't been deleted yet: Port/Channel {}/{}, \ - Sequence {}", - commitment_key.0, commitment_key.1, commitment_key.2, - ))); - } - Ok(()) - } - - pub(super) fn is_ordered_channel( - &self, - port_channel_id: &PortChannelId, - ) -> Result<bool> { - let channel = self - .channel_end(&( - port_channel_id.port_id.clone(), - port_channel_id.channel_id, - )) - .map_err(|_| { - Error::InvalidChannel(format!( - "The channel doesn't exist: Port/Channel {}", - port_channel_id - )) - })?; - Ok(channel.order_matches(&Order::Ordered)) - } -} - -impl From<IbcStorageError> for Error { - fn from(err: IbcStorageError) -> Self { - Self::IbcStorage(err) - } -} - -impl From<IbcDataError> for Error { - fn from(err: IbcDataError) -> Self { - Self::InvalidIbcData(err) - } -} - -impl From<std::io::Error> for Error { - fn from(err: std::io::Error) -> Self { - Self::DecodingTxData(err) - } -} diff --git a/shared/src/ledger/ibc/vp/token.rs b/shared/src/ledger/ibc/vp/token.rs index 455a0a49d5b..b735ababe6b 100644 --- a/shared/src/ledger/ibc/vp/token.rs +++ b/shared/src/ledger/ibc/vp/token.rs @@ -1,25 +1,30 @@ //! IBC token transfer validation as a native validity predicate use std::collections::{BTreeSet, HashMap, HashSet}; -use std::str::FromStr; use borsh::BorshDeserialize; +use prost::Message; use thiserror::Error; -use crate::ibc::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; +use crate::ibc::applications::transfer::coin::PrefixedCoin; +use crate::ibc::applications::transfer::error::TokenTransferError; +use crate::ibc::applications::transfer::msgs::transfer::{ + MsgTransfer, TYPE_URL as MSG_TRANSFER_TYPE_URL, +}; +use crate::ibc::applications::transfer::packet::PacketData; +use crate::ibc::applications::transfer::{ + is_receiver_chain_source, is_sender_chain_source, +}; use crate::ibc::core::ics04_channel::msgs::PacketMsg; use crate::ibc::core::ics04_channel::packet::Packet; -use crate::ibc::core::ics26_routing::msgs::Ics26Envelope; +use crate::ibc::core::ics26_routing::error::RouterError; +use crate::ibc::core::ics26_routing::msgs::MsgEnvelope; +use crate::ibc_proto::google::protobuf::Any; use crate::ledger::ibc::storage as ibc_storage; use crate::ledger::native_vp::{self, Ctx, NativeVp, VpEnv}; use crate::ledger::storage::{self as ledger_storage, StorageHasher}; use crate::proto::SignedTxData; -use crate::types::address::{ - Address, DecodeError as AddressError, InternalAddress, -}; -use crate::types::ibc::data::{ - Error as IbcDataError, FungibleTokenPacketData, IbcMessage, -}; +use crate::types::address::{Address, InternalAddress}; use crate::types::storage::Key; use crate::types::token::{self, Amount, AmountParseError}; use crate::vm::WasmCacheAccess; @@ -30,31 +35,33 @@ pub enum Error { #[error("Native VP error: {0}")] NativeVpError(native_vp::Error), #[error("IBC message error: {0}")] - IbcMessage(IbcDataError), - #[error("Invalid message error")] + IbcMessage(RouterError), + #[error("Invalid message")] InvalidMessage, - #[error("Invalid address error: {0}")] - Address(AddressError), - #[error("Token error")] - NoToken, #[error("Parsing amount error: {0}")] Amount(AmountParseError), #[error("Decoding error: {0}")] 
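// These decoding variants cover the reworked message path: tx data is now a
// raw protobuf Any routed by type_url, roughly (a sketch of the flow in
// validate_tx below):
//
//   Any::decode(tx_data)? -> MsgTransfer::try_from(any)?   // transfer URL
//                         -> MsgEnvelope::try_from(any)?   // everything else
//
// so both prost and serde_json failures can surface during validation.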
Decoding(std::io::Error), + #[error("Decoding IBC data error: {0}")] + DecodingIbcData(prost::DecodeError), #[error("Decoding PacketData error: {0}")] DecodingPacketData(serde_json::Error), - #[error("Invalid token transfer error: {0}")] - TokenTransfer(String), #[error("IBC message is required as transaction data")] NoTxData, - #[error("Invalid denom error: {0}")] + #[error("Invalid denom: {0}")] Denom(String), + #[error("Invalid MsgTransfer: {0}")] + MsgTransfer(TokenTransferError), + #[error("Invalid token transfer: {0}")] + TokenTransfer(String), } /// Result for IBC token VP pub type Result<T> = std::result::Result<T, Error>; -/// IBC token native VP for IBC token transfer +/// IBC token VP to validate the transfer for an IBC-specific account. The +/// account is a sub-prefixed account with an IBC token hash, or a normal +/// account for `IbcEscrow`, `IbcBurn`, or `IbcMint`. pub struct IbcToken<'a, DB, H, CA> where DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, @@ -90,14 +97,11 @@ where .iter() .filter(|k| { matches!( - token::is_any_multitoken_balance_key(k), - Some(( - _, - Address::Internal( - InternalAddress::IbcEscrow - | InternalAddress::IbcBurn - | InternalAddress::IbcMint - ) + token::is_any_token_balance_key(k), + Some(Address::Internal( + InternalAddress::IbcEscrow + | InternalAddress::IbcBurn + | InternalAddress::IbcMint )) ) }) @@ -139,22 +143,33 @@ where } // Check the message - let ibc_msg = IbcMessage::decode(tx_data).map_err(Error::IbcMessage)?; - match &ibc_msg.0 { - Ics26Envelope::Ics20Msg(msg) => self.validate_sending_token(msg), - Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg)) => { - self.validate_receiving_token(&msg.packet) - } - Ics26Envelope::Ics4PacketMsg(PacketMsg::AckPacket(msg)) => { - self.validate_refunding_token(&msg.packet) + let ibc_msg = + Any::decode(&tx_data[..]).map_err(Error::DecodingIbcData)?; + match ibc_msg.type_url.as_str() { + MSG_TRANSFER_TYPE_URL => { + let msg = MsgTransfer::try_from(ibc_msg) + .map_err(Error::MsgTransfer)?; + self.validate_sending_token(&msg) } - Ics26Envelope::Ics4PacketMsg(PacketMsg::ToPacket(msg)) => { - self.validate_refunding_token(&msg.packet) - } - Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket(msg)) => { - self.validate_refunding_token(&msg.packet) + _ => { + let envelope: MsgEnvelope = + ibc_msg.try_into().map_err(Error::IbcMessage)?; + match envelope { + MsgEnvelope::Packet(PacketMsg::Recv(msg)) => { + self.validate_receiving_token(&msg.packet) + } + MsgEnvelope::Packet(PacketMsg::Ack(msg)) => { + self.validate_refunding_token(&msg.packet) + } + MsgEnvelope::Packet(PacketMsg::Timeout(msg)) => { + self.validate_refunding_token(&msg.packet) + } + MsgEnvelope::Packet(PacketMsg::TimeoutOnClose(msg)) => { + self.validate_refunding_token(&msg.packet) + } + _ => Err(Error::InvalidMessage), + } } - _ => Err(Error::InvalidMessage), } } } @@ -166,15 +181,21 @@ where CA: 'static + WasmCacheAccess, { fn validate_sending_token(&self, msg: &MsgTransfer) -> Result<bool> { - let mut data = FungibleTokenPacketData::from(msg.clone()); + let mut coin = msg.token.clone(); + // lookup the original denom with the IBC token hash if let Some(token_hash) = - ibc_storage::token_hash_from_denom(&data.denom).map_err(|e| { + ibc_storage::token_hash_from_denom(&coin.denom).map_err(|e| { Error::Denom(format!("Invalid denom: error {}", e)) })? 
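// Illustrative flow (names made up): a denom that embeds an IBC token hash
// carries only that hash on-chain; the full original trace path, e.g.
// "transfer/channel-0/<token>", is read back from the denom key below before
// the coin can be parsed as a PrefixedCoin.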
{ let denom_key = ibc_storage::ibc_denom_key(token_hash); - let denom_bytes = match self.ctx.read_bytes_pre(&denom_key) { - Ok(Some(v)) => v, + coin.denom = match self.ctx.read_bytes_pre(&denom_key) { + Ok(Some(v)) => String::from_utf8(v).map_err(|e| { + Error::Denom(format!( + "Decoding the denom string failed: {}", + e + )) + })?, _ => { return Err(Error::Denom(format!( "No original denom: denom_key {}", @@ -182,73 +203,22 @@ where ))); } }; - let denom = std::str::from_utf8(&denom_bytes).map_err(|e| { - Error::Denom(format!( - "Decoding the denom failed: denom_key {}, error {}", - denom_key, e - )) - })?; - data.denom = denom.to_string(); } - let token = ibc_storage::token(&data.denom) + let coin = PrefixedCoin::try_from(coin).map_err(Error::MsgTransfer)?; + let token = ibc_storage::token(coin.denom.to_string()) .map_err(|e| Error::Denom(e.to_string()))?; - let amount = Amount::from_str(&data.amount).map_err(Error::Amount)?; - - let denom = if let Some(denom) = data - .denom - .strip_prefix(&format!("{}/", ibc_storage::MULTITOKEN_STORAGE_KEY)) - { - let denom_key = ibc_storage::ibc_denom_key(denom); - match self.ctx.read_bytes_pre(&denom_key)? { - Some(v) => std::str::from_utf8(&v) - .map_err(|e| { - Error::TokenTransfer(format!( - "Decoding the denom failed: denom_key {}, error {}", - denom_key, e - )) - })? - .to_string(), - None => { - return Err(Error::TokenTransfer(format!( - "No original denom: denom_key {}", - denom_key - ))); - } - } - } else { - data.denom.clone() - }; + let amount = Amount::try_from(coin.amount).map_err(Error::Amount)?; // check the denomination field - let prefix = format!( - "{}/{}/", - msg.source_port.clone(), - msg.source_channel.clone() - ); - let key_prefix = ibc_storage::ibc_account_prefix( - &msg.source_port, - &msg.source_channel, - &token, - ); - - let change = if denom.starts_with(&prefix) { - // sink zone - // check the amount of the token has been burned - let target_key = token::multitoken_balance_key( - &key_prefix, - &Address::Internal(InternalAddress::IbcBurn), - ); - let post = try_decode_token_amount( - self.ctx.read_bytes_temp(&target_key)?, - )? - .unwrap_or_default(); - // the previous balance of the burn address should be zero - post.change() - } else { + let change = if is_sender_chain_source( + msg.port_id_on_a.clone(), + msg.chan_id_on_a.clone(), + &coin.denom, + ) { // source zone // check the amount of the token has been escrowed - let target_key = token::multitoken_balance_key( - &key_prefix, + let target_key = token::balance_key( + &token, &Address::Internal(InternalAddress::IbcEscrow), ); let pre = @@ -259,41 +229,48 @@ where )? .unwrap_or_default(); post.change() - pre.change() + } else { + // sink zone + // check the amount of the token has been burned + let target_key = token::balance_key( + &token, + &Address::Internal(InternalAddress::IbcBurn), + ); + let post = try_decode_token_amount( + self.ctx.read_bytes_temp(&target_key)?, + )? 
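// The zone split hinges on a prefix test over the trace path:
// is_sender_chain_source is true when the denom is NOT already prefixed with
// "{port_id_on_a}/{chan_id_on_a}/", meaning the token originates here
// (source zone, the escrow balance must grow by the amount); a prefixed
// denom is returning home (sink zone) and must be burned instead.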
+ .unwrap_or_default(); + // the previous balance of the burn address should be zero + post.change() }; if change == amount.change() { Ok(true) } else { Err(Error::TokenTransfer(format!( - "Sending the token is invalid: {}", - data + "Sending the token is invalid: coin {}", + coin, ))) } } fn validate_receiving_token(&self, packet: &Packet) -> Result<bool> { - let data: FungibleTokenPacketData = - serde_json::from_slice(&packet.data) - .map_err(Error::DecodingPacketData)?; - let token = ibc_storage::token(&data.denom) + let data = serde_json::from_slice::<PacketData>(&packet.data) + .map_err(Error::DecodingPacketData)?; + let token = ibc_storage::token(data.token.denom.to_string()) .map_err(|e| Error::Denom(e.to_string()))?; - let amount = Amount::from_str(&data.amount).map_err(Error::Amount)?; + let amount = + Amount::try_from(data.token.amount).map_err(Error::Amount)?; - let prefix = format!( - "{}/{}/", - packet.source_port.clone(), - packet.source_channel.clone() - ); - let key_prefix = ibc_storage::ibc_account_prefix( - &packet.destination_port, - &packet.destination_channel, - &token, - ); - let change = if data.denom.starts_with(&prefix) { + let change = if is_receiver_chain_source( + packet.port_id_on_a.clone(), + packet.chan_id_on_a.clone(), + &data.token.denom, + ) { // this chain is the source // check the amount of the token has been unescrowed - let source_key = token::multitoken_balance_key( - &key_prefix, + let source_key = token::balance_key( + &token, &Address::Internal(InternalAddress::IbcEscrow), ); let pre = @@ -307,8 +284,8 @@ where } else { // the sender is the source // check the amount of the token has been minted - let source_key = token::multitoken_balance_key( - &key_prefix, + let source_key = token::balance_key( + &token, &Address::Internal(InternalAddress::IbcMint), ); let post = try_decode_token_amount( @@ -323,47 +300,29 @@ where Ok(true) } else { Err(Error::TokenTransfer(format!( - "Receivinging the token is invalid: {}", - data + "Receiving the token is invalid: coin {}", + data.token ))) } } fn validate_refunding_token(&self, packet: &Packet) -> Result<bool> { - let data: FungibleTokenPacketData = - serde_json::from_slice(&packet.data) - .map_err(Error::DecodingPacketData)?; - let token_str = data.denom.split('/').last().ok_or(Error::NoToken)?; - let token = Address::decode(token_str).map_err(Error::Address)?; - let amount = Amount::from_str(&data.amount).map_err(Error::Amount)?; + let data = serde_json::from_slice::<PacketData>(&packet.data) + .map_err(Error::DecodingPacketData)?; + let token = ibc_storage::token(data.token.denom.to_string()) + .map_err(|e| Error::Denom(e.to_string()))?; + let amount = + Amount::try_from(data.token.amount).map_err(Error::Amount)?; // check the denom field - let prefix = format!( - "{}/{}/", - packet.source_port.clone(), - packet.source_channel.clone() - ); - let key_prefix = ibc_storage::ibc_account_prefix( - &packet.source_port, - &packet.source_channel, - &token, - ); - let change = if data.denom.starts_with(&prefix) { - // sink zone: mint the token for the refund - let source_key = token::multitoken_balance_key( - &key_prefix, - &Address::Internal(InternalAddress::IbcMint), - ); - let post = try_decode_token_amount( - self.ctx.read_bytes_temp(&source_key)?, - )? 
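// Mint accounting runs downward from the ceiling: the IbcMint balance starts
// at Amount::max(), so the amount minted so far is max - balance. E.g. if
// the post-balance is max - 100, exactly 100 units were minted for the
// refund, which must equal the packet amount.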
- .unwrap_or_default(); - // the previous balance of the mint address should be the maximum - Amount::max().change() - post.change() - } else { + let change = if is_sender_chain_source( + packet.port_id_on_a.clone(), + packet.chan_id_on_a.clone(), + &data.token.denom, + ) { // source zone: unescrow the token for the refund - let source_key = token::multitoken_balance_key( - &key_prefix, + let source_key = token::balance_key( + &token, &Address::Internal(InternalAddress::IbcEscrow), ); let pre = @@ -374,14 +333,26 @@ where )? .unwrap_or_default(); pre.change() - post.change() + } else { + // sink zone: mint the token for the refund + let source_key = token::balance_key( + &token, + &Address::Internal(InternalAddress::IbcMint), + ); + let post = try_decode_token_amount( + self.ctx.read_bytes_temp(&source_key)?, + )? + .unwrap_or_default(); + // the previous balance of the mint address should be the maximum + Amount::max().change() - post.change() }; if change == amount.change() { Ok(true) } else { Err(Error::TokenTransfer(format!( - "Refunding the token is invalid: {}", - data, + "Refunding the token is invalid: coin {}", + data.token, ))) } } diff --git a/shared/src/ledger/masp.rs b/shared/src/ledger/masp.rs index 10c63db4cb2..2c43028333d 100644 --- a/shared/src/ledger/masp.rs +++ b/shared/src/ledger/masp.rs @@ -1,22 +1,71 @@ //! MASP verification wrappers. +use std::collections::hash_map::Entry; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::env; +use std::fmt::Debug; use std::fs::File; +#[cfg(feature = "masp-tx-gen")] use std::ops::Deref; use std::path::PathBuf; +use async_trait::async_trait; use bellman::groth16::{prepare_verifying_key, PreparedVerifyingKey}; use bls12_381::Bls12; +// use async_std::io::prelude::WriteExt; +// use async_std::io::{self}; +use borsh::{BorshDeserialize, BorshSerialize}; +use itertools::Either; use masp_primitives::asset_type::AssetType; use masp_primitives::consensus::BranchId::Sapling; +use masp_primitives::consensus::{BranchId, TestNetwork}; +use masp_primitives::convert::AllowedConversion; +use masp_primitives::ff::PrimeField; +use masp_primitives::group::cofactor::CofactorGroup; +use masp_primitives::keys::FullViewingKey; +#[cfg(feature = "masp-tx-gen")] +use masp_primitives::legacy::TransparentAddress; +use masp_primitives::merkle_tree::{ + CommitmentTree, IncrementalWitness, MerklePath, +}; +use masp_primitives::note_encryption::*; +use masp_primitives::primitives::{Diversifier, Note, ViewingKey}; use masp_primitives::redjubjub::PublicKey; +use masp_primitives::sapling::Node; +#[cfg(feature = "masp-tx-gen")] +use masp_primitives::transaction::builder::{self, secp256k1, *}; use masp_primitives::transaction::components::{ - ConvertDescription, OutputDescription, SpendDescription, + Amount, ConvertDescription, OutputDescription, SpendDescription, }; +#[cfg(feature = "masp-tx-gen")] +use masp_primitives::transaction::components::{OutPoint, TxOut}; use masp_primitives::transaction::{ signature_hash_data, Transaction, SIGHASH_ALL, }; +use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; +use masp_proofs::prover::LocalTxProver; use masp_proofs::sapling::SaplingVerificationContext; +use namada_core::types::transaction::AffineCurve; +#[cfg(feature = "masp-tx-gen")] +use rand_core::{CryptoRng, OsRng, RngCore}; +#[cfg(feature = "masp-tx-gen")] +use sha2::Digest; + +use crate::ledger::queries::Client; +use crate::ledger::{args, rpc}; +use crate::proto::{SignedTxData, Tx}; +use crate::tendermint_rpc::query::Query; +use 
crate::tendermint_rpc::Order;
+use crate::types::address::{masp, Address};
+use crate::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress};
+use crate::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex};
+use crate::types::token;
+use crate::types::token::{
+    Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX,
+};
+use crate::types::transaction::{
+    process_tx, DecryptedTx, EllipticCurve, PairingEngine, TxType, WrapperTx,
+};

 /// Env var to point to a dir with MASP parameters. When not specified,
 /// the default OS specific path is used.
@@ -200,3 +249,1218 @@ pub fn get_params_dir() -> PathBuf {
         masp_proofs::default_params_folder().unwrap()
     }
 }
+
+/// Abstracts platform specific details away from the logic of shielded pool
+/// operations.
+#[async_trait]
+pub trait ShieldedUtils:
+    Sized + BorshDeserialize + BorshSerialize + Default + Clone
+{
+    /// The type of the Tendermint client to make queries with
+    type C: crate::ledger::queries::Client + std::marker::Sync;
+
+    /// Get a MASP transaction prover
+    fn local_tx_prover(&self) -> LocalTxProver;
+
+    /// Load up the currently saved ShieldedContext
+    fn load(self) -> std::io::Result<ShieldedContext<Self>>;
+
+    /// Save the given ShieldedContext for future loads
+    fn save(&self, ctx: &ShieldedContext<Self>) -> std::io::Result<()>;
+}
+
+/// Make a ViewingKey that can view notes encrypted by given ExtendedSpendingKey
+pub fn to_viewing_key(esk: &ExtendedSpendingKey) -> FullViewingKey {
+    ExtendedFullViewingKey::from(esk).fvk
+}
+
+/// Generate a valid diversifier, i.e. one that has a diversified base. Return
+/// also this diversified base.
+#[cfg(feature = "masp-tx-gen")]
+pub fn find_valid_diversifier<R: RngCore + CryptoRng>(
+    rng: &mut R,
+) -> (Diversifier, masp_primitives::jubjub::SubgroupPoint) {
+    let mut diversifier;
+    let g_d;
+    // Keep generating random diversifiers until one has a diversified base
+    loop {
+        let mut d = [0; 11];
+        rng.fill_bytes(&mut d);
+        diversifier = Diversifier(d);
+        if let Some(val) = diversifier.g_d() {
+            g_d = val;
+            break;
+        }
+    }
+    (diversifier, g_d)
+}
+
+/// Determine if using the current note would actually bring us closer to our
+/// target
+pub fn is_amount_required(src: Amount, dest: Amount, delta: Amount) -> bool {
+    if delta > Amount::zero() {
+        let gap = dest - src;
+        for (asset_type, value) in gap.components() {
+            if *value > 0 && delta[asset_type] > 0 {
+                return true;
+            }
+        }
+    }
+    false
+}
+
+/// An extension of Option's cloned method for pair types
+fn cloned_pair<T: Clone, U: Clone>((a, b): (&T, &U)) -> (T, U) {
+    (a.clone(), b.clone())
+}
+
+/// Errors that can occur when trying to retrieve pinned transaction
+#[derive(PartialEq, Eq)]
+pub enum PinnedBalanceError {
+    /// No transaction has yet been pinned to the given payment address
+    NoTransactionPinned,
+    /// The supplied viewing key does not recognize payments to given address
+    InvalidViewingKey,
+}
+
+/// Represents the amount used of different conversions
+pub type Conversions =
+    HashMap<AssetType, (AllowedConversion, MerklePath<Node>, i64)>;
+
+/// Represents the changes that were made to a list of transparent accounts
+pub type TransferDelta = HashMap<Address, Amount<Address>>;
+
+/// Represents the changes that were made to a list of shielded accounts
+pub type TransactionDelta = HashMap<ViewingKey, Amount>;
+
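A toy model of the note-selection predicate `is_amount_required` above, with plain integer maps standing in for the multi-asset MASP `Amount` (all names here are illustrative, not part of the patch): a note is only worth adding when it credits some asset for which the accumulated value still falls short of the target.

use std::collections::HashMap;

// Simplified multi-asset amounts: asset id -> signed value.
fn brings_closer(
    acc: &HashMap<u8, i64>,
    target: &HashMap<u8, i64>,
    note: &HashMap<u8, i64>,
) -> bool {
    target.iter().any(|(asset, want)| {
        let have = acc.get(asset).copied().unwrap_or(0);
        let adds = note.get(asset).copied().unwrap_or(0);
        // Useful only if this asset is still short and the note adds to it
        have < *want && adds > 0
    })
}

fn main() {
    let acc = HashMap::from([(0u8, 5i64)]);
    let target = HashMap::from([(0u8, 10i64)]);
    assert!(brings_closer(&acc, &target, &HashMap::from([(0u8, 3i64)])));
    // A note in an unrelated asset does not help
    assert!(!brings_closer(&acc, &target, &HashMap::from([(1u8, 3i64)])));
}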
+/// Represents the current state of the shielded pool from the perspective of
+/// the chosen viewing keys.
+#[derive(BorshSerialize, BorshDeserialize, Debug)]
+pub struct ShieldedContext<U: ShieldedUtils> {
+    /// Location where this shielded context is saved
+    #[borsh_skip]
+    pub utils: U,
+    /// The last transaction index to be processed in this context
+    pub last_txidx: u64,
+    /// The commitment tree produced by scanning all transactions up to tx_pos
+    pub tree: CommitmentTree<Node>,
+    /// Maps viewing keys to applicable note positions
+    pub pos_map: HashMap<ViewingKey, HashSet<usize>>,
+    /// Maps a nullifier to the note position to which it applies
+    pub nf_map: HashMap<[u8; 32], usize>,
+    /// Maps note positions to their corresponding notes
+    pub note_map: HashMap<usize, Note>,
+    /// Maps note positions to their corresponding memos
+    pub memo_map: HashMap<usize, Memo>,
+    /// Maps note positions to the diversifier of their payment address
+    pub div_map: HashMap<usize, Diversifier>,
+    /// Maps note positions to their witness (used to make merkle paths)
+    pub witness_map: HashMap<usize, IncrementalWitness<Node>>,
+    /// Tracks what each transaction does to various account balances
+    pub delta_map: BTreeMap<
+        (BlockHeight, TxIndex),
+        (Epoch, TransferDelta, TransactionDelta),
+    >,
+    /// The set of note positions that have been spent
+    pub spents: HashSet<usize>,
+    /// Maps asset types to their decodings
+    pub asset_types: HashMap<AssetType, (Address, Epoch)>,
+    /// Maps note positions to their corresponding viewing keys
+    pub vk_map: HashMap<usize, ViewingKey>,
+}
+
+/// Default implementation to ease construction of TxContexts. Derive cannot be
+/// used here due to CommitmentTree not implementing Default.
+impl<U: ShieldedUtils> Default for ShieldedContext<U> {
+    fn default() -> ShieldedContext<U> {
+        ShieldedContext::<U> {
+            utils: U::default(),
+            last_txidx: u64::default(),
+            tree: CommitmentTree::empty(),
+            pos_map: HashMap::default(),
+            nf_map: HashMap::default(),
+            note_map: HashMap::default(),
+            memo_map: HashMap::default(),
+            div_map: HashMap::default(),
+            witness_map: HashMap::default(),
+            spents: HashSet::default(),
+            delta_map: BTreeMap::default(),
+            asset_types: HashMap::default(),
+            vk_map: HashMap::default(),
+        }
+    }
+}
+
+impl<U: ShieldedUtils> ShieldedContext<U> {
+    /// Try to load the last saved shielded context from the given context
+    /// directory. If this fails, then leave the current context unchanged.
+    pub fn load(&mut self) -> std::io::Result<()> {
+        let new_ctx = self.utils.clone().load()?;
+        *self = new_ctx;
+        Ok(())
+    }
+
+    /// Save this shielded context into its associated context directory
+    pub fn save(&self) -> std::io::Result<()> {
+        self.utils.save(self)
+    }
+
+    /// Merge data from the given shielded context into the current shielded
+    /// context. It must be the case that the two shielded contexts share the
+    /// same last transaction ID and share identical commitment trees.
+    pub fn merge(&mut self, new_ctx: ShieldedContext<U>) {
+        debug_assert_eq!(self.last_txidx, new_ctx.last_txidx);
+        // Merge by simply extending maps. Identical keys should contain
+        // identical values, so overwriting should not be problematic.
+        self.pos_map.extend(new_ctx.pos_map);
+        self.nf_map.extend(new_ctx.nf_map);
+        self.note_map.extend(new_ctx.note_map);
+        self.memo_map.extend(new_ctx.memo_map);
+        self.div_map.extend(new_ctx.div_map);
+        self.witness_map.extend(new_ctx.witness_map);
+        self.spents.extend(new_ctx.spents);
+        self.asset_types.extend(new_ctx.asset_types);
+        self.vk_map.extend(new_ctx.vk_map);
+        // The deltas are the exception because different keys can reveal
+        // different parts of the same transaction. Hence each delta needs to be
+        // merged separately.
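The per-entry delta merge that follows can be sketched in isolation like so (toy types, not the patch's own; `Delta` here is a plain map): entries for the same (height, index) pair are extended rather than overwritten, because each side may have decrypted different parts of the same transaction.

use std::collections::{BTreeMap, HashMap};

type Delta = HashMap<String, i64>;

fn merge_delta_maps(
    ours: &mut BTreeMap<(u64, u32), Delta>,
    theirs: BTreeMap<(u64, u32), Delta>,
) {
    for (key, new_delta) in theirs {
        // Same (height, index) entry: extend rather than overwrite
        ours.entry(key).or_default().extend(new_delta);
    }
}

fn main() {
    let mut ours: BTreeMap<(u64, u32), Delta> = BTreeMap::new();
    ours.insert((1, 0), HashMap::from([("alice".to_string(), -5)]));
    let mut theirs: BTreeMap<(u64, u32), Delta> = BTreeMap::new();
    theirs.insert((1, 0), HashMap::from([("bob".to_string(), 5)]));
    merge_delta_maps(&mut ours, theirs);
    assert_eq!(ours[&(1, 0)].len(), 2);
}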
+ for ((height, idx), (ep, ntfer_delta, ntx_delta)) in new_ctx.delta_map { + let (_ep, tfer_delta, tx_delta) = self + .delta_map + .entry((height, idx)) + .or_insert((ep, TransferDelta::new(), TransactionDelta::new())); + tfer_delta.extend(ntfer_delta); + tx_delta.extend(ntx_delta); + } + } + + /// Fetch the current state of the multi-asset shielded pool into a + /// ShieldedContext + pub async fn fetch( + &mut self, + client: &U::C, + sks: &[ExtendedSpendingKey], + fvks: &[ViewingKey], + ) { + // First determine which of the keys requested to be fetched are new. + // Necessary because old transactions will need to be scanned for new + // keys. + let mut unknown_keys = Vec::new(); + for esk in sks { + let vk = to_viewing_key(esk).vk; + if !self.pos_map.contains_key(&vk) { + unknown_keys.push(vk); + } + } + for vk in fvks { + if !self.pos_map.contains_key(vk) { + unknown_keys.push(*vk); + } + } + + // If unknown keys are being used, we need to scan older transactions + // for any unspent notes + let (txs, mut tx_iter); + if !unknown_keys.is_empty() { + // Load all transactions accepted until this point + txs = Self::fetch_shielded_transfers(client, 0).await; + tx_iter = txs.iter(); + // Do this by constructing a shielding context only for unknown keys + let mut tx_ctx = Self { + utils: self.utils.clone(), + ..Default::default() + }; + for vk in unknown_keys { + tx_ctx.pos_map.entry(vk).or_insert_with(HashSet::new); + } + // Update this unknown shielded context until it is level with self + while tx_ctx.last_txidx != self.last_txidx { + if let Some(((height, idx), (epoch, tx))) = tx_iter.next() { + tx_ctx.scan_tx(*height, *idx, *epoch, tx); + } else { + break; + } + } + // Merge the context data originating from the unknown keys into the + // current context + self.merge(tx_ctx); + } else { + // Load only transactions accepted from last_txid until this point + txs = Self::fetch_shielded_transfers(client, self.last_txidx).await; + tx_iter = txs.iter(); + } + // Now that we possess the unspent notes corresponding to both old and + // new keys up until tx_pos, proceed to scan the new transactions. + for ((height, idx), (epoch, tx)) in &mut tx_iter { + self.scan_tx(*height, *idx, *epoch, tx); + } + } + + /// Obtain a chronologically-ordered list of all accepted shielded + /// transactions from the ledger. The ledger conceptually stores + /// transactions as a vector. More concretely, the HEAD_TX_KEY location + /// stores the index of the last accepted transaction and each transaction + /// is stored at a key derived from its index. 
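The storage layout described above, a head pointer plus one cell per transaction index, can be modelled with a toy in-memory log (illustrative names, not the actual storage keys):

use std::collections::HashMap;

struct TxLog {
    head: u64,                      // analogue of the HEAD_TX_KEY cell
    cells: HashMap<String, String>, // one cell per transaction index
}

impl TxLog {
    fn append(&mut self, tx: &str) {
        self.cells.insert(format!("tx-{}", self.head), tx.to_string());
        self.head += 1;
    }

    // Fetch everything in [from, head), as fetch_shielded_transfers does
    // with storage queries.
    fn fetch_from(&self, from: u64) -> Vec<&String> {
        (from..self.head)
            .filter_map(|i| self.cells.get(&format!("tx-{}", i)))
            .collect()
    }
}

fn main() {
    let mut log = TxLog { head: 0, cells: HashMap::new() };
    log.append("t0");
    log.append("t1");
    assert_eq!(log.fetch_from(1).len(), 1); // only the new transaction
}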
+ pub async fn fetch_shielded_transfers( + client: &U::C, + last_txidx: u64, + ) -> BTreeMap<(BlockHeight, TxIndex), (Epoch, Transfer)> { + // The address of the MASP account + let masp_addr = masp(); + // Construct the key where last transaction pointer is stored + let head_tx_key = Key::from(masp_addr.to_db_key()) + .push(&HEAD_TX_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + // Query for the index of the last accepted transaction + let head_txidx = + rpc::query_storage_value::(client, &head_tx_key) + .await + .unwrap_or(0); + let mut shielded_txs = BTreeMap::new(); + // Fetch all the transactions we do not have yet + for i in last_txidx..head_txidx { + // Construct the key for where the current transaction is stored + let current_tx_key = Key::from(masp_addr.to_db_key()) + .push(&(TX_KEY_PREFIX.to_owned() + &i.to_string())) + .expect("Cannot obtain a storage key"); + // Obtain the current transaction + let (tx_epoch, tx_height, tx_index, current_tx) = + rpc::query_storage_value::< + U::C, + (Epoch, BlockHeight, TxIndex, Transfer), + >(client, ¤t_tx_key) + .await + .unwrap(); + // Collect the current transaction + shielded_txs.insert((tx_height, tx_index), (tx_epoch, current_tx)); + } + shielded_txs + } + + /// Applies the given transaction to the supplied context. More precisely, + /// the shielded transaction's outputs are added to the commitment tree. + /// Newly discovered notes are associated to the supplied viewing keys. Note + /// nullifiers are mapped to their originating notes. Note positions are + /// associated to notes, memos, and diversifiers. And the set of notes that + /// we have spent are updated. The witness map is maintained to make it + /// easier to construct note merkle paths in other code. See + /// https://zips.z.cash/protocol/protocol.pdf#scan + pub fn scan_tx( + &mut self, + height: BlockHeight, + index: TxIndex, + epoch: Epoch, + tx: &Transfer, + ) { + // Ignore purely transparent transactions + let shielded = if let Some(shielded) = &tx.shielded { + shielded + } else { + return; + }; + // For tracking the account changes caused by this Transaction + let mut transaction_delta = TransactionDelta::new(); + // Listen for notes sent to our viewing keys + for so in &shielded.shielded_outputs { + // Create merkle tree leaf node from note commitment + let node = Node::new(so.cmu.to_repr()); + // Update each merkle tree in the witness map with the latest + // addition + for (_, witness) in self.witness_map.iter_mut() { + witness.append(node).expect("note commitment tree is full"); + } + let note_pos = self.tree.size(); + self.tree + .append(node) + .expect("note commitment tree is full"); + // Finally, make it easier to construct merkle paths to this new + // note + let witness = IncrementalWitness::::from_tree(&self.tree); + self.witness_map.insert(note_pos, witness); + // Let's try to see if any of our viewing keys can decrypt latest + // note + for (vk, notes) in self.pos_map.iter_mut() { + let decres = try_sapling_note_decryption::( + 0, + &vk.ivk().0, + &so.ephemeral_key.into_subgroup().unwrap(), + &so.cmu, + &so.enc_ciphertext, + ); + // So this current viewing key does decrypt this current note... 
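In outline, the decryption dispatch in this loop looks like the following sketch, where a simple string match stands in for `try_sapling_note_decryption` and all names are illustrative: each new output is offered to every tracked viewing key, and the first key that decrypts it claims the note position.

use std::collections::HashMap;

fn scan_output(
    pos_map: &mut HashMap<&'static str, Vec<usize>>,
    note_pos: usize,
    ciphertext: &str,
) {
    for (vk, notes) in pos_map.iter_mut() {
        // Stand-in for trial decryption: a key "decrypts" any payload
        // that names it.
        if ciphertext.contains(*vk) {
            notes.push(note_pos);
            break;
        }
    }
}

fn main() {
    let mut pos_map = HashMap::from([("alice", vec![]), ("bob", vec![])]);
    scan_output(&mut pos_map, 0, "note-for-alice");
    assert_eq!(pos_map["alice"], vec![0]);
    assert!(pos_map["bob"].is_empty());
}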
+ if let Some((note, pa, memo)) = decres { + // Add this note to list of notes decrypted by this viewing + // key + notes.insert(note_pos); + // Compute the nullifier now to quickly recognize when spent + let nf = note.nf(vk, note_pos.try_into().unwrap()); + self.note_map.insert(note_pos, note); + self.memo_map.insert(note_pos, memo); + // The payment address' diversifier is required to spend + // note + self.div_map.insert(note_pos, *pa.diversifier()); + self.nf_map.insert(nf.0, note_pos); + // Note the account changes + let balance = transaction_delta + .entry(*vk) + .or_insert_with(Amount::zero); + *balance += + Amount::from_nonnegative(note.asset_type, note.value) + .expect( + "found note with invalid value or asset type", + ); + self.vk_map.insert(note_pos, *vk); + break; + } + } + } + // Cancel out those of our notes that have been spent + for ss in &shielded.shielded_spends { + // If the shielded spend's nullifier is in our map, then target note + // is rendered unusable + if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { + self.spents.insert(*note_pos); + // Note the account changes + let balance = transaction_delta + .entry(self.vk_map[note_pos]) + .or_insert_with(Amount::zero); + let note = self.note_map[note_pos]; + *balance -= + Amount::from_nonnegative(note.asset_type, note.value) + .expect("found note with invalid value or asset type"); + } + } + // Record the changes to the transparent accounts + let transparent_delta = + Amount::from_nonnegative(tx.token.clone(), u64::from(tx.amount)) + .expect("invalid value for amount"); + let mut transfer_delta = TransferDelta::new(); + transfer_delta + .insert(tx.source.clone(), Amount::zero() - &transparent_delta); + transfer_delta.insert(tx.target.clone(), transparent_delta); + self.delta_map.insert( + (height, index), + (epoch, transfer_delta, transaction_delta), + ); + self.last_txidx += 1; + } + + /// Summarize the effects on shielded and transparent accounts of each + /// Transfer in this context + pub fn get_tx_deltas( + &self, + ) -> &BTreeMap< + (BlockHeight, TxIndex), + (Epoch, TransferDelta, TransactionDelta), + > { + &self.delta_map + } + + /// Compute the total unspent notes associated with the viewing key in the + /// context. If the key is not in the context, then we do not know the + /// balance and hence we return None. + pub fn compute_shielded_balance(&self, vk: &ViewingKey) -> Option { + // Cannot query the balance of a key that's not in the map + if !self.pos_map.contains_key(vk) { + return None; + } + let mut val_acc = Amount::zero(); + // Retrieve the notes that can be spent by this key + if let Some(avail_notes) = self.pos_map.get(vk) { + for note_idx in avail_notes { + // Spent notes cannot contribute a new transaction's pool + if self.spents.contains(note_idx) { + continue; + } + // Get note associated with this ID + let note = self.note_map.get(note_idx).unwrap(); + // Finally add value to multi-asset accumulator + val_acc += + Amount::from_nonnegative(note.asset_type, note.value) + .expect("found note with invalid value or asset type"); + } + } + Some(val_acc) + } + + /// Query the ledger for the decoding of the given asset type and cache it + /// if it is found. 
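The balance computation just above reduces to a filter-and-sum over note positions; a minimal single-asset model (toy types, with u64 in place of the multi-asset `Amount`):

use std::collections::{HashMap, HashSet};

fn shielded_balance(
    avail: &[usize],
    spents: &HashSet<usize>,
    note_values: &HashMap<usize, u64>,
) -> u64 {
    avail
        .iter()
        .copied()
        .filter(|pos| !spents.contains(pos)) // spent notes contribute nothing
        .map(|pos| note_values[&pos])
        .sum()
}

fn main() {
    let spents = HashSet::from([1]);
    let values = HashMap::from([(0, 5u64), (1, 7), (2, 1)]);
    assert_eq!(shielded_balance(&[0, 1, 2], &spents, &values), 6);
}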
+    pub async fn decode_asset_type(
+        &mut self,
+        client: &U::C,
+        asset_type: AssetType,
+    ) -> Option<(Address, Epoch)> {
+        // Try to find the decoding in the cache
+        if let decoded @ Some(_) = self.asset_types.get(&asset_type) {
+            return decoded.cloned();
+        }
+        // Query for the ID of the last accepted transaction
+        let (addr, ep, _conv, _path): (Address, _, Amount, MerklePath<Node>) =
+            rpc::query_conversion(client, asset_type).await?;
+        self.asset_types.insert(asset_type, (addr.clone(), ep));
+        Some((addr, ep))
+    }
+
+    /// Query the ledger for the conversion that is allowed for the given asset
+    /// type and cache it.
+    async fn query_allowed_conversion<'a>(
+        &'a mut self,
+        client: &U::C,
+        asset_type: AssetType,
+        conversions: &'a mut Conversions,
+    ) -> Option<&'a mut (AllowedConversion, MerklePath<Node>, i64)> {
+        match conversions.entry(asset_type) {
+            Entry::Occupied(conv_entry) => Some(conv_entry.into_mut()),
+            Entry::Vacant(conv_entry) => {
+                // Query for the ID of the last accepted transaction
+                let (addr, ep, conv, path): (Address, _, _, _) =
+                    rpc::query_conversion(client, asset_type).await?;
+                self.asset_types.insert(asset_type, (addr, ep));
+                // If the conversion is 0, then we just have a pure decoding
+                if conv == Amount::zero() {
+                    None
+                } else {
+                    Some(conv_entry.insert((Amount::into(conv), path, 0)))
+                }
+            }
+        }
+    }
+
+    /// Compute the total unspent notes associated with the viewing key in the
+    /// context and express that value in terms of the currently timestamped
+    /// asset types. If the key is not in the context, then we do not know the
+    /// balance and hence we return None.
+    pub async fn compute_exchanged_balance(
+        &mut self,
+        client: &U::C,
+        vk: &ViewingKey,
+        target_epoch: Epoch,
+    ) -> Option<Amount> {
+        // First get the unexchanged balance
+        if let Some(balance) = self.compute_shielded_balance(vk) {
+            // And then exchange balance into current asset types
+            Some(
+                self.compute_exchanged_amount(
+                    client,
+                    balance,
+                    target_epoch,
+                    HashMap::new(),
+                )
+                .await
+                .0,
+            )
+        } else {
+            None
+        }
+    }
+
+    /// Try to convert as much of the given asset type-value pair as possible
+    /// using the given allowed conversion. usage is incremented by the amount
+    /// of the conversion used, the conversions are applied to the given input,
+    /// and the trace amount that could not be converted is moved from input
+    /// to output.
+    fn apply_conversion(
+        conv: AllowedConversion,
+        asset_type: AssetType,
+        value: i64,
+        usage: &mut i64,
+        input: &mut Amount,
+        output: &mut Amount,
+    ) {
+        // If conversion is possible, accumulate the exchanged amount
+        let conv: Amount = conv.into();
+        // The amount required of current asset to qualify for conversion
+        let threshold = -conv[&asset_type];
+        if threshold == 0 {
+            eprintln!(
+                "Asset threshold of selected conversion for asset type {} is \
+                 0, this is a bug, please report it.",
+                asset_type
+            );
+        }
+        // We should use an amount of the AllowedConversion that almost
+        // cancels the original amount
+        let required = value / threshold;
+        // Forget about the trace amount left over because we cannot
+        // realize its value
+        let trace = Amount::from_pair(asset_type, value % threshold).unwrap();
+        // Record how much more of the given conversion has been used
+        *usage += required;
+        // Apply the conversions to input and move the trace amount to output
+        *input += conv * required - &trace;
+        *output += trace;
+    }
+
+    /// Convert the given amount into the latest asset types whilst making a
+    /// note of the conversions that were used.
Note that this function does + /// not assume that allowed conversions from the ledger are expressed in + /// terms of the latest asset types. + pub async fn compute_exchanged_amount( + &mut self, + client: &U::C, + mut input: Amount, + target_epoch: Epoch, + mut conversions: Conversions, + ) -> (Amount, Conversions) { + // Where we will store our exchanged value + let mut output = Amount::zero(); + // Repeatedly exchange assets until it is no longer possible + while let Some((asset_type, value)) = + input.components().next().map(cloned_pair) + { + let target_asset_type = self + .decode_asset_type(client, asset_type) + .await + .map(|(addr, _epoch)| make_asset_type(target_epoch, &addr)) + .unwrap_or(asset_type); + let at_target_asset_type = asset_type == target_asset_type; + if let (Some((conv, _wit, usage)), false) = ( + self.query_allowed_conversion( + client, + asset_type, + &mut conversions, + ) + .await, + at_target_asset_type, + ) { + println!( + "converting current asset type to latest asset type..." + ); + // Not at the target asset type, not at the latest asset type. + // Apply conversion to get from current asset type to the latest + // asset type. + Self::apply_conversion( + conv.clone(), + asset_type, + value, + usage, + &mut input, + &mut output, + ); + } else if let (Some((conv, _wit, usage)), false) = ( + self.query_allowed_conversion( + client, + target_asset_type, + &mut conversions, + ) + .await, + at_target_asset_type, + ) { + println!( + "converting latest asset type to target asset type..." + ); + // Not at the target asset type, yes at the latest asset type. + // Apply inverse conversion to get from latest asset type to + // the target asset type. + Self::apply_conversion( + conv.clone(), + asset_type, + value, + usage, + &mut input, + &mut output, + ); + } else { + // At the target asset type. Then move component over to output. + let comp = input.project(asset_type); + output += ∁ + // Strike from input to avoid repeating computation + input -= comp; + } + } + (output, conversions) + } + + /// Collect enough unspent notes in this context to exceed the given amount + /// of the specified asset type. Return the total value accumulated plus + /// notes and the corresponding diversifiers/merkle paths that were used to + /// achieve the total value. 
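The division in `apply_conversion` above splits a value into a convertible part and an unconvertible trace; the arithmetic in isolation:

// A conversion that consumes `threshold` units of the old asset is applied
// `value / threshold` times, and the indivisible remainder is carried
// through to the output unconverted.
fn split_conversion(value: i64, threshold: i64) -> (i64, i64) {
    let required = value / threshold; // times the conversion is applied
    let trace = value % threshold;    // remainder left unconverted
    (required, trace)
}

fn main() {
    // 10 old-epoch units with a conversion consuming 3 at a time:
    // apply it 3 times and carry 1 unit through unconverted.
    assert_eq!(split_conversion(10, 3), (3, 1));
}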
+ pub async fn collect_unspent_notes( + &mut self, + client: &U::C, + vk: &ViewingKey, + target: Amount, + target_epoch: Epoch, + ) -> ( + Amount, + Vec<(Diversifier, Note, MerklePath)>, + Conversions, + ) { + // Establish connection with which to do exchange rate queries + let mut conversions = HashMap::new(); + let mut val_acc = Amount::zero(); + let mut notes = Vec::new(); + // Retrieve the notes that can be spent by this key + if let Some(avail_notes) = self.pos_map.get(vk).cloned() { + for note_idx in &avail_notes { + // No more transaction inputs are required once we have met + // the target amount + if val_acc >= target { + break; + } + // Spent notes cannot contribute a new transaction's pool + if self.spents.contains(note_idx) { + continue; + } + // Get note, merkle path, diversifier associated with this ID + let note = *self.note_map.get(note_idx).unwrap(); + + // The amount contributed by this note before conversion + let pre_contr = Amount::from_pair(note.asset_type, note.value) + .expect("received note has invalid value or asset type"); + let (contr, proposed_convs) = self + .compute_exchanged_amount( + client, + pre_contr, + target_epoch, + conversions.clone(), + ) + .await; + + // Use this note only if it brings us closer to our target + if is_amount_required( + val_acc.clone(), + target.clone(), + contr.clone(), + ) { + // Be sure to record the conversions used in computing + // accumulated value + val_acc += contr; + // Commit the conversions that were used to exchange + conversions = proposed_convs; + let merkle_path = + self.witness_map.get(note_idx).unwrap().path().unwrap(); + let diversifier = self.div_map.get(note_idx).unwrap(); + // Commit this note to our transaction + notes.push((*diversifier, note, merkle_path)); + } + } + } + (val_acc, notes, conversions) + } + + /// Compute the combined value of the output notes of the transaction pinned + /// at the given payment address. This computation uses the supplied viewing + /// keys to try to decrypt the output notes. If no transaction is pinned at + /// the given payment address fails with + /// `PinnedBalanceError::NoTransactionPinned`. 
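The note-collection strategy above is greedy accumulation until the target is met; a single-asset sketch with toy types:

fn collect(notes: &[u64], target: u64) -> (u64, Vec<u64>) {
    let mut acc = 0;
    let mut used = Vec::new();
    for &n in notes {
        // No more inputs are required once the target amount is met
        if acc >= target {
            break;
        }
        acc += n;
        used.push(n);
    }
    (acc, used)
}

fn main() {
    // Two notes suffice; the third is left unspent.
    assert_eq!(collect(&[4, 4, 4], 6), (8, vec![4, 4]));
}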
+ pub async fn compute_pinned_balance( + client: &U::C, + owner: PaymentAddress, + viewing_key: &ViewingKey, + ) -> Result<(Amount, Epoch), PinnedBalanceError> { + // Check that the supplied viewing key corresponds to given payment + // address + let counter_owner = viewing_key.to_payment_address( + *masp_primitives::primitives::PaymentAddress::diversifier( + &owner.into(), + ), + ); + match counter_owner { + Some(counter_owner) if counter_owner == owner.into() => {} + _ => return Err(PinnedBalanceError::InvalidViewingKey), + } + // The address of the MASP account + let masp_addr = masp(); + // Construct the key for where the transaction ID would be stored + let pin_key = Key::from(masp_addr.to_db_key()) + .push(&(PIN_KEY_PREFIX.to_owned() + &owner.hash())) + .expect("Cannot obtain a storage key"); + // Obtain the transaction pointer at the key + let txidx = rpc::query_storage_value::(client, &pin_key) + .await + .ok_or(PinnedBalanceError::NoTransactionPinned)?; + // Construct the key for where the pinned transaction is stored + let tx_key = Key::from(masp_addr.to_db_key()) + .push(&(TX_KEY_PREFIX.to_owned() + &txidx.to_string())) + .expect("Cannot obtain a storage key"); + // Obtain the pointed to transaction + let (tx_epoch, _tx_height, _tx_index, tx) = rpc::query_storage_value::< + U::C, + (Epoch, BlockHeight, TxIndex, Transfer), + >(client, &tx_key) + .await + .expect("Ill-formed epoch, transaction pair"); + // Accumulate the combined output note value into this Amount + let mut val_acc = Amount::zero(); + let tx = tx + .shielded + .expect("Pinned Transfers should have shielded part"); + for so in &tx.shielded_outputs { + // Let's try to see if our viewing key can decrypt current note + let decres = try_sapling_note_decryption::( + 0, + &viewing_key.ivk().0, + &so.ephemeral_key.into_subgroup().unwrap(), + &so.cmu, + &so.enc_ciphertext, + ); + match decres { + // So the given viewing key does decrypt this current note... + Some((note, pa, _memo)) if pa == owner.into() => { + val_acc += + Amount::from_nonnegative(note.asset_type, note.value) + .expect( + "found note with invalid value or asset type", + ); + break; + } + _ => {} + } + } + Ok((val_acc, tx_epoch)) + } + + /// Compute the combined value of the output notes of the pinned transaction + /// at the given payment address if there's any. The asset types may be from + /// the epoch of the transaction or even before, so exchange all these + /// amounts to the epoch of the transaction in order to get the value that + /// would have been displayed in the epoch of the transaction. + pub async fn compute_exchanged_pinned_balance( + &mut self, + client: &U::C, + owner: PaymentAddress, + viewing_key: &ViewingKey, + ) -> Result<(Amount, Epoch), PinnedBalanceError> { + // Obtain the balance that will be exchanged + let (amt, ep) = + Self::compute_pinned_balance(client, owner, viewing_key).await?; + // Finally, exchange the balance to the transaction's epoch + Ok(( + self.compute_exchanged_amount(client, amt, ep, HashMap::new()) + .await + .0, + ep, + )) + } + + /// Convert an amount whose units are AssetTypes to one whose units are + /// Addresses that they decode to. All asset types not corresponding to + /// the given epoch are ignored. + pub async fn decode_amount( + &mut self, + client: &U::C, + amt: Amount, + target_epoch: Epoch, + ) -> Amount
{ + let mut res = Amount::zero(); + for (asset_type, val) in amt.components() { + // Decode the asset type + let decoded = self.decode_asset_type(client, *asset_type).await; + // Only assets with the target timestamp count + match decoded { + Some((addr, epoch)) if epoch == target_epoch => { + res += &Amount::from_pair(addr, *val).unwrap() + } + _ => {} + } + } + res + } + + /// Convert an amount whose units are AssetTypes to one whose units are + /// Addresses that they decode to. + pub async fn decode_all_amounts( + &mut self, + client: &U::C, + amt: Amount, + ) -> Amount<(Address, Epoch)> { + let mut res = Amount::zero(); + for (asset_type, val) in amt.components() { + // Decode the asset type + let decoded = self.decode_asset_type(client, *asset_type).await; + // Only assets with the target timestamp count + if let Some((addr, epoch)) = decoded { + res += &Amount::from_pair((addr, epoch), *val).unwrap() + } + } + res + } + + /// Make shielded components to embed within a Transfer object. If no + /// shielded payment address nor spending key is specified, then no + /// shielded components are produced. Otherwise a transaction containing + /// nullifiers and/or note commitments are produced. Dummy transparent + /// UTXOs are sometimes used to make transactions balanced, but it is + /// understood that transparent account changes are effected only by the + /// amounts and signatures specified by the containing Transfer object. + #[cfg(feature = "masp-tx-gen")] + pub async fn gen_shielded_transfer( + &mut self, + client: &U::C, + args: args::TxTransfer, + shielded_gas: bool, + ) -> Result, builder::Error> + { + // No shielded components are needed when neither source nor destination + // are shielded + let spending_key = args.source.spending_key(); + let payment_address = args.target.payment_address(); + if spending_key.is_none() && payment_address.is_none() { + return Ok(None); + } + // We want to fund our transaction solely from supplied spending key + let spending_key = spending_key.map(|x| x.into()); + let spending_keys: Vec<_> = spending_key.into_iter().collect(); + // Load the current shielded context given the spending key we possess + let _ = self.load(); + self.fetch(client, &spending_keys, &[]).await; + // Save the update state so that future fetches can be short-circuited + let _ = self.save(); + // Determine epoch in which to submit potential shielded transaction + let epoch = rpc::query_epoch(client).await; + // Context required for storing which notes are in the source's + // possesion + let consensus_branch_id = BranchId::Sapling; + let amt: u64 = args.amount.into(); + let memo: Option = None; + + // Now we build up the transaction within this object + let mut builder = Builder::::new(0u32); + // Convert transaction amount into MASP types + let (asset_type, amount) = + convert_amount(epoch, &args.token, args.amount); + + // Transactions with transparent input and shielded output + // may be affected if constructed close to epoch boundary + let mut epoch_sensitive: bool = false; + // If there are shielded inputs + if let Some(sk) = spending_key { + // Transaction fees need to match the amount in the wrapper Transfer + // when MASP source is used + let (_, fee) = + convert_amount(epoch, &args.tx.fee_token, args.tx.fee_amount); + builder.set_fee(fee.clone())?; + // If the gas is coming from the shielded pool, then our shielded + // inputs must also cover the gas fee + let required_amt = if shielded_gas { amount + fee } else { amount }; + // Locate unspent notes that can help us 
meet the transaction amount + let (_, unspent_notes, used_convs) = self + .collect_unspent_notes( + client, + &to_viewing_key(&sk).vk, + required_amt, + epoch, + ) + .await; + // Commit the notes found to our transaction + for (diversifier, note, merkle_path) in unspent_notes { + builder.add_sapling_spend( + sk, + diversifier, + note, + merkle_path, + )?; + } + // Commit the conversion notes used during summation + for (conv, wit, value) in used_convs.values() { + if *value > 0 { + builder.add_convert( + conv.clone(), + *value as u64, + wit.clone(), + )?; + } + } + } else { + // No transfer fees come from the shielded transaction for non-MASP + // sources + builder.set_fee(Amount::zero())?; + // We add a dummy UTXO to our transaction, but only the source of + // the parent Transfer object is used to validate fund + // availability + let secp_sk = secp256k1::SecretKey::from_slice(&[0xcd; 32]) + .expect("secret key"); + let secp_ctx = + secp256k1::Secp256k1::::gen_new(); + let secp_pk = + secp256k1::PublicKey::from_secret_key(&secp_ctx, &secp_sk) + .serialize(); + let hash = ripemd160::Ripemd160::digest( + sha2::Sha256::digest(&secp_pk).as_slice(), + ); + let script = TransparentAddress::PublicKey(hash.into()).script(); + epoch_sensitive = true; + builder.add_transparent_input( + secp_sk, + OutPoint::new([0u8; 32], 0), + TxOut { + asset_type, + value: amt, + script_pubkey: script, + }, + )?; + } + // Now handle the outputs of this transaction + // If there is a shielded output + if let Some(pa) = payment_address { + let ovk_opt = spending_key.map(|x| x.expsk.ovk); + builder.add_sapling_output( + ovk_opt, + pa.into(), + asset_type, + amt, + memo.clone(), + )?; + } else { + epoch_sensitive = false; + // Embed the transparent target address into the shielded + // transaction so that it can be signed + let target_enc = args + .target + .address() + .expect("target address should be transparent") + .try_to_vec() + .expect("target address encoding"); + let hash = ripemd160::Ripemd160::digest( + sha2::Sha256::digest(target_enc.as_ref()).as_slice(), + ); + builder.add_transparent_output( + &TransparentAddress::PublicKey(hash.into()), + asset_type, + amt, + )?; + } + let prover = self.utils.local_tx_prover(); + // Build and return the constructed transaction + let mut tx = builder.build(consensus_branch_id, &prover); + + if epoch_sensitive { + let new_epoch = rpc::query_epoch(client).await; + + // If epoch has changed, recalculate shielded outputs to match new + // epoch + if new_epoch != epoch { + // Hack: build new shielded transfer with updated outputs + let mut replay_builder = + Builder::::new(0u32); + replay_builder.set_fee(Amount::zero())?; + let ovk_opt = spending_key.map(|x| x.expsk.ovk); + let (new_asset_type, _) = + convert_amount(new_epoch, &args.token, args.amount); + replay_builder.add_sapling_output( + ovk_opt, + payment_address.unwrap().into(), + new_asset_type, + amt, + memo, + )?; + + let secp_sk = secp256k1::SecretKey::from_slice(&[0xcd; 32]) + .expect("secret key"); + let secp_ctx = + secp256k1::Secp256k1::::gen_new(); + let secp_pk = + secp256k1::PublicKey::from_secret_key(&secp_ctx, &secp_sk) + .serialize(); + let hash = ripemd160::Ripemd160::digest( + sha2::Sha256::digest(&secp_pk).as_slice(), + ); + let script = + TransparentAddress::PublicKey(hash.into()).script(); + replay_builder.add_transparent_input( + secp_sk, + OutPoint::new([0u8; 32], 0), + TxOut { + asset_type: new_asset_type, + value: amt, + script_pubkey: script, + }, + )?; + + let (replay_tx, _) = + 
replay_builder.build(consensus_branch_id, &prover)?; + tx = tx.map(|(t, tm)| { + let mut temp = t.deref().clone(); + temp.shielded_outputs = replay_tx.shielded_outputs.clone(); + temp.value_balance = temp.value_balance.reject(asset_type) + - Amount::from_pair(new_asset_type, amt).unwrap(); + (temp.freeze().unwrap(), tm) + }); + } + } + + tx.map(Some) + } + + /// Obtain the known effects of all accepted shielded and transparent + /// transactions. If an owner is specified, then restrict the set to only + /// transactions crediting/debiting the given owner. If token is specified, + /// then restrict set to only transactions involving the given token. + pub async fn query_tx_deltas( + &mut self, + client: &U::C, + query_owner: &Either>, + query_token: &Option
, + viewing_keys: &HashMap, + ) -> BTreeMap< + (BlockHeight, TxIndex), + (Epoch, TransferDelta, TransactionDelta), + > { + const TXS_PER_PAGE: u8 = 100; + let _ = self.load(); + let vks = viewing_keys; + let fvks: Vec<_> = vks + .values() + .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) + .collect(); + self.fetch(client, &[], &fvks).await; + // Save the update state so that future fetches can be short-circuited + let _ = self.save(); + // Required for filtering out rejected transactions from Tendermint + // responses + let block_results = rpc::query_results(client).await; + let mut transfers = self.get_tx_deltas().clone(); + // Construct the set of addresses relevant to user's query + let relevant_addrs = match &query_owner { + Either::Left(BalanceOwner::Address(owner)) => vec![owner.clone()], + // MASP objects are dealt with outside of tx_search + Either::Left(BalanceOwner::FullViewingKey(_viewing_key)) => vec![], + Either::Left(BalanceOwner::PaymentAddress(_owner)) => vec![], + // Unspecified owner means all known addresses are considered + // relevant + Either::Right(addrs) => addrs.clone(), + }; + // Find all transactions to or from the relevant address set + for addr in relevant_addrs { + for prop in ["transfer.source", "transfer.target"] { + // Query transactions involving the current address + let mut tx_query = Query::eq(prop, addr.encode()); + // Elaborate the query if requested by the user + if let Some(token) = &query_token { + tx_query = + tx_query.and_eq("transfer.token", token.encode()); + } + for page in 1.. { + let txs = &client + .tx_search( + tx_query.clone(), + true, + page, + TXS_PER_PAGE, + Order::Ascending, + ) + .await + .expect("Unable to query for transactions") + .txs; + for response_tx in txs { + let height = BlockHeight(response_tx.height.value()); + let idx = TxIndex(response_tx.index); + // Only process yet unprocessed transactions which have + // been accepted by node VPs + let should_process = !transfers + .contains_key(&(height, idx)) + && block_results[u64::from(height) as usize] + .is_accepted(idx.0 as usize); + if !should_process { + continue; + } + let tx = Tx::try_from(response_tx.tx.as_ref()) + .expect("Ill-formed Tx"); + let mut wrapper = None; + let mut transfer = None; + extract_payload(tx, &mut wrapper, &mut transfer); + // Epoch data is not needed for transparent transactions + let epoch = + wrapper.map(|x| x.epoch).unwrap_or_default(); + if let Some(transfer) = transfer { + // Skip MASP addresses as they are already handled + // by ShieldedContext + if transfer.source == masp() + || transfer.target == masp() + { + continue; + } + // Describe how a Transfer simply subtracts from one + // account and adds the same to another + let mut delta = TransferDelta::default(); + let tfer_delta = Amount::from_nonnegative( + transfer.token.clone(), + u64::from(transfer.amount), + ) + .expect("invalid value for amount"); + delta.insert( + transfer.source, + Amount::zero() - &tfer_delta, + ); + delta.insert(transfer.target, tfer_delta); + // No shielded accounts are affected by this + // Transfer + transfers.insert( + (height, idx), + (epoch, delta, TransactionDelta::new()), + ); + } + } + // An incomplete page signifies no more transactions + if (txs.len() as u8) < TXS_PER_PAGE { + break; + } + } + } + } + transfers + } +} + +/// Extract the payload from the given Tx object +fn extract_payload( + tx: Tx, + wrapper: &mut Option, + transfer: &mut Option, +) { + match process_tx(tx) { + Ok(TxType::Wrapper(wrapper_tx)) => { + let privkey = 
::G2Affine::prime_subgroup_generator(); + extract_payload( + Tx::from(match wrapper_tx.decrypt(privkey) { + Ok(tx) => DecryptedTx::Decrypted { + tx, + #[cfg(not(feature = "mainnet"))] + has_valid_pow: false, + }, + _ => DecryptedTx::Undecryptable(wrapper_tx.clone()), + }), + wrapper, + transfer, + ); + *wrapper = Some(wrapper_tx); + } + Ok(TxType::Decrypted(DecryptedTx::Decrypted { + tx, + #[cfg(not(feature = "mainnet"))] + has_valid_pow: _, + })) => { + let empty_vec = vec![]; + let tx_data = tx.data.as_ref().unwrap_or(&empty_vec); + let _ = SignedTxData::try_from_slice(tx_data).map(|signed| { + Transfer::try_from_slice(&signed.data.unwrap()[..]) + .map(|tfer| *transfer = Some(tfer)) + }); + } + _ => {} + } +} + +/// Make asset type corresponding to given address and epoch +fn make_asset_type(epoch: Epoch, token: &Address) -> AssetType { + // Typestamp the chosen token with the current epoch + let token_bytes = (token, epoch.0) + .try_to_vec() + .expect("token should serialize"); + // Generate the unique asset identifier from the unique token address + AssetType::new(token_bytes.as_ref()).expect("unable to create asset type") +} + +/// Convert Anoma amount and token type to MASP equivalents +fn convert_amount( + epoch: Epoch, + token: &Address, + val: token::Amount, +) -> (AssetType, Amount) { + let asset_type = make_asset_type(epoch, token); + // Combine the value and unit into one amount + let amount = Amount::from_nonnegative(asset_type, u64::from(val)) + .expect("invalid value for amount"); + (asset_type, amount) +} diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index befd64ca64e..c241916e2ec 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -1,5 +1,6 @@ //! The ledger modules +pub mod args; pub mod eth_bridge; pub mod events; pub mod ibc; @@ -9,9 +10,13 @@ pub mod pos; #[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] pub mod protocol; pub mod queries; +pub mod rpc; +pub mod signing; pub mod storage; +pub mod tx; pub mod vp_host_fns; +pub mod wallet; pub use namada_core::ledger::{ - gas, governance, inflation, parameters, storage_api, tx_env, vp_env, + gas, governance, inflation, parameters, replay_protection, storage_api, tx_env, vp_env, }; diff --git a/shared/src/ledger/native_vp/governance/mod.rs b/shared/src/ledger/native_vp/governance/mod.rs index 5cde3c54689..2e0de7a18ff 100644 --- a/shared/src/ledger/native_vp/governance/mod.rs +++ b/shared/src/ledger/native_vp/governance/mod.rs @@ -7,6 +7,8 @@ use std::collections::BTreeSet; use namada_core::ledger::governance::storage as gov_storage; use namada_core::ledger::storage; use namada_core::ledger::vp_env::VpEnv; +use namada_core::types::governance::{ProposalVote, VoteType}; +use namada_core::types::transaction::governance::ProposalType; use thiserror::Error; use utils::is_valid_validator_voting_period; @@ -73,6 +75,9 @@ where (KeyType::CONTENT, Some(proposal_id)) => { self.is_valid_content_key(proposal_id) } + (KeyType::TYPE, Some(proposal_id)) => { + self.is_valid_proposal_type(proposal_id) + } (KeyType::PROPOSAL_CODE, Some(proposal_id)) => { self.is_valid_proposal_code(proposal_id) } @@ -133,6 +138,7 @@ where counter_key.clone(), gov_storage::get_content_key(counter), gov_storage::get_author_key(counter), + gov_storage::get_proposal_type_key(counter), gov_storage::get_funds_key(counter), gov_storage::get_voting_start_epoch_key(counter), gov_storage::get_voting_end_epoch_key(counter), @@ -170,9 +176,16 @@ where let voter = gov_storage::get_voter_address(key); let 
delegation_address = gov_storage::get_vote_delegation_address(key); + let vote: Option = self.ctx.read_post(key)?; + + let proposal_type_key = gov_storage::get_proposal_type_key(proposal_id); + let proposal_type: Option = + self.ctx.read_pre(&proposal_type_key)?; match ( pre_counter, + proposal_type, + vote, voter, delegation_address, current_epoch, @@ -181,44 +194,90 @@ where ) { ( Some(pre_counter), + Some(proposal_type), + Some(vote), Some(voter_address), Some(delegation_address), Some(current_epoch), Some(pre_voting_start_epoch), Some(pre_voting_end_epoch), ) => { - let is_delegator = self - .is_delegator( - pre_voting_start_epoch, - verifiers, - voter_address, - delegation_address, - ) - .unwrap_or(false); - - let is_validator = self - .is_validator( - pre_voting_start_epoch, - verifiers, - voter_address, - delegation_address, - ) - .unwrap_or(false); - - let is_valid_validator_voting_period = - is_valid_validator_voting_period( - current_epoch, - pre_voting_start_epoch, - pre_voting_end_epoch, - ); + if pre_counter <= proposal_id { + // Invalid proposal id + return Ok(false); + } + if current_epoch < pre_voting_start_epoch + || current_epoch > pre_voting_end_epoch + { + // Voted outside of voting window + return Ok(false); + } - let is_valid = pre_counter > proposal_id - && current_epoch >= pre_voting_start_epoch - && current_epoch <= pre_voting_end_epoch - && (is_delegator - || (is_validator && is_valid_validator_voting_period)); + if let ProposalVote::Yay(vote_type) = vote { + if proposal_type != vote_type { + return Ok(false); + } + + // Vote type specific checks + if let VoteType::PGFCouncil(set) = vote_type { + // Check that all the addresses are established + for (address, _) in set { + match address { + Address::Established(_) => { + // Check that established address exists in + // storage + let vp_key = + Key::validity_predicate(&address); + if !self.ctx.has_key_pre(&vp_key)? { + return Ok(false); + } + } + _ => return Ok(false), + } + } + } else if let VoteType::ETHBridge(_sig) = vote_type { + // TODO: Check the validity of the signature with the + // governance ETH key in storage for the given validator + // + } + } - Ok(is_valid) + match proposal_type { + ProposalType::Default(_) | ProposalType::PGFCouncil => { + if self + .is_validator( + pre_voting_start_epoch, + verifiers, + voter_address, + delegation_address, + ) + .unwrap_or(false) + { + Ok(is_valid_validator_voting_period( + current_epoch, + pre_voting_start_epoch, + pre_voting_end_epoch, + )) + } else { + Ok(self + .is_delegator( + pre_voting_start_epoch, + verifiers, + voter_address, + delegation_address, + ) + .unwrap_or(false)) + } + } + ProposalType::ETHBridge => Ok(self + .is_validator( + pre_voting_start_epoch, + verifiers, + voter_address, + delegation_address, + ) + .unwrap_or(false)), + } } _ => Ok(false), } @@ -248,9 +307,29 @@ where } } - /// Validate a proposal_code key + /// Validate the proposal type + pub fn is_valid_proposal_type(&self, proposal_id: u64) -> Result { + let proposal_type_key = gov_storage::get_proposal_type_key(proposal_id); + Ok(self + .ctx + .read_post::(&proposal_type_key)? 
+ .is_some()) + } + + /// Validate a proposal code pub fn is_valid_proposal_code(&self, proposal_id: u64) -> Result { - let code_key: Key = gov_storage::get_proposal_code_key(proposal_id); + let proposal_type_key: Key = + gov_storage::get_proposal_type_key(proposal_id); + let proposal_type: Option = + self.ctx.read_post(&proposal_type_key)?; + + // Check that the proposal type admits wasm code + match proposal_type { + Some(ProposalType::Default(_)) => (), + _ => return Ok(false), + } + + let code_key = gov_storage::get_proposal_code_key(proposal_id); let max_code_size_parameter_key = gov_storage::get_max_proposal_code_size_key(); @@ -608,6 +687,8 @@ enum KeyType { #[allow(non_camel_case_types)] PROPOSAL_CODE, #[allow(non_camel_case_types)] + TYPE, + #[allow(non_camel_case_types)] PROPOSAL_COMMIT, #[allow(non_camel_case_types)] GRACE_EPOCH, @@ -635,8 +716,10 @@ impl KeyType { Self::VOTE } else if gov_storage::is_content_key(key) { KeyType::CONTENT + } else if gov_storage::is_proposal_type_key(key) { + Self::TYPE } else if gov_storage::is_proposal_code_key(key) { - KeyType::PROPOSAL_CODE + Self::PROPOSAL_CODE } else if gov_storage::is_grace_epoch_key(key) { KeyType::GRACE_EPOCH } else if gov_storage::is_start_epoch_key(key) { diff --git a/shared/src/ledger/native_vp/governance/utils.rs b/shared/src/ledger/native_vp/governance/utils.rs index a0337938ff3..2511db46c94 100644 --- a/shared/src/ledger/native_vp/governance/utils.rs +++ b/shared/src/ledger/native_vp/governance/utils.rs @@ -3,9 +3,11 @@ use std::collections::HashMap; use borsh::BorshDeserialize; +use namada_core::types::governance::ProposalResult; +use namada_core::types::transaction::governance::ProposalType; use namada_proof_of_stake::{ bond_amount, read_all_validator_addresses, read_pos_params, - read_total_stake, read_validator_stake, + read_validator_stake, }; use thiserror::Error; @@ -13,7 +15,9 @@ use crate::ledger::governance::storage as gov_storage; use crate::ledger::pos::BondId; use crate::ledger::storage_api; use crate::types::address::Address; -use crate::types::governance::{ProposalVote, TallyResult, VotePower}; +use crate::types::governance::{ + ProposalVote, Tally, TallyResult, VotePower, VoteType, +}; use crate::types::storage::Epoch; use crate::types::token; @@ -21,11 +25,10 @@ use crate::types::token; /// outcome pub struct Votes { /// Map from validators who votes yay to their total stake amount - pub yay_validators: HashMap, - /// Map from delegation who votes yay to their bond amount - pub yay_delegators: HashMap>, - /// Map from delegation who votes nay to their bond amount - pub nay_delegators: HashMap>, + pub yay_validators: HashMap, + /// Map from delegation votes to their bond amount + pub delegators: + HashMap>, } /// Proposal errors @@ -37,6 +40,9 @@ pub enum Error { /// Invalid proposal field deserialization #[error("Invalid proposal {0}")] InvalidProposal(u64), + /// Error during tally + #[error("Error while tallying proposal: {0}")] + Tally(String), } /// Proposal event definition @@ -75,49 +81,291 @@ impl ProposalEvent { } } -/// Return a proposal result - accepted only when the result is `Ok(true)`. 
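The gating added in `is_valid_proposal_code` can be read as a small total function over the proposal type (a sketch; `ProposalKind` is a hypothetical stand-in for the patch's `ProposalType`, with payloads elided): only Default proposals may carry wasm code.

// Hypothetical mirror of the ProposalType variants used in this hunk.
enum ProposalKind {
    Default,
    PgfCouncil,
    EthBridge,
}

// Only Default proposals admit a code key, as in is_valid_proposal_code.
fn code_key_allowed(kind: &ProposalKind) -> bool {
    matches!(kind, ProposalKind::Default)
}

fn main() {
    assert!(code_key_allowed(&ProposalKind::Default));
    assert!(!code_key_allowed(&ProposalKind::PgfCouncil));
    assert!(!code_key_allowed(&ProposalKind::EthBridge));
}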
-pub fn compute_tally( - storage: &S, - epoch: Epoch, +/// Return a proposal result +pub fn compute_tally( votes: Votes, -) -> storage_api::Result -where - S: storage_api::StorageRead, -{ - let params = read_pos_params(storage)?; - let total_stake = read_total_stake(storage, ¶ms, epoch)?; - let total_stake = VotePower::from(u64::from(total_stake)); - + total_stake: VotePower, + proposal_type: &ProposalType, +) -> Result { let Votes { yay_validators, - yay_delegators, - nay_delegators, + delegators, } = votes; - let mut total_yay_staked_tokens = VotePower::from(0_u64); - for (_, amount) in yay_validators.clone().into_iter() { - total_yay_staked_tokens += amount; - } + match proposal_type { + ProposalType::Default(_) | ProposalType::ETHBridge => { + let mut total_yay_staked_tokens = VotePower::default(); + + for (_, (amount, validator_vote)) in yay_validators.iter() { + if let ProposalVote::Yay(vote_type) = validator_vote { + if proposal_type == vote_type { + total_yay_staked_tokens += amount; + } else { + // Log the error and continue + tracing::error!( + "Unexpected vote type. Expected: {}, Found: {}", + proposal_type, + validator_vote + ); + continue; + } + } else { + // Log the error and continue + tracing::error!( + "Unexpected vote type. Expected: {}, Found: {}", + proposal_type, + validator_vote + ); + continue; + } + } + + // This loop is taken only for Default proposals + for (_, vote_map) in delegators.iter() { + for (validator_address, (vote_power, delegator_vote)) in + vote_map.iter() + { + match delegator_vote { + ProposalVote::Yay(VoteType::Default) => { + if !yay_validators.contains_key(validator_address) { + // YAY: Add delegator amount whose validator + // didn't vote / voted nay + total_yay_staked_tokens += vote_power; + } + } + ProposalVote::Nay => { + // NAY: Remove delegator amount whose validator + // validator vote yay + + if yay_validators.contains_key(validator_address) { + total_yay_staked_tokens -= vote_power; + } + } - // YAY: Add delegator amount whose validator didn't vote / voted nay - for (_, vote_map) in yay_delegators.iter() { - for (validator_address, vote_power) in vote_map.iter() { - if !yay_validators.contains_key(validator_address) { - total_yay_staked_tokens += vote_power; + _ => { + // Log the error and continue + tracing::error!( + "Unexpected vote type. 
Expected: {}, Found: {}", + proposal_type, + delegator_vote + ); + continue; + } + } + } + } + + // Proposal passes if 2/3 of total voting power voted Yay + if total_yay_staked_tokens >= (total_stake / 3) * 2 { + let tally_result = match proposal_type { + ProposalType::Default(_) => { + TallyResult::Passed(Tally::Default) + } + ProposalType::ETHBridge => { + TallyResult::Passed(Tally::ETHBridge) + } + _ => { + return Err(Error::Tally(format!( + "Unexpected proposal type: {}", + proposal_type + ))); + } + }; + + Ok(ProposalResult { + result: tally_result, + total_voting_power: total_stake, + total_yay_power: total_yay_staked_tokens, + total_nay_power: 0, + }) + } else { + Ok(ProposalResult { + result: TallyResult::Rejected, + total_voting_power: total_stake, + total_yay_power: total_yay_staked_tokens, + total_nay_power: 0, + }) } } - } + ProposalType::PGFCouncil => { + let mut total_yay_staked_tokens = HashMap::new(); + for (_, (amount, validator_vote)) in yay_validators.iter() { + if let ProposalVote::Yay(VoteType::PGFCouncil(votes)) = + validator_vote + { + for v in votes { + *total_yay_staked_tokens.entry(v).or_insert(0) += + amount; + } + } else { + // Log the error and continue + tracing::error!( + "Unexpected vote type. Expected: PGFCouncil, Found: {}", + validator_vote + ); + continue; + } + } - // NAY: Remove delegator amount whose validator validator vote yay - for (_, vote_map) in nay_delegators.iter() { - for (validator_address, vote_power) in vote_map.iter() { - if yay_validators.contains_key(validator_address) { - total_yay_staked_tokens -= vote_power; + // YAY: Add delegator amount whose validator didn't vote / voted nay + // or adjust voting power if delegator voted yay with a + // different memo + for (_, vote_map) in delegators.iter() { + for (validator_address, (vote_power, delegator_vote)) in + vote_map.iter() + { + match delegator_vote { + ProposalVote::Yay(VoteType::PGFCouncil( + delegator_votes, + )) => { + match yay_validators.get(validator_address) { + Some((_, validator_vote)) => { + if let ProposalVote::Yay( + VoteType::PGFCouncil(validator_votes), + ) = validator_vote + { + for vote in validator_votes + .symmetric_difference( + delegator_votes, + ) + { + if validator_votes.contains(vote) { + // Delegator didn't vote for + // this, reduce voting power + if let Some(power) = + total_yay_staked_tokens + .get_mut(vote) + { + *power -= vote_power; + } else { + return Err(Error::Tally( + format!( + "Expected PGF \ + vote {:?} was \ + not in tally", + vote + ), + )); + } + } else { + // Validator didn't vote for + // this, add voting power + *total_yay_staked_tokens + .entry(vote) + .or_insert(0) += vote_power; + } + } + } else { + // Log the error and continue + tracing::error!( + "Unexpected vote type. 
Expected: \ + PGFCouncil, Found: {}", + validator_vote + ); + continue; + } + } + None => { + // Validator didn't vote or voted nay, add + // delegator vote + + for vote in delegator_votes { + *total_yay_staked_tokens + .entry(vote) + .or_insert(0) += vote_power; + } + } + } + } + ProposalVote::Nay => { + for ( + validator_address, + (vote_power, _delegator_vote), + ) in vote_map.iter() + { + if let Some((_, validator_vote)) = + yay_validators.get(validator_address) + { + if let ProposalVote::Yay( + VoteType::PGFCouncil(votes), + ) = validator_vote + { + for vote in votes { + if let Some(power) = + total_yay_staked_tokens + .get_mut(vote) + { + *power -= vote_power; + } else { + return Err(Error::Tally( + format!( + "Expected PGF vote \ + {:?} was not in tally", + vote + ), + )); + } + } + } else { + // Log the error and continue + tracing::error!( + "Unexpected vote type. Expected: \ + PGFCouncil, Found: {}", + validator_vote + ); + continue; + } + } + } + } + _ => { + // Log the error and continue + tracing::error!( + "Unexpected vote type. Expected: PGFCouncil, \ + Found: {}", + delegator_vote + ); + continue; + } + } + } + } + + // At least 1/3 of the total voting power must vote Yay + let total_yay_voted_power = total_yay_staked_tokens + .iter() + .fold(0, |acc, (_, vote_power)| acc + vote_power); + + match total_yay_voted_power.checked_mul(3) { + Some(v) if v < total_stake => Ok(ProposalResult { + result: TallyResult::Rejected, + total_voting_power: total_stake, + total_yay_power: total_yay_voted_power, + total_nay_power: 0, + }), + _ => { + // Select the winner council based on approval voting + // (majority) + let council = total_yay_staked_tokens + .into_iter() + .max_by(|a, b| a.1.cmp(&b.1)) + .map(|(vote, _)| vote.to_owned()) + .ok_or_else(|| { + Error::Tally( + "Missing expected elected council".to_string(), + ) + })?; + + Ok(ProposalResult { + result: TallyResult::Passed(Tally::PGFCouncil(council)), + total_voting_power: total_stake, + total_yay_power: total_yay_voted_power, + total_nay_power: 0, + }) + } } } } - - Ok(3 * total_yay_staked_tokens >= 2 * total_stake) } /// Prepare Votes structure to compute proposal tally @@ -138,10 +386,10 @@ where storage_api::iter_prefix::(storage, &vote_prefix_key)?; let mut yay_validators = HashMap::new(); - let mut yay_delegators: HashMap> = - HashMap::new(); - let mut nay_delegators: HashMap> = - HashMap::new(); + let mut delegators: HashMap< + Address, + HashMap, + > = HashMap::new(); for next_vote in vote_iter { let (vote_key, vote) = next_vote?; @@ -158,7 +406,8 @@ where .unwrap_or_default() .into(); - yay_validators.insert(voter_address.clone(), amount); + yay_validators + .insert(voter_address.clone(), (amount, vote)); } else if !validators.contains(voter_address) { let validator_address = gov_storage::get_vote_delegation_address(&vote_key); @@ -173,23 +422,13 @@ where .1; if amount != token::Amount::default() { - if vote.is_yay() { - let entry = yay_delegators - .entry(voter_address.to_owned()) - .or_default(); - entry.insert( - validator.to_owned(), - VotePower::from(amount), - ); - } else { - let entry = nay_delegators - .entry(voter_address.to_owned()) - .or_default(); - entry.insert( - validator.to_owned(), - VotePower::from(amount), - ); - } + let entry = delegators + .entry(voter_address.to_owned()) + .or_default(); + entry.insert( + validator.to_owned(), + (VotePower::from(amount), vote), + ); } } None => continue, @@ -202,8 +441,7 @@ where Ok(Votes { yay_validators, - yay_delegators, - nay_delegators, + delegators, }) } diff 
--git a/shared/src/ledger/native_vp/mod.rs b/shared/src/ledger/native_vp/mod.rs index 231405dde54..f1fbf9944ab 100644 --- a/shared/src/ledger/native_vp/mod.rs +++ b/shared/src/ledger/native_vp/mod.rs @@ -3,6 +3,7 @@ pub mod governance; pub mod parameters; +pub mod replay_protection; pub mod slash_fund; use std::cell::RefCell; @@ -19,7 +20,9 @@ use crate::ledger::storage::{Storage, StorageHasher}; use crate::proto::Tx; use crate::types::address::{Address, InternalAddress}; use crate::types::hash::Hash; -use crate::types::storage::{BlockHash, BlockHeight, Epoch, Key, TxIndex}; +use crate::types::storage::{ + BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, +}; use crate::vm::prefix_iter::PrefixIterators; use crate::vm::WasmCacheAccess; @@ -236,6 +239,13 @@ where self.ctx.get_block_height() } + fn get_block_header( + &self, + height: BlockHeight, + ) -> Result, storage_api::Error> { + self.ctx.get_block_header(height) + } + fn get_block_hash(&self) -> Result { self.ctx.get_block_hash() } @@ -320,6 +330,13 @@ where self.ctx.get_block_height() } + fn get_block_header( + &self, + height: BlockHeight, + ) -> Result, storage_api::Error> { + self.ctx.get_block_header(height) + } + fn get_block_hash(&self) -> Result { self.ctx.get_block_hash() } @@ -396,6 +413,18 @@ where .into_storage_result() } + fn get_block_header( + &self, + height: BlockHeight, + ) -> Result, storage_api::Error> { + vp_host_fns::get_block_header( + &mut self.gas_meter.borrow_mut(), + self.storage, + height, + ) + .into_storage_result() + } + fn get_block_hash(&self) -> Result { vp_host_fns::get_block_hash( &mut self.gas_meter.borrow_mut(), @@ -443,7 +472,7 @@ where fn eval( &self, - vp_code: Vec, + vp_code_hash: Hash, input_data: Vec, ) -> Result { #[cfg(feature = "wasm-runtime")] @@ -479,7 +508,8 @@ where #[cfg(not(feature = "mainnet"))] false, ); - match eval_runner.eval_native_result(ctx, vp_code, input_data) { + match eval_runner.eval_native_result(ctx, vp_code_hash, input_data) + { Ok(result) => Ok(result), Err(err) => { tracing::warn!( @@ -494,7 +524,7 @@ where #[cfg(not(feature = "wasm-runtime"))] { // This line is here to prevent unused var clippy warning - let _ = (vp_code, input_data); + let _ = (vp_code_hash, input_data); unimplemented!( "The \"wasm-runtime\" feature must be enabled to use the \ `eval` function." diff --git a/shared/src/ledger/native_vp/replay_protection.rs b/shared/src/ledger/native_vp/replay_protection.rs new file mode 100644 index 00000000000..3e3c4b7ca0b --- /dev/null +++ b/shared/src/ledger/native_vp/replay_protection.rs @@ -0,0 +1,54 @@ +//! Native VP for replay protection + +use std::collections::BTreeSet; + +use namada_core::ledger::storage; +use namada_core::types::address::{Address, InternalAddress}; +use namada_core::types::storage::Key; +use thiserror::Error; + +use crate::ledger::native_vp::{self, Ctx, NativeVp}; +use crate::vm::WasmCacheAccess; + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("Native VP error: {0}")] + NativeVpError(#[from] native_vp::Error), +} + +/// ReplayProtection functions result +pub type Result = std::result::Result; + +/// Replay Protection VP +pub struct ReplayProtectionVp<'a, DB, H, CA> +where + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: storage::StorageHasher, + CA: WasmCacheAccess, +{ + /// Context to interact with the host structures. 
+ pub ctx: Ctx<'a, DB, H, CA>, +} + +impl<'a, DB, H, CA> NativeVp for ReplayProtectionVp<'a, DB, H, CA> +where + DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, + H: 'static + storage::StorageHasher, + CA: 'static + WasmCacheAccess, +{ + type Error = Error; + + const ADDR: InternalAddress = InternalAddress::ReplayProtection; + + fn validate_tx( + &self, + _tx_data: &[u8], + _keys_changed: &BTreeSet, + _verifiers: &BTreeSet
, + ) -> Result { + // VP should prevent any modification of the subspace. + // Changes are only allowed from protocol + Ok(false) + } +} diff --git a/shared/src/ledger/pos/mod.rs b/shared/src/ledger/pos/mod.rs index 22a681be01d..12d879e2f01 100644 --- a/shared/src/ledger/pos/mod.rs +++ b/shared/src/ledger/pos/mod.rs @@ -4,30 +4,26 @@ pub mod vp; pub use namada_core::ledger::storage_api; use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; +use namada_core::types::address; pub use namada_core::types::key::common; pub use namada_core::types::token; pub use namada_proof_of_stake; pub use namada_proof_of_stake::parameters::PosParams; pub use namada_proof_of_stake::storage::*; -pub use namada_proof_of_stake::types; +pub use namada_proof_of_stake::{staking_token_address, types}; use rust_decimal::Decimal; pub use vp::PosVP; -use crate::types::address::{self, Address, InternalAddress}; +use crate::types::address::{Address, InternalAddress}; use crate::types::storage::Epoch; /// Address of the PoS account implemented as a native VP -pub const ADDRESS: Address = Address::Internal(InternalAddress::PoS); +pub const ADDRESS: Address = address::POS; /// Address of the PoS slash pool account pub const SLASH_POOL_ADDRESS: Address = Address::Internal(InternalAddress::PosSlashPool); -/// Address of the staking token (NAM) -pub fn staking_token_address() -> Address { - address::nam() -} - /// Calculate voting power in the tendermint context (which is stored as i64) /// from the number of tokens pub fn into_tm_voting_power( diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index cfd416c14d5..4090c05a4d8 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -10,6 +10,7 @@ use crate::ledger::gas::{self, BlockGasMeter, VpGasMeter}; use crate::ledger::ibc::vp::{Ibc, IbcToken}; use crate::ledger::native_vp::governance::GovernanceVp; use crate::ledger::native_vp::parameters::{self, ParametersVp}; +use crate::ledger::native_vp::replay_protection::ReplayProtectionVp; use crate::ledger::native_vp::slash_fund::SlashFundVp; use crate::ledger::native_vp::{self, NativeVp}; use crate::ledger::pos::{self, PosVP}; @@ -17,6 +18,7 @@ use crate::ledger::storage::write_log::WriteLog; use crate::ledger::storage::{DBIter, Storage, StorageHasher, DB}; use crate::proto::{self, Tx}; use crate::types::address::{Address, InternalAddress}; +use crate::types::hash::Hash; use crate::types::storage; use crate::types::storage::TxIndex; use crate::types::transaction::{DecryptedTx, TxResult, TxType, VpsResult}; @@ -56,6 +58,10 @@ pub enum Error { SlashFundNativeVpError(crate::ledger::native_vp::slash_fund::Error), #[error("Ethereum bridge native VP error: {0}")] EthBridgeNativeVpError(crate::ledger::eth_bridge::vp::Error), + #[error("Replay protection native VP error: {0}")] + ReplayProtectionNativeVpError( + crate::ledger::native_vp::replay_protection::Error, + ), #[error("Access to an internal address {0} is forbidden")] AccessForbidden(InternalAddress), } @@ -124,14 +130,14 @@ where .map_err(Error::GasError)?; let initialized_accounts = write_log.get_initialized_accounts(); let changed_keys = write_log.get_keys(); - let ibc_event = write_log.take_ibc_event(); + let ibc_events = write_log.take_ibc_events(); Ok(TxResult { gas_used, changed_keys, vps_result, initialized_accounts, - ibc_event, + ibc_events, }) } _ => { @@ -161,9 +167,6 @@ where H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, { - gas_meter - 
.add_compiling_fee(tx.code.len()) - .map_err(Error::GasError)?; let empty = vec![]; let tx_data = tx.data.as_ref().unwrap_or(&empty); wasm::run::tx( @@ -171,7 +174,7 @@ where write_log, gas_meter, tx_index, - &tx.code, + &tx.code_or_hash, tx_data, vp_wasm_cache, tx_wasm_cache, @@ -252,19 +255,20 @@ where let mut gas_meter = VpGasMeter::new(initial_gas); let accept = match &addr { Address::Implicit(_) | Address::Established(_) => { - let (vp, gas) = storage + let (vp_hash, gas) = storage .validity_predicate(addr) .map_err(Error::StorageError)?; gas_meter.add(gas).map_err(Error::GasError)?; - let vp = - vp.ok_or_else(|| Error::MissingAddress(addr.clone()))?; - - gas_meter - .add_compiling_fee(vp.len()) - .map_err(Error::GasError)?; + let vp_code_hash = match vp_hash { + Some(v) => Hash::try_from(&v[..]) + .map_err(|_| Error::MissingAddress(addr.clone()))?, + None => { + return Err(Error::MissingAddress(addr.clone())); + } + }; wasm::run::vp( - vp, + &vp_code_hash, tx, tx_index, addr, @@ -389,6 +393,16 @@ where gas_meter = bridge.ctx.gas_meter.into_inner(); result } + InternalAddress::ReplayProtection => { + let replay_protection_vp = + ReplayProtectionVp { ctx }; + let result = replay_protection_vp + .validate_tx(tx_data, &keys_changed, &verifiers) + .map_err(Error::ReplayProtectionNativeVpError); + gas_meter = + replay_protection_vp.ctx.gas_meter.into_inner(); + result + } }; accepted diff --git a/shared/src/ledger/queries/mod.rs b/shared/src/ledger/queries/mod.rs index 565100b81b4..bec728188c7 100644 --- a/shared/src/ledger/queries/mod.rs +++ b/shared/src/ledger/queries/mod.rs @@ -15,6 +15,7 @@ pub use vp::{Pos, Vp}; use super::storage::{DBIter, StorageHasher, DB}; use super::storage_api; +use crate::tendermint_rpc::error::Error as RpcError; use crate::types::storage::BlockHeight; #[macro_use] @@ -91,71 +92,11 @@ pub fn require_no_data(request: &RequestQuery) -> storage_api::Result<()> { Ok(()) } -#[cfg(any(feature = "tendermint-rpc", feature = "tendermint-rpc-abcipp",))] -/// Provides [`Client`] implementation for Tendermint RPC client -pub mod tm { - use thiserror::Error; - - use super::*; - use crate::types::storage::BlockHeight; - - #[allow(missing_docs)] - #[derive(Error, Debug)] - pub enum Error { - #[error("{0}")] - Tendermint(#[from] crate::tendermint_rpc::Error), - #[error("Decoding error: {0}")] - Decoding(#[from] std::io::Error), - #[error("Info log: {0}, error code: {1}")] - Query(String, u32), - #[error("Invalid block height: {0} (overflown i64)")] - InvalidHeight(BlockHeight), - } - - #[async_trait::async_trait(?Send)] - impl Client for crate::tendermint_rpc::HttpClient { - type Error = Error; - - async fn request( - &self, - path: String, - data: Option>, - height: Option, - prove: bool, - ) -> Result { - let data = data.unwrap_or_default(); - let height = height - .map(|height| { - crate::tendermint::block::Height::try_from(height.0) - .map_err(|_err| Error::InvalidHeight(height)) - }) - .transpose()?; - let response = crate::tendermint_rpc::Client::abci_query( - self, - // TODO open the private Path constructor in tendermint-rpc - Some(std::str::FromStr::from_str(&path).unwrap()), - data, - height, - prove, - ) - .await?; - use crate::tendermint::abci::Code; - match response.code { - Code::Ok => Ok(EncodedResponseQuery { - data: response.value, - info: response.info, - proof: response.proof, - }), - Code::Err(code) => Err(Error::Query(response.info, code)), - } - } - } -} - /// Queries testing helpers #[cfg(any(test, feature = "testing"))] mod testing { use 
tempfile::TempDir; + use tendermint_rpc::Response; use super::*; use crate::ledger::events::log::EventLog; @@ -245,5 +186,12 @@ mod testing { let response = self.rpc.handle(ctx, &request).unwrap(); Ok(response) } + + async fn perform(&self, _request: R) -> Result + where + R: tendermint_rpc::SimpleRequest, + { + Response::from_string("TODO") + } } } diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs index 43a51a1b0f2..37e6fe38bb1 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/shared/src/ledger/queries/shell.rs @@ -4,7 +4,7 @@ use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::Node; use namada_core::types::address::Address; use namada_core::types::hash::Hash; -use namada_core::types::storage::BlockResults; +use namada_core::types::storage::{BlockResults, KeySeg}; use crate::ledger::events::log::dumb_queries; use crate::ledger::events::Event; @@ -117,12 +117,13 @@ where ctx.wl_storage.storage.block.height.0 as usize + 1 ]; iter.for_each(|(key, value, _gas)| { - let key = key - .parse::() - .expect("expected integer for block height"); + let key = u64::parse(key).expect("expected integer for block height"); let value = BlockResults::try_from_slice(&value) .expect("expected BlockResults bytes"); - results[key] = value; + let idx: usize = key + .try_into() + .expect("expected block height to fit into usize"); + results[idx] = value; }); Ok(results) } @@ -344,15 +345,16 @@ where #[cfg(test)] mod test { use borsh::BorshDeserialize; + use namada_test_utils::TestWasms; use crate::ledger::queries::testing::TestClient; use crate::ledger::queries::RPC; use crate::ledger::storage_api::{self, StorageWrite}; use crate::proto::Tx; + use crate::types::hash::Hash; + use crate::types::storage::Key; use crate::types::{address, token}; - const TX_NO_OP_WASM: &str = "../wasm_for_tests/tx_no_op.wasm"; - #[test] fn test_shell_queries_router_paths() { let path = RPC.shell().epoch_path(); @@ -379,6 +381,11 @@ mod test { { // Initialize the `TestClient` let mut client = TestClient::new(RPC); + // store the wasm code + let tx_no_op = TestWasms::TxNoOp.read_bytes(); + let tx_hash = Hash::sha256(&tx_no_op); + let key = Key::wasm_code(&tx_hash); + client.wl_storage.storage.write(&key, &tx_no_op).unwrap(); // Request last committed epoch let read_epoch = RPC.shell().epoch(&client).await.unwrap(); @@ -386,8 +393,12 @@ mod test { assert_eq!(current_epoch, read_epoch); // Request dry run tx - let tx_no_op = std::fs::read(TX_NO_OP_WASM).expect("cannot load wasm"); - let tx = Tx::new(tx_no_op, None); + let tx = Tx::new( + tx_hash.to_vec(), + None, + client.wl_storage.storage.chain_id.clone(), + None, + ); let tx_bytes = tx.to_bytes(); let result = RPC .shell() diff --git a/shared/src/ledger/queries/types.rs b/shared/src/ledger/queries/types.rs index fd65edcfd92..04e26d1ce31 100644 --- a/shared/src/ledger/queries/types.rs +++ b/shared/src/ledger/queries/types.rs @@ -1,15 +1,23 @@ use namada_core::ledger::storage::WlStorage; +use tendermint::block::Height; +use tendermint_rpc::endpoint::{ + abci_info, block, block_results, blockchain, commit, consensus_params, + consensus_state, health, net_info, status, +}; +use tendermint_rpc::query::Query; +use tendermint_rpc::Order; +use thiserror::Error; use crate::ledger::events::log::EventLog; use crate::ledger::storage::{DBIter, StorageHasher, DB}; use crate::ledger::storage_api; use crate::tendermint::merkle::proof::Proof; +use crate::tendermint_rpc::error::Error as RpcError; use crate::types::storage::BlockHeight; 
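The `read_results` handler above decodes each stored `BlockResults` entry and slots it into a vector indexed by block height. A minimal standalone sketch of that indexing scheme, assuming string-keyed entries; `fill_results` and its types are illustrative, not part of the codebase:

    fn fill_results(
        entries: Vec<(String, Vec<u8>)>,
        last_height: u64,
    ) -> Vec<Vec<u8>> {
        // One slot per block height, up to and including the last committed one.
        let mut results = vec![Vec::new(); last_height as usize + 1];
        for (key, value) in entries {
            // Keys under this storage prefix are decimal block heights.
            let height: u64 =
                key.parse().expect("expected integer for block height");
            let idx: usize = height
                .try_into()
                .expect("expected block height to fit into usize");
            results[idx] = value;
        }
        results
    }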
#[cfg(feature = "wasm-runtime")] use crate::vm::wasm::{TxCache, VpCache}; #[cfg(feature = "wasm-runtime")] use crate::vm::WasmCacheRoAccess; - /// A request context provides read-only access to storage and WASM compilation /// caches to request handlers. #[derive(Debug, Clone)] @@ -96,6 +104,250 @@ pub trait Client { height: Option, prove: bool, ) -> Result; + + /// `/abci_info`: get information about the ABCI application. + async fn abci_info(&self) -> Result { + Ok(self.perform(abci_info::Request).await?.response) + } + + /// `/broadcast_tx_sync`: broadcast a transaction, returning the response + /// from `CheckTx`. + async fn broadcast_tx_sync( + &self, + tx: tendermint::abci::Transaction, + ) -> Result + { + self.perform( + tendermint_rpc::endpoint::broadcast::tx_sync::Request::new(tx), + ) + .await + } + + /// `/block`: get the latest block. + async fn latest_block(&self) -> Result { + self.perform(block::Request::default()).await + } + + /// `/block`: get block at a given height. + async fn block(&self, height: H) -> Result + where + H: Into + Send, + { + self.perform(block::Request::new(height.into())).await + } + + /// `/block_search`: search for blocks by BeginBlock and EndBlock events. + async fn block_search( + &self, + query: Query, + page: u32, + per_page: u8, + order: Order, + ) -> Result + { + self.perform(tendermint_rpc::endpoint::block_search::Request::new( + query, page, per_page, order, + )) + .await + } + + /// `/block_results`: get ABCI results for a block at a particular height. + async fn block_results( + &self, + height: H, + ) -> Result + where + H: Into + Send, + { + self.perform(tendermint_rpc::endpoint::block_results::Request::new( + height.into(), + )) + .await + } + + /// `/tx_search`: search for transactions with their results. + async fn tx_search( + &self, + query: Query, + prove: bool, + page: u32, + per_page: u8, + order: Order, + ) -> Result { + self.perform(tendermint_rpc::endpoint::tx_search::Request::new( + query, prove, page, per_page, order, + )) + .await + } + + /// `/abci_query`: query the ABCI application + async fn abci_query( + &self, + path: Option, + data: V, + height: Option, + prove: bool, + ) -> Result + where + V: Into> + Send, + { + Ok(self + .perform(tendermint_rpc::endpoint::abci_query::Request::new( + path, data, height, prove, + )) + .await? + .response) + } + + /// `/block_results`: get ABCI results for the latest block. + async fn latest_block_results( + &self, + ) -> Result { + self.perform(block_results::Request::default()).await + } + + /// `/blockchain`: get block headers for `min` <= `height` <= `max`. + /// + /// Block headers are returned in descending order (highest first). + /// + /// Returns at most 20 items. + async fn blockchain( + &self, + min: H, + max: H, + ) -> Result + where + H: Into + Send, + { + // TODO(tarcieri): return errors for invalid params before making + // request? + self.perform(blockchain::Request::new(min.into(), max.into())) + .await + } + + /// `/commit`: get block commit at a given height. + async fn commit(&self, height: H) -> Result + where + H: Into + Send, + { + self.perform(commit::Request::new(height.into())).await + } + + /// `/consensus_params`: get current consensus parameters at the specified + /// height. 
+ async fn consensus_params( + &self, + height: H, + ) -> Result + where + H: Into + Send, + { + self.perform(consensus_params::Request::new(Some(height.into()))) + .await + } + + /// `/consensus_state`: get current consensus state + async fn consensus_state( + &self, + ) -> Result { + self.perform(consensus_state::Request::new()).await + } + + /// `/consensus_params`: get the latest consensus parameters. + async fn latest_consensus_params( + &self, + ) -> Result { + self.perform(consensus_params::Request::new(None)).await + } + + /// `/commit`: get the latest block commit + async fn latest_commit(&self) -> Result { + self.perform(commit::Request::default()).await + } + + /// `/health`: get node health. + /// + /// Returns empty result (200 OK) on success, no response in case of an + /// error. + async fn health(&self) -> Result<(), Error> { + self.perform(health::Request).await?; + Ok(()) + } + + /// `/net_info`: obtain information about P2P and other network connections. + async fn net_info(&self) -> Result { + self.perform(net_info::Request).await + } + + /// `/status`: get Tendermint status including node info, pubkey, latest + /// block hash, app hash, block height and time. + async fn status(&self) -> Result { + self.perform(status::Request).await + } + + /// Perform a request against the RPC endpoint + async fn perform(&self, request: R) -> Result + where + R: tendermint_rpc::SimpleRequest; +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("{0}")] + Tendermint(#[from] tendermint_rpc::Error), + #[error("Decoding error: {0}")] + Decoding(#[from] std::io::Error), + #[error("Info log: {0}, error code: {1}")] + Query(String, u32), + #[error("Invalid block height: {0} (overflown i64)")] + InvalidHeight(BlockHeight), +} + +#[async_trait::async_trait(?Send)] +impl Client for C { + type Error = Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height + .map(|height| { + tendermint::block::Height::try_from(height.0) + .map_err(|_err| Error::InvalidHeight(height)) + }) + .transpose()?; + let response = self + .abci_query( + // TODO open the private Path constructor in tendermint-rpc + Some(std::str::FromStr::from_str(&path).unwrap()), + data, + height, + prove, + ) + .await?; + use tendermint::abci::Code; + match response.code { + Code::Ok => Ok(EncodedResponseQuery { + data: response.value, + info: response.info, + proof: response.proof, + }), + Code::Err(code) => Err(Error::Query(response.info, code)), + } + } + + async fn perform(&self, request: R) -> Result + where + R: tendermint_rpc::SimpleRequest, + { + tendermint_rpc::Client::perform(self, request).await + } } /// Temporary domain-type for `tendermint_proto::abci::RequestQuery`, copied diff --git a/shared/src/ledger/rpc.rs b/shared/src/ledger/rpc.rs new file mode 100644 index 00000000000..fe16b58061e --- /dev/null +++ b/shared/src/ledger/rpc.rs @@ -0,0 +1,887 @@ +//! 
SDK RPC queries +use std::collections::{HashMap, HashSet}; + +use borsh::BorshDeserialize; +use masp_primitives::asset_type::AssetType; +use masp_primitives::merkle_tree::MerklePath; +use masp_primitives::sapling::Node; +use namada_core::ledger::testnet_pow; +use namada_core::types::address::Address; +use namada_core::types::storage::Key; +use namada_core::types::token::Amount; +use namada_proof_of_stake::types::CommissionPair; +use serde::Serialize; +use tokio::time::Duration; + +use crate::ledger::events::Event; +use crate::ledger::governance::parameters::GovParams; +use crate::ledger::governance::storage as gov_storage; +use crate::ledger::native_vp::governance::utils::Votes; +use crate::ledger::queries::RPC; +use crate::proto::Tx; +use crate::tendermint::merkle::proof::Proof; +use crate::tendermint_rpc::error::Error as TError; +use crate::tendermint_rpc::query::Query; +use crate::tendermint_rpc::Order; +use crate::types::governance::{ProposalVote, VotePower}; +use crate::types::hash::Hash; +use crate::types::key::*; +use crate::types::storage::{BlockHeight, BlockResults, Epoch, PrefixValue}; +use crate::types::token::balance_key; +use crate::types::{storage, token}; + +/// Query the status of a given transaction. +/// +/// If a response is not delivered by `deadline`, the process exits with an +/// error. +pub async fn query_tx_status( + client: &C, + status: TxEventQuery<'_>, + deadline: Duration, +) -> Event { + const ONE_SECOND: Duration = Duration::from_secs(1); + // sleep for the duration of `backoff`, + // and update the underlying value + async fn sleep_update(query: TxEventQuery<'_>, backoff: &mut Duration) { + tracing::debug!( + ?query, + duration = ?backoff, + "Retrying tx status query after timeout", + ); + // simple linear backoff - if an event is not available, + // increase the backoff duration by one second + async_std::task::sleep(*backoff).await; + *backoff += ONE_SECOND; + } + + let mut backoff = ONE_SECOND; + loop { + tracing::debug!(query = ?status, "Querying tx status"); + let maybe_event = match query_tx_events(client, status).await { + Ok(response) => response, + Err(_err) => { + // tracing::debug!(%err, "ABCI query failed"); + sleep_update(status, &mut backoff).await; + continue; + } + }; + if let Some(e) = maybe_event { + break e; + } else if deadline < backoff { + panic!( + "Transaction status query deadline of {deadline:?} exceeded" + ); + } else { + sleep_update(status, &mut backoff).await; + } + } +} + +/// Query the epoch of the last committed block +pub async fn query_epoch( + client: &C, +) -> Epoch { + let epoch = unwrap_client_response::(RPC.shell().epoch(client).await); + epoch +} + +/// Query the last committed block +pub async fn query_block( + client: &C, +) -> crate::tendermint_rpc::endpoint::block::Response { + let response = client.latest_block().await.unwrap(); + println!( + "Last committed block ID: {}, height: {}, time: {}", + response.block_id, + response.block.header.height, + response.block.header.time + ); + response +} + +/// A helper to unwrap a client's response. Shuts down the process on error. +fn unwrap_client_response( + response: Result, +) -> T { + response.unwrap_or_else(|_err| { + panic!("Error in the query"); + }) +} + +/// Query the results of the last committed block +pub async fn query_results( + client: &C, +) -> Vec { + unwrap_client_response::(RPC.shell().read_results(client).await) +} + +/// Query the token amount of an owner.
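The status-polling loop in `query_tx_status` above retries with a simple linear backoff: every miss sleeps for the current backoff and then grows it by one second, until an event arrives or the deadline is exceeded. A self-contained sketch of the same pattern, written synchronously with a stand-in `poll` closure (the real code is async and queries tx events over RPC):

    use std::time::Duration;

    fn wait_with_linear_backoff<T>(
        mut poll: impl FnMut() -> Option<T>,
        deadline: Duration,
    ) -> T {
        const ONE_SECOND: Duration = Duration::from_secs(1);
        let mut backoff = ONE_SECOND;
        loop {
            if let Some(event) = poll() {
                return event;
            }
            if deadline < backoff {
                panic!("status query deadline of {deadline:?} exceeded");
            }
            // Linear backoff: sleep for the current interval, then widen it.
            std::thread::sleep(backoff);
            backoff += ONE_SECOND;
        }
    }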
+pub async fn get_token_balance( + client: &C, + token: &Address, + owner: &Address, +) -> Option { + let balance_key = balance_key(token, owner); + query_storage_value(client, &balance_key).await +} + +/// Get account's public key stored in its storage sub-space +pub async fn get_public_key( + client: &C, + address: &Address, +) -> Option { + let key = pk_key(address); + query_storage_value(client, &key).await +} + +/// Check if the given address is a known validator. +pub async fn is_validator( + client: &C, + address: &Address, +) -> bool { + unwrap_client_response::( + RPC.vp().pos().is_validator(client, address).await, + ) +} + +/// Check if a given address is a known delegator +pub async fn is_delegator( + client: &C, + address: &Address, +) -> bool { + unwrap_client_response::( + RPC.vp().pos().is_delegator(client, address, &None).await, + ) +} + +/// Check if a given address is a known delegator at the given epoch +pub async fn is_delegator_at( + client: &C, + address: &Address, + epoch: Epoch, +) -> bool { + unwrap_client_response::( + RPC.vp() + .pos() + .is_delegator(client, address, &Some(epoch)) + .await, + ) +} + +/// Check if the address exists on chain. Established address exists if it has a +/// stored validity predicate. Implicit and internal addresses always return +/// true. +pub async fn known_address( + client: &C, + address: &Address, +) -> bool { + match address { + Address::Established(_) => { + // Established account exists if it has a VP + let key = storage::Key::validity_predicate(address); + query_has_storage_key(client, &key).await + } + Address::Implicit(_) | Address::Internal(_) => true, + } +} + +#[cfg(not(feature = "mainnet"))] +/// Check if the given address is a testnet faucet account address. +pub async fn is_faucet_account( + client: &C, + address: &Address, +) -> bool { + unwrap_client_response::(RPC.vp().is_faucet(client, address).await) +} + +#[cfg(not(feature = "mainnet"))] +/// Get faucet account address, if any is setup for the network. +pub async fn get_faucet_address( + client: &C, +) -> Option
{ + unwrap_client_response::>( + RPC.vp().get_faucet_address(client).await, + ) +} + +#[cfg(not(feature = "mainnet"))] +/// Obtain a PoW challenge for a withdrawal from a testnet faucet account, if +/// any is setup for the network. +pub async fn get_testnet_pow_challenge< + C: crate::ledger::queries::Client + Sync, +>( + client: &C, + source: Address, +) -> testnet_pow::Challenge { + unwrap_client_response::( + RPC.vp().testnet_pow_challenge(client, source).await, + ) +} + +/// Query a conversion. +pub async fn query_conversion( + client: &C, + asset_type: AssetType, +) -> Option<( + Address, + Epoch, + masp_primitives::transaction::components::Amount, + MerklePath, +)> { + Some(unwrap_client_response::( + RPC.shell().read_conversion(client, &asset_type).await, + )) +} + +/// Query a wasm code hash +pub async fn query_wasm_code_hash( + client: &C, + code_path: impl AsRef, +) -> Option { + let hash_key = Key::wasm_hash(code_path.as_ref()); + match query_storage_value_bytes(client, &hash_key, None, false) + .await + .0 + { + Some(hash) => { + Some(Hash::try_from(&hash[..]).expect("Invalid code hash")) + } + None => { + eprintln!( + "The corresponding wasm code of the code path {} doesn't \ + exist on chain.", + code_path.as_ref(), + ); + None + } + } +} + +/// Query a storage value and decode it with [`BorshDeserialize`]. +pub async fn query_storage_value( + client: &C, + key: &storage::Key, +) -> Option +where + T: BorshDeserialize, + C: crate::ledger::queries::Client + Sync, +{ + // In case `T` is a unit (only thing that encodes to 0 bytes), we have to + // use `storage_has_key` instead of `storage_value`, because `storage_value` + // returns 0 bytes when the key is not found. + let maybe_unit = T::try_from_slice(&[]); + if let Ok(unit) = maybe_unit { + return if unwrap_client_response::( + RPC.shell().storage_has_key(client, key).await, + ) { + Some(unit) + } else { + None + }; + } + + let response = unwrap_client_response::( + RPC.shell() + .storage_value(client, None, None, false, key) + .await, + ); + if response.data.is_empty() { + return None; + } + T::try_from_slice(&response.data[..]) + .map(Some) + .unwrap_or_else(|err| { + panic!("Error decoding the value: {}", err); + }) +} + +/// Query a storage value and the proof without decoding. +pub async fn query_storage_value_bytes< + C: crate::ledger::queries::Client + Sync, +>( + client: &C, + key: &storage::Key, + height: Option, + prove: bool, +) -> (Option>, Option) { + let data = None; + let response = unwrap_client_response::( + RPC.shell() + .storage_value(client, data, height, prove, key) + .await, + ); + if response.data.is_empty() { + (None, response.proof) + } else { + (Some(response.data), response.proof) + } +} + +/// Query a range of storage values with a matching prefix and decode them with +/// [`BorshDeserialize`]. Returns an iterator of the storage keys paired with +/// their associated values. +pub async fn query_storage_prefix( + client: &C, + key: &storage::Key, +) -> Option> +where + T: BorshDeserialize, +{ + let values = unwrap_client_response::( + RPC.shell() + .storage_prefix(client, None, None, false, key) + .await, + ); + let decode = + |PrefixValue { key, value }: PrefixValue| match T::try_from_slice( + &value[..], + ) { + Err(err) => { + eprintln!( + "Skipping a value for key {}. 
Error in decoding: {}", + key, err + ); + None + } + Ok(value) => Some((key, value)), + }; + if values.data.is_empty() { + None + } else { + Some(values.data.into_iter().filter_map(decode)) + } +} + +/// Query to check if the given storage key exists. +pub async fn query_has_storage_key( + client: &C, + key: &storage::Key, +) -> bool { + unwrap_client_response::( + RPC.shell().storage_has_key(client, key).await, + ) +} + +/// Represents a query for an event pertaining to the specified transaction +#[derive(Debug, Copy, Clone)] +pub enum TxEventQuery<'a> { + /// Queries whether transaction with given hash was accepted + Accepted(&'a str), + /// Queries whether transaction with given hash was applied + Applied(&'a str), +} + +impl<'a> TxEventQuery<'a> { + /// The event type to which this event query pertains + pub fn event_type(self) -> &'static str { + match self { + TxEventQuery::Accepted(_) => "accepted", + TxEventQuery::Applied(_) => "applied", + } + } + + /// The transaction to which this event query pertains + pub fn tx_hash(self) -> &'a str { + match self { + TxEventQuery::Accepted(tx_hash) => tx_hash, + TxEventQuery::Applied(tx_hash) => tx_hash, + } + } +} + +/// Transaction event queries are semantically a subset of general queries +impl<'a> From<TxEventQuery<'a>> for Query { + fn from(tx_query: TxEventQuery<'a>) -> Self { + match tx_query { + TxEventQuery::Accepted(tx_hash) => { + Query::default().and_eq("accepted.hash", tx_hash) + } + TxEventQuery::Applied(tx_hash) => { + Query::default().and_eq("applied.hash", tx_hash) + } + } + } +} + +/// Call the corresponding `tx_event_query` RPC method to fetch +/// the current status of a transaction. +pub async fn query_tx_events( + client: &C, + tx_event_query: TxEventQuery<'_>, +) -> std::result::Result< + Option<Event>, + <C as crate::ledger::queries::Client>::Error, +> { + let tx_hash: Hash = tx_event_query.tx_hash().try_into().unwrap(); + match tx_event_query { + TxEventQuery::Accepted(_) => { + RPC.shell().accepted(client, &tx_hash).await + } + /*.wrap_err_with(|| { + eyre!("Failed querying whether a transaction was accepted") + })*/, + TxEventQuery::Applied(_) => RPC.shell().applied(client, &tx_hash).await, /*.wrap_err_with(|| { + eyre!("Error querying whether a transaction was applied") + })*/ + } +} + +/// Dry run a transaction +pub async fn dry_run_tx( + client: &C, + tx_bytes: Vec, +) -> namada_core::types::transaction::TxResult { + let (data, height, prove) = (Some(tx_bytes), None, false); + let result = unwrap_client_response::( + RPC.shell().dry_run_tx(client, data, height, prove).await, + ) + .data; + println! {"Dry-run result: {}", result}; + result +} + +/// Data needed for broadcasting a tx and +/// monitoring its progress on chain +/// +/// Txs may be either a dry run or else +/// they should be encrypted and included +/// in a wrapper. +#[derive(Debug, Clone)] +pub enum TxBroadcastData { + /// Dry run broadcast data + DryRun(Tx), + /// Wrapper broadcast data + Wrapper { + /// Transaction to broadcast + tx: Tx, + /// Hash of the wrapper transaction + wrapper_hash: String, + /// Hash of decrypted transaction + decrypted_hash: String, + }, +} + +/// A parsed event from tendermint relating to a transaction +#[derive(Debug, Serialize)] +pub struct TxResponse { + /// Response information + pub info: String, + /// Response log + pub log: String, + /// Block height + pub height: String, + /// Transaction hash + pub hash: String, + /// Response code + pub code: String, + /// Gas used + pub gas_used: String, + /// Initialized accounts + pub initialized_accounts: Vec<Address>
, +} + +impl TryFrom for TxResponse { + type Error = String; + + fn try_from(event: Event) -> Result { + fn missing_field_err(field: &str) -> String { + format!("Field \"{field}\" not present in event") + } + + let hash = event + .get("hash") + .ok_or_else(|| missing_field_err("hash"))? + .clone(); + let info = event + .get("info") + .ok_or_else(|| missing_field_err("info"))? + .clone(); + let log = event + .get("log") + .ok_or_else(|| missing_field_err("log"))? + .clone(); + let height = event + .get("height") + .ok_or_else(|| missing_field_err("height"))? + .clone(); + let code = event + .get("code") + .ok_or_else(|| missing_field_err("code"))? + .clone(); + let gas_used = event + .get("gas_used") + .ok_or_else(|| missing_field_err("gas_used"))? + .clone(); + let initialized_accounts = event + .get("initialized_accounts") + .map(String::as_str) + // TODO: fix finalize block, to return initialized accounts, + // even when we reject a tx? + .map_or(Ok(vec![]), |initialized_accounts| { + serde_json::from_str(initialized_accounts) + .map_err(|err| format!("JSON decode error: {err}")) + })?; + + Ok(TxResponse { + hash, + info, + log, + height, + code, + gas_used, + initialized_accounts, + }) + } +} + +impl TxResponse { + /// Convert an [`Event`] to a [`TxResponse`], or error out. + pub fn from_event(event: Event) -> Self { + event.try_into().unwrap_or_else(|err| { + panic!("Error fetching TxResponse: {err}"); + }) + } +} + +/// Lookup the full response accompanying the specified transaction event +// TODO: maybe remove this in favor of `query_tx_status` +pub async fn query_tx_response( + client: &C, + tx_query: TxEventQuery<'_>, +) -> Result { + // Find all blocks that apply a transaction with the specified hash + let blocks = &client + .block_search(tx_query.into(), 1, 255, Order::Ascending) + .await + .expect("Unable to query for transaction with given hash") + .blocks; + // Get the block results corresponding to a block to which + // the specified transaction belongs + let block = &blocks + .get(0) + .ok_or_else(|| { + TError::server( + "Unable to find a block applying the given transaction" + .to_string(), + ) + })? 
+ .block; + let response_block_results = client + .block_results(block.header.height) + .await + .expect("Unable to retrieve block containing transaction"); + // Search for the event where the specified transaction is + // applied to the blockchain + let query_event_opt = + response_block_results.end_block_events.and_then(|events| { + events + .iter() + .find(|event| { + event.type_str == tx_query.event_type() + && event.attributes.iter().any(|tag| { + tag.key.as_ref() == "hash" + && tag.value.as_ref() == tx_query.tx_hash() + }) + }) + .cloned() + }); + let query_event = query_event_opt.ok_or_else(|| { + TError::server( + "Unable to find the event corresponding to the specified \ + transaction" + .to_string(), + ) + })?; + // Reformat the event attributes so as to ease value extraction + let event_map: std::collections::HashMap<&str, &str> = query_event + .attributes + .iter() + .map(|tag| (tag.key.as_ref(), tag.value.as_ref())) + .collect(); + // Summarize the transaction results that we were searching for + let result = TxResponse { + info: event_map["info"].to_string(), + log: event_map["log"].to_string(), + height: event_map["height"].to_string(), + hash: event_map["hash"].to_string(), + code: event_map["code"].to_string(), + gas_used: event_map["gas_used"].to_string(), + initialized_accounts: serde_json::from_str( + event_map["initialized_accounts"], + ) + .unwrap_or_default(), + }; + Ok(result) +} + +/// Get the votes for a given proposal id +pub async fn get_proposal_votes( + client: &C, + epoch: Epoch, + proposal_id: u64, +) -> Votes { + let validators = get_all_validators(client, epoch).await; + + let vote_prefix_key = + gov_storage::get_proposal_vote_prefix_key(proposal_id); + let vote_iter = + query_storage_prefix::(client, &vote_prefix_key).await; + + let mut yay_validators: HashMap = + HashMap::new(); + let mut delegators: HashMap< + Address, + HashMap, + > = HashMap::new(); + + if let Some(vote_iter) = vote_iter { + for (key, vote) in vote_iter { + let voter_address = gov_storage::get_voter_address(&key) + .expect("Vote key should contain the voting address.") + .clone(); + if vote.is_yay() && validators.contains(&voter_address) { + let amount: VotePower = + get_validator_stake(client, epoch, &voter_address) + .await + .into(); + yay_validators.insert(voter_address, (amount, vote)); + } else if !validators.contains(&voter_address) { + let validator_address = + gov_storage::get_vote_delegation_address(&key) + .expect( + "Vote key should contain the delegation address.", + ) + .clone(); + let delegator_token_amount = get_bond_amount_at( + client, + &voter_address, + &validator_address, + epoch, + ) + .await; + if let Some(amount) = delegator_token_amount { + let entry = delegators.entry(voter_address).or_default(); + entry.insert( + validator_address, + (VotePower::from(amount), vote), + ); + } + } + } + } + + Votes { + yay_validators, + delegators, + } +} + +/// Get all validators in the given epoch +pub async fn get_all_validators( + client: &C, + epoch: Epoch, +) -> HashSet
{ + unwrap_client_response::( + RPC.vp() + .pos() + .validator_addresses(client, &Some(epoch)) + .await, + ) +} + +/// Get the total staked tokens in the given epoch +pub async fn get_total_staked_tokens< + C: crate::ledger::queries::Client + Sync, +>( + client: &C, + epoch: Epoch, +) -> token::Amount { + unwrap_client_response::( + RPC.vp().pos().total_stake(client, &Some(epoch)).await, + ) +} + +/// Get the given validator's stake at the given epoch +pub async fn get_validator_stake( + client: &C, + epoch: Epoch, + validator: &Address, +) -> token::Amount { + unwrap_client_response::( + RPC.vp() + .pos() + .validator_stake(client, validator, &Some(epoch)) + .await, + ) + .unwrap_or_default() +} + +/// Get the delegator's delegation +pub async fn get_delegators_delegation< + C: crate::ledger::queries::Client + Sync, +>( + client: &C, + address: &Address, +) -> HashSet
{ + unwrap_client_response::( + RPC.vp().pos().delegation_validators(client, address).await, + ) +} + +/// Query and return validator's commission rate and max commission rate change +/// per epoch +pub async fn query_commission_rate( + client: &C, + validator: &Address, + epoch: Option, +) -> Option { + unwrap_client_response::>( + RPC.vp() + .pos() + .validator_commission(client, validator, &epoch) + .await, + ) +} + +/// Query a validator's bonds for a given epoch +pub async fn query_bond( + client: &C, + source: &Address, + validator: &Address, + epoch: Option, +) -> token::Amount { + unwrap_client_response::( + RPC.vp().pos().bond(client, source, validator, &epoch).await, + ) +} + +/// Query and print a validator's unbonds +pub async fn query_and_print_unbonds< + C: crate::ledger::queries::Client + Sync, +>( + client: &C, + source: &Address, + validator: &Address, +) { + let unbonds = query_unbond_with_slashing(client, source, validator).await; + let current_epoch = query_epoch(client).await; + + let mut total_withdrawable = token::Amount::default(); + let mut not_yet_withdrawable = HashMap::::new(); + for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { + if withdraw_epoch <= current_epoch { + total_withdrawable += amount; + } else { + let withdrawable_amount = + not_yet_withdrawable.entry(withdraw_epoch).or_default(); + *withdrawable_amount += amount; + } + } + if total_withdrawable != token::Amount::default() { + println!("Total withdrawable now: {total_withdrawable}."); + } + if !not_yet_withdrawable.is_empty() { + println!("Current epoch: {current_epoch}.") + } + for (withdraw_epoch, amount) in not_yet_withdrawable { + println!( + "Amount {amount} withdrawable starting from epoch \ + {withdraw_epoch}." + ); + } +} + +/// Query withdrawable tokens in a validator account for a given epoch +pub async fn query_withdrawable_tokens< + C: crate::ledger::queries::Client + Sync, +>( + client: &C, + bond_source: &Address, + validator: &Address, + epoch: Option, +) -> token::Amount { + unwrap_client_response::( + RPC.vp() + .pos() + .withdrawable_tokens(client, bond_source, validator, &epoch) + .await, + ) +} + +/// Query all unbonds for a validator, applying slashes +pub async fn query_unbond_with_slashing< + C: crate::ledger::queries::Client + Sync, +>( + client: &C, + source: &Address, + validator: &Address, +) -> HashMap<(Epoch, Epoch), token::Amount> { + unwrap_client_response::>( + RPC.vp() + .pos() + .unbond_with_slashing(client, source, validator) + .await, + ) +} + +/// Get the governance parameters +pub async fn get_governance_parameters< + C: crate::ledger::queries::Client + Sync, +>( + client: &C, +) -> GovParams { + let key = gov_storage::get_max_proposal_code_size_key(); + let max_proposal_code_size = query_storage_value::(client, &key) + .await + .expect("Parameter should be defined."); + + let key = gov_storage::get_max_proposal_content_key(); + let max_proposal_content_size = query_storage_value::(client, &key) + .await + .expect("Parameter should be defined."); + + let key = gov_storage::get_min_proposal_fund_key(); + let min_proposal_fund = query_storage_value::(client, &key) + .await + .expect("Parameter should be defined."); + + let key = gov_storage::get_min_proposal_grace_epoch_key(); + let min_proposal_grace_epochs = query_storage_value::(client, &key) + .await + .expect("Parameter should be defined."); + + let key = gov_storage::get_min_proposal_period_key(); + let min_proposal_period = query_storage_value::(client, &key) + .await + 
.expect("Parameter should be definied."); + + let key = gov_storage::get_max_proposal_period_key(); + let max_proposal_period = query_storage_value::(client, &key) + .await + .expect("Parameter should be definied."); + + GovParams { + min_proposal_fund: u64::from(min_proposal_fund), + max_proposal_code_size, + min_proposal_period, + max_proposal_period, + max_proposal_content_size, + min_proposal_grace_epochs, + } +} + +/// Get the bond amount at the given epoch +pub async fn get_bond_amount_at( + client: &C, + delegator: &Address, + validator: &Address, + epoch: Epoch, +) -> Option { + let (_total, total_active) = unwrap_client_response::( + RPC.vp() + .pos() + .bond_with_slashing(client, delegator, validator, &Some(epoch)) + .await, + ); + Some(total_active) +} diff --git a/shared/src/ledger/signing.rs b/shared/src/ledger/signing.rs new file mode 100644 index 00000000000..e01e7656138 --- /dev/null +++ b/shared/src/ledger/signing.rs @@ -0,0 +1,268 @@ +//! Functions to sign transactions +use borsh::BorshSerialize; +use namada_core::ledger::parameters::storage as parameter_storage; +use namada_core::types::address::{Address, ImplicitAddress}; +use namada_core::types::token::{self, Amount}; +use namada_core::types::transaction::MIN_FEE; + +use crate::ledger::rpc::TxBroadcastData; +use crate::ledger::tx::Error; +use crate::ledger::wallet::{Wallet, WalletUtils}; +use crate::ledger::{args, rpc}; +use crate::proto::Tx; +use crate::types::key::*; +use crate::types::storage::Epoch; +use crate::types::transaction::{hash_tx, Fee, WrapperTx}; + +/// Find the public key for the given address and try to load the keypair +/// for it from the wallet. If the keypair is encrypted but a password is not +/// supplied, then it is interactively prompted. Errors if the key cannot be +/// found or loaded. +pub async fn find_keypair< + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, + addr: &Address, + password: Option, +) -> Result { + match addr { + Address::Established(_) => { + println!( + "Looking-up public key of {} from the ledger...", + addr.encode() + ); + let public_key = rpc::get_public_key(client, addr).await.ok_or( + Error::Other(format!( + "No public key found for the address {}", + addr.encode() + )), + )?; + wallet.find_key_by_pk(&public_key, password).map_err(|err| { + Error::Other(format!( + "Unable to load the keypair from the wallet for public \ + key {}. Failed with: {}", + public_key, err + )) + }) + } + Address::Implicit(ImplicitAddress(pkh)) => { + wallet.find_key_by_pkh(pkh, password).map_err(|err| { + Error::Other(format!( + "Unable to load the keypair from the wallet for the \ + implicit address {}. Failed with: {}", + addr.encode(), + err + )) + }) + } + Address::Internal(_) => other_err(format!( + "Internal address {} doesn't have any signing keys.", + addr + )), + } +} + +/// Carries types that can be directly/indirectly used to sign a transaction. +#[allow(clippy::large_enum_variant)] +#[derive(Clone)] +pub enum TxSigningKey { + /// Do not sign any transaction + None, + /// Obtain the actual keypair from wallet and use that to sign + WalletKeypair(common::SecretKey), + /// Obtain the keypair corresponding to given address from wallet and sign + WalletAddress(Address), + /// Directly use the given secret key to sign transactions + SecretKey(common::SecretKey), +} + +/// Given CLI arguments and some defaults, determine the rightful transaction +/// signer. 
Return the given signing key or public key of the given signer if +/// possible. If no explicit signer is given, use the `default`. If no +/// `default` is given, an `Error` is returned. +pub async fn tx_signer< + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, + args: &args::Tx, + default: TxSigningKey, +) -> Result { + // Override the default signing key source if possible + let default = if let Some(signing_key) = &args.signing_key { + TxSigningKey::WalletKeypair(signing_key.clone()) + } else if let Some(signer) = &args.signer { + TxSigningKey::WalletAddress(signer.clone()) + } else { + default + }; + // Now actually fetch the signing key and apply it + match default { + TxSigningKey::WalletKeypair(signing_key) => Ok(signing_key), + TxSigningKey::WalletAddress(signer) => { + let signer = signer; + let signing_key = find_keypair::( + client, + wallet, + &signer, + args.password.clone(), + ) + .await?; + // Check if the signer is an implicit account that needs to reveal + // its PK first + if matches!(signer, Address::Implicit(_)) { + let pk: common::PublicKey = signing_key.ref_to(); + super::tx::reveal_pk_if_needed::( + client, wallet, &pk, args, + ) + .await?; + } + Ok(signing_key) + } + TxSigningKey::SecretKey(signing_key) => { + // Check if the signing key needs to reveal its PK first + let pk: common::PublicKey = signing_key.ref_to(); + super::tx::reveal_pk_if_needed::(client, wallet, &pk, args) + .await?; + Ok(signing_key) + } + TxSigningKey::None => other_err( + "All transactions must be signed; please either specify the key \ + or the address from which to look up the signing key." + .to_string(), + ), + } +} + +/// Sign a transaction with a given signing key or public key of a given signer. +/// If no explicit signer is given, use the `default`. If no `default` is given, +/// an `Error` is returned. +/// +/// If this is not a dry run, the tx is put in a wrapper and returned along with +/// hashes needed for monitoring the tx on chain. +/// +/// If it is a dry run, it is not put in a wrapper, but returned as is. +pub async fn sign_tx< + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, + tx: Tx, + args: &args::Tx, + default: TxSigningKey, + #[cfg(not(feature = "mainnet"))] requires_pow: bool, +) -> Result { + let keypair = tx_signer::(client, wallet, args, default).await?; + let tx = tx.sign(&keypair); + + let epoch = rpc::query_epoch(client).await; + + let broadcast_data = if args.dry_run { + TxBroadcastData::DryRun(tx) + } else { + sign_wrapper( + client, + args, + epoch, + tx, + &keypair, + #[cfg(not(feature = "mainnet"))] + requires_pow, + ) + .await + }; + + Ok(broadcast_data) +} + +/// Create a wrapper tx from a normal tx. Get the hash of the +/// wrapper and its payload which is needed for monitoring its +/// progress on chain.
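As `tx_signer` above shows, the signer is resolved with a fixed precedence: an explicit signing key from the args wins, then an explicit signer address (looked up in the wallet), and only then the caller-supplied default. A reduced sketch of that precedence with the types simplified to strings; `resolve_signer` and `SigningSource` are illustrative only:

    #[derive(Clone, Debug, PartialEq)]
    enum SigningSource {
        Keypair(String),
        Address(String),
        None,
    }

    fn resolve_signer(
        cli_signing_key: Option<String>,
        cli_signer: Option<String>,
        default: SigningSource,
    ) -> SigningSource {
        if let Some(key) = cli_signing_key {
            // An explicit key always overrides the default.
            SigningSource::Keypair(key)
        } else if let Some(addr) = cli_signer {
            // An explicit signer address is resolved via the wallet later.
            SigningSource::Address(addr)
        } else {
            default
        }
    }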
+pub async fn sign_wrapper( + client: &C, + args: &args::Tx, + epoch: Epoch, + tx: Tx, + keypair: &common::SecretKey, + #[cfg(not(feature = "mainnet"))] requires_pow: bool, +) -> TxBroadcastData { + let fee_amount = if cfg!(feature = "mainnet") { + Amount::whole(MIN_FEE) + } else { + let wrapper_tx_fees_key = parameter_storage::get_wrapper_tx_fees_key(); + rpc::query_storage_value::( + client, + &wrapper_tx_fees_key, + ) + .await + .unwrap_or_default() + }; + let fee_token = args.fee_token.clone(); + let source = Address::from(&keypair.ref_to()); + let balance_key = token::balance_key(&fee_token, &source); + let balance = + rpc::query_storage_value::(client, &balance_key) + .await + .unwrap_or_default(); + + // todo: provide sdk clients an error if the fee balance is insufficient + + #[cfg(not(feature = "mainnet"))] + // A PoW solution can be used to allow zero-fee testnet transactions + let pow_solution: Option = { + // If the address derived from the keypair doesn't have enough balance + // to pay for the fee, allow to find a PoW solution instead. + if requires_pow || balance < fee_amount { + println!( + "The transaction requires the completion of a PoW challenge." + ); + // Obtain a PoW challenge for faucet withdrawal + let challenge = + rpc::get_testnet_pow_challenge(client, source).await; + + // Solve the solution, this blocks until a solution is found + let solution = challenge.solve(); + Some(solution) + } else { + None + } + }; + + let tx = { + WrapperTx::new( + Fee { + amount: fee_amount, + token: fee_token, + }, + keypair, + epoch, + args.gas_limit.clone(), + tx, + // TODO: Actually use the fetched encryption key + Default::default(), + #[cfg(not(feature = "mainnet"))] + pow_solution, + ) + }; + + // We use this to determine when the wrapper tx makes it on-chain + let wrapper_hash = hash_tx(&tx.try_to_vec().unwrap()).to_string(); + // We use this to determine when the decrypted inner tx makes it + // on-chain + let decrypted_hash = tx.tx_hash.to_string(); + TxBroadcastData::Wrapper { + tx: tx + .sign(keypair, args.chain_id.clone().unwrap(), args.expiration) + .expect("Wrapper tx signing keypair should be correct"), + wrapper_hash, + decrypted_hash, + } +} + +fn other_err(string: String) -> Result { + Err(Error::Other(string)) +} diff --git a/shared/src/ledger/tx.rs b/shared/src/ledger/tx.rs new file mode 100644 index 00000000000..258fb103e2b --- /dev/null +++ b/shared/src/ledger/tx.rs @@ -0,0 +1,1476 @@ +//! 
SDK functions to construct different types of transactions +use std::borrow::Cow; +use std::collections::BTreeMap; +use std::str::FromStr; + +use borsh::BorshSerialize; +use itertools::Either::*; +use masp_primitives::transaction::builder; +use namada_core::types::address::{masp, masp_tx_key, Address}; +use namada_proof_of_stake::parameters::PosParams; +use namada_proof_of_stake::types::CommissionPair; +use prost::EncodeError; +use rust_decimal::Decimal; +use thiserror::Error; +use tokio::time::Duration; + +use super::rpc::query_wasm_code_hash; +use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; +use crate::ibc::core::ics04_channel::timeout::TimeoutHeight; +use crate::ibc::signer::Signer; +use crate::ibc::timestamp::Timestamp as IbcTimestamp; +use crate::ibc::tx_msg::Msg; +use crate::ibc::Height as IbcHeight; +use crate::ibc_proto::cosmos::base::v1beta1::Coin; +use crate::ledger::args; +use crate::ledger::governance::storage as gov_storage; +use crate::ledger::masp::{ShieldedContext, ShieldedUtils}; +use crate::ledger::rpc::{self, TxBroadcastData, TxResponse}; +use crate::ledger::signing::{find_keypair, sign_tx, tx_signer, TxSigningKey}; +use crate::ledger::wallet::{Wallet, WalletUtils}; +use crate::proto::Tx; +use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; +use crate::tendermint_rpc::error::Error as RpcError; +use crate::types::key::*; +use crate::types::masp::TransferTarget; +use crate::types::storage::{Epoch, RESERVED_ADDRESS_PREFIX}; +use crate::types::time::DateTimeUtc; +use crate::types::transaction::{pos, InitAccount, UpdateVp}; +use crate::types::{storage, token}; +use crate::vm; +use crate::vm::WasmValidationError; + +/// Default timeout in seconds for requests to the `/accepted` +/// and `/applied` ABCI query endpoints. +const DEFAULT_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS: u64 = 60; + +/// Errors to do with transaction events. +#[derive(Error, Debug)] +pub enum Error { + /// Expected a dry-run transaction + #[error( + "Expected a dry-run transaction, received a wrapper transaction \ + instead: {0:?}" + )] + ExpectDryRun(Tx), + /// Expected a wrapped, encrypted transaction + #[error("Cannot broadcast a dry-run transaction")] + ExpectWrappedRun(Tx), + /// Error during broadcasting a transaction + #[error("Encountered error while broadcasting transaction: {0}")] + TxBroadcast(RpcError), + /// Invalid commission rate set + #[error("Invalid new commission rate, received {0}")] + InvalidCommisionRate(Decimal), + /// Invalid validator address + #[error("The address {0} doesn't belong to any known validator account.")] + InvalidValidatorAddress(Address), + /// Rate of epoch change too large for current epoch + #[error( + "New rate, {0}, is too large of a change with respect to the \ + predecessor epoch in which the rate will take effect." + )] + TooLargeOfChange(Decimal), + /// Error retrieving from storage + #[error("Error retrieving from storage")] + Retrival, + /// No unbonded bonds ready to withdraw in the current epoch + #[error( + "There are no unbonded bonds ready to withdraw in the current epoch \ + {0}." + )] + NoUnbondReady(Epoch), + /// No unbonded bonds found + #[error("No unbonded bonds found")] + NoUnbondFound, + /// No bonds found + #[error("No bonds found")] + NoBondFound, + /// Lower bond amount than the unbond + #[error( + "The total bonds of the source {0} is lower than the amount to be \ + unbonded. Amount to unbond is {1} and the total bonds is {2}."
+ )] + LowerBondThanUnbond(Address, token::Amount, token::Amount), + /// Balance is too low + #[error( + "The balance of the source {0} of token {1} is lower than the amount \ + to be transferred. Amount to transfer is {2} and the balance is {3}." + )] + BalanceTooLow(Address, Address, token::Amount, token::Amount), + /// Token Address does not exist on chain + #[error("The token address {0} doesn't exist on chain.")] + TokenDoesNotExist(Address), + /// Source address does not exist on chain + #[error("The address {0} doesn't exist on chain.")] + LocationDoesNotExist(Address), + /// Target Address does not exist on chain + #[error("The source address {0} doesn't exist on chain.")] + SourceDoesNotExist(Address), + /// Source Address does not exist on chain + #[error("The target address {0} doesn't exist on chain.")] + TargetLocationDoesNotExist(Address), + /// No Balance found for token + #[error("No balance found for the source {0} of token {1}")] + NoBalanceForToken(Address, Address), + /// Negative balance after transfer + #[error( + "The balance of the source {0} is lower than the amount to be \ + transferred and fees. Amount to transfer is {1} {2} and fees are {3} \ + {4}." + )] + NegativeBalanceAfterTransfer( + Address, + token::Amount, + Address, + token::Amount, + Address, + ), + /// No Balance found for token + #[error("{0}")] + MaspError(builder::Error), + /// Wasm validation failed + #[error("Validity predicate code validation failed with {0}")] + WasmValidationFailure(WasmValidationError), + /// Encoding transaction failure + #[error("Encoding tx data, {0}, shouldn't fail")] + EncodeTxFailure(std::io::Error), + /// Like EncodeTxFailure but for the encode error type + #[error("Encoding tx data, {0}, shouldn't fail")] + EncodeFailure(EncodeError), + /// Encoding public key failure + #[error("Encoding a public key, {0}, shouldn't fail")] + EncodeKeyFailure(std::io::Error), + /// Updating an VP of an implicit account + #[error( + "A validity predicate of an implicit address cannot be directly \ + updated. You can use an established address for this purpose." + )] + ImplicitUpdate, + // This should be removed? or rather refactored as it communicates + // the same information as the ImplicitUpdate + /// Updating a VP of an internal implicit address + #[error( + "A validity predicate of an internal address cannot be directly \ + updated." + )] + ImplicitInternalError, + /// Unexpected Error + #[error("Unexpected behavior reading the unbonds data has occurred")] + UnboundError, + /// Epoch not in storage + #[error("Proposal end epoch is not in the storage.")] + EpochNotInStorage, + /// Other Errors that may show up when using the interface + #[error("{0}")] + Other(String), +} + +/// Submit transaction and wait for result. Returns a list of addresses +/// initialized in the transaction if any. In dry run, this is always empty. 
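// A minimal sketch (not part of this change) of how an embedding application
// might branch on the `Error` variants above; `client`, `wallet` and `args`
// are assumed to be prepared by the caller:
//
//     match submit_bond::<C, U>(&client, &mut wallet, args).await {
//         Ok(()) => println!("bond submitted"),
//         Err(Error::BalanceTooLow(source, token, amount, balance)) => {
//             eprintln!("{source} holds only {balance} of {token}, not {amount}")
//         }
//         Err(Error::TxBroadcast(rpc_err)) => eprintln!("node rejected tx: {rpc_err}"),
//         Err(other) => return Err(other),
//     }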
+pub async fn process_tx<
+    C: crate::ledger::queries::Client + Sync,
+    U: WalletUtils,
+>(
+    client: &C,
+    wallet: &mut Wallet<U>,
+    args: &args::Tx,
+    tx: Tx,
+    default_signer: TxSigningKey,
+    #[cfg(not(feature = "mainnet"))] requires_pow: bool,
+) -> Result<Vec<Address>, Error> {
+    let to_broadcast = sign_tx::<C, U>(
+        client,
+        wallet,
+        tx,
+        args,
+        default_signer,
+        #[cfg(not(feature = "mainnet"))]
+        requires_pow,
+    )
+    .await?;
+    // NOTE: use this to print the request JSON body:
+
+    // let request =
+    //     tendermint_rpc::endpoint::broadcast::tx_commit::Request::new(
+    //         tx_bytes.clone().into(),
+    //     );
+    // use tendermint_rpc::Request;
+    // let request_body = request.into_json();
+    // println!("HTTP request body: {}", request_body);
+
+    if args.dry_run {
+        expect_dry_broadcast(to_broadcast, client, vec![]).await
+    } else {
+        // Either broadcast or submit transaction and collect result into
+        // sum type
+        let result = if args.broadcast_only {
+            Left(broadcast_tx(client, &to_broadcast).await)
+        } else {
+            Right(submit_tx(client, to_broadcast).await)
+        };
+        // Return result based on executed operation, otherwise deal with
+        // the encountered errors uniformly
+        match result {
+            Right(Ok(result)) => Ok(result.initialized_accounts),
+            Left(Ok(_)) => Ok(Vec::default()),
+            Right(Err(err)) => Err(err),
+            Left(Err(err)) => Err(err),
+        }
+    }
+}
+
+/// Submit transaction to reveal public key
+pub async fn submit_reveal_pk<
+    C: crate::ledger::queries::Client + Sync,
+    U: WalletUtils,
+>(
+    client: &C,
+    wallet: &mut Wallet<U>,
+    args: args::RevealPk,
+) -> Result<(), Error> {
+    let args::RevealPk {
+        tx: args,
+        public_key,
+    } = args;
+    if !reveal_pk_if_needed::<C, U>(client, wallet, &public_key, &args).await? {
+        let addr: Address = (&public_key).into();
+        println!("PK for {addr} is already revealed, nothing to do.");
+    }
+    Ok(())
+}
+
+/// Submit transaction to reveal the public key if needed
+pub async fn reveal_pk_if_needed<
+    C: crate::ledger::queries::Client + Sync,
+    U: WalletUtils,
+>(
+    client: &C,
+    wallet: &mut Wallet<U>,
+    public_key: &common::PublicKey,
+    args: &args::Tx,
+) -> Result<bool, Error> {
+    let addr: Address = public_key.into();
+    // Check if PK revealed
+    if args.force || !has_revealed_pk(client, &addr).await {
+        // If not, submit it
+        submit_reveal_pk_aux::<C, U>(client, wallet, public_key, args).await?;
+        Ok(true)
+    } else {
+        Ok(false)
+    }
+}
+
+/// Check if the public key for the given address has been revealed
+pub async fn has_revealed_pk<C: crate::ledger::queries::Client + Sync>(
+    client: &C,
+    addr: &Address,
+) -> bool {
+    rpc::get_public_key(client, addr).await.is_some()
+}
+
+/// Submit transaction to reveal the given public key
+pub async fn submit_reveal_pk_aux<
+    C: crate::ledger::queries::Client + Sync,
+    U: WalletUtils,
+>(
+    client: &C,
+    wallet: &mut Wallet<U>,
+    public_key: &common::PublicKey,
+    args: &args::Tx,
+) -> Result<(), Error> {
+    let addr: Address = public_key.into();
+    println!("Submitting a tx to reveal the public key for address {addr}...");
+    let tx_data = public_key.try_to_vec().map_err(Error::EncodeKeyFailure)?;
+    let tx_code = args.tx_code_path.clone();
+    let tx = Tx::new(
+        tx_code,
+        Some(tx_data),
+        args.chain_id.clone().expect("value should be there"),
+        args.expiration,
+    );
+
+    // submit_tx without signing the inner tx
+    let keypair = if let Some(signing_key) = &args.signing_key {
+        Ok(signing_key.clone())
+    } else if let Some(signer) = args.signer.as_ref() {
+        find_keypair::<C, U>(client, wallet, signer, args.password.clone())
+            .await
+    } else {
+        find_keypair::<C, U>(client, wallet, &addr, args.password.clone())
+            .await
+    }?;
+    let epoch = rpc::query_epoch(client).await;
+    let to_broadcast = if args.dry_run {
+        TxBroadcastData::DryRun(tx)
+    } else {
+        super::signing::sign_wrapper(
+            client,
+            args,
+            epoch,
+            tx,
+            &keypair,
+            #[cfg(not(feature = "mainnet"))]
+            false,
+        )
+        .await
+    };
+
+    // Logic is the same as process_tx
+    if args.dry_run {
+        expect_dry_broadcast(to_broadcast, client, ()).await
+    } else {
+        // Either broadcast or submit transaction and collect result into
+        // sum type
+        let result = if args.broadcast_only {
+            Left(broadcast_tx(client, &to_broadcast).await)
+        } else {
+            Right(submit_tx(client, to_broadcast).await)
+        };
+        // Return result based on executed operation, otherwise deal with
+        // the encountered errors uniformly
+        match result {
+            Right(Err(err)) => Err(err),
+            Left(Err(err)) => Err(err),
+            _ => Ok(()),
+        }
+    }
+}
+
+/// Broadcast a transaction to be included in the blockchain and check that
+/// the tx has been successfully included into the mempool of a validator
+///
+/// In the case of errors in any of those stages, an error message is returned
+pub async fn broadcast_tx<C: crate::ledger::queries::Client + Sync>(
+    rpc_cli: &C,
+    to_broadcast: &TxBroadcastData,
+) -> Result<Response, Error> {
+    let (tx, wrapper_tx_hash, decrypted_tx_hash) = match to_broadcast {
+        TxBroadcastData::Wrapper {
+            tx,
+            wrapper_hash,
+            decrypted_hash,
+        } => Ok((tx, wrapper_hash, decrypted_hash)),
+        TxBroadcastData::DryRun(tx) => Err(Error::ExpectWrappedRun(tx.clone())),
+    }?;
+
+    tracing::debug!(
+        transaction = ?to_broadcast,
+        "Broadcasting transaction",
+    );
+
+    // TODO: configure an explicit timeout value? We need to hack away at
+    // `tendermint-rs` for this, which is currently using a hard-coded 30s
+    // timeout.
+    let response =
+        lift_rpc_error(rpc_cli.broadcast_tx_sync(tx.to_bytes().into()).await)?;
+
+    if response.code == 0.into() {
+        println!("Transaction added to mempool: {:?}", response);
+        // Print the transaction identifiers to enable the extraction of
+        // acceptance/application results later
+        {
+            println!("Wrapper transaction hash: {:?}", wrapper_tx_hash);
+            println!("Inner transaction hash: {:?}", decrypted_tx_hash);
+        }
+        Ok(response)
+    } else {
+        Err(Error::TxBroadcast(RpcError::server(
+            serde_json::to_string(&response).unwrap(),
+        )))
+    }
+}
+
+/// Broadcast a transaction to be included in the blockchain.
+///
+/// Checks that
+/// 1. The tx has been successfully included into the mempool of a validator
+/// 2. The tx with encrypted payload has been included on the blockchain
+/// 3. The decrypted payload of the tx has been included on the blockchain.
+///
+/// In the case of errors in any of those stages, an error message is returned
+pub async fn submit_tx<C: crate::ledger::queries::Client + Sync>(
+    client: &C,
+    to_broadcast: TxBroadcastData,
+) -> Result<TxResponse, Error> {
+    let (_, wrapper_hash, decrypted_hash) = match &to_broadcast {
+        TxBroadcastData::Wrapper {
+            tx,
+            wrapper_hash,
+            decrypted_hash,
+        } => Ok((tx, wrapper_hash, decrypted_hash)),
+        TxBroadcastData::DryRun(tx) => Err(Error::ExpectWrappedRun(tx.clone())),
+    }?;
+
+    // Broadcast the supplied transaction
+    broadcast_tx(client, &to_broadcast).await?;
+
+    let deadline =
+        Duration::from_secs(DEFAULT_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS);
+
+    tracing::debug!(
+        transaction = ?to_broadcast,
+        ?deadline,
+        "Awaiting transaction approval",
+    );
+
+    let parsed = {
+        let wrapper_query =
+            crate::ledger::rpc::TxEventQuery::Accepted(wrapper_hash.as_str());
+        let event = rpc::query_tx_status(client, wrapper_query, deadline).await;
+        let parsed = TxResponse::from_event(event);
+
+        println!(
+            "Transaction accepted with result: {}",
+            serde_json::to_string_pretty(&parsed).unwrap()
+        );
+        // The transaction is now on chain. We wait for it to be decrypted
+        // and applied
+        if parsed.code == 0.to_string() {
+            // We also listen to the event emitted when the encrypted
+            // payload makes its way onto the blockchain
+            let decrypted_query =
+                rpc::TxEventQuery::Applied(decrypted_hash.as_str());
+            let event =
+                rpc::query_tx_status(client, decrypted_query, deadline).await;
+            let parsed = TxResponse::from_event(event);
+            println!(
+                "Transaction applied with result: {}",
+                serde_json::to_string_pretty(&parsed).unwrap()
+            );
+            Ok(parsed)
+        } else {
+            Ok(parsed)
+        }
+    };
+
+    tracing::debug!(
+        transaction = ?to_broadcast,
+        "Transaction approved",
+    );
+
+    parsed
+}
+
+/// Save accounts initialized from a tx into the wallet, if any.
+pub async fn save_initialized_accounts<U: WalletUtils>(
+    wallet: &mut Wallet<U>,
+    args: &args::Tx,
+    initialized_accounts: Vec<Address>,
+) {
+    let len = initialized_accounts.len();
+    if len != 0 {
+        // Store newly initialized account addresses in the wallet
+        println!(
+            "The transaction initialized {} new account{}",
+            len,
+            if len == 1 { "" } else { "s" }
+        );
+        for (ix, address) in initialized_accounts.iter().enumerate() {
+            let encoded = address.encode();
+            let alias: Cow<str> = match &args.initialized_account_alias {
+                Some(initialized_account_alias) => {
+                    if len == 1 {
+                        // If there's only one account, use the
+                        // alias as is
+                        initialized_account_alias.into()
+                    } else {
+                        // If there are multiple accounts, use
+                        // the alias as prefix, followed by
+                        // index number
+                        format!("{}{}", initialized_account_alias, ix).into()
+                    }
+                }
+                None => U::read_alias(&encoded).into(),
+            };
+            let alias = alias.into_owned();
+            let added = wallet.add_address(
+                alias.clone(),
+                address.clone(),
+                args.wallet_alias_force,
+            );
+            match added {
+                Some(new_alias) if new_alias != encoded => {
+                    println!(
+                        "Added alias {} for address {}.",
+                        new_alias, encoded
+                    );
+                }
+                _ => println!("No alias added for address {}.", encoded),
+            };
+        }
+    }
+}
+
+/// Submit validator commission rate change
+pub async fn submit_validator_commission_change<
+    C: crate::ledger::queries::Client + Sync,
+    U: WalletUtils,
+>(
+    client: &C,
+    wallet: &mut Wallet<U>,
+    args: args::TxCommissionRateChange,
+) -> Result<(), Error> {
+    let epoch = rpc::query_epoch(client).await;
+
+    let tx_code = args.tx_code_path;
+
+    // TODO: put the following two let statements into their own function
+    let params_key = crate::ledger::pos::params_key();
+    let params = rpc::query_storage_value::<C, PosParams>(client, &params_key)
+        .await
+        .expect("Parameter should be defined.");
+
+    let validator = args.validator.clone();
+    if rpc::is_validator(client, &validator).await {
+        if args.rate < Decimal::ZERO || args.rate > Decimal::ONE {
+            if args.tx.force {
+                eprintln!(
+                    "Invalid new commission rate, received {}",
+                    args.rate
+                );
+                Ok(())
+            } else {
+                Err(Error::InvalidCommisionRate(args.rate))
+            }
+        } else {
+            Ok(())
+        }?;
+
+        let pipeline_epoch_minus_one = epoch + params.pipeline_len - 1;
+
+        match rpc::query_commission_rate(
+            client,
+            &validator,
+            Some(pipeline_epoch_minus_one),
+        )
+        .await
+        {
+            Some(CommissionPair {
+                commission_rate,
+                max_commission_change_per_epoch,
+            }) => {
+                if (args.rate - commission_rate).abs()
+                    > max_commission_change_per_epoch
+                {
+                    eprintln!(
+                        "New rate is too large of a change with respect to \
+                         the predecessor epoch in which the rate will take \
+                         effect."
+ ); + if !args.tx.force { + return Err(Error::InvalidCommisionRate(args.rate)); + } + } + } + None => { + eprintln!("Error retrieving from storage"); + if !args.tx.force { + return Err(Error::Retrival); + } + } + } + } else { + eprintln!("The given address {validator} is not a validator."); + if !args.tx.force { + return Err(Error::InvalidValidatorAddress(validator)); + } + } + + let data = pos::CommissionChange { + validator: args.validator.clone(), + new_rate: args.rate, + }; + let data = data.try_to_vec().map_err(Error::EncodeTxFailure)?; + + let chain_id = args.tx.chain_id.clone().unwrap(); + let expiration = args.tx.expiration; + + let tx = Tx::new(tx_code, Some(data), chain_id, expiration); + let default_signer = args.validator.clone(); + process_tx::( + client, + wallet, + &args.tx, + tx, + TxSigningKey::WalletAddress(default_signer), + #[cfg(not(feature = "mainnet"))] + false, + ) + .await?; + Ok(()) +} + +/// Submit transaction to withdraw an unbond +pub async fn submit_withdraw< + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, + args: args::Withdraw, +) -> Result<(), Error> { + let epoch = rpc::query_epoch(client).await; + + let validator = + known_validator_or_err(args.validator.clone(), args.tx.force, client) + .await?; + + let source = args.source.clone(); + let tx_code = args.tx_code_path; + + // Check the source's current unbond amount + let bond_source = source.clone().unwrap_or_else(|| validator.clone()); + let tokens = rpc::query_withdrawable_tokens( + client, + &bond_source, + &validator, + Some(epoch), + ) + .await; + if tokens == 0.into() { + eprintln!( + "There are no unbonded bonds ready to withdraw in the current \ + epoch {}.", + epoch + ); + rpc::query_and_print_unbonds(client, &bond_source, &validator).await; + if !args.tx.force { + return Err(Error::NoUnbondReady(epoch)); + } + } else { + println!("Found {tokens} tokens that can be withdrawn."); + println!("Submitting transaction to withdraw them..."); + } + + let data = pos::Withdraw { validator, source }; + let data = data.try_to_vec().map_err(Error::EncodeTxFailure)?; + + let chain_id = args.tx.chain_id.clone().unwrap(); + let expiration = args.tx.expiration; + + let tx = Tx::new(tx_code, Some(data), chain_id, expiration); + let default_signer = args.source.unwrap_or(args.validator); + process_tx::( + client, + wallet, + &args.tx, + tx, + TxSigningKey::WalletAddress(default_signer), + #[cfg(not(feature = "mainnet"))] + false, + ) + .await?; + Ok(()) +} + +/// Submit a transaction to unbond +pub async fn submit_unbond< + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, + args: args::Unbond, +) -> Result<(), Error> { + let validator = + known_validator_or_err(args.validator.clone(), args.tx.force, client) + .await?; + let source = args.source.clone(); + let tx_code = args.tx_code_path; + + // Check the source's current bond amount + let bond_source = source.clone().unwrap_or_else(|| validator.clone()); + let bond_amount = + rpc::query_bond(client, &bond_source, &validator, None).await; + println!("Bond amount available for unbonding: {} NAM", bond_amount); + + if args.amount > bond_amount { + eprintln!( + "The total bonds of the source {} is lower than the amount to be \ + unbonded. 
Amount to unbond is {} and the total bonds is {}.", + bond_source, args.amount, bond_amount + ); + if !args.tx.force { + return Err(Error::LowerBondThanUnbond( + bond_source, + args.amount, + bond_amount, + )); + } + } + + // Query the unbonds before submitting the tx + let unbonds = + rpc::query_unbond_with_slashing(client, &bond_source, &validator).await; + let mut withdrawable = BTreeMap::::new(); + for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { + let to_withdraw = withdrawable.entry(withdraw_epoch).or_default(); + *to_withdraw += amount; + } + let latest_withdrawal_pre = withdrawable.into_iter().last(); + + let data = pos::Unbond { + validator: validator.clone(), + amount: args.amount, + source, + }; + let data = data.try_to_vec().map_err(Error::EncodeTxFailure)?; + + let chain_id = args.tx.chain_id.clone().unwrap(); + let expiration = args.tx.expiration; + + let tx = Tx::new(tx_code, Some(data), chain_id, expiration); + let default_signer = args.source.unwrap_or(args.validator); + process_tx::( + client, + wallet, + &args.tx, + tx, + TxSigningKey::WalletAddress(default_signer), + #[cfg(not(feature = "mainnet"))] + false, + ) + .await?; + + // Query the unbonds post-tx + let unbonds = + rpc::query_unbond_with_slashing(client, &bond_source, &validator).await; + let mut withdrawable = BTreeMap::::new(); + for ((_start_epoch, withdraw_epoch), amount) in unbonds.into_iter() { + let to_withdraw = withdrawable.entry(withdraw_epoch).or_default(); + *to_withdraw += amount; + } + let (latest_withdraw_epoch_post, latest_withdraw_amount_post) = + withdrawable.into_iter().last().unwrap(); + + if let Some((latest_withdraw_epoch_pre, latest_withdraw_amount_pre)) = + latest_withdrawal_pre + { + match latest_withdraw_epoch_post.cmp(&latest_withdraw_epoch_pre) { + std::cmp::Ordering::Less => { + if args.tx.force { + eprintln!( + "Unexpected behavior reading the unbonds data has \ + occurred" + ); + } else { + return Err(Error::UnboundError); + } + } + std::cmp::Ordering::Equal => { + println!( + "Amount {} withdrawable starting from epoch {}", + latest_withdraw_amount_post - latest_withdraw_amount_pre, + latest_withdraw_epoch_post + ); + } + std::cmp::Ordering::Greater => { + println!( + "Amount {} withdrawable starting from epoch {}", + latest_withdraw_amount_post, latest_withdraw_epoch_post + ); + } + } + } else { + println!( + "Amount {} withdrawable starting from epoch {}", + latest_withdraw_amount_post, latest_withdraw_epoch_post + ); + } + + Ok(()) +} + +/// Submit a transaction to bond +pub async fn submit_bond< + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, + args: args::Bond, +) -> Result<(), Error> { + let validator = + known_validator_or_err(args.validator.clone(), args.tx.force, client) + .await?; + + // Check that the source address exists on chain + let source = args.source.clone(); + let source = match args.source.clone() { + Some(source) => source_exists_or_err(source, args.tx.force, client) + .await + .map(Some), + None => Ok(source), + }?; + // Check bond's source (source for delegation or validator for self-bonds) + // balance + let bond_source = source.as_ref().unwrap_or(&validator); + let balance_key = token::balance_key(&args.native_token, bond_source); + + // TODO Should we state the same error message for the native token? 
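// `check_balance_too_low_err` (defined near the bottom of this file) reads
// the balance stored under `balance_key` and returns `Error::BalanceTooLow`
// when it cannot cover `args.amount`; `--force` downgrades the failure to a
// printed warning.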
+ check_balance_too_low_err( + &args.native_token, + bond_source, + args.amount, + balance_key, + args.tx.force, + client, + ) + .await?; + + let tx_code = args.tx_code_path; + let bond = pos::Bond { + validator, + amount: args.amount, + source, + }; + let data = bond.try_to_vec().map_err(Error::EncodeTxFailure)?; + + let chain_id = args.tx.chain_id.clone().unwrap(); + let expiration = args.tx.expiration; + + let tx = Tx::new(tx_code, Some(data), chain_id, expiration); + let default_signer = args.source.unwrap_or(args.validator); + process_tx::( + client, + wallet, + &args.tx, + tx, + TxSigningKey::WalletAddress(default_signer), + #[cfg(not(feature = "mainnet"))] + false, + ) + .await?; + Ok(()) +} + +/// Check if current epoch is in the last third of the voting period of the +/// proposal. This ensures that it is safe to optimize the vote writing to +/// storage. +pub async fn is_safe_voting_window( + client: &C, + proposal_id: u64, + proposal_start_epoch: Epoch, +) -> Result { + let current_epoch = rpc::query_epoch(client).await; + + let proposal_end_epoch_key = + gov_storage::get_voting_end_epoch_key(proposal_id); + let proposal_end_epoch = + rpc::query_storage_value::(client, &proposal_end_epoch_key) + .await; + + match proposal_end_epoch { + Some(proposal_end_epoch) => { + Ok(!crate::ledger::native_vp::governance::utils::is_valid_validator_voting_period( + current_epoch, + proposal_start_epoch, + proposal_end_epoch, + )) + } + None => { + Err(Error::EpochNotInStorage) + } + } +} + +/// Submit an IBC transfer +pub async fn submit_ibc_transfer< + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, + args: args::TxIbcTransfer, +) -> Result<(), Error> { + // Check that the source address exists on chain + let source = + source_exists_or_err(args.source.clone(), args.tx.force, client) + .await?; + // We cannot check the receiver + + let token = token_exists_or_err(args.token, args.tx.force, client).await?; + + // Check source balance + let (sub_prefix, balance_key) = match args.sub_prefix { + Some(sub_prefix) => { + let sub_prefix = storage::Key::parse(sub_prefix).unwrap(); + let prefix = token::multitoken_balance_prefix(&token, &sub_prefix); + ( + Some(sub_prefix), + token::multitoken_balance_key(&prefix, &source), + ) + } + None => (None, token::balance_key(&token, &source)), + }; + + check_balance_too_low_err( + &token, + &source, + args.amount, + balance_key, + args.tx.force, + client, + ) + .await?; + + let tx_code = args.tx_code_path; + + let denom = match sub_prefix { + // To parse IbcToken address, remove the address prefix + Some(sp) => sp.to_string().replace(RESERVED_ADDRESS_PREFIX, ""), + None => token.to_string(), + }; + let token = Coin { + denom, + amount: args.amount.to_string(), + }; + + // this height should be that of the destination chain, not this chain + let timeout_height = match args.timeout_height { + Some(h) => { + TimeoutHeight::At(IbcHeight::new(0, h).expect("invalid height")) + } + None => TimeoutHeight::Never, + }; + + let now: crate::tendermint::Time = DateTimeUtc::now().try_into().unwrap(); + let now: IbcTimestamp = now.into(); + let timeout_timestamp = if let Some(offset) = args.timeout_sec_offset { + (now + Duration::new(offset, 0)).unwrap() + } else if timeout_height == TimeoutHeight::Never { + // we cannot set 0 to both the height and the timestamp + (now + Duration::new(3600, 0)).unwrap() + } else { + IbcTimestamp::none() + }; + + let msg = MsgTransfer { + port_id_on_a: args.port_id, + chan_id_on_a: 
args.channel_id, + token, + sender: Signer::from_str(&source.to_string()).expect("invalid signer"), + receiver: Signer::from_str(&args.receiver).expect("invalid signer"), + timeout_height_on_b: timeout_height, + timeout_timestamp_on_b: timeout_timestamp, + }; + tracing::debug!("IBC transfer message {:?}", msg); + let any_msg = msg.to_any(); + let mut data = vec![]; + prost::Message::encode(&any_msg, &mut data) + .map_err(Error::EncodeFailure)?; + + let chain_id = args.tx.chain_id.clone().unwrap(); + let expiration = args.tx.expiration; + + let tx = Tx::new(tx_code, Some(data), chain_id, expiration); + process_tx::( + client, + wallet, + &args.tx, + tx, + TxSigningKey::WalletAddress(args.source), + #[cfg(not(feature = "mainnet"))] + false, + ) + .await?; + Ok(()) +} + +/// Submit an ordinary transfer +pub async fn submit_transfer< + C: crate::ledger::queries::Client + Sync, + V: WalletUtils, + U: ShieldedUtils, +>( + client: &C, + wallet: &mut Wallet, + shielded: &mut ShieldedContext, + args: args::TxTransfer, +) -> Result<(), Error> { + // Check that the source address exists on chain + let force = args.tx.force; + let transfer_source = args.source.clone(); + let source = source_exists_or_err( + transfer_source.effective_address(), + force, + client, + ) + .await?; + // Check that the target address exists on chain + let transfer_target = args.target.clone(); + let target = target_exists_or_err( + transfer_target.effective_address(), + force, + client, + ) + .await?; + + // Check that the token address exists on chain + let token = + &(token_exists_or_err(args.token.clone(), force, client).await?); + + // Check source balance + let (sub_prefix, balance_key) = match &args.sub_prefix { + Some(sub_prefix) => { + let sub_prefix = storage::Key::parse(sub_prefix).unwrap(); + let prefix = token::multitoken_balance_prefix(token, &sub_prefix); + ( + Some(sub_prefix), + token::multitoken_balance_key(&prefix, &source), + ) + } + None => (None, token::balance_key(token, &source)), + }; + + check_balance_too_low_err( + token, + &source, + args.amount, + balance_key, + args.tx.force, + client, + ) + .await?; + + let tx_code = args.tx_code_path.clone(); + let masp_addr = masp(); + // For MASP sources, use a special sentinel key recognized by VPs as default + // signer. Also, if the transaction is shielded, redact the amount and token + // types by setting the transparent value to 0 and token type to a constant. + // This has no side-effect because transaction is to self. + let (default_signer, amount, token) = + if source == masp_addr && target == masp_addr { + // TODO Refactor me, we shouldn't rely on any specific token here. + ( + TxSigningKey::SecretKey(masp_tx_key()), + 0.into(), + args.native_token.clone(), + ) + } else if source == masp_addr { + ( + TxSigningKey::SecretKey(masp_tx_key()), + args.amount, + token.clone(), + ) + } else { + ( + TxSigningKey::WalletAddress(source.clone()), + args.amount, + token.clone(), + ) + }; + // If our chosen signer is the MASP sentinel key, then our shielded inputs + // will need to cover the gas fees. + let chosen_signer = + tx_signer::(client, wallet, &args.tx, default_signer.clone()) + .await? 
+ .ref_to(); + let shielded_gas = masp_tx_key().ref_to() == chosen_signer; + // Determine whether to pin this transaction to a storage key + let key = match &args.target { + TransferTarget::PaymentAddress(pa) if pa.is_pinned() => Some(pa.hash()), + _ => None, + }; + + let stx_result = shielded + .gen_shielded_transfer(client, args.clone(), shielded_gas) + .await; + let shielded = match stx_result { + Ok(stx) => Ok(stx.map(|x| x.0)), + Err(builder::Error::ChangeIsNegative(_)) => { + Err(Error::NegativeBalanceAfterTransfer( + source.clone(), + args.amount, + token.clone(), + args.tx.fee_amount, + args.tx.fee_token.clone(), + )) + } + Err(err) => Err(Error::MaspError(err)), + }?; + + let transfer = token::Transfer { + source: source.clone(), + target, + token, + sub_prefix, + amount, + key, + shielded, + }; + tracing::debug!("Transfer data {:?}", transfer); + let data = transfer.try_to_vec().map_err(Error::EncodeTxFailure)?; + + let chain_id = args.tx.chain_id.clone().unwrap(); + let expiration = args.tx.expiration; + + #[cfg(not(feature = "mainnet"))] + let is_source_faucet = rpc::is_faucet_account(client, &source).await; + + let tx = Tx::new(tx_code, Some(data), chain_id, expiration); + let signing_address = TxSigningKey::WalletAddress(source); + process_tx::( + client, + wallet, + &args.tx, + tx, + signing_address, + #[cfg(not(feature = "mainnet"))] + is_source_faucet, + ) + .await?; + Ok(()) +} + +/// Submit a transaction to initialize an account +pub async fn submit_init_account< + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, + args: args::TxInitAccount, +) -> Result<(), Error> { + let public_key = args.public_key; + let vp_code = args.vp_code; + // Validate the VP code + validate_untrusted_code_err(&vp_code, args.tx.force)?; + let vp_code_path = String::from_utf8(args.vp_code_path).unwrap(); + let vp_code_hash = + query_wasm_code_hash(client, vp_code_path).await.unwrap(); + + let tx_code = args.tx_code_path; + let data = InitAccount { + public_key, + vp_code_hash, + }; + let data = data.try_to_vec().map_err(Error::EncodeTxFailure)?; + let chain_id = args.tx.chain_id.clone().unwrap(); + let expiration = args.tx.expiration; + let tx = Tx::new(tx_code, Some(data), chain_id, expiration); + // TODO Move unwrap to an either + let initialized_accounts = process_tx::( + client, + wallet, + &args.tx, + tx, + TxSigningKey::WalletAddress(args.source), + #[cfg(not(feature = "mainnet"))] + false, + ) + .await + .unwrap(); + save_initialized_accounts::(wallet, &args.tx, initialized_accounts) + .await; + Ok(()) +} + +/// Submit a transaction to update a VP +pub async fn submit_update_vp< + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, + args: args::TxUpdateVp, +) -> Result<(), Error> { + let addr = args.addr.clone(); + + // Check that the address is established and exists on chain + match &addr { + Address::Established(_) => { + let exists = rpc::known_address::(client, &addr).await; + if !exists { + if args.tx.force { + eprintln!("The address {} doesn't exist on chain.", addr); + Ok(()) + } else { + Err(Error::LocationDoesNotExist(addr.clone())) + } + } else { + Ok(()) + } + } + Address::Implicit(_) => { + if args.tx.force { + eprintln!( + "A validity predicate of an implicit address cannot be \ + directly updated. You can use an established address for \ + this purpose." 
+ ); + Ok(()) + } else { + Err(Error::ImplicitUpdate) + } + } + Address::Internal(_) => { + if args.tx.force { + eprintln!( + "A validity predicate of an internal address cannot be \ + directly updated." + ); + Ok(()) + } else { + Err(Error::ImplicitInternalError) + } + } + }?; + + let vp_code_path = String::from_utf8(args.vp_code_path).unwrap(); + let vp_code_hash = + query_wasm_code_hash(client, vp_code_path).await.unwrap(); + + let tx_code_path = String::from_utf8(args.tx_code_path).unwrap(); + let tx_code_hash = + query_wasm_code_hash(client, tx_code_path).await.unwrap(); + + let data = UpdateVp { addr, vp_code_hash }; + let data = data.try_to_vec().map_err(Error::EncodeTxFailure)?; + + let chain_id = args.tx.chain_id.clone().unwrap(); + let expiration = args.tx.expiration; + + let tx = Tx::new(tx_code_hash.to_vec(), Some(data), chain_id, expiration); + process_tx::( + client, + wallet, + &args.tx, + tx, + TxSigningKey::WalletAddress(args.addr), + #[cfg(not(feature = "mainnet"))] + false, + ) + .await?; + Ok(()) +} + +/// Submit a custom transaction +pub async fn submit_custom< + C: crate::ledger::queries::Client + Sync, + U: WalletUtils, +>( + client: &C, + wallet: &mut Wallet, + args: args::TxCustom, +) -> Result<(), Error> { + let tx_code = args.code_path; + let data = args.data_path; + let chain_id = args.tx.chain_id.clone().unwrap(); + let expiration = args.tx.expiration; + let tx = Tx::new(tx_code, data, chain_id, expiration); + let initialized_accounts = process_tx::( + client, + wallet, + &args.tx, + tx, + TxSigningKey::None, + #[cfg(not(feature = "mainnet"))] + false, + ) + .await?; + save_initialized_accounts::(wallet, &args.tx, initialized_accounts) + .await; + Ok(()) +} + +async fn expect_dry_broadcast( + to_broadcast: TxBroadcastData, + client: &C, + ret: T, +) -> Result { + match to_broadcast { + TxBroadcastData::DryRun(tx) => { + rpc::dry_run_tx(client, tx.to_bytes()).await; + Ok(ret) + } + TxBroadcastData::Wrapper { + tx, + wrapper_hash: _, + decrypted_hash: _, + } => Err(Error::ExpectDryRun(tx)), + } +} + +fn lift_rpc_error(res: Result) -> Result { + res.map_err(Error::TxBroadcast) +} + +/// Returns the given validator if the given address is a validator, +/// otherwise returns an error, force forces the address through even +/// if it isn't a validator +async fn known_validator_or_err( + validator: Address, + force: bool, + client: &C, +) -> Result { + // Check that the validator address exists on chain + let is_validator = rpc::is_validator(client, &validator).await; + if !is_validator { + if force { + eprintln!( + "The address {} doesn't belong to any known validator account.", + validator + ); + Ok(validator) + } else { + Err(Error::InvalidValidatorAddress(validator)) + } + } else { + Ok(validator) + } +} + +/// general pattern for checking if an address exists on the chain, or +/// throwing an error if it's not forced. Takes a generic error +/// message and the error type. 
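// Illustrative call pattern (comments only, not part of this change): each
// concrete wrapper below instantiates this generic helper with its own
// message and error constructor, e.g.
//
//     address_exists_or_err(token, force, client, message, Error::TokenDoesNotExist).await
//
// The `FnOnce(Address) -> Error` bound lets the enum variant itself be
// passed as the `err` argument.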
+async fn address_exists_or_err( + addr: Address, + force: bool, + client: &C, + message: String, + err: F, +) -> Result +where + C: crate::ledger::queries::Client + Sync, + F: FnOnce(Address) -> Error, +{ + let addr_exists = rpc::known_address::(client, &addr).await; + if !addr_exists { + if force { + eprintln!("{}", message); + Ok(addr) + } else { + Err(err(addr)) + } + } else { + Ok(addr) + } +} + +/// Returns the given token if the given address exists on chain +/// otherwise returns an error, force forces the address through even +/// if it isn't on chain +async fn token_exists_or_err( + token: Address, + force: bool, + client: &C, +) -> Result { + let message = + format!("The token address {} doesn't exist on chain.", token); + address_exists_or_err( + token, + force, + client, + message, + Error::TokenDoesNotExist, + ) + .await +} + +/// Returns the given source address if the given address exists on chain +/// otherwise returns an error, force forces the address through even +/// if it isn't on chain +async fn source_exists_or_err( + token: Address, + force: bool, + client: &C, +) -> Result { + let message = + format!("The source address {} doesn't exist on chain.", token); + address_exists_or_err( + token, + force, + client, + message, + Error::SourceDoesNotExist, + ) + .await +} + +/// Returns the given target address if the given address exists on chain +/// otherwise returns an error, force forces the address through even +/// if it isn't on chain +async fn target_exists_or_err( + token: Address, + force: bool, + client: &C, +) -> Result { + let message = + format!("The target address {} doesn't exist on chain.", token); + address_exists_or_err( + token, + force, + client, + message, + Error::TargetLocationDoesNotExist, + ) + .await +} + +/// checks the balance at the given address is enough to transfer the +/// given amount, along with the balance even existing. force +/// overrides this +async fn check_balance_too_low_err( + token: &Address, + source: &Address, + amount: token::Amount, + balance_key: storage::Key, + force: bool, + client: &C, +) -> Result<(), Error> { + match rpc::query_storage_value::(client, &balance_key) + .await + { + Some(balance) => { + if balance < amount { + if force { + eprintln!( + "The balance of the source {} of token {} is lower \ + than the amount to be transferred. 
Amount to \ + transfer is {} and the balance is {}.", + source, token, amount, balance + ); + Ok(()) + } else { + Err(Error::BalanceTooLow( + source.clone(), + token.clone(), + amount, + balance, + )) + } + } else { + Ok(()) + } + } + None => { + if force { + eprintln!( + "No balance found for the source {} of token {}", + source, token + ); + Ok(()) + } else { + Err(Error::NoBalanceForToken(source.clone(), token.clone())) + } + } + } +} + +fn validate_untrusted_code_err( + vp_code: &Vec, + force: bool, +) -> Result<(), Error> { + if let Err(err) = vm::validate_untrusted_wasm(vp_code) { + if force { + eprintln!("Validity predicate code validation failed with {}", err); + Ok(()) + } else { + Err(Error::WasmValidationFailure(err)) + } + } else { + Ok(()) + } +} diff --git a/shared/src/ledger/vp_host_fns.rs b/shared/src/ledger/vp_host_fns.rs index 5bcbd69cf41..de1d7b8c759 100644 --- a/shared/src/ledger/vp_host_fns.rs +++ b/shared/src/ledger/vp_host_fns.rs @@ -3,9 +3,9 @@ use std::num::TryFromIntError; use namada_core::types::address::Address; -use namada_core::types::hash::Hash; +use namada_core::types::hash::{Hash, HASH_LENGTH}; use namada_core::types::storage::{ - BlockHash, BlockHeight, Epoch, Key, TxIndex, + BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, }; use thiserror::Error; @@ -36,6 +36,8 @@ pub enum RuntimeError { ReadTemporaryValueError, #[error("Trying to read a permament value with read_temp")] ReadPermanentValueError, + #[error("Invalid transaction code hash")] + InvalidCodeHash, } /// VP environment function result @@ -65,18 +67,18 @@ where let (log_val, gas) = write_log.read_pre(key); add_gas(gas_meter, gas)?; match log_val { - Some(&write_log::StorageModification::Write { ref value }) => { + Some(write_log::StorageModification::Write { ref value }) => { Ok(Some(value.clone())) } Some(&write_log::StorageModification::Delete) => { // Given key has been deleted Ok(None) } - Some(&write_log::StorageModification::InitAccount { - ref vp, .. + Some(write_log::StorageModification::InitAccount { + ref vp_code_hash, }) => { // Read the VP of a new account - Ok(Some(vp.clone())) + Ok(Some(vp_code_hash.to_vec())) } Some(&write_log::StorageModification::Temp { .. }) => { Err(RuntimeError::ReadTemporaryValueError) @@ -107,18 +109,18 @@ where let (log_val, gas) = write_log.read(key); add_gas(gas_meter, gas)?; match log_val { - Some(&write_log::StorageModification::Write { ref value }) => { + Some(write_log::StorageModification::Write { ref value }) => { Ok(Some(value.clone())) } Some(&write_log::StorageModification::Delete) => { // Given key has been deleted Ok(None) } - Some(&write_log::StorageModification::InitAccount { - ref vp, .. + Some(write_log::StorageModification::InitAccount { + ref vp_code_hash, }) => { - // Read the VP of a new account - Ok(Some(vp.clone())) + // Read the VP code hash of a new account + Ok(Some(vp_code_hash.to_vec())) } Some(&write_log::StorageModification::Temp { .. }) => { Err(RuntimeError::ReadTemporaryValueError) @@ -144,7 +146,7 @@ pub fn read_temp( let (log_val, gas) = write_log.read(key); add_gas(gas_meter, gas)?; match log_val { - Some(&write_log::StorageModification::Temp { ref value }) => { + Some(write_log::StorageModification::Temp { ref value }) => { Ok(Some(value.clone())) } None => Ok(None), @@ -247,6 +249,23 @@ where Ok(height) } +/// Getting the block header. 
+pub fn get_block_header( + gas_meter: &mut VpGasMeter, + storage: &Storage, + height: BlockHeight, +) -> EnvResult> +where + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, +{ + let (header, gas) = storage + .get_block_header(Some(height)) + .map_err(RuntimeError::StorageError)?; + add_gas(gas_meter, gas)?; + Ok(header) +} + /// Getting the block hash. The height is that of the block to which the /// current transaction is being applied. pub fn get_block_hash( @@ -268,7 +287,12 @@ pub fn get_tx_code_hash( gas_meter: &mut VpGasMeter, tx: &Tx, ) -> EnvResult { - let hash = Hash(tx.code_hash()); + let hash = if tx.code_or_hash.len() == HASH_LENGTH { + Hash::try_from(&tx.code_or_hash[..]) + .map_err(|_| RuntimeError::InvalidCodeHash)? + } else { + Hash(tx.code_hash()) + }; add_gas(gas_meter, MIN_STORAGE_GAS)?; Ok(hash) } diff --git a/shared/src/ledger/wallet/alias.rs b/shared/src/ledger/wallet/alias.rs new file mode 100644 index 00000000000..13d977b8524 --- /dev/null +++ b/shared/src/ledger/wallet/alias.rs @@ -0,0 +1,103 @@ +//! Wallet address and key aliases. + +use std::convert::Infallible; +use std::fmt::Display; +use std::hash::Hash; +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; + +/// Aliases created from raw strings are kept in-memory as given, but their +/// `Serialize` and `Display` instance converts them to lowercase. Their +/// `PartialEq` instance is case-insensitive. +#[derive(Clone, Debug, Default, Deserialize, PartialOrd, Ord, Eq)] +#[serde(transparent)] +pub struct Alias(String); + +impl Alias { + /// Normalize an alias to lower-case + pub fn normalize(&self) -> String { + self.0.to_lowercase() + } + + /// Returns the length of the underlying `String`. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Is the underlying `String` empty? 
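// Case-insensitivity in practice (an illustrative sketch based on the impls
// below):
//
//     assert_eq!(Alias::from("Validator-1"), Alias::from("validator-1"));
//     assert_eq!(String::from(&Alias::from("Validator-1")), "validator-1");
//
// The raw casing is kept in memory; only comparison, hashing, `Display` and
// serialization normalize to lowercase.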
+ pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl Serialize for Alias { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.normalize().serialize(serializer) + } +} + +impl PartialEq for Alias { + fn eq(&self, other: &Self) -> bool { + self.normalize() == other.normalize() + } +} + +impl Hash for Alias { + fn hash(&self, state: &mut H) { + self.normalize().hash(state); + } +} + +impl From for Alias +where + T: AsRef, +{ + fn from(raw: T) -> Self { + Self(raw.as_ref().to_owned()) + } +} + +impl From for String { + fn from(alias: Alias) -> Self { + alias.normalize() + } +} + +impl<'a> From<&'a Alias> for String { + fn from(alias: &'a Alias) -> Self { + alias.normalize() + } +} + +impl Display for Alias { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.normalize().fmt(f) + } +} + +impl FromStr for Alias { + type Err = Infallible; + + fn from_str(s: &str) -> Result { + Ok(Self(s.into())) + } +} + +/// Default alias of a validator's account key +pub fn validator_key(validator_alias: &Alias) -> Alias { + format!("{validator_alias}-validator-key").into() +} + +/// Default alias of a validator's consensus key +pub fn validator_consensus_key(validator_alias: &Alias) -> Alias { + format!("{validator_alias}-consensus-key").into() +} + +/// Default alias of a validator's Tendermint node key +pub fn validator_tendermint_node_key(validator_alias: &Alias) -> Alias { + format!("{validator_alias}-tendermint-node-key").into() +} diff --git a/shared/src/ledger/wallet/keys.rs b/shared/src/ledger/wallet/keys.rs new file mode 100644 index 00000000000..ce8c1769dcf --- /dev/null +++ b/shared/src/ledger/wallet/keys.rs @@ -0,0 +1,244 @@ +//! Cryptographic keys for digital signatures support for the wallet. 
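// The wallet store is TOML, which has no enums, so a stored key is
// serialized to a single prefixed string. Illustrative entries (the alias
// and hex values are made up):
//
//     my-key = "unencrypted:00f1e2d3..."  # the key's `Display` form
//     my-key = "encrypted:9a3b1c..."      # hex of salt || AEAD ciphertext
//
// The `Serialize`/`Deserialize` impls below implement exactly this
// round-trip, and `EncryptedKeypair` holds the `salt || ciphertext` bytes.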
+ +use std::fmt::Display; +use std::marker::PhantomData; +use std::str::FromStr; + +use borsh::{BorshDeserialize, BorshSerialize}; +use data_encoding::HEXLOWER; +use orion::{aead, kdf}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +use crate::ledger::wallet::WalletUtils; + +const ENCRYPTED_KEY_PREFIX: &str = "encrypted:"; +const UNENCRYPTED_KEY_PREFIX: &str = "unencrypted:"; + +/// A keypair stored in a wallet +#[derive(Debug)] +pub enum StoredKeypair +where + ::Err: Display, +{ + /// An encrypted keypair + Encrypted(EncryptedKeypair), + /// An raw (unencrypted) keypair + Raw(T), +} + +impl Serialize + for StoredKeypair +where + ::Err: Display, +{ + fn serialize( + &self, + serializer: S, + ) -> std::result::Result + where + S: serde::Serializer, + { + // String encoded, because toml doesn't support enums + match self { + StoredKeypair::Encrypted(encrypted) => { + let keypair_string = + format!("{}{}", ENCRYPTED_KEY_PREFIX, encrypted); + serde::Serialize::serialize(&keypair_string, serializer) + } + StoredKeypair::Raw(raw) => { + let keypair_string = + format!("{}{}", UNENCRYPTED_KEY_PREFIX, raw); + serde::Serialize::serialize(&keypair_string, serializer) + } + } + } +} + +impl<'de, T: BorshSerialize + BorshDeserialize + Display + FromStr> + Deserialize<'de> for StoredKeypair +where + ::Err: Display, +{ + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + use serde::de::Error; + + let keypair_string: String = + serde::Deserialize::deserialize(deserializer) + .map_err(|err| { + DeserializeStoredKeypairError::InvalidStoredKeypairString( + err.to_string(), + ) + }) + .map_err(D::Error::custom)?; + if let Some(raw) = keypair_string.strip_prefix(UNENCRYPTED_KEY_PREFIX) { + FromStr::from_str(raw) + .map(|keypair| Self::Raw(keypair)) + .map_err(|err| { + DeserializeStoredKeypairError::InvalidStoredKeypairString( + err.to_string(), + ) + }) + .map_err(D::Error::custom) + } else if let Some(encrypted) = + keypair_string.strip_prefix(ENCRYPTED_KEY_PREFIX) + { + FromStr::from_str(encrypted) + .map(Self::Encrypted) + .map_err(|err| { + DeserializeStoredKeypairError::InvalidStoredKeypairString( + err.to_string(), + ) + }) + .map_err(D::Error::custom) + } else { + Err(DeserializeStoredKeypairError::MissingPrefix) + .map_err(D::Error::custom) + } + } +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum DeserializeStoredKeypairError { + #[error("The stored keypair is not valid: {0}")] + InvalidStoredKeypairString(String), + #[error("The stored keypair is missing a prefix")] + MissingPrefix, +} + +/// An encrypted keypair stored in a wallet +#[derive(Debug)] +pub struct EncryptedKeypair( + Vec, + PhantomData, +); + +impl Display for EncryptedKeypair { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", HEXLOWER.encode(self.0.as_ref())) + } +} + +impl FromStr for EncryptedKeypair { + type Err = data_encoding::DecodeError; + + fn from_str(s: &str) -> Result { + HEXLOWER.decode(s.as_ref()).map(|x| Self(x, PhantomData)) + } +} + +#[allow(missing_docs)] +#[derive(Debug, Error)] +pub enum DecryptionError { + #[error("Unexpected encryption salt")] + BadSalt, + #[error("Unable to decrypt the keypair. Is the password correct?")] + DecryptionError, + #[error("Unable to deserialize the keypair")] + DeserializingError, + #[error("Asked not to decrypt")] + NotDecrypting, +} + +impl + StoredKeypair +where + ::Err: Display, +{ + /// Construct a keypair for storage. 
If no password is provided, the keypair
+    /// will be stored raw without encryption. Returns the key for storing and
+    /// the raw key itself.
+    pub fn new(keypair: T, password: Option<String>) -> (Self, T) {
+        match password {
+            Some(password) => (
+                Self::Encrypted(EncryptedKeypair::new(&keypair, password)),
+                keypair,
+            ),
+            None => (Self::Raw(keypair.clone()), keypair),
+        }
+    }
+
+    /// Get a raw keypair from a stored keypair. If the keypair is encrypted and
+    /// no password is provided in the argument, a password will be prompted
+    /// from stdin.
+    pub fn get<U: WalletUtils>(
+        &self,
+        decrypt: bool,
+        password: Option<String>,
+    ) -> Result<T, DecryptionError> {
+        match self {
+            StoredKeypair::Encrypted(encrypted_keypair) => {
+                if decrypt {
+                    let password = password.unwrap_or_else(|| {
+                        U::read_password("Enter decryption password: ")
+                    });
+                    let key = encrypted_keypair.decrypt(password)?;
+                    Ok(key)
+                } else {
+                    Err(DecryptionError::NotDecrypting)
+                }
+            }
+            StoredKeypair::Raw(keypair) => Ok(keypair.clone()),
+        }
+    }
+
+    /// Indicates whether this key has been encrypted or not
+    pub fn is_encrypted(&self) -> bool {
+        match self {
+            StoredKeypair::Encrypted(_) => true,
+            StoredKeypair::Raw(_) => false,
+        }
+    }
+}
+
+impl<T: BorshSerialize + BorshDeserialize> EncryptedKeypair<T> {
+    /// Encrypt a keypair and store it with its salt.
+    pub fn new(keypair: &T, password: String) -> Self {
+        let salt = encryption_salt();
+        let encryption_key = encryption_key(&salt, password);
+
+        let data = keypair
+            .try_to_vec()
+            .expect("Serializing keypair shouldn't fail");
+
+        let encrypted_keypair = aead::seal(&encryption_key, &data)
+            .expect("Encryption of data shouldn't fail");
+
+        let encrypted_data = [salt.as_ref(), &encrypted_keypair].concat();
+
+        Self(encrypted_data, PhantomData)
+    }
+
+    /// Decrypt an encrypted keypair
+    pub fn decrypt(&self, password: String) -> Result<T, DecryptionError> {
+        let salt_len = encryption_salt().len();
+        let (raw_salt, cipher) = self.0.split_at(salt_len);
+
+        let salt = kdf::Salt::from_slice(raw_salt)
+            .map_err(|_| DecryptionError::BadSalt)?;
+
+        let encryption_key = encryption_key(&salt, password);
+
+        let decrypted_data = aead::open(&encryption_key, cipher)
+            .map_err(|_| DecryptionError::DecryptionError)?;
+
+        T::try_from_slice(&decrypted_data)
+            .map_err(|_| DecryptionError::DeserializingError)
+    }
+}
+
+/// Keypair encryption salt
+fn encryption_salt() -> kdf::Salt {
+    kdf::Salt::default()
+}
+
+/// Make encryption secret key from a password.
+fn encryption_key(salt: &kdf::Salt, password: String) -> kdf::SecretKey {
+    kdf::Password::from_slice(password.as_bytes())
+        .and_then(|password| kdf::derive_key(&password, salt, 3, 1 << 17, 32))
+        .expect("Generation of encryption secret key shouldn't fail")
+}
diff --git a/shared/src/ledger/wallet/mod.rs b/shared/src/ledger/wallet/mod.rs
new file mode 100644
index 00000000000..c69cc603b60
--- /dev/null
+++ b/shared/src/ledger/wallet/mod.rs
@@ -0,0 +1,520 @@
+//! Provides functionality for managing keys and addresses for a user
+pub mod alias;
+mod keys;
+pub mod pre_genesis;
+pub mod store;
+
+use std::collections::{HashMap, HashSet};
+use std::fmt::Display;
+use std::marker::PhantomData;
+use std::str::FromStr;
+
+use alias::Alias;
+use borsh::{BorshDeserialize, BorshSerialize};
+use masp_primitives::zip32::ExtendedFullViewingKey;
+pub use pre_genesis::gen_key_to_store;
+pub use store::{gen_sk, AddressVpType, Store};
+use thiserror::Error;
+
+pub use self::keys::{DecryptionError, StoredKeypair};
+pub use self::store::{ConfirmationResponse, ValidatorData, ValidatorKeys};
+use crate::types::address::Address;
+use crate::types::key::*;
+use crate::types::masp::{
+    ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress,
+};
+
+/// Captures the interactive parts of the wallet's functioning
+pub trait WalletUtils {
+    /// The location where the wallet is stored
+    type Storage;
+    /// Read the password for encryption/decryption from the file/env/stdin.
+    /// Panics if all options are empty/invalid.
+    fn read_password(prompt_msg: &str) -> String;
+
+    /// Read an alias from the file/env/stdin.
+    fn read_alias(prompt_msg: &str) -> String;
+
+    /// The given alias has been selected but conflicts with another alias in
+    /// the store. Offer the user to either replace the existing mapping, alter
+    /// the chosen alias to a name of their choosing, or cancel the aliasing.
+    fn show_overwrite_confirmation(
+        alias: &Alias,
+        alias_for: &str,
+    ) -> store::ConfirmationResponse;
+}
+
+/// A degenerate implementation of wallet interactivity
+pub struct SdkWalletUtils<U>(PhantomData<U>);
+
+impl<U> WalletUtils for SdkWalletUtils<U> {
+    type Storage = U;
+
+    fn read_password(_prompt_msg: &str) -> String {
+        panic!("attempted to prompt for password in non-interactive mode");
+    }
+
+    fn read_alias(_prompt_msg: &str) -> String {
+        panic!("attempted to prompt for alias in non-interactive mode");
+    }
+
+    fn show_overwrite_confirmation(
+        _alias: &Alias,
+        _alias_for: &str,
+    ) -> store::ConfirmationResponse {
+        // Automatically replace aliases in non-interactive mode
+        store::ConfirmationResponse::Replace
+    }
+}
+
+/// The error that is produced when a given key cannot be obtained
+#[derive(Error, Debug)]
+pub enum FindKeyError {
+    /// Could not find a given key in the wallet
+    #[error("No matching key found")]
+    KeyNotFound,
+    /// Could not decrypt a given key in the wallet
+    #[error("{0}")]
+    KeyDecryptionError(keys::DecryptionError),
+}
+
+/// Represents a collection of keys and addresses while caching key decryptions
+#[derive(Debug)]
+pub struct Wallet<U: WalletUtils> {
+    store_dir: U::Storage,
+    store: Store,
+    decrypted_key_cache: HashMap<Alias, common::SecretKey>,
+    decrypted_spendkey_cache: HashMap<Alias, ExtendedSpendingKey>,
+}
+
+impl<U: WalletUtils> Wallet<U> {
+    /// Create a new wallet from the given backing store and storage location
+    pub fn new(store_dir: U::Storage, store: Store) -> Self {
+        Self {
+            store_dir,
+            store,
+            decrypted_key_cache: HashMap::default(),
+            decrypted_spendkey_cache: HashMap::default(),
+        }
+    }
+
+    /// Generate a new keypair, derive an implicit address from its public key
+    /// and insert them into the store with the provided alias, converted to
+    /// lower case. If no alias is provided, the public key hash is used (in
+    /// lowercase too). If the key is to be encrypted, will prompt for a
+    /// password from stdin. Stores the key in the decrypted key cache and
+    /// returns the alias of the key together with the key itself.
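// Illustrative usage (a sketch only; the `store_dir` and `store` values are
// assumptions of the example, not part of this diff):
//
//     let mut wallet: Wallet<SdkWalletUtils<std::path::PathBuf>> =
//         Wallet::new(store_dir, store);
//     let (alias, sk) =
//         wallet.gen_key(SchemeType::Ed25519, Some("my-key".into()), None, false);
//
// Because the new key is cached in `decrypted_key_cache`, a later
// `find_key(&alias, None)` will not prompt for a password.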
+ pub fn gen_key( + &mut self, + scheme: SchemeType, + alias: Option, + password: Option, + force_alias: bool, + ) -> (String, common::SecretKey) { + let (alias, key) = self.store.gen_key::(scheme, alias, password, force_alias); + // Cache the newly added key + self.decrypted_key_cache.insert(alias.clone(), key.clone()); + (alias.into(), key) + } + + /// Generate a spending key and store it under the given alias in the wallet + pub fn gen_spending_key( + &mut self, + alias: String, + password: Option, + force_alias: bool, + ) -> (String, ExtendedSpendingKey) { + let (alias, key) = self.store.gen_spending_key::(alias, password, force_alias); + // Cache the newly added key + self.decrypted_spendkey_cache.insert(alias.clone(), key); + (alias.into(), key) + } + + /// Add validator data to the store + pub fn add_validator_data( + &mut self, + address: Address, + keys: ValidatorKeys, + ) { + self.store.add_validator_data(address, keys); + } + + /// Returns the validator data, if it exists. + pub fn get_validator_data(&self) -> Option<&ValidatorData> { + self.store.get_validator_data() + } + + /// Returns the validator data, if it exists. the save function + /// cannot be called after using this method as it involves a + /// partial move + pub fn take_validator_data(&mut self) -> Option<&mut ValidatorData> { + self.store.validator_data() + } + + /// Find the stored key by an alias, a public key hash or a public key. + /// If the key is encrypted and password not supplied, then password will be + /// interactively prompted. Any keys that are decrypted are stored in and + /// read from a cache to avoid prompting for password multiple times. + pub fn find_key( + &mut self, + alias_pkh_or_pk: impl AsRef, + password: Option, + ) -> Result { + // Try cache first + if let Some(cached_key) = self + .decrypted_key_cache + .get(&alias_pkh_or_pk.as_ref().into()) + { + return Ok(cached_key.clone()); + } + // If not cached, look-up in store + let stored_key = self + .store + .find_key(alias_pkh_or_pk.as_ref()) + .ok_or(FindKeyError::KeyNotFound)?; + Self::decrypt_stored_key::<_>( + &mut self.decrypted_key_cache, + stored_key, + alias_pkh_or_pk.into(), + password, + ) + } + + /// Find the spending key with the given alias in the wallet and return it. + /// If the spending key is encrypted but a password is not supplied, then it + /// will be interactively prompted. + pub fn find_spending_key( + &mut self, + alias: impl AsRef, + password: Option, + ) -> Result { + // Try cache first + if let Some(cached_key) = + self.decrypted_spendkey_cache.get(&alias.as_ref().into()) + { + return Ok(*cached_key); + } + // If not cached, look-up in store + let stored_spendkey = self + .store + .find_spending_key(alias.as_ref()) + .ok_or(FindKeyError::KeyNotFound)?; + Self::decrypt_stored_key::<_>( + &mut self.decrypted_spendkey_cache, + stored_spendkey, + alias.into(), + password, + ) + } + + /// Find the viewing key with the given alias in the wallet and return it + pub fn find_viewing_key( + &mut self, + alias: impl AsRef, + ) -> Result<&ExtendedViewingKey, FindKeyError> { + self.store + .find_viewing_key(alias.as_ref()) + .ok_or(FindKeyError::KeyNotFound) + } + + /// Find the payment address with the given alias in the wallet and return + /// it + pub fn find_payment_addr( + &self, + alias: impl AsRef, + ) -> Option<&PaymentAddress> { + self.store.find_payment_addr(alias.as_ref()) + } + + /// Find the stored key by a public key. 
+ /// If the key is encrypted and password not supplied, then password will be + /// interactively prompted for. Any keys that are decrypted are stored in + /// and read from a cache to avoid prompting for password multiple times. + pub fn find_key_by_pk( + &mut self, + pk: &common::PublicKey, + password: Option, + ) -> Result { + // Try to look-up alias for the given pk. Otherwise, use the PKH string. + let pkh: PublicKeyHash = pk.into(); + let alias = self + .store + .find_alias_by_pkh(&pkh) + .unwrap_or_else(|| pkh.to_string().into()); + // Try read cache + if let Some(cached_key) = self.decrypted_key_cache.get(&alias) { + return Ok(cached_key.clone()); + } + // Look-up from store + let stored_key = self + .store + .find_key_by_pk(pk) + .ok_or(FindKeyError::KeyNotFound)?; + Self::decrypt_stored_key::<_>( + &mut self.decrypted_key_cache, + stored_key, + alias, + password, + ) + } + + /// Find the stored key by a public key hash. + /// If the key is encrypted and password is not supplied, then password will + /// be interactively prompted for. Any keys that are decrypted are stored in + /// and read from a cache to avoid prompting for password multiple times. + pub fn find_key_by_pkh( + &mut self, + pkh: &PublicKeyHash, + password: Option, + ) -> Result { + // Try to look-up alias for the given pk. Otherwise, use the PKH string. + let alias = self + .store + .find_alias_by_pkh(pkh) + .unwrap_or_else(|| pkh.to_string().into()); + // Try read cache + if let Some(cached_key) = self.decrypted_key_cache.get(&alias) { + return Ok(cached_key.clone()); + } + // Look-up from store + let stored_key = self + .store + .find_key_by_pkh(pkh) + .ok_or(FindKeyError::KeyNotFound)?; + Self::decrypt_stored_key::<_>( + &mut self.decrypted_key_cache, + stored_key, + alias, + password, + ) + } + + /// Decrypt stored key, if it's not stored un-encrypted. + /// If a given storage key needs to be decrypted and password is not + /// supplied, then interactively prompt for password and if successfully + /// decrypted, store it in a cache. + fn decrypt_stored_key< + T: FromStr + Display + BorshSerialize + BorshDeserialize + Clone, + >( + decrypted_key_cache: &mut HashMap, + stored_key: &StoredKeypair, + alias: Alias, + password: Option, + ) -> Result + where + ::Err: Display, + { + match stored_key { + StoredKeypair::Encrypted(encrypted) => { + let password = password.unwrap_or_else(|| { + U::read_password("Enter decryption password: ") + }); + let key = encrypted + .decrypt(password) + .map_err(FindKeyError::KeyDecryptionError)?; + decrypted_key_cache.insert(alias.clone(), key); + decrypted_key_cache + .get(&alias) + .cloned() + .ok_or(FindKeyError::KeyNotFound) + } + StoredKeypair::Raw(raw) => Ok(raw.clone()), + } + } + + /// Get all known keys by their alias, paired with PKH, if known. + pub fn get_keys( + &self, + ) -> HashMap< + String, + (&StoredKeypair, Option<&PublicKeyHash>), + > { + self.store + .get_keys() + .into_iter() + .map(|(alias, value)| (alias.into(), value)) + .collect() + } + + /// Find the stored address by an alias. + pub fn find_address(&self, alias: impl AsRef) -> Option<&Address> { + self.store.find_address(alias) + } + + /// Find an alias by the address if it's in the wallet. + pub fn find_alias(&self, address: &Address) -> Option<&Alias> { + self.store.find_alias(address) + } + + /// Get all known addresses by their alias, paired with PKH, if known. 
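// Note on the lookups above: `find_key_by_pk` resolves pk -> PKH -> alias and
// then defers to `decrypt_stored_key`, so an encrypted key is decrypted (and
// its password prompted) at most once per alias; later hits are served from
// `decrypted_key_cache`.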
+    pub fn get_addresses(&self) -> HashMap<String, Address> {
+        self.store
+            .get_addresses()
+            .iter()
+            .map(|(alias, value)| (alias.into(), value.clone()))
+            .collect()
+    }
+
+    /// Get all known payment addresses by their alias
+    pub fn get_payment_addrs(&self) -> HashMap<String, PaymentAddress> {
+        self.store
+            .get_payment_addrs()
+            .iter()
+            .map(|(alias, value)| (alias.into(), *value))
+            .collect()
+    }
+
+    /// Get all known viewing keys by their alias
+    pub fn get_viewing_keys(&self) -> HashMap<String, ExtendedViewingKey> {
+        self.store
+            .get_viewing_keys()
+            .iter()
+            .map(|(alias, value)| (alias.into(), *value))
+            .collect()
+    }
+
+    /// Get all known spending keys by their alias
+    pub fn get_spending_keys(
+        &self,
+    ) -> HashMap<String, &StoredKeypair<ExtendedSpendingKey>> {
+        self.store
+            .get_spending_keys()
+            .iter()
+            .map(|(alias, value)| (alias.into(), value))
+            .collect()
+    }
+
+    /// Add a new address with the given alias. If the alias is already used,
+    /// will ask whether to replace the existing alias, choose a different
+    /// alias, or cancel the alias creation. Return the chosen alias if the
+    /// address has been added, otherwise return nothing.
+    pub fn add_address(
+        &mut self,
+        alias: impl AsRef<str>,
+        address: Address,
+        force_alias: bool,
+    ) -> Option<String> {
+        self.store
+            .insert_address::<U>(alias.into(), address, force_alias)
+            .map(Into::into)
+    }
+
+    /// Insert a new key with the given alias. If the alias is already used,
+    /// will prompt for overwrite confirmation.
+    pub fn insert_keypair(
+        &mut self,
+        alias: String,
+        keypair: StoredKeypair<common::SecretKey>,
+        pkh: PublicKeyHash,
+        force_alias: bool,
+    ) -> Option<String> {
+        self.store
+            .insert_keypair::<U>(alias.into(), keypair, pkh, force_alias)
+            .map(Into::into)
+    }
+
+    /// Insert a viewing key into the wallet under the given alias
+    pub fn insert_viewing_key(
+        &mut self,
+        alias: String,
+        view_key: ExtendedViewingKey,
+        force_alias: bool,
+    ) -> Option<String> {
+        self.store
+            .insert_viewing_key::<U>(alias.into(), view_key, force_alias)
+            .map(Into::into)
+    }
+
+    /// Insert a spending key into the wallet under the given alias
+    pub fn insert_spending_key(
+        &mut self,
+        alias: String,
+        spend_key: StoredKeypair<ExtendedSpendingKey>,
+        viewkey: ExtendedViewingKey,
+        force_alias: bool,
+    ) -> Option<String> {
+        self.store
+            .insert_spending_key::<U>(alias.into(), spend_key, viewkey, force_alias)
+            .map(Into::into)
+    }
+
+    /// Encrypt the given spending key and insert it into the wallet under the
+    /// given alias
+    pub fn encrypt_insert_spending_key(
+        &mut self,
+        alias: String,
+        spend_key: ExtendedSpendingKey,
+        password: Option<String>,
+        force_alias: bool,
+    ) -> Option<String> {
+        self.store
+            .insert_spending_key::<U>(
+                alias.into(),
+                StoredKeypair::new(spend_key, password).0,
+                ExtendedFullViewingKey::from(&spend_key.into()).into(),
+                force_alias,
+            )
+            .map(Into::into)
+    }
+
+    /// Insert a payment address into the wallet under the given alias
+    pub fn insert_payment_addr(
+        &mut self,
+        alias: String,
+        payment_addr: PaymentAddress,
+        force_alias: bool,
+    ) -> Option<String> {
+        self.store
+            .insert_payment_addr::<U>(alias.into(), payment_addr, force_alias)
+            .map(Into::into)
+    }
+
+    /// Extend this wallet from a pre-genesis validator wallet.
+    pub fn extend_from_pre_genesis_validator(
+        &mut self,
+        validator_address: Address,
+        validator_alias: Alias,
+        other: pre_genesis::ValidatorWallet,
+    ) {
+        self.store.extend_from_pre_genesis_validator(
+            validator_address,
+            validator_alias,
+            other,
+        )
+    }
+
+    /// Get all addresses with the given VP type
+    pub fn get_addresses_with_vp_type(
+        &self,
+        vp_type: AddressVpType,
+    ) -> HashSet<Address>
{ + self.store.get_addresses_with_vp_type(vp_type) + } + + /// Add a vp_type to a given address + pub fn add_vp_type_to_address( + &mut self, + vp_type: AddressVpType, + address: Address, + ) { + // defaults to an empty set + self.store.add_vp_type_to_address(vp_type, address) + } + + /// Provide immutable access to the backing store + pub fn store(&self) -> &Store { + &self.store + } + + /// Provide mutable access to the backing store + pub fn store_mut(&mut self) -> &mut Store { + &mut self.store + } + + /// Access storage location data + pub fn store_dir(&self) -> &U::Storage { + &self.store_dir + } +} diff --git a/shared/src/ledger/wallet/pre_genesis.rs b/shared/src/ledger/wallet/pre_genesis.rs new file mode 100644 index 00000000000..333a9f21e71 --- /dev/null +++ b/shared/src/ledger/wallet/pre_genesis.rs @@ -0,0 +1,80 @@ +//! Provides functionality for managing validator keys +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +use crate::ledger::wallet; +use crate::ledger::wallet::{store, StoredKeypair}; +use crate::types::key::{common, SchemeType}; + +/// Ways in which wallet store operations can fail +#[derive(Error, Debug)] +pub enum ReadError { + /// Failed decoding the wallet store + #[error("Failed decoding the wallet store: {0}")] + Decode(toml::de::Error), + /// Failed to read the wallet store + #[error("Failed to read the wallet store from {0}: {1}")] + ReadWallet(String, String), + /// Failed to write the wallet store + #[error("Failed to write the wallet store: {0}")] + StoreNewWallet(String), + /// Failed to decode a key + #[error("Failed to decode a key: {0}")] + Decryption(wallet::keys::DecryptionError), +} + +/// Validator pre-genesis wallet includes all the required keys for genesis +/// setup and a cache of decrypted keys. +pub struct ValidatorWallet { + /// The wallet store that can be written/read to/from TOML + pub store: ValidatorStore, + /// Cryptographic keypair for validator account key + pub account_key: common::SecretKey, + /// Cryptographic keypair for consensus key + pub consensus_key: common::SecretKey, + /// Cryptographic keypair for Tendermint node key + pub tendermint_node_key: common::SecretKey, +} + +/// Validator pre-genesis wallet store includes all the required keys for +/// genesis setup. 
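+///
+/// TOML round-trip sketch (editor's illustration, not part of this patch;
+/// assumes a populated `store: ValidatorStore`):
+///
+/// ```ignore
+/// let bytes = store.encode();                    // TOML bytes; panics only if
+///                                                // serialization itself fails
+/// let restored = ValidatorStore::decode(bytes)?; // -> Result<_, toml::de::Error>
+/// ```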
+#[derive(Serialize, Deserialize, Debug)] +pub struct ValidatorStore { + /// Cryptographic keypair for validator account key + pub account_key: wallet::StoredKeypair, + /// Cryptographic keypair for consensus key + pub consensus_key: wallet::StoredKeypair, + /// Cryptographic keypair for Tendermint node key + pub tendermint_node_key: wallet::StoredKeypair, + /// Special validator keys + pub validator_keys: wallet::ValidatorKeys, +} + +impl ValidatorStore { + /// Decode from TOML string bytes + pub fn decode(data: Vec) -> Result { + toml::from_slice(&data) + } + + /// Encode in TOML string bytes + pub fn encode(&self) -> Vec { + toml::to_vec(self).expect( + "Serializing of validator pre-genesis wallet shouldn't fail", + ) + } +} + +/// Generate a key and then encrypt it +pub fn gen_key_to_store( + scheme: SchemeType, + password: &Option, +) -> (StoredKeypair, common::SecretKey) { + let sk = store::gen_sk(scheme); + StoredKeypair::new(sk, password.clone()) +} + +impl From for ReadError { + fn from(err: wallet::keys::DecryptionError) -> Self { + ReadError::Decryption(err) + } +} diff --git a/shared/src/ledger/wallet/store.rs b/shared/src/ledger/wallet/store.rs new file mode 100644 index 00000000000..9c58a274901 --- /dev/null +++ b/shared/src/ledger/wallet/store.rs @@ -0,0 +1,652 @@ +//! Wallet Store information + +use std::collections::{HashMap, HashSet}; +use std::fmt::Display; +use std::str::FromStr; + +use bimap::BiHashMap; +use masp_primitives::zip32::ExtendedFullViewingKey; +#[cfg(feature = "masp-tx-gen")] +use rand_core::RngCore; +use serde::{Deserialize, Serialize}; + +use super::alias::{self, Alias}; +use super::pre_genesis; +use crate::ledger::wallet::{StoredKeypair, WalletUtils}; +use crate::types::address::{Address, ImplicitAddress}; +use crate::types::key::dkg_session_keys::DkgKeypair; +use crate::types::key::*; +use crate::types::masp::{ + ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, +}; + +/// Actions that can be taken when there is an alias conflict +pub enum ConfirmationResponse { + /// Replace the existing alias + Replace, + /// Reselect the alias that is ascribed to a given entity + Reselect(Alias), + /// Skip assigning the given entity an alias + Skip, +} + +/// Special keys for a validator +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ValidatorKeys { + /// Special keypair for signing protocol txs + pub protocol_keypair: common::SecretKey, + /// Special session keypair needed by validators for participating + /// in the DKG protocol + pub dkg_keypair: Option, +} + +impl ValidatorKeys { + /// Get the protocol keypair + pub fn get_protocol_keypair(&self) -> &common::SecretKey { + &self.protocol_keypair + } +} + +/// Special data associated with a validator +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ValidatorData { + /// The address associated to a validator + pub address: Address, + /// special keys for a validator + pub keys: ValidatorKeys, +} + +/// A Storage area for keys and addresses +#[derive(Serialize, Deserialize, Debug, Default)] +pub struct Store { + /// Known viewing keys + view_keys: HashMap, + /// Known spending keys + spend_keys: HashMap>, + /// Known payment addresses + payment_addrs: HashMap, + /// Cryptographic keypairs + keys: HashMap>, + /// Namada address book + addresses: BiHashMap, + /// Known mappings of public key hashes to their aliases in the `keys` + /// field. Used for look-up by a public key. 
+ pkhs: HashMap, + /// Special keys if the wallet belongs to a validator + pub(crate) validator_data: Option, + /// Namada address vp type + address_vp_types: HashMap>, +} + +/// Grouping of addresses by validity predicate. +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +pub enum AddressVpType { + /// The Token + Token, +} + +impl Store { + /// Find the stored key by an alias, a public key hash or a public key. + pub fn find_key( + &self, + alias_pkh_or_pk: impl AsRef, + ) -> Option<&StoredKeypair> { + let alias_pkh_or_pk = alias_pkh_or_pk.as_ref(); + // Try to find by alias + self.keys + .get(&alias_pkh_or_pk.into()) + // Try to find by PKH + .or_else(|| { + let pkh = PublicKeyHash::from_str(alias_pkh_or_pk).ok()?; + self.find_key_by_pkh(&pkh) + }) + // Try to find by PK + .or_else(|| { + let pk = common::PublicKey::from_str(alias_pkh_or_pk).ok()?; + self.find_key_by_pk(&pk) + }) + } + + /// Find the spending key with the given alias and return it + pub fn find_spending_key( + &self, + alias: impl AsRef, + ) -> Option<&StoredKeypair> { + self.spend_keys.get(&alias.into()) + } + + /// Find the viewing key with the given alias and return it + pub fn find_viewing_key( + &self, + alias: impl AsRef, + ) -> Option<&ExtendedViewingKey> { + self.view_keys.get(&alias.into()) + } + + /// Find the payment address with the given alias and return it + pub fn find_payment_addr( + &self, + alias: impl AsRef, + ) -> Option<&PaymentAddress> { + self.payment_addrs.get(&alias.into()) + } + + /// Find the stored key by a public key. + pub fn find_key_by_pk( + &self, + pk: &common::PublicKey, + ) -> Option<&StoredKeypair> { + let pkh = PublicKeyHash::from(pk); + self.find_key_by_pkh(&pkh) + } + + /// Find the stored key by a public key hash. + pub fn find_key_by_pkh( + &self, + pkh: &PublicKeyHash, + ) -> Option<&StoredKeypair> { + let alias = self.pkhs.get(pkh)?; + self.keys.get(alias) + } + + /// Find the stored alias for a public key hash. + pub fn find_alias_by_pkh(&self, pkh: &PublicKeyHash) -> Option { + self.pkhs.get(pkh).cloned() + } + + /// Find the stored address by an alias. + pub fn find_address(&self, alias: impl AsRef) -> Option<&Address> { + self.addresses.get_by_left(&alias.into()) + } + + /// Find an alias by the address if it's in the wallet. + pub fn find_alias(&self, address: &Address) -> Option<&Alias> { + self.addresses.get_by_right(address) + } + + /// Get all known keys by their alias, paired with PKH, if known. + pub fn get_keys( + &self, + ) -> HashMap< + Alias, + (&StoredKeypair, Option<&PublicKeyHash>), + > { + let mut keys: HashMap< + Alias, + (&StoredKeypair, Option<&PublicKeyHash>), + > = self + .pkhs + .iter() + .filter_map(|(pkh, alias)| { + let key = &self.keys.get(alias)?; + Some((alias.clone(), (*key, Some(pkh)))) + }) + .collect(); + self.keys.iter().for_each(|(alias, key)| { + if !keys.contains_key(alias) { + keys.insert(alias.clone(), (key, None)); + } + }); + keys + } + + /// Get all known addresses by their alias, paired with PKH, if known. + pub fn get_addresses(&self) -> &BiHashMap { + &self.addresses + } + + /// Get all known payment addresses by their alias. + pub fn get_payment_addrs(&self) -> &HashMap { + &self.payment_addrs + } + + /// Get all known viewing keys by their alias. + pub fn get_viewing_keys(&self) -> &HashMap { + &self.view_keys + } + + /// Get all known spending keys by their alias. 
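+    ///
+    /// Iteration sketch (editor's illustration, not part of this patch;
+    /// `store` is an assumed `Store` value):
+    ///
+    /// ```ignore
+    /// for (alias, stored) in store.get_spending_keys() {
+    ///     match stored {
+    ///         StoredKeypair::Encrypted(_) => println!("{}: encrypted", alias),
+    ///         StoredKeypair::Raw(_) => println!("{}: unencrypted", alias),
+    ///     }
+    /// }
+    /// ```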
+    pub fn get_spending_keys(
+        &self,
+    ) -> &HashMap<Alias, StoredKeypair<ExtendedSpendingKey>> {
+        &self.spend_keys
+    }
+
+    #[cfg(feature = "masp-tx-gen")]
+    fn generate_spending_key() -> ExtendedSpendingKey {
+        use rand::rngs::OsRng;
+        let mut spend_key = [0; 32];
+        OsRng.fill_bytes(&mut spend_key);
+        masp_primitives::zip32::ExtendedSpendingKey::master(spend_key.as_ref())
+            .into()
+    }
+
+    /// Generate a new keypair and insert it into the store with the provided
+    /// alias. If no alias is provided, the public key hash is used as the
+    /// alias. If no password is provided, the keypair is stored raw, without
+    /// encryption. Returns the alias of the key and the raw secret key.
+    pub fn gen_key<U: WalletUtils>(
+        &mut self,
+        scheme: SchemeType,
+        alias: Option<String>,
+        password: Option<String>,
+        force_alias: bool,
+    ) -> (Alias, common::SecretKey) {
+        let sk = gen_sk(scheme);
+        let pkh: PublicKeyHash = PublicKeyHash::from(&sk.ref_to());
+        let (keypair_to_store, raw_keypair) = StoredKeypair::new(sk, password);
+        let address = Address::Implicit(ImplicitAddress(pkh.clone()));
+        let alias: Alias = alias.unwrap_or_else(|| pkh.clone().into()).into();
+        if self
+            .insert_keypair::<U>(alias.clone(), keypair_to_store, pkh, force_alias)
+            .is_none()
+        {
+            panic!("Action cancelled, no changes persisted.");
+        }
+        if self
+            .insert_address::<U>(alias.clone(), address, force_alias)
+            .is_none()
+        {
+            panic!("Action cancelled, no changes persisted.");
+        }
+        (alias, raw_keypair)
+    }
+
+    /// Generate a spending key similarly to how it's done for keypairs
+    pub fn gen_spending_key<U: WalletUtils>(
+        &mut self,
+        alias: String,
+        password: Option<String>,
+        force_alias: bool,
+    ) -> (Alias, ExtendedSpendingKey) {
+        let spendkey = Self::generate_spending_key();
+        let viewkey = ExtendedFullViewingKey::from(&spendkey.into()).into();
+        let (spendkey_to_store, _raw_spendkey) =
+            StoredKeypair::new(spendkey, password);
+        let alias = Alias::from(alias);
+        if self
+            .insert_spending_key::<U>(alias.clone(), spendkey_to_store, viewkey, force_alias)
+            .is_none()
+        {
+            panic!("Action cancelled, no changes persisted.");
+        }
+        (alias, spendkey)
+    }
+
+    /// Add validator data to the store
+    pub fn add_validator_data(
+        &mut self,
+        address: Address,
+        keys: ValidatorKeys,
+    ) {
+        self.validator_data = Some(ValidatorData { address, keys });
+    }
+
+    /// Returns the validator data, if it exists
+    pub fn get_validator_data(&self) -> Option<&ValidatorData> {
+        self.validator_data.as_ref()
+    }
+
+    /// Returns a mutable reference to the validator data, if it exists
+    pub fn validator_data(&mut self) -> Option<&mut ValidatorData> {
+        self.validator_data.as_mut()
+    }
+
+    /// Insert a new key with the given alias. If the alias is already used,
+    /// will prompt for overwrite/reselection confirmation. If declined, the
+    /// keypair is not inserted and nothing is returned, otherwise the
+    /// selected alias is returned.
+    pub fn insert_keypair<U: WalletUtils>(
+        &mut self,
+        alias: Alias,
+        keypair: StoredKeypair<common::SecretKey>,
+        pkh: PublicKeyHash,
+        force: bool,
+    ) -> Option<Alias> {
+        if alias.is_empty() {
+            println!(
+                "Empty alias given, defaulting to {}.",
+                Into::<Alias>::into(pkh.to_string())
+            );
+        }
+        // Addresses and keypairs can share aliases, so first remove any
+        // addresses sharing the same namesake before checking if the alias
+        // has been used.
+ let counterpart_address = self.addresses.remove_by_left(&alias); + if self.contains_alias(&alias) && !force { + match U::show_overwrite_confirmation(&alias, "a key") { + ConfirmationResponse::Replace => {} + ConfirmationResponse::Reselect(new_alias) => { + // Restore the removed address in case the recursive prompt + // terminates with a cancellation + counterpart_address + .map(|x| self.addresses.insert(alias.clone(), x.1)); + return self.insert_keypair::(new_alias, keypair, pkh, false); + } + ConfirmationResponse::Skip => { + // Restore the removed address since this insertion action + // has now been cancelled + counterpart_address + .map(|x| self.addresses.insert(alias.clone(), x.1)); + return None; + } + } + } + self.remove_alias(&alias); + self.keys.insert(alias.clone(), keypair); + self.pkhs.insert(pkh, alias.clone()); + // Since it is intended for the inserted keypair to share its namesake + // with the pre-existing address + counterpart_address.map(|x| self.addresses.insert(alias.clone(), x.1)); + Some(alias) + } + + /// Insert spending keys similarly to how it's done for keypairs + pub fn insert_spending_key( + &mut self, + alias: Alias, + spendkey: StoredKeypair, + viewkey: ExtendedViewingKey, + force: bool, + ) -> Option { + if alias.is_empty() { + eprintln!("Empty alias given."); + return None; + } + if self.contains_alias(&alias) && !force { + match U::show_overwrite_confirmation(&alias, "a spending key") { + ConfirmationResponse::Replace => {} + ConfirmationResponse::Reselect(new_alias) => { + return self.insert_spending_key::( + new_alias, spendkey, viewkey, false, + ); + } + ConfirmationResponse::Skip => return None, + } + } + self.remove_alias(&alias); + self.spend_keys.insert(alias.clone(), spendkey); + // Simultaneously add the derived viewing key to ease balance viewing + self.view_keys.insert(alias.clone(), viewkey); + Some(alias) + } + + /// Insert viewing keys similarly to how it's done for keypairs + pub fn insert_viewing_key( + &mut self, + alias: Alias, + viewkey: ExtendedViewingKey, + force: bool, + ) -> Option { + if alias.is_empty() { + eprintln!("Empty alias given."); + return None; + } + if self.contains_alias(&alias) && !force { + match U::show_overwrite_confirmation(&alias, "a viewing key") { + ConfirmationResponse::Replace => {} + ConfirmationResponse::Reselect(new_alias) => { + return self.insert_viewing_key::(new_alias, viewkey, false); + } + ConfirmationResponse::Skip => return None, + } + } + self.remove_alias(&alias); + self.view_keys.insert(alias.clone(), viewkey); + Some(alias) + } + + /// Check if any map of the wallet contains the given alias + fn contains_alias(&self, alias: &Alias) -> bool { + self.payment_addrs.contains_key(alias) + || self.view_keys.contains_key(alias) + || self.spend_keys.contains_key(alias) + || self.keys.contains_key(alias) + || self.addresses.contains_left(alias) + } + + /// Completely remove the given alias from all maps in the wallet + fn remove_alias(&mut self, alias: &Alias) { + self.payment_addrs.remove(alias); + self.view_keys.remove(alias); + self.spend_keys.remove(alias); + self.keys.remove(alias); + self.addresses.remove_by_left(alias); + self.pkhs.retain(|_key, val| val != alias); + } + + /// Insert payment addresses similarly to how it's done for keypairs + pub fn insert_payment_addr( + &mut self, + alias: Alias, + payment_addr: PaymentAddress, + force: bool, + ) -> Option { + if alias.is_empty() { + eprintln!("Empty alias given."); + return None; + } + if self.contains_alias(&alias) && !force { + match 
U::show_overwrite_confirmation(&alias, "a payment address") { + ConfirmationResponse::Replace => {} + ConfirmationResponse::Reselect(new_alias) => { + return self + .insert_payment_addr::(new_alias, payment_addr, false); + } + ConfirmationResponse::Skip => return None, + } + } + self.remove_alias(&alias); + self.payment_addrs.insert(alias.clone(), payment_addr); + Some(alias) + } + + /// Helper function to restore keypair given alias-keypair mapping and the + /// pkhs-alias mapping. + fn restore_keypair( + &mut self, + alias: Alias, + key: Option>, + pkh: Option, + ) { + key.map(|x| self.keys.insert(alias.clone(), x)); + pkh.map(|x| self.pkhs.insert(x, alias.clone())); + } + + /// Insert a new address with the given alias. If the alias is already used, + /// will prompt for overwrite/reselection confirmation, which when declined, + /// the address won't be added. Return the selected alias if the address has + /// been added. + pub fn insert_address( + &mut self, + alias: Alias, + address: Address, + force: bool, + ) -> Option { + if alias.is_empty() { + println!("Empty alias given, defaulting to {}.", address.encode()); + } + // Addresses and keypairs can share aliases, so first remove any keys + // sharing the same namesake before checking if alias has been used. + let counterpart_key = self.keys.remove(&alias); + let mut counterpart_pkh = None; + self.pkhs.retain(|k, v| { + if v == &alias { + counterpart_pkh = Some(k.clone()); + false + } else { + true + } + }); + if self.addresses.contains_left(&alias) && !force { + match U::show_overwrite_confirmation(&alias, "an address") { + ConfirmationResponse::Replace => {} + ConfirmationResponse::Reselect(new_alias) => { + // Restore the removed keypair in case the recursive prompt + // terminates with a cancellation + self.restore_keypair( + alias, + counterpart_key, + counterpart_pkh, + ); + return self.insert_address::(new_alias, address, false); + } + ConfirmationResponse::Skip => { + // Restore the removed keypair since this insertion action + // has now been cancelled + self.restore_keypair( + alias, + counterpart_key, + counterpart_pkh, + ); + return None; + } + } + } + self.remove_alias(&alias); + self.addresses.insert(alias.clone(), address); + // Since it is intended for the inserted address to share its namesake + // with the pre-existing keypair + self.restore_keypair(alias.clone(), counterpart_key, counterpart_pkh); + Some(alias) + } + + /// Extend this store from pre-genesis validator wallet. 
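+    ///
+    /// Call sketch (editor's illustration, not part of this patch; `store`,
+    /// `validator_addr` and `pre_genesis_wallet` are assumed to exist):
+    ///
+    /// ```ignore
+    /// store.extend_from_pre_genesis_validator(
+    ///     validator_addr,
+    ///     Alias::from("validator-alias"),
+    ///     pre_genesis_wallet,
+    /// );
+    /// // The account, consensus and Tendermint node keys are now addressable
+    /// // under derived aliases, e.g. `alias::validator_key(..)`.
+    /// ```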
+    pub fn extend_from_pre_genesis_validator(
+        &mut self,
+        validator_address: Address,
+        validator_alias: Alias,
+        other: pre_genesis::ValidatorWallet,
+    ) {
+        let account_key_alias = alias::validator_key(&validator_alias);
+        let consensus_key_alias =
+            alias::validator_consensus_key(&validator_alias);
+        let tendermint_node_key_alias =
+            alias::validator_tendermint_node_key(&validator_alias);
+
+        let keys = [
+            (account_key_alias.clone(), other.store.account_key),
+            (consensus_key_alias.clone(), other.store.consensus_key),
+            (
+                tendermint_node_key_alias.clone(),
+                other.store.tendermint_node_key,
+            ),
+        ];
+        self.keys.extend(keys.into_iter());
+
+        let account_pk = other.account_key.ref_to();
+        let consensus_pk = other.consensus_key.ref_to();
+        let tendermint_node_pk = other.tendermint_node_key.ref_to();
+        let addresses = [
+            (account_key_alias.clone(), (&account_pk).into()),
+            (consensus_key_alias.clone(), (&consensus_pk).into()),
+            (
+                tendermint_node_key_alias.clone(),
+                (&tendermint_node_pk).into(),
+            ),
+        ];
+        self.addresses.extend(addresses.into_iter());
+
+        let pkhs = [
+            ((&account_pk).into(), account_key_alias),
+            ((&consensus_pk).into(), consensus_key_alias),
+            ((&tendermint_node_pk).into(), tendermint_node_key_alias),
+        ];
+        self.pkhs.extend(pkhs.into_iter());
+
+        self.validator_data = Some(ValidatorData {
+            address: validator_address,
+            keys: other.store.validator_keys,
+        });
+    }
+
+    /// Get all addresses with the given VP type
+    pub fn get_addresses_with_vp_type(
+        &self,
+        vp_type: AddressVpType,
+    ) -> HashSet<Address>
{ + // defaults to an empty set + self.address_vp_types + .get(&vp_type) + .cloned() + .unwrap_or_default() + } + + /// Adds a VP type to the address + pub fn add_vp_type_to_address( + &mut self, + vp_type: AddressVpType, + address: Address, + ) { + // defaults to an empty set + self.address_vp_types + .entry(vp_type) + .or_default() + .insert(address); + } + + /// Decode a Store from the given bytes + pub fn decode(data: Vec) -> Result { + toml::from_slice(&data) + } + + /// Encode a store into a string of bytes + pub fn encode(&self) -> Vec { + toml::to_vec(self).expect("Serializing of store shouldn't fail") + } +} + +/// Generate a new secret key. +pub fn gen_sk(scheme: SchemeType) -> common::SecretKey { + use rand::rngs::OsRng; + let mut csprng = OsRng {}; + match scheme { + SchemeType::Ed25519 => ed25519::SigScheme::generate(&mut csprng) + .try_to_sk() + .unwrap(), + SchemeType::Secp256k1 => secp256k1::SigScheme::generate(&mut csprng) + .try_to_sk() + .unwrap(), + SchemeType::Common => common::SigScheme::generate(&mut csprng) + .try_to_sk() + .unwrap(), + } +} + +impl Display for AddressVpType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AddressVpType::Token => write!(f, "token"), + } + } +} + +impl FromStr for AddressVpType { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + match s { + "token" => Ok(Self::Token), + _ => Err("unexpected address VP type"), + } + } +} + +impl Serialize for AddressVpType { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.to_string().serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for AddressVpType { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use serde::de::Error; + + let raw: String = Deserialize::deserialize(deserializer)?; + Self::from_str(&raw).map_err(D::Error::custom) + } +} diff --git a/shared/src/types/ibc/mod.rs b/shared/src/types/ibc/mod.rs index b83b2b5f07a..3c57da52036 100644 --- a/shared/src/types/ibc/mod.rs +++ b/shared/src/types/ibc/mod.rs @@ -1,4 +1,3 @@ //! Types that are used in IBC. 
-pub use namada_core::ledger::ibc::data; pub use namada_core::types::ibc::*; diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index 2ceaee746ff..238ba58520c 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -19,15 +19,14 @@ use crate::ledger::storage::{self, Storage, StorageHasher}; use crate::ledger::vp_host_fns; use crate::proto::Tx; use crate::types::address::{self, Address}; +use crate::types::hash::Hash; use crate::types::ibc::IbcEvent; use crate::types::internal::HostEnvResult; use crate::types::key::*; -use crate::types::storage::{Key, TxIndex}; +use crate::types::storage::{BlockHeight, Key, TxIndex}; use crate::vm::memory::VmMemory; use crate::vm::prefix_iter::{PrefixIteratorId, PrefixIterators}; -use crate::vm::{ - validate_untrusted_wasm, HostRef, MutHostRef, WasmValidationError, -}; +use crate::vm::{HostRef, MutHostRef}; const VERIFY_TX_SIG_GAS_COST: u64 = 1000; const WASM_VALIDATION_GAS_PER_BYTE: u64 = 1; @@ -40,8 +39,10 @@ pub enum TxRuntimeError { OutOfGas(gas::Error), #[error("Trying to modify storage for an address that doesn't exit {0}")] UnknownAddressStorageModification(Address), - #[error("Trying to use a validity predicate with an invalid WASM {0}")] - InvalidVpCode(WasmValidationError), + #[error( + "Trying to use a validity predicate with an invalid WASM code hash {0}" + )] + InvalidVpCodeHash(String), #[error("A validity predicate of an account cannot be deleted")] CannotDeleteVp, #[error("Storage modification error: {0}")] @@ -603,7 +604,7 @@ where let (log_val, gas) = write_log.read(&key); tx_add_gas(env, gas)?; Ok(match log_val { - Some(&write_log::StorageModification::Write { ref value }) => { + Some(write_log::StorageModification::Write { ref value }) => { let len: i64 = value .len() .try_into() @@ -616,19 +617,19 @@ where // fail, given key has been deleted HostEnvResult::Fail.to_i64() } - Some(&write_log::StorageModification::InitAccount { - ref vp, .. 
+ Some(write_log::StorageModification::InitAccount { + ref vp_code_hash, }) => { // read the VP of a new account - let len: i64 = vp + let len: i64 = vp_code_hash .len() .try_into() .map_err(TxRuntimeError::NumConversionError)?; let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(vp.clone()); + result_buffer.replace(vp_code_hash.to_vec()); len } - Some(&write_log::StorageModification::Temp { ref value }) => { + Some(write_log::StorageModification::Temp { ref value }) => { let len: i64 = value .len() .try_into() @@ -748,7 +749,7 @@ where ); tx_add_gas(env, iter_gas + log_gas)?; match log_val { - Some(&write_log::StorageModification::Write { ref value }) => { + Some(write_log::StorageModification::Write { ref value }) => { let key_val = KeyVal { key, val: value.clone(), @@ -771,7 +772,7 @@ where // a VP of a new account doesn't need to be iterated continue; } - Some(&write_log::StorageModification::Temp { ref value }) => { + Some(write_log::StorageModification::Temp { ref value }) => { let key_val = KeyVal { key, val: value.clone(), @@ -833,7 +834,7 @@ where let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; if key.is_validity_predicate().is_some() { - tx_validate_vp_code(env, &value)?; + tx_validate_vp_code_hash(env, &value)?; } check_address_existence(env, &key)?; @@ -984,7 +985,7 @@ where let event: IbcEvent = BorshDeserialize::try_from_slice(&event) .map_err(TxRuntimeError::EncodingError)?; let write_log = unsafe { env.ctx.write_log.get() }; - let gas = write_log.set_ibc_event(event); + let gas = write_log.emit_ibc_event(event); tx_add_gas(env, gas) } @@ -1368,8 +1369,8 @@ pub fn tx_update_validity_predicate( env: &TxVmEnv, addr_ptr: u64, addr_len: u64, - code_ptr: u64, - code_len: u64, + code_hash_ptr: u64, + code_hash_len: u64, ) -> TxResult<()> where MEM: VmMemory, @@ -1387,17 +1388,17 @@ where tracing::debug!("tx_update_validity_predicate for addr {}", addr); let key = Key::validity_predicate(&addr); - let (code, gas) = env + let (code_hash, gas) = env .memory - .read_bytes(code_ptr, code_len as _) + .read_bytes(code_hash_ptr, code_hash_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; tx_add_gas(env, gas)?; - tx_validate_vp_code(env, &code)?; + tx_validate_vp_code_hash(env, &code_hash)?; let write_log = unsafe { env.ctx.write_log.get() }; let (gas, _size_diff) = write_log - .write(&key, code) + .write(&key, code_hash) .map_err(TxRuntimeError::StorageModificationError)?; tx_add_gas(env, gas) // TODO: charge the size diff @@ -1406,8 +1407,8 @@ where /// Initialize a new account established address. 
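+/// 
+/// (Editor's sketch of the guest-side expectation after this change: the
+/// caller now passes a 32-byte VP code hash, not the WASM blob itself;
+/// `hash_ptr` and `result_ptr` below are hypothetical guest-memory offsets.)
+///
+/// ```ignore
+/// let vp_code_hash = Hash::sha256(&vp_code_bytes); // HASH_LENGTH == 32 bytes
+/// // write `vp_code_hash` to guest memory at `hash_ptr`, then call:
+/// // namada_tx_init_account(hash_ptr, HASH_LENGTH as u64, result_ptr)
+/// ```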
pub fn tx_init_account( env: &TxVmEnv, - code_ptr: u64, - code_len: u64, + code_hash_ptr: u64, + code_hash_len: u64, result_ptr: u64, ) -> TxResult<()> where @@ -1416,24 +1417,21 @@ where H: StorageHasher, CA: WasmCacheAccess, { - let (code, gas) = env + let (code_hash, gas) = env .memory - .read_bytes(code_ptr, code_len as _) + .read_bytes(code_hash_ptr, code_hash_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; tx_add_gas(env, gas)?; - tx_validate_vp_code(env, &code)?; - #[cfg(feature = "wasm-runtime")] - { - let vp_wasm_cache = unsafe { env.ctx.vp_wasm_cache.get() }; - vp_wasm_cache.pre_compile(&code); - } + tx_validate_vp_code_hash(env, &code_hash)?; tracing::debug!("tx_init_account"); let storage = unsafe { env.ctx.storage.get() }; let write_log = unsafe { env.ctx.write_log.get() }; - let (addr, gas) = write_log.init_account(&storage.address_gen, code); + let code_hash = Hash::try_from(&code_hash[..]) + .map_err(|e| TxRuntimeError::InvalidVpCodeHash(e.to_string()))?; + let (addr, gas) = write_log.init_account(&storage.address_gen, code_hash); let addr_bytes = addr.try_to_vec().map_err(TxRuntimeError::EncodingError)?; tx_add_gas(env, gas)?; @@ -1581,6 +1579,38 @@ where tx_add_gas(env, gas) } +/// Getting the block header function exposed to the wasm VM Tx environment. +pub fn tx_get_block_header( + env: &TxVmEnv, + height: u64, +) -> TxResult +where + MEM: VmMemory, + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + CA: WasmCacheAccess, +{ + let storage = unsafe { env.ctx.storage.get() }; + let (header, gas) = storage + .get_block_header(Some(BlockHeight(height))) + .map_err(TxRuntimeError::StorageError)?; + Ok(match header { + Some(h) => { + let value = + h.try_to_vec().map_err(TxRuntimeError::EncodingError)?; + let len: i64 = value + .len() + .try_into() + .map_err(TxRuntimeError::NumConversionError)?; + let result_buffer = unsafe { env.ctx.result_buffer.get() }; + result_buffer.replace(value); + tx_add_gas(env, gas)?; + len + } + None => HostEnvResult::Fail.to_i64(), + }) +} + /// Getting the chain ID function exposed to the wasm VM VP environment. pub fn vp_get_chain_id( env: &VpVmEnv, @@ -1622,36 +1652,35 @@ where Ok(height.0) } -/// Getting the block time function exposed to the wasm VM Tx -/// environment. The time is that of the block header to which the current -/// transaction is being applied. -pub fn tx_get_block_time( - env: &TxVmEnv, -) -> TxResult +/// Getting the block header function exposed to the wasm VM VP environment. 
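+///
+/// Call-convention sketch (editor's illustration; a non-negative return is
+/// the byte length of a Borsh-encoded `Header` left in the result buffer):
+///
+/// ```ignore
+/// let len = vp_get_block_header(&env, height)?;
+/// if len == HostEnvResult::Fail.to_i64() {
+///     // no header stored for `height`
+/// } else {
+///     // read `len` bytes from the result buffer and Borsh-decode the header
+/// }
+/// ```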
+pub fn vp_get_block_header( + env: &VpVmEnv, + height: u64, +) -> vp_host_fns::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { + let gas_meter = unsafe { env.ctx.gas_meter.get() }; let storage = unsafe { env.ctx.storage.get() }; let (header, gas) = storage - .get_block_header(None) - .map_err(TxRuntimeError::StorageError)?; + .get_block_header(Some(BlockHeight(height))) + .map_err(vp_host_fns::RuntimeError::StorageError)?; + vp_host_fns::add_gas(gas_meter, gas)?; Ok(match header { Some(h) => { - let time = h - .time - .to_rfc3339() + let value = h .try_to_vec() - .map_err(TxRuntimeError::EncodingError)?; - let len: i64 = time + .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let len: i64 = value .len() .try_into() - .map_err(TxRuntimeError::NumConversionError)?; + .map_err(vp_host_fns::RuntimeError::NumConversionError)?; let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(time); - tx_add_gas(env, gas)?; + result_buffer.replace(value); len } None => HostEnvResult::Fail.to_i64(), @@ -1816,10 +1845,10 @@ where Ok(()) } -/// Validate a VP WASM code in a tx environment. -fn tx_validate_vp_code( +/// Validate a VP WASM code hash in a tx environment. +fn tx_validate_vp_code_hash( env: &TxVmEnv, - code: &[u8], + code_hash: &[u8], ) -> TxResult<()> where MEM: VmMemory, @@ -1827,8 +1856,26 @@ where H: StorageHasher, CA: WasmCacheAccess, { - tx_add_gas(env, code.len() as u64 * WASM_VALIDATION_GAS_PER_BYTE)?; - validate_untrusted_wasm(code).map_err(TxRuntimeError::InvalidVpCode) + tx_add_gas(env, code_hash.len() as u64 * WASM_VALIDATION_GAS_PER_BYTE)?; + let hash = Hash::try_from(code_hash) + .map_err(|e| TxRuntimeError::InvalidVpCodeHash(e.to_string()))?; + let key = Key::wasm_code(&hash); + let write_log = unsafe { env.ctx.write_log.get() }; + let (result, gas) = write_log.read(&key); + tx_add_gas(env, gas)?; + if result.is_none() { + let storage = unsafe { env.ctx.storage.get() }; + let (is_present, gas) = storage + .has_key(&key) + .map_err(TxRuntimeError::StorageError)?; + tx_add_gas(env, gas)?; + if !is_present { + return Err(TxRuntimeError::InvalidVpCodeHash( + "The corresponding VP code doesn't exist".to_string(), + )); + } + } + Ok(()) } /// Evaluate a validity predicate with the given input data. diff --git a/shared/src/vm/wasm/compilation_cache/common.rs b/shared/src/vm/wasm/compilation_cache/common.rs index 9f25d573c4f..64029180313 100644 --- a/shared/src/vm/wasm/compilation_cache/common.rs +++ b/shared/src/vm/wasm/compilation_cache/common.rs @@ -16,15 +16,13 @@ use std::{cmp, fs}; use clru::{CLruCache, CLruCacheConfig, WeightScale}; use wasmer::{Module, Store}; -use wasmer_cache::{FileSystemCache, Hash}; +use wasmer_cache::{FileSystemCache, Hash as CacheHash}; +use crate::core::types::hash::{Hash, HASH_LENGTH}; use crate::vm::wasm::run::untrusted_wasm_store; use crate::vm::wasm::{self, memory}; use crate::vm::{WasmCacheAccess, WasmCacheRoAccess}; -/// The size of the [`struct@Hash`] -const HASH_BYTES: usize = 32; - /// Cache handle. Thread-safe. #[derive(Debug, Clone)] pub struct Cache { @@ -67,7 +65,7 @@ impl WeightScale for ModuleCacheScale { // elements, so we use the size of the module as its scale // and subtract 1 from it to negate the increment of the cache length. 
- let size = loupe::size_of_val(&value) + HASH_BYTES; + let size = loupe::size_of_val(&value) + HASH_LENGTH; tracing::debug!( "WASM module hash {}, size including the hash {}", key.to_string(), @@ -105,14 +103,14 @@ impl Cache { /// it. If the cache access is set to [`crate::vm::WasmCacheRwAccess`], it /// updates the position in the LRU cache. Otherwise, the compiled /// module will not be be cached, if it's not already. - pub fn fetch_or_compile( + pub fn fetch( &mut self, - code: impl AsRef<[u8]>, - ) -> Result<(Module, Store), wasm::run::Error> { + code_hash: &Hash, + ) -> Result, wasm::run::Error> { if A::is_read_write() { - self.get_or_compile(code) + self.get(code_hash) } else { - self.peek_or_compile(code) + self.peek(code_hash) } } @@ -128,49 +126,48 @@ impl Cache { /// Get a WASM module from LRU cache, from a file or compile it and cache /// it. Updates the position in the LRU cache. - fn get_or_compile( + fn get( &mut self, - code: impl AsRef<[u8]>, - ) -> Result<(Module, Store), wasm::run::Error> { - let hash = hash_of_code(&code); - + hash: &Hash, + ) -> Result, wasm::run::Error> { let mut in_memory = self.in_memory.write().unwrap(); - if let Some(module) = in_memory.get(&hash) { + if let Some(module) = in_memory.get(hash) { tracing::trace!( "{} found {} in cache.", N::name(), hash.to_string() ); - return Ok((module.clone(), store())); + return Ok(Some((module.clone(), store()))); } drop(in_memory); let mut iter = 0; loop { - let mut progress = self.progress.write().unwrap(); - match progress.get(&hash) { + let progress = self.progress.read().unwrap(); + match progress.get(hash) { Some(Compilation::Done) => { drop(progress); let mut in_memory = self.in_memory.write().unwrap(); - if let Some(module) = in_memory.get(&hash) { + if let Some(module) = in_memory.get(hash) { tracing::info!( "{} found {} in memory cache.", N::name(), hash.to_string() ); - return Ok((module.clone(), store())); + return Ok(Some((module.clone(), store()))); } - let (module, store) = file_load_module(&self.dir, &hash); + let (module, store) = file_load_module(&self.dir, hash); tracing::info!( "{} found {} in file cache.", N::name(), hash.to_string() ); // Put into cache, ignore result if it's full - let _ = in_memory.put_with_weight(hash, module.clone()); + let _ = + in_memory.put_with_weight(hash.clone(), module.clone()); - return Ok((module, store)); + return Ok(Some((module, store))); } Some(Compilation::Compiling) => { drop(progress); @@ -184,120 +181,75 @@ impl Cache { continue; } None => { - progress.insert(hash, Compilation::Compiling); drop(progress); - - let (module, store) = - if module_file_exists(&self.dir, &hash) { - tracing::info!( - "Loading {} {} from file.", - N::name(), - hash.to_string() - ); - file_load_module(&self.dir, &hash) - } else { - tracing::info!( - "Compiling {} {}.", - N::name(), - hash.to_string() - ); - - match wasm::run::prepare_wasm_code(code) { - Ok(code) => match compile(code) { - Ok((module, store)) => { - // Write the file - file_write_module( - &self.dir, &module, &hash, - ); - - (module, store) - } - Err(err) => { - let mut progress = - self.progress.write().unwrap(); - tracing::info!( - "Failed to compile WASM {} with {}", - hash.to_string(), - err - ); - progress.remove(&hash); - drop(progress); - return Err(err); - } - }, - Err(err) => { - let mut progress = - self.progress.write().unwrap(); - tracing::info!( - "Failed to prepare WASM {} with {}", - hash.to_string(), - err - ); - progress.remove(&hash); - drop(progress); - return Err(err); - } - } - }; + let 
(module, store) = if module_file_exists(&self.dir, hash)
+                    {
+                        tracing::info!(
+                            "Loading {} {} from file.",
+                            N::name(),
+                            hash.to_string()
+                        );
+                        file_load_module(&self.dir, hash)
+                    } else {
+                        return Ok(None);
+                    };
 
                     // Update progress
                     let mut progress = self.progress.write().unwrap();
-                    progress.insert(hash, Compilation::Done);
+                    progress.insert(hash.clone(), Compilation::Done);
 
                     // Put into cache, ignore the result (fails if the module
                     // cannot fit into the cache)
                     let mut in_memory = self.in_memory.write().unwrap();
-                    let _ = in_memory.put_with_weight(hash, module.clone());
+                    let _ =
+                        in_memory.put_with_weight(hash.clone(), module.clone());
 
-                    return Ok((module, store));
+                    return Ok(Some((module, store)));
                 }
             }
         }
     }
 
     /// Peek-only is used for dry-run txs (and VPs that the tx triggers).
-    /// It doesn't update the in-memory cache or persist the compiled modules to
-    /// files.
-    fn peek_or_compile(
+    /// It doesn't update the in-memory cache.
+    fn peek(
         &self,
-        code: impl AsRef<[u8]>,
-    ) -> Result<(Module, Store), wasm::run::Error> {
-        let hash = hash_of_code(&code);
-
+        hash: &Hash,
+    ) -> Result<Option<(Module, Store)>, wasm::run::Error> {
         let in_memory = self.in_memory.read().unwrap();
-        if let Some(module) = in_memory.peek(&hash) {
+        if let Some(module) = in_memory.peek(hash) {
             tracing::info!(
                 "{} found {} in cache.",
                 N::name(),
                 hash.to_string()
             );
-            return Ok((module.clone(), store()));
+            return Ok(Some((module.clone(), store())));
         }
         drop(in_memory);
 
         let mut iter = 0;
         loop {
             let progress = self.progress.read().unwrap();
-            match progress.get(&hash) {
+            match progress.get(hash) {
                 Some(Compilation::Done) => {
                     drop(progress);
                     let in_memory = self.in_memory.read().unwrap();
-                    if let Some(module) = in_memory.peek(&hash) {
+                    if let Some(module) = in_memory.peek(hash) {
                         tracing::info!(
                             "{} found {} in memory cache.",
                             N::name(),
                             hash.to_string()
                         );
-                        return Ok((module.clone(), store()));
+                        return Ok(Some((module.clone(), store())));
                     }
 
-                    let (module, store) = file_load_module(&self.dir, &hash);
+                    let (module, store) = file_load_module(&self.dir, hash);
                     tracing::info!(
                         "{} found {} in file cache.",
                         N::name(),
                         hash.to_string()
                     );
-                    return Ok((module, store));
+                    return Ok(Some((module, store)));
                 }
                 Some(Compilation::Compiling) => {
                     drop(progress);
@@ -313,27 +265,90 @@ impl Cache {
                 None => {
                     drop(progress);
 
-                    return if module_file_exists(&self.dir, &hash) {
+                    return if module_file_exists(&self.dir, hash) {
                         tracing::info!(
                             "Loading {} {} from file.",
                             N::name(),
                             hash.to_string()
                         );
-                        Ok(file_load_module(&self.dir, &hash))
+                        Ok(Some(file_load_module(&self.dir, hash)))
                     } else {
-                        tracing::info!(
-                            "Compiling {} {}.",
-                            N::name(),
-                            hash.to_string()
-                        );
-                        let code = wasm::run::prepare_wasm_code(code)?;
-                        compile(code)
+                        Ok(None)
                     };
                 }
             }
         }
     }
 
+    /// Compile a WASM module and persist the compiled modules to files.
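+    ///
+    /// Typical caller pattern (editor's sketch, mirroring `fetch_or_compile`
+    /// in `wasm/run.rs` further below; `cache`, `code_hash` and `code` are
+    /// assumed to be in scope):
+    ///
+    /// ```ignore
+    /// let (module, store) = match cache.fetch(&code_hash)? {
+    ///     // in-memory or on-disk hit
+    ///     Some(hit) => hit,
+    ///     // cache miss: compile from the source bytes and persist
+    ///     None => cache
+    ///         .compile_or_fetch(code)?
+    ///         .ok_or(Error::NoCompiledWasmCode)?,
+    /// };
+    /// ```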
+ pub fn compile_or_fetch( + &mut self, + code: impl AsRef<[u8]>, + ) -> Result, wasm::run::Error> { + let hash = hash_of_code(&code); + + if !A::is_read_write() { + // It doesn't update the cache and files + let progress = self.progress.read().unwrap(); + match progress.get(&hash) { + Some(_) => return self.peek(&hash), + None => { + let code = wasm::run::prepare_wasm_code(code)?; + return Ok(Some(compile(code)?)); + } + } + } + + let mut progress = self.progress.write().unwrap(); + if progress.get(&hash).is_some() { + drop(progress); + return self.fetch(&hash); + } + progress.insert(hash.clone(), Compilation::Compiling); + drop(progress); + + tracing::info!("Compiling {} {}.", N::name(), hash.to_string()); + + match wasm::run::prepare_wasm_code(code) { + Ok(code) => match compile(code) { + Ok((module, store)) => { + // Write the file + file_write_module(&self.dir, &module, &hash); + + // Update progress + let mut progress = self.progress.write().unwrap(); + progress.insert(hash.clone(), Compilation::Done); + + // Put into cache, ignore result if it's full + let mut in_memory = self.in_memory.write().unwrap(); + let _ = in_memory.put_with_weight(hash, module.clone()); + + Ok(Some((module, store))) + } + Err(err) => { + tracing::info!( + "Failed to compile WASM {} with {}", + hash.to_string(), + err + ); + let mut progress = self.progress.write().unwrap(); + progress.remove(&hash); + Err(err) + } + }, + Err(err) => { + tracing::info!( + "Failed to prepare WASM {} with {}", + hash.to_string(), + err + ); + let mut progress = self.progress.write().unwrap(); + progress.remove(&hash); + Err(err) + } + } + } + /// Pre-compile a WASM module to a file. The compilation runs in a new OS /// thread and the function returns immediately. pub fn pre_compile(&mut self, code: impl AsRef<[u8]>) { @@ -349,7 +364,7 @@ impl Cache { progress.insert(hash, Compilation::Done); return; } - progress.insert(hash, Compilation::Compiling); + progress.insert(hash.clone(), Compilation::Compiling); drop(progress); let progress = self.progress.clone(); let code = code.as_ref().to_vec(); @@ -363,8 +378,10 @@ impl Cache { Ok((module, store)) => { let mut progress = progress.write().unwrap(); - progress - .insert(hash, Compilation::Done); + progress.insert( + hash.clone(), + Compilation::Done, + ); file_write_module(&dir, &module, &hash); (module, store) } @@ -418,11 +435,11 @@ fn exponential_backoff(iteration: u64) { } fn hash_of_code(code: impl AsRef<[u8]>) -> Hash { - Hash::generate(code.as_ref()) + Hash::sha256(code.as_ref()) } fn hash_to_store_dir(hash: &Hash) -> PathBuf { - PathBuf::from("vp_wasm_cache").join(hash.to_string()) + PathBuf::from("vp_wasm_cache").join(hash.to_string().to_lowercase()) } fn compile( @@ -449,14 +466,15 @@ fn store() -> Store { fn file_write_module(dir: impl AsRef, module: &Module, hash: &Hash) { use wasmer_cache::Cache; let mut fs_cache = fs_cache(dir, hash); - fs_cache.store(*hash, module).unwrap(); + fs_cache.store(CacheHash::new(hash.0), module).unwrap(); } fn file_load_module(dir: impl AsRef, hash: &Hash) -> (Module, Store) { use wasmer_cache::Cache; let fs_cache = fs_cache(dir, hash); let store = store(); - let module = unsafe { fs_cache.load(&store, *hash) }.unwrap(); + let hash = CacheHash::new(hash.0); + let module = unsafe { fs_cache.load(&store, hash) }.unwrap(); (module, store) } @@ -470,7 +488,7 @@ fn fs_cache(dir: impl AsRef, hash: &Hash) -> FileSystemCache { fn module_file_exists(dir: impl AsRef, hash: &Hash) -> bool { let file = 
dir.as_ref().join(hash_to_store_dir(hash)).join(format!( "{}.{}", - hash.to_string(), + hash.to_string().to_lowercase(), file_ext() )); file.exists() @@ -557,23 +575,18 @@ mod test { use std::cmp::max; use byte_unit::Byte; + use namada_test_utils::TestWasms; use tempfile::{tempdir, TempDir}; use test_log::test; use super::*; use crate::vm::WasmCacheRwAccess; - const TX_NO_OP: &str = "../wasm_for_tests/tx_no_op.wasm"; - const TX_READ_STORAGE_KEY: &str = - "../wasm_for_tests/tx_read_storage_key.wasm"; - const VP_ALWAYS_TRUE: &str = "../wasm_for_tests/vp_always_true.wasm"; - const VP_EVAL: &str = "../wasm_for_tests/vp_eval.wasm"; - #[test] fn test_fetch_or_compile_valid_wasm() { // Load some WASMs and find their hashes and in-memory size - let tx_read_storage_key = load_wasm(TX_READ_STORAGE_KEY); - let tx_no_op = load_wasm(TX_NO_OP); + let tx_read_storage_key = load_wasm(TestWasms::TxReadStorageKey.path()); + let tx_no_op = load_wasm(TestWasms::TxNoOp.path()); // Create a new cache with the limit set to // `max(tx_read_storage_key.size, tx_no_op.size) + 1` @@ -588,8 +601,20 @@ mod test { // Fetch `tx_read_storage_key` { - let (_module, _store) = - cache.fetch_or_compile(&tx_read_storage_key.code).unwrap(); + let fetched = cache.fetch(&tx_read_storage_key.hash).unwrap(); + assert_matches!( + fetched, + None, + "The module should not be in cache" + ); + + let fetched = + cache.compile_or_fetch(&tx_read_storage_key.code).unwrap(); + assert_matches!( + fetched, + Some(_), + "The code should be compiled" + ); let in_memory = cache.in_memory.read().unwrap(); assert_matches!( @@ -614,8 +639,19 @@ mod test { // Fetch `tx_no_op`. Fetching another module should get us over the // limit, so the previous one should be popped from the cache { - let (_module, _store) = - cache.fetch_or_compile(&tx_no_op.code).unwrap(); + let fetched = cache.fetch(&tx_no_op.hash).unwrap(); + assert_matches!( + fetched, + None, + "The module must not be in cache" + ); + + let fetched = cache.compile_or_fetch(&tx_no_op.code).unwrap(); + assert_matches!( + fetched, + Some(_), + "The code should be compiled" + ); let in_memory = cache.in_memory.read().unwrap(); assert_matches!( @@ -660,8 +696,12 @@ mod test { cache.in_memory = in_memory; cache.progress = Default::default(); { - let (_module, _store) = - cache.fetch_or_compile(&tx_read_storage_key.code).unwrap(); + let fetched = cache.fetch(&tx_read_storage_key.hash).unwrap(); + assert_matches!( + fetched, + Some(_), + "The module must be in file cache" + ); let in_memory = cache.in_memory.read().unwrap(); assert_matches!( @@ -697,8 +737,12 @@ mod test { // Fetch `tx_read_storage_key` again, now it should be in-memory { - let (_module, _store) = - cache.fetch_or_compile(&tx_read_storage_key.code).unwrap(); + let fetched = cache.fetch(&tx_read_storage_key.hash).unwrap(); + assert_matches!( + fetched, + Some(_), + "The module must be in memory" + ); let in_memory = cache.in_memory.read().unwrap(); assert_matches!( @@ -736,9 +780,20 @@ mod test { { let mut cache = cache.read_only(); + let fetched = cache.fetch(&tx_no_op.hash).unwrap(); + assert_matches!( + fetched, + Some(_), + "The module must be in cache" + ); + // Fetching with read-only should not modify the in-memory cache - let (_module, _store) = - cache.fetch_or_compile(&tx_no_op.code).unwrap(); + let fetched = cache.compile_or_fetch(&tx_no_op.code).unwrap(); + assert_matches!( + fetched, + Some(_), + "The module should be compiled" + ); let in_memory = cache.in_memory.read().unwrap(); assert_matches!( @@ -766,7 +821,7 @@ 
mod test { // Try to compile it let error = cache - .fetch_or_compile(&invalid_wasm) + .compile_or_fetch(&invalid_wasm) .expect_err("Compilation should fail"); println!("Error: {}", error); @@ -789,8 +844,8 @@ mod test { #[test] fn test_pre_compile_valid_wasm() { // Load some WASMs and find their hashes and in-memory size - let vp_always_true = load_wasm(VP_ALWAYS_TRUE); - let vp_eval = load_wasm(VP_EVAL); + let vp_always_true = load_wasm(TestWasms::VpAlwaysTrue.path()); + let vp_eval = load_wasm(TestWasms::VpEval.path()); // Create a new cache with the limit set to // `max(vp_always_true.size, vp_eval.size) + 1 + extra_bytes` @@ -817,8 +872,12 @@ mod test { // Now fetch it to wait for it finish compilation { - let (_module, _store) = - cache.fetch_or_compile(&vp_always_true.code).unwrap(); + let fetched = cache.fetch(&vp_always_true.hash).unwrap(); + assert_matches!( + fetched, + Some(_), + "The module must be in cache" + ); let in_memory = cache.in_memory.read().unwrap(); assert_matches!( @@ -856,8 +915,12 @@ mod test { // Now fetch it to wait for it finish compilation { - let (_module, _store) = - cache.fetch_or_compile(&vp_eval.code).unwrap(); + let fetched = cache.fetch(&vp_eval.hash).unwrap(); + assert_matches!( + fetched, + Some(_), + "The module must be in cache" + ); let in_memory = cache.in_memory.read().unwrap(); assert_matches!( @@ -906,10 +969,12 @@ mod test { // Now fetch it to wait for it finish compilation { - let error = cache - .fetch_or_compile(&invalid_wasm) - .expect_err("Compilation should fail"); - println!("Error: {}", error); + let fetched = cache.fetch(&hash).unwrap(); + assert_matches!( + fetched, + None, + "There should be no entry for this hash in cache" + ); let in_memory = cache.in_memory.read().unwrap(); assert_matches!( @@ -933,7 +998,7 @@ mod test { } /// Get the WASM code bytes, its hash and find the compiled module's size - fn load_wasm(file: impl AsRef) -> WasmWithMeta { + fn load_wasm(file: impl AsRef) -> WasmWithMeta { // When `WeightScale` calls `loupe::size_of_val` in the cache, for some // reason it returns 8 bytes more than the same call in here. 
let extra_bytes = 8; @@ -947,12 +1012,13 @@ mod test { // No in-memory cache needed, but must be non-zero 1, ); - let (module, _store) = cache.fetch_or_compile(&code).unwrap(); - loupe::size_of_val(&module) + HASH_BYTES + extra_bytes + let (module, _store) = + cache.compile_or_fetch(&code).unwrap().unwrap(); + loupe::size_of_val(&module) + HASH_LENGTH + extra_bytes }; println!( "Compiled module {} size including the hash: {} ({})", - file, + file.to_string_lossy(), Byte::from_bytes(size as u128).get_appropriate_unit(true), size, ); diff --git a/shared/src/vm/wasm/host_env.rs b/shared/src/vm/wasm/host_env.rs index 74c87ed69b9..bf438ddfcc4 100644 --- a/shared/src/vm/wasm/host_env.rs +++ b/shared/src/vm/wasm/host_env.rs @@ -75,7 +75,7 @@ where "namada_tx_get_chain_id" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_chain_id), "namada_tx_get_tx_index" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_tx_index), "namada_tx_get_block_height" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_block_height), - "namada_tx_get_block_time" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_block_time), + "namada_tx_get_block_header" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_block_header), "namada_tx_get_block_hash" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_block_hash), "namada_tx_get_block_epoch" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_block_epoch), "namada_tx_get_native_token" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_native_token), @@ -114,6 +114,7 @@ where "namada_vp_get_chain_id" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_chain_id), "namada_vp_get_tx_index" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_tx_index), "namada_vp_get_block_height" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_height), + "namada_vp_get_block_header" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_header), "namada_vp_get_block_hash" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_hash), "namada_vp_get_tx_code_hash" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_tx_code_hash), "namada_vp_get_block_epoch" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_epoch), diff --git a/shared/src/vm/wasm/mod.rs b/shared/src/vm/wasm/mod.rs index 93090a13413..96d2dd7ce60 100644 --- a/shared/src/vm/wasm/mod.rs +++ b/shared/src/vm/wasm/mod.rs @@ -5,5 +5,6 @@ pub mod host_env; pub mod memory; pub mod run; +pub use compilation_cache::common::{Cache, CacheName}; pub use compilation_cache::tx::TxCache; pub use compilation_cache::vp::VpCache; diff --git a/shared/src/vm/wasm/run.rs b/shared/src/vm/wasm/run.rs index 0efdac499cb..35e7875af2e 100644 --- a/shared/src/vm/wasm/run.rs +++ b/shared/src/vm/wasm/run.rs @@ -6,7 +6,7 @@ use std::marker::PhantomData; use parity_wasm::elements; use pwasm_utils::{self, rules}; use thiserror::Error; -use wasmer::BaseTunables; +use wasmer::{BaseTunables, Module, Store}; use super::memory::{Limit, WasmMemory}; use super::TxCache; @@ -15,13 +15,14 @@ use crate::ledger::storage::write_log::WriteLog; use crate::ledger::storage::{self, Storage, StorageHasher}; use crate::proto::Tx; use crate::types::address::Address; +use crate::types::hash::{Error as TxHashError, 
Hash, HASH_LENGTH}; use crate::types::internal::HostEnvResult; use crate::types::storage::{Key, TxIndex}; use crate::vm::host_env::{TxVmEnv, VpCtx, VpEvaluator, VpVmEnv}; use crate::vm::prefix_iter::PrefixIterators; use crate::vm::types::VpInput; use crate::vm::wasm::host_env::{tx_imports, vp_imports}; -use crate::vm::wasm::{memory, VpCache}; +use crate::vm::wasm::{memory, Cache, CacheName, VpCache}; use crate::vm::{ validate_untrusted_wasm, WasmCacheAccess, WasmValidationError, }; @@ -64,6 +65,12 @@ pub enum Error { }, #[error("Wasm validation error: {0}")] ValidationError(WasmValidationError), + #[error("Wasm code hash error: {0}")] + CodeHash(TxHashError), + #[error("Unable to load wasm code: {0}")] + LoadWasmCode(String), + #[error("Unable to find compiled wasm code")] + NoCompiledWasmCode, } /// Result for functions that may fail @@ -87,11 +94,17 @@ where H: 'static + StorageHasher, CA: 'static + WasmCacheAccess, { - // let wasm_store = untrusted_wasm_store(memory::tx_limit()); - - validate_untrusted_wasm(&tx_code).map_err(Error::ValidationError)?; - - let (module, store) = tx_wasm_cache.fetch_or_compile(&tx_code)?; + let (module, store) = if tx_code.as_ref().len() == HASH_LENGTH { + // we assume that there is no wasm code with HASH_LENGTH + let code_hash = + Hash::try_from(tx_code.as_ref()).map_err(Error::CodeHash)?; + fetch_or_compile(tx_wasm_cache, &code_hash, write_log, storage)? + } else { + match tx_wasm_cache.compile_or_fetch(tx_code)? { + Some((module, store)) => (module, store), + None => return Err(Error::NoCompiledWasmCode), + } + }; let mut iterators: PrefixIterators<'_, DB> = PrefixIterators::default(); let mut verifiers = BTreeSet::new(); @@ -157,7 +170,7 @@ where /// that triggered the execution. #[allow(clippy::too_many_arguments)] pub fn vp( - vp_code: impl AsRef<[u8]>, + vp_code_hash: &Hash, tx: &Tx, tx_index: &TxIndex, address: &Address, @@ -174,18 +187,14 @@ where H: 'static + StorageHasher, CA: 'static + WasmCacheAccess, { - let vp_code = vp_code.as_ref(); let input_data = match tx.data.as_ref() { Some(data) => &data[..], None => &[], }; - // let wasm_store = untrusted_wasm_store(memory::vp_limit()); - - validate_untrusted_wasm(vp_code).map_err(Error::ValidationError)?; - // Compile the wasm module - let (module, store) = vp_wasm_cache.fetch_or_compile(vp_code)?; + let (module, store) = + fetch_or_compile(&mut vp_wasm_cache, vp_code_hash, write_log, storage)?; let mut iterators: PrefixIterators<'_, DB> = PrefixIterators::default(); let mut result_buffer: Option> = None; @@ -319,10 +328,17 @@ where fn eval( &self, ctx: VpCtx<'static, DB, H, Self, CA>, - vp_code: Vec, + vp_code_hash: Vec, input_data: Vec, ) -> HostEnvResult { - match self.eval_native_result(ctx, vp_code, input_data) { + let vp_code_hash = match Hash::try_from(&vp_code_hash[..]) { + Ok(hash) => hash, + Err(err) => { + tracing::warn!("VP wasm code hash error {}", err); + return HostEnvResult::Fail; + } + }; + match self.eval_native_result(ctx, vp_code_hash, input_data) { Ok(ok) => HostEnvResult::from(ok), Err(err) => { tracing::warn!("VP eval error {}", err); @@ -342,24 +358,23 @@ where pub fn eval_native_result( &self, ctx: VpCtx<'static, DB, H, Self, CA>, - vp_code: Vec, + vp_code_hash: Hash, input_data: Vec, ) -> Result { - // let wasm_store = untrusted_wasm_store(memory::tx_limit()); - - validate_untrusted_wasm(&vp_code).map_err(Error::ValidationError)?; - let address = unsafe { ctx.address.get() }; let keys_changed = unsafe { ctx.keys_changed.get() }; let verifiers = unsafe { ctx.verifiers.get() 
}; let vp_wasm_cache = unsafe { ctx.vp_wasm_cache.get() }; + let write_log = unsafe { ctx.write_log.get() }; + let storage = unsafe { ctx.storage.get() }; let env = VpVmEnv { memory: WasmMemory::default(), ctx, }; // Compile the wasm module - let (module, store) = vp_wasm_cache.fetch_or_compile(&vp_code)?; + let (module, store) = + fetch_or_compile(vp_wasm_cache, &vp_code_hash, write_log, storage)?; let initial_memory = memory::prepare_vp_memory(&store).map_err(Error::MemoryError)?; @@ -400,6 +415,57 @@ pub fn prepare_wasm_code<T: AsRef<[u8]>>(code: T) -> Result<Vec<u8>> { elements::serialize(module).map_err(Error::SerializationError) } +// Fetch or compile wasm code from the cache or storage +fn fetch_or_compile<DB, H, CN, CA>( + wasm_cache: &mut Cache<CN, CA>, + code_hash: &Hash, + write_log: &WriteLog, + storage: &Storage<DB, H>, +) -> Result<(Module, Store)> +where + DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, + H: 'static + StorageHasher, + CN: 'static + CacheName, + CA: 'static + WasmCacheAccess, +{ + use crate::core::ledger::storage::write_log::StorageModification; + match wasm_cache.fetch(code_hash)? { + Some((module, store)) => Ok((module, store)), + None => { + let key = Key::wasm_code(code_hash); + let code = match write_log.read(&key).0 { + Some(StorageModification::Write { value }) => value.clone(), + _ => match storage + .read(&key) + .map_err(|e| { + Error::LoadWasmCode(format!( + "Read wasm code failed from storage: key {}, \ error {}", key, e )) })? .0 { Some(v) => v, None => { return Err(Error::LoadWasmCode(format!( "No wasm code in storage: key {}", key ))); } }, }; + + validate_untrusted_wasm(&code).map_err(Error::ValidationError)?; + + match wasm_cache.compile_or_fetch(code)? { + Some((module, store)) => Ok((module, store)), + None => Err(Error::NoCompiledWasmCode), + } + } + } +} + /// Get the gas rules used to meter wasm operations fn get_gas_rules() -> rules::Set { rules::Set::default().with_grow_cost(1) } @@ -409,6 +475,8 @@ fn get_gas_rules() -> rules::Set { mod tests { use borsh::BorshSerialize; use itertools::Either; + use namada_core::types::chain::ChainId; + use namada_test_utils::TestWasms; use test_log::test; use wasmer_vm::TrapCode; @@ -417,16 +485,6 @@ mod tests { use crate::types::validity_predicate::EvalVp; use crate::vm::wasm; - const TX_MEMORY_LIMIT_WASM: &str = "../wasm_for_tests/tx_memory_limit.wasm"; - const TX_NO_OP_WASM: &str = "../wasm_for_tests/tx_no_op.wasm"; - const TX_READ_STORAGE_KEY_WASM: &str = - "../wasm_for_tests/tx_read_storage_key.wasm"; - const VP_ALWAYS_TRUE_WASM: &str = "../wasm_for_tests/vp_always_true.wasm"; - const VP_EVAL_WASM: &str = "../wasm_for_tests/vp_eval.wasm"; - const VP_MEMORY_LIMIT_WASM: &str = "../wasm_for_tests/vp_memory_limit.wasm"; - const VP_READ_STORAGE_KEY_WASM: &str = - "../wasm_for_tests/vp_read_storage_key.wasm"; - /// Test that when a transaction wasm goes over the stack-height limit, the /// execution is aborted.
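// The resolution order in `fetch_or_compile` above is: compilation cache
// first, then the write log (for code written earlier in the same block),
// then on-disk storage; code read from the write log or storage is
// re-validated before being compiled. A minimal sketch of that order,
// assuming the simplified `fetch`/`compile_or_fetch` cache API used above
// (`resolve` and `read_code_by_hash` are illustrative names, not part of
// this crate):
//
//     fn resolve<CN: CacheName, CA: WasmCacheAccess>(
//         cache: &mut Cache<CN, CA>,
//         hash: &Hash,
//     ) -> Result<(Module, Store)> {
//         if let Some(hit) = cache.fetch(hash)? {
//             return Ok(hit); // already compiled and cached
//         }
//         let code = read_code_by_hash(hash)?; // write log, then storage
//         validate_untrusted_wasm(&code).map_err(Error::ValidationError)?;
//         cache.compile_or_fetch(code)?.ok_or(Error::NoCompiledWasmCode)
//     }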
#[test] @@ -477,8 +535,11 @@ mod tests { let tx_index = TxIndex::default(); // This code will allocate memory of the given size - let tx_code = - std::fs::read(TX_MEMORY_LIMIT_WASM).expect("cannot load wasm"); + let tx_code = TestWasms::TxMemoryLimit.read_bytes(); + // store the wasm code + let code_hash = Hash::sha256(&tx_code); + let key = Key::wasm_code(&code_hash); + write_log.write(&key, tx_code).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::TX_MEMORY_MAX_PAGES, 200); @@ -495,7 +556,7 @@ mod tests { &mut write_log, &mut gas_meter, &tx_index, - tx_code.clone(), + &code_hash, tx_data, &mut vp_cache, &mut tx_cache, @@ -510,7 +571,7 @@ mod tests { &mut write_log, &mut gas_meter, &tx_index, - tx_code, + &code_hash, tx_data, &mut vp_cache, &mut tx_cache, @@ -534,10 +595,17 @@ mod tests { let tx_index = TxIndex::default(); // This code will call `eval` with the other VP below - let vp_eval = std::fs::read(VP_EVAL_WASM).expect("cannot load wasm"); + let vp_eval = TestWasms::VpEval.read_bytes(); + // store the wasm code + let code_hash = Hash::sha256(&vp_eval); + let key = Key::wasm_code(&code_hash); + storage.write(&key, vp_eval).unwrap(); // This code will allocate memory of the given size - let vp_memory_limit = - std::fs::read(VP_MEMORY_LIMIT_WASM).expect("cannot load wasm"); + let vp_memory_limit = TestWasms::VpMemoryLimit.read_bytes(); + // store the wasm code + let limit_code_hash = Hash::sha256(&vp_memory_limit); + let key = Key::wasm_code(&limit_code_hash); + storage.write(&key, vp_memory_limit).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::VP_MEMORY_MAX_PAGES, 200); @@ -546,16 +614,16 @@ mod tests { // shouldn't fail let input = 2_usize.pow(23).try_to_vec().unwrap(); let eval_vp = EvalVp { - vp_code: vp_memory_limit.clone(), + vp_code_hash: limit_code_hash.clone(), input, }; let tx_data = eval_vp.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); // When the `eval`ed VP doesn't run out of memory, it should return // `true` let passed = vp( - vp_eval.clone(), + &code_hash, &tx, &tx_index, &addr, @@ -575,16 +643,16 @@ mod tests { // should fail let input = 2_usize.pow(24).try_to_vec().unwrap(); let eval_vp = EvalVp { - vp_code: vp_memory_limit, + vp_code_hash: limit_code_hash, input, }; let tx_data = eval_vp.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); // When the `eval`ed VP runs out of memory, its result should be // `false`, hence we should also get back `false` from the VP that // called `eval`. 
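// All of the updated tests follow the same setup: because `tx`, `vp` and
// `EvalVp` now take a code hash, each test first writes the wasm blob under
// its `Key::wasm_code` key and then passes the hash around instead of the
// code. A minimal sketch of the pattern, using the same helpers as above:
//
//     let code = TestWasms::TxNoOp.read_bytes();
//     let code_hash = Hash::sha256(&code);
//     // make the code resolvable by hash before calling `tx`/`vp`
//     write_log.write(&Key::wasm_code(&code_hash), code).unwrap();
//
// In these tests, tx wasms are staged in the write log while VP wasms are
// written directly to storage.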
let passed = vp( - vp_eval, + &code_hash, &tx, &tx_index, &addr, @@ -615,8 +683,11 @@ mod tests { let tx_index = TxIndex::default(); // This code will allocate memory of the given size - let vp_code = - std::fs::read(VP_MEMORY_LIMIT_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpMemoryLimit.read_bytes(); + // store the wasm code + let code_hash = Hash::sha256(&vp_code); + let key = Key::wasm_code(&code_hash); + storage.write(&key, vp_code).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::VP_MEMORY_MAX_PAGES, 200); @@ -624,10 +695,10 @@ mod tests { // Allocating `2^23` (8 MiB) should be below the memory limit and // shouldn't fail let tx_data = 2_usize.pow(23).try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( - vp_code.clone(), + &code_hash, &tx, &tx_index, &addr, @@ -645,9 +716,9 @@ mod tests { // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail let tx_data = 2_usize.pow(24).try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let error = vp( - vp_code, + &code_hash, &tx, &tx_index, &addr, @@ -674,7 +745,11 @@ mod tests { let mut gas_meter = BlockGasMeter::default(); let tx_index = TxIndex::default(); - let tx_no_op = std::fs::read(TX_NO_OP_WASM).expect("cannot load wasm"); + let tx_no_op = TestWasms::TxNoOp.read_bytes(); + // store the wasm code + let code_hash = Hash::sha256(&tx_no_op); + let key = Key::wasm_code(&code_hash); + write_log.write(&key, tx_no_op).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::TX_MEMORY_MAX_PAGES, 200); @@ -692,7 +767,7 @@ mod tests { &mut write_log, &mut gas_meter, &tx_index, - tx_no_op, + code_hash, tx_data, &mut vp_cache, &mut tx_cache, @@ -728,8 +803,11 @@ mod tests { let verifiers = BTreeSet::new(); let tx_index = TxIndex::default(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + // store the wasm code + let code_hash = Hash::sha256(&vp_code); + let key = Key::wasm_code(&code_hash); + storage.write(&key, vp_code).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::VP_MEMORY_MAX_PAGES, 200); @@ -738,10 +816,10 @@ mod tests { // limit and should fail let len = 2_usize.pow(24); let tx_data: Vec = vec![6_u8; len]; - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( - vp_code, + &code_hash, &tx, &tx_index, &addr, @@ -785,8 +863,11 @@ mod tests { let mut gas_meter = BlockGasMeter::default(); let tx_index = TxIndex::default(); - let tx_read_key = - std::fs::read(TX_READ_STORAGE_KEY_WASM).expect("cannot load wasm"); + let tx_read_key = TestWasms::TxReadStorageKey.read_bytes(); + // store the wasm code + let code_hash = Hash::sha256(&tx_read_key); + let key = Key::wasm_code(&code_hash); + write_log.write(&key, tx_read_key).unwrap(); // Allocating `2^24` (16 MiB) for a value in storage that the tx // attempts to read should be above the memory limit and should @@ -809,7 +890,7 @@ mod tests { &mut write_log, &mut gas_meter, &tx_index, - tx_read_key, + code_hash, tx_data, &mut vp_cache, &mut tx_cache, @@ -832,8 +913,11 @@ mod tests { let verifiers = BTreeSet::new(); 
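// Note the constructor change visible throughout these tests: `Tx::new` now
// also takes the chain ID the transaction is bound to and an optional
// expiration, e.g.
//
//     let tx = Tx::new(
//         vec![],                   // code or code hash
//         Some(tx_data),            // optional data payload
//         storage.chain_id.clone(), // chain ID, checked on inclusion
//         None,                     // optional expiration
//     );
//
// The exact type of the expiration argument is not shown in this diff; the
// call sites here only ever pass `None`.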
let tx_index = TxIndex::default(); - let vp_read_key = - std::fs::read(VP_READ_STORAGE_KEY_WASM).expect("cannot load wasm"); + let vp_read_key = TestWasms::VpReadStorageKey.read_bytes(); + // store the wasm code + let code_hash = Hash::sha256(&vp_read_key); + let key = Key::wasm_code(&code_hash); + storage.write(&key, vp_read_key).unwrap(); // Allocating `2^24` (16 MiB) for a value in storage that the tx // attempts to read should be above the memory limit and should @@ -847,10 +931,10 @@ mod tests { // Borsh. storage.write(&key, value.try_to_vec().unwrap()).unwrap(); let tx_data = key.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let error = vp( - vp_read_key, + &code_hash, &tx, &tx_index, &addr, @@ -883,10 +967,17 @@ mod tests { let tx_index = TxIndex::default(); // This code will call `eval` with the other VP below - let vp_eval = std::fs::read(VP_EVAL_WASM).expect("cannot load wasm"); + let vp_eval = TestWasms::VpEval.read_bytes(); + // store the wasm code + let code_hash = Hash::sha256(&vp_eval); + let key = Key::wasm_code(&code_hash); + storage.write(&key, vp_eval).unwrap(); // This code will read value from the storage - let vp_read_key = - std::fs::read(VP_READ_STORAGE_KEY_WASM).expect("cannot load wasm"); + let vp_read_key = TestWasms::VpReadStorageKey.read_bytes(); + // store the wasm code + let read_code_hash = Hash::sha256(&vp_read_key); + let key = Key::wasm_code(&read_code_hash); + storage.write(&key, vp_read_key).unwrap(); // Allocating `2^24` (16 MiB) for a value in storage that the tx // attempts to read should be above the memory limit and should @@ -901,14 +992,14 @@ mod tests { storage.write(&key, value.try_to_vec().unwrap()).unwrap(); let input = 2_usize.pow(23).try_to_vec().unwrap(); let eval_vp = EvalVp { - vp_code: vp_read_key, + vp_code_hash: read_code_hash, input, }; let tx_data = eval_vp.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let passed = vp( - vp_eval, + &code_hash, &tx, &tx_index, &addr, @@ -969,12 +1060,17 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let (mut tx_cache, _) = wasm::compilation_cache::common::testing::cache(); + // store the tx code + let code_hash = Hash::sha256(&tx_code); + let key = Key::wasm_code(&code_hash); + write_log.write(&key, tx_code).unwrap(); + tx( &storage, &mut write_log, &mut gas_meter, &tx_index, - tx_code, + code_hash, tx_data, &mut vp_cache, &mut tx_cache, @@ -1010,7 +1106,7 @@ mod tests { ) .expect("unexpected error converting wat2wasm").into_owned(); - let tx = Tx::new(vec![], None); + let tx = Tx::new(vec![], None, ChainId::default(), None); let tx_index = TxIndex::default(); let mut storage = TestStorage::default(); let addr = storage.address_gen.generate_address("rng seed"); @@ -1019,8 +1115,13 @@ mod tests { let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); + // store the vp code + let code_hash = Hash::sha256(&vp_code); + let key = Key::wasm_code(&code_hash); + storage.write(&key, vp_code).unwrap(); + vp( - vp_code, + &code_hash, &tx, &tx_index, &addr, diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index f75733663bd..699092a1c93 100644 --- a/test_utils/Cargo.toml +++ 
b/test_utils/Cargo.toml @@ -4,8 +4,9 @@ edition = "2021" license = "GPL-3.0" name = "namada_test_utils" resolver = "2" -version = "0.14.0" +version = "0.16.0" [dependencies] borsh = "0.9.0" namada_core = { path = "../core" } +strum = {version = "0.24", features = ["derive"]} diff --git a/test_utils/src/lib.rs b/test_utils/src/lib.rs index c2a3b136530..6b0352bcc7d 100644 --- a/test_utils/src/lib.rs +++ b/test_utils/src/lib.rs @@ -1 +1,96 @@ +//! Utilities for use in tests. + pub mod tx_data; + +use std::env; +use std::path::PathBuf; + +use strum::EnumIter; + +/// Path from the root of the Git repo to the directory under which built test +/// wasms can be found. +pub const WASM_FOR_TESTS_DIR: &str = "wasm_for_tests"; + +/// Corresponds to wasms that we build for tests, under [`WASM_FOR_TESTS_DIR`]. +/// See the `wasm_for_tests/wasm_source` crate for documentation on what these +/// wasms do. +#[allow(missing_docs)] +#[derive(Debug, Clone, Copy, EnumIter)] +pub enum TestWasms { + TxMemoryLimit, + TxMintTokens, + TxNoOp, + TxProposalCode, + TxReadStorageKey, + TxWriteStorageKey, + VpAlwaysFalse, + VpAlwaysTrue, + VpEval, + VpMemoryLimit, + VpReadStorageKey, +} + +impl TestWasms { + /// Get the path to where this test wasm is expected to be, or panic if not + /// able to. + pub fn path(&self) -> PathBuf { + let filename = match self { + TestWasms::TxMemoryLimit => "tx_memory_limit.wasm", + TestWasms::TxMintTokens => "tx_mint_tokens.wasm", + TestWasms::TxNoOp => "tx_no_op.wasm", + TestWasms::TxProposalCode => "tx_proposal_code.wasm", + TestWasms::TxReadStorageKey => "tx_read_storage_key.wasm", + TestWasms::TxWriteStorageKey => "tx_write.wasm", + TestWasms::VpAlwaysFalse => "vp_always_false.wasm", + TestWasms::VpAlwaysTrue => "vp_always_true.wasm", + TestWasms::VpEval => "vp_eval.wasm", + TestWasms::VpMemoryLimit => "vp_memory_limit.wasm", + TestWasms::VpReadStorageKey => "vp_read_storage_key.wasm", + }; + let cwd = + env::current_dir().expect("Couldn't get current working directory"); + // crudely find the root of the repo, we can't rely on the `.git` + // directory being present, so look instead for the presence of a + // CHANGELOG.md file + let repo_root = cwd + .ancestors() + .find(|path| path.join("CHANGELOG.md").exists()) + .unwrap_or_else(|| { + panic!( + "Couldn't find the root of the repository for the current \ + working directory {}", + cwd.to_string_lossy() + ) + }); + repo_root.join(WASM_FOR_TESTS_DIR).join(filename) + } + + /// Attempts to read the contents of this test wasm. Panics if it is not + /// able to for any reason. + pub fn read_bytes(&self) -> Vec { + let path = self.path(); + std::fs::read(&path).unwrap_or_else(|err| { + panic!( + "Could not read wasm at path {}: {:?}", + path.to_string_lossy(), + err + ) + }) + } +} + +#[cfg(test)] +mod tests { + use strum::IntoEnumIterator; + + use super::*; + + #[test] + /// Tests that all expected test wasms are present on disk. 
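// Typical usage of the new `TestWasms` helper, which replaces the hard-coded
// path constants that each test crate previously duplicated (a sketch,
// assuming the repository layout that the test below checks):
//
//     use namada_test_utils::TestWasms;
//
//     let code: Vec<u8> = TestWasms::VpAlwaysTrue.read_bytes();
//     let path = TestWasms::TxNoOp.path(); // <repo root>/wasm_for_tests/tx_no_op.wasm
//
// Locating the repo root by the presence of CHANGELOG.md keeps this working
// from any test working directory, including nested crates.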
+ fn test_wasms_path() { + for test_wasm in TestWasms::iter() { + let path = test_wasm.path(); + assert!(path.exists()); + } + } +} diff --git a/tests/Cargo.toml b/tests/Cargo.toml index e5c4293ebd2..c9890f1f9e8 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_tests" resolver = "2" -version = "0.14.0" +version = "0.16.0" [features] default = ["abciplus", "wasm-runtime"] @@ -21,17 +21,16 @@ abciplus = [ wasm-runtime = ["namada/wasm-runtime"] [dependencies] -namada = {path = "../shared", default-features = false, features = ["testing"]} +namada = {path = "../shared", default-features = false, features = ["testing", "namada-sdk"]} namada_core = {path = "../core", default-features = false, features = ["testing"]} namada_test_utils = {path = "../test_utils"} namada_vp_prelude = {path = "../vp_prelude", default-features = false} namada_tx_prelude = {path = "../tx_prelude", default-features = false} chrono = {version = "0.4.22", default-features = false, features = ["clock", "std"]} concat-idents = "1.1.2" -ibc = {version = "0.14.0", default-features = false} -ibc-proto = {version = "0.17.1", default-features = false} -ibc-relayer = {version = "0.14.0", default-features = false} -prost = "0.9.0" +ibc-relayer = {version = "0.22.0", default-features = false} +ibc-relayer-types = {version = "0.22.0", default-features = false} +prost = "0.11.6" regex = "1.7.0" serde_json = {version = "1.0.65"} sha2 = "0.9.3" @@ -45,8 +44,8 @@ tokio = {version = "1.8.2", features = ["full"]} tracing = "0.1.30" tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} derivative = "2.2.0" -rust_decimal = "1.26.1" -rust_decimal_macros = "1.26.1" +rust_decimal = "=1.26.1" +rust_decimal_macros = "=1.26.1" [dev-dependencies] namada_apps = {path = "../apps", default-features = false, features = ["testing"]} @@ -63,7 +62,7 @@ fs_extra = "1.2.0" itertools = "0.10.0" pretty_assertions = "0.7.2" # A fork with state machine testing -proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} +proptest = {git = "https://github.com/heliaxdev/proptest", rev = "8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1"} rand = "0.8" toml = "0.5.9" diff --git a/tests/src/e2e/eth_bridge_tests.rs b/tests/src/e2e/eth_bridge_tests.rs index fe97731b221..89532df10bf 100644 --- a/tests/src/e2e/eth_bridge_tests.rs +++ b/tests/src/e2e/eth_bridge_tests.rs @@ -1,26 +1,38 @@ +use std::str::FromStr; + use borsh::BorshSerialize; use namada::ledger::eth_bridge; use namada_core::types::storage; -use namada_core::types::storage::KeySeg; use namada_test_utils::tx_data::TxWriteData; +use namada_test_utils::TestWasms; use crate::e2e::helpers::get_actor_rpc; use crate::e2e::setup; -use crate::e2e::setup::constants::{wasm_abs_path, ALBERT, TX_WRITE_WASM}; +use crate::e2e::setup::constants::ALBERT; use crate::e2e::setup::{Bin, Who}; use crate::{run, run_as}; -const ETH_BRIDGE_ADDRESS: &str = "atest1v9hx7w36g42ysgzzwf5kgem9ypqkgerjv4ehxgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpq8f99ew"; +/// # Examples +/// +/// ``` +/// let storage_key = storage_key("queue"); +/// assert_eq!(storage_key, "#atest1v9hx7w36g42ysgzzwf5kgem9ypqkgerjv4ehxgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpq8f99ew/queue"); +/// ``` +fn storage_key(path: &str) -> String { + format!("#{}/{}", eth_bridge::vp::ADDRESS, path) +} +/// Test that a regular transaction cannot modify arbitrary keys of the Ethereum +/// bridge VP. 
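// The `storage_key` helper above produces the raw string form of a key under
// the bridge's internal address, `#<address>/<path>`, which the test parses
// back with `storage::Key::from_str`. A sketch of the intended use (assuming
// the `eth_bridge::vp::ADDRESS` constant imported above):
//
//     let raw = storage_key("arbitrary");
//     let key = storage::Key::from_str(&raw).expect("the key should parse");
//
// Note that doc tests on private functions are not compiled or run, so the
// doc example on `storage_key` is illustrative only.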
#[test] -fn everything() { +fn test_unauthorized_tx_cannot_write_storage() { const LEDGER_STARTUP_TIMEOUT_SECONDS: u64 = 30; const CLIENT_COMMAND_TIMEOUT_SECONDS: u64 = 30; const SOLE_VALIDATOR: Who = Who::Validator(0); let test = setup::single_node_net().unwrap(); - let mut namadan_ledger = run_as!( + let mut ledger = run_as!( test, SOLE_VALIDATOR, Bin::Node, @@ -28,20 +40,16 @@ fn everything() { Some(LEDGER_STARTUP_TIMEOUT_SECONDS) ) .unwrap(); - namadan_ledger - .exp_string("Namada ledger node started") - .unwrap(); - namadan_ledger - .exp_string("Tendermint node started") - .unwrap(); - namadan_ledger.exp_string("Committed block hash").unwrap(); - let _bg_ledger = namadan_ledger.background(); + ledger.exp_string("Namada ledger node started").unwrap(); + ledger.exp_string("Tendermint node started").unwrap(); + ledger.exp_string("Committed block hash").unwrap(); + let _bg_ledger = ledger.background(); - let tx_data_path = test.test_dir.path().join("queue_storage_key.txt"); + let tx_data_path = test.test_dir.path().join("arbitrary_storage_key.txt"); std::fs::write( &tx_data_path, TxWriteData { - key: storage::Key::from(eth_bridge::vp::ADDRESS.to_db_key()), + key: storage::Key::from_str(&storage_key("arbitrary")).unwrap(), value: b"arbitrary value".to_vec(), } .try_to_vec() @@ -49,7 +57,7 @@ fn everything() { ) .unwrap(); - let tx_code_path = wasm_abs_path(TX_WRITE_WASM); + let tx_code_path = TestWasms::TxWriteStorageKey.path(); let tx_data_path = tx_data_path.to_string_lossy().to_string(); let tx_code_path = tx_code_path.to_string_lossy().to_string(); @@ -62,36 +70,23 @@ fn everything() { &tx_code_path, "--data-path", &tx_data_path, - "--ledger-address", + "--node", &ledger_addr, ]; - for &dry_run in &[true, false] { - let tx_args = if dry_run { - vec![tx_args.clone(), vec!["--dry-run"]].concat() - } else { - tx_args.clone() - }; - let mut namadac_tx = run!( - test, - Bin::Client, - tx_args, - Some(CLIENT_COMMAND_TIMEOUT_SECONDS) - ) - .unwrap(); + let mut client_tx = run!( + test, + Bin::Client, + tx_args, + Some(CLIENT_COMMAND_TIMEOUT_SECONDS) + ) + .unwrap(); - if !dry_run { - namadac_tx.exp_string("Transaction accepted").unwrap(); - namadac_tx.exp_string("Transaction applied").unwrap(); - } - // TODO: we should check here explicitly with the ledger via a - // Tendermint RPC call that the path `value/#EthBridge/queue` - // is unchanged rather than relying solely on looking at namadac - // stdout. 
- namadac_tx.exp_string("Transaction is invalid").unwrap(); - namadac_tx - .exp_string(&format!("Rejected: {}", ETH_BRIDGE_ADDRESS)) - .unwrap(); - namadac_tx.assert_success(); - } + client_tx.exp_string("Transaction accepted").unwrap(); + client_tx.exp_string("Transaction applied").unwrap(); + client_tx.exp_string("Transaction is invalid").unwrap(); + client_tx + .exp_string(&format!("Rejected: {}", eth_bridge::vp::ADDRESS)) + .unwrap(); + client_tx.assert_success(); } diff --git a/tests/src/e2e/helpers.rs b/tests/src/e2e/helpers.rs index 943962f6207..713a53c4e7d 100644 --- a/tests/src/e2e/helpers.rs +++ b/tests/src/e2e/helpers.rs @@ -173,7 +173,7 @@ pub fn find_bonded_stake( "bonded-stake", "--validator", alias.as_ref(), - "--ledger-address", + "--node", ledger_address ], Some(10) @@ -197,7 +197,7 @@ pub fn get_epoch(test: &Test, ledger_address: &str) -> Result { let mut find = run!( test, Bin::Client, - &["epoch", "--ledger-address", ledger_address], + &["epoch", "--node", ledger_address], Some(10) )?; let (unread, matched) = find.exp_regex("Last committed epoch: .*")?; @@ -220,7 +220,7 @@ pub fn get_height(test: &Test, ledger_address: &str) -> Result { let mut find = run!( test, Bin::Client, - &["block", "--ledger-address", ledger_address], + &["block", "--node", ledger_address], Some(10) )?; let (unread, matched) = find.exp_regex("Last committed block ID: .*")?; diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index 8151cdaaca4..00ee58d52bd 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -15,71 +15,74 @@ use core::time::Duration; use color_eyre::eyre::Result; use eyre::eyre; -use ibc::clients::ics07_tendermint::client_state::{ +use ibc_relayer::client_state::AnyClientState; +use ibc_relayer::config::types::{MaxMsgNum, MaxTxSize, Memo}; +use ibc_relayer::config::{AddressType, ChainConfig, GasPrice, PacketFilter}; +use ibc_relayer::event::ibc_event_try_from_abci_event; +use ibc_relayer::keyring::Store; +use ibc_relayer::light_client::tendermint::LightClient as TmLightClient; +use ibc_relayer::light_client::{LightClient, Verified}; +use ibc_relayer_types::clients::ics07_tendermint::client_state::{ AllowUpdate, ClientState as TmClientState, }; -use ibc::clients::ics07_tendermint::consensus_state::ConsensusState as TmConsensusState; -use ibc::core::ics02_client::client_consensus::{ - AnyConsensusState, ConsensusState, -}; -use ibc::core::ics02_client::client_state::{AnyClientState, ClientState}; -use ibc::core::ics02_client::header::Header; -use ibc::core::ics02_client::height::Height; -use ibc::core::ics02_client::msgs::create_client::MsgCreateAnyClient; -use ibc::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; -use ibc::core::ics02_client::trust_threshold::TrustThreshold; -use ibc::core::ics03_connection::connection::Counterparty as ConnCounterparty; -use ibc::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; -use ibc::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; -use ibc::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; -use ibc::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; -use ibc::core::ics03_connection::version::Version as ConnVersion; -use ibc::core::ics04_channel::channel::{ +use ibc_relayer_types::clients::ics07_tendermint::consensus_state::ConsensusState as TmConsensusState; +use ibc_relayer_types::core::ics02_client::msgs::create_client::MsgCreateClient; +use 
ibc_relayer_types::core::ics02_client::msgs::update_client::MsgUpdateClient; +use ibc_relayer_types::core::ics02_client::trust_threshold::TrustThreshold; +use ibc_relayer_types::core::ics03_connection::connection::Counterparty as ConnCounterparty; +use ibc_relayer_types::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; +use ibc_relayer_types::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; +use ibc_relayer_types::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; +use ibc_relayer_types::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; +use ibc_relayer_types::core::ics03_connection::version::Version as ConnVersion; +use ibc_relayer_types::core::ics04_channel::channel::{ ChannelEnd, Counterparty as ChanCounterparty, Order as ChanOrder, State as ChanState, }; -use ibc::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; -use ibc::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; -use ibc::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; -use ibc::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; -use ibc::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; -use ibc::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; -use ibc::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; -use ibc::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; -use ibc::core::ics04_channel::msgs::timeout::MsgTimeout; -use ibc::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; -use ibc::core::ics04_channel::packet::Packet; -use ibc::core::ics04_channel::Version as ChanVersion; -use ibc::core::ics23_commitment::commitment::CommitmentProofBytes; -use ibc::core::ics23_commitment::merkle::convert_tm_to_ics_merkle_proof; -use ibc::core::ics24_host::identifier::{ +use ibc_relayer_types::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; +use ibc_relayer_types::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; +use ibc_relayer_types::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; +use ibc_relayer_types::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; +use ibc_relayer_types::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; +use ibc_relayer_types::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; +use ibc_relayer_types::core::ics04_channel::msgs::timeout::MsgTimeout; +use ibc_relayer_types::core::ics04_channel::packet::Packet; +use ibc_relayer_types::core::ics04_channel::version::Version as ChanVersion; +use ibc_relayer_types::core::ics23_commitment::commitment::{ + CommitmentPrefix, CommitmentProofBytes, +}; +use ibc_relayer_types::core::ics23_commitment::merkle::convert_tm_to_ics_merkle_proof; +use ibc_relayer_types::core::ics24_host::identifier::{ ChainId, ClientId, ConnectionId, PortChannelId, PortId, }; -use ibc::events::{from_tx_response_event, IbcEvent}; -use ibc::proofs::{ConsensusProof, Proofs}; -use ibc::signer::Signer; -use ibc::tx_msg::Msg; -use ibc_relayer::config::types::{MaxMsgNum, MaxTxSize, Memo}; -use ibc_relayer::config::{AddressType, ChainConfig, GasPrice, PacketFilter}; -use ibc_relayer::keyring::Store; -use ibc_relayer::light_client::tendermint::LightClient as TmLightClient; -use ibc_relayer::light_client::{LightClient, Verified}; -use namada::core::ledger::ibc::actions::{commitment_prefix, port_channel_id}; +use ibc_relayer_types::events::IbcEvent; +use ibc_relayer_types::proofs::{ConsensusProof, Proofs}; +use 
ibc_relayer_types::signer::Signer; +use ibc_relayer_types::tx_msg::Msg; +use ibc_relayer_types::Height; +use namada::ibc::core::ics24_host::identifier::PortChannelId as IbcPortChannelId; +use namada::ibc::Height as IbcHeight; +use namada::ibc_proto::google::protobuf::Any; use namada::ledger::ibc::storage::*; +use namada::ledger::parameters::{storage as param_storage, EpochDuration}; +use namada::ledger::pos::{self, PosParams}; use namada::ledger::storage::ics23_specs::ibc_proof_specs; use namada::ledger::storage::Sha256Hasher; use namada::types::address::{Address, InternalAddress}; use namada::types::key::PublicKey; use namada::types::storage::{BlockHeight, Key, RESERVED_ADDRESS_PREFIX}; use namada::types::token::Amount; -use namada_apps::client::rpc::query_storage_value_bytes; +use namada_apps::client::rpc::{ + query_storage_value, query_storage_value_bytes, +}; use namada_apps::client::utils::id_from_pk; +use namada_apps::config::genesis::genesis_config::GenesisConfig; +use prost::Message; use setup::constants::*; use tendermint::block::Header as TmHeader; use tendermint::merkle::proof::Proof as TmProof; use tendermint::trust_threshold::TrustThresholdFraction; use tendermint_config::net::Address as TendermintAddress; -use tendermint_proto::Protobuf; use tendermint_rpc::{Client, HttpClient, Url}; use tokio::runtime::Runtime; @@ -89,18 +92,33 @@ use crate::{run, run_as}; #[test] fn run_ledger_ibc() -> Result<()> { - let (test_a, test_b) = setup::two_single_node_nets()?; + let (test_a, test_b) = setup_two_single_node_nets()?; // Run Chain A - let mut ledger_a = - run_as!(test_a, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; + let mut ledger_a = run_as!( + test_a, + Who::Validator(0), + Bin::Node, + &["ledger", "run", "--tx-index"], + Some(40) + )?; ledger_a.exp_string("Namada ledger node started")?; // Run Chain B - let mut ledger_b = - run_as!(test_b, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; + let mut ledger_b = run_as!( + test_b, + Who::Validator(0), + Bin::Node, + &["ledger", "run", "--tx-index"], + Some(40) + )?; ledger_b.exp_string("Namada ledger node started")?; ledger_a.exp_string("This node is a validator")?; ledger_b.exp_string("This node is a validator")?; + + // Wait for a first block + ledger_a.exp_string("Committed block hash")?; + ledger_b.exp_string("Committed block hash")?; + let _bg_ledger_a = ledger_a.background(); let _bg_ledger_b = ledger_b.background(); @@ -128,7 +146,7 @@ fn run_ledger_ibc() -> Result<()> { &client_id_b, &port_channel_id_a, )?; - check_balances(&port_channel_id_a, &port_channel_id_b, &test_a, &test_b)?; + check_balances(&port_channel_id_b, &test_a, &test_b)?; // Transfer 50000 received over IBC on Chain B transfer_received_token(&port_channel_id_b, &test_b)?; @@ -143,86 +161,67 @@ fn run_ledger_ibc() -> Result<()> { &client_id_b, &port_channel_id_b, )?; - check_balances_after_back( - &port_channel_id_a, - &port_channel_id_b, - &test_a, - &test_b, - )?; + check_balances_after_back(&port_channel_id_b, &test_a, &test_b)?; // Transfer a token and it will time out and refund transfer_timeout(&test_a, &test_b, &client_id_a, &port_channel_id_a)?; // The balance should not be changed - check_balances_after_back( - &port_channel_id_a, - &port_channel_id_b, - &test_a, - &test_b, - )?; - - // Close the channel on Chain A - close_channel_init(&test_a, &port_channel_id_a)?; + check_balances_after_back(&port_channel_id_b, &test_a, &test_b)?; - // Try transfer from Chain B and it will refund with TimeoutOnClose - transfer_timeout_on_close( - 
&test_a, - &test_b, - &client_id_a, - &client_id_b, - &port_channel_id_a, - &port_channel_id_b, - )?; - // The balance should not be changed - check_balances_after_back( - &port_channel_id_a, - &port_channel_id_b, - &test_a, - &test_b, - )?; - - // Close the channel on Chain B - close_channel_confirm( - &test_a, - &test_b, - &client_id_a, - &client_id_b, - &port_channel_id_a, - &port_channel_id_b, - )?; - - // Check a transfer will fail - try_transfer_on_close(&test_a, &test_b, &port_channel_id_a)?; + // Skip tests for closing a channel and timeout_on_close since the transfer + // channel cannot be closed Ok(()) } +fn setup_two_single_node_nets() -> Result<(Test, Test)> { + // epoch per 100 seconds + let update_genesis = |mut genesis: GenesisConfig| { + genesis.parameters.epochs_per_year = 315_360; + genesis + }; + let update_genesis_b = |mut genesis: GenesisConfig| { + genesis.parameters.epochs_per_year = 315_360; + setup::set_validators(1, genesis, |_| setup::ANOTHER_CHAIN_PORT_OFFSET) + }; + Ok(( + setup::network(update_genesis, None)?, + setup::network(update_genesis_b, None)?, + )) +} + fn create_client(test_a: &Test, test_b: &Test) -> Result<(ClientId, ClientId)> { let height = query_height(test_b)?; let client_state = make_client_state(test_b, height); let height = client_state.latest_height(); - let message = MsgCreateAnyClient { - client_state, - consensus_state: make_consensus_state(test_b, height)?, - signer: Signer::new("test_a"), + let message = MsgCreateClient { + client_state: client_state.into(), + consensus_state: make_consensus_state(test_b, height)?.into(), + signer: Signer::from_str("test_a").expect("invalid signer"), }; let height_a = submit_ibc_tx(test_a, message, ALBERT)?; let height = query_height(test_a)?; let client_state = make_client_state(test_a, height); let height = client_state.latest_height(); - let message = MsgCreateAnyClient { - client_state, - consensus_state: make_consensus_state(test_a, height)?, - signer: Signer::new("test_b"), + let message = MsgCreateClient { + client_state: client_state.into(), + consensus_state: make_consensus_state(test_a, height)?.into(), + signer: Signer::from_str("test_b").expect("invalid signer"), }; let height_b = submit_ibc_tx(test_b, message, ALBERT)?; + // convert the client IDs from `ibc_relayer_type` to `ibc` let client_id_a = match get_event(test_a, height_a)? { - Some(IbcEvent::CreateClient(event)) => event.client_id().clone(), + Some(IbcEvent::CreateClient(event)) => { + ClientId::from_str(event.client_id().as_str()).unwrap() + } _ => return Err(eyre!("Transaction failed")), }; let client_id_b = match get_event(test_b, height_b)? 
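// The `epochs_per_year = 315_360` used in `setup_two_single_node_nets` above
// pins the epoch length to roughly 100 seconds: one year is
// 365 * 24 * 60 * 60 = 31_536_000 seconds, and 31_536_000 / 315_360 = 100.
// A quick check:
//
//     const SECONDS_PER_YEAR: u64 = 365 * 24 * 60 * 60; // 31_536_000
//     assert_eq!(SECONDS_PER_YEAR / 315_360, 100); // ~100 s per epoch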
{ - Some(IbcEvent::CreateClient(event)) => event.client_id().clone(), + Some(IbcEvent::CreateClient(event)) => { + ClientId::from_str(event.client_id().as_str()).unwrap() + } _ => return Err(eyre!("Transaction failed")), }; @@ -230,35 +229,54 @@ fn create_client(test_a: &Test, test_b: &Test) -> Result<(ClientId, ClientId)> { Ok((client_id_a, client_id_b)) } -fn make_client_state(test: &Test, height: Height) -> AnyClientState { - let unbonding_period = Duration::new(1814400, 0); +fn make_client_state(test: &Test, height: Height) -> TmClientState { + let rpc = get_actor_rpc(test, &Who::Validator(0)); + let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); + let client = HttpClient::new(ledger_address).unwrap(); + + let key = pos::params_key(); + let pos_params = Runtime::new() + .unwrap() + .block_on(query_storage_value::<PosParams>(&client, &key)) + .unwrap(); + let pipeline_len = pos_params.pipeline_len; + + let key = param_storage::get_epoch_duration_storage_key(); + let epoch_duration = Runtime::new() + .unwrap() + .block_on(query_storage_value::<EpochDuration>( + &client, &key, + )) + .unwrap(); + let unbonding_period = pipeline_len * epoch_duration.min_duration.0; + let trusting_period = 2 * unbonding_period / 3; let max_clock_drift = Duration::new(60, 0); let chain_id = ChainId::from_str(test.net.chain_id.as_str()).unwrap(); + TmClientState::new( chain_id, TrustThreshold::default(), - trusting_period, - unbonding_period, + Duration::from_secs(trusting_period), + Duration::from_secs(unbonding_period), max_clock_drift, height, ibc_proof_specs::<Sha256Hasher>().into(), - vec!["upgrade".to_string(), "upgradedIBCState".to_string()], + vec![], AllowUpdate { after_expiry: true, after_misbehaviour: true, }, ) .unwrap() - .wrap_any() } fn make_consensus_state( test: &Test, height: Height, -) -> Result<AnyConsensusState> { +) -> Result<TmConsensusState> { let header = query_header(test, height)?; - Ok(TmConsensusState::from(header).wrap_any()) + Ok(TmConsensusState::from(header)) } fn update_client_with_height( @@ -268,10 +286,10 @@ target_height: Height, ) -> Result<()> { // check the current (stale) state on the target chain - let key = client_state_key(target_client_id); - let (value, _) = query_value_with_proof(target_test, &key, target_height)?; - let client_state = match value { - Some(v) => AnyClientState::decode_vec(&v) + let key = client_state_key(&target_client_id.as_str().parse().unwrap()); + let (value, _) = query_value_with_proof(target_test, &key, None)?; + let cs = match value { + Some(v) => Any::decode(&v[..]) .map_err(|e| eyre!("Decoding the client state failed: {}", e))?, None => { return Err(eyre!( @@ -280,6 +298,8 @@ )); } }; + let client_state = TmClientState::try_from(cs) + .expect("the state should be a TmClientState"); let trusted_height = client_state.latest_height(); update_client( @@ -288,7 +308,7 @@ target_client_id, trusted_height, target_height, - &client_state, + client_state, ) } @@ -298,7 +318,7 @@ client_id: &ClientId, trusted_height: Height, target_height: Height, - client_state: &AnyClientState, + client_state: TmClientState, ) -> Result<()> { let config = dummy_chain_config(src_test); let pk = get_validator_pk(src_test, &Who::Validator(0)).unwrap(); let mut light_client = TmLightClient::from_config(&config, peer_id).unwrap(); let Verified { target, supporting } = light_client - .header_and_minimal_set(trusted_height, target_height, client_state) + .header_and_minimal_set(
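// `make_client_state` above now derives the light client's periods from
// on-chain parameters instead of the old hard-coded 1814400 s (21 days):
// `unbonding_period = pipeline_len * min_duration`, and the trusting period
// is the conventional 2/3 of that, as required for Tendermint light clients.
// For example, with a pipeline length of 2 and a minimum epoch duration of
// 100 s:
//
//     let unbonding_period: u64 = 2 * 100; // 200 s
//     let trusting_period: u64 = 2 * unbonding_period / 3; // 133 s
//     assert!(trusting_period < unbonding_period);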
trusted_height, + target_height, + &AnyClientState::Tendermint(client_state), + ) .map_err(|e| eyre!("Building the header failed: {}", e))?; for header in supporting { - let message = MsgUpdateAnyClient { - header: header.wrap_any(), + let message = MsgUpdateClient { + header: header.into(), client_id: client_id.clone(), - signer: Signer::new("test"), + signer: Signer::from_str("test").expect("invalid signer"), }; submit_ibc_tx(target_test, message, ALBERT)?; } - let message = MsgUpdateAnyClient { - header: target.wrap_any(), + let message = MsgUpdateClient { + header: target.into(), client_id: client_id.clone(), - signer: Signer::new("test"), + signer: Signer::from_str("test").expect("invalid signer"), }; submit_ibc_tx(target_test, message, ALBERT)?; @@ -334,6 +358,7 @@ fn dummy_chain_config(test: &Test) -> ChainConfig { // use only id and rpc_addr ChainConfig { id: ChainId::new(test.net.chain_id.as_str().to_string(), 0), + r#type: ibc_relayer::chain::ChainType::CosmosSdk, rpc_addr: rpc_addr.clone(), websocket_addr: rpc_addr.clone(), grpc_addr: rpc_addr, @@ -345,6 +370,7 @@ fn dummy_chain_config(test: &Test) -> ChainConfig { default_gas: None, max_gas: None, gas_adjustment: None, + gas_multiplier: None, fee_granter: None, max_msg_num: MaxMsgNum::default(), max_tx_size: MaxTxSize::default(), @@ -352,11 +378,13 @@ fn dummy_chain_config(test: &Test) -> ChainConfig { max_block_time: Duration::new(5, 0), trusting_period: None, memo_prefix: Memo::default(), - proof_specs: ibc_proof_specs::().into(), + proof_specs: Some(ibc_proof_specs::().into()), + sequential_batch_tx: true, trust_threshold: TrustThresholdFraction::ONE_THIRD, gas_price: GasPrice::new(0.0, "dummy".to_string()), packet_filter: PacketFilter::default(), address_type: AddressType::Cosmos, + extension_options: Vec::new(), } } @@ -375,14 +403,14 @@ fn connection_handshake( ), version: Some(ConnVersion::default()), delay_period: Duration::new(1, 0), - signer: Signer::new("test_a"), + signer: Signer::from_str("test_a").expect("invalid signer"), }; // OpenInitConnection on Chain A let height = submit_ibc_tx(test_a, msg, ALBERT)?; let conn_id_a = match get_event(test_a, height)? { Some(IbcEvent::OpenInitConnection(event)) => event .connection_id() - .clone() + .cloned() .ok_or(eyre!("No connection ID is set"))?, _ => return Err(eyre!("Transaction failed")), }; @@ -399,12 +427,12 @@ fn connection_handshake( let msg = MsgConnectionOpenTry { previous_connection_id: None, client_id: client_id_b.clone(), - client_state: Some(client_state), + client_state: Some(client_state.into()), counterparty, counterparty_versions: vec![ConnVersion::default()], proofs, delay_period: Duration::new(1, 0), - signer: Signer::new("test_b"), + signer: Signer::from_str("test_b").expect("invalid signer"), }; // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; @@ -413,7 +441,7 @@ fn connection_handshake( let conn_id_b = match get_event(test_b, height)? 
{ Some(IbcEvent::OpenTryConnection(event)) => event .connection_id() - .clone() + .cloned() .ok_or(eyre!("No connection ID is set"))?, _ => return Err(eyre!("Transaction failed")), }; @@ -425,10 +453,10 @@ fn connection_handshake( let msg = MsgConnectionOpenAck { connection_id: conn_id_a.clone(), counterparty_connection_id: conn_id_b.clone(), - client_state: Some(client_state), + client_state: Some(client_state.into()), proofs, version: ConnVersion::default(), - signer: Signer::new("test_a"), + signer: Signer::from_str("test_a").expect("invalid signer"), }; // Update the client state of Chain B on Chain A update_client_with_height(test_b, test_a, client_id_a, height_b)?; @@ -442,7 +470,7 @@ fn connection_handshake( let msg = MsgConnectionOpenConfirm { connection_id: conn_id_b.clone(), proofs, - signer: Signer::new("test_b"), + signer: Signer::from_str("test_b").expect("invalid signer"), }; // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; @@ -458,11 +486,11 @@ fn get_connection_proofs( client_id: &ClientId, conn_id: &ConnectionId, target_height: Height, -) -> Result<(AnyClientState, Proofs)> { +) -> Result<(TmClientState, Proofs)> { // we need proofs at the height of the previous block let query_height = target_height.decrement().unwrap(); - let key = connection_key(conn_id); - let (_, tm_proof) = query_value_with_proof(test, &key, query_height)?; + let key = connection_key(&conn_id.as_str().parse().unwrap()); + let (_, tm_proof) = query_value_with_proof(test, &key, Some(query_height))?; let connection_proof = convert_proof(tm_proof)?; let (client_state, client_state_proof, consensus_proof) = @@ -489,7 +517,7 @@ fn channel_handshake( conn_id_b: &ConnectionId, ) -> Result<(PortChannelId, PortChannelId)> { // OpenInitChannel on Chain A - let port_id = PortId::from_str("test_port").unwrap(); + let port_id = PortId::transfer(); let counterparty = ChanCounterparty::new(port_id.clone(), None); let channel = ChannelEnd::new( ChanState::Init, @@ -501,7 +529,7 @@ fn channel_handshake( let msg = MsgChannelOpenInit { port_id: port_id.clone(), channel, - signer: Signer::new("test_a"), + signer: Signer::from_str("test_a").expect("invalid signer"), }; let height = submit_ibc_tx(test_a, msg, ALBERT)?; let channel_id_a = @@ -512,14 +540,15 @@ fn channel_handshake( .ok_or(eyre!("No channel ID is set"))?, _ => return Err(eyre!("Transaction failed")), }; - let port_channel_id_a = port_channel_id(port_id.clone(), channel_id_a); + let port_channel_id_a = + PortChannelId::new(channel_id_a.clone(), port_id.clone()); // get the proofs from Chain A let height_a = query_height(test_a)?; let proofs = get_channel_proofs(test_a, client_id_a, &port_channel_id_a, height_a)?; let counterparty = - ChanCounterparty::new(port_id.clone(), Some(channel_id_a)); + ChanCounterparty::new(port_id.clone(), Some(channel_id_a.clone())); let channel = ChannelEnd::new( ChanState::TryOpen, ChanOrder::Unordered, @@ -533,7 +562,7 @@ fn channel_handshake( channel, counterparty_version: ChanVersion::ics20(), proofs, - signer: Signer::new("test_b"), + signer: Signer::from_str("test_b").expect("invalid signer"), }; // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; @@ -546,7 +575,8 @@ fn channel_handshake( .ok_or(eyre!("No channel ID is set"))?, _ => return Err(eyre!("Transaction failed")), }; - let port_channel_id_b = port_channel_id(port_id.clone(), channel_id_b); + let port_channel_id_b = + 
PortChannelId::new(channel_id_b.clone(), port_id.clone()); // get the A's proofs on Chain B let height_b = query_height(test_b)?; @@ -555,10 +585,10 @@ fn channel_handshake( let msg = MsgChannelOpenAck { port_id: port_id.clone(), channel_id: channel_id_a, - counterparty_channel_id: channel_id_b, + counterparty_channel_id: channel_id_b.clone(), counterparty_version: ChanVersion::ics20(), proofs, - signer: Signer::new("test_a"), + signer: Signer::from_str("test_a").expect("invalid signer"), }; // Update the client state of Chain B on Chain A update_client_with_height(test_b, test_a, client_id_a, height_b)?; @@ -573,7 +603,7 @@ fn channel_handshake( port_id, channel_id: channel_id_b, proofs, - signer: Signer::new("test_b"), + signer: Signer::from_str("test_b").expect("invalid signer"), }; // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; @@ -583,47 +613,6 @@ fn channel_handshake( Ok((port_channel_id_a, port_channel_id_b)) } -fn close_channel_init( - test: &Test, - port_channel_id: &PortChannelId, -) -> Result<()> { - let msg = MsgChannelCloseInit { - port_id: port_channel_id.port_id.clone(), - channel_id: port_channel_id.channel_id, - signer: Signer::new("test"), - }; - // CloseInitChannel on Chain A - submit_ibc_tx(test, msg, ALBERT)?; - - Ok(()) -} - -fn close_channel_confirm( - test_a: &Test, - test_b: &Test, - client_id_a: &ClientId, - client_id_b: &ClientId, - port_channel_id_a: &PortChannelId, - port_channel_id_b: &PortChannelId, -) -> Result<()> { - // get the proofs on Chain A - let height_a = query_height(test_a)?; - let proofs = - get_channel_proofs(test_a, client_id_a, port_channel_id_a, height_a)?; - let msg = MsgChannelCloseConfirm { - port_id: port_channel_id_b.port_id.clone(), - channel_id: port_channel_id_b.channel_id, - proofs, - signer: Signer::new("test_b"), - }; - // Update the client state of Chain A on Chain B - update_client_with_height(test_a, test_b, client_id_b, height_a)?; - // CloseConfirmChannel on Chain B - submit_ibc_tx(test_b, msg, ALBERT)?; - - Ok(()) -} - fn get_channel_proofs( test: &Test, client_id: &ClientId, @@ -632,8 +621,12 @@ fn get_channel_proofs( ) -> Result { // we need proofs at the height of the previous block let query_height = target_height.decrement().unwrap(); - let key = channel_key(port_channel_id); - let (_, tm_proof) = query_value_with_proof(test, &key, query_height)?; + let port_channel_id = IbcPortChannelId::new( + port_channel_id.channel_id.as_str().parse().unwrap(), + port_channel_id.port_id.as_str().parse().unwrap(), + ); + let key = channel_key(&port_channel_id); + let (_, tm_proof) = query_value_with_proof(test, &key, Some(query_height))?; let proof = convert_proof(tm_proof)?; let (_, client_state_proof, consensus_proof) = @@ -655,11 +648,12 @@ fn get_client_states( test: &Test, client_id: &ClientId, target_height: Height, // should have been already decremented -) -> Result<(AnyClientState, CommitmentProofBytes, ConsensusProof)> { - let key = client_state_key(client_id); - let (value, tm_proof) = query_value_with_proof(test, &key, target_height)?; - let client_state = match value { - Some(v) => AnyClientState::decode_vec(&v) +) -> Result<(TmClientState, CommitmentProofBytes, ConsensusProof)> { + let key = client_state_key(&client_id.as_str().parse().unwrap()); + let (value, tm_proof) = + query_value_with_proof(test, &key, Some(target_height))?; + let cs = match value { + Some(v) => Any::decode(&v[..]) .map_err(|e| eyre!("Decoding the client state failed: {}", e))?, 
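// Client states are now read back as a protobuf `Any` (decoded with
// `prost::Message`) and then downcast to the concrete Tendermint type,
// replacing the removed `AnyClientState::decode_vec` path. A sketch of the
// decode step used by these helpers:
//
//     let cs: Any = Any::decode(&value[..])?; // raw protobuf envelope
//     let client_state = TmClientState::try_from(cs) // typed downcast
//         .expect("the state should be a TmClientState");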
None => { return Err(eyre!( @@ -668,11 +662,16 @@ fn get_client_states( )); } }; + let client_state = TmClientState::try_from(cs) + .expect("the state should be a TmClientState"); let client_state_proof = convert_proof(tm_proof)?; let height = client_state.latest_height(); - let key = consensus_state_key(client_id, height); - let (_, tm_proof) = query_value_with_proof(test, &key, target_height)?; + let ibc_height = IbcHeight::new(0, height.revision_height()).unwrap(); + let key = + consensus_state_key(&client_id.as_str().parse().unwrap(), ibc_height); + let (_, tm_proof) = + query_value_with_proof(test, &key, Some(target_height))?; let proof = convert_proof(tm_proof)?; let consensus_proof = ConsensusProof::new(proof, height) .map_err(|e| eyre!("Creating ConsensusProof failed: error {}", e))?; @@ -709,7 +708,7 @@ fn transfer_token( let msg = MsgRecvPacket { packet, proofs, - signer: Signer::new("test_b"), + signer: Signer::from_str("test_b").expect("invalid signer"), }; // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; @@ -729,7 +728,7 @@ fn transfer_token( packet, acknowledgement: acknowledgement.into(), proofs, - signer: Signer::new("test_a"), + signer: Signer::from_str("test_a").expect("invalid signer"), }; // Update the client state of Chain B on Chain A update_client_with_height(test_b, test_a, client_id_a, height_b)?; @@ -774,7 +773,7 @@ fn transfer_received_token( "0", "--gas-token", NAM, - "--ledger-address", + "--node", &rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -828,7 +827,7 @@ fn transfer_back( let msg = MsgRecvPacket { packet, proofs, - signer: Signer::new("test_a"), + signer: Signer::from_str("test_a").expect("invalid signer"), }; // Update the client state of Chain B on Chain A update_client_with_height(test_b, test_a, client_id_a, height_b)?; @@ -848,7 +847,7 @@ fn transfer_back( packet, acknowledgement: acknowledgement.into(), proofs, - signer: Signer::new("test_b"), + signer: Signer::from_str("test_b").expect("invalid signer"), }; // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; @@ -891,7 +890,7 @@ fn transfer_timeout( next_sequence_recv: packet.sequence, packet, proofs, - signer: Signer::new("test_a"), + signer: Signer::from_str("test_a").expect("invalid signer"), }; // Update the client state of Chain B on Chain A update_client_with_height(test_b, test_a, client_id_a, height_b)?; @@ -901,83 +900,6 @@ fn transfer_timeout( Ok(()) } -fn transfer_timeout_on_close( - test_a: &Test, - test_b: &Test, - client_id_a: &ClientId, - client_id_b: &ClientId, - port_channel_id_a: &PortChannelId, - port_channel_id_b: &PortChannelId, -) -> Result<()> { - let receiver = find_address(test_a, ALBERT)?; - - // Send a token from Chain B - let height = transfer( - test_b, - BERTHA, - &receiver, - NAM, - &Amount::whole(100000), - port_channel_id_b, - None, - None, - )?; - let packet = match get_event(test_b, height)? 
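// All the proof helpers below query at `target_height.decrement()`: in
// Tendermint, the app hash in the header at height H commits the state after
// height H - 1, so a proof that must verify against the header at
// `target_height` has to be generated from the previous block's state.
//
//     let query_height = target_height.decrement().unwrap(); // H - 1
//     // query value + proof at H - 1, verify against the header at H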
{ - Some(IbcEvent::SendPacket(event)) => event.packet, - _ => return Err(eyre!("Transaction failed")), - }; - - // get the proof for the receipt and the channel on Chain A - let height_a = query_height(test_a)?; - let proofs_receipt = get_receipt_absence_proof(test_a, &packet, height_a)?; - let proofs_closed = - get_channel_proofs(test_a, client_id_a, port_channel_id_a, height_a)?; - let proofs = Proofs::new( - proofs_receipt.object_proof().clone(), - proofs_closed.client_proof().clone(), - proofs_closed.consensus_proof(), - Some(proofs_closed.object_proof().clone()), - proofs_receipt.height(), - ) - .unwrap(); - let msg = MsgTimeoutOnClose { - next_sequence_recv: packet.sequence, - packet, - proofs, - signer: Signer::new("test_b"), - }; - // Update the client state of Chain A on Chain B - update_client_with_height(test_a, test_b, client_id_b, height_a)?; - // TimeoutOnClose on Chain B - submit_ibc_tx(test_b, msg, ALBERT)?; - - Ok(()) -} - -fn try_transfer_on_close( - test_a: &Test, - test_b: &Test, - port_channel_id_a: &PortChannelId, -) -> Result<()> { - let receiver = find_address(test_b, BERTHA)?; - // Send a token from Chain A - match transfer( - test_a, - ALBERT, - &receiver, - NAM, - &Amount::whole(100000), - port_channel_id_a, - None, - None, - ) { - Ok(_) => Err(eyre!( - "Sending a token succeeded in spite of closing the channel" - )), - Err(_) => Ok(()), - } -} - fn get_commitment_proof( test: &Test, packet: &Packet, @@ -986,11 +908,11 @@ fn get_commitment_proof( // we need proofs at the height of the previous block let query_height = target_height.decrement().unwrap(); let key = commitment_key( - &packet.source_port, - &packet.source_channel, - packet.sequence, + &packet.source_port.as_str().parse().unwrap(), + &packet.source_channel.as_str().parse().unwrap(), + u64::from(packet.sequence).into(), ); - let (_, tm_proof) = query_value_with_proof(test, &key, query_height)?; + let (_, tm_proof) = query_value_with_proof(test, &key, Some(query_height))?; let commitment_proof = convert_proof(tm_proof)?; Proofs::new(commitment_proof, None, None, None, target_height) @@ -1005,11 +927,11 @@ fn get_ack_proof( // we need proofs at the height of the previous block let query_height = target_height.decrement().unwrap(); let key = ack_key( - &packet.destination_port, - &packet.destination_channel, - packet.sequence, + &packet.destination_port.as_str().parse().unwrap(), + &packet.destination_channel.as_str().parse().unwrap(), + u64::from(packet.sequence).into(), ); - let (_, tm_proof) = query_value_with_proof(test, &key, query_height)?; + let (_, tm_proof) = query_value_with_proof(test, &key, Some(query_height))?; let ack_proof = convert_proof(tm_proof)?; Proofs::new(ack_proof, None, None, None, target_height) @@ -1024,17 +946,22 @@ fn get_receipt_absence_proof( // we need proofs at the height of the previous block let query_height = target_height.decrement().unwrap(); let key = receipt_key( - &packet.destination_port, - &packet.destination_channel, - packet.sequence, + &packet.destination_port.as_str().parse().unwrap(), + &packet.destination_channel.as_str().parse().unwrap(), + u64::from(packet.sequence).into(), ); - let (_, tm_proof) = query_value_with_proof(test, &key, query_height)?; + let (_, tm_proof) = query_value_with_proof(test, &key, Some(query_height))?; let absence_proof = convert_proof(tm_proof)?; Proofs::new(absence_proof, None, None, None, target_height) .map_err(|e| eyre!("Creating proofs failed: error {}", e)) } +fn commitment_prefix() -> CommitmentPrefix { + 
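// This local helper replaces the `commitment_prefix` previously imported
// from `namada::core::ledger::ibc::actions`: Namada keeps IBC state under
// the literal "ibc" prefix, so counterparty proofs are verified against that
// commitment prefix. A quick check (the `is_ok` assertion is the only
// property relied on here):
//
//     assert!(CommitmentPrefix::try_from(b"ibc".to_vec()).is_ok());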
CommitmentPrefix::try_from(b"ibc".to_vec()) + .expect("the prefix should be parsable") +} + fn submit_ibc_tx( test: &Test, message: impl Msg + std::fmt::Debug, @@ -1044,8 +971,6 @@ let data = make_ibc_data(message); std::fs::write(&data_path, data).expect("writing data failed"); - let code_path = wasm_abs_path(TX_IBC_WASM); - let code_path = code_path.to_string_lossy(); let data_path = data_path.to_string_lossy(); let rpc = get_actor_rpc(test, &Who::Validator(0)); let mut client = run!( test, Bin::Client, [ "tx", "--code-path", - &code_path, + TX_IBC_WASM, "--data-path", &data_path, "--signer", ALBERT, "--gas-amount", "0", "--gas-limit", "0", "--gas-token", NAM, - "--ledger-address", + "--node", &rpc ], Some(40) @@ -1107,7 +1032,7 @@ fn transfer( &channel_id, "--port-id", &port_id, - "--ledger-address", + "--node", &rpc, ]; let sp = sub_prefix.clone().unwrap_or_default(); @@ -1144,14 +1069,11 @@ fn check_tx_height(test: &Test, client: &mut NamadaCmd) -> Result<u32> { .1 .replace(['"', ','], ""); if code != "0" { - return Err(eyre!( - "The IBC transfer transaction failed: unread {}", - unread - )); + return Err(eyre!("The IBC transaction failed: unread {}", unread)); } // wait for the next block to use the app hash - while height as u64 + 1 > query_height(test)?.revision_height { + while height as u64 + 1 > query_height(test)?.revision_height() { sleep(1); } @@ -1176,14 +1098,14 @@ fn query_height(test: &Test) -> Result<Height> { .block_on(client.status()) .map_err(|e| eyre!("Getting the status failed: {}", e))?; - Ok(Height::new(0, status.sync_info.latest_block_height.into())) + Ok(Height::new(0, status.sync_info.latest_block_height.into()).unwrap()) } fn query_header(test: &Test, height: Height) -> Result<TmHeader> { let rpc = get_actor_rpc(test, &Who::Validator(0)); let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); let client = HttpClient::new(ledger_address).unwrap(); - let height = height.revision_height as u32; + let height = height.revision_height() as u32; let result = Runtime::new() .unwrap() .block_on(client.blockchain(height, height)); @@ -1221,11 +1143,9 @@ fn get_event(test: &Test, height: u32) -> Result<Option<IbcEvent>> { .end_block_events .ok_or_else(|| eyre!("IBC event was not found: height {}", height))?; for event in &events { - // The height will be set, but not be used - let dummy_height = Height::new(0, 0); - match from_tx_response_event(dummy_height, event) { - Some(ibc_event) => return Ok(Some(ibc_event)), - None => continue, + match ibc_event_try_from_abci_event(event) { + Ok(ibc_event) => return Ok(Some(ibc_event)), + Err(_) => continue, } } // No IBC event was found @@ -1235,7 +1155,7 @@ fn query_value_with_proof( test: &Test, key: &Key, - height: Height, + height: Option<Height>, ) -> Result<(Option<Vec<u8>>, TmProof)> { let rpc = get_actor_rpc(test, &Who::Validator(0)); let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); @@ -1243,7 +1163,7 @@ let result = Runtime::new().unwrap().block_on(query_storage_value_bytes( &client, key, - Some(BlockHeight(height.revision_height)), + height.map(|h| BlockHeight(h.revision_height())), true, )); match result { @@ -1262,7 +1182,6 @@ fn convert_proof(tm_proof: TmProof) -> Result<CommitmentProofBytes> { /// Check balances after IBC transfer fn check_balances( - src_port_channel_id: &PortChannelId, dest_port_channel_id: &PortChannelId, test_a: &Test, test_b: &Test, @@ -1271,25 +1190,17 @@ // Check the balances on Chain A let rpc_a =
get_actor_rpc(test_a, &Who::Validator(0)); - let query_args = - vec!["balance", "--token", NAM, "--ledger-address", &rpc_a]; + let query_args = vec!["balance", "--token", NAM, "--node", &rpc_a]; let mut client = run!(test_a, Bin::Client, query_args, Some(40))?; - // Check the source balance - let expected = ": 900000, owned by albert".to_string(); - client.exp_string(&expected)?; // Check the escrowed balance - let key_prefix = ibc_account_prefix( - &src_port_channel_id.port_id, - &src_port_channel_id.channel_id, - &token, - ); - let sub_prefix = key_prefix.sub_key().unwrap().to_string(); let expected = format!( - "with {}: 100000, owned by {}", - sub_prefix, + ": 100000, owned by {}", Address::Internal(InternalAddress::IbcEscrow) ); client.exp_string(&expected)?; + // Check the source balance + let expected = ": 900000, owned by albert".to_string(); + client.exp_string(&expected)?; client.assert_success(); // Check the balance on Chain B @@ -1308,10 +1219,10 @@ fn check_balances( NAM, "--sub-prefix", &sub_prefix, - "--ledger-address", + "--node", &rpc_b, ]; - let expected = format!("NAM with {}: 100000", sub_prefix); + let expected = format!("nam with {}: 100000", sub_prefix); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); @@ -1342,10 +1253,10 @@ fn check_balances_after_non_ibc( NAM, "--sub-prefix", &sub_prefix, - "--ledger-address", + "--node", &rpc, ]; - let expected = format!("NAM with {}: 50000", sub_prefix); + let expected = format!("nam with {}: 50000", sub_prefix); let mut client = run!(test, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); @@ -1359,10 +1270,10 @@ fn check_balances_after_non_ibc( NAM, "--sub-prefix", &sub_prefix, - "--ledger-address", + "--node", &rpc, ]; - let expected = format!("NAM with {}: 50000", sub_prefix); + let expected = format!("nam with {}: 50000", sub_prefix); let mut client = run!(test, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); @@ -1372,7 +1283,6 @@ fn check_balances_after_non_ibc( /// Check balances after IBC transfer back fn check_balances_after_back( - src_port_channel_id: &PortChannelId, dest_port_channel_id: &PortChannelId, test_a: &Test, test_b: &Test, @@ -1381,25 +1291,17 @@ fn check_balances_after_back( // Check the balances on Chain A let rpc_a = get_actor_rpc(test_a, &Who::Validator(0)); - let query_args = - vec!["balance", "--token", NAM, "--ledger-address", &rpc_a]; + let query_args = vec!["balance", "--token", NAM, "--node", &rpc_a]; let mut client = run!(test_a, Bin::Client, query_args, Some(40))?; - // Check the source balance - let expected = ": 950000, owned by albert".to_string(); - client.exp_string(&expected)?; // Check the escrowed balance - let key_prefix = ibc_account_prefix( - &src_port_channel_id.port_id, - &src_port_channel_id.channel_id, - &token, - ); - let sub_prefix = key_prefix.sub_key().unwrap().to_string(); let expected = format!( - "with {}: 50000, owned by {}", - sub_prefix, + ": 50000, owned by {}", Address::Internal(InternalAddress::IbcEscrow) ); client.exp_string(&expected)?; + // Check the source balance + let expected = ": 950000, owned by albert".to_string(); + client.exp_string(&expected)?; client.assert_success(); // Check the balance on Chain B @@ -1418,10 +1320,10 @@ fn check_balances_after_back( NAM, "--sub-prefix", &sub_prefix, - "--ledger-address", + "--node", &rpc_b, ]; - let expected = format!("NAM with {}: 0", sub_prefix); + let 
expected = format!("nam with {}: 0", sub_prefix); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index db57e6fba87..df9ff29117a 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -20,14 +20,15 @@ use borsh::BorshSerialize; use color_eyre::eyre::Result; use data_encoding::HEXLOWER; use namada::types::address::{btc, eth, masp_rewards, Address}; +use namada::types::governance::ProposalType; use namada::types::storage::Epoch; use namada::types::token; use namada::types::token::parameters::Parameters; -use namada_apps::client::tx::ShieldedContext; +use namada_apps::client::tx::CLIShieldedUtils; use namada_apps::config::genesis::genesis_config::{ GenesisConfig, ParametersConfig, PosParamsConfig, TokenAccountConfig, }; -use rust_decimal_macros::dec; +use namada_test_utils::TestWasms; use serde_json::json; use setup::constants::*; @@ -38,6 +39,7 @@ use crate::e2e::helpers::{ }; use crate::e2e::setup::{self, default_port_offset, sleep, Bin, Who}; use crate::{run, run_as}; +use rust_decimal_macros::dec; /// Test that when we "run-ledger" with all the possible command /// combinations from fresh state, the node starts-up successfully for both a @@ -119,7 +121,7 @@ fn test_node_connectivity_and_consensus() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -149,19 +151,13 @@ fn test_node_connectivity_and_consensus() -> Result<()> { let query_balance_args = |ledger_rpc| { vec![ - "balance", - "--owner", - ALBERT, - "--token", - NAM, - "--ledger-address", - ledger_rpc, + "balance", "--owner", ALBERT, "--token", NAM, "--node", ledger_rpc, ] }; for ledger_rpc in &[validator_0_rpc, validator_1_rpc, non_validator_rpc] { let mut client = run!(test, Bin::Client, query_balance_args(ledger_rpc), Some(40))?; - client.exp_string("NAM: 1000010.1")?; + client.exp_string("nam: 1000010.1")?; client.assert_success(); } @@ -222,11 +218,13 @@ fn run_ledger_load_state_and_reset() -> Result<()> { ledger.exp_string("No state could be found")?; // Wait to commit a block ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; + let bg_ledger = ledger.background(); // Wait for a new epoch let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); epoch_sleep(&test, &validator_one_rpc, 30)?; // 2. Shut it down + let mut ledger = bg_ledger.foreground(); ledger.send_control('c')?; // Wait for the node to stop running to finish writing the state and tx // queue @@ -271,6 +269,75 @@ fn run_ledger_load_state_and_reset() -> Result<()> { Ok(()) } +/// In this test we +/// 1. Run the ledger node until a pre-configured height, +/// at which point it should suspend. +/// 2. Check that we can still query the ledger. +/// 3. Check that we can shutdown the ledger normally afterwards. +#[test] +fn suspend_ledger() -> Result<()> { + let test = setup::single_node_net()?; + // 1. 
Run the ledger node + let mut ledger = run_as!( + test, + Who::Validator(0), + Bin::Node, + &["ledger", "run-until", "--block-height", "2", "--suspend",], + Some(40) + )?; + + ledger.exp_string("Namada ledger node started")?; + // There should be no previous state + ledger.exp_string("No state could be found")?; + // Wait to commit a block + ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; + ledger.exp_string("Reached block height 2, suspending.")?; + let bg_ledger = ledger.background(); + + // 2. Query the ledger + let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let mut client = run!( + test, + Bin::Client, + &["epoch", "--ledger-address", &validator_one_rpc], + Some(40) + )?; + client.exp_string("Last committed epoch: 0")?; + + // 3. Shut it down + let mut ledger = bg_ledger.foreground(); + ledger.send_control('c')?; + // Wait for the node to stop running to finish writing the state and tx + // queue + ledger.exp_string("Namada ledger node has shut down.")?; + ledger.exp_eof()?; + Ok(()) +} + +/// Test that if we configure the ledger to +/// halt at a given height, it does indeed halt. +#[test] +fn stop_ledger_at_height() -> Result<()> { + let test = setup::single_node_net()?; + // 1. Run the ledger node + let mut ledger = run_as!( + test, + Who::Validator(0), + Bin::Node, + &["ledger", "run-until", "--block-height", "2", "--halt",], + Some(40) + )?; + + ledger.exp_string("Namada ledger node started")?; + // There should be no previous state + ledger.exp_string("No state could be found")?; + // Wait to commit a block + ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; + ledger.exp_string("Reached block height 2, halting the chain.")?; + ledger.exp_eof()?; + Ok(()) +} + /// In this test we: /// 1. Run the ledger node /// 2. Submit a token transfer tx @@ -289,13 +356,25 @@ fn ledger_txs_and_queries() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); - let vp_user = wasm_abs_path(VP_USER_WASM); - let vp_user = vp_user.to_string_lossy(); - let tx_no_op = wasm_abs_path(TX_NO_OP_WASM); - let tx_no_op = tx_no_op.to_string_lossy(); + // for a custom tx + let transfer = token::Transfer { + source: find_address(&test, BERTHA).unwrap(), + target: find_address(&test, ALBERT).unwrap(), + token: find_address(&test, NAM).unwrap(), + sub_prefix: None, + amount: token::Amount::whole(10), + key: None, + shielded: None, + } + .try_to_vec() + .unwrap(); + let tx_data_path = test.test_dir.path().join("tx.data"); + std::fs::write(&tx_data_path, transfer).unwrap(); + let tx_data_path = tx_data_path.to_string_lossy(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -317,7 +396,7 @@ fn ledger_txs_and_queries() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ], // Submit a token transfer tx (from an implicit account) @@ -337,7 +416,7 @@ fn ledger_txs_and_queries() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ], // 3. Submit a transaction to update an account's validity @@ -347,14 +426,14 @@ fn ledger_txs_and_queries() -> Result<()> { "--address", BERTHA, "--code-path", - &vp_user, + VP_USER_WASM, "--gas-amount", "0", "--gas-limit", "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ], // 4. 
Submit a custom tx @@ -363,16 +442,16 @@ fn ledger_txs_and_queries() -> Result<()> { "--signer", BERTHA, "--code-path", - &tx_no_op, + TX_TRANSFER_WASM, "--data-path", - "README.md", + &tx_data_path, "--gas-amount", "0", "--gas-limit", "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc ], // 5. Submit a tx to initialize a new account @@ -384,7 +463,7 @@ fn ledger_txs_and_queries() -> Result<()> { // Value obtained from `namada::types::key::ed25519::tests::gen_keypair` "001be519a321e29020fa3cbfbfd01bd5e92db134305609270b71dace25b5a21168", "--code-path", - &vp_user, + VP_USER_WASM, "--alias", "Test-Account", "--gas-amount", @@ -393,7 +472,7 @@ fn ledger_txs_and_queries() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ], // 6. Submit a tx to withdraw from faucet account (requires PoW challenge @@ -411,7 +490,7 @@ fn ledger_txs_and_queries() -> Result<()> { // Faucet withdrawal requires an explicit signer "--signer", ALBERT, - "--ledger-address", + "--node", &validator_one_rpc, ], ]; @@ -443,11 +522,11 @@ fn ledger_txs_and_queries() -> Result<()> { BERTHA, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ], // expect a decimal - r"NAM: \d+(\.\d+)?", + r"nam: \d+(\.\d+)?", ), ]; for (query_args, expected) in &query_args_and_expected_response { @@ -468,7 +547,7 @@ fn ledger_txs_and_queries() -> Result<()> { "query-bytes", "--storage-key", &storage_key, - "--ledger-address", + "--node", &validator_one_rpc, ], // expect hex encoded of borsh encoded bytes @@ -503,7 +582,7 @@ fn ledger_txs_and_queries() -> Result<()> { #[test] fn masp_txs_and_queries() -> Result<()> { // Download the shielded pool parameters before starting node - let _ = ShieldedContext::new(PathBuf::new()); + let _ = CLIShieldedUtils::new(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. 
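// [Editor's note] The hunk that follows swaps the "Namada ledger node started"
// expectation for a wait on the first committed block, a pattern this patch
// applies across the whole suite. A minimal sketch of the idea, assuming the
// e2e types already in scope here (NamadaCmd, Result); the helper name
// `wait_for_first_block` is hypothetical and not part of this diff:
fn wait_for_first_block(ledger: &mut NamadaCmd) -> Result<()> {
    // Matching the commit log line, rather than the start-up banner, ensures
    // the chain is already producing blocks before the test backgrounds the
    // node and starts submitting transactions.
    ledger.exp_string("Committed block hash")?;
    Ok(())
}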
@@ -528,7 +607,8 @@ fn masp_txs_and_queries() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); @@ -547,7 +627,7 @@ fn masp_txs_and_queries() -> Result<()> { BTC, "--amount", "10", - "--ledger-address", + "--node", &validator_one_rpc, ], "No balance found", @@ -564,7 +644,7 @@ fn masp_txs_and_queries() -> Result<()> { BTC, "--amount", "15", - "--ledger-address", + "--node", &validator_one_rpc, ], "No balance found", @@ -581,7 +661,7 @@ fn masp_txs_and_queries() -> Result<()> { BTC, "--amount", "20", - "--ledger-address", + "--node", &validator_one_rpc, ], "Transaction is valid", @@ -600,7 +680,7 @@ fn masp_txs_and_queries() -> Result<()> { "10", "--signer", ALBERT, - "--ledger-address", + "--node", &validator_one_rpc, ], "No balance found", @@ -619,7 +699,7 @@ fn masp_txs_and_queries() -> Result<()> { "7", "--signer", ALBERT, - "--ledger-address", + "--node", &validator_one_rpc, ], "Transaction is valid", @@ -638,7 +718,7 @@ fn masp_txs_and_queries() -> Result<()> { "7", "--signer", ALBERT, - "--ledger-address", + "--node", &validator_one_rpc, ], "Transaction is valid", @@ -657,7 +737,7 @@ fn masp_txs_and_queries() -> Result<()> { "7", "--signer", ALBERT, - "--ledger-address", + "--node", &validator_one_rpc, ], "is lower than the amount to be transferred and fees", @@ -676,7 +756,7 @@ fn masp_txs_and_queries() -> Result<()> { "6", "--signer", ALBERT, - "--ledger-address", + "--node", &validator_one_rpc, ], "Transaction is valid", @@ -689,10 +769,10 @@ fn masp_txs_and_queries() -> Result<()> { AA_VIEWING_KEY, "--token", BTC, - "--ledger-address", + "--node", &validator_one_rpc, ], - "No shielded BTC balance found", + "No shielded btc balance found", ), // 11. Assert ETH balance at VK(A) is 0 ( @@ -702,10 +782,10 @@ fn masp_txs_and_queries() -> Result<()> { AA_VIEWING_KEY, "--token", ETH, - "--ledger-address", + "--node", &validator_one_rpc, ], - "No shielded ETH balance found", + "No shielded eth balance found", ), // 12. Assert balance at VK(B) is 10 BTC ( @@ -713,10 +793,10 @@ fn masp_txs_and_queries() -> Result<()> { "balance", "--owner", AB_VIEWING_KEY, - "--ledger-address", + "--node", &validator_one_rpc, ], - "BTC : 20", + "btc : 20", ), // 13. Send 10 BTC from SK(B) to Bertha ( @@ -732,7 +812,7 @@ fn masp_txs_and_queries() -> Result<()> { "20", "--signer", BERTHA, - "--ledger-address", + "--node", &validator_one_rpc, ], "Transaction is valid", @@ -772,7 +852,7 @@ fn masp_txs_and_queries() -> Result<()> { #[test] fn masp_pinned_txs() -> Result<()> { // Download the shielded pool parameters before starting node - let _ = ShieldedContext::new(PathBuf::new()); + let _ = CLIShieldedUtils::new(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. 
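// [Editor's note] Two mechanical renames recur through these hunks: the client
// flag `--ledger-address` becomes `--node`, and token denominations in client
// output are lowercased, so every expectation string changes with them
// ("NAM: ..." -> "nam: ...", "BTC : 20" -> "btc : 20"). A representative
// balance query in the updated form, a sketch using the constants these tests
// already import:
let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0));
let mut client = run!(
    test,
    Bin::Client,
    vec!["balance", "--owner", AB_VIEWING_KEY, "--node", &validator_one_rpc],
    Some(40)
)?;
client.exp_string("btc : 20")?; // was "BTC : 20" before this patch
client.assert_success();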
@@ -794,7 +874,8 @@ fn masp_pinned_txs() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); @@ -813,7 +894,7 @@ fn masp_pinned_txs() -> Result<()> { AC_PAYMENT_ADDRESS, "--token", BTC, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) @@ -832,7 +913,7 @@ fn masp_pinned_txs() -> Result<()> { AC_PAYMENT_ADDRESS, "--token", BTC, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) @@ -855,7 +936,7 @@ fn masp_pinned_txs() -> Result<()> { BTC, "--amount", "20", - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) @@ -873,13 +954,13 @@ fn masp_pinned_txs() -> Result<()> { AC_PAYMENT_ADDRESS, "--token", BTC, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) )?; client.send_line(AC_VIEWING_KEY)?; - client.exp_string("Received 20 BTC")?; + client.exp_string("Received 20 btc")?; client.assert_success(); // Assert PPA(C) has no NAM pinned to it @@ -892,13 +973,13 @@ fn masp_pinned_txs() -> Result<()> { AC_PAYMENT_ADDRESS, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) )?; client.send_line(AC_VIEWING_KEY)?; - client.exp_string("Received no shielded NAM")?; + client.exp_string("Received no shielded nam")?; client.assert_success(); // Wait till epoch boundary @@ -914,13 +995,13 @@ fn masp_pinned_txs() -> Result<()> { AC_PAYMENT_ADDRESS, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) )?; client.send_line(AC_VIEWING_KEY)?; - client.exp_string("Received no shielded NAM")?; + client.exp_string("Received no shielded nam")?; client.assert_success(); Ok(()) @@ -932,7 +1013,7 @@ fn masp_pinned_txs() -> Result<()> { #[test] fn masp_incentives() -> Result<()> { // Download the shielded pool parameters before starting node - let _ = ShieldedContext::new(PathBuf::new()); + let _ = CLIShieldedUtils::new(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. 
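// [Editor's note] Pinned-balance queries are interactive: the client prompts
// for a viewing key on stdin before it can decrypt the pinned note, hence the
// send_line call and the generous Some(300) timeout (MASP proof generation is
// slow). A sketch of the shape used by masp_pinned_txs above, shown once in
// full:
let mut client = run!(
    test,
    Bin::Client,
    vec![
        "balance",
        "--owner",
        AC_PAYMENT_ADDRESS,
        "--token",
        BTC,
        "--node",
        &validator_one_rpc
    ],
    Some(300)
)?;
client.send_line(AC_VIEWING_KEY)?; // answer the viewing-key prompt
client.exp_string("Received 20 btc")?; // denominations are lowercase post-patch
client.assert_success();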
@@ -1007,7 +1088,8 @@ fn masp_incentives() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); @@ -1030,7 +1112,7 @@ fn masp_incentives() -> Result<()> { BTC, "--amount", "200000", - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) @@ -1048,12 +1130,12 @@ fn masp_incentives() -> Result<()> { AA_VIEWING_KEY, "--token", BTC, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) )?; - client.exp_string("BTC: 200000")?; + client.exp_string("btc: 200000")?; client.assert_success(); // Assert NAM balance at VK(A) is 0 @@ -1066,13 +1148,12 @@ fn masp_incentives() -> Result<()> { AA_VIEWING_KEY, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) )?; - - client.exp_string("No shielded NAM balance found")?; + client.exp_string("No shielded nam balance found")?; client.assert_success(); let masp_rewards = masp_rewards(); @@ -1090,12 +1171,12 @@ fn masp_incentives() -> Result<()> { AA_VIEWING_KEY, "--token", BTC, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) )?; - client.exp_string("BTC: 200000")?; + client.exp_string("btc: 200000")?; client.assert_success(); let amt200000 = token::Amount::from_str("200000").unwrap(); @@ -1112,7 +1193,7 @@ fn masp_incentives() -> Result<()> { AA_VIEWING_KEY, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) @@ -1126,7 +1207,7 @@ fn masp_incentives() -> Result<()> { // 0.557 * 7.61e11 - -0.1 * 7.61e11 = 5.07e11. This gets rounded // down due to masp conversion notes. - client.exp_string("NAM: 506000")?; + client.exp_string("nam: 506000")?; client.assert_success(); // Assert NAM balance at MASP pool is 200000*BTC_reward*(epoch_1-epoch_0) @@ -1139,12 +1220,12 @@ fn masp_incentives() -> Result<()> { MASP, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) )?; - client.exp_string("NAM: 506000")?; + client.exp_string("nam: 506000")?; client.assert_success(); // Wait till epoch boundary @@ -1160,12 +1241,12 @@ fn masp_incentives() -> Result<()> { AA_VIEWING_KEY, "--token", BTC, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) )?; - client.exp_string("BTC: 200000")?; + client.exp_string("btc: 200000")?; client.assert_success(); // Assert NAM balance at VK(A) is 20*BTC_reward*(epoch_2-epoch_0) @@ -1178,15 +1259,15 @@ fn masp_incentives() -> Result<()> { AA_VIEWING_KEY, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc ], Some(300) )?; - client.exp_string("NAM: 26296240")?; + client.exp_string("nam: 26296240")?; client.assert_success(); - // // Assert NAM balance at MASP pool is 20*BTC_reward*(epoch_2-epoch_0) + // Assert NAM balance at MASP pool is 20*BTC_reward*(epoch_2-epoch_0) // let mut client = run!( // test, // Bin::Client, @@ -1196,14 +1277,14 @@ fn masp_incentives() -> Result<()> { // MASP, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; // client.exp_string(&format!( - // "NAM: {}", - // (amt200000 * masp_rewards[&btc()]).0 * (ep2.0 - ep0.0) + // "nam: {}", + // (amt20 * masp_rewards[&btc()]).0 * (ep2.0 - ep0.0) // ))?; // client.assert_success(); @@ -1224,7 +1305,7 @@ fn masp_incentives() -> Result<()> { // ETH, // "--amount", // "30", - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) @@ 
-1242,12 +1323,12 @@ fn masp_incentives() -> Result<()> { // AB_VIEWING_KEY, // "--token", // ETH, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; - // client.exp_string("ETH: 30")?; + // client.exp_string("eth: 30")?; // client.assert_success(); // // Assert NAM balance at VK(B) is 0 @@ -1260,12 +1341,12 @@ fn masp_incentives() -> Result<()> { // AB_VIEWING_KEY, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; - // client.exp_string("No shielded NAM balance found")?; + // client.exp_string("No shielded nam balance found")?; // client.assert_success(); // // Wait till epoch boundary @@ -1281,12 +1362,12 @@ fn masp_incentives() -> Result<()> { // AB_VIEWING_KEY, // "--token", // ETH, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; - // client.exp_string("ETH: 30")?; + // client.exp_string("eth: 30")?; // client.assert_success(); // // Assert NAM balance at VK(B) is 30*ETH_reward*(epoch_4-epoch_3) @@ -1299,13 +1380,13 @@ fn masp_incentives() -> Result<()> { // AB_VIEWING_KEY, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; // client.exp_string(&format!( - // "NAM: {}", + // "nam: {}", // (amt30 * masp_rewards[ð()]).0 * (ep4.0 - ep3.0) // ))?; // client.assert_success(); @@ -1321,14 +1402,14 @@ fn masp_incentives() -> Result<()> { // MASP, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; // client.exp_string(&format!( - // "NAM: {}", - // ((amt200000 * masp_rewards[&btc()]).0 * (ep4.0 - ep0.0)) + // "nam: {}", + // ((amt20 * masp_rewards[&btc()]).0 * (ep4.0 - ep0.0)) // + ((amt30 * masp_rewards[ð()]).0 * (ep4.0 - ep3.0)) // ))?; // client.assert_success(); @@ -1352,7 +1433,7 @@ fn masp_incentives() -> Result<()> { // "30", // "--signer", // BERTHA, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) @@ -1370,12 +1451,12 @@ fn masp_incentives() -> Result<()> { // AB_VIEWING_KEY, // "--token", // ETH, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; - // client.exp_string("No shielded ETH balance found")?; + // client.exp_string("No shielded eth balance found")?; // client.assert_success(); // let mut ep = get_epoch(&test, &validator_one_rpc)?; @@ -1390,13 +1471,13 @@ fn masp_incentives() -> Result<()> { // AB_VIEWING_KEY, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; // client.exp_string(&format!( - // "NAM: {}", + // "nam: {}", // (amt30 * masp_rewards[ð()]).0 * (ep.0 - ep3.0) // ))?; // client.assert_success(); @@ -1413,14 +1494,14 @@ fn masp_incentives() -> Result<()> { // MASP, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; // client.exp_string(&format!( - // "NAM: {}", - // ((amt200000 * masp_rewards[&btc()]).0 * (ep.0 - ep0.0)) + // "nam: {}", + // ((amt20 * masp_rewards[&btc()]).0 * (ep.0 - ep0.0)) // + ((amt30 * masp_rewards[ð()]).0 * (ep.0 - ep3.0)) // ))?; // client.assert_success(); @@ -1444,7 +1525,7 @@ fn masp_incentives() -> Result<()> { // "20", // "--signer", // ALBERT, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) @@ -1462,12 +1543,12 @@ fn masp_incentives() -> Result<()> { // AA_VIEWING_KEY, // "--token", // BTC, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // 
Some(300) // )?; - // client.exp_string("No shielded BTC balance found")?; + // client.exp_string("No shielded btc balance found")?; // client.assert_success(); // // Assert NAM balance at VK(A) is 20*BTC_reward*(epoch_6-epoch_0) @@ -1480,14 +1561,14 @@ fn masp_incentives() -> Result<()> { // AA_VIEWING_KEY, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; // client.exp_string(&format!( - // "NAM: {}", - // (amt200000 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0) + // "nam: {}", + // (amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0) // ))?; // client.assert_success(); @@ -1502,14 +1583,14 @@ fn masp_incentives() -> Result<()> { // MASP, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; // client.exp_string(&format!( - // "NAM: {}", - // ((amt200000 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)) + // "nam: {}", + // ((amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)) // + ((amt30 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0)) // ))?; // client.assert_success(); @@ -1527,14 +1608,14 @@ fn masp_incentives() -> Result<()> { // AA_VIEWING_KEY, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; // client.exp_string(&format!( - // "NAM: {}", - // (amt200000 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0) + // "nam: {}", + // (amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0) // ))?; // client.assert_success(); @@ -1548,13 +1629,13 @@ fn masp_incentives() -> Result<()> { // AB_VIEWING_KEY, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; // client.exp_string(&format!( - // "NAM: {}", + // "nam: {}", // (amt30 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0) // ))?; // client.assert_success(); @@ -1570,14 +1651,14 @@ fn masp_incentives() -> Result<()> { // MASP, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; // client.exp_string(&format!( - // "NAM: {}", - // ((amt200000 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)) + // "nam: {}", + // ((amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)) // + ((amt30 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0)) // ))?; // client.assert_success(); @@ -1599,10 +1680,10 @@ fn masp_incentives() -> Result<()> { // "--token", // NAM, // "--amount", - // &((amt30 * masp_rewards[ð()]).0 * (ep5.0 - - // ep3.0)).to_string(), "--signer", + // &((amt30 * masp_rewards[ð()]).0 * (ep5.0 - ep3.0)).to_string(), + // "--signer", // BERTHA, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) @@ -1626,11 +1707,10 @@ fn masp_incentives() -> Result<()> { // "--token", // NAM, // "--amount", - // &((amt200000 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)) - // .to_string(), + // &((amt20 * masp_rewards[&btc()]).0 * (ep6.0 - ep0.0)).to_string(), // "--signer", // ALBERT, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) @@ -1648,12 +1728,12 @@ fn masp_incentives() -> Result<()> { // AA_VIEWING_KEY, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; - // client.exp_string("No shielded NAM balance found")?; + // client.exp_string("No shielded nam balance found")?; // client.assert_success(); // // Assert NAM balance at VK(B) is 0 @@ -1666,12 +1746,12 @@ fn masp_incentives() -> Result<()> { // AB_VIEWING_KEY, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // 
Some(300) // )?; - // client.exp_string("No shielded NAM balance found")?; + // client.exp_string("No shielded nam balance found")?; // client.assert_success(); // // Assert NAM balance at MASP pool is 0 @@ -1684,12 +1764,12 @@ fn masp_incentives() -> Result<()> { // MASP, // "--token", // NAM, - // "--ledger-address", + // "--node", // &validator_one_rpc // ], // Some(300) // )?; - // client.exp_string("NAM: 0")?; + // client.exp_string("nam: 0")?; // client.assert_success(); Ok(()) @@ -1708,50 +1788,35 @@ fn invalid_transactions() -> Result<()> { // 1. Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; - // Wait to commit a block - ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; - let bg_ledger = ledger.background(); + // Wait for a first block + ledger.exp_string("Committed block hash")?; - // 2. Submit a an invalid transaction (trying to mint tokens should fail - // in the token's VP) - let tx_data_path = test.test_dir.path().join("tx.data"); - let transfer = token::Transfer { - source: find_address(&test, DAEWON)?, - target: find_address(&test, ALBERT)?, - token: find_address(&test, NAM)?, - sub_prefix: None, - amount: token::Amount::whole(1), - key: None, - shielded: None, - }; - let data = transfer - .try_to_vec() - .expect("Encoding unsigned transfer shouldn't fail"); - let tx_wasm_path = wasm_abs_path(TX_MINT_TOKENS_WASM); - std::fs::write(&tx_data_path, data).unwrap(); - let tx_wasm_path = tx_wasm_path.to_string_lossy(); - let tx_data_path = tx_data_path.to_string_lossy(); + let bg_ledger = ledger.background(); + // 2. Submit a an invalid transaction (trying to transfer tokens should fail + // in the user's VP due to the wrong signer) let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); - let daewon_lower = DAEWON.to_lowercase(); let tx_args = vec![ - "tx", - "--code-path", - &tx_wasm_path, - "--data-path", - &tx_data_path, + "transfer", + "--source", + DAEWON, "--signing-key", - &daewon_lower, + ALBERT_KEY, + "--target", + ALBERT, + "--token", + NAM, + "--amount", + "1", "--gas-amount", "0", "--gas-limit", "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; @@ -1759,7 +1824,7 @@ fn invalid_transactions() -> Result<()> { client.exp_string("Transaction accepted")?; client.exp_string("Transaction applied")?; client.exp_string("Transaction is invalid")?; - client.exp_string(r#""code": "1"#)?; + client.exp_string(r#""code": "4"#)?; client.assert_success(); let mut ledger = bg_ledger.foreground(); @@ -1809,7 +1874,7 @@ fn invalid_transactions() -> Result<()> { // Force to ignore client check that fails on the balance check of the // source address "--force", - "--ledger-address", + "--node", &validator_one_rpc, ]; @@ -1842,7 +1907,7 @@ fn pos_bonds() -> Result<()> { let test = setup::network( |genesis| { let parameters = ParametersConfig { - min_num_of_blocks: 2, + min_num_of_blocks: 6, max_expected_time_per_block: 1, epochs_per_year: 31_536_000, ..genesis.parameters @@ -1865,7 +1930,8 @@ fn pos_bonds() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -1883,7 +1949,7 @@ fn pos_bonds() -> Result<()> { "0", "--gas-token", NAM, - 
"--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = @@ -1906,7 +1972,7 @@ fn pos_bonds() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -1926,7 +1992,7 @@ fn pos_bonds() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = @@ -1949,20 +2015,13 @@ fn pos_bonds() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; let expected = "Amount 3200 withdrawable starting from epoch "; let (_unread, matched) = client.exp_regex(&format!("{expected}.*\n"))?; - let epoch_raw = matched - .trim() - .split_once(expected) - .unwrap() - .1 - .split_once('.') - .unwrap() - .0; + let epoch_raw = matched.trim().split_once(expected).unwrap().1; let delegation_withdrawable_epoch = Epoch::from_str(epoch_raw).unwrap(); client.assert_success(); @@ -1975,7 +2034,7 @@ fn pos_bonds() -> Result<()> { epoch, delegation_withdrawable_epoch ); let start = Instant::now(); - let loop_timeout = Duration::new(40, 0); + let loop_timeout = Duration::new(60, 0); loop { if Instant::now().duration_since(start) > loop_timeout { panic!( @@ -2000,7 +2059,7 @@ fn pos_bonds() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = @@ -2021,7 +2080,7 @@ fn pos_bonds() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -2181,6 +2240,158 @@ fn pos_rewards() -> Result<()> { Ok(()) } +/// Test for PoS bonds and unbonds queries. +/// +/// 1. Run the ledger node +/// 2. Submit a delegation to the genesis validator +/// 3. Wait for epoch 4 +/// 4. Submit another delegation to the genesis validator +/// 5. Submit an unbond of the delegation +/// 6. Wait for epoch 7 +/// 7. Check the output of the bonds query +#[test] +fn test_bond_queries() -> Result<()> { + let pipeline_len = 2; + let unbonding_len = 4; + let test = setup::network( + |genesis| { + let parameters = ParametersConfig { + min_num_of_blocks: 2, + max_expected_time_per_block: 1, + epochs_per_year: 31_536_000, + ..genesis.parameters + }; + let pos_params = PosParamsConfig { + pipeline_len, + unbonding_len, + ..genesis.pos_params + }; + GenesisConfig { + parameters, + pos_params, + ..genesis + } + }, + None, + )?; + + // 1. Run the ledger node + let mut ledger = + run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; + + // Wait for a first block + ledger.exp_string("Committed block hash")?; + let _bg_ledger = ledger.background(); + + let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_alias = "validator-0"; + + // 2. Submit a delegation to the genesis validator + let tx_args = vec![ + "bond", + "--validator", + validator_alias, + "--source", + BERTHA, + "--amount", + "200", + "--gas-amount", + "0", + "--gas-limit", + "0", + "--gas-token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction is valid.")?; + + // 3. 
Wait for epoch 4 + let start = Instant::now(); + let loop_timeout = Duration::new(20, 0); + loop { + if Instant::now().duration_since(start) > loop_timeout { + panic!("Timed out waiting for epoch: {}", 1); + } + let epoch = get_epoch(&test, &validator_one_rpc)?; + if epoch >= Epoch(4) { + break; + } + } + + // 4. Submit another delegation to the genesis validator + let tx_args = vec![ + "bond", + "--validator", + validator_alias, + "--source", + BERTHA, + "--amount", + "300", + "--gas-amount", + "0", + "--gas-limit", + "0", + "--gas-token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // 5. Submit an unbond of the delegation + let tx_args = vec![ + "unbond", + "--validator", + validator_alias, + "--source", + BERTHA, + "--amount", + "412", + "--gas-amount", + "0", + "--gas-limit", + "0", + "--gas-token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // 6. Wait for epoch 7 + let start = Instant::now(); + let loop_timeout = Duration::new(20, 0); + loop { + if Instant::now().duration_since(start) > loop_timeout { + panic!("Timed out waiting for epoch: {}", 7); + } + let epoch = get_epoch(&test, &validator_one_rpc)?; + if epoch >= Epoch(7) { + break; + } + } + + // 7. Check the output of the bonds query + let tx_args = vec!["bonds", "--ledger-address", &validator_one_rpc]; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string( + "All bonds total active: 200088\r +All bonds total: 200088\r +All unbonds total active: 412\r +All unbonds total: 412\r +All unbonds total withdrawable: 412\r", + )?; + client.assert_success(); + + Ok(()) +} + /// PoS validator creation test. In this test we: /// /// 1. 
Run the ledger node with shorter epochs for faster progression @@ -2196,7 +2407,7 @@ fn pos_init_validator() -> Result<()> { let test = setup::network( |genesis| { let parameters = ParametersConfig { - min_num_of_blocks: 2, + min_num_of_blocks: 4, epochs_per_year: 31_536_000, max_expected_time_per_block: 1, ..genesis.parameters @@ -2219,7 +2430,8 @@ fn pos_init_validator() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -2244,7 +2456,7 @@ fn pos_init_validator() -> Result<()> { "0.05", "--max-commission-rate-change", "0.01", - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -2269,7 +2481,7 @@ fn pos_init_validator() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -2290,7 +2502,7 @@ fn pos_init_validator() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -2314,7 +2526,7 @@ fn pos_init_validator() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -2334,7 +2546,7 @@ fn pos_init_validator() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -2385,10 +2597,8 @@ fn ledger_many_txs_in_a_block() -> Result<()> { let mut ledger = run_as!(*test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; - - // Wait to commit a block - ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let bg_ledger = ledger.background(); let validator_one_rpc = Arc::new(get_actor_rpc(&test, &Who::Validator(0))); @@ -2410,7 +2620,7 @@ fn ledger_many_txs_in_a_block() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", ]); // 2. Spawn threads each submitting token transfer tx @@ -2468,7 +2678,7 @@ fn proposal_submission() -> Result<()> { let parameters = ParametersConfig { epochs_per_year: epochs_per_year_from_min_duration(1), max_proposal_bytes: Default::default(), - min_num_of_blocks: 2, + min_num_of_blocks: 4, max_expected_time_per_block: 1, vp_whitelist: Some(get_all_wasms_hashes( &working_dir, @@ -2501,7 +2711,8 @@ fn proposal_submission() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -2521,7 +2732,7 @@ fn proposal_submission() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -2530,14 +2741,23 @@ fn proposal_submission() -> Result<()> { // 2. 
Submit valid proposal let albert = find_address(&test, ALBERT)?; - let valid_proposal_json_path = prepare_proposal_data(&test, albert); + let valid_proposal_json_path = prepare_proposal_data( + &test, + albert, + ProposalType::Default(Some( + TestWasms::TxProposalCode + .path() + .to_string_lossy() + .to_string(), + )), + ); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); let submit_proposal_args = vec![ "init-proposal", "--data-path", valid_proposal_json_path.to_str().unwrap(), - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; @@ -2549,7 +2769,7 @@ fn proposal_submission() -> Result<()> { "query-proposal", "--proposal-id", "0", - "--ledger-address", + "--node", &validator_one_rpc, ]; @@ -2564,12 +2784,12 @@ fn proposal_submission() -> Result<()> { ALBERT, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, query_balance_args, Some(40))?; - client.exp_string("NAM: 999500")?; + client.exp_string("nam: 999500")?; client.assert_success(); // 5. Query token balance governance @@ -2579,12 +2799,12 @@ fn proposal_submission() -> Result<()> { GOVERNANCE_ADDRESS, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, query_balance_args, Some(40))?; - client.exp_string("NAM: 500")?; + client.exp_string("nam: 500")?; client.assert_success(); // 6. Submit an invalid proposal @@ -2622,6 +2842,9 @@ fn proposal_submission() -> Result<()> { "voting_start_epoch": 9999_u64, "voting_end_epoch": 10000_u64, "grace_epoch": 10009_u64, + "type": { + "Default":null + } } ); let invalid_proposal_json_path = @@ -2635,7 +2858,7 @@ fn proposal_submission() -> Result<()> { "init-proposal", "--data-path", invalid_proposal_json_path.to_str().unwrap(), - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; @@ -2651,7 +2874,7 @@ fn proposal_submission() -> Result<()> { "query-proposal", "--proposal-id", "1", - "--ledger-address", + "--node", &validator_one_rpc, ]; @@ -2666,12 +2889,12 @@ fn proposal_submission() -> Result<()> { ALBERT, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, query_balance_args, Some(40))?; - client.exp_string("NAM: 999500")?; + client.exp_string("nam: 999500")?; client.assert_success(); // 9. Send a yay vote from a validator @@ -2689,7 +2912,7 @@ fn proposal_submission() -> Result<()> { "yay", "--signer", "validator-0", - "--ledger-address", + "--node", &validator_one_rpc, ]; @@ -2711,7 +2934,7 @@ fn proposal_submission() -> Result<()> { "nay", "--signer", BERTHA, - "--ledger-address", + "--node", &validator_one_rpc, ]; @@ -2729,7 +2952,7 @@ fn proposal_submission() -> Result<()> { "yay", "--signer", ALBERT, - "--ledger-address", + "--node", &validator_one_rpc, ]; @@ -2750,7 +2973,7 @@ fn proposal_submission() -> Result<()> { "query-proposal-result", "--proposal-id", "0", - "--ledger-address", + "--node", &validator_one_rpc, ]; @@ -2771,12 +2994,12 @@ fn proposal_submission() -> Result<()> { ALBERT, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, query_balance_args, Some(30))?; - client.exp_string("NAM: 1000000")?; + client.exp_string("nam: 1000000")?; client.assert_success(); // 13. 
Check if governance funds are 0 @@ -2786,20 +3009,17 @@ fn proposal_submission() -> Result<()> { GOVERNANCE_ADDRESS, "--token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, query_balance_args, Some(30))?; - client.exp_string("NAM: 0")?; + client.exp_string("nam: 0")?; client.assert_success(); // // 14. Query parameters - let query_protocol_parameters = vec![ - "query-protocol-parameters", - "--ledger-address", - &validator_one_rpc, - ]; + let query_protocol_parameters = + vec!["query-protocol-parameters", "--node", &validator_one_rpc]; let mut client = run!(test, Bin::Client, query_protocol_parameters, Some(30))?; @@ -2809,6 +3029,529 @@ fn proposal_submission() -> Result<()> { Ok(()) } +/// Test submission and vote of an ETH proposal. +/// +/// 1 - Submit proposal +/// 2 - Vote with delegator and check failure +/// 3 - Vote with validator and check success +/// 4 - Check that proposal passed and funds +#[test] +fn eth_governance_proposal() -> Result<()> { + let test = setup::network( + |genesis| { + let parameters = ParametersConfig { + epochs_per_year: epochs_per_year_from_min_duration(1), + max_proposal_bytes: Default::default(), + min_num_of_blocks: 1, + max_expected_time_per_block: 1, + ..genesis.parameters + }; + + GenesisConfig { + parameters, + ..genesis + } + }, + None, + )?; + + let namadac_help = vec!["--help"]; + + let mut client = run!(test, Bin::Client, namadac_help, Some(40))?; + client.exp_string("Namada client command line interface.")?; + client.assert_success(); + + // Run the ledger node + let mut ledger = + run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; + + ledger.exp_string("Namada ledger node started")?; + let _bg_ledger = ledger.background(); + + let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + + // Delegate some token + let tx_args = vec![ + "bond", + "--validator", + "validator-0", + "--source", + BERTHA, + "--amount", + "900", + "--gas-amount", + "0", + "--gas-limit", + "0", + "--gas-token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // 1 - Submit proposal + let albert = find_address(&test, ALBERT)?; + let valid_proposal_json_path = + prepare_proposal_data(&test, albert, ProposalType::ETHBridge); + let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + + let submit_proposal_args = vec![ + "init-proposal", + "--data-path", + valid_proposal_json_path.to_str().unwrap(), + "--ledger-address", + &validator_one_rpc, + ]; + client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // Query the proposal + let proposal_query_args = vec![ + "query-proposal", + "--proposal-id", + "0", + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, proposal_query_args, Some(40))?; + client.exp_string("Proposal: 0")?; + client.assert_success(); + + // Query token balance proposal author (submitted funds) + let query_balance_args = vec![ + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, query_balance_args, Some(40))?; + client.exp_string("nam: 999500")?; + client.assert_success(); + + // Query token balance governance + let query_balance_args = vec![ + "balance", + "--owner", + GOVERNANCE_ADDRESS, + "--token", + NAM, + 
"--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, query_balance_args, Some(40))?; + client.exp_string("nam: 500")?; + client.assert_success(); + + // 2 - Vote with delegator and check failure + let mut epoch = get_epoch(&test, &validator_one_rpc).unwrap(); + while epoch.0 <= 13 { + sleep(1); + epoch = get_epoch(&test, &validator_one_rpc).unwrap(); + } + + use namada::types::key::{self, secp256k1, SigScheme}; + use rand::prelude::ThreadRng; + use rand::thread_rng; + + // Generate a signing key to sign the eth message to sign the eth message to + // sign the eth message + let mut rng: ThreadRng = thread_rng(); + let node_sk = secp256k1::SigScheme::generate(&mut rng); + let signing_key = key::common::SecretKey::Secp256k1(node_sk); + let msg = "fd34672ab5"; + let vote_arg = format!("{} {}", signing_key, msg); + let submit_proposal_vote_delagator = vec![ + "vote-proposal", + "--proposal-id", + "0", + "--vote", + "yay", + "--eth", + &vote_arg, + "--signer", + BERTHA, + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, submit_proposal_vote_delagator, Some(40))?; + client.exp_string("Transaction is invalid.")?; + client.assert_success(); + + // 3 - Send a yay vote from a validator + let vote_arg = format!("{} {}", signing_key, msg); + + let submit_proposal_vote = vec![ + "vote-proposal", + "--proposal-id", + "0", + "--vote", + "yay", + "--eth", + &vote_arg, + "--signer", + "validator-0", + "--ledger-address", + &validator_one_rpc, + ]; + + client = run_as!( + test, + Who::Validator(0), + Bin::Client, + submit_proposal_vote, + Some(15) + )?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // 4 - Wait proposals grace and check proposal author funds + while epoch.0 < 31 { + sleep(1); + epoch = get_epoch(&test, &validator_one_rpc).unwrap(); + } + + let query_balance_args = vec![ + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, query_balance_args, Some(30))?; + client.exp_string("nam: 1000000")?; + client.assert_success(); + + // Check if governance funds are 0 + let query_balance_args = vec![ + "balance", + "--owner", + GOVERNANCE_ADDRESS, + "--token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, query_balance_args, Some(30))?; + client.exp_string("nam: 0")?; + client.assert_success(); + + Ok(()) +} + +/// Test submission and vote of a PGF proposal +/// +/// 1 - Sumbit two proposals +/// 2 - Check balance +/// 3 - Vote for the accepted proposals +/// 4 - Check one proposal passed and the other one didn't +/// 5 - Check funds +#[test] +fn pgf_governance_proposal() -> Result<()> { + let test = setup::network( + |genesis| { + let parameters = ParametersConfig { + epochs_per_year: epochs_per_year_from_min_duration(1), + max_proposal_bytes: Default::default(), + min_num_of_blocks: 1, + max_expected_time_per_block: 1, + ..genesis.parameters + }; + + GenesisConfig { + parameters, + ..genesis + } + }, + None, + )?; + + let namadac_help = vec!["--help"]; + + let mut client = run!(test, Bin::Client, namadac_help, Some(40))?; + client.exp_string("Namada client command line interface.")?; + client.assert_success(); + + // Run the ledger node + let mut ledger = + run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; + + ledger.exp_string("Namada ledger node started")?; + let _bg_ledger = ledger.background(); + + let validator_one_rpc = get_actor_rpc(&test, 
&Who::Validator(0)); + + // Delegate some token + let tx_args = vec![ + "bond", + "--validator", + "validator-0", + "--source", + BERTHA, + "--amount", + "900", + "--gas-amount", + "0", + "--gas-limit", + "0", + "--gas-token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + client = run!(test, Bin::Client, tx_args, Some(40))?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // 1 - Submit proposal + let albert = find_address(&test, ALBERT)?; + let valid_proposal_json_path = + prepare_proposal_data(&test, albert.clone(), ProposalType::PGFCouncil); + let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + + let submit_proposal_args = vec![ + "init-proposal", + "--data-path", + valid_proposal_json_path.to_str().unwrap(), + "--ledger-address", + &validator_one_rpc, + ]; + client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // Sumbit another proposal + let valid_proposal_json_path = + prepare_proposal_data(&test, albert, ProposalType::PGFCouncil); + let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + + let submit_proposal_args = vec![ + "init-proposal", + "--data-path", + valid_proposal_json_path.to_str().unwrap(), + "--ledger-address", + &validator_one_rpc, + ]; + client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // 2 - Query the proposal + let proposal_query_args = vec![ + "query-proposal", + "--proposal-id", + "0", + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, proposal_query_args, Some(40))?; + client.exp_string("Proposal: 0")?; + client.assert_success(); + + let proposal_query_args = vec![ + "query-proposal", + "--proposal-id", + "1", + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, proposal_query_args, Some(40))?; + client.exp_string("Proposal: 1")?; + client.assert_success(); + + // Query token balance proposal author (submitted funds) + let query_balance_args = vec![ + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, query_balance_args, Some(40))?; + client.exp_string("nam: 999000")?; + client.assert_success(); + + // Query token balance governance + let query_balance_args = vec![ + "balance", + "--owner", + GOVERNANCE_ADDRESS, + "--token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, query_balance_args, Some(40))?; + client.exp_string("nam: 1000")?; + client.assert_success(); + + // 3 - Send a yay vote from a validator + let mut epoch = get_epoch(&test, &validator_one_rpc).unwrap(); + while epoch.0 <= 13 { + sleep(1); + epoch = get_epoch(&test, &validator_one_rpc).unwrap(); + } + + let albert_address = find_address(&test, ALBERT)?; + let arg_vote = format!("{} 1000", albert_address); + + let submit_proposal_vote = vec![ + "vote-proposal", + "--proposal-id", + "0", + "--vote", + "yay", + "--pgf", + &arg_vote, + "--signer", + "validator-0", + "--ledger-address", + &validator_one_rpc, + ]; + + client = run_as!( + test, + Who::Validator(0), + Bin::Client, + submit_proposal_vote, + Some(15) + )?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // Send different yay vote from delegator to check majority on 1/3 + let different_vote = format!("{} 900", albert_address); + let 
submit_proposal_vote_delagator = vec![ + "vote-proposal", + "--proposal-id", + "0", + "--vote", + "yay", + "--pgf", + &different_vote, + "--signer", + BERTHA, + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, submit_proposal_vote_delagator, Some(40))?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // Send vote to the second proposal from delegator + let submit_proposal_vote_delagator = vec![ + "vote-proposal", + "--proposal-id", + "1", + "--vote", + "yay", + "--pgf", + &different_vote, + "--signer", + BERTHA, + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, submit_proposal_vote_delagator, Some(40))?; + client.exp_string("Transaction is valid.")?; + client.assert_success(); + + // 4 - Query the proposal and check the result is the one voted by the + // validator (majority) + epoch = get_epoch(&test, &validator_one_rpc).unwrap(); + while epoch.0 <= 25 { + sleep(1); + epoch = get_epoch(&test, &validator_one_rpc).unwrap(); + } + + let query_proposal = vec![ + "query-proposal-result", + "--proposal-id", + "0", + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, query_proposal, Some(15))?; + client.exp_string(&format!( + "Result: passed with PGF council address: {}, spending cap: 0.001", + albert_address + ))?; + client.assert_success(); + + // Query the second proposal and check the it didn't pass + let query_proposal = vec![ + "query-proposal-result", + "--proposal-id", + "1", + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, query_proposal, Some(15))?; + client.exp_string("Result: rejected")?; + client.assert_success(); + + // 12. Wait proposals grace and check proposal author funds + while epoch.0 < 31 { + sleep(1); + epoch = get_epoch(&test, &validator_one_rpc).unwrap(); + } + + let query_balance_args = vec![ + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, query_balance_args, Some(30))?; + client.exp_string("nam: 999500")?; + client.assert_success(); + + // Check if governance funds are 0 + let query_balance_args = vec![ + "balance", + "--owner", + GOVERNANCE_ADDRESS, + "--token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + + client = run!(test, Bin::Client, query_balance_args, Some(30))?; + client.exp_string("nam: 0")?; + client.assert_success(); + + Ok(()) +} + /// In this test we: /// 1. Run the ledger node /// 2. 
Create an offline proposal @@ -2822,7 +3565,8 @@ fn proposal_offline() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(20))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -2842,7 +3586,7 @@ fn proposal_offline() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -2867,7 +3611,10 @@ fn proposal_offline() -> Result<()> { "author": albert, "voting_start_epoch": 3_u64, "voting_end_epoch": 9_u64, - "grace_epoch": 18_u64 + "grace_epoch": 18_u64, + "type": { + "Default": null + } } ); let valid_proposal_json_path = @@ -2884,7 +3631,7 @@ fn proposal_offline() -> Result<()> { "--data-path", valid_proposal_json_path.to_str().unwrap(), "--offline", - "--ledger-address", + "--node", &validator_one_rpc, ]; @@ -2910,7 +3657,7 @@ fn proposal_offline() -> Result<()> { "--signer", ALBERT, "--offline", - "--ledger-address", + "--node", &validator_one_rpc, ]; @@ -2928,7 +3675,7 @@ fn proposal_offline() -> Result<()> { "--data-path", test.test_dir.path().to_str().unwrap(), "--offline", - "--ledger-address", + "--node", &validator_one_rpc, ]; @@ -3270,6 +4017,11 @@ fn test_genesis_validators() -> Result<()> { non_validator.exp_string("Namada ledger node started")?; non_validator.exp_string("This node is not a validator")?; + // Wait for a first block + validator_0.exp_string("Committed block hash")?; + validator_1.exp_string("Committed block hash")?; + non_validator.exp_string("Committed block hash")?; + let bg_validator_0 = validator_0.background(); let bg_validator_1 = validator_1.background(); let _bg_non_validator = non_validator.background(); @@ -3292,7 +4044,7 @@ fn test_genesis_validators() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = @@ -3329,14 +4081,14 @@ fn test_genesis_validators() -> Result<()> { validator_1_alias, "--token", NAM, - "--ledger-address", + "--node", ledger_rpc, ] }; for ledger_rpc in &[validator_0_rpc, validator_1_rpc, non_validator_rpc] { let mut client = run!(test, Bin::Client, query_balance_args(ledger_rpc), Some(40))?; - client.exp_string("NAM: 1000000000010.1")?; + client.exp_string("nam: 1000000000010.1")?; client.assert_success(); } @@ -3468,7 +4220,7 @@ fn double_signing_gets_slashed() -> Result<()> { "0", "--gas-token", NAM, - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; @@ -3500,7 +4252,8 @@ fn implicit_account_reveal_pk() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -3519,7 +4272,7 @@ fn implicit_account_reveal_pk() -> Result<()> { NAM, "--amount", "10.1", - "--ledger-address", + "--node", &validator_one_rpc, ] .into_iter() @@ -3536,7 +4289,7 @@ fn implicit_account_reveal_pk() -> Result<()> { source, "--amount", "10.1", - "--ledger-address", + "--node", &validator_one_rpc, ] .into_iter() @@ -3547,12 +4300,16 @@ fn implicit_account_reveal_pk() -> Result<()> { Box::new(|source| { // Gen data for proposal tx 
let source = find_address(&test, source).unwrap(); - let valid_proposal_json_path = prepare_proposal_data(&test, source); + let valid_proposal_json_path = prepare_proposal_data( + &test, + source, + ProposalType::Default(None), + ); vec![ "init-proposal", "--data-path", valid_proposal_json_path.to_str().unwrap(), - "--ledger-address", + "--node", &validator_one_rpc, ] .into_iter() @@ -3587,7 +4344,7 @@ fn implicit_account_reveal_pk() -> Result<()> { NAM, "--amount", "1000", - "--ledger-address", + "--node", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, credit_args, Some(40))?; @@ -3611,8 +4368,11 @@ fn implicit_account_reveal_pk() -> Result<()> { /// Prepare proposal data in the test's temp dir from the given source address. /// This can be submitted with "init-proposal" command. -fn prepare_proposal_data(test: &setup::Test, source: Address) -> PathBuf { - let proposal_code = wasm_abs_path(TX_PROPOSAL_CODE); +fn prepare_proposal_data( + test: &setup::Test, + source: Address, + proposal_type: ProposalType, +) -> PathBuf { let valid_proposal_json = json!( { "content": { @@ -3630,7 +4390,7 @@ fn prepare_proposal_data(test: &setup::Test, source: Address) -> PathBuf { "voting_start_epoch": 12_u64, "voting_end_epoch": 24_u64, "grace_epoch": 30_u64, - "proposal_code_path": proposal_code.to_str().unwrap() + "type": proposal_type } ); let valid_proposal_json_path = diff --git a/tests/src/e2e/multitoken_tests/helpers.rs b/tests/src/e2e/multitoken_tests/helpers.rs index fb1138ca822..7008910b5e5 100644 --- a/tests/src/e2e/multitoken_tests/helpers.rs +++ b/tests/src/e2e/multitoken_tests/helpers.rs @@ -8,13 +8,14 @@ use eyre::Context; use namada_core::types::address::Address; use namada_core::types::{storage, token}; use namada_test_utils::tx_data::TxWriteData; +use namada_test_utils::TestWasms; use namada_tx_prelude::storage::KeySeg; use rand::Rng; use regex::Regex; -use super::setup::constants::{wasm_abs_path, NAM, VP_ALWAYS_TRUE_WASM}; +use super::setup::constants::NAM; use super::setup::{Bin, NamadaCmd, Test}; -use crate::e2e::setup::constants::{ALBERT, TX_WRITE_WASM}; +use crate::e2e::setup::constants::ALBERT; use crate::run; const MULTITOKEN_KEY_SEGMENT: &str = "tokens"; @@ -29,9 +30,8 @@ pub fn init_multitoken_vp(test: &Test, rpc_addr: &str) -> Result<String> { // we use a VP that always returns true for the multitoken VP here, as we // are testing out the VPs of the sender and receiver of multitoken // transactions here - not any multitoken VP itself - let multitoken_vp_wasm_path = wasm_abs_path(VP_ALWAYS_TRUE_WASM) - .to_string_lossy() - .to_string(); + let multitoken_vp_wasm_path = + TestWasms::VpAlwaysTrue.path().to_string_lossy().to_string(); let multitoken_alias = "multitoken"; let init_account_args = vec![ @@ -99,7 +99,7 @@ pub fn mint_red_tokens( .push(&BALANCE_KEY_SEGMENT.to_owned())? .push(owner)?; - let tx_code_path = wasm_abs_path(TX_WRITE_WASM); + let tx_code_path = TestWasms::TxWriteStorageKey.path(); let tx_data_path = write_test_file( test, TxWriteData { diff --git a/tests/src/e2e/setup.rs b/tests/src/e2e/setup.rs index 39438b92db3..8066910f819 100644 --- a/tests/src/e2e/setup.rs +++ b/tests/src/e2e/setup.rs @@ -104,17 +104,6 @@ pub fn single_node_net() -> Result<Test> { network(|genesis| genesis, None) } -/// Setup two networks with a single genesis validator node.
-pub fn two_single_node_nets() -> Result<(Test, Test)> { - Ok(( - network(|genesis| genesis, None)?, - network( - |genesis| set_validators(1, genesis, |_| ANOTHER_CHAIN_PORT_OFFSET), - None, - )?, - )) -} - /// Setup a configurable network. pub fn network( mut update_genesis: impl FnMut(GenesisConfig) -> GenesisConfig, @@ -782,9 +771,6 @@ pub fn sleep(seconds: u64) { #[allow(dead_code)] pub mod constants { - use std::fs; - use std::path::PathBuf; - // User addresses aliases pub const ALBERT: &str = "Albert"; pub const ALBERT_KEY: &str = "Albert-key"; @@ -830,23 +816,9 @@ pub const KARTOFFEL: &str = "Kartoffel"; // Paths to the WASMs used for tests - pub const TX_TRANSFER_WASM: &str = "wasm/tx_transfer.wasm"; - pub const VP_USER_WASM: &str = "wasm/vp_user.wasm"; - pub const TX_NO_OP_WASM: &str = "wasm_for_tests/tx_no_op.wasm"; - pub const TX_INIT_PROPOSAL: &str = "wasm_for_tests/tx_init_proposal.wasm"; - pub const TX_WRITE_WASM: &str = "wasm_for_tests/tx_write.wasm"; - pub const TX_IBC_WASM: &str = "wasm/tx_ibc.wasm"; - pub const VP_ALWAYS_TRUE_WASM: &str = "wasm_for_tests/vp_always_true.wasm"; - pub const VP_ALWAYS_FALSE_WASM: &str = - "wasm_for_tests/vp_always_false.wasm"; - pub const TX_MINT_TOKENS_WASM: &str = "wasm_for_tests/tx_mint_tokens.wasm"; - pub const TX_PROPOSAL_CODE: &str = "wasm_for_tests/tx_proposal_code.wasm"; - - /// Find the absolute path to one of the WASM files above - pub fn wasm_abs_path(file_name: &str) -> PathBuf { - let working_dir = fs::canonicalize("..").unwrap(); - working_dir.join(file_name) - } + pub const VP_USER_WASM: &str = "vp_user.wasm"; + pub const TX_IBC_WASM: &str = "tx_ibc.wasm"; + pub const TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; } /// Copy WASM files from the `wasm` directory to every node's chain dir. @@ -927,7 +899,7 @@ pub fn get_all_wasms_hashes( Some( wasm.split('.').collect::<Vec<&str>>()[1] .to_owned() - .to_uppercase(), + .to_lowercase(), ) } else { None diff --git a/tests/src/native_vp/pos.rs b/tests/src/native_vp/pos.rs index d6e0b6bd0ed..b60e6278043 100644 --- a/tests/src/native_vp/pos.rs +++ b/tests/src/native_vp/pos.rs @@ -149,7 +149,7 @@ mod tests { use namada_tx_prelude::Address; use proptest::prelude::*; use proptest::prop_state_machine; - use proptest::state_machine::{AbstractStateMachine, StateMachineTest}; + use proptest::state_machine::{ReferenceStateMachine, StateMachineTest}; use proptest::test_runner::Config; use test_log::test; @@ -170,6 +170,7 @@ mod tests { // Additionally, more cases will be explored every time this test is // executed in the CI. cases: 5, + verbose: 1, ..
Config::default() })] #[test] @@ -223,21 +224,21 @@ mod tests { } impl StateMachineTest for ConcretePosState { - type Abstract = AbstractPosState; - type ConcreteState = Self; + type Reference = AbstractPosState; + type SystemUnderTest = Self; fn init_test( - initial_state: ::State, - ) -> Self::ConcreteState { + initial_state: &::State, + ) -> Self::SystemUnderTest { println!(); println!("New test case"); // Initialize the transaction env init_pos(&[], &initial_state.params, initial_state.epoch); // The "genesis" block state - for change in initial_state.committed_valid_actions { + for change in &initial_state.committed_valid_actions { println!("Apply init state change {:#?}", change); - change.apply(true) + change.clone().apply(true) } // Commit the genesis block tx_host_env::commit_tx_and_block(); @@ -248,10 +249,11 @@ mod tests { } } - fn apply_concrete( - mut test_state: Self::ConcreteState, - transition: ::Transition, - ) -> Self::ConcreteState { + fn apply( + mut test_state: Self::SystemUnderTest, + _ref_state: &::State, + transition: ::Transition, + ) -> Self::SystemUnderTest { match transition { Transition::CommitTx => { if !test_state.is_current_tx_valid { @@ -314,24 +316,9 @@ mod tests { test_state } - - fn test_sequential( - initial_state: ::State, - transitions: Vec< - ::Transition, - >, - ) { - let mut state = Self::init_test(initial_state); - println!("Transitions {}", transitions.len()); - for (i, transition) in transitions.into_iter().enumerate() { - println!("Apply transition {}: {:#?}", i, transition); - state = Self::apply_concrete(state, transition); - Self::invariants(&state); - } - } } - impl AbstractStateMachine for AbstractPosState { + impl ReferenceStateMachine for AbstractPosState { type State = Self; type Transition = Transition; @@ -368,7 +355,7 @@ mod tests { .boxed() } - fn apply_abstract( + fn apply( mut state: Self::State, transition: &Self::Transition, ) -> Self::State { diff --git a/tests/src/storage_api/collections/lazy_map.rs b/tests/src/storage_api/collections/lazy_map.rs index 5268cf8b100..ffbc693d54f 100644 --- a/tests/src/storage_api/collections/lazy_map.rs +++ b/tests/src/storage_api/collections/lazy_map.rs @@ -12,7 +12,7 @@ mod tests { }; use proptest::prelude::*; use proptest::prop_state_machine; - use proptest::state_machine::{AbstractStateMachine, StateMachineTest}; + use proptest::state_machine::{ReferenceStateMachine, StateMachineTest}; use proptest::test_runner::Config; use test_log::test; @@ -28,6 +28,7 @@ mod tests { // Additionally, more cases will be explored every time this test is // executed in the CI. cases: 5, + verbose: 1, .. 
Config::default() })] #[test] @@ -108,7 +109,7 @@ mod tests { Update(TestKey, TestVal), } - impl AbstractStateMachine for AbstractLazyMapState { + impl ReferenceStateMachine for AbstractLazyMapState { type State = Self; type Transition = Transition; @@ -145,7 +146,7 @@ mod tests { } } - fn apply_abstract( + fn apply( mut state: Self::State, transition: &Self::Transition, ) -> Self::State { @@ -194,12 +195,12 @@ mod tests { } impl StateMachineTest for ConcreteLazyMapState { - type Abstract = AbstractLazyMapState; - type ConcreteState = Self; + type Reference = AbstractLazyMapState; + type SystemUnderTest = Self; fn init_test( - _initial_state: ::State, - ) -> Self::ConcreteState { + _initial_state: &::State, + ) -> Self::SystemUnderTest { // Init transaction env in which we'll be applying the transitions tx_host_env::init(); @@ -219,10 +220,11 @@ mod tests { } } - fn apply_concrete( - mut state: Self::ConcreteState, - transition: ::Transition, - ) -> Self::ConcreteState { + fn apply( + mut state: Self::SystemUnderTest, + _ref_state: &::State, + transition: ::Transition, + ) -> Self::SystemUnderTest { // Apply transitions in transaction env let ctx = tx_host_env::ctx(); diff --git a/tests/src/storage_api/collections/lazy_set.rs b/tests/src/storage_api/collections/lazy_set.rs new file mode 100644 index 00000000000..66c3f3e00c4 --- /dev/null +++ b/tests/src/storage_api/collections/lazy_set.rs @@ -0,0 +1,560 @@ +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + use std::convert::TryInto; + + use namada::types::address::{self, Address}; + use namada::types::storage; + use namada_tx_prelude::storage::KeySeg; + use namada_tx_prelude::storage_api::collections::{ + lazy_set, LazyCollection, LazySet, + }; + use proptest::prelude::*; + use proptest::prop_state_machine; + use proptest::state_machine::{ReferenceStateMachine, StateMachineTest}; + use proptest::test_runner::Config; + use test_log::test; + + use crate::tx::tx_host_env; + use crate::vp::vp_host_env; + + prop_state_machine! { + #![proptest_config(Config { + // Instead of the default 256, we only run 5 because otherwise it + // takes too long and it's preferable to crank up the number of + // transitions instead, to allow each case to run for more epochs as + // some issues only manifest once the model progresses further. + // Additionally, more cases will be explored every time this test is + // executed in the CI. + cases: 5, + verbose: 1, + .. Config::default() + })] + #[test] + fn lazy_set_api_state_machine_test(sequential 1..100 => ConcreteLazySetState); + } + + /// Type of key used in the set + type TestKey = u64; + + /// A `StateMachineTest` implemented on this struct manipulates it with + /// `Transition`s, which are also being accumulated into + /// `current_transitions`. It then: + /// + /// - checks its state against an in-memory `std::collections::BTreeSet` + /// - runs validation and checks that the `LazySet::Action`s reported from + /// validation match with transitions that were applied + /// + /// Additionally, one of the transitions is to commit a block and/or + /// transaction, during which the currently accumulated state changes are + /// persisted, or promoted from transaction write log to block's write log. 
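To make the reference/system-under-test split concrete before the real implementation below, here is a minimal, self-contained sketch using a hypothetical `Counter` in place of a storage-backed collection. The `AbstractCounter`/`ConcreteCounter` names are illustrative only (not part of this change); the trait signatures follow the `proptest::state_machine` API that this diff migrates to:

```rust
use proptest::prelude::*;
use proptest::prop_state_machine;
use proptest::state_machine::{ReferenceStateMachine, StateMachineTest};

#[derive(Clone, Debug)]
enum Transition {
    Inc,
    Dec,
}

/// The reference model: a plain integer we trust.
#[derive(Clone, Debug, Default)]
struct AbstractCounter(i64);

impl ReferenceStateMachine for AbstractCounter {
    type State = Self;
    type Transition = Transition;

    fn init_state() -> BoxedStrategy<Self::State> {
        Just(Self::default()).boxed()
    }

    fn transitions(_state: &Self::State) -> BoxedStrategy<Self::Transition> {
        prop_oneof![Just(Transition::Inc), Just(Transition::Dec)].boxed()
    }

    fn apply(mut state: Self::State, transition: &Self::Transition) -> Self::State {
        match transition {
            Transition::Inc => state.0 += 1,
            Transition::Dec => state.0 -= 1,
        }
        state
    }
}

/// The system under test, standing in for e.g. a lazy, storage-backed set.
#[derive(Debug)]
struct ConcreteCounter(i64);

impl StateMachineTest for ConcreteCounter {
    type Reference = AbstractCounter;
    type SystemUnderTest = Self;

    fn init_test(
        _initial_state: &<Self::Reference as ReferenceStateMachine>::State,
    ) -> Self::SystemUnderTest {
        Self(0)
    }

    fn apply(
        mut state: Self::SystemUnderTest,
        ref_state: &<Self::Reference as ReferenceStateMachine>::State,
        transition: <Self::Reference as ReferenceStateMachine>::Transition,
    ) -> Self::SystemUnderTest {
        // Apply the same transition to the SUT ...
        match transition {
            Transition::Inc => state.0 += 1,
            Transition::Dec => state.0 -= 1,
        }
        // ... and check the post-condition: the SUT must agree with the
        // reference model (`ref_state` is the state after this transition).
        assert_eq!(state.0, ref_state.0);
        state
    }
}

prop_state_machine! {
    #[test]
    fn counter_state_machine_test(sequential 1..20 => ConcreteCounter);
}
```

The `ConcreteLazySetState` below follows exactly this shape, with the eager `BTreeSet` playing the role of the trusted model.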
+ #[derive(Debug)] + struct ConcreteLazySetState { + /// Address is used to prefix the storage key of the `lazy_set` in + /// order to simulate a transaction and a validity predicate + /// check from changes on the `lazy_set` + address: Address, + /// In the test, we apply the same transitions on the `lazy_set` as on + /// `eager_set` to check that `lazy_set`'s state is consistent with + /// `eager_set`. + eager_set: BTreeSet<TestKey>, + /// Handle to a lazy set + lazy_set: LazySet<TestKey>, + /// Valid LazySet changes in the current transaction + current_transitions: Vec<Transition>, + } + + #[derive(Clone, Debug, Default)] + struct AbstractLazySetState { + /// Valid LazySet changes in the current transaction + valid_transitions: Vec<Transition>, + /// Valid LazySet changes committed to storage + committed_transitions: Vec<Transition>, + } + + /// Possible transitions that can modify a [`LazySet`]. + /// This roughly corresponds to the methods that have `StorageWrite` + /// access and is very similar to [`Action`]. + #[derive(Clone, Debug)] + enum Transition { + /// Commit all valid transitions in the current transaction + CommitTx, + /// Commit all valid transitions in the current transaction and also + /// commit the current block + CommitTxAndBlock, + /// Insert a key into a [`LazySet`] + Insert(TestKey), + /// Remove a key from a [`LazySet`] + Remove(TestKey), + /// Try to insert a key into a [`LazySet`] (fails if it's already + /// present) + TryInsert { key: TestKey, is_present: bool }, + } + + impl ReferenceStateMachine for AbstractLazySetState { + type State = Self; + type Transition = Transition; + + fn init_state() -> BoxedStrategy<Self::State> { + Just(Self::default()).boxed() + } + + // Generate a random transition valid for the current state + fn transitions(state: &Self::State) -> BoxedStrategy<Self::Transition> { + let length = state.len(); + if length == 0 { + prop_oneof![ + 1 => Just(Transition::CommitTx), + 1 => Just(Transition::CommitTxAndBlock), + 3 => arb_set_key().prop_map(Transition::Insert) + ] + .boxed() + } else { + let keys = state.find_existing_keys(); + let keys_clone = keys.clone(); + let arb_existing_set_key = + || proptest::sample::select(keys.clone()); + prop_oneof![ + 1 => Just(Transition::CommitTx), + 1 => Just(Transition::CommitTxAndBlock), + 3 => arb_existing_set_key().prop_map(Transition::Remove), + 3 => arb_existing_set_key().prop_map(|key| + Transition::TryInsert {key, is_present: true}), + 5 => (arb_set_key().prop_filter("insert on non-existing keys only", + move |key| !keys.contains(key))) + .prop_map(Transition::Insert), + 5 => (arb_set_key().prop_filter("try_insert on non-existing keys only", + move |key| !keys_clone.contains(key))) + .prop_map(|key| + Transition::TryInsert {key, is_present: false}), + ] + .boxed() + } + } + + fn apply( + mut state: Self::State, + transition: &Self::Transition, + ) -> Self::State { + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + let valid_actions_to_commit = + std::mem::take(&mut state.valid_transitions); + state + .committed_transitions + .extend(valid_actions_to_commit.into_iter()); + } + _ => state.valid_transitions.push(transition.clone()), + } + state + } + + fn preconditions( + state: &Self::State, + transition: &Self::Transition, + ) -> bool { + let length = state.len(); + // Ensure that a remove transition is not applied + // to an empty state + if length == 0 && matches!(transition, Transition::Remove(_)) { + return false; + } + match transition { + Transition::Remove(key) => { + let keys = state.find_existing_keys(); + // Ensure that the remove key is an existing one +
keys.contains(key) + } + Transition::Insert(key) => { + let keys = state.find_existing_keys(); + // Ensure that the insert key is not an existing one + !keys.contains(key) + } + Transition::TryInsert { key, is_present } => { + let keys = state.find_existing_keys(); + // Ensure that the `is_present` flag is correct + if *is_present { + keys.contains(key) + } else { + !keys.contains(key) + } + } + _ => true, + } + } + } + + impl StateMachineTest for ConcreteLazySetState { + type Reference = AbstractLazySetState; + type SystemUnderTest = Self; + + fn init_test( + _initial_state: &::State, + ) -> Self::SystemUnderTest { + // Init transaction env in which we'll be applying the transitions + tx_host_env::init(); + + // The lazy_set's path must be prefixed by the address to be able + // to trigger a validity predicate on it + let address = address::testing::established_address_1(); + tx_host_env::with(|env| env.spawn_accounts([&address])); + let lazy_set_prefix: storage::Key = address.to_db_key().into(); + + Self { + address, + eager_set: BTreeSet::new(), + lazy_set: LazySet::open( + lazy_set_prefix.push(&"arbitrary".to_string()).unwrap(), + ), + current_transitions: vec![], + } + } + + fn apply( + mut state: Self::SystemUnderTest, + _ref_state: &::State, + transition: ::Transition, + ) -> Self::SystemUnderTest { + // Apply transitions in transaction env + let ctx = tx_host_env::ctx(); + + // Persist the transitions in the current tx, or clear previous ones + // if we're committing a tx + match &transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + state.current_transitions = vec![]; + } + _ => { + state.current_transitions.push(transition.clone()); + } + } + + // Transition application on lazy set and post-conditions: + match &transition { + Transition::CommitTx => { + // commit the tx without committing the block + tx_host_env::with(|env| env.wl_storage.commit_tx()); + } + Transition::CommitTxAndBlock => { + // commit the tx and the block + tx_host_env::commit_tx_and_block(); + } + Transition::Insert(key) => { + state.lazy_set.insert(ctx, *key).unwrap(); + + // Post-conditions: + let present = state.lazy_set.contains(ctx, key).unwrap(); + assert!(present, "the new item must be added to the set"); + + state.assert_validation_accepted(); + } + Transition::TryInsert { key, is_present } => { + let result = state.lazy_set.try_insert(ctx, *key); + + // Post-conditions: + if *is_present { + assert!(result.is_err()); + } else { + assert!(result.is_ok()); + state.assert_validation_accepted(); + } + } + Transition::Remove(key) => { + let removed = state.lazy_set.remove(ctx, key).unwrap(); + + // Post-conditions: + assert!(removed, "removed element"); + + state.assert_validation_accepted(); + } + } + + // Apply transition in the eager set for comparison + apply_transition_on_eager_set(&mut state.eager_set, &transition); + + // Global post-conditions: + + // All items in eager set must be present in lazy set + for key in state.eager_set.iter() { + let present = state.lazy_set.contains(ctx, key).unwrap(); + assert!(present, "at key {key}"); + } + + // All items in lazy set must be present in eager set + for key in state.lazy_set.iter(ctx).unwrap() { + let key = key.unwrap(); + let present = state.eager_set.contains(&key); + assert!(present, "at key {key}"); + } + + state + } + } + + impl AbstractLazySetState { + /// Find the length of the set from the applied transitions + fn len(&self) -> u64 { + (set_len_diff_from_transitions(self.committed_transitions.iter()) + + 
set_len_diff_from_transitions(self.valid_transitions.iter())) + .try_into() + .expect( + "It shouldn't be possible to underflow length from all \ + transactions applied in abstract state", + ) + } + + /// Build an eager set from the committed and current transitions + fn eager_set(&self) -> BTreeSet<TestKey> { + let mut eager_set = BTreeSet::new(); + for transition in &self.committed_transitions { + apply_transition_on_eager_set(&mut eager_set, transition); + } + for transition in &self.valid_transitions { + apply_transition_on_eager_set(&mut eager_set, transition); + } + eager_set + } + + /// Find the keys currently present in the set + fn find_existing_keys(&self) -> Vec<TestKey> { + self.eager_set().iter().cloned().collect() + } + } + + /// Find the difference in length of the set from the applied transitions + fn set_len_diff_from_transitions<'a>( + transitions: impl Iterator<Item = &'a Transition>, + ) -> i64 { + let mut insert_count: i64 = 0; + let mut remove_count: i64 = 0; + + for trans in transitions { + match trans { + Transition::CommitTx | Transition::CommitTxAndBlock => {} + Transition::Insert(_) => insert_count += 1, + Transition::TryInsert { key: _, is_present } => { + if !is_present { + insert_count += 1 + } + } + Transition::Remove(_) => remove_count += 1, + } + } + insert_count - remove_count + } + + impl ConcreteLazySetState { + fn assert_validation_accepted(&self) { + // Init the VP env from tx env in which we applied the set + // transitions + let tx_env = tx_host_env::take(); + vp_host_env::init_from_tx(self.address.clone(), tx_env, |_| {}); + + // Simulate a validity predicate run using the lazy set's validation + // helpers + let changed_keys = + vp_host_env::with(|env| env.all_touched_storage_keys()); + + let mut validation_builder = None; + + // An insert followed by a remove of the same key is a no-op, in + // which case we'd still see the changed keys for these actions, + // but they wouldn't affect the validation result and they never + // get persisted, yet we'd still see them as changed keys here. + // To guard against this case, we check that the length diff from + // `set_len_diff_from_transitions` is not zero. + let set_len_diff = + set_len_diff_from_transitions(self.current_transitions.iter()); + + // To help debug validation issues... + dbg!( + &self.current_transitions, + &changed_keys + .iter() + .map(storage::Key::to_string) + .collect::<Vec<_>>() + ); + + for key in &changed_keys { + let is_sub_key = self + .lazy_set + .accumulate( + vp_host_env::ctx(), + &mut validation_builder, + key, + ) + .unwrap(); + + assert!( + is_sub_key, + "We're only modifying the lazy_set's keys here. Key: \ + \"{key}\", set length diff {set_len_diff}" + ); + } + if !changed_keys.is_empty() && set_len_diff != 0 { + assert!( + validation_builder.is_some(), + "If some keys were changed, the builder must get filled in" + ); + let actions = + LazySet::<TestKey>::validate(validation_builder.unwrap()) + .unwrap(); + let mut actions_to_check = actions.clone(); + + // Check that every transition has a corresponding action from + // validation. We drop the found actions to check that all + // actions are matched too.
+ let current_transitions = + normalize_transitions(&self.current_transitions); + for transition in &current_transitions { + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + } + Transition::Insert(expected_key) => { + for (ix, action) in + actions_to_check.iter().enumerate() + { + if let lazy_set::Action::Insert(key) = action { + if expected_key == key { + actions_to_check.remove(ix); + break; + } + } + } + } + Transition::TryInsert { + key: expected_key, + is_present, + } => { + if !is_present { + for (ix, action) in + actions_to_check.iter().enumerate() + { + if let lazy_set::Action::Insert(key) = + action + { + if expected_key == key { + actions_to_check.remove(ix); + break; + } + } + } + } + } + Transition::Remove(expected_key) => { + for (ix, action) in + actions_to_check.iter().enumerate() + { + if let lazy_set::Action::Remove(key) = action { + if expected_key == key { + actions_to_check.remove(ix); + break; + } + } + } + } + } + } + + assert!( + actions_to_check.is_empty(), + "All the actions reported from validation {actions:#?} \ + should have been matched with SM transitions \ + {current_transitions:#?}, but these actions didn't \ + match: {actions_to_check:#?}", + ) + } + + // Put the tx_env back before checking the result + tx_host_env::set_from_vp_env(vp_host_env::take()); + } + } + + /// Generate an arbitrary `TestKey` + fn arb_set_key() -> impl Strategy<Value = TestKey> { + any::<TestKey>() + } + + /// Apply `Transition` on an eager `Set`. + fn apply_transition_on_eager_set( + set: &mut BTreeSet<TestKey>, + transition: &Transition, + ) { + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => {} + Transition::Insert(key) => { + set.insert(*key); + } + Transition::Remove(key) => { + let _popped = set.remove(key); + } + Transition::TryInsert { key, is_present } => { + if !is_present { + set.insert(*key); + } + } + } + } + + /// Normalize transitions: + /// - remove(key) + insert(key) -> no change + /// - remove(key) + try_insert{key, is_present: false} -> no change + /// - try_insert{is_present: true} -> no change + /// + /// Note that the normalizable transition pairs do not have to be directly + /// next to each other, but their order does matter.
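A short worked example of these rules (illustrative only; it assumes the `Transition` enum above and the `normalize_transitions` function defined just below):

```rust
#[test]
fn normalize_transitions_example() {
    use Transition::*;
    let transitions = vec![
        Remove(1),
        TryInsert { key: 7, is_present: true },
        Insert(1),
        Insert(2),
    ];
    // `Remove(1) + Insert(1)` cancel out and a `try_insert` on an already
    // present key is a no-op, so only `Insert(2)` survives.
    let normalized = normalize_transitions(&transitions);
    assert!(matches!(normalized.as_slice(), [Insert(2)]));
}
```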
+ fn normalize_transitions(transitions: &[Transition]) -> Vec<Transition> { + let mut collapsed = vec![]; + 'outer: for transition in transitions { + match transition { + Transition::CommitTx + | Transition::CommitTxAndBlock + | Transition::Remove(_) => collapsed.push(transition.clone()), + Transition::Insert(key) => { + for (ix, collapsed_transition) in + collapsed.iter().enumerate() + { + if let Transition::Remove(remove_key) = + collapsed_transition + { + if key == remove_key { + // remove(key) + insert(key) -> no change + + // Delete the `Remove` transition + collapsed.remove(ix); + continue 'outer; + } + } + } + collapsed.push(transition.clone()); + } + Transition::TryInsert { key, is_present } => { + if !is_present { + for (ix, collapsed_transition) in + collapsed.iter().enumerate() + { + if let Transition::Remove(remove_key) = + collapsed_transition + { + if key == remove_key { + // remove(key) + try_insert{key, + // is_present: false} -> no + // change + + // Delete the `Remove` transition + collapsed.remove(ix); + continue 'outer; + } + } + } + collapsed.push(transition.clone()); + } else { + // In the else case we do nothing, which omits the + // transition: + // try_insert{is_present: true} -> no + // change + } + } + } + } + collapsed + } +} diff --git a/tests/src/storage_api/collections/lazy_vec.rs b/tests/src/storage_api/collections/lazy_vec.rs index b93c04885cb..0645f16e017 100644 --- a/tests/src/storage_api/collections/lazy_vec.rs +++ b/tests/src/storage_api/collections/lazy_vec.rs @@ -11,7 +11,7 @@ mod tests { }; use proptest::prelude::*; use proptest::prop_state_machine; - use proptest::state_machine::{AbstractStateMachine, StateMachineTest}; + use proptest::state_machine::{ReferenceStateMachine, StateMachineTest}; use proptest::test_runner::Config; use test_log::test; @@ -27,6 +27,7 @@ mod tests { // Additionally, more cases will be explored every time this test is // executed in the CI. cases: 5, + verbose: 1, ..
Config::default() })] #[test] @@ -109,7 +110,7 @@ mod tests { }, } - impl AbstractStateMachine for AbstractLazyVecState { + impl ReferenceStateMachine for AbstractLazyVecState { type State = Self; type Transition = Transition; @@ -149,7 +150,7 @@ mod tests { } } - fn apply_abstract( + fn apply( mut state: Self::State, transition: &Self::Transition, ) -> Self::State { @@ -188,12 +189,12 @@ mod tests { } impl StateMachineTest for ConcreteLazyVecState { - type Abstract = AbstractLazyVecState; - type ConcreteState = Self; + type Reference = AbstractLazyVecState; + type SystemUnderTest = Self; fn init_test( - _initial_state: ::State, - ) -> Self::ConcreteState { + _initial_state: &::State, + ) -> Self::SystemUnderTest { // Init transaction env in which we'll be applying the transitions tx_host_env::init(); @@ -213,10 +214,11 @@ mod tests { } } - fn apply_concrete( - mut state: Self::ConcreteState, - transition: ::Transition, - ) -> Self::ConcreteState { + fn apply( + mut state: Self::SystemUnderTest, + _ref_state: &::State, + transition: ::Transition, + ) -> Self::SystemUnderTest { // Apply transitions in transaction env let ctx = tx_host_env::ctx(); diff --git a/tests/src/storage_api/collections/mod.rs b/tests/src/storage_api/collections/mod.rs index f39b880c09b..3af3d14c670 100644 --- a/tests/src/storage_api/collections/mod.rs +++ b/tests/src/storage_api/collections/mod.rs @@ -1,3 +1,4 @@ mod lazy_map; +mod lazy_set; mod lazy_vec; mod nested_lazy_map; diff --git a/tests/src/storage_api/collections/nested_lazy_map.rs b/tests/src/storage_api/collections/nested_lazy_map.rs index b0ff35094c2..46bfc7673ee 100644 --- a/tests/src/storage_api/collections/nested_lazy_map.rs +++ b/tests/src/storage_api/collections/nested_lazy_map.rs @@ -15,7 +15,7 @@ mod tests { }; use proptest::prelude::*; use proptest::prop_state_machine; - use proptest::state_machine::{AbstractStateMachine, StateMachineTest}; + use proptest::state_machine::{ReferenceStateMachine, StateMachineTest}; use proptest::test_runner::Config; use test_log::test; @@ -31,6 +31,7 @@ mod tests { // Additionally, more cases will be explored every time this test is // executed in the CI. cases: 5, + verbose: 1, .. 
Config::default() })] #[test] @@ -121,7 +122,7 @@ mod tests { /// A key for transition type Key = (KeyOuter, KeyMiddle, KeyInner); - impl AbstractStateMachine for AbstractLazyMapState { + impl ReferenceStateMachine for AbstractLazyMapState { type State = Self; type Transition = Transition; @@ -158,7 +159,7 @@ mod tests { } } - fn apply_abstract( + fn apply( mut state: Self::State, transition: &Self::Transition, ) -> Self::State { @@ -207,12 +208,12 @@ mod tests { } impl StateMachineTest for ConcreteLazyMapState { - type Abstract = AbstractLazyMapState; - type ConcreteState = Self; + type Reference = AbstractLazyMapState; + type SystemUnderTest = Self; fn init_test( - _initial_state: ::State, - ) -> Self::ConcreteState { + _initial_state: &::State, + ) -> Self::SystemUnderTest { // Init transaction env in which we'll be applying the transitions tx_host_env::init(); @@ -232,10 +233,11 @@ mod tests { } } - fn apply_concrete( - mut state: Self::ConcreteState, - transition: ::Transition, - ) -> Self::ConcreteState { + fn apply( + mut state: Self::SystemUnderTest, + _ref_state: &::State, + transition: ::Transition, + ) -> Self::SystemUnderTest { // Apply transitions in transaction env let ctx = tx_host_env::ctx(); diff --git a/tests/src/storage_api/mod.rs b/tests/src/storage_api/mod.rs index bc487bd59e1..a03a21ebd0b 100644 --- a/tests/src/storage_api/mod.rs +++ b/tests/src/storage_api/mod.rs @@ -1 +1,2 @@ mod collections; +mod testnet_pow; diff --git a/tests/src/storage_api/testnet_pow.rs b/tests/src/storage_api/testnet_pow.rs new file mode 100644 index 00000000000..cd61331858b --- /dev/null +++ b/tests/src/storage_api/testnet_pow.rs @@ -0,0 +1,92 @@ +//! Tests for [`namada_core::ledger::testnet_pow`]. + +use namada_core::ledger::storage_api; +use namada_core::ledger::testnet_pow::*; +use namada_core::types::{address, token}; + +use crate::tx::{self, TestTxEnv}; +use crate::vp; + +#[test] +fn test_challenge_and_solution() -> storage_api::Result<()> { + let faucet_address = address::testing::established_address_1(); + let difficulty = Difficulty::try_new(1).unwrap(); + let withdrawal_limit = token::Amount::whole(1_000); + + let mut tx_env = TestTxEnv::default(); + + // Source address that's using PoW (this would be derived from the tx + // wrapper pk) + let source = address::testing::established_address_2(); + + // Ensure that the addresses exists, so we can use them in a tx + tx_env.spawn_accounts([&faucet_address, &source]); + + init_faucet_storage( + &mut tx_env.wl_storage, + &faucet_address, + difficulty, + withdrawal_limit, + )?; + tx_env.commit_genesis(); + + let challenge = Challenge::new( + &mut tx_env.wl_storage, + &faucet_address, + source.clone(), + )?; + + let solution = challenge.solve(); + + // The solution must be valid + assert!(solution.verify_solution(source.clone())); + + // Changing the solution to `0` invalidates it + { + let mut solution = solution.clone(); + solution.value = 0; + // If you're unlucky and this fails, try changing the solution to + // a different literal. + assert!(!solution.verify_solution(source.clone())); + } + // Changing the counter invalidates it + { + let mut solution = solution.clone(); + solution.params.counter = 10; + // If you're unlucky and this fails, try changing the counter to + // a different literal. 
+ assert!(!solution.verify_solution(source.clone())); + } + + // Apply the solution from a tx + vp::vp_host_env::init_from_tx(faucet_address.clone(), tx_env, |_addr| { + solution + .apply_from_tx(tx::ctx(), &faucet_address, &source) + .unwrap(); + }); + + // Check that it's valid + let is_valid = + solution.validate(&vp::ctx().pre(), &faucet_address, source.clone())?; + assert!(is_valid); + + // Commit the tx + let vp_env = vp::vp_host_env::take(); + tx::tx_host_env::set_from_vp_env(vp_env); + tx::tx_host_env::commit_tx_and_block(); + let tx_env = tx::tx_host_env::take(); + + // Re-apply the same solution from a tx + vp::vp_host_env::init_from_tx(faucet_address.clone(), tx_env, |_addr| { + solution + .apply_from_tx(tx::ctx(), &faucet_address, &source) + .unwrap(); + }); + + // Check that it's no longer valid + let is_valid = + solution.validate(&vp::ctx().pre(), &faucet_address, source)?; + assert!(!is_valid); + + Ok(()) +} diff --git a/tests/src/vm_host_env/ibc.rs b/tests/src/vm_host_env/ibc.rs index 1e604801860..47ff3808fd5 100644 --- a/tests/src/vm_host_env/ibc.rs +++ b/tests/src/vm_host_env/ibc.rs @@ -2,17 +2,20 @@ use core::time::Duration; use std::collections::HashMap; use std::str::FromStr; -pub use namada::core::ledger::ibc::actions::*; -use namada::ibc::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; -use namada::ibc::core::ics02_client::client_consensus::ConsensusState; -use namada::ibc::core::ics02_client::client_state::{ - AnyClientState, ClientState, +use namada::ibc::applications::transfer::acknowledgement::TokenTransferAcknowledgement; +use namada::ibc::applications::transfer::coin::PrefixedCoin; +use namada::ibc::applications::transfer::msgs::transfer::MsgTransfer; +use namada::ibc::applications::transfer::packet::PacketData; +use namada::ibc::applications::transfer::VERSION; +use namada::ibc::core::ics02_client::client_state::ClientState; +use namada::ibc::core::ics02_client::client_type::ClientType; +use namada::ibc::core::ics02_client::consensus_state::ConsensusState; +use namada::ibc::core::ics02_client::msgs::create_client::MsgCreateClient; +use namada::ibc::core::ics02_client::msgs::update_client::MsgUpdateClient; +use namada::ibc::core::ics02_client::msgs::upgrade_client::MsgUpgradeClient; +use namada::ibc::core::ics03_connection::connection::{ + ConnectionEnd, Counterparty as ConnCounterparty, State as ConnState, }; -use namada::ibc::core::ics02_client::header::Header; -use namada::ibc::core::ics02_client::msgs::create_client::MsgCreateAnyClient; -use namada::ibc::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; -use namada::ibc::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; -use namada::ibc::core::ics03_connection::connection::Counterparty as ConnCounterparty; use namada::ibc::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; use namada::ibc::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; use namada::ibc::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; @@ -31,50 +34,70 @@ use namada::ibc::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; use namada::ibc::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; use namada::ibc::core::ics04_channel::msgs::timeout::MsgTimeout; use namada::ibc::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; -use namada::ibc::core::ics04_channel::packet::{Packet, Sequence}; +pub use namada::ibc::core::ics04_channel::packet::{Packet, Sequence}; +use
namada::ibc::core::ics04_channel::timeout::TimeoutHeight; use namada::ibc::core::ics04_channel::Version as ChanVersion; -use namada::ibc::core::ics24_host::identifier::{ - ChannelId, ClientId, ConnectionId, PortId, +use namada::ibc::core::ics23_commitment::commitment::{ + CommitmentPrefix, CommitmentProofBytes, }; -use namada::ibc::mock::client_state::{MockClientState, MockConsensusState}; +pub use namada::ibc::core::ics24_host::identifier::{ + ChannelId, ClientId, ConnectionId, PortChannelId, PortId, +}; +use namada::ibc::mock::client_state::{MockClientState, MOCK_CLIENT_TYPE}; +use namada::ibc::mock::consensus_state::MockConsensusState; use namada::ibc::mock::header::MockHeader; -use namada::ibc::proofs::{ConsensusProof, Proofs}; use namada::ibc::signer::Signer; use namada::ibc::timestamp::Timestamp; use namada::ibc::Height; use namada::ibc_proto::cosmos::base::v1beta1::Coin; +use namada::ibc_proto::google::protobuf::Any; use namada::ibc_proto::ibc::core::commitment::v1::MerkleProof; +use namada::ibc_proto::ibc::core::connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry; use namada::ibc_proto::ics23::CommitmentProof; +use namada::ibc_proto::protobuf::Protobuf; use namada::ledger::gas::VpGasMeter; -use namada::ledger::ibc::init_genesis_storage; pub use namada::ledger::ibc::storage::{ - ack_key, capability_index_key, capability_key, channel_counter_key, - channel_key, client_counter_key, client_state_key, client_type_key, - commitment_key, connection_counter_key, connection_key, - consensus_state_key, next_sequence_ack_key, next_sequence_recv_key, - next_sequence_send_key, port_key, receipt_key, + ack_key, channel_counter_key, channel_key, client_counter_key, + client_state_key, client_type_key, client_update_height_key, + client_update_timestamp_key, commitment_key, connection_counter_key, + connection_key, consensus_state_key, ibc_token_prefix, + next_sequence_ack_key, next_sequence_recv_key, next_sequence_send_key, + port_key, receipt_key, }; use namada::ledger::ibc::vp::{ - get_dummy_header as tm_dummy_header, Ibc, IbcToken, + get_dummy_genesis_validator, get_dummy_header as tm_dummy_header, Ibc, + IbcToken, }; use namada::ledger::native_vp::{Ctx, NativeVp}; +use namada::ledger::parameters::storage::{ + get_epoch_duration_storage_key, get_max_expected_time_per_block_key, +}; +use namada::ledger::parameters::EpochDuration; use namada::ledger::storage::mockdb::MockDB; use namada::ledger::storage::Sha256Hasher; use namada::ledger::tx_env::TxEnv; +use namada::ledger::{ibc, pos}; +use namada::proof_of_stake::parameters::PosParams; use namada::proto::Tx; -use namada::tendermint_proto::Protobuf; +use namada::tendermint::time::Time as TmTime; +use namada::tendermint_proto::Protobuf as TmProtobuf; use namada::types::address::{self, Address, InternalAddress}; -use namada::types::ibc::data::{FungibleTokenPacketData, PacketAck}; -use namada::types::storage::{self, BlockHash, BlockHeight, Key, TxIndex}; +use namada::types::hash::Hash; +use namada::types::storage::{ + self, BlockHash, BlockHeight, Epoch, Key, TxIndex, +}; +use namada::types::time::DurationSecs; use namada::types::token::{self, Amount}; use namada::vm::{wasm, WasmCacheRwAccess}; -use namada_tx_prelude::StorageWrite; +use namada_test_utils::TestWasms; +use namada_tx_prelude::BorshSerialize; use crate::tx::{self, *}; -const VP_ALWAYS_TRUE_WASM: &str = "../wasm_for_tests/vp_always_true.wasm"; const ADDRESS: Address = Address::Internal(InternalAddress::Ibc); +const COMMITMENT_PREFIX: &[u8] = b"ibc"; + pub struct TestIbcVp<'a> 
{ pub ibc: Ibc<'a, MockDB, Sha256Hasher, WasmCacheRwAccess>, } @@ -182,8 +205,22 @@ pub fn validate_token_vp_from_tx<'a>( /// Initialize the test storage. Requires initialized [`tx_host_env::ENV`]. pub fn init_storage() -> (Address, Address) { + // wasm for init_account + let code = TestWasms::VpAlwaysTrue.read_bytes(); + let code_hash = Hash::sha256(&code); + tx_host_env::with(|env| { - init_genesis_storage(&mut env.wl_storage.storage); + ibc::init_genesis_storage(&mut env.wl_storage); + pos::init_genesis_storage( + &mut env.wl_storage, + &PosParams::default(), + vec![get_dummy_genesis_validator()].into_iter(), + Epoch(1), + ); + // store wasm code + let key = Key::wasm_code(&code_hash); + env.wl_storage.storage.write(&key, code.clone()).unwrap(); + // block header to check timeout timestamp env.wl_storage .storage @@ -196,28 +233,55 @@ pub fn init_storage() -> (Address, Address) { }); // initialize a token - let code = std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - let token = tx::ctx().init_account(code.clone()).unwrap(); + let token = tx::ctx().init_account(code_hash.clone()).unwrap(); // initialize an account - let account = tx::ctx().init_account(code).unwrap(); + let account = tx::ctx().init_account(code_hash).unwrap(); let key = token::balance_key(&token, &account); - let init_bal = Amount::from(1_000_000_000u64); - tx::ctx().write(&key, init_bal).unwrap(); + let init_bal = Amount::whole(100); + let bytes = init_bal.try_to_vec().expect("encoding failed"); + tx_host_env::with(|env| { + env.wl_storage.storage.write(&key, &bytes).unwrap(); + }); + + // epoch duration + let key = get_epoch_duration_storage_key(); + let epoch_duration = EpochDuration { + min_num_of_blocks: 10, + min_duration: DurationSecs(100), + }; + let bytes = epoch_duration.try_to_vec().unwrap(); + tx_host_env::with(|env| { + env.wl_storage.storage.write(&key, &bytes).unwrap(); + }); + + // max_expected_time_per_block + let time = DurationSecs::from(Duration::new(60, 0)); + let key = get_max_expected_time_per_block_key(); + let bytes = namada::ledger::storage::types::encode(&time); + tx_host_env::with(|env| { + env.wl_storage.storage.write(&key, &bytes).unwrap(); + }); + (token, account) } -pub fn prepare_client() --> (ClientId, AnyClientState, HashMap>) { +pub fn client_id() -> ClientId { + let (client_state, _) = dummy_client(); + ClientId::new(client_state.client_type(), 0).expect("invalid client ID") +} + +pub fn prepare_client() -> (ClientId, Any, HashMap>) { let mut writes = HashMap::new(); - let msg = msg_create_client(); + let (client_state, consensus_state) = dummy_client(); // client state - let client_state = msg.client_state.clone(); - let client_id = - client_id(client_state.client_type(), 0).expect("invalid client ID"); + let client_id = client_id(); let key = client_state_key(&client_id); - let bytes = msg.client_state.encode_vec().expect("encoding failed"); + let bytes = client_state + .into_box() + .encode_vec() + .expect("encoding failed"); writes.insert(key, bytes); // client type let key = client_type_key(&client_id); @@ -227,14 +291,54 @@ pub fn prepare_client() // consensus state let height = client_state.latest_height(); let key = consensus_state_key(&client_id, height); - let bytes = msg.consensus_state.encode_vec().expect("encoding failed"); + let bytes = consensus_state + .into_box() + .encode_vec() + .expect("encoding failed"); + writes.insert(key, bytes); + // client update time + let key = client_update_timestamp_key(&client_id); + let time = tx_host_env::with(|env| { + 
let header = env + .wl_storage + .storage + .get_block_header(None) + .unwrap() + .0 + .unwrap(); + header.time + }); + let bytes = TmTime::try_from(time) + .unwrap() + .encode_vec() + .expect("encoding failed"); + writes.insert(key, bytes); + // client update height + let key = client_update_height_key(&client_id); + let height = tx_host_env::with(|env| { + let height = env.wl_storage.storage.get_block_height().0; + Height::new(0, height.0).expect("invalid height") + }); + let bytes = height.encode_vec().expect("encoding failed"); writes.insert(key, bytes); // client counter let key = client_counter_key(); let bytes = 1_u64.to_be_bytes().to_vec(); writes.insert(key, bytes); - (client_id, client_state, writes) + (client_id, client_state.into(), writes) +} + +fn dummy_client() -> (MockClientState, MockConsensusState) { + let height = Height::new(0, 1).expect("invalid height"); + let header = MockHeader { + height, + timestamp: Timestamp::now(), + }; + let client_state = MockClientState::new(header); + let consensus_state = MockConsensusState::new(header); + + (client_state, consensus_state) } pub fn prepare_opened_connection( @@ -242,11 +346,16 @@ pub fn prepare_opened_connection( ) -> (ConnectionId, HashMap>) { let mut writes = HashMap::new(); - let conn_id = connection_id(0); + let conn_id = ConnectionId::new(0); let key = connection_key(&conn_id); let msg = msg_connection_open_init(client_id.clone()); - let mut conn = init_connection(&msg); - open_connection(&mut conn); + let conn = ConnectionEnd::new( + ConnState::Open, + msg.client_id_on_a.clone(), + msg.counterparty.clone(), + vec![msg.version.clone().unwrap_or_default()], + msg.delay_period, + ); let bytes = conn.encode_vec().expect("encoding failed"); writes.insert(key, bytes); // connection counter @@ -264,22 +373,23 @@ pub fn prepare_opened_channel( let mut writes = HashMap::new(); // port - let port_id = port_id("test_port").expect("invalid port ID"); + let port_id = PortId::transfer(); let key = port_key(&port_id); writes.insert(key, 0_u64.to_be_bytes().to_vec()); - // capability - let key = capability_key(0); - let bytes = port_id.as_bytes().to_vec(); - writes.insert(key, bytes); // channel - let channel_id = channel_id(0); - let port_channel_id = port_channel_id(port_id.clone(), channel_id); + let channel_id = ChannelId::new(0); + let port_channel_id = + PortChannelId::new(channel_id.clone(), port_id.clone()); let key = channel_key(&port_channel_id); - let msg = msg_channel_open_init(port_id.clone(), conn_id.clone()); - let mut channel = msg.channel; - open_channel(&mut channel); - if !is_ordered { - channel.ordering = Order::Unordered; + let mut channel = ChannelEnd::new( + ChanState::Open, + Order::Unordered, + dummy_channel_counterparty(), + vec![conn_id.clone()], + ChanVersion::new(VERSION.to_string()), + ); + if is_ordered { + channel.ordering = Order::Ordered; } let bytes = channel.encode_vec().expect("encoding failed"); writes.insert(key, bytes); @@ -287,99 +397,106 @@ pub fn prepare_opened_channel( (port_id, channel_id, writes) } -pub fn msg_create_client() -> MsgCreateAnyClient { - let height = Height::new(0, 1); - let header = MockHeader { - height, - timestamp: Timestamp::now(), - }; - let client_state = MockClientState::new(header).wrap_any(); - let consensus_state = MockConsensusState::new(header).wrap_any(); - MsgCreateAnyClient { - client_state, - consensus_state, - signer: Signer::new("test"), +pub fn msg_create_client() -> MsgCreateClient { + let (client_state, consensus_state) = dummy_client(); + 
MsgCreateClient { + client_state: client_state.into(), + consensus_state: consensus_state.into(), + signer: Signer::from_str("test").expect("invalid signer"), } } -pub fn msg_update_client(client_id: ClientId) -> MsgUpdateAnyClient { - let height = Height::new(0, 2); +pub fn msg_update_client(client_id: ClientId) -> MsgUpdateClient { + let height = Height::new(0, 2).expect("invalid height"); let header = MockHeader { height, timestamp: Timestamp::now(), } - .wrap_any(); - MsgUpdateAnyClient { + .into(); + MsgUpdateClient { client_id, header, - signer: Signer::new("test"), + signer: Signer::from_str("test").expect("invalid signer"), } } -pub fn msg_upgrade_client(client_id: ClientId) -> MsgUpgradeAnyClient { - let height = Height::new(0, 1); +pub fn msg_upgrade_client(client_id: ClientId) -> MsgUpgradeClient { + let height = Height::new(0, 1).expect("invalid height"); let header = MockHeader { height, timestamp: Timestamp::now(), }; - let client_state = MockClientState::new(header).wrap_any(); - let consensus_state = MockConsensusState::new(header).wrap_any(); + let client_state = MockClientState::new(header).into(); + let consensus_state = MockConsensusState::new(header).into(); let proof_upgrade_client = MerkleProof { proofs: vec![CommitmentProof { proof: None }], }; let proof_upgrade_consensus_state = MerkleProof { proofs: vec![CommitmentProof { proof: None }], }; - MsgUpgradeAnyClient { + MsgUpgradeClient { client_id, client_state, consensus_state, proof_upgrade_client, proof_upgrade_consensus_state, - signer: Signer::new("test"), + signer: Signer::from_str("test").expect("invalid signer"), } } pub fn msg_connection_open_init(client_id: ClientId) -> MsgConnectionOpenInit { MsgConnectionOpenInit { - client_id, + client_id_on_a: client_id, counterparty: dummy_connection_counterparty(), version: None, - delay_period: Duration::new(100, 0), - signer: Signer::new("test"), + delay_period: Duration::new(0, 0), + signer: Signer::from_str("test").expect("invalid signer"), } } pub fn msg_connection_open_try( client_id: ClientId, - client_state: AnyClientState, + client_state: Any, ) -> MsgConnectionOpenTry { - MsgConnectionOpenTry { - previous_connection_id: None, - client_id, + let consensus_height = Height::new(0, 1).expect("invalid height"); + // Convert a message from RawMsgConnectionOpenTry + // because MsgConnectionOpenTry cannot be created directly + #[allow(deprecated)] + RawMsgConnectionOpenTry { + client_id: client_id.as_str().to_string(), client_state: Some(client_state), - counterparty: dummy_connection_counterparty(), - counterparty_versions: vec![ConnVersion::default()], - proofs: dummy_proofs(), - delay_period: Duration::new(100, 0), - signer: Signer::new("test"), + counterparty: Some(dummy_connection_counterparty().into()), + delay_period: 0, + counterparty_versions: vec![ConnVersion::default().into()], + proof_init: dummy_proof().into(), + proof_height: Some(dummy_proof_height().into()), + proof_consensus: dummy_proof().into(), + consensus_height: Some(consensus_height.into()), + proof_client: dummy_proof().into(), + signer: "test".to_string(), + previous_connection_id: ConnectionId::default().to_string(), } + .try_into() + .expect("invalid message") } pub fn msg_connection_open_ack( connection_id: ConnectionId, - client_state: AnyClientState, + client_state: Any, ) -> MsgConnectionOpenAck { - let counterparty_connection_id = - ConnectionId::from_str("counterpart_test_connection") - .expect("Creating a connection ID failed"); + let consensus_height = Height::new(0, 
1).expect("invalid height"); + let counterparty = dummy_connection_counterparty(); MsgConnectionOpenAck { - connection_id, - counterparty_connection_id, - client_state: Some(client_state), - proofs: dummy_proofs(), + conn_id_on_a: connection_id, + conn_id_on_b: counterparty.connection_id().cloned().unwrap(), + client_state_of_a_on_b: client_state, + proof_conn_end_on_b: dummy_proof(), + proof_client_state_of_a_on_b: dummy_proof(), + proof_consensus_state_of_a_on_b: dummy_proof(), + proofs_height_on_b: dummy_proof_height(), + consensus_height_of_a_on_b: consensus_height, version: ConnVersion::default(), - signer: Signer::new("test"), + signer: Signer::from_str("test").expect("invalid signer"), } } @@ -387,33 +504,29 @@ pub fn msg_connection_open_confirm( connection_id: ConnectionId, ) -> MsgConnectionOpenConfirm { MsgConnectionOpenConfirm { - connection_id, - proofs: dummy_proofs(), - signer: Signer::new("test"), + conn_id_on_b: connection_id, + proof_conn_end_on_a: dummy_proof(), + proof_height_on_a: dummy_proof_height(), + signer: Signer::from_str("test").expect("invalid signer"), } } -fn dummy_proofs() -> Proofs { - let height = Height::new(0, 1); - let consensus_proof = - ConsensusProof::new(vec![0].try_into().unwrap(), height).unwrap(); - Proofs::new( - vec![0].try_into().unwrap(), - Some(vec![0].try_into().unwrap()), - Some(consensus_proof), - None, - height, - ) - .unwrap() +fn dummy_proof() -> CommitmentProofBytes { + vec![0].try_into().unwrap() +} + +fn dummy_proof_height() -> Height { + Height::new(0, 1).unwrap() } fn dummy_connection_counterparty() -> ConnCounterparty { - let counterpart_client_id = ClientId::from_str("counterpart_test_client") - .expect("Creating a client ID failed"); - let counterpart_conn_id = - ConnectionId::from_str("counterpart_test_connection") - .expect("Creating a connection ID failed"); - connection_counterparty(counterpart_client_id, counterpart_conn_id) + let client_type = ClientType::new(MOCK_CLIENT_TYPE.to_string()); + let client_id = ClientId::new(client_type, 42).expect("invalid client ID"); + let conn_id = ConnectionId::new(12); + let commitment_prefix = + CommitmentPrefix::try_from(COMMITMENT_PREFIX.to_vec()) + .expect("the prefix should be parsable"); + ConnCounterparty::new(client_id, Some(conn_id), commitment_prefix) } pub fn msg_channel_open_init( @@ -421,9 +534,12 @@ pub fn msg_channel_open_init( conn_id: ConnectionId, ) -> MsgChannelOpenInit { MsgChannelOpenInit { - port_id, - channel: dummy_channel(ChanState::Init, Order::Ordered, conn_id), - signer: Signer::new("test"), + port_id_on_a: port_id, + connection_hops_on_a: vec![conn_id], + port_id_on_b: PortId::transfer(), + ordering: Order::Unordered, + signer: Signer::from_str("test").expect("invalid signer"), + version_proposal: ChanVersion::new(VERSION.to_string()), } } @@ -431,13 +547,20 @@ pub fn msg_channel_open_try( port_id: PortId, conn_id: ConnectionId, ) -> MsgChannelOpenTry { + let counterparty = dummy_channel_counterparty(); + #[allow(deprecated)] MsgChannelOpenTry { - port_id, - previous_channel_id: None, - channel: dummy_channel(ChanState::TryOpen, Order::Ordered, conn_id), - counterparty_version: ChanVersion::ics20(), - proofs: dummy_proofs(), - signer: Signer::new("test"), + port_id_on_b: port_id, + connection_hops_on_b: vec![conn_id], + port_id_on_a: counterparty.port_id().clone(), + chan_id_on_a: counterparty.channel_id().cloned().unwrap(), + version_supported_on_a: ChanVersion::new(VERSION.to_string()), + proof_chan_end_on_a: dummy_proof(), + proof_height_on_a: 
dummy_proof_height(), + ordering: Order::Unordered, + signer: Signer::from_str("test").expect("invalid signer"), + previous_channel_id: ChannelId::default().to_string(), + version_proposal: ChanVersion::default(), } } @@ -445,15 +568,15 @@ pub fn msg_channel_open_ack( port_id: PortId, channel_id: ChannelId, ) -> MsgChannelOpenAck { + let counterparty = dummy_channel_counterparty(); MsgChannelOpenAck { - port_id, - channel_id, - counterparty_channel_id: *dummy_channel_counterparty() - .channel_id() - .unwrap(), - counterparty_version: ChanVersion::ics20(), - proofs: dummy_proofs(), - signer: Signer::new("test"), + port_id_on_a: port_id, + chan_id_on_a: channel_id, + chan_id_on_b: counterparty.channel_id().cloned().unwrap(), + version_on_b: ChanVersion::new(VERSION.to_string()), + proof_chan_end_on_b: dummy_proof(), + proof_height_on_b: dummy_proof_height(), + signer: Signer::from_str("test").expect("invalid signer"), } } @@ -462,10 +585,11 @@ pub fn msg_channel_open_confirm( channel_id: ChannelId, ) -> MsgChannelOpenConfirm { MsgChannelOpenConfirm { - port_id, - channel_id, - proofs: dummy_proofs(), - signer: Signer::new("test"), + port_id_on_b: port_id, + chan_id_on_b: channel_id, + proof_chan_end_on_a: dummy_proof(), + proof_height_on_a: dummy_proof_height(), + signer: Signer::from_str("test").expect("invalid signer"), } } @@ -474,9 +598,9 @@ pub fn msg_channel_close_init( channel_id: ChannelId, ) -> MsgChannelCloseInit { MsgChannelCloseInit { - port_id, - channel_id, - signer: Signer::new("test"), + port_id_on_a: port_id, + chan_id_on_a: channel_id, + signer: Signer::from_str("test").expect("invalid signer"), } } @@ -485,33 +609,18 @@ pub fn msg_channel_close_confirm( channel_id: ChannelId, ) -> MsgChannelCloseConfirm { MsgChannelCloseConfirm { - port_id, - channel_id, - proofs: dummy_proofs(), - signer: Signer::new("test"), + port_id_on_b: port_id, + chan_id_on_b: channel_id, + proof_chan_end_on_a: dummy_proof(), + proof_height_on_a: dummy_proof_height(), + signer: Signer::from_str("test").expect("invalid signer"), } } -fn dummy_channel( - state: ChanState, - order: Order, - connection_id: ConnectionId, -) -> ChannelEnd { - ChannelEnd::new( - state, - order, - dummy_channel_counterparty(), - vec![connection_id], - ChanVersion::ics20(), - ) -} - pub fn dummy_channel_counterparty() -> ChanCounterparty { - let counterpart_port_id = PortId::from_str("counterpart_test_port") - .expect("Creating a port ID failed"); - let counterpart_channel_id = ChannelId::from_str("channel-42") - .expect("Creating a channel ID failed"); - channel_counterparty(counterpart_port_id, counterpart_channel_id) + let port_id = PortId::transfer(); + let channel_id = ChannelId::new(42); + ChanCounterparty::new(port_id, Some(channel_id)) } pub fn unorder_channel(channel: &mut ChannelEnd) { @@ -524,43 +633,46 @@ pub fn msg_transfer( token: String, sender: &Address, ) -> MsgTransfer { - let timeout_timestamp = - (Timestamp::now() + Duration::from_secs(100)).unwrap(); + let timestamp = (Timestamp::now() + Duration::from_secs(100)).unwrap(); MsgTransfer { - source_port: port_id, - source_channel: channel_id, - token: Some(Coin { + port_id_on_a: port_id, + chan_id_on_a: channel_id, + token: Coin { denom: token, amount: 100u64.to_string(), - }), - sender: Signer::new(sender.to_string()), - receiver: Signer::new( - address::testing::gen_established_address().to_string(), - ), - timeout_height: Height::new(1, 100), - timeout_timestamp, + }, + sender: Signer::from_str(&sender.to_string()).expect("invalid signer"), + receiver: 
Signer::from_str( + &address::testing::gen_established_address().to_string(), + ) + .expect("invalid signer"), + timeout_height_on_b: TimeoutHeight::Never, + timeout_timestamp_on_b: timestamp, } } pub fn set_timeout_timestamp(msg: &mut MsgTransfer) { - msg.timeout_timestamp = - (msg.timeout_timestamp - Duration::from_secs(101)).unwrap(); + msg.timeout_timestamp_on_b = + (msg.timeout_timestamp_on_b - Duration::from_secs(101)).unwrap(); } pub fn msg_packet_recv(packet: Packet) -> MsgRecvPacket { MsgRecvPacket { packet, - proofs: dummy_proofs(), - signer: Signer::new("test"), + proof_commitment_on_a: dummy_proof(), + proof_height_on_a: dummy_proof_height(), + signer: Signer::from_str("test").expect("invalid signer"), } } pub fn msg_packet_ack(packet: Packet) -> MsgAcknowledgement { + let packet_ack = TokenTransferAcknowledgement::success(); MsgAcknowledgement { packet, - acknowledgement: PacketAck::result_success().encode_to_vec().into(), - proofs: dummy_proofs(), - signer: Signer::new("test"), + acknowledgement: packet_ack.into(), + proof_acked_on_b: dummy_proof(), + proof_height_on_b: dummy_proof_height(), + signer: Signer::from_str("test").expect("invalid signer"), } } @@ -572,32 +684,37 @@ pub fn received_packet( receiver: &Address, ) -> Packet { let counterparty = dummy_channel_counterparty(); - let timeout_timestamp = - (Timestamp::now() + Duration::from_secs(100)).unwrap(); - let data = FungibleTokenPacketData { - denom: token, - amount: 100u64.to_string(), - sender: address::testing::gen_established_address().to_string(), - receiver: receiver.to_string(), + let timestamp = (Timestamp::now() + Duration::from_secs(100)).unwrap(); + let coin = PrefixedCoin { + denom: token.parse().expect("invalid denom"), + amount: 100.into(), + }; + let sender = address::testing::gen_established_address().to_string(); + let data = PacketData { + token: coin, + sender: Signer::from_str(&sender).expect("invalid signer"), + receiver: Signer::from_str(&receiver.to_string()) + .expect("invalid signer"), }; Packet { - sequence, - source_port: counterparty.port_id().clone(), - source_channel: *counterparty.channel_id().unwrap(), - destination_port: port_id, - destination_channel: channel_id, + seq_on_a: sequence, + port_id_on_a: counterparty.port_id().clone(), + chan_id_on_a: counterparty.channel_id().unwrap().clone(), + port_id_on_b: port_id, + chan_id_on_b: channel_id, data: serde_json::to_vec(&data).unwrap(), - timeout_height: Height::new(1, 10), - timeout_timestamp, + timeout_height_on_b: TimeoutHeight::Never, + timeout_timestamp_on_b: timestamp, } } pub fn msg_timeout(packet: Packet, next_sequence_recv: Sequence) -> MsgTimeout { MsgTimeout { packet, - next_sequence_recv, - proofs: dummy_proofs(), - signer: Signer::new("test"), + next_seq_recv_on_b: next_sequence_recv, + proof_unreceived_on_b: dummy_proof(), + proof_height_on_b: dummy_proof_height(), + signer: Signer::from_str("test").expect("invalid signer"), } } @@ -605,22 +722,46 @@ pub fn msg_timeout_on_close( packet: Packet, next_sequence_recv: Sequence, ) -> MsgTimeoutOnClose { - // add the channel proof - let height = Height::new(0, 1); - let consensus_proof = - ConsensusProof::new(vec![0].try_into().unwrap(), height).unwrap(); - let proofs = Proofs::new( - vec![0].try_into().unwrap(), - Some(vec![0].try_into().unwrap()), - Some(consensus_proof), - Some(vec![0].try_into().unwrap()), - height, - ) - .unwrap(); MsgTimeoutOnClose { packet, - next_sequence_recv, - proofs, - signer: Signer::new("test"), + next_seq_recv_on_b: next_sequence_recv, + 
proof_unreceived_on_b: dummy_proof(), + proof_close_on_b: dummy_proof(), + proof_height_on_b: dummy_proof_height(), + signer: Signer::from_str("test").expect("invalid signer"), } } + +pub fn packet_from_message( + msg: &MsgTransfer, + sequence: Sequence, + counterparty: &ChanCounterparty, +) -> Packet { + let coin = PrefixedCoin::try_from(msg.token.clone()).expect("invalid coin"); + let packet_data = PacketData { + token: coin, + sender: msg.sender.clone(), + receiver: msg.receiver.clone(), + }; + let data = + serde_json::to_vec(&packet_data).expect("Encoding PacketData failed"); + + Packet { + seq_on_a: sequence, + port_id_on_a: msg.port_id_on_a.clone(), + chan_id_on_a: msg.chan_id_on_a.clone(), + port_id_on_b: counterparty.port_id.clone(), + chan_id_on_b: counterparty + .channel_id() + .cloned() + .expect("the counterparty channel should exist"), + data, + timeout_height_on_b: msg.timeout_height_on_b, + timeout_timestamp_on_b: msg.timeout_timestamp_on_b, + } +} + +pub fn balance_key_with_ibc_prefix(denom: String, owner: &Address) -> Key { + let prefix = ibc_token_prefix(denom).expect("invalid denom"); + token::multitoken_balance_key(&prefix, owner) +} diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index 04a545e8b10..1ad4b225997 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs @@ -21,7 +21,6 @@ mod tests { use std::panic; use itertools::Itertools; - use namada::core::ledger::ibc::actions::IbcActions; use namada::ibc::tx_msg::Msg; use namada::ledger::ibc::storage as ibc_storage; use namada::ledger::ibc::vp::{ @@ -29,12 +28,16 @@ mod tests { }; use namada::ledger::tx_env::TxEnv; use namada::proto::{SignedTxData, Tx}; - use namada::tendermint_proto::Protobuf; + use namada::types::chain::ChainId; + use namada::types::hash::Hash; use namada::types::key::*; use namada::types::storage::{self, BlockHash, BlockHeight, Key, KeySeg}; use namada::types::time::DateTimeUtc; use namada::types::token::{self, Amount}; use namada::types::{address, key}; + use namada_core::ledger::ibc::context::transfer_mod::testing::DummyTransferModule; + use namada_core::ledger::ibc::Error as IbcActionError; + use namada_test_utils::TestWasms; use namada_tx_prelude::{ BorshDeserialize, BorshSerialize, StorageRead, StorageWrite, }; @@ -46,10 +49,6 @@ mod tests { use crate::tx::{tx_host_env, TestTxEnv}; use crate::vp::{vp_host_env, TestVpEnv}; - // paths to the WASMs used for tests - const VP_ALWAYS_TRUE_WASM: &str = "../wasm_for_tests/vp_always_true.wasm"; - const VP_ALWAYS_FALSE_WASM: &str = "../wasm_for_tests/vp_always_false.wasm"; - #[test] fn test_tx_read_write() { // The environment must be initialized first @@ -220,9 +219,14 @@ mod tests { // The environment must be initialized first tx_host_env::init(); - let code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - tx::ctx().init_account(code).unwrap(); + let code = TestWasms::VpAlwaysTrue.read_bytes(); + let code_hash = Hash::sha256(&code); + tx_host_env::with(|env| { + // store wasm code + let key = Key::wasm_code(&code_hash); + env.wl_storage.storage.write(&key, code.clone()).unwrap(); + }); + tx::ctx().init_account(code_hash).unwrap(); } #[test] @@ -445,6 +449,7 @@ mod tests { // Use some arbitrary bytes for tx code let code = vec![4, 3, 2, 1, 0]; + let expiration = Some(DateTimeUtc::now()); for data in &[ // Tx with some arbitrary data Some(vec![1, 2, 3, 4].repeat(10)), @@ -452,7 +457,13 @@ mod tests { None, ] { let signed_tx_data = vp_host_env::with(|env| { - env.tx = Tx::new(code.clone(), 
data.clone()).sign(&keypair); + env.tx = Tx::new( + code.clone(), + data.clone(), + env.wl_storage.storage.chain_id.clone(), + expiration, + ) + .sign(&keypair); let tx_data = env.tx.data.as_ref().expect("data should exist"); SignedTxData::try_from_slice(&tx_data[..]) @@ -522,24 +533,34 @@ mod tests { vp_host_env::init(); // evaluating without any code should fail - let empty_code = vec![]; + let empty_code = Hash::zero(); let input_data = vec![]; let result = vp::CTX.eval(empty_code, input_data).unwrap(); assert!(!result); // evaluating the VP template which always returns `true` should pass - let code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let code = TestWasms::VpAlwaysTrue.read_bytes(); + let code_hash = Hash::sha256(&code); + vp_host_env::with(|env| { + // store wasm codes + let key = Key::wasm_code(&code_hash); + env.wl_storage.storage.write(&key, code.clone()).unwrap(); + }); let input_data = vec![]; - let result = vp::CTX.eval(code, input_data).unwrap(); + let result = vp::CTX.eval(code_hash, input_data).unwrap(); assert!(result); // evaluating the VP template which always returns `false` shouldn't // pass - let code = - std::fs::read(VP_ALWAYS_FALSE_WASM).expect("cannot load wasm"); + let code = TestWasms::VpAlwaysFalse.read_bytes(); + let code_hash = Hash::sha256(&code); + vp_host_env::with(|env| { + // store wasm codes + let key = Key::wasm_code(&code_hash); + env.wl_storage.storage.write(&key, code.clone()).unwrap(); + }); let input_data = vec![]; - let result = vp::CTX.eval(code, input_data).unwrap(); + let result = vp::CTX.eval(code_hash, input_data).unwrap(); assert!(!result); } @@ -550,60 +571,22 @@ mod tests { ibc::init_storage(); - // Start an invalid transaction - let msg = ibc::msg_create_client(); - let mut tx_data = vec![]; - msg.clone() - .to_any() - .encode(&mut tx_data) - .expect("encoding failed"); - let tx = Tx { - code: vec![], - data: Some(tx_data.clone()), - timestamp: DateTimeUtc::now(), - } - .sign(&key::testing::keypair_1()); - // get and increment the connection counter - let counter_key = ibc::client_counter_key(); - let counter = tx::ctx() - .get_and_inc_counter(&counter_key) - .expect("getting the counter failed"); - let client_id = ibc::client_id(msg.client_state.client_type(), counter) - .expect("invalid client ID"); - // only insert a client type - let client_type_key = ibc::client_type_key(&client_id); - tx::ctx() - .write( - &client_type_key, - msg.client_state.client_type().as_str().as_bytes(), - ) - .unwrap(); - - // Check should fail due to no client state - let mut env = tx_host_env::take(); - let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(matches!( - result.expect_err("validation succeeded unexpectedly"), - IbcError::ClientError(_), - )); - // drop the transaction - env.wl_storage.drop_tx(); - // Start a transaction to create a new client - tx_host_env::set(env); let msg = ibc::msg_create_client(); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // create a client with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) .expect("creating a client failed"); // Check @@ -623,102 +606,24 @@ mod tests { .set_header(tm_dummy_header()) .unwrap(); - // Start an invalid transaction - 
tx_host_env::set(env); - let msg = ibc::msg_update_client(client_id); - let mut tx_data = vec![]; - msg.clone() - .to_any() - .encode(&mut tx_data) - .expect("encoding failed"); - let tx = Tx { - code: vec![], - data: Some(tx_data.clone()), - timestamp: DateTimeUtc::now(), - } - .sign(&key::testing::keypair_1()); - // get and update the client without a header - let client_id = msg.client_id.clone(); - // update the client with the same state - let old_data = ibc::msg_create_client(); - let same_client_state = old_data.client_state.clone(); - let height = same_client_state.latest_height(); - let same_consensus_state = old_data.consensus_state; - let client_state_key = ibc::client_state_key(&client_id); - tx::ctx() - .write_bytes( - &client_state_key, - same_client_state.encode_vec().unwrap(), - ) - .unwrap(); - let consensus_state_key = ibc::consensus_state_key(&client_id, height); - tx::ctx() - .write( - &consensus_state_key, - same_consensus_state.encode_vec().unwrap(), - ) - .unwrap(); - let event = ibc::make_update_client_event(&client_id, &msg); - TxEnv::emit_ibc_event(tx::ctx(), &event.try_into().unwrap()).unwrap(); - - // Check should fail due to the invalid updating - let mut env = tx_host_env::take(); - let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(matches!( - result.expect_err("validation succeeded unexpectedly"), - IbcError::ClientError(_), - )); - // drop the transaction - env.wl_storage.drop_tx(); - // Start a transaction to update the client tx_host_env::set(env); - let msg = ibc::msg_update_client(client_id.clone()); + let client_id = ibc::client_id(); + let msg = ibc::msg_update_client(client_id); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // update the client with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("updating the client failed"); - - // Check - let mut env = tx_host_env::take(); - let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); - - // Commit - env.commit_tx_and_block(); - // update the block height for the following client update - env.wl_storage - .storage - .begin_block(BlockHash::default(), BlockHeight(3)) - .unwrap(); - env.wl_storage - .storage - .set_header(tm_dummy_header()) - .unwrap(); - - // Start a transaction to upgrade the client - tx_host_env::set(env); - let msg = ibc::msg_upgrade_client(client_id); - let mut tx_data = vec![]; - msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx { - code: vec![], - data: Some(tx_data.clone()), - timestamp: DateTimeUtc::now(), - } - .sign(&key::testing::keypair_1()); - // upgrade the client with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("upgrading the client failed"); + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) + .expect("updating a client failed"); // Check let env = tx_host_env::take(); @@ -743,59 +648,21 @@ mod tests { }); }); - // Start an invalid transaction - let msg = ibc::msg_connection_open_init(client_id.clone()); - let mut tx_data = vec![]; - msg.clone() - .to_any() - .encode(&mut tx_data) - .expect("encoding failed"); - let tx = Tx { - code: vec![], - data: Some(tx_data.clone()), - timestamp: DateTimeUtc::now(), - } - .sign(&key::testing::keypair_1()); - // get and increment the connection 
counter - let counter_key = ibc::connection_counter_key(); - let counter = tx::ctx() - .get_and_inc_counter(&counter_key) - .expect("getting the counter failed"); - // insert a new opened connection - let conn_id = ibc::connection_id(counter); - let conn_key = ibc::connection_key(&conn_id); - let mut connection = ibc::init_connection(&msg); - ibc::open_connection(&mut connection); - tx::ctx() - .write_bytes(&conn_key, connection.encode_vec().unwrap()) - .unwrap(); - let event = ibc::make_open_init_connection_event(&conn_id, &msg); - TxEnv::emit_ibc_event(tx::ctx(), &event.try_into().unwrap()).unwrap(); - - // Check should fail due to directly opening a connection - let mut env = tx_host_env::take(); - let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(matches!( - result.expect_err("validation succeeded unexpectedly"), - IbcError::ConnectionError(_), - )); - // drop the transaction - env.wl_storage.drop_tx(); - // Start a transaction for ConnectionOpenInit - tx_host_env::set(env); let msg = ibc::msg_connection_open_init(client_id); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // init a connection with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) .expect("creating a connection failed"); // Check @@ -805,26 +672,33 @@ mod tests { // Commit env.commit_tx_and_block(); - // set a block header again + // for the next block + env.wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); env.wl_storage .storage .set_header(tm_dummy_header()) .unwrap(); + tx_host_env::set(env); // Start the next transaction for ConnectionOpenAck - tx_host_env::set(env); + let conn_id = ibc::ConnectionId::new(0); let msg = ibc::msg_connection_open_ack(conn_id, client_state); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // open the connection with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) .expect("opening the connection failed"); // Check @@ -841,29 +715,31 @@ mod tests { // Set the initial state before starting transactions ibc::init_storage(); - let mut env = tx_host_env::take(); let (client_id, client_state, writes) = ibc::prepare_client(); writes.into_iter().for_each(|(key, val)| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + tx_host_env::with(|env| { + env.wl_storage + .storage + .write(&key, &val) + .expect("write error"); + }) }); // Start a transaction for ConnectionOpenTry - tx_host_env::set(env); let msg = ibc::msg_connection_open_try(client_id, client_state); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // open try a connection with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) 
.expect("creating a connection failed"); // Check @@ -873,27 +749,33 @@ mod tests { // Commit env.commit_tx_and_block(); - // set a block header again + // for the next block + env.wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); env.wl_storage .storage .set_header(tm_dummy_header()) .unwrap(); + tx_host_env::set(env); // Start the next transaction for ConnectionOpenConfirm - tx_host_env::set(env); - let conn_id = ibc::connection_id(0); + let conn_id = ibc::ConnectionId::new(0); let msg = ibc::msg_connection_open_confirm(conn_id); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // open the connection with the mssage - tx::ctx() - .dispatch_ibc_action(&tx_data) + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) .expect("opening the connection failed"); // Check @@ -921,108 +803,22 @@ mod tests { }); }); - // Start an invalid transaction - let port_id = ibc::port_id("test_port").expect("invalid port ID"); - let msg = ibc::msg_channel_open_init(port_id.clone(), conn_id.clone()); - let mut tx_data = vec![]; - msg.clone() - .to_any() - .encode(&mut tx_data) - .expect("encoding failed"); - let tx = Tx { - code: vec![], - data: Some(tx_data.clone()), - timestamp: DateTimeUtc::now(), - } - .sign(&key::testing::keypair_1()); - // not bind a port - // get and increment the channel counter - let counter_key = ibc::channel_counter_key(); - let counter = tx::ctx() - .get_and_inc_counter(&counter_key) - .expect("getting the counter failed"); - // channel - let channel_id = ibc::channel_id(counter); - let port_channel_id = ibc::port_channel_id(port_id, channel_id); - let channel_key = ibc::channel_key(&port_channel_id); - tx::ctx() - .write_bytes(&channel_key, msg.channel.encode_vec().unwrap()) - .unwrap(); - let event = ibc::make_open_init_channel_event(&channel_id, &msg); - TxEnv::emit_ibc_event(tx::ctx(), &event.try_into().unwrap()).unwrap(); - - // Check should fail due to no port binding - let mut env = tx_host_env::take(); - let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(matches!( - result.expect_err("validation succeeded unexpectedly"), - IbcError::ChannelError(_), - )); - // drop the transaction - env.wl_storage.drop_tx(); - - // Start an invalid transaction - tx_host_env::set(env); - let port_id = ibc::port_id("test_port").expect("invalid port ID"); - let msg = ibc::msg_channel_open_init(port_id.clone(), conn_id.clone()); - let mut tx_data = vec![]; - msg.clone() - .to_any() - .encode(&mut tx_data) - .expect("encoding failed"); - let tx = Tx { - code: vec![], - data: Some(tx_data.clone()), - timestamp: DateTimeUtc::now(), - } - .sign(&key::testing::keypair_1()); - // bind a port - tx::ctx() - .bind_port(&port_id) - .expect("binding the port failed"); - // get and increment the channel counter - let counter_key = ibc::channel_counter_key(); - let counter = tx::ctx() - .get_and_inc_counter(&counter_key) - .expect("getting the counter failed"); - // insert a opened channel - let channel_id = ibc::channel_id(counter); - let port_channel_id = ibc::port_channel_id(port_id, channel_id); - let channel_key = ibc::channel_key(&port_channel_id); - let mut channel = msg.channel.clone(); - ibc::open_channel(&mut channel); - tx::ctx() - .write_bytes(&channel_key, channel.encode_vec().unwrap()) - 
.unwrap(); - let event = ibc::make_open_init_channel_event(&channel_id, &msg); - TxEnv::emit_ibc_event(tx::ctx(), &event.try_into().unwrap()).unwrap(); - - // Check should fail due to directly opening a channel - - let mut env = tx_host_env::take(); - let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(matches!( - result.expect_err("validation succeeded unexpectedly"), - IbcError::ChannelError(_), - )); - // drop the transaction - env.wl_storage.drop_tx(); - // Start a transaction for ChannelOpenInit - tx_host_env::set(env); - let port_id = ibc::port_id("test_port").expect("invalid port ID"); + let port_id = ibc::PortId::transfer(); let msg = ibc::msg_channel_open_init(port_id.clone(), conn_id); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // init a channel with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) .expect("creating a channel failed"); // Check @@ -1032,21 +828,33 @@ mod tests { // Commit env.commit_tx_and_block(); + // for the next block + env.wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); + env.wl_storage + .storage + .set_header(tm_dummy_header()) + .unwrap(); tx_host_env::set(env); // Start the next transaction for ChannelOpenAck + let channel_id = ibc::ChannelId::new(0); let msg = ibc::msg_channel_open_ack(port_id, channel_id); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // open the channel with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) .expect("opening the channel failed"); // Check @@ -1075,19 +883,21 @@ mod tests { }); // Start a transaction for ChannelOpenTry - let port_id = ibc::port_id("test_port").expect("invalid port ID"); + let port_id = ibc::PortId::transfer(); let msg = ibc::msg_channel_open_try(port_id.clone(), conn_id); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // try open a channel with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) .expect("creating a channel failed"); // Check @@ -1097,22 +907,33 @@ mod tests { // Commit env.commit_tx_and_block(); + // for the next block + env.wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); + env.wl_storage + .storage + .set_header(tm_dummy_header()) + .unwrap(); + tx_host_env::set(env); // Start the next transaction for ChannelOpenConfirm - tx_host_env::set(env); - let channel_id = ibc::channel_id(0); + let channel_id = ibc::ChannelId::new(0); let msg = ibc::msg_channel_open_confirm(port_id, channel_id); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: 
DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // open a channel with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) .expect("opening the channel failed"); // Check @@ -1122,7 +943,7 @@ mod tests { } #[test] - fn test_ibc_channel_close_init() { + fn test_ibc_channel_close_init_fail() { // The environment must be initialized first tx_host_env::init(); @@ -1148,20 +969,30 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // close the channel with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) + let mut actions = tx_host_env::ibc::ibc_actions(tx::ctx()); + // the dummy module closes the channel + let dummy_module = DummyTransferModule {}; + actions.add_transfer_route(dummy_module.module_id(), dummy_module); + actions + .execute(&tx_data) .expect("closing the channel failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + // VP should fail because the transfer channel cannot be closed + assert!(matches!( + result.expect_err("validation succeeded unexpectedly"), + IbcError::IbcAction(IbcActionError::Execution(_)), + )); } #[test] @@ -1191,15 +1022,17 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // close the channel with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) .expect("closing the channel failed"); // Check @@ -1239,28 +1072,25 @@ mod tests { .encode(&mut tx_data) .expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // send the token and a packet with the data - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("sending a packet failed"); + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) + .expect("sending a token failed"); // Check let mut env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(result.expect("validation failed unexpectedly")); // Check if the token was escrowed - let key_prefix = ibc_storage::ibc_account_prefix( - &msg.source_port, - &msg.source_channel, + let escrow = token::balance_key( &token, - ); - let escrow = token::multitoken_balance_key( - &key_prefix, &address::Address::Internal(address::InternalAddress::IbcEscrow), ); let token_vp_result = @@ -1269,30 +1099,59 @@ mod tests { // Commit env.commit_tx_and_block(); + // for the next block + env.wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); + env.wl_storage + .storage + .set_header(tm_dummy_header()) + .unwrap(); + tx_host_env::set(env); // Start the next transaction for receiving an ack - tx_host_env::set(env); let counterparty = ibc::dummy_channel_counterparty(); - let packet = - ibc::packet_from_message(&msg, 
ibc::sequence(1), &counterparty); + let packet = ibc::packet_from_message( + &msg, + ibc::Sequence::from(1), + &counterparty, + ); let msg = ibc::msg_packet_ack(packet); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // ack the packet with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("the packet ack failed"); + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) + .expect("ack failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(result.expect("validation failed unexpectedly")); + // Check the balance + tx_host_env::set(env); + let balance_key = token::balance_key(&token, &sender); + let balance: Option = tx_host_env::with(|env| { + env.wl_storage.read(&balance_key).expect("read error") + }); + assert_eq!(balance, Some(Amount::whole(0))); + let escrow_key = token::balance_key( + &token, + &address::Address::Internal(address::InternalAddress::IbcEscrow), + ); + let escrow: Option = tx_host_env::with(|env| { + env.wl_storage.read(&escrow_key).expect("read error") + }); + assert_eq!(escrow, Some(Amount::whole(100))); } #[test] @@ -1310,10 +1169,14 @@ mod tests { writes.extend(channel_writes); // the origin-specific token let denom = format!("{}/{}/{}", port_id, channel_id, token); - let key_prefix = ibc_storage::ibc_token_prefix(denom).unwrap(); - let key = token::multitoken_balance_key(&key_prefix, &sender); - let init_bal = Amount::from(1_000_000_000u64); - writes.insert(key, init_bal.try_to_vec().unwrap()); + let key_prefix = ibc_storage::ibc_token_prefix(&denom).unwrap(); + let balance_key = token::multitoken_balance_key(&key_prefix, &sender); + let init_bal = Amount::whole(100); + writes.insert(balance_key.clone(), init_bal.try_to_vec().unwrap()); + // original denom + let hash = ibc_storage::calc_hash(&denom); + let denom_key = ibc_storage::ibc_denom_key(&hash); + writes.insert(denom_key, denom.as_bytes().to_vec()); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { env.wl_storage @@ -1325,35 +1188,52 @@ mod tests { // Start a transaction to send a packet // Set this chain is the sink zone - let denom = format!("{}/{}/{}", port_id, channel_id, token); - let msg = - ibc::msg_transfer(port_id.clone(), channel_id, denom, &sender); + let ibc_token = address::Address::Internal( + address::InternalAddress::IbcToken(hash), + ); + let hashed_denom = + format!("{}/{}", ibc_storage::MULTITOKEN_STORAGE_KEY, ibc_token); + let msg = ibc::msg_transfer(port_id, channel_id, hashed_denom, &sender); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // send the token and a packet with the data - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("sending a packet failed"); + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) + .expect("sending a token failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(result.expect("validation failed unexpectedly")); // Check if the token was burned - let key_prefix = - ibc_storage::ibc_account_prefix(&port_id, 
&channel_id, &token); - let burn = token::multitoken_balance_key( - &key_prefix, + let burn = token::balance_key( + &token, &address::Address::Internal(address::InternalAddress::IbcBurn), ); let result = ibc::validate_token_vp_from_tx(&env, &tx, &burn); assert!(result.expect("token validation failed unexpectedly")); + // Check the balance + tx_host_env::set(env); + let balance: Option = tx_host_env::with(|env| { + env.wl_storage.read(&balance_key).expect("read error") + }); + assert_eq!(balance, Some(Amount::whole(0))); + let burn_key = token::balance_key( + &token, + &address::Address::Internal(address::InternalAddress::IbcBurn), + ); + let burn: Option = tx_host_env::with(|env| { + env.wl_storage.read(&burn_key).expect("read error") + }); + assert_eq!(burn, Some(Amount::whole(100))); } #[test] @@ -1369,12 +1249,6 @@ mod tests { let (port_id, channel_id, channel_writes) = ibc::prepare_opened_channel(&conn_id, false); writes.extend(channel_writes); - // the origin-specific token - let denom = format!("{}/{}/{}", port_id, channel_id, token); - let key_prefix = ibc_storage::ibc_token_prefix(denom).unwrap(); - let key = token::multitoken_balance_key(&key_prefix, &receiver); - let init_bal = Amount::from(1_000_000_000u64); - writes.insert(key, init_bal.try_to_vec().unwrap()); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { @@ -1388,8 +1262,8 @@ mod tests { // packet let packet = ibc::received_packet( port_id.clone(), - channel_id, - ibc::sequence(1), + channel_id.clone(), + ibc::Sequence::from(1), token.to_string(), &receiver, ); @@ -1399,29 +1273,37 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // receive a packet with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("receiving a packet failed"); + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) + .expect("receiving the token failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(result.expect("validation failed unexpectedly")); // Check if the token was minted - let key_prefix = - ibc_storage::ibc_account_prefix(&port_id, &channel_id, &token); - let mint = token::multitoken_balance_key( - &key_prefix, + let mint = token::balance_key( + &token, &address::Address::Internal(address::InternalAddress::IbcMint), ); let result = ibc::validate_token_vp_from_tx(&env, &tx, &mint); assert!(result.expect("token validation failed unexpectedly")); + // Check the balance + tx_host_env::set(env); + let denom = format!("{}/{}/{}", port_id, channel_id, token); + let key = ibc::balance_key_with_ibc_prefix(denom, &receiver); + let balance: Option = tx_host_env::with(|env| { + env.wl_storage.read(&key).expect("read error") + }); + assert_eq!(balance, Some(Amount::whole(100))); } #[test] @@ -1446,23 +1328,21 @@ mod tests { }); }); // escrow in advance - let key_prefix = - ibc_storage::ibc_account_prefix(&port_id, &channel_id, &token); - let escrow = token::multitoken_balance_key( - &key_prefix, + let escrow_key = token::balance_key( + &token, &address::Address::Internal(address::InternalAddress::IbcEscrow), ); - let val = Amount::from(1_000_000_000u64).try_to_vec().unwrap(); + let val = Amount::whole(100).try_to_vec().unwrap(); tx_host_env::with(|env| { env.wl_storage .storage - .write(&escrow, &val) 
+ .write(&escrow_key, &val) .expect("write error"); }); // Set this chain as the source zone let counterparty = ibc::dummy_channel_counterparty(); - let token = format!( + let denom = format!( "{}/{}/{}", counterparty.port_id().clone(), counterparty.channel_id().unwrap().clone(), @@ -1472,8 +1352,8 @@ mod tests { let packet = ibc::received_packet( port_id, channel_id, - ibc::sequence(1), - token, + ibc::Sequence::from(1), + denom, &receiver, ); @@ -1482,105 +1362,40 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // receive a packet with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("receiving a packet failed"); + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) + .expect("receiving a token failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(result.expect("validation failed unexpectedly")); // Check if the token was unescrowed - let result = ibc::validate_token_vp_from_tx(&env, &tx, &escrow); + let result = ibc::validate_token_vp_from_tx(&env, &tx, &escrow_key); assert!(result.expect("token validation failed unexpectedly")); - } - - #[test] - fn test_ibc_send_packet_unordered() { - // The environment must be initialized first - tx_host_env::init(); - - // Set the initial state before starting transactions - let (token, sender) = ibc::init_storage(); - let (client_id, _client_state, mut writes) = ibc::prepare_client(); - let (conn_id, conn_writes) = ibc::prepare_opened_connection(&client_id); - writes.extend(conn_writes); - let (port_id, channel_id, channel_writes) = - ibc::prepare_opened_channel(&conn_id, false); - writes.extend(channel_writes); - writes.into_iter().for_each(|(key, val)| { - tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); - }); - }); - - // Start a transaction to send a packet - let msg = - ibc::msg_transfer(port_id, channel_id, token.to_string(), &sender); - let mut tx_data = vec![]; - msg.clone() - .to_any() - .encode(&mut tx_data) - .expect("encoding failed"); - let tx = Tx { - code: vec![], - data: Some(tx_data.clone()), - timestamp: DateTimeUtc::now(), - } - .sign(&key::testing::keypair_1()); - // send a packet with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("sending a packet failed"); - - // the transaction does something before senging a packet - - // Check - let mut env = tx_host_env::take(); - let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); - - // Commit - env.commit_tx_and_block(); - - // Start the next transaction for receiving an ack + // Check the balance tx_host_env::set(env); - let counterparty = ibc::dummy_channel_counterparty(); - let packet = - ibc::packet_from_message(&msg, ibc::sequence(1), &counterparty); - let msg = ibc::msg_packet_ack(packet); - let mut tx_data = vec![]; - msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx { - code: vec![], - data: Some(tx_data.clone()), - timestamp: DateTimeUtc::now(), - } - .sign(&key::testing::keypair_1()); - // ack the packet with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("the packet ack failed"); - - // the transaction does something after the ack - - // Check - let env = 
tx_host_env::take(); - let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + let key = token::balance_key(&token, &receiver); + let balance: Option = tx_host_env::with(|env| { + env.wl_storage.read(&key).expect("read error") + }); + assert_eq!(balance, Some(Amount::whole(200))); + let escrow: Option = tx_host_env::with(|env| { + env.wl_storage.read(&escrow_key).expect("read error") + }); + assert_eq!(escrow, Some(Amount::whole(0))); } #[test] - fn test_ibc_receive_packet_unordered() { + fn test_ibc_unescrow_received_token() { // The environment must be initialized first tx_host_env::init(); @@ -1600,13 +1415,37 @@ mod tests { .expect("write error"); }); }); + // escrow in advance + let escrow_key = token::balance_key( + &token, + &address::Address::Internal(address::InternalAddress::IbcEscrow), + ); + let val = Amount::whole(100).try_to_vec().unwrap(); + tx_host_env::with(|env| { + env.wl_storage + .storage + .write(&escrow_key, &val) + .expect("write error"); + }); - // packet (sequence number isn't checked for the unordered channel) + // Set this chain as the source zone + let counterparty = ibc::dummy_channel_counterparty(); + let dummy_src_port = "dummy_transfer"; + let dummy_src_channel = "channel_42"; + let denom = format!( + "{}/{}/{}/{}/{}", + counterparty.port_id().clone(), + counterparty.channel_id().unwrap().clone(), + dummy_src_port, + dummy_src_channel, + token + ); + // packet let packet = ibc::received_packet( port_id, channel_id, - ibc::sequence(100), - token.to_string(), + ibc::Sequence::from(1), + denom, &receiver, ); @@ -1615,22 +1454,39 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // receive a packet with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("receiving a packet failed"); - - // the transaction does something according to the packet + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) + .expect("receiving a token failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(result.expect("validation failed unexpectedly")); + // Check if the token was unescrowed + let result = ibc::validate_token_vp_from_tx(&env, &tx, &escrow_key); + assert!(result.expect("token validation failed unexpectedly")); + // Check the balance + tx_host_env::set(env); + // without the source trace path + let denom = + format!("{}/{}/{}", dummy_src_port, dummy_src_channel, token); + let key = ibc::balance_key_with_ibc_prefix(denom, &receiver); + let balance: Option = tx_host_env::with(|env| { + env.wl_storage.read(&key).expect("read error") + }); + assert_eq!(balance, Some(Amount::whole(100))); + let escrow: Option = tx_host_env::with(|env| { + env.wl_storage.read(&escrow_key).expect("read error") + }); + assert_eq!(escrow, Some(Amount::whole(0))); } #[test] @@ -1665,44 +1521,55 @@ mod tests { .encode(&mut tx_data) .expect("encoding failed"); // send a packet with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("sending apacket failed"); + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) + .expect("sending a token failed"); // Commit - tx_host_env::commit_tx_and_block(); + let mut env = tx_host_env::take(); + env.commit_tx_and_block(); + // for the next block + 
env.wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); + env.wl_storage + .storage + .set_header(tm_dummy_header()) + .unwrap(); + tx_host_env::set(env); // Start a transaction to notify the timeout let counterparty = ibc::dummy_channel_counterparty(); - let packet = - ibc::packet_from_message(&msg, ibc::sequence(1), &counterparty); - let msg = ibc::msg_timeout(packet.clone(), ibc::sequence(1)); + let packet = ibc::packet_from_message( + &msg, + ibc::Sequence::from(1), + &counterparty, + ); + let msg = ibc::msg_timeout(packet, ibc::Sequence::from(1)); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); - // close the channel with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("closing the channel failed"); + // timeout the packet + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) + .expect("timeout failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(result.expect("validation failed unexpectedly")); // Check if the token was refunded - let key_prefix = ibc_storage::ibc_account_prefix( - &packet.source_port, - &packet.source_channel, + let escrow = token::balance_key( &token, - ); - let escrow = token::multitoken_balance_key( - &key_prefix, &address::Address::Internal(address::InternalAddress::IbcEscrow), ); let result = ibc::validate_token_vp_from_tx(&env, &tx, &escrow); @@ -1740,44 +1607,55 @@ mod tests { .encode(&mut tx_data) .expect("encoding failed"); // send a packet with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("sending a packet failed"); + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) + .expect("sending a token failed"); // Commit - tx_host_env::commit_tx_and_block(); + let mut env = tx_host_env::take(); + env.commit_tx_and_block(); + // for the next block + env.wl_storage + .storage + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); + env.wl_storage + .storage + .set_header(tm_dummy_header()) + .unwrap(); + tx_host_env::set(env); // Start a transaction to notify the timing-out on closed let counterparty = ibc::dummy_channel_counterparty(); - let packet = - ibc::packet_from_message(&msg, ibc::sequence(1), &counterparty); - let msg = ibc::msg_timeout_on_close(packet.clone(), ibc::sequence(1)); + let packet = ibc::packet_from_message( + &msg, + ibc::Sequence::from(1), + &counterparty, + ); + let msg = ibc::msg_timeout_on_close(packet, ibc::Sequence::from(1)); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let tx = Tx { - code: vec![], + code_or_hash: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); - // close the channel with the message - tx::ctx() - .dispatch_ibc_action(&tx_data) - .expect("closing the channel failed"); + // timeout the packet + tx_host_env::ibc::ibc_actions(tx::ctx()) + .execute(&tx_data) + .expect("timeout on close failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(result.expect("validation failed unexpectedly")); // Check if the token was refunded - let key_prefix = ibc_storage::ibc_account_prefix( - &packet.source_port, - 
&packet.source_channel, + let escrow = token::balance_key( &token, - ); - let escrow = token::multitoken_balance_key( - &key_prefix, &address::Address::Internal(address::InternalAddress::IbcEscrow), ); let result = ibc::validate_token_vp_from_tx(&env, &tx, &escrow); diff --git a/tests/src/vm_host_env/tx.rs b/tests/src/vm_host_env/tx.rs index 6c3ccd55ae7..b67cac24169 100644 --- a/tests/src/vm_host_env/tx.rs +++ b/tests/src/vm_host_env/tx.rs @@ -16,6 +16,7 @@ use namada::vm::prefix_iter::PrefixIterators; use namada::vm::wasm::run::Error; use namada::vm::wasm::{self, TxCache, VpCache}; use namada::vm::{self, WasmCacheRwAccess}; +use namada_core::types::hash::Hash; use namada_tx_prelude::{BorshSerialize, Ctx}; use tempfile::TempDir; @@ -62,11 +63,13 @@ impl Default for TestTxEnv { let (tx_wasm_cache, tx_cache_dir) = wasm::compilation_cache::common::testing::cache(); + let wl_storage = WlStorage { + storage: TestStorage::default(), + write_log: WriteLog::default(), + }; + let chain_id = wl_storage.storage.chain_id.clone(); Self { - wl_storage: WlStorage { - storage: TestStorage::default(), - write_log: WriteLog::default(), - }, + wl_storage, iterators: PrefixIterators::default(), gas_meter: BlockGasMeter::default(), tx_index: TxIndex::default(), @@ -76,7 +79,7 @@ impl Default for TestTxEnv { vp_cache_dir, tx_wasm_cache, tx_cache_dir, - tx: Tx::new(vec![], None), + tx: Tx::new(vec![], None, chain_id, None), } } } @@ -99,21 +102,30 @@ impl TestTxEnv { vp_whitelist: Option>, tx_whitelist: Option>, ) { - let _ = parameters::update_epoch_parameter( - &mut self.wl_storage.storage, + parameters::update_epoch_parameter( + &mut self.wl_storage, &epoch_duration.unwrap_or(EpochDuration { min_num_of_blocks: 1, min_duration: DurationSecs(5), }), - ); - let _ = parameters::update_tx_whitelist_parameter( - &mut self.wl_storage.storage, + ) + .unwrap(); + parameters::update_tx_whitelist_parameter( + &mut self.wl_storage, tx_whitelist.unwrap_or_default(), - ); - let _ = parameters::update_vp_whitelist_parameter( - &mut self.wl_storage.storage, + ) + .unwrap(); + parameters::update_vp_whitelist_parameter( + &mut self.wl_storage, vp_whitelist.unwrap_or_default(), - ); + ) + .unwrap(); + } + + pub fn store_wasm_code(&mut self, code: Vec) { + let hash = Hash::sha256(&code); + let key = Key::wasm_code(&hash); + self.wl_storage.storage.write(&key, code).unwrap(); } /// Fake accounts' existence by initializing their VP storage. @@ -145,7 +157,7 @@ impl TestTxEnv { /// Commit the genesis state. Typically, you'll want to call this after /// setting up the initial state, before running a transaction. 
pub fn commit_genesis(&mut self) { - self.wl_storage.commit_genesis().unwrap(); + self.wl_storage.commit_block().unwrap(); } pub fn commit_tx_and_block(&mut self) { @@ -202,7 +214,7 @@ impl TestTxEnv { &mut self.wl_storage.write_log, &mut self.gas_meter, &self.tx_index, - &self.tx.code, + &self.tx.code_or_hash, self.tx.data.as_ref().unwrap_or(&empty_data), &mut self.vp_wasm_cache, &mut self.tx_wasm_cache, @@ -424,7 +436,7 @@ mod native_tx_host_env { native_host_fn!(tx_get_chain_id(result_ptr: u64)); native_host_fn!(tx_get_block_height() -> u64); native_host_fn!(tx_get_tx_index() -> u32); - native_host_fn!(tx_get_block_time() -> i64); + native_host_fn!(tx_get_block_header(height: u64) -> i64); native_host_fn!(tx_get_block_hash(result_ptr: u64)); native_host_fn!(tx_get_block_epoch() -> u64); native_host_fn!(tx_get_native_token(result_ptr: u64)); diff --git a/tests/src/vm_host_env/vp.rs b/tests/src/vm_host_env/vp.rs index 4d5bbf3ddec..8220ce4c64b 100644 --- a/tests/src/vm_host_env/vp.rs +++ b/tests/src/vm_host_env/vp.rs @@ -64,15 +64,17 @@ impl Default for TestVpEnv { let (vp_wasm_cache, vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); + let wl_storage = WlStorage { + storage: TestStorage::default(), + write_log: WriteLog::default(), + }; + let chain_id = wl_storage.storage.chain_id.clone(); Self { addr: address::testing::established_address_1(), - wl_storage: WlStorage { - storage: TestStorage::default(), - write_log: WriteLog::default(), - }, + wl_storage, iterators: PrefixIterators::default(), gas_meter: VpGasMeter::default(), - tx: Tx::new(vec![], None), + tx: Tx::new(vec![], None, chain_id, None), tx_index: TxIndex::default(), keys_changed: BTreeSet::default(), verifiers: BTreeSet::default(), @@ -344,6 +346,7 @@ mod native_vp_host_env { // [`namada_vm_env::imports::vp`] `extern "C"` section. native_host_fn!(vp_read_pre(key_ptr: u64, key_len: u64) -> i64); native_host_fn!(vp_read_post(key_ptr: u64, key_len: u64) -> i64); + native_host_fn!(vp_read_temp(key_ptr: u64, key_len: u64) -> i64); native_host_fn!(vp_result_buffer(result_ptr: u64)); native_host_fn!(vp_has_key_pre(key_ptr: u64, key_len: u64) -> i64); native_host_fn!(vp_has_key_post(key_ptr: u64, key_len: u64) -> i64); @@ -353,6 +356,7 @@ mod native_vp_host_env { native_host_fn!(vp_get_chain_id(result_ptr: u64)); native_host_fn!(vp_get_block_height() -> u64); native_host_fn!(vp_get_tx_index() -> u32); + native_host_fn!(vp_get_block_header(height: u64) -> i64); native_host_fn!(vp_get_block_hash(result_ptr: u64)); native_host_fn!(vp_get_tx_code_hash(result_ptr: u64)); native_host_fn!(vp_get_block_epoch() -> u64); diff --git a/tx_prelude/Cargo.toml b/tx_prelude/Cargo.toml index c3c40355cb8..8e7cdb3f1b5 100644 --- a/tx_prelude/Cargo.toml +++ b/tx_prelude/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_tx_prelude" resolver = "2" -version = "0.14.0" +version = "0.16.0" [features] default = ["abciplus"] @@ -22,5 +22,5 @@ namada_proof_of_stake = {path = "../proof_of_stake", default-features = false} namada_vm_env = {path = "../vm_env", default-features = false} borsh = "0.9.0" sha2 = "0.10.1" -thiserror = "1.0.30" -rust_decimal = "1.26.1" +thiserror = "1.0.38" +rust_decimal = "=1.26.1" diff --git a/tx_prelude/src/ibc.rs b/tx_prelude/src/ibc.rs index 898988d35c7..12654df964e 100644 --- a/tx_prelude/src/ibc.rs +++ b/tx_prelude/src/ibc.rs @@ -1,50 +1,73 @@ //! IBC lower-level functions for transactions. 
-pub use namada_core::ledger::ibc::actions::{Error, IbcActions, Result}; +use std::cell::RefCell; +use std::rc::Rc; + +pub use namada_core::ledger::ibc::{ + Error, IbcActions, IbcCommonContext, IbcStorageContext, ProofSpec, + TransferModule, +}; use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::ledger::tx_env::TxEnv; pub use namada_core::types::ibc::IbcEvent; -use namada_core::types::storage::{BlockHeight, Key}; -use namada_core::types::time::Rfc3339String; +use namada_core::types::storage::{BlockHeight, Header, Key}; use namada_core::types::token::Amount; use crate::token::transfer_with_keys; -use crate::Ctx; +use crate::{Ctx, KeyValIterator}; + +/// IBC actions to handle an IBC message +pub fn ibc_actions(ctx: &mut Ctx) -> IbcActions { + let ctx = Rc::new(RefCell::new(ctx.clone())); + let mut actions = IbcActions::new(ctx.clone()); + let module = TransferModule::new(ctx); + actions.add_transfer_route(module.module_id(), module); + actions +} -impl IbcActions for Ctx { +impl IbcStorageContext for Ctx { type Error = crate::Error; + type PrefixIter<'iter> = KeyValIterator<(String, Vec)>; - fn read_ibc_data( + fn read( &self, key: &Key, ) -> std::result::Result>, Self::Error> { - let data = self.read_bytes(key)?; - Ok(data) + self.read_bytes(key) } - fn write_ibc_data( + fn write( &mut self, key: &Key, - data: impl AsRef<[u8]>, + data: Vec, ) -> std::result::Result<(), Self::Error> { self.write_bytes(key, data)?; Ok(()) } - fn delete_ibc_data( - &mut self, - key: &Key, - ) -> std::result::Result<(), Self::Error> { - self.delete(key)?; - Ok(()) + fn iter_prefix<'iter>( + &'iter self, + prefix: &Key, + ) -> Result, Self::Error> { + StorageRead::iter_prefix(self, prefix) + } + + fn iter_next<'iter>( + &'iter self, + iter: &mut Self::PrefixIter<'iter>, + ) -> Result)>, Self::Error> { + StorageRead::iter_next(self, iter) + } + + fn delete(&mut self, key: &Key) -> std::result::Result<(), Self::Error> { + StorageWrite::delete(self, key) } fn emit_ibc_event( &mut self, event: IbcEvent, ) -> std::result::Result<(), Self::Error> { - ::emit_ibc_event(self, &event)?; - Ok(()) + ::emit_ibc_event(self, &event) } fn transfer_token( @@ -53,19 +76,23 @@ impl IbcActions for Ctx { dest: &Key, amount: Amount, ) -> std::result::Result<(), Self::Error> { - transfer_with_keys(self, src, dest, amount)?; - Ok(()) + transfer_with_keys(self, src, dest, amount) } fn get_height(&self) -> std::result::Result { - let val = self.get_block_height()?; - Ok(val) + self.get_block_height() } - fn get_header_time( + fn get_header( &self, - ) -> std::result::Result { - let val = self.get_block_time()?; - Ok(val) + height: BlockHeight, + ) -> std::result::Result, Self::Error> { + self.get_block_header(height) + } + + fn log_string(&self, message: String) { + super::log_string(message); } } + +impl IbcCommonContext for Ctx {} diff --git a/tx_prelude/src/lib.rs b/tx_prelude/src/lib.rs index d7d6b84c969..e4f991cad21 100644 --- a/tx_prelude/src/lib.rs +++ b/tx_prelude/src/lib.rs @@ -30,16 +30,13 @@ use namada_core::types::chain::CHAIN_ID_LENGTH; use namada_core::types::internal::HostEnvResult; use namada_core::types::storage::TxIndex; pub use namada_core::types::storage::{ - self, BlockHash, BlockHeight, Epoch, BLOCK_HASH_LENGTH, + self, BlockHash, BlockHeight, Epoch, Header, BLOCK_HASH_LENGTH, }; -use namada_core::types::time::Rfc3339String; pub use namada_core::types::*; pub use namada_macros::transaction; use namada_vm_env::tx::*; use namada_vm_env::{read_from_buffer, 
read_key_val_bytes_from_buffer}; -pub use crate::ibc::IbcActions; - /// Log a string. The message will be printed at the `tracing::Level::Info`. pub fn log_string>(msg: T) { let msg = msg.as_ref(); @@ -73,6 +70,7 @@ macro_rules! debug_log { } /// Execution context provides access to the host environment functions +#[derive(Debug, Clone)] pub struct Ctx(()); impl Ctx { @@ -137,12 +135,24 @@ impl StorageRead for Ctx { .expect("Cannot convert the ID string")) } - fn get_block_height( - &self, - ) -> Result { + fn get_block_height(&self) -> Result { Ok(BlockHeight(unsafe { namada_tx_get_block_height() })) } + fn get_block_header( + &self, + height: BlockHeight, + ) -> Result, Error> { + let read_result = unsafe { namada_tx_get_block_header(height.0) }; + match read_from_buffer(read_result, namada_tx_result_buffer) { + Some(value) => Ok(Some( + Header::try_from_slice(&value[..]) + .expect("The conversion shouldn't fail"), + )), + None => Ok(None), + } + } + fn get_block_hash( &self, ) -> Result { @@ -230,16 +240,6 @@ impl StorageWrite for Ctx { } impl TxEnv for Ctx { - fn get_block_time(&self) -> Result { - let read_result = unsafe { namada_tx_get_block_time() }; - let time_value = read_from_buffer(read_result, namada_tx_result_buffer) - .expect("The block time should exist"); - Ok(Rfc3339String( - String::try_from_slice(&time_value[..]) - .expect("The conversion shouldn't fail"), - )) - } - fn write_temp( &mut self, key: &storage::Key, diff --git a/tx_prelude/src/proof_of_stake.rs b/tx_prelude/src/proof_of_stake.rs index 6e4c4cf136b..22c00ca5993 100644 --- a/tx_prelude/src/proof_of_stake.rs +++ b/tx_prelude/src/proof_of_stake.rs @@ -72,12 +72,12 @@ impl Ctx { dkg_key, commission_rate, max_commission_rate_change, - validator_vp_code, + validator_vp_code_hash, }: InitValidator, ) -> EnvResult
diff --git a/tx_prelude/src/proof_of_stake.rs b/tx_prelude/src/proof_of_stake.rs
index 6e4c4cf136b..22c00ca5993 100644
--- a/tx_prelude/src/proof_of_stake.rs
+++ b/tx_prelude/src/proof_of_stake.rs
@@ -72,12 +72,12 @@ impl Ctx {
             dkg_key,
             commission_rate,
             max_commission_rate_change,
-            validator_vp_code,
+            validator_vp_code_hash,
         }: InitValidator,
     ) -> EnvResult<Address> {
         let current_epoch = self.get_block_epoch()?;
         // Init validator account
-        let validator_address = self.init_account(&validator_vp_code)?;
+        let validator_address = self.init_account(&validator_vp_code_hash)?;
         let pk_key = key::pk_key(&validator_address);
         self.write(&pk_key, &account_key)?;
         let protocol_pk_key = key::protocol_pk_key(&validator_address);
diff --git a/tx_prelude/src/token.rs b/tx_prelude/src/token.rs
index 5ea85495544..adf8fa92e9b 100644
--- a/tx_prelude/src/token.rs
+++ b/tx_prelude/src/token.rs
@@ -138,7 +138,7 @@ pub fn transfer_with_keys(
     dest_key: &storage::Key,
     amount: Amount,
 ) -> TxResult {
-    let src_owner = is_any_multitoken_balance_key(src_key).map(|(_, o)| o);
+    let src_owner = is_any_token_balance_key(src_key);
     let src_bal: Option<Amount> = match src_owner {
         Some(Address::Internal(InternalAddress::IbcMint)) => {
             Some(Amount::max())
@@ -147,37 +147,20 @@ pub fn transfer_with_keys(
             log_string("invalid transfer from the burn address");
             unreachable!()
         }
-        Some(_) => ctx.read(src_key)?,
-        None => {
-            // the key is not a multitoken key
-            match is_any_token_balance_key(src_key) {
-                Some(_) => ctx.read(src_key)?,
-                None => {
-                    log_string(format!("invalid balance key: {}", src_key));
-                    unreachable!()
-                }
-            }
-        }
+        _ => ctx.read(src_key)?,
     };
     let mut src_bal = src_bal.unwrap_or_else(|| {
         log_string(format!("src {} has no balance", src_key));
         unreachable!()
     });
     src_bal.spend(&amount);
-    let dest_owner = is_any_multitoken_balance_key(dest_key).map(|(_, o)| o);
+    let dest_owner = is_any_token_balance_key(dest_key);
     let mut dest_bal: Amount = match dest_owner {
         Some(Address::Internal(InternalAddress::IbcMint)) => {
             log_string("invalid transfer to the mint address");
             unreachable!()
         }
-        Some(_) => ctx.read(dest_key)?.unwrap_or_default(),
-        None => match is_any_token_balance_key(dest_key) {
-            Some(_) => ctx.read(dest_key)?.unwrap_or_default(),
-            None => {
-                log_string(format!("invalid balance key: {}", dest_key));
-                unreachable!()
-            }
-        },
+        _ => ctx.read(dest_key)?.unwrap_or_default(),
     };
     dest_bal.receive(&amount);
     match src_owner {
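To spell out the simplification above (not part of the patch): `is_any_token_balance_key` now recognizes both plain and multitoken balance keys and yields the owner directly, so the nested multitoken fallback is gone. A sketch of the resulting classification; `describe_src` is a hypothetical helper mirroring the `match` in `transfer_with_keys`.

```rust
use namada_core::types::address::{Address, InternalAddress};
use namada_core::types::storage::Key;
use namada_core::types::token::is_any_token_balance_key;

// Hypothetical helper: classify a source balance key the way
// `transfer_with_keys` now does, in a single match.
fn describe_src(src_key: &Key) -> &'static str {
    match is_any_token_balance_key(src_key) {
        // The IBC mint address can always cover a transfer.
        Some(Address::Internal(InternalAddress::IbcMint)) => "mint (unlimited)",
        // Transfers must never originate from the burn address.
        Some(Address::Internal(InternalAddress::IbcBurn)) => "burn (invalid)",
        // Any other owner: its actual balance is read from storage.
        Some(_) => "regular owner balance",
        // Not a balance key at all.
        None => "invalid balance key",
    }
}
```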
diff --git a/vm_env/Cargo.toml b/vm_env/Cargo.toml
index ced6ae52ed1..00141fcd8c3 100644
--- a/vm_env/Cargo.toml
+++ b/vm_env/Cargo.toml
@@ -4,7 +4,7 @@ edition = "2021"
 license = "GPL-3.0"
 name = "namada_vm_env"
 resolver = "2"
-version = "0.14.0"
+version = "0.16.0"
 
 [features]
 default = ["abciplus"]
diff --git a/vm_env/src/lib.rs b/vm_env/src/lib.rs
index e275c840d10..9df39d62713 100644
--- a/vm_env/src/lib.rs
+++ b/vm_env/src/lib.rs
@@ -84,8 +84,8 @@ pub mod tx {
         // Get the current block height
         pub fn namada_tx_get_block_height() -> u64;
 
-        // Get the time of the current block header
-        pub fn namada_tx_get_block_time() -> i64;
+        // Get the current block header
+        pub fn namada_tx_get_block_header(height: u64) -> i64;
 
         // Get the current block hash
         pub fn namada_tx_get_block_hash(result_ptr: u64);
@@ -166,6 +166,9 @@ pub mod vp {
         // Get the current block height
         pub fn namada_vp_get_block_height() -> u64;
 
+        // Get the current block header
+        pub fn namada_vp_get_block_header(height: u64) -> i64;
+
         // Get the current block hash
         pub fn namada_vp_get_block_hash(result_ptr: u64);
diff --git a/vm_env/src/token.rs b/vm_env/src/token.rs
deleted file mode 100644
index 2f718ee0335..00000000000
--- a/vm_env/src/token.rs
+++ /dev/null
@@ -1,162 +0,0 @@
-use std::collections::BTreeSet;
-
-use masp_primitives::transaction::Transaction;
-use namada::types::address::{masp, Address, InternalAddress};
-use namada::types::storage::{Key, KeySeg};
-use namada::types::token;
-
-/// Vp imports and functions.
-pub mod vp {
-    use namada::types::storage::KeySeg;
-    pub use namada::types::token::*;
-
-    use super::*;
-    use crate::imports::vp;
-
-    /// A token validity predicate.
-    pub fn vp(
-        token: &Address,
-        keys_changed: &BTreeSet<Key>,
-        verifiers: &BTreeSet<Address>,
-    ) -> bool {
-        let mut change: Change = 0;
-        let all_checked = keys_changed.iter().all(|key| {
-            match token::is_balance_key(token, key) {
-                None => {
-                    // Unknown changes to this address space are disallowed, but
-                    // unknown changes anywhere else are permitted
-                    key.segments.get(0) != Some(&token.to_db_key())
-                }
-                Some(owner) => {
-                    // accumulate the change
-                    let key = key.to_string();
-                    let pre: Amount = match owner {
-                        Address::Internal(InternalAddress::IbcMint) => {
-                            Amount::max()
-                        }
-                        Address::Internal(InternalAddress::IbcBurn) => {
-                            Amount::default()
-                        }
-                        _ => vp::read_pre(&key).unwrap_or_default(),
-                    };
-                    let post: Amount = match owner {
-                        Address::Internal(InternalAddress::IbcMint) => {
-                            vp::read_temp(&key).unwrap_or_else(Amount::max)
-                        }
-                        Address::Internal(InternalAddress::IbcBurn) => {
-                            vp::read_temp(&key).unwrap_or_default()
-                        }
-                        _ => vp::read_post(&key).unwrap_or_default(),
-                    };
-                    let this_change = post.change() - pre.change();
-                    change += this_change;
-                    // make sure that the spender approved the transaction
-                    if this_change < 0 {
-                        return verifiers.contains(owner) || *owner == masp();
-                    }
-                    true
-                }
-            }
-        });
-        all_checked && change == 0
-    }
-}
-
-/// Tx imports and functions.
-pub mod tx {
-    pub use namada::types::token::*;
-
-    use super::*;
-    use crate::imports::tx;
-
-    /// A token transfer that can be used in a transaction.
-    pub fn transfer(
-        src: &Address,
-        dest: &Address,
-        token: &Address,
-        amount: Amount,
-        key: &Option<String>,
-        shielded: &Option<Transaction>,
-    ) {
-        let src_key = token::balance_key(token, src);
-        let dest_key = token::balance_key(token, dest);
-        let src_bal: Option<Amount> = tx::read(&src_key.to_string());
-        let mut src_bal = src_bal.unwrap_or_else(|| match src {
-            Address::Internal(InternalAddress::IbcMint) => Amount::max(),
-            _ => {
-                tx::log_string(format!("src {} has no balance", src));
-                unreachable!()
-            }
-        });
-        let mut dest_bal: Amount =
-            tx::read(&dest_key.to_string()).unwrap_or_default();
-        // Only make changes to transparent balances if asset is not being
-        // transferred to self
-        if src != dest {
-            src_bal.spend(&amount);
-            dest_bal.receive(&amount);
-            match src {
-                Address::Internal(InternalAddress::IbcMint) => {
-                    tx::write_temp(&src_key.to_string(), src_bal)
-                }
-                Address::Internal(InternalAddress::IbcBurn) => {
-                    tx::log_string("invalid transfer from the burn address");
-                    unreachable!()
-                }
-                _ => tx::write(&src_key.to_string(), src_bal),
-            }
-            match dest {
-                Address::Internal(InternalAddress::IbcMint) => {
-                    tx::log_string("invalid transfer to the mint address");
-                    unreachable!()
-                }
-                Address::Internal(InternalAddress::IbcBurn) => {
-                    tx::write_temp(&dest_key.to_string(), dest_bal)
-                }
-                _ => tx::write(&dest_key.to_string(), dest_bal),
-            }
-        }
-        // If this transaction has a shielded component, then handle it
-        // separately
-        if let Some(shielded) = shielded {
-            let masp_addr = masp();
-            tx::insert_verifier(&masp_addr);
-            let head_tx_key = Key::from(masp_addr.to_db_key())
-                .push(&HEAD_TX_KEY.to_owned())
-                .expect("Cannot obtain a storage key");
-            let current_tx_idx: u64 =
-                tx::read(&head_tx_key.to_string()).unwrap_or(0);
-            let current_tx_key = Key::from(masp_addr.to_db_key())
-                .push(&(TX_KEY_PREFIX.to_owned() + &current_tx_idx.to_string()))
-                .expect("Cannot obtain a storage key");
-            // Save the Transfer object and its location within the blockchain
-            // so that clients do not have to separately look these
-            // up
-            let transfer = Transfer {
-                source: src.clone(),
-                target: dest.clone(),
-                token: token.clone(),
-                amount,
-                key: key.clone(),
-                shielded: Some(shielded.clone()),
-            };
-            tx::write(
-                &current_tx_key.to_string(),
-                (
-                    tx::get_block_epoch(),
-                    tx::get_block_height(),
-                    tx::get_tx_index(),
-                    transfer,
-                ),
-            );
-            tx::write(&head_tx_key.to_string(), current_tx_idx + 1);
-            // If storage key has been supplied, then pin this transaction to it
-            if let Some(key) = key {
-                let pin_key = Key::from(masp_addr.to_db_key())
-                    .push(&(PIN_KEY_PREFIX.to_owned() + key))
-                    .expect("Cannot obtain a storage key");
-                tx::write(&pin_key.to_string(), current_tx_idx);
-            }
-        }
-    }
-}
diff --git a/vp_prelude/Cargo.toml b/vp_prelude/Cargo.toml
index 4cc10075bf5..f5c1c937fe8 100644
--- a/vp_prelude/Cargo.toml
+++ b/vp_prelude/Cargo.toml
@@ -4,7 +4,7 @@ edition = "2021"
 license = "GPL-3.0"
 name = "namada_vp_prelude"
 resolver = "2"
-version = "0.14.0"
+version = "0.16.0"
 
 [features]
 default = ["abciplus"]
@@ -21,4 +21,4 @@ namada_proof_of_stake = {path = "../proof_of_stake", default-features = false}
 namada_vm_env = {path = "../vm_env", default-features = false}
 borsh = "0.9.0"
 sha2 = "0.10.1"
-thiserror = "1.0.30"
+thiserror = "1.0.38"
diff --git a/vp_prelude/src/lib.rs b/vp_prelude/src/lib.rs
index 0d0680a2e69..57ac5a388fe 100644
--- a/vp_prelude/src/lib.rs
+++ b/vp_prelude/src/lib.rs
@@ -7,7 +7,6 @@
 #![deny(rustdoc::private_intra_doc_links)]
 
 pub mod key;
-pub mod token;
 
 // used in the VP input
 use core::convert::AsRef;
@@ -31,7 +30,7 @@ use namada_core::types::hash::{Hash, HASH_LENGTH};
 use namada_core::types::internal::HostEnvResult;
 use namada_core::types::key::*;
 use namada_core::types::storage::{
-    BlockHash, BlockHeight, Epoch, TxIndex, BLOCK_HASH_LENGTH,
+    BlockHash, BlockHeight, Epoch, Header, TxIndex, BLOCK_HASH_LENGTH,
 };
 pub use namada_core::types::*;
 pub use namada_macros::validity_predicate;
@@ -54,8 +53,8 @@ pub fn is_tx_whitelisted(ctx: &Ctx) -> VpResult {
         || whitelist.contains(&tx_hash.to_string().to_lowercase()))
 }
 
-pub fn is_vp_whitelisted(ctx: &Ctx, vp_bytes: &[u8]) -> VpResult {
-    let vp_hash = sha256(vp_bytes);
+pub fn is_vp_whitelisted(ctx: &Ctx, vp_hash: &[u8]) -> VpResult {
+    let vp_hash = Hash::try_from(vp_hash).unwrap();
     let key = parameters::storage::get_vp_whitelist_storage_key();
     let whitelist: Vec<String> = ctx.read_pre(&key)?.unwrap_or_default();
     // if whitelist is empty, allow any transaction
@@ -239,6 +238,14 @@ impl<'view> VpEnv<'view> for Ctx {
         get_block_height()
     }
 
+    fn get_block_header(
+        &self,
+        height: BlockHeight,
+    ) -> Result<Option<Header>, Error> {
+        // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl
+        get_block_header(height)
+    }
+
     fn get_block_hash(&self) -> Result<BlockHash, Error> {
         // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl
         get_block_hash()
@@ -265,11 +272,7 @@ impl<'view> VpEnv<'view> for Ctx {
         iter_prefix_pre_impl(prefix)
     }
 
-    fn eval(
-        &self,
-        vp_code: Vec<u8>,
-        input_data: Vec<u8>,
-    ) -> Result<bool, Error> {
+    fn eval(&self, vp_code: Hash, input_data: Vec<u8>) -> Result<bool, Error> {
         let result = unsafe {
             namada_vp_eval(
                 vp_code.as_ptr() as _,
@@ -361,6 +364,13 @@ impl StorageRead for CtxPreStorageRead<'_> {
         get_block_height()
     }
 
+    fn get_block_header(
+        &self,
+        height: BlockHeight,
+    ) -> Result<Option<Header>, Error> {
+        get_block_header(height)
+    }
+
     fn get_block_hash(&self) -> Result<BlockHash, Error> {
         get_block_hash()
     }
@@ -424,6 +434,13 @@ impl StorageRead for CtxPostStorageRead<'_> {
         get_block_height()
     }
 
+    fn get_block_header(
+        &self,
+        height: BlockHeight,
+    ) -> Result<Option<Header>, Error> {
+        get_block_header(height)
+    }
+
     fn get_block_hash(&self) -> Result<BlockHash, Error> {
         get_block_hash()
     }
@@ -478,6 +495,17 @@ fn get_block_height() -> Result<BlockHeight, Error> {
     Ok(BlockHeight(unsafe { namada_vp_get_block_height() }))
 }
 
+fn get_block_header(height: BlockHeight) -> Result<Option<Header>, Error> {
+    let read_result = unsafe { namada_vp_get_block_header(height.0) };
+    match read_from_buffer(read_result, namada_vp_result_buffer) {
+        Some(value) => Ok(Some(
+            Header::try_from_slice(&value[..])
+                .expect("The conversion shouldn't fail"),
+        )),
+        None => Ok(None),
+    }
+}
+
 fn get_block_hash() -> Result<BlockHash, Error> {
     let result = Vec::with_capacity(BLOCK_HASH_LENGTH);
     unsafe {
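Worth noting (illustration only): after this change, VPs reference other VP code by its 32-byte hash, both in `eval` and in `is_vp_whitelisted`. A minimal sketch, assuming `Hash` exposes its bytes via `AsRef<[u8]>`; `delegate` is a hypothetical caller, not part of the patch.

```rust
use namada_vp_prelude::*;

// Hypothetical: approve only if the other predicate is whitelisted and
// itself accepts the given input.
fn delegate(ctx: &Ctx, vp_code_hash: Hash, input: Vec<u8>) -> VpResult {
    // The whitelist check now takes the code hash bytes, not the bytecode.
    if !is_vp_whitelisted(ctx, vp_code_hash.as_ref())? {
        return reject();
    }
    // `eval` likewise now receives the code `Hash` instead of `Vec<u8>`.
    ctx.eval(vp_code_hash, input)
}
```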
diff --git a/vp_prelude/src/token.rs b/vp_prelude/src/token.rs
deleted file mode 100644
index 7d0a695b20a..00000000000
--- a/vp_prelude/src/token.rs
+++ /dev/null
@@ -1,76 +0,0 @@
-//! A fungible token validity predicate.
-
-use std::collections::BTreeSet;
-
-use namada_core::types::address::{self, Address, InternalAddress};
-use namada_core::types::storage::Key;
-/// Vp imports and functions.
-use namada_core::types::storage::KeySeg;
-use namada_core::types::token;
-pub use namada_core::types::token::*;
-
-use super::*;
-
-/// A token validity predicate.
-pub fn vp(
-    ctx: &Ctx,
-    token: &Address,
-    keys_touched: &BTreeSet<Key>,
-    verifiers: &BTreeSet<Address>,
-) -> VpResult {
-    let mut change: Change = 0;
-    for key in keys_touched.iter() {
-        let owner: Option<&Address> = token::is_balance_key(token, key)
-            .or_else(|| {
-                token::is_multitoken_balance_key(token, key).map(|a| a.1)
-            });
-
-        match owner {
-            None => {
-                if token::is_total_supply_key(key, token) {
-                    // check if total supply is changed, which it should never
-                    // be from a tx
-                    let total_pre: Amount = ctx.read_pre(key)?.unwrap();
-                    let total_post: Amount = ctx.read_post(key)?.unwrap();
-                    if total_pre != total_post {
-                        return reject();
-                    }
-                } else if key.segments.get(0) == Some(&token.to_db_key()) {
-                    // Unknown changes to this address space are disallowed, but
-                    // unknown changes anywhere else are permitted
-                    return reject();
-                }
-            }
-            Some(owner) => {
-                // accumulate the change
-                let pre: Amount = match owner {
-                    Address::Internal(InternalAddress::IbcMint) => {
-                        Amount::max()
-                    }
-                    Address::Internal(InternalAddress::IbcBurn) => {
-                        Amount::default()
-                    }
-                    _ => ctx.read_pre(key)?.unwrap_or_default(),
-                };
-                let post: Amount = match owner {
-                    Address::Internal(InternalAddress::IbcMint) => {
-                        ctx.read_temp(key)?.unwrap_or_else(Amount::max)
-                    }
-                    Address::Internal(InternalAddress::IbcBurn) => {
-                        ctx.read_temp(key)?.unwrap_or_default()
-                    }
-                    _ => ctx.read_post(key)?.unwrap_or_default(),
-                };
-                let this_change = post.change() - pre.change();
-                change += this_change;
-                // make sure that the spender approved the transaction
-                if this_change < 0
-                    && !(verifiers.contains(owner) || *owner == address::masp())
-                {
-                    return reject();
-                }
-            }
-        }
-    }
-    Ok(change == 0)
-}
diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock
index 369c9aa78d5..5d8409ef247 100644
--- a/wasm/Cargo.lock
+++ b/wasm/Cargo.lock
@@ -4,11 +4,11 @@ version = 3
 
 [[package]]
 name = "addr2line"
-version = "0.17.0"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b"
+checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97"
 dependencies = [
- "gimli 0.26.2",
+ "gimli 0.27.2",
 ]
 
 [[package]]
@@ -44,16 +44,16 @@ version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
 dependencies = [
- "getrandom 0.2.8",
+ "getrandom 0.2.9",
  "once_cell",
  "version_check",
 ]
 
 [[package]]
 name = "aho-corasick"
-version = "0.7.19"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
+checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04"
 dependencies = [
  "memchr",
 ]
@@ -69,9 +69,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.66"
+version = "1.0.71"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6"
+checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
 
 [[package]]
 name = "ark-bls12-381"
@@ -98,6 +98,18 @@ dependencies = [
  "zeroize",
 ]
 
+[[package]]
+name = "ark-ed-on-bls12-381"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43b7ada17db3854f5994e74e60b18e10e818594935ee7e1d329800c117b32970"
+dependencies = [
+ "ark-bls12-381",
+ "ark-ec",
+ "ark-ff",
+ "ark-std",
+]
+
 [[package]]
 name = "ark-ff"
 version = "0.3.0"
@@ -112,7 +124,7 @@ dependencies = [
  "num-bigint",
  "num-traits",
  "paste",
- "rustc_version",
"rustc_version 0.3.3", "zeroize", ] @@ -123,7 +135,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -135,7 +147,20 @@ dependencies = [ "num-bigint", "num-traits", "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "ark-poly" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b0f78f47537c2f15706db7e98fe64cc1711dbf9def81218194e17239e53e5aa" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.11.2", ] [[package]] @@ -157,7 +182,7 @@ checksum = "8dd4e5f0bf8285d5ed538d27fab7411f3e297908fd93c62195de8bee3f199e82" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -172,9 +197,9 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -188,72 +213,225 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +[[package]] +name = "async-channel" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] + +[[package]] +name = "async-executor" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" +dependencies = [ + "async-lock", + "async-task", + "concurrent-queue", + "fastrand", + "futures-lite", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" +dependencies = [ + "async-channel", + "async-executor", + "async-io", + "async-lock", + "blocking", + "futures-lite", + "once_cell", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock", + "autocfg", + "cfg-if 1.0.0", + "concurrent-queue", + "futures-lite", + "log", + "parking", + "polling", + "rustix", + "slab", + "socket2", + "waker-fn", +] + +[[package]] +name = "async-lock" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" +dependencies = [ + "event-listener", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-channel", + "async-global-executor", + "async-io", + "async-lock", + "crossbeam-utils 0.8.15", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + [[package]] name = 
"async-stream" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", + "pin-project-lite", ] [[package]] name = "async-stream-impl" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] +[[package]] +name = "async-task" +version = "4.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" + [[package]] name = "async-trait" -version = "0.1.58" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "async-tungstenite" -version = "0.12.0" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e00550829ef8e2c4115250d0ee43305649b0fa95f78a32ce5b07da0b73d95c5c" +checksum = "a1b71b31561643aa8e7df3effe284fa83ab1a840e52294c5f4bd7bfd8b2becbb" dependencies = [ "futures-io", "futures-util", "log", "pin-project-lite", + "rustls-native-certs 0.6.2", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tungstenite", - "webpki-roots", ] +[[package]] +name = "atomic-waker" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" + [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "axum" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" -version = "0.3.66" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ "addr2line", "cc", "cfg-if 1.0.0", "libc", "miniz_oxide", - "object 0.29.0", + "object 0.30.3", "rustc-demangle", ] @@ -269,6 +447,12 @@ version = "0.13.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" + [[package]] name = "base64ct" version = "1.0.1" @@ -281,18 +465,24 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" +[[package]] +name = "bech32" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" + [[package]] name = "bellman" version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43473b34abc4b0b405efa0a250bac87eea888182b21687ee5c8115d279b0fda5" dependencies = [ - "bitvec", + "bitvec 0.22.3", "blake2s_simd 0.5.11", "byteorder", - "crossbeam-channel 0.5.6", - "ff", - "group", + "crossbeam-channel 0.5.8", + "ff 0.11.1", + "group 0.11.0", "lazy_static", "log", "num_cpus", @@ -312,6 +502,24 @@ dependencies = [ "crunchy 0.1.6", ] +[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" +dependencies = [ + "serde", +] + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bip0039" version = "0.9.0" @@ -343,21 +551,21 @@ checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" [[package]] name = "bitcoin" -version = "0.28.0" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42b2a9a8e3c7544f5ce2b475f2f56580a3102b37e0ee001558ad4faedcf56cf4" +checksum = "0694ea59225b0c5f3cb405ff3f670e4828358ed26aec49dc352f730f0cb1a8a3" dependencies = [ - "bech32", + "bech32 0.9.1", "bitcoin_hashes", - "secp256k1", + "secp256k1 0.24.3", "serde", ] [[package]] name = "bitcoin_hashes" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006cc91e1a1d99819bc5b8214be3555c1f0611b169f527a1fdc54ed1f2b745b0" +checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" dependencies = [ "serde", ] @@ -374,10 +582,31 @@ version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5237f00a8c86130a0cc317830e558b966dd7850d48a953d998c813f01a41b527" dependencies = [ - "funty", - "radium", + "funty 1.2.0", + "radium 0.6.2", + "tap", + "wyz 0.4.0", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty 2.0.0", + "radium 0.7.0", "tap", - "wyz", + "wyz 0.5.1", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.6", ] [[package]] @@ -388,18 +617,18 @@ checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ "arrayref", "arrayvec 0.5.2", - "constant_time_eq", 
+ "constant_time_eq 0.1.5", ] [[package]] name = "blake2b_simd" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127" +checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" dependencies = [ "arrayref", "arrayvec 0.7.2", - "constant_time_eq", + "constant_time_eq 0.2.5", ] [[package]] @@ -410,32 +639,32 @@ checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" dependencies = [ "arrayref", "arrayvec 0.5.2", - "constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] name = "blake2s_simd" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db539cc2b5f6003621f1cd9ef92d7ded8ea5232c7de0f9faa2de251cd98730d4" +checksum = "6637f448b9e61dfadbdcbae9a885fadee1f3eaffb1f8d3c1965d3ade8bdfd44f" dependencies = [ "arrayref", "arrayvec 0.7.2", - "constant_time_eq", + "constant_time_eq 0.2.5", ] [[package]] name = "blake3" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08e53fc5a564bb15bfe6fae56bd71522205f1f91893f9c0116edad6496c183f" +checksum = "42ae2468a89544a466886840aa467a25b766499f4f04bf7d9fcd10ecee9fccef" dependencies = [ "arrayref", "arrayvec 0.7.2", "cc", "cfg-if 1.0.0", - "constant_time_eq", - "digest 0.10.5", + "constant_time_eq 0.2.5", + "digest 0.10.6", ] [[package]] @@ -444,15 +673,14 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding", "generic-array", ] [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -473,14 +701,29 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +[[package]] +name = "blocking" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" +dependencies = [ + "async-channel", + "async-lock", + "async-task", + "atomic-waker", + "fastrand", + "futures-lite", + "log", +] + [[package]] name = "bls12_381" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a829c821999c06be34de314eaeb7dd1b42be38661178bc26ad47a4eacebdb0f9" dependencies = [ - "ff", - "group", + "ff 0.11.1", + "group 0.11.0", "pairing", "rand_core 0.6.4", "subtle", @@ -502,9 +745,9 @@ source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", - "proc-macro-crate", + "proc-macro-crate 0.1.5", "proc-macro2", - "syn", + "syn 1.0.109", ] [[package]] @@ -514,7 +757,7 @@ source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -524,34 +767,47 @@ source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] +[[package]] +name = "bs58" +version = "0.4.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" + [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" + +[[package]] +name = "byte-slice-cast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytecheck" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f" +checksum = "13fe11640a23eb24562225322cd3e452b93a3d4091d62fab69c70542fcd17d1f" dependencies = [ "bytecheck_derive", "ptr_meta", + "simdutf8", ] [[package]] name = "bytecheck_derive" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" +checksum = "e31225543cb46f81a7e224762764f4a6a0f097b1db0b175f69e8065efaa42de5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -568,15 +824,18 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +dependencies = [ + "serde", +] [[package]] name = "camino" -version = "1.1.1" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad0e1e3e88dd237a156ab9f571021b8a158caa0ae44b1968a241efb5144c1e" +checksum = "c530edf18f37068ac2d977409ed5cd50d53d73bc653c7647b48eb78976ac9ae2" dependencies = [ "serde", ] @@ -598,16 +857,16 @@ checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" dependencies = [ "camino", "cargo-platform", - "semver 1.0.14", + "semver 1.0.17", "serde", "serde_json", ] [[package]] name = "cc" -version = "1.0.76" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cfg-if" @@ -648,9 +907,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "num-integer", @@ -682,30 +941,29 @@ version = "0.5.0" source = "git+https://github.com/marmeladema/clru-rs.git?rev=71ca566#71ca566915f21f3c308091ca7756a91b0f8b5afc" [[package]] -name = "codespan-reporting" -version = "0.11.1" +name = "concat-idents" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +checksum = "0fe0e1d9f7de897d18e590a7496b5facbe87813f746cf4b8db596ba77e07e832" dependencies = [ - "termcolor", - "unicode-width", + 
"quote", + "syn 1.0.109", ] [[package]] -name = "concat-idents" -version = "1.1.4" +name = "concurrent-queue" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fe0e1d9f7de897d18e590a7496b5facbe87813f746cf4b8db596ba77e07e832" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ - "quote", - "syn", + "crossbeam-utils 0.8.15", ] [[package]] name = "const-oid" -version = "0.7.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" [[package]] name = "constant_time_eq" @@ -713,6 +971,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "constant_time_eq" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b" + [[package]] name = "contracts" version = "0.6.3" @@ -721,7 +985,7 @@ checksum = "f1d1429e3bd78171c65aa010eabcdf8f863ba3254728dbfb0ad4b1545beac15c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -736,15 +1000,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" dependencies = [ "libc", ] @@ -830,50 +1094,35 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.15", ] [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.11", - "crossbeam-utils 0.8.12", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", + "crossbeam-epoch", + "crossbeam-utils 0.8.15", ] [[package]] name = "crossbeam-epoch" -version = "0.9.11" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" 
dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", - "memoffset 0.6.5", + "crossbeam-utils 0.8.15", + "memoffset 0.8.0", "scopeguard", ] @@ -890,9 +1139,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.12" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if 1.0.0", ] @@ -911,9 +1160,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.3.2" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -966,13 +1215,29 @@ dependencies = [ "crypto_api", ] +[[package]] +name = "ct-codecs" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3b7eb4404b8195a9abb6356f4ac07d8ba267045c8d6d220ac4dc992e6cc75df" + [[package]] name = "ct-logs" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" dependencies = [ - "sct", + "sct 0.6.1", +] + +[[package]] +name = "ctor" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote", + "syn 1.0.109", ] [[package]] @@ -1001,55 +1266,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "cxx" -version = "1.0.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97abf9f0eca9e52b7f81b945524e76710e6cb2366aead23b7d4fbf72e281f888" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc32cc5fea1d894b77d269ddb9f192110069a8a9c1f1d441195fba90553dea3" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2", - "quote", - "scratch", - "syn", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ca220e4794c934dc6b1207c3b42856ad4c302f2df1712e9f8d2eec5afaacf1f" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b846f081361125bfc8dc9d3940c84e1fd83ba54bbca7b17cd29483c828be0704" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "darling" -version = "0.14.2" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" +checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" dependencies = [ "darling_core", "darling_macro", @@ -1057,43 +1278,49 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.2" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", 
"ident_case", "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "darling_macro" -version = "0.14.2" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" +checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "data-encoding" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" [[package]] name = "der" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ "const-oid", ] +[[package]] +name = "derivation-path" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e5c37193a1db1d8ed868c03ec7b152175f26160a5b740e5e484143877e0adf0" + [[package]] name = "derivative" version = "2.2.0" @@ -1102,7 +1329,7 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1113,7 +1340,7 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1127,11 +1354,11 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -1177,6 +1404,23 @@ dependencies = [ "winapi", ] +[[package]] +name = "displaydoc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.15", +] + +[[package]] +name = "dyn-clone" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" + [[package]] name = "dynasm" version = "1.2.3" @@ -1189,7 +1433,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1205,9 +1449,9 @@ dependencies = [ [[package]] name = "ecdsa" -version = "0.13.4" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ "der", "elliptic-curve", @@ -1217,10 +1461,11 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ + "serde", "signature", ] @@ -1247,28 +1492,45 @@ checksum 
= "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ "curve25519-dalek", "ed25519", + "merlin", + "rand 0.7.3", + "serde", + "serde_bytes", "sha2 0.9.9", "zeroize", ] +[[package]] +name = "ed25519-dalek-bip32" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d2be62a4061b872c8c0873ee4fc6f101ce7b889d039f019c5fa2af471a59908" +dependencies = [ + "derivation-path", + "ed25519-dalek", + "hmac 0.12.1", + "sha2 0.10.6", +] + [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "elliptic-curve" -version = "0.11.12" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ "base16ct", "crypto-bigint", "der", - "ff", + "digest 0.10.6", + "ff 0.12.1", "generic-array", - "group", + "group 0.12.1", "rand_core 0.6.4", "sec1", "subtle", @@ -1292,28 +1554,28 @@ checksum = "c134c37760b27a871ba422106eedbb8247da973a09e82558bf26d619c882b159" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "enumset" -version = "1.0.12" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19be8061a06ab6f3a6cf21106c873578bf01bd42ad15e0311a9c76161cb1c753" +checksum = "e875f1719c16de097dee81ed675e2d9bb63096823ed3f0ca827b7dea3028bbbb" dependencies = [ "enumset_derive", ] [[package]] name = "enumset_derive" -version = "0.6.1" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e7b551eba279bf0fa88b83a46330168c1560a52a94f5126f892f0b364ab3e0" +checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af" dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -1321,10 +1583,40 @@ name = "equihash" version = "0.1.0" source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" dependencies = [ - "blake2b_simd 1.0.0", + "blake2b_simd 1.0.1", "byteorder", ] +[[package]] +name = "erased-serde" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f2b0c2380453a92ea8b6c8e5f64ecaafccddde8ceab55ff7a8ac1029f894569" +dependencies = [ + "serde", +] + +[[package]] +name = "errno" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "error-chain" version = "0.12.4" @@ -1334,6 +1626,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + [[package]] name = "eyre" version = "0.6.8" @@ -1352,17 +1650,54 @@ checksum = 
"4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] +[[package]] +name = "ferveo" +version = "0.1.1" +source = "git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" +dependencies = [ + "anyhow", + "ark-bls12-381", + "ark-ec", + "ark-ed-on-bls12-381", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "bincode", + "blake2", + "blake2b_simd 1.0.1", + "borsh", + "digest 0.10.6", + "ed25519-dalek", + "either", + "ferveo-common", + "group-threshold-cryptography", + "hex", + "itertools", + "measure_time", + "miracl_core", + "num", + "rand 0.7.3", + "rand 0.8.5", + "serde", + "serde_bytes", + "serde_json", + "subproductdomain", + "subtle", + "zeroize", +] + [[package]] name = "ferveo-common" version = "0.1.0" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" dependencies = [ "anyhow", "ark-ec", @@ -1378,11 +1713,33 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" dependencies = [ - "bitvec", + "bitvec 0.22.3", "rand_core 0.6.4", "subtle", ] +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -1431,14 +1788,19 @@ dependencies = [ [[package]] name = "funty" version = "1.2.0" +source = "git+https://github.com/bitvecto-rs/funty/?rev=7ef0d890fbcd8b3def1635ac1a877fc298488446#7ef0d890fbcd8b3def1635ac1a877fc298488446" + +[[package]] +name = "funty" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -1451,9 +1813,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -1461,15 +1823,15 @@ dependencies = [ [[package]] name = "futures-core" -version 
= "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -1478,38 +1840,53 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" + +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -1525,9 +1902,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1540,21 +1917,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ "cfg-if 1.0.0", - "js-sys", "libc", "wasi 0.9.0+wasi-snapshot-preview1", - "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ 
"cfg-if 1.0.0", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -1570,15 +1947,27 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.2" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "gloo-timers" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] [[package]] name = "group" @@ -1587,11 +1976,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" dependencies = [ "byteorder", - "ff", + "ff 0.11.1", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", "rand_core 0.6.4", "subtle", ] +[[package]] +name = "group-threshold-cryptography" +version = "0.1.0" +source = "git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" +dependencies = [ + "anyhow", + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "blake2b_simd 1.0.1", + "chacha20", + "hex", + "itertools", + "miracl_core", + "rand 0.8.5", + "rand_core 0.6.4", + "rayon", + "subproductdomain", + "thiserror", +] + [[package]] name = "gumdrop" version = "0.8.1" @@ -1609,14 +2033,14 @@ checksum = "729f9bd3449d77e7831a18abfb7ba2f99ee813dfd15b8c2167c9a54ba20aa99d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "h2" -version = "0.3.15" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ "bytes", "fnv", @@ -1627,7 +2051,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util", "tracing", ] @@ -1644,8 +2068,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f186b85ed81082fb1cf59d52b0111f02915e89a4ac61d292b38d075e570f3a9" dependencies = [ "blake2b_simd 0.5.11", - "ff", - "group", + "ff 0.11.1", + "group 0.11.0", "pasta_curves", "rand 0.8.5", "rayon", @@ -1671,9 +2095,9 @@ dependencies = [ [[package]] name = "hdpath" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dafb09e5d85df264339ad786a147d9de1da13687a3697c52244297e5e7c32d9c" +checksum = "09ae1615f843ce3981b47468f3f7c435ac17deb33c2261e64d7f1e87f5c11acc" dependencies = [ "byteorder", ] @@ -1684,7 +2108,7 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" 
dependencies = [ - "base64", + "base64 0.13.1", "bitflags", "bytes", "headers-core", @@ -1705,22 +2129,25 @@ dependencies = [ [[package]] name = "heck" -version = "0.3.3" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.1.19" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" @@ -1747,6 +2174,15 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.6", +] + [[package]] name = "hmac-drbg" version = "0.3.0" @@ -1760,9 +2196,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -1810,9 +2246,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.23" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes", "futures-channel", @@ -1844,11 +2280,11 @@ dependencies = [ "http", "hyper", "hyper-rustls", - "rustls-native-certs", + "rustls-native-certs 0.5.0", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "tower-service", - "webpki", + "webpki 0.21.4", ] [[package]] @@ -1861,11 +2297,11 @@ dependencies = [ "futures-util", "hyper", "log", - "rustls", - "rustls-native-certs", + "rustls 0.19.1", + "rustls-native-certs 0.5.0", "tokio", - "tokio-rustls", - "webpki", + "tokio-rustls 0.22.0", + "webpki 0.21.4", "webpki-roots", ] @@ -1883,113 +2319,138 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] name = "ibc" -version = "0.14.0" -source = 
"git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2#f4703dfe2c1f25cc431279ab74f10f3e0f6827e2" +version = "0.36.0" +source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=2d7edc16412b60cabf78163fe24a6264e11f77a9#2d7edc16412b60cabf78163fe24a6264e11f77a9" dependencies = [ "bytes", + "cfg-if 1.0.0", "derive_more", - "flex-error", - "ibc-proto", + "displaydoc", + "dyn-clone", + "erased-serde", + "ibc-proto 0.26.0", "ics23", "num-traits", + "parking_lot", + "primitive-types", "prost", - "prost-types", "safe-regex", "serde", "serde_derive", "serde_json", "sha2 0.10.6", "subtle-encoding", - "tendermint", - "tendermint-light-client-verifier", + "tendermint 0.23.6", + "tendermint-light-client-verifier 0.23.6", "tendermint-proto 0.23.6", - "tendermint-testgen", + "tendermint-testgen 0.23.6", "time", "tracing", + "uint", ] [[package]] name = "ibc-proto" -version = "0.17.1" -source = "git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2#f4703dfe2c1f25cc431279ab74f10f3e0f6827e2" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b46bcc4540116870cfb184f338b45174a7560ad46dd74e4cb4e81e005e2056" dependencies = [ - "base64", + "base64 0.13.1", "bytes", + "flex-error", "prost", - "prost-types", "serde", - "tendermint-proto 0.23.6", + "subtle-encoding", + "tendermint-proto 0.28.0", "tonic", ] +[[package]] +name = "ibc-proto" +version = "0.26.0" +source = "git+https://github.com/heliaxdev/ibc-proto-rs.git?rev=7e527b5b8c95d83351e93ceafc14ac853224283f#7e527b5b8c95d83351e93ceafc14ac853224283f" +dependencies = [ + "base64 0.13.1", + "bytes", + "flex-error", + "prost", + "serde", + "subtle-encoding", + "tendermint-proto 0.23.6", +] + [[package]] name = "ibc-relayer" -version = "0.14.0" -source = "git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2#f4703dfe2c1f25cc431279ab74f10f3e0f6827e2" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74599e4f602e8487c47955ca9f20aebc0199da3289cc6d5e2b39c6e4b9e65086" dependencies = [ "anyhow", "async-stream", - "bech32", + "bech32 0.9.1", "bitcoin", + "bs58", "bytes", - "crossbeam-channel 0.5.6", + "crossbeam-channel 0.5.8", + "digest 0.10.6", "dirs-next", + "ed25519", + "ed25519-dalek", + "ed25519-dalek-bip32", "flex-error", "futures", + "generic-array", "hdpath", "hex", "http", "humantime", "humantime-serde", - "ibc", - "ibc-proto", + "ibc-proto 0.24.1", + "ibc-relayer-types", "itertools", - "k256", "moka", - "nanoid", "num-bigint", "num-rational", "prost", - "prost-types", "regex", "retry", - "ripemd160", - "semver 1.0.14", + "ripemd", + "secp256k1 0.24.3", + "semver 1.0.17", "serde", "serde_derive", "serde_json", "sha2 0.10.6", "signature", + "strum", "subtle-encoding", - "tendermint", + "tendermint 0.28.0", "tendermint-light-client", - "tendermint-light-client-verifier", - "tendermint-proto 0.23.6", - "tendermint-rpc", + "tendermint-light-client-verifier 0.28.0", + "tendermint-rpc 0.28.0", "thiserror", "tiny-bip39", "tiny-keccak", @@ -1997,23 +2458,53 @@ dependencies = [ "toml", "tonic", "tracing", + "uuid 1.3.2", +] + +[[package]] +name = "ibc-relayer-types" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc9fadabf5846e11b8f9a4093a2cb7d2920b0ef49323b4737739e69ed9bfa2bc" +dependencies = [ + "bytes", + "derive_more", + "dyn-clone", + "erased-serde", + "flex-error", + "ibc-proto 0.24.1", + "ics23", + 
"itertools", + "num-rational", + "primitive-types", + "prost", + "safe-regex", + "serde", + "serde_derive", + "serde_json", + "subtle-encoding", + "tendermint 0.28.0", + "tendermint-light-client-verifier 0.28.0", + "tendermint-proto 0.28.0", + "tendermint-rpc 0.28.0", + "tendermint-testgen 0.28.0", + "time", "uint", ] [[package]] name = "ics23" -version = "0.7.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d454cc0a22bd556cc3d3c69f9d75a392a36244634840697a4b9eb81bc5c8ae0" +checksum = "ca44b684ce1859cff746ff46f5765ab72e12e3c06f76a8356db8f9a2ecf43f17" dependencies = [ "anyhow", "bytes", "hex", "prost", - "ripemd160", - "sha2 0.9.9", + "ripemd", + "sha2 0.10.6", "sha3", - "sp-std", ] [[package]] @@ -2032,6 +2523,35 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-serde" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "incrementalmerkletree" version = "0.2.0" @@ -2058,9 +2578,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", @@ -2068,21 +2588,26 @@ dependencies = [ ] [[package]] -name = "input_buffer" -version = "0.4.0" +name = "instant" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "bytes", + "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] -name = "instant" -version = "0.1.12" +name = "io-lifetimes" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" dependencies = [ - "cfg-if 1.0.0", + "hermit-abi 0.3.1", + "libc", + "windows-sys 0.48.0", ] [[package]] @@ -2096,15 +2621,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "68c16e1bfd491478ab155fd8b4896b86f9ede344949b641e61501e07c2b8b4d5" dependencies = [ "wasm-bindgen", ] @@ -2115,32 
+2640,43 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7baec19d4e83f9145d4891178101a604565edff9645770fc979804138b04c" dependencies = [ - "bitvec", + "bitvec 0.22.3", "bls12_381", - "ff", - "group", + "ff 0.11.1", + "group 0.11.0", "rand_core 0.6.4", "subtle", ] [[package]] name = "k256" -version = "0.10.4" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" +checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ "cfg-if 1.0.0", "ecdsa", "elliptic-curve", - "sec1", - "sha2 0.9.9", + "sha2 0.10.6", ] [[package]] name = "keccak" -version = "0.1.2" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] [[package]] name = "lazy_static" @@ -2156,9 +2692,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.137" +version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "libloading" @@ -2182,7 +2718,7 @@ version = "0.7.0" source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" dependencies = [ "arrayref", - "base64", + "base64 0.13.1", "digest 0.9.0", "hmac-drbg", "libsecp256k1-core", @@ -2221,13 +2757,10 @@ dependencies = [ ] [[package]] -name = "link-cplusplus" -version = "1.0.7" +name = "linux-raw-sys" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" -dependencies = [ - "cc", -] +checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f" [[package]] name = "lock_api" @@ -2246,6 +2779,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if 1.0.0", + "value-bag", ] [[package]] @@ -2266,7 +2800,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0fbfc88337168279f2e9ae06e157cfed4efd3316e14dc96ed074d4f2e6c5952" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2285,23 +2819,25 @@ source = "git+https://github.com/anoma/masp?rev=bee40fc465f6afbd10558d12fe96eb17 dependencies = [ "aes", "bip0039", - "bitvec", - "blake2b_simd 1.0.0", - "blake2s_simd 1.0.0", + "bitvec 0.22.3", + "blake2b_simd 1.0.1", + "blake2s_simd 1.0.1", "bls12_381", "borsh", "byteorder", "chacha20poly1305", "crypto_api_chachapoly", - "ff", + "ff 0.11.1", "fpe", - "group", + "group 0.11.0", "hex", "incrementalmerkletree", "jubjub", "lazy_static", "rand 0.8.5", "rand_core 0.6.4", + "ripemd160", + "secp256k1 0.20.3", "serde", "sha2 0.9.9", "subtle", @@ -2315,12 +2851,12 @@ version = "0.5.0" source = 
"git+https://github.com/anoma/masp?rev=bee40fc465f6afbd10558d12fe96eb1742eee45c#bee40fc465f6afbd10558d12fe96eb1742eee45c" dependencies = [ "bellman", - "blake2b_simd 1.0.0", + "blake2b_simd 1.0.1", "bls12_381", "byteorder", "directories", - "ff", - "group", + "ff 0.11.1", + "group 0.11.0", "itertools", "jubjub", "lazy_static", @@ -2339,12 +2875,28 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "matchit" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" + [[package]] name = "maybe-uninit" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +[[package]] +name = "measure_time" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56220900f1a0923789ecd6bf25fbae8af3b2f1ff3e9e297fc9b6b8674dd4d852" +dependencies = [ + "instant", + "log", +] + [[package]] name = "memchr" version = "2.5.0" @@ -2353,27 +2905,27 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memmap2" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" dependencies = [ "libc", ] [[package]] name = "memoffset" -version = "0.5.6" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] [[package]] name = "memoffset" -version = "0.6.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg", ] @@ -2393,53 +2945,72 @@ dependencies = [ "nonempty", ] +[[package]] +name = "merlin" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miniz_oxide" -version = "0.5.4" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] +[[package]] +name = "miracl_core" +version = "2.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "94c7128ba23c81f6471141b90f17654f89ef44a56e14b8a4dd0fddfccd655277" + [[package]] name = "moka" -version = "0.8.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "975fa04238144061e7f8df9746b2e9cd93ef85881da5548d842a7c6a4b614415" +checksum = "19b9268097a2cf211ac9955b1cc95e80fa84fff5c2d13ba292916445dc8a311f" dependencies = [ - "crossbeam-channel 0.5.6", - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.8.12", + "crossbeam-channel 0.5.8", + "crossbeam-epoch", + "crossbeam-utils 0.8.15", "num_cpus", "once_cell", "parking_lot", "quanta", + "rustc_version 0.4.0", "scheduled-thread-pool", "skeptic", "smallvec", "tagptr", "thiserror", "triomphe", - "uuid 1.2.1", + "uuid 1.3.2", ] [[package]] @@ -2456,10 +3027,12 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.14.0" +version = "0.16.0" dependencies = [ + "async-std", "async-trait", "bellman", + "bimap", "bls12_381", "borsh", "circular-queue", @@ -2467,27 +3040,34 @@ dependencies = [ "data-encoding", "derivative", "ibc", - "ibc-proto", + "ibc-proto 0.26.0", "itertools", "loupe", "masp_primitives", "masp_proofs", "namada_core", "namada_proof_of_stake", + "orion", "parity-wasm", "paste", "proptest", "prost", "pwasm-utils", + "rand 0.8.5", + "rand_core 0.6.4", "rayon", "rust_decimal", "rust_decimal_macros", + "serde", "serde_json", "sha2 0.9.9", "tempfile", - "tendermint", + "tendermint 0.23.6", "tendermint-proto 0.23.6", + "tendermint-rpc 0.23.6", "thiserror", + "tokio", + "toml", "tracing", "wasmer", "wasmer-cache", @@ -2501,20 +3081,23 @@ dependencies = [ [[package]] name = "namada_core" -version = "0.14.0" +version = "0.16.0" dependencies = [ "ark-bls12-381", + "ark-ec", "ark-serialize", - "bech32", + "bech32 0.8.1", "bellman", "borsh", "chrono", "data-encoding", "derivative", "ed25519-consensus", + "ferveo", "ferveo-common", + "group-threshold-cryptography", "ibc", - "ibc-proto", + "ibc-proto 0.26.0", "ics23", "index-set", "itertools", @@ -2533,7 +3116,7 @@ dependencies = [ "serde_json", "sha2 0.9.9", "sparse-merkle-tree", - "tendermint", + "tendermint 0.23.6", "tendermint-proto 0.23.6", "thiserror", "tonic-build", @@ -2543,16 +3126,16 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.14.0" +version = "0.16.0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "namada_proof_of_stake" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", "data-encoding", @@ -2563,29 +3146,28 @@ dependencies = [ "proptest", "rust_decimal", "rust_decimal_macros", - "tendermint-proto 0.23.5", "thiserror", "tracing", ] [[package]] name = "namada_test_utils" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", "namada_core", + "strum", ] [[package]] name = "namada_tests" -version = "0.14.0" +version = "0.16.0" dependencies = [ "chrono", "concat-idents", "derivative", - "ibc", - "ibc-proto", "ibc-relayer", + "ibc-relayer-types", "namada", "namada_core", "namada_test_utils", @@ -2598,10 +3180,10 @@ dependencies = [ "serde_json", "sha2 0.9.9", "tempfile", - "tendermint", - "tendermint-config", + "tendermint 0.23.6", + "tendermint-config 0.23.6", "tendermint-proto 0.23.6", - "tendermint-rpc", + "tendermint-rpc 0.23.6", "test-log", "tokio", "tracing", @@ -2610,7 +3192,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", 
"masp_primitives", @@ -2625,7 +3207,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", "hex", @@ -2636,7 +3218,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", "namada_core", @@ -2649,39 +3231,46 @@ dependencies = [ [[package]] name = "namada_wasm" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", - "getrandom 0.2.8", + "getrandom 0.2.9", "masp_primitives", "masp_proofs", "namada", + "namada_test_utils", "namada_tests", "namada_tx_prelude", "namada_vp_prelude", "once_cell", "proptest", + "ripemd", "rust_decimal", "tracing", "tracing-subscriber", "wee_alloc", ] -[[package]] -name = "nanoid" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" -dependencies = [ - "rand 0.8.5", -] - [[package]] name = "nonempty" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" +[[package]] +name = "num" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -2694,6 +3283,15 @@ dependencies = [ "serde", ] +[[package]] +name = "num-complex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" +dependencies = [ + "num-traits", +] + [[package]] name = "num-derive" version = "0.3.3" @@ -2702,7 +3300,7 @@ checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2715,6 +3313,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-rational" version = "0.4.1" @@ -2735,15 +3344,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", + "libm", ] [[package]] name = "num_cpus" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ - "hermit-abi", + "hermit-abi 0.2.6", "libc", ] @@ -2761,18 +3371,18 @@ dependencies = [ [[package]] name = "object" -version = "0.29.0" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.16.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" 
+checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "opaque-debug" @@ -2795,11 +3405,11 @@ dependencies = [ "aes", "arrayvec 0.7.2", "bigint", - "bitvec", - "blake2b_simd 1.0.0", - "ff", + "bitvec 0.22.3", + "blake2b_simd 1.0.1", + "ff 0.11.1", "fpe", - "group", + "group 0.11.0", "halo2", "incrementalmerkletree", "lazy_static", @@ -2813,13 +3423,51 @@ dependencies = [ "zcash_note_encryption 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "orion" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6624905ddd92e460ff0685567539ed1ac985b2dee4c92c7edcd64fce905b00c" +dependencies = [ + "ct-codecs", + "getrandom 0.2.9", + "subtle", + "zeroize", +] + [[package]] name = "pairing" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2e415e349a3006dd7d9482cdab1c980a845bed1377777d768cb693a44540b42" dependencies = [ - "group", + "group 0.11.0", +] + +[[package]] +name = "parity-scale-codec" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" +dependencies = [ + "arrayvec 0.7.2", + "bitvec 1.0.1", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -2828,6 +3476,12 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" +[[package]] +name = "parking" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" + [[package]] name = "parking_lot" version = "0.12.1" @@ -2840,15 +3494,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -2869,8 +3523,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d647d91972bad78120fd61e06b225fcda117805c9bbf17676b51bd03a251278b" dependencies = [ "blake2b_simd 0.5.11", - "ff", - "group", + "ff 0.11.1", + "group 0.11.0", "lazy_static", "rand 0.8.5", "static_assertions", @@ -2879,27 +3533,27 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.9" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pbkdf2" -version = "0.4.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +checksum = 
"f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" dependencies = [ - "crypto-mac 0.8.0", + "crypto-mac 0.11.1", + "password-hash", ] [[package]] name = "pbkdf2" -version = "0.9.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "crypto-mac 0.11.1", - "password-hash", + "digest 0.10.6", ] [[package]] @@ -2937,9 +3591,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.4.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a528564cc62c19a7acac4d81e01f39e53e25e17b934878f4c6d25cc2836e62f8" +checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" dependencies = [ "thiserror", "ucd-trie", @@ -2947,9 +3601,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", "indexmap", @@ -2972,7 +3626,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2988,14 +3642,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] -name = "pkcs8" -version = "0.8.0" +name = "polling" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ - "der", - "spki", - "zeroize", + "autocfg", + "bitflags", + "cfg-if 1.0.0", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", ] [[package]] @@ -3015,6 +3674,28 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "prettyplease" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +dependencies = [ + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "primitive-types" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-serde", + "uint", +] + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -3024,6 +3705,16 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -3033,7 +3724,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -3050,37 +3741,37 @@ dependencies = [ [[package]] name = "proc-macro2" -version = 
"1.0.47" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] [[package]] name = "proptest" -version = "1.0.0" -source = "git+https://github.com/heliaxdev/proptest?branch=tomas/sm#b9517a726c032897a8b41c215147f44588b33dcc" +version = "1.1.0" +source = "git+https://github.com/heliaxdev/proptest?rev=8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1#8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1" dependencies = [ "bit-set", "bitflags", "byteorder", "lazy_static", "num-traits", - "quick-error 2.0.1", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax", + "regex-syntax 0.6.29", "rusty-fork", "tempfile", + "unarray", ] [[package]] name = "prost" -version = "0.9.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", "prost-derive", @@ -3088,9 +3779,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.9.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", @@ -3099,33 +3790,34 @@ dependencies = [ "log", "multimap", "petgraph", + "prettyplease", "prost", "prost-types", "regex", + "syn 1.0.109", "tempfile", "which", ] [[package]] name = "prost-derive" -version = "0.9.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "prost-types" -version = "0.9.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ - "bytes", "prost", ] @@ -3146,7 +3838,7 @@ checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3176,7 +3868,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" dependencies = [ - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.15", "libc", "mach", "once_cell", @@ -3192,17 +3884,11 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quote" -version = "1.0.21" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = 
"8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ "proc-macro2", ] @@ -3213,6 +3899,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.7.3" @@ -3272,7 +3964,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -3295,9 +3987,9 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.6.0" +version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6823ea29436221176fe662da99998ad3b4db2c7f31e7b6f5fe43adccd6320bb" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ "bitflags", ] @@ -3316,13 +4008,13 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.3" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ - "crossbeam-channel 0.5.6", + "crossbeam-channel 0.5.8", "crossbeam-deque", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.15", "num_cpus", ] @@ -3335,7 +4027,7 @@ dependencies = [ "blake2b_simd 0.5.11", "byteorder", "digest 0.9.0", - "group", + "group 0.11.0", "jubjub", "pasta_curves", "rand_core 0.6.4", @@ -3353,14 +4045,23 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", - "redox_syscall", + "getrandom 0.2.9", + "redox_syscall 0.2.16", "thiserror", ] @@ -3377,13 +4078,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.1", ] [[package]] @@ -3392,14 +4093,20 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = 
"a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" [[package]] name = "region" @@ -3413,38 +4120,29 @@ dependencies = [ "winapi", ] -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - [[package]] name = "rend" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95" +checksum = "581008d2099240d37fb08d77ad713bcaec2c4d89d50b5b21a8bb1996bbab68ab" dependencies = [ "bytecheck", ] [[package]] name = "retry" -version = "1.3.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac95c60a949a63fd2822f4964939662d8f2c16c4fa0624fd954bc6e703b9a3f6" +checksum = "9166d72162de3575f950507683fac47e30f6f2c3836b71b7fbc61aa517c9c5f4" [[package]] name = "rfc6979" -version = "0.1.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ "crypto-bigint", - "hmac 0.11.0", + "hmac 0.12.1", "zeroize", ] @@ -3463,6 +4161,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.6", +] + [[package]] name = "ripemd160" version = "0.9.1" @@ -3476,9 +4183,9 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.39" +version = "0.7.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" +checksum = "21499ed91807f07ae081880aabb2ccc0235e9d88011867d984525e9a4c3cfa3e" dependencies = [ "bytecheck", "hashbrown 0.12.3", @@ -3490,13 +4197,13 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.39" +version = "0.7.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" +checksum = "ac1c672430eb41556291981f45ca900a0239ad007242d1cb4b4167af842db666" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3523,9 +4230,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -3533,6 +4240,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + [[package]] name = "rustc_version" version = "0.3.3" @@ -3542,17 +4255,52 @@ dependencies = [ "semver 0.11.0", ] +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.17", +] + +[[package]] +name = "rustix" +version = "0.37.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.48.0", +] + [[package]] name = "rustls" version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64", + "base64 0.13.1", "log", "ring", - "sct", - "webpki", + "sct 0.6.1", + "webpki 0.21.4", +] + +[[package]] +name = "rustls" +version = "0.20.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +dependencies = [ + "log", + "ring", + "sct 0.7.0", + "webpki 0.22.0", ] [[package]] @@ -3562,16 +4310,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" dependencies = [ "openssl-probe", - "rustls", + "rustls 0.19.1", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +dependencies = [ + "openssl-probe", + "rustls-pemfile", "schannel", "security-framework", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +dependencies = [ + "base64 0.21.0", +] + [[package]] name = "rustversion" -version = "1.0.9" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "rusty-fork" @@ -3580,16 +4349,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error 1.2.3", + "quick-error", "tempfile", "wait-timeout", ] [[package]] name = "ryu" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safe-proc-macro2" @@ -3649,19 +4418,18 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "lazy_static", - "windows-sys 0.36.1", + "windows-sys 0.42.0", ] [[package]] name = "scheduled-thread-pool" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ "parking_lot", ] @@ -3673,16 +4441,20 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] -name = "scratch" -version = "1.0.2" +name = "sct" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +dependencies = [ + "ring", + "untrusted", +] [[package]] name = "sct" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", @@ -3696,41 +4468,61 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "sec1" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ + "base16ct", "der", "generic-array", - "pkcs8", "subtle", "zeroize", ] [[package]] name = "secp256k1" -version = "0.22.1" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" +dependencies = [ + "secp256k1-sys 0.4.2", +] + +[[package]] +name = "secp256k1" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26947345339603ae8395f68e2f3d85a6b0a8ddfe6315818e80b8504415099db0" +checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" dependencies = [ - "secp256k1-sys", + "bitcoin_hashes", + "rand 0.8.5", + "secp256k1-sys 0.6.1", "serde", ] [[package]] name = "secp256k1-sys" -version = "0.5.2" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" +dependencies = [ + "cc", +] + +[[package]] +name = "secp256k1-sys" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "152e20a0fd0519390fc43ab404663af8a0b794273d2a91d60ad4a39f13ffe110" +checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" dependencies = [ "cc", ] [[package]] name = "security-framework" -version = "2.7.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags", "core-foundation", @@ -3741,9 +4533,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", @@ -3760,9 +4552,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" +checksum = 
"bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" dependencies = [ "serde", ] @@ -3778,18 +4570,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.147" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.7" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" +checksum = "416bda436f9aab92e02c8e10d49a15ddd339cea90b6e340fe51ed97abb548294" dependencies = [ "serde", ] @@ -3806,20 +4598,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.147" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "serde_json" -version = "1.0.87" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", @@ -3828,26 +4620,24 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.9" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" +checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "sha-1" -version = "0.9.8" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ - "block-buffer 0.9.0", "cfg-if 1.0.0", "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "digest 0.10.6", ] [[package]] @@ -3858,7 +4648,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -3882,19 +4672,17 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] name = "sha3" -version = "0.9.1" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", + "digest 0.10.6", "keccak", - "opaque-debug", ] [[package]] @@ -3908,23 +4696,29 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] [[package]] name = "signature" 
-version = "1.4.0" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.9.0", + "digest 0.10.6", "rand_core 0.6.4", ] +[[package]] +name = "simdutf8" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + [[package]] name = "simple-error" version = "0.2.3" @@ -3948,9 +4742,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] @@ -3963,24 +4757,18 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", ] -[[package]] -name = "sp-std" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" - [[package]] name = "sparse-merkle-tree" version = "0.3.1-pre" -source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=04ad1eeb28901b57a7599bbe433b3822965dabe8#04ad1eeb28901b57a7599bbe433b3822965dabe8" +source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=e086b235ed6e68929bf73f617dd61cd17b000a56#e086b235ed6e68929bf73f617dd61cd17b000a56" dependencies = [ "borsh", "cfg-if 1.0.0", @@ -3994,16 +4782,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" -dependencies = [ - "base64ct", - "der", -] - [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -4016,6 +4794,41 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "strum" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 1.0.109", +] + +[[package]] +name = "subproductdomain" +version = "0.1.0" +source = "git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" +dependencies = [ + "anyhow", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", +] + [[package]] name = "subtle" version = "2.4.1" @@ -4039,9 +4852,9 @@ checksum = 
"734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "syn" -version = "1.0.103" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -4049,17 +4862,22 @@ dependencies = [ ] [[package]] -name = "synstructure" -version = "0.12.6" +name = "syn" +version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", - "syn", - "unicode-xid", + "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "tagptr" version = "0.2.0" @@ -4074,30 +4892,57 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.5" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d" +checksum = "fd1ba337640d60c3e96bc6f0638a939b9c9a7f2c316a1598c279828b3d1dc8c5" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if 1.0.0", "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", + "redox_syscall 0.3.5", + "rustix", + "windows-sys 0.45.0", ] [[package]] name = "tendermint" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "async-trait", + "bytes", + "ed25519", + "ed25519-dalek", + "flex-error", + "futures", + "num-traits", + "once_cell", + "prost", + "prost-types", + "serde", + "serde_bytes", + "serde_json", + "serde_repr", + "sha2 0.9.9", + "signature", + "subtle", + "subtle-encoding", + "tendermint-proto 0.23.6", + "time", + "zeroize", +] + +[[package]] +name = "tendermint" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c518c082146825f10d6f9a32159ae46edcfd7dae8ac630c8067594bb2a784d72" +dependencies = [ "bytes", "ed25519", "ed25519-dalek", @@ -4117,7 +4962,7 @@ dependencies = [ "signature", "subtle", "subtle-encoding", - "tendermint-proto 0.23.6", + "tendermint-proto 0.28.0", "time", "zeroize", ] @@ -4125,20 +4970,35 @@ dependencies = [ [[package]] name = "tendermint-config" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" +dependencies = [ + "flex-error", + "serde", + "serde_json", + "tendermint 0.23.6", + "toml", + 
"url", +] + +[[package]] +name = "tendermint-config" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f58b86374e3bcfc8135770a6c55388fce101c00de4a5f03224fa5830c9240b7" dependencies = [ "flex-error", "serde", "serde_json", - "tendermint", + "tendermint 0.28.0", "toml", "url", ] [[package]] name = "tendermint-light-client" -version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ab1450566e4347f3a81e27d3e701d74313f9fc2efb072fc3f49e0a762cb2a0f" dependencies = [ "contracts", "crossbeam-channel 0.4.4", @@ -4149,9 +5009,9 @@ dependencies = [ "serde_cbor", "serde_derive", "static_assertions", - "tendermint", - "tendermint-light-client-verifier", - "tendermint-rpc", + "tendermint 0.28.0", + "tendermint-light-client-verifier 0.28.0", + "tendermint-rpc 0.28.0", "time", "tokio", ] @@ -4159,19 +5019,32 @@ dependencies = [ [[package]] name = "tendermint-light-client-verifier" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" +dependencies = [ + "derive_more", + "flex-error", + "serde", + "tendermint 0.23.6", + "time", +] + +[[package]] +name = "tendermint-light-client-verifier" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c742bb914f9fb025ce0e481fbef9bb59c94d5a4bbd768798102675a2e0fb7440" dependencies = [ "derive_more", "flex-error", "serde", - "tendermint", + "tendermint 0.28.0", "time", ] [[package]] name = "tendermint-proto" -version = "0.23.5" -source = "git+https://github.com/heliaxdev/tendermint-rs?rev=95c52476bc37927218374f94ac8e2a19bd35bec9#95c52476bc37927218374f94ac8e2a19bd35bec9" +version = "0.23.6" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "bytes", "flex-error", @@ -4187,8 +5060,9 @@ dependencies = [ [[package]] name = "tendermint-proto" -version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "890f1fb6dee48900c85f0cdf711ebf130e505ac09ad918cee5c34ed477973b05" dependencies = [ "bytes", "flex-error", @@ -4205,14 +5079,13 @@ dependencies = [ [[package]] name = "tendermint-rpc" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "async-trait", - "async-tungstenite", "bytes", "flex-error", "futures", - "getrandom 0.2.8", + "getrandom 0.2.9", "http", "hyper", "hyper-proxy", @@ -4223,8 +5096,8 @@ dependencies = [ "serde_bytes", "serde_json", "subtle-encoding", - "tendermint", - "tendermint-config", + "tendermint 0.23.6", + "tendermint-config 0.23.6", "tendermint-proto 0.23.6", "thiserror", "time", @@ -4235,10 +5108,44 @@ 
dependencies = [ "walkdir", ] +[[package]] +name = "tendermint-rpc" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06df4715f9452ec0a21885d6da8d804799455ba50d8bc40be1ec1c800afd4bd8" +dependencies = [ + "async-trait", + "async-tungstenite", + "bytes", + "flex-error", + "futures", + "getrandom 0.2.9", + "http", + "hyper", + "hyper-proxy", + "hyper-rustls", + "peg", + "pin-project", + "serde", + "serde_bytes", + "serde_json", + "subtle", + "subtle-encoding", + "tendermint 0.28.0", + "tendermint-config 0.28.0", + "thiserror", + "time", + "tokio", + "tracing", + "url", + "uuid 0.8.2", + "walkdir", +] + [[package]] name = "tendermint-testgen" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "ed25519-dalek", "gumdrop", @@ -4246,17 +5153,24 @@ dependencies = [ "serde_json", "simple-error", "tempfile", - "tendermint", + "tendermint 0.23.6", "time", ] [[package]] -name = "termcolor" -version = "1.1.3" +name = "tendermint-testgen" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "05912d3072284786c0dec18e82779003724e0da566676fbd90e4fba6845fd81a" dependencies = [ - "winapi-util", + "ed25519-dalek", + "gumdrop", + "serde", + "serde_json", + "simple-error", + "tempfile", + "tendermint 0.28.0", + "time", ] [[package]] @@ -4267,35 +5181,36 @@ checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if 1.0.0", "once_cell", ] @@ -4327,17 +5242,17 @@ dependencies = [ [[package]] name = "tiny-bip39" -version = "0.8.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" +checksum = "62cc94d358b5a1e84a5cb9109f559aa3c4d634d2b1b4de3d0fa4adc7c78e2861" dependencies = [ "anyhow", - "hmac 0.8.1", + "hmac 0.12.1", "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", + "pbkdf2 0.11.0", + "rand 0.8.5", "rustc-hash", - "sha2 0.9.9", + "sha2 0.10.6", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -4364,20 +5279,19 @@ dependencies = [ [[package]] name = "tinyvec_macros" 
-version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.21.2" +version = "1.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" dependencies = [ "autocfg", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot", @@ -4385,7 +5299,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -4400,13 +5314,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -4415,41 +5329,38 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "rustls", + "rustls 0.19.1", "tokio", - "webpki", + "webpki 0.21.4", ] [[package]] -name = "tokio-stream" -version = "0.1.11" +name = "tokio-rustls" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "futures-core", - "pin-project-lite", + "rustls 0.20.8", "tokio", + "webpki 0.22.0", ] [[package]] -name = "tokio-util" -version = "0.6.10" +name = "tokio-stream" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ - "bytes", "futures-core", - "futures-sink", - "log", "pin-project-lite", "tokio", ] [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", @@ -4461,22 +5372,40 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" + +[[package]] +name = "toml_edit" +version = "0.19.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" -version = "0.6.2" +version = "0.8.3" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08f4649d10a70ffa3522ca559031285d8e421d727ac85c60825761818f5d0a" +checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" dependencies = [ "async-stream", "async-trait", - "base64", + "axum", + "base64 0.13.1", "bytes", "futures-core", "futures-util", @@ -4489,11 +5418,12 @@ dependencies = [ "pin-project", "prost", "prost-derive", - "rustls-native-certs", + "rustls-native-certs 0.6.2", + "rustls-pemfile", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tokio-stream", - "tokio-util 0.6.10", + "tokio-util", "tower", "tower-layer", "tower-service", @@ -4503,14 +5433,15 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.6.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" +checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" dependencies = [ + "prettyplease", "proc-macro2", "prost-build", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4527,7 +5458,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -4560,20 +5491,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", ] @@ -4590,9 +5521,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "once_cell", @@ -4611,35 +5542,37 @@ checksum = "f1ee9bd9239c339d714d657fac840c6d2a4f9c45f4f9ec7b0975113458be78db" [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" -version = "0.12.0" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64", + "base64 0.13.1", "byteorder", "bytes", "http", "httparse", - "input_buffer", "log", "rand 0.8.5", + "rustls 0.20.8", "sha-1", + "thiserror", "url", "utf-8", + "webpki 0.22.0", ] [[package]] name = "tx_template" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", - "getrandom 0.2.8", + "getrandom 0.2.9", "namada_tests", "namada_tx_prelude", "wee_alloc", @@ -4647,9 +5580,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.15.0" 
+version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" @@ -4659,9 +5592,9 @@ checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "uint" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a45526d29728d135c2900b0d30573fe3ee79fceb12ef534c7bb30e810a91b601" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" dependencies = [ "byteorder", "crunchy 0.2.2", @@ -4669,6 +5602,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicase" version = "2.6.0" @@ -4680,15 +5619,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.5" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -4699,12 +5638,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" - [[package]] name = "unicode-width" version = "0.1.10" @@ -4758,11 +5691,21 @@ checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" [[package]] name = "uuid" -version = "1.2.1" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" +dependencies = [ + "getrandom 0.2.9", +] + +[[package]] +name = "value-bag" +version = "1.0.0-alpha.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" +checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" dependencies = [ - "getrandom 0.2.8", + "ctor", + "version_check", ] [[package]] @@ -4773,10 +5716,10 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vp_template" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", - "getrandom 0.2.8", + "getrandom 0.2.9", "namada_tests", "namada_vp_prelude", "wee_alloc", @@ -4791,14 +5734,19 @@ dependencies = [ "libc", ] +[[package]] +name = "waker-fn" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" + [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = 
"36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] @@ -4832,9 +5780,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "5b6cb788c4e39112fbe1822277ef6fb3c55cd86b95cb3d3c4c1c9597e4ac74b4" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -4842,24 +5790,36 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "35e522ed4105a9d626d885b35d62501b30d9666283a5c8be12c14a8bdafe7822" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.15", "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "083abe15c5d88556b77bdf7aef403625be9e327ad37c62c4e4129af740168163" +dependencies = [ + "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "358a79a0cb89d21db8120cbfb91392335913e4890665b1a7981d9e956903b434" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4867,28 +5827,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "4783ce29f09b9d93134d41297aded3a712b7b979e9c6f28c32cb88c973a94869" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "a901d592cafaa4d711bc324edfaff879ac700b19c3dfd60058d2b445be2691eb" [[package]] name = "wasm-encoder" -version = "0.19.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9424cdab516a16d4ea03c8f4a01b14e7b2d04a129dcc2bcdde5bcc5f68f06c41" +checksum = "d05d0b6fcd0aeb98adf16e7975331b3c17222aa815148f5b976370ce589d80ef" dependencies = [ "leb128", ] @@ -4999,7 +5959,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5131,9 +6091,9 @@ checksum = "718ed7c55c2add6548cca3ddd6383d738cd73b892df400e96b9aa876f0141d7a" [[package]] name = "wast" -version = "49.0.0" +version = "57.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ef81fcd60d244cafffeafac3d17615fdb2fddda6aca18f34a8ae233353587c" +checksum = "6eb0f5ed17ac4421193c7477da05892c2edafd67f9639e3c11a82086416662dc" dependencies = [ "leb128", "memchr", @@ -5143,18 +6103,18 @@ dependencies = [ [[package]] name = "wat" -version = "1.0.51" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4c347c4460ffb311e95aafccd8c29e4888f241b9e4b3bb0e0ccbd998de2c8c0d" +checksum = "ab9ab0d87337c3be2bb6fc5cd331c4ba9fd6bcb4ee85048a0dd59ed9ecf92e53" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "16b5f940c7edfdc6d12126d98c9ef4d1b3d470011c47c76a6581df47ad9ba721" dependencies = [ "js-sys", "wasm-bindgen", @@ -5170,13 +6130,23 @@ dependencies = [ "untrusted", ] +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "webpki-roots" version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" dependencies = [ - "webpki", + "webpki 0.21.4", ] [[package]] @@ -5193,9 +6163,9 @@ dependencies = [ [[package]] name = "which" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", "libc", @@ -5234,16 +6204,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows-sys" -version = "0.36.1" +name = "windows" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", + "windows-targets 0.48.0", ] [[package]] @@ -5252,86 +6218,155 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 
0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = 
"ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + +[[package]] +name = "winnow" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +dependencies = [ + "memchr", +] [[package]] name = "wyz" @@ -5342,6 +6377,15 @@ dependencies = [ "tap", ] +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "zcash_encoding" version = "0.0.0" @@ -5381,16 +6425,16 @@ source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b dependencies = [ "aes", "bip0039", - "bitvec", - "blake2b_simd 1.0.0", - "blake2s_simd 1.0.0", + "bitvec 0.22.3", + "blake2b_simd 1.0.1", + "blake2s_simd 1.0.1", "bls12_381", "byteorder", "chacha20poly1305", "equihash", - "ff", + "ff 0.11.1", "fpe", - "group", + "group 0.11.0", "hex", "incrementalmerkletree", "jubjub", @@ -5412,12 +6456,12 @@ version = "0.5.0" source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" dependencies = [ "bellman", - "blake2b_simd 1.0.0", + "blake2b_simd 1.0.1", "bls12_381", "byteorder", "directories", - "ff", - "group", + "ff 0.11.1", + "group 0.11.0", "jubjub", "lazy_static", "rand_core 0.6.4", @@ -5426,21 +6470,20 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.15", ] diff --git a/wasm/Cargo.toml b/wasm/Cargo.toml index 062d9fb3dcd..2fd275d11bb 100644 --- a/wasm/Cargo.toml +++ b/wasm/Cargo.toml @@ -14,18 +14,19 @@ borsh-derive = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223 
borsh-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} borsh-schema-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} # patched to a commit on the `eth-bridge-integration+consensus-timeout` branch of our fork -tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-config = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-rpc = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-light-client = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} +tendermint-config = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} +tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} +tendermint-rpc = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} +tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} +tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} # patched to a commit on the `eth-bridge-integration` branch of our fork -ibc = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"} -ibc-proto = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"} -ibc-relayer = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"} +ibc = {git = "https://github.com/heliaxdev/cosmos-ibc-rs.git", rev = "2d7edc16412b60cabf78163fe24a6264e11f77a9"} +ibc-proto = {git = "https://github.com/heliaxdev/ibc-proto-rs.git", rev = "7e527b5b8c95d83351e93ceafc14ac853224283f"} + +# patched to the yanked 1.2.0 until masp updates bitvec +funty = { git = "https://github.com/bitvecto-rs/funty/", rev = "7ef0d890fbcd8b3def1635ac1a877fc298488446" } [profile.release] # smaller and faster wasm (https://rustwasm.github.io/book/reference/code-size.html#compiling-with-link-time-optimizations-lto) diff --git a/wasm/checksums.json b/wasm/checksums.json index 3400d1a135a..80b216ed24b 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,20 +1,20 @@ { - "tx_bond.wasm": "tx_bond.2910631adf11b0368d2f09ce30ecba7ca7a18a38e17c00d920765075b4064a14.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.d4f15084da1355bf2749f2bcb380a89d3a4b90cf00288daf0fc975208c9d93e7.wasm", - "tx_ibc.wasm": "tx_ibc.80df46ff21d54755af23a6a7395c72fc320fbc61e9f5487a438ec4a306ba1484.wasm", - "tx_init_account.wasm": 
"tx_init_account.ff822951455cc89960d3ca1221488f1271012b38629c5a9662bb930595bb1997.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.0a842c6e0bfa10f3d2261048c28e84bd8e775f323d7db740ec656339298cfbd8.wasm", - "tx_init_validator.wasm": "tx_init_validator.9f1e01de1dc5f09b649ef34226e6064ddf39035fec3259edcc46858997df1f15.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.bd9674f90665eafb89f6c96d33760b22e5f33244cecbd29184bf1fe4168dc1ce.wasm", - "tx_transfer.wasm": "tx_transfer.f1f313acc5d1da6eed60deeb6b95a4bf57d5011e64da4c1baf8107ae2986e44c.wasm", - "tx_unbond.wasm": "tx_unbond.ba355d2459b55234f27f793132735d80b5daf5f70196ffcbe4bbe8bd64951cf3.wasm", - "tx_update_vp.wasm": "tx_update_vp.56c511136750585d0b5ee2ddadc98932569993bfb382b179868e653036564a26.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.eff1f05103c2ae1a2f33a8dee67d7fc01067b1951d99c1d147b74a6ca119d470.wasm", - "tx_withdraw.wasm": "tx_withdraw.c61702c41e8fa51c9ed77d67e934dd9eaa8fea54ff1e12cac841d14f6611c98d.wasm", - "vp_implicit.wasm": "vp_implicit.7d6bb83dce87d30c5cf209a5301679bbcf91cd15e031ef22f15429498f5f9eb5.wasm", - "vp_masp.wasm": "vp_masp.fc8e582dc105e2dd9d08c58459df2f98753c550c9b43cd54d07570ff24c92d9e.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.08c662d6dab741cbef58a6c95f22646ab08df4769fcee4059c47bcb9185bf3a9.wasm", - "vp_token.wasm": "vp_token.2824f68745a628faa79f7fa2bdf6582809c969f9fdca68913b4e84e5e4a83852.wasm", - "vp_user.wasm": "vp_user.af8d1c7035ced526ea22108a1226a61e3eeff4db919cb73df8d6ec42ec2018ad.wasm", - "vp_validator.wasm": "vp_validator.ec97ca9f091c6f2cbe558f98b39bc2a699bf9aa780c47b405b4bd1dd8ec68dad.wasm" + "tx_bond.wasm": "tx_bond.4861430f580e9973c96e99f615d81950c088317768d1dbdb1088ca1889db13a0.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.a6013f149f61e1d80b6858db12c6203b6dd78443c2d3006527e38617388337ae.wasm", + "tx_ibc.wasm": "tx_ibc.5adb9e98dd7930c6442eff946cec0eec50a447873a6e97cd79f1552639c0ca9a.wasm", + "tx_init_account.wasm": "tx_init_account.139590861c2c86459acbccf058ba7147be83bd7d90f914ac605dd71077937c5d.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.8449c431543e02466d9284df81317a4e4cd451772a43a5f45062a144b2a41424.wasm", + "tx_init_validator.wasm": "tx_init_validator.a38b2664989e7e87f9016690b415b77e6b0f36443d46a947b04f809efc9caa59.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.69aa3ad6305a26cbba8667efa923389afbb6db6feb025e7c88eac8f413c9e36b.wasm", + "tx_transfer.wasm": "tx_transfer.e68e6cb3336962c1f3e5d63054fb4e5fbca02b12125a71179aa86169e12174d7.wasm", + "tx_unbond.wasm": "tx_unbond.53119371c8f4b20424774d9312fa50333c4f8250b6018384142e0e8553009a31.wasm", + "tx_update_vp.wasm": "tx_update_vp.24b8501c7df4ee482fbab8411a5e8666d36267302783998e6dd3ccc92a2eb802.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.45d73369f04f4261de0a89f1c6e398f26aed18ee921832bcfe845de169e8d21f.wasm", + "tx_withdraw.wasm": "tx_withdraw.f2acfee621b4805ef092143195067728312d60d30907d3f82be850ef295092d3.wasm", + "vp_implicit.wasm": "vp_implicit.5f4c7920478e2db6db63765f9af9800d2d9c164728e3eb48360a2617e173aefb.wasm", + "vp_masp.wasm": "vp_masp.f17ee14abb38853066fada237164faaf6cf49a0c121b4b2e9cfd48030c17d620.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.79f768c6d0dd20c4b38b982afdc72645d319d94abe4c577e99b086a24ba94984.wasm", + "vp_token.wasm": "vp_token.b09a3aa221e995e82fe58082bfa428cdde161fdc444f17e135800b21b3c7d1cb.wasm", + "vp_user.wasm": "vp_user.c5a15735bfd5cf5dfc8019805f36e8438b68b2c1c1ba653518d465c7159ef5d3.wasm", + "vp_validator.wasm": 
"vp_validator.12d8726feaf1c9a3465be9f25ce6d3e2599cb79ab1f09df656326f73d1a577ff.wasm" } \ No newline at end of file diff --git a/wasm/tx_template/Cargo.toml b/wasm/tx_template/Cargo.toml index 0569f0c0fa7..f8b7b8dc5e8 100644 --- a/wasm/tx_template/Cargo.toml +++ b/wasm/tx_template/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "tx_template" resolver = "2" -version = "0.14.0" +version = "0.16.0" [lib] crate-type = ["cdylib"] diff --git a/wasm/vp_template/Cargo.toml b/wasm/vp_template/Cargo.toml index 01faca5f459..94745366f84 100644 --- a/wasm/vp_template/Cargo.toml +++ b/wasm/vp_template/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "vp_template" resolver = "2" -version = "0.14.0" +version = "0.16.0" [lib] crate-type = ["cdylib"] diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml index a2cd2ba6740..85194462e67 100644 --- a/wasm/wasm_source/Cargo.toml +++ b/wasm/wasm_source/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_wasm" resolver = "2" -version = "0.14.0" +version = "0.16.0" [lib] crate-type = ["cdylib"] @@ -37,19 +37,21 @@ namada_tx_prelude = {path = "../../tx_prelude", optional = true} namada_vp_prelude = {path = "../../vp_prelude", optional = true} borsh = "0.9.0" once_cell = {version = "1.8.0", optional = true} -rust_decimal = {version = "1.26.1", optional = true} +rust_decimal = {version = "=1.26.1", optional = true} wee_alloc = "0.4.5" getrandom = { version = "0.2", features = ["custom"] } masp_proofs = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c", optional = true } masp_primitives = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c", optional = true } +ripemd = "0.1.3" [dev-dependencies] namada = {path = "../../shared"} namada_tests = {path = "../../tests"} +namada_test_utils = {path = "../../test_utils"} namada_tx_prelude = {path = "../../tx_prelude"} namada_vp_prelude = {path = "../../vp_prelude"} # A fork with state machine testing -proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} +proptest = {git = "https://github.com/heliaxdev/proptest", rev = "8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1"} tracing = "0.1.30" tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} rust_decimal = "1.26.1" diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index b949731a704..8215045d5c1 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -25,6 +25,7 @@ mod tests { read_total_stake, read_validator_stake, }; use namada::proto::Tx; + use namada::types::chain::ChainId; use namada::types::storage::Epoch; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; @@ -99,7 +100,7 @@ mod tests { let tx_code = vec![]; let tx_data = bond.try_to_vec().unwrap(); - let tx = Tx::new(tx_code, Some(tx_data)); + let tx = Tx::new(tx_code, Some(tx_data), ChainId::default(), None); let signed_tx = tx.sign(&key); let tx_data = signed_tx.data.unwrap(); diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs index 3b77c9197cd..30ae263269b 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/wasm_source/src/tx_change_validator_commission.rs @@ -23,6 +23,7 @@ mod tests { use namada::ledger::pos::{PosParams, PosVP}; use namada::proof_of_stake::validator_commission_rate_handle; use namada::proto::Tx; 
+    use namada::types::chain::ChainId;
     use namada::types::storage::Epoch;
     use namada_tests::log::test;
     use namada_tests::native_vp::pos::init_pos;
@@ -78,7 +79,7 @@ mod tests {
         let tx_code = vec![];
         let tx_data = commission_change.try_to_vec().unwrap();
-        let tx = Tx::new(tx_code, Some(tx_data));
+        let tx = Tx::new(tx_code, Some(tx_data), ChainId::default(), None);
         let signed_tx = tx.sign(&key);
         let tx_data = signed_tx.data.unwrap();
diff --git a/wasm/wasm_source/src/tx_ibc.rs b/wasm/wasm_source/src/tx_ibc.rs
index 79cbc6cf966..b9473170b2c 100644
--- a/wasm/wasm_source/src/tx_ibc.rs
+++ b/wasm/wasm_source/src/tx_ibc.rs
@@ -10,5 +10,6 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
     let signed = SignedTxData::try_from_slice(&tx_data[..])
         .wrap_err("failed to decode SignedTxData")?;
     let data = signed.data.ok_or_err_msg("Missing data")?;
-    ctx.dispatch_ibc_action(&data)
+
+    ibc::ibc_actions(ctx).execute(&data).into_storage_result()
 }
diff --git a/wasm/wasm_source/src/tx_init_account.rs b/wasm/wasm_source/src/tx_init_account.rs
index e0fe700d630..b1041c40a2d 100644
--- a/wasm/wasm_source/src/tx_init_account.rs
+++ b/wasm/wasm_source/src/tx_init_account.rs
@@ -12,7 +12,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
         .wrap_err("failed to decode InitAccount")?;
     debug_log!("apply_tx called to init a new established account");
-    let address = ctx.init_account(&tx_data.vp_code)?;
+    let address = ctx.init_account(&tx_data.vp_code_hash)?;
     let pk_key = key::pk_key(&address);
     ctx.write(&pk_key, &tx_data.public_key)?;
     Ok(())
diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs
index 42fa0666bca..33cf9f56eaa 100644
--- a/wasm/wasm_source/src/tx_unbond.rs
+++ b/wasm/wasm_source/src/tx_unbond.rs
@@ -25,6 +25,7 @@ mod tests {
         read_total_stake, read_validator_stake, unbond_handle,
     };
     use namada::proto::Tx;
+    use namada::types::chain::ChainId;
     use namada::types::storage::Epoch;
     use namada_tests::log::test;
     use namada_tests::native_vp::pos::init_pos;
@@ -121,7 +122,7 @@ mod tests {
         let tx_code = vec![];
         let tx_data = unbond.try_to_vec().unwrap();
-        let tx = Tx::new(tx_code, Some(tx_data));
+        let tx = Tx::new(tx_code, Some(tx_data), ChainId::default(), None);
         let signed_tx = tx.sign(&key);
         let tx_data = signed_tx.data.unwrap();
diff --git a/wasm/wasm_source/src/tx_update_vp.rs b/wasm/wasm_source/src/tx_update_vp.rs
index 0bb819f0264..fb0b80af400 100644
--- a/wasm/wasm_source/src/tx_update_vp.rs
+++ b/wasm/wasm_source/src/tx_update_vp.rs
@@ -14,5 +14,5 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
     debug_log!("update VP for: {:#?}", update_vp.addr);
-    ctx.update_validity_predicate(&update_vp.addr, update_vp.vp_code)
+    ctx.update_validity_predicate(&update_vp.addr, update_vp.vp_code_hash)
 }
diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs
index 80c0f00265c..b00661261e1 100644
--- a/wasm/wasm_source/src/tx_withdraw.rs
+++ b/wasm/wasm_source/src/tx_withdraw.rs
@@ -24,6 +24,7 @@ mod tests {
     use namada::ledger::pos::{GenesisValidator, PosParams, PosVP};
     use namada::proof_of_stake::unbond_handle;
     use namada::proto::Tx;
+    use namada::types::chain::ChainId;
     use namada::types::storage::Epoch;
     use namada_tests::log::test;
     use namada_tests::native_vp::pos::init_pos;
@@ -154,7 +155,7 @@ mod tests {
         let tx_code = vec![];
         let tx_data = withdraw.try_to_vec().unwrap();
-        let tx = Tx::new(tx_code, Some(tx_data));
+        let tx = Tx::new(tx_code, Some(tx_data), ChainId::default(), None);
         let signed_tx = tx.sign(&key);
         let tx_data =
signed_tx.data.unwrap(); diff --git a/wasm/wasm_source/src/vp_implicit.rs b/wasm/wasm_source/src/vp_implicit.rs index 31a920a5404..26a88d84ea3 100644 --- a/wasm/wasm_source/src/vp_implicit.rs +++ b/wasm/wasm_source/src/vp_implicit.rs @@ -203,6 +203,7 @@ mod tests { // Use this as `#[test]` annotation to enable logging use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::types::storage::Epoch; + use namada_test_utils::TestWasms; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::tx::{self, tx_host_env, TestTxEnv}; @@ -215,9 +216,6 @@ mod tests { use super::*; - const VP_ALWAYS_TRUE_WASM: &str = - "../../wasm_for_tests/vp_always_true.wasm"; - /// Test that no-op transaction (i.e. no storage modifications) accepted. #[test] fn test_no_op_transaction() { @@ -765,8 +763,10 @@ mod tests { let secret_key = key::testing::keypair_1(); let public_key = secret_key.ref_to(); let vp_owner: Address = (&public_key).into(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // Spawn the accounts to be able to modify their storage tx_env.spawn_accounts([&vp_owner]); @@ -775,7 +775,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -800,10 +800,11 @@ mod tests { let secret_key = key::testing::keypair_1(); let public_key = secret_key.ref_to(); let vp_owner: Address = (&public_key).into(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); + tx_env.init_parameters( None, Some(vec![vp_hash.to_string()]), @@ -819,7 +820,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -846,8 +847,10 @@ mod tests { let secret_key = key::testing::keypair_1(); let public_key = secret_key.ref_to(); let vp_owner: Address = (&public_key).into(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // hardcoded hash of VP_ALWAYS_TRUE_WASM tx_env.init_parameters(None, None, Some(vec!["E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855".to_string()])); @@ -861,7 +864,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); diff --git a/wasm/wasm_source/src/vp_masp.rs b/wasm/wasm_source/src/vp_masp.rs index a9b7f532309..958501c96c3 100644 --- a/wasm/wasm_source/src/vp_masp.rs +++ b/wasm/wasm_source/src/vp_masp.rs @@ -1,11 +1,59 @@ use std::cmp::Ordering; use masp_primitives::asset_type::AssetType; -use masp_primitives::transaction::components::Amount; +use masp_primitives::legacy::TransparentAddress::{PublicKey, Script}; +use masp_primitives::transaction::components::{Amount, TxOut}; /// Multi-asset shielded pool VP. 
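The vp_masp.rs hunks that follow hinge on epoched asset types: an asset type is the hash of the Borsh-encoded `(token, epoch)` pair, so every shielded value is implicitly timestamped, and a value minted under an older epoch can no longer pass the asset-type comparison. A hedged illustration of that property, assuming `Epoch` is constructible from a `u64` as the `.0` access in the helper below suggests:

    // Same token, different epochs => different asset identifiers, so
    // stale shielded values cannot masquerade as current ones.
    let current = asset_type_from_epoched_address(Epoch(5), &token);
    let stale = asset_type_from_epoched_address(Epoch(4), &token);
    assert_ne!(current.get_identifier(), stale.get_identifier());
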
 use namada_vp_prelude::address::masp;
 use namada_vp_prelude::storage::Epoch;
 use namada_vp_prelude::*;
+use ripemd::{Digest, Ripemd160};
+
+/// Generates the current asset type given the current epoch and a
+/// unique token address
+fn asset_type_from_epoched_address(epoch: Epoch, token: &Address) -> AssetType {
+    // Timestamp the chosen token with the current epoch
+    let token_bytes = (token, epoch.0)
+        .try_to_vec()
+        .expect("token should serialize");
+    // Generate the unique asset identifier from the unique token address
+    AssetType::new(token_bytes.as_ref()).expect("unable to create asset type")
+}
+
+/// Checks if the asset type matches the expected asset type; adds a
+/// debug log if the values do not match.
+fn valid_asset_type(
+    asset_type: &AssetType,
+    asset_type_to_test: &AssetType,
+) -> bool {
+    let res =
+        asset_type.get_identifier() == asset_type_to_test.get_identifier();
+    if !res {
+        debug_log!(
+            "The asset type must be derived from the token address and \
+             current epoch"
+        );
+    }
+    res
+}
+
+/// Checks if the reported transparent amount and the unshielded
+/// values agree; if not, adds to the debug log
+fn valid_transfer_amount(
+    reported_transparent_value: u64,
+    unshielded_transfer_value: u64,
+) -> bool {
+    let res = reported_transparent_value == unshielded_transfer_value;
+    if !res {
+        debug_log!(
+            "The unshielded amount {} disagrees with the calculated masp \
+             transparent value {}",
+            unshielded_transfer_value,
+            reported_transparent_value
+        )
+    }
+    res
+}

 /// Convert Namada amount and token type to MASP equivalents
 fn convert_amount(
@@ -13,13 +61,7 @@ fn convert_amount(
     token: &Address,
     val: token::Amount,
 ) -> (AssetType, Amount) {
-    // Timestamp the chosen token with the current epoch
-    let token_bytes = (token, epoch.0)
-        .try_to_vec()
-        .expect("token should serialize");
-    // Generate the unique asset identifier from the unique token address
-    let asset_type = AssetType::new(token_bytes.as_ref())
-        .expect("unable to create asset type");
+    let asset_type = asset_type_from_epoched_address(epoch, token);
     // Combine the value and unit into one amount
     let amount = Amount::from_nonnegative(asset_type, u64::from(val))
         .expect("invalid value or asset type for amount");
@@ -54,8 +96,8 @@ fn validate_tx(
             // The Sapling value balance adds to the transparent tx pool
             transparent_tx_pool += shielded_tx.value_balance.clone();

-            // Handle shielding/transparent input
             if transfer.source != masp() {
+                // Handle transparent input
                 // Note that the asset type is timestamped so shields
                 // where the shielded value has an incorrect timestamp
                 // are automatically rejected
@@ -67,20 +109,100 @@
                 // Non-masp sources add to transparent tx pool
                 transparent_tx_pool += transp_amt;
+            } else {
+                // Handle shielded input
+                // The following boundary conditions must be satisfied
+                // 1. Zero transparent input
+                // 2. The transparent transaction value pool's amount must
+                //    equal the containing wrapper transaction's fee amount
+                // Satisfies 1.
+                if !shielded_tx.vin.is_empty() {
+                    debug_log!(
+                        "Transparent input to a transaction from the masp \
+                         must be 0 but is {}",
+                        shielded_tx.vin.len()
+                    );
+                    return reject();
+                }
             }

-            // Handle unshielding/transparent output
             if transfer.target != masp() {
-                // Timestamp is derived to allow unshields for older tokens
-                let atype =
-                    shielded_tx.value_balance.components().next().unwrap().0;
+                // Handle transparent output
+                // The following boundary conditions must be satisfied
+                // 1. One transparent output
+                // 2. Asset type must be properly derived
+                // 3. Value from the output must be the same as the containing
+                //    transfer
+                // 4. Public key must be the hash of the target
+
+                // Satisfies 1.
+                if shielded_tx.vout.len() != 1 {
+                    debug_log!(
+                        "Transparent output of a transaction from the masp \
+                         must be 1 but is {}",
+                        shielded_tx.vout.len()
+                    );
+                    return reject();
+                }
+
+                let out: &TxOut = &shielded_tx.vout[0];
+
+                let expected_asset_type: AssetType =
+                    asset_type_from_epoched_address(
+                        ctx.get_block_epoch().unwrap(),
+                        &transfer.token,
+                    );
-                let transp_amt =
-                    Amount::from_nonnegative(*atype, u64::from(transfer.amount))
-                        .expect("invalid value or asset type for amount");
+                // Satisfies 2. and 3.
+                if !(valid_asset_type(&expected_asset_type, &out.asset_type)
+                    && valid_transfer_amount(out.value, u64::from(transfer.amount)))
+                {
+                    return reject();
+                }
+
+                let (_transp_asset, transp_amt) = convert_amount(
+                    ctx.get_block_epoch().unwrap(),
+                    &transfer.token,
+                    transfer.amount,
+                );

                 // Non-masp destinations subtract from transparent tx pool
                 transparent_tx_pool -= transp_amt;
+
+                // Satisfies 4.
+                match out.script_pubkey.address() {
+                    None | Some(Script(_)) => {}
+                    Some(PublicKey(pub_bytes)) => {
+                        let target_enc = transfer
+                            .target
+                            .try_to_vec()
+                            .expect("target address encoding");
+
+                        let hash =
+                            Ripemd160::digest(sha256(&target_enc).as_slice());
+
+                        if <[u8; 20]>::from(hash) != pub_bytes {
+                            debug_log!(
+                                "the public key of the output account does not \
+                                 match the transfer target"
+                            );
+                            return reject();
+                        }
+                    }
+                }
+            } else {
+                // Handle shielded output
+                // The following boundary conditions must be satisfied
+                // 1. Zero transparent output
+                // Satisfies 1.
+                if !shielded_tx.vout.is_empty() {
+                    debug_log!(
+                        "Transparent output of a transaction to the masp must \
+                         be 0 but is {}",
+                        shielded_tx.vout.len()
+                    );
+                    return reject();
+                }
             }

             match transparent_tx_pool.partial_cmp(&Amount::zero()) {
diff --git a/wasm/wasm_source/src/vp_testnet_faucet.rs b/wasm/wasm_source/src/vp_testnet_faucet.rs
index 1b8802df6e9..e7067e664f1 100644
--- a/wasm/wasm_source/src/vp_testnet_faucet.rs
+++ b/wasm/wasm_source/src/vp_testnet_faucet.rs
@@ -83,14 +83,14 @@ fn validate_tx(
                 let has_post: bool = ctx.has_key_post(key)?;
                 if owner == &addr {
                     if has_post {
-                        let vp: Vec<u8> = ctx.read_bytes_post(key)?.unwrap();
-                        return Ok(*valid_sig && is_vp_whitelisted(ctx, &vp)?);
+                        let vp_hash: Vec<u8> = ctx.read_bytes_post(key)?.unwrap();
+                        return Ok(*valid_sig && is_vp_whitelisted(ctx, &vp_hash)?);
                     } else {
                         return reject();
                     }
                 } else {
-                    let vp: Vec<u8> = ctx.read_bytes_post(key)?.unwrap();
-                    return is_vp_whitelisted(ctx, &vp);
+                    let vp_hash: Vec<u8> = ctx.read_bytes_post(key)?.unwrap();
+                    return is_vp_whitelisted(ctx, &vp_hash);
                 }
             } else {
                 // Allow any other key change if authorized by a signature
@@ -109,6 +109,7 @@ fn validate_tx(
 #[cfg(test)]
 mod tests {
     use address::testing::arb_non_internal_address;
+    use namada_test_utils::TestWasms;
     // Use this as `#[test]` annotation to enable logging
     use namada_tests::log::test;
     use namada_tests::tx::{self, tx_host_env, TestTxEnv};
@@ -121,9 +122,6 @@ mod tests {
     use super::*;

-    const VP_ALWAYS_TRUE_WASM: &str =
-        "../../wasm_for_tests/vp_always_true.wasm";
-
     /// Allows anyone to withdraw up to 1_000 tokens in a single tx
     pub const MAX_FREE_DEBIT: i128 = 1_000_000_000; // in micro units
@@ -197,8 +195,10 @@ mod tests {
         let mut tx_env = TestTxEnv::default();
         let vp_owner = address::testing::established_address_1();
-        let vp_code =
-            std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm");
let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // Spawn the accounts to be able to modify their storage tx_env.spawn_accounts([&vp_owner]); @@ -207,7 +207,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -233,8 +233,10 @@ mod tests { let vp_owner = address::testing::established_address_1(); let keypair = key::testing::keypair_1(); let public_key = &keypair.ref_to(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // Spawn the accounts to be able to modify their storage tx_env.spawn_accounts([&vp_owner]); @@ -245,7 +247,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); diff --git a/wasm/wasm_source/src/vp_token.rs b/wasm/wasm_source/src/vp_token.rs index 849e32efecd..cc7aee33114 100644 --- a/wasm/wasm_source/src/vp_token.rs +++ b/wasm/wasm_source/src/vp_token.rs @@ -1,7 +1,11 @@ //! A VP for a fungible token. Enforces that the total supply is unchanged in a //! transaction that moves balance(s). -use namada_vp_prelude::*; +use std::collections::BTreeSet; + +use namada_vp_prelude::address::{self, Address, InternalAddress}; +use namada_vp_prelude::storage::KeySeg; +use namada_vp_prelude::{storage, token, *}; #[validity_predicate] fn validate_tx( @@ -25,12 +29,242 @@ fn validate_tx( for key in keys_changed.iter() { if key.is_validity_predicate().is_some() { - let vp: Vec = ctx.read_bytes_post(key)?.unwrap(); - if !is_vp_whitelisted(ctx, &vp)? { + let vp_hash: Vec = ctx.read_bytes_post(key)?.unwrap(); + if !is_vp_whitelisted(ctx, &vp_hash)? { return reject(); } } } - token::vp(ctx, &addr, &keys_changed, &verifiers) + token_checks(ctx, &addr, &keys_changed, &verifiers) +} + +/// A token validity predicate checks that the total supply is preserved. +/// This implies that: +/// +/// - The value associated with the `total_supply` storage key may not change. +/// - For any balance changes, the total of outputs must be equal to the total +/// of inputs. +fn token_checks( + ctx: &Ctx, + token: &Address, + keys_touched: &BTreeSet, + verifiers: &BTreeSet
, +) -> VpResult { + let mut change: token::Change = 0; + for key in keys_touched.iter() { + let owner: Option<&Address> = token::is_balance_key(token, key) + .or_else(|| { + token::is_multitoken_balance_key(token, key).map(|a| a.1) + }); + + match owner { + None => { + if token::is_total_supply_key(key, token) { + // check if total supply is changed, which it should never + // be from a tx + let total_pre: token::Amount = ctx.read_pre(key)?.unwrap(); + let total_post: token::Amount = + ctx.read_post(key)?.unwrap(); + if total_pre != total_post { + return reject(); + } + } else if key.segments.first() == Some(&token.to_db_key()) { + // Unknown changes to this address space are disallowed, but + // unknown changes anywhere else are permitted + return reject(); + } + } + Some(owner) => { + // accumulate the change + let pre: token::Amount = match owner { + Address::Internal(InternalAddress::IbcMint) => { + token::Amount::max() + } + Address::Internal(InternalAddress::IbcBurn) => { + token::Amount::default() + } + _ => ctx.read_pre(key)?.unwrap_or_default(), + }; + let post: token::Amount = match owner { + Address::Internal(InternalAddress::IbcMint) => { + ctx.read_temp(key)?.unwrap_or_else(token::Amount::max) + } + Address::Internal(InternalAddress::IbcBurn) => { + ctx.read_temp(key)?.unwrap_or_default() + } + _ => ctx.read_post(key)?.unwrap_or_default(), + }; + let this_change = post.change() - pre.change(); + change += this_change; + // make sure that the spender approved the transaction + if this_change < 0 + && !(verifiers.contains(owner) || *owner == address::masp()) + { + return reject(); + } + } + } + } + Ok(change == 0) +} + +#[cfg(test)] +mod tests { + // Use this as `#[test]` annotation to enable logging + use namada::core::ledger::storage_api::token; + use namada_tests::log::test; + use namada_tests::tx::{self, TestTxEnv}; + use namada_tests::vp::*; + use namada_vp_prelude::storage_api::StorageWrite; + + use super::*; + + #[test] + fn test_transfer_inputs_eq_outputs_is_accepted() { + // Initialize a tx environment + let mut tx_env = TestTxEnv::default(); + let token = address::nam(); + let src = address::testing::established_address_1(); + let dest = address::testing::established_address_2(); + let total_supply = token::Amount::from(10_098_123); + + // Spawn the accounts to be able to modify their storage + tx_env.spawn_accounts([&token, &src, &dest]); + token::credit_tokens( + &mut tx_env.wl_storage, + &token, + &src, + total_supply, + ) + .unwrap(); + // Commit the initial state + tx_env.commit_tx_and_block(); + + // Initialize VP environment from a transaction + vp_host_env::init_from_tx(token.clone(), tx_env, |_address| { + // Apply a transfer + + let amount = token::Amount::from(100); + token::transfer(tx::ctx(), &token, &src, &dest, amount).unwrap(); + }); + + let vp_env = vp_host_env::take(); + let tx_data: Vec<u8> = vec![]; + let keys_changed: BTreeSet<storage::Key> = + vp_env.all_touched_storage_keys(); + let verifiers = vp_env.get_verifiers(); + vp_host_env::set(vp_env); + + assert!( + validate_tx(&CTX, tx_data, token, keys_changed, verifiers).unwrap(), + "A transfer where inputs == outputs should be accepted" + ); + }
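// NOTE (editorial): the two tests around this point exercise the signed
// accumulator at the heart of `token_checks`: every balance delta is summed
// into `change`, and the tx passes only when the deltas cancel out. A minimal
// model of that invariant, with plain `i128` deltas standing in for
// `token::Change` (a simplification, not the real types):

fn supply_preserved(deltas: &[i128]) -> bool {
    // debits (negative) must exactly offset credits (positive)
    deltas.iter().sum::<i128>() == 0
}

// e.g. `supply_preserved(&[-100, 100])` holds for the transfer above, while
// the `[+900, +100]` written by the rejection test below sums to 1000 and fails.
+ + #[test] + fn test_transfer_inputs_neq_outputs_is_rejected() { + // Initialize a tx environment + let mut tx_env = TestTxEnv::default(); + let token = address::nam(); + let src = address::testing::established_address_1(); + let dest = address::testing::established_address_2(); + let total_supply = token::Amount::from(10_098_123); + + // Spawn the accounts to be able to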
modify their storage + tx_env.spawn_accounts([&token, &src, &dest]); + token::credit_tokens( + &mut tx_env.wl_storage, + &token, + &src, + total_supply, + ) + .unwrap(); + // Commit the initial state + tx_env.commit_tx_and_block(); + + // Initialize VP environment from a transaction + vp_host_env::init_from_tx(token.clone(), tx_env, |_address| { + // Apply a transfer + + let amount_in = token::Amount::from(100); + let amount_out = token::Amount::from(900); + + let src_key = token::balance_key(&token, &src); + let src_balance = + token::read_balance(tx::ctx(), &token, &src).unwrap(); + let new_src_balance = src_balance + amount_out; + let dest_key = token::balance_key(&token, &dest); + let dest_balance = + token::read_balance(tx::ctx(), &token, &dest).unwrap(); + let new_dest_balance = dest_balance + amount_in; + tx::ctx().write(&src_key, new_src_balance).unwrap(); + tx::ctx().write(&dest_key, new_dest_balance).unwrap(); + }); + + let vp_env = vp_host_env::take(); + let tx_data: Vec<u8> = vec![]; + let keys_changed: BTreeSet<storage::Key> = + vp_env.all_touched_storage_keys(); + let verifiers = vp_env.get_verifiers(); + vp_host_env::set(vp_env); + + assert!( + !validate_tx(&CTX, tx_data, token, keys_changed, verifiers) + .unwrap(), + "A transfer where inputs != outputs should be rejected" + ); + } + + #[test] + fn test_total_supply_change_is_rejected() { + // Initialize a tx environment + let mut tx_env = TestTxEnv::default(); + let token = address::nam(); + let owner = address::testing::established_address_1(); + let total_supply = token::Amount::from(10_098_123); + + // Spawn the accounts to be able to modify their storage + tx_env.spawn_accounts([&token, &owner]); + token::credit_tokens( + &mut tx_env.wl_storage, + &token, + &owner, + total_supply, + ) + .unwrap(); + // Commit the initial state + tx_env.commit_tx_and_block(); + + let total_supply_key = token::total_supply_key(&token); + + // Initialize VP environment from a transaction + vp_host_env::init_from_tx(token.clone(), tx_env, |_address| { + // Try to change total supply from a tx + + let current_supply = tx::ctx() + .read::<token::Amount>(&total_supply_key) + .unwrap() + .unwrap_or_default(); + tx::ctx() + .write( + &total_supply_key, + current_supply + token::Amount::from(1), + ) + .unwrap(); + }); + + let vp_env = vp_host_env::take(); + let tx_data: Vec<u8> = vec![]; + let keys_changed: BTreeSet<storage::Key> = + vp_env.all_touched_storage_keys(); + let verifiers = vp_env.get_verifiers(); + vp_host_env::set(vp_env); + + assert!( + !validate_tx(&CTX, tx_data, token, keys_changed, verifiers) + .unwrap(), + "Change of a `total_supply` value should be rejected" + ); + } } diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index b8cbc20982e..0f5df005772 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -157,14 +157,15 @@ fn validate_tx( let has_post: bool = ctx.has_key_post(key)?; if owner == &addr { if has_post { - let vp: Vec<u8> = ctx.read_bytes_post(key)?.unwrap(); - *valid_sig && is_vp_whitelisted(ctx, &vp)? + let vp_hash: Vec<u8> = + ctx.read_bytes_post(key)?.unwrap(); + *valid_sig && is_vp_whitelisted(ctx, &vp_hash)? } else { false } } else { - let vp: Vec<u8> = ctx.read_bytes_post(key)?.unwrap(); - is_vp_whitelisted(ctx, &vp)? + let vp_hash: Vec<u8> = ctx.read_bytes_post(key)?.unwrap(); + is_vp_whitelisted(ctx, &vp_hash)?
} } KeyType::Masp => true, @@ -193,6 +194,7 @@ mod tests { use address::testing::arb_non_internal_address; use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::types::storage::Epoch; + use namada_test_utils::TestWasms; // Use this as `#[test]` annotation to enable logging use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; @@ -206,9 +208,6 @@ mod tests { use super::*; - const VP_ALWAYS_TRUE_WASM: &str = - "../../wasm_for_tests/vp_always_true.wasm"; - /// Test that a no-op transaction (i.e. no storage modifications) is accepted. #[test] fn test_no_op_transaction() { @@ -657,8 +656,10 @@ mod tests { let mut tx_env = TestTxEnv::default(); let vp_owner = address::testing::established_address_1(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // Spawn the accounts to be able to modify their storage tx_env.spawn_accounts([&vp_owner]); @@ -667,7 +668,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -694,8 +695,10 @@ mod tests { let vp_owner = address::testing::established_address_1(); let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // Spawn the accounts to be able to modify their storage tx_env.spawn_accounts([&vp_owner]); @@ -706,7 +709,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -735,8 +738,10 @@ mod tests { let vp_owner = address::testing::established_address_1(); let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // Spawn the accounts to be able to modify their storage tx_env.spawn_accounts([&vp_owner]); @@ -747,7 +752,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -775,10 +780,11 @@ mod tests { let vp_owner = address::testing::established_address_1(); let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); + tx_env.init_parameters(None, Some(vec![vp_hash.to_string()]), None); // Spawn the accounts to be able to modify their storage @@ -790,7 +796,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); });
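// NOTE (editorial): the `init_parameters(None, Some(vec![vp_hash.to_string()]), None)`
// calls above install a VP whitelist, so `is_vp_whitelisted` can compare a
// stored hash against the list instead of hashing a full wasm blob per check.
// A simplified model of that lookup; the empty-whitelist-allows-all rule and
// the hash casing are assumptions here, not the exact namada semantics:

use std::collections::HashSet;

fn vp_hash_allowed(whitelist: &HashSet<String>, vp_hash_hex: &str) -> bool {
    // an empty whitelist permits any VP; otherwise the hash must be listed
    whitelist.is_empty() || whitelist.contains(&vp_hash_hex.to_lowercase())
}
@@ -818,10 +824,11 @@ mod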
tests { let vp_owner = address::testing::established_address_1(); let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); + tx_env.init_parameters( None, Some(vec![vp_hash.to_string()]), @@ -837,7 +844,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -864,8 +871,10 @@ mod tests { let vp_owner = address::testing::established_address_1(); let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // hardcoded hash of VP_ALWAYS_TRUE_WASM tx_env.init_parameters(None, None, Some(vec!["E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855".to_string()])); @@ -879,7 +888,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); diff --git a/wasm/wasm_source/src/vp_validator.rs b/wasm/wasm_source/src/vp_validator.rs index c9e4700d8ee..d57d09b3045 100644 --- a/wasm/wasm_source/src/vp_validator.rs +++ b/wasm/wasm_source/src/vp_validator.rs @@ -166,14 +166,15 @@ fn validate_tx( let has_post: bool = ctx.has_key_post(key)?; if owner == &addr { if has_post { - let vp: Vec<u8> = ctx.read_bytes_post(key)?.unwrap(); - *valid_sig && is_vp_whitelisted(ctx, &vp)? + let vp_hash: Vec<u8> = + ctx.read_bytes_post(key)?.unwrap(); + *valid_sig && is_vp_whitelisted(ctx, &vp_hash)? } else { false } } else { - let vp: Vec<u8> = ctx.read_bytes_post(key)?.unwrap(); - is_vp_whitelisted(ctx, &vp)? + let vp_hash: Vec<u8> = ctx.read_bytes_post(key)?.unwrap(); + is_vp_whitelisted(ctx, &vp_hash)? } } KeyType::Unknown => { @@ -201,6 +202,7 @@ mod tests { use address::testing::arb_non_internal_address; use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::types::storage::Epoch; + use namada_test_utils::TestWasms; // Use this as `#[test]` annotation to enable logging use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; @@ -215,9 +217,6 @@ mod tests { use super::*; - const VP_ALWAYS_TRUE_WASM: &str = - "../../wasm_for_tests/vp_always_true.wasm"; -
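// NOTE (editorial): the validate_tx hunks above (vp_user and vp_validator
// alike) gate changes to VP storage keys on two independent facts: a valid
// owner signature and a whitelisted replacement VP. Reduced to a truth table
// with hypothetical names, purely as an illustration:

fn vp_change_allowed(is_own_vp: bool, valid_sig: bool, whitelisted: bool) -> bool {
    if is_own_vp {
        // rewriting your own VP needs your signature and a whitelisted VP
        valid_sig && whitelisted
    } else {
        // a change to someone else's VP only has to stay within the whitelist
        whitelisted
    }
}
/// Test that a no-op transaction (i.e. no storage modifications) is accepted.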
#[test] fn test_no_op_transaction() { @@ -678,8 +677,10 @@ mod tests { let mut tx_env = TestTxEnv::default(); let vp_owner = address::testing::established_address_1(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // Spawn the accounts to be able to modify their storage tx_env.spawn_accounts([&vp_owner]); @@ -688,7 +689,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -715,8 +716,10 @@ mod tests { let vp_owner = address::testing::established_address_1(); let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // Spawn the accounts to be able to modify their storage tx_env.spawn_accounts([&vp_owner]); @@ -727,7 +730,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -756,8 +759,10 @@ mod tests { let vp_owner = address::testing::established_address_1(); let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // Spawn the accounts to be able to modify their storage tx_env.spawn_accounts([&vp_owner]); @@ -768,7 +773,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -796,10 +801,11 @@ mod tests { let vp_owner = address::testing::established_address_1(); let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); + tx_env.init_parameters(None, Some(vec![vp_hash.to_string()]), None); // Spawn the accounts to be able to modify their storage @@ -811,7 +817,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -839,10 +845,11 @@ mod tests { let vp_owner = address::testing::established_address_1(); let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); + tx_env.init_parameters( None, Some(vec![vp_hash.to_string()]), @@ -858,7 +865,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() 
- .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); @@ -885,8 +892,10 @@ mod tests { let vp_owner = address::testing::established_address_1(); let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); + let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); + let vp_hash = sha256(&vp_code); + // for the update + tx_env.store_wasm_code(vp_code); // hardcoded hash of VP_ALWAYS_TRUE_WASM tx_env.init_parameters(None, None, Some(vec!["E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855".to_string()])); @@ -900,7 +909,7 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction tx::ctx() - .update_validity_predicate(address, &vp_code) + .update_validity_predicate(address, &vp_hash) .unwrap(); }); diff --git a/wasm_for_tests/tx_memory_limit.wasm b/wasm_for_tests/tx_memory_limit.wasm index f8db36d0043..330e3427268 100755 Binary files a/wasm_for_tests/tx_memory_limit.wasm and b/wasm_for_tests/tx_memory_limit.wasm differ diff --git a/wasm_for_tests/tx_mint_tokens.wasm b/wasm_for_tests/tx_mint_tokens.wasm index 5713105bbe6..0a0d4853252 100755 Binary files a/wasm_for_tests/tx_mint_tokens.wasm and b/wasm_for_tests/tx_mint_tokens.wasm differ diff --git a/wasm_for_tests/tx_proposal_code.wasm b/wasm_for_tests/tx_proposal_code.wasm index f05e0e95bd1..ec1656dbabb 100755 Binary files a/wasm_for_tests/tx_proposal_code.wasm and b/wasm_for_tests/tx_proposal_code.wasm differ diff --git a/wasm_for_tests/tx_read_storage_key.wasm b/wasm_for_tests/tx_read_storage_key.wasm index ff5d78b8e1c..9a511a0cc7a 100755 Binary files a/wasm_for_tests/tx_read_storage_key.wasm and b/wasm_for_tests/tx_read_storage_key.wasm differ diff --git a/wasm_for_tests/tx_write.wasm b/wasm_for_tests/tx_write.wasm index 761eded647e..ae2cb1c6e30 100755 Binary files a/wasm_for_tests/tx_write.wasm and b/wasm_for_tests/tx_write.wasm differ diff --git a/wasm_for_tests/vp_always_false.wasm b/wasm_for_tests/vp_always_false.wasm index f0693e332dd..e770000aeec 100755 Binary files a/wasm_for_tests/vp_always_false.wasm and b/wasm_for_tests/vp_always_false.wasm differ diff --git a/wasm_for_tests/vp_always_true.wasm b/wasm_for_tests/vp_always_true.wasm index 27fe81fde50..0ebbab310cd 100755 Binary files a/wasm_for_tests/vp_always_true.wasm and b/wasm_for_tests/vp_always_true.wasm differ diff --git a/wasm_for_tests/vp_eval.wasm b/wasm_for_tests/vp_eval.wasm index 192b3a93670..08456c8fbe1 100755 Binary files a/wasm_for_tests/vp_eval.wasm and b/wasm_for_tests/vp_eval.wasm differ diff --git a/wasm_for_tests/vp_memory_limit.wasm b/wasm_for_tests/vp_memory_limit.wasm index 717ceeba4d5..19e8950beb9 100755 Binary files a/wasm_for_tests/vp_memory_limit.wasm and b/wasm_for_tests/vp_memory_limit.wasm differ diff --git a/wasm_for_tests/vp_read_storage_key.wasm b/wasm_for_tests/vp_read_storage_key.wasm index 3790bd302dd..e808459a13a 100755 Binary files a/wasm_for_tests/vp_read_storage_key.wasm and b/wasm_for_tests/vp_read_storage_key.wasm differ diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 4f8a7e98a31..2225bce9db6 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -4,11 +4,11 @@ version = 3 [[package]] name = "addr2line" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ - "gimli 0.26.2", + "gimli 0.27.2", ] [[package]] @@ -44,16 +44,16 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.19" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" +checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" dependencies = [ "memchr", ] @@ -69,9 +69,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.66" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "ark-bls12-381" @@ -98,6 +98,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ark-ed-on-bls12-381" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b7ada17db3854f5994e74e60b18e10e818594935ee7e1d329800c117b32970" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-std", +] + [[package]] name = "ark-ff" version = "0.3.0" @@ -112,7 +124,7 @@ dependencies = [ "num-bigint", "num-traits", "paste", - "rustc_version", + "rustc_version 0.3.3", "zeroize", ] @@ -123,7 +135,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -135,7 +147,20 @@ dependencies = [ "num-bigint", "num-traits", "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "ark-poly" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b0f78f47537c2f15706db7e98fe64cc1711dbf9def81218194e17239e53e5aa" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.11.2", ] [[package]] @@ -157,7 +182,7 @@ checksum = "8dd4e5f0bf8285d5ed538d27fab7411f3e297908fd93c62195de8bee3f199e82" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -172,9 +197,9 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -188,72 +213,225 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +[[package]] +name = "async-channel" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] + +[[package]] +name = "async-executor" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" +dependencies = [ + 
"async-lock", + "async-task", + "concurrent-queue", + "fastrand", + "futures-lite", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" +dependencies = [ + "async-channel", + "async-executor", + "async-io", + "async-lock", + "blocking", + "futures-lite", + "once_cell", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock", + "autocfg", + "cfg-if 1.0.0", + "concurrent-queue", + "futures-lite", + "log", + "parking", + "polling", + "rustix", + "slab", + "socket2", + "waker-fn", +] + +[[package]] +name = "async-lock" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" +dependencies = [ + "event-listener", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-channel", + "async-global-executor", + "async-io", + "async-lock", + "crossbeam-utils 0.8.15", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + [[package]] name = "async-stream" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", + "pin-project-lite", ] [[package]] name = "async-stream-impl" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] +[[package]] +name = "async-task" +version = "4.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" + [[package]] name = "async-trait" -version = "0.1.58" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "async-tungstenite" -version = "0.12.0" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e00550829ef8e2c4115250d0ee43305649b0fa95f78a32ce5b07da0b73d95c5c" +checksum = "a1b71b31561643aa8e7df3effe284fa83ab1a840e52294c5f4bd7bfd8b2becbb" dependencies = [ "futures-io", "futures-util", "log", "pin-project-lite", + "rustls-native-certs 0.6.2", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tungstenite", - "webpki-roots", ] +[[package]] +name = "atomic-waker" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" + [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "axum" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" -version = "0.3.66" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ "addr2line", "cc", "cfg-if 1.0.0", "libc", "miniz_oxide", - "object 0.29.0", + "object 0.30.3", "rustc-demangle", ] @@ -269,6 +447,12 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" + [[package]] name = "base64ct" version = "1.0.1" @@ -281,18 +465,24 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" +[[package]] +name = "bech32" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" + [[package]] name = "bellman" version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43473b34abc4b0b405efa0a250bac87eea888182b21687ee5c8115d279b0fda5" dependencies = [ - "bitvec", + "bitvec 0.22.3", "blake2s_simd 0.5.11", "byteorder", - "crossbeam-channel 0.5.6", - "ff", - "group", + "crossbeam-channel 0.5.8", + "ff 0.11.1", + "group 0.11.0", "lazy_static", "log", "num_cpus", @@ -312,6 +502,24 @@ dependencies = [ "crunchy 0.1.6", ] +[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" +dependencies = [ + "serde", +] + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bip0039" version = "0.9.0" @@ -343,21 +551,21 @@ checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" [[package]] name = "bitcoin" -version = "0.28.0" +version = "0.29.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "42b2a9a8e3c7544f5ce2b475f2f56580a3102b37e0ee001558ad4faedcf56cf4" +checksum = "0694ea59225b0c5f3cb405ff3f670e4828358ed26aec49dc352f730f0cb1a8a3" dependencies = [ - "bech32", + "bech32 0.9.1", "bitcoin_hashes", - "secp256k1", + "secp256k1 0.24.3", "serde", ] [[package]] name = "bitcoin_hashes" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006cc91e1a1d99819bc5b8214be3555c1f0611b169f527a1fdc54ed1f2b745b0" +checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" dependencies = [ "serde", ] @@ -374,10 +582,31 @@ version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5237f00a8c86130a0cc317830e558b966dd7850d48a953d998c813f01a41b527" dependencies = [ - "funty", - "radium", + "funty 1.2.0", + "radium 0.6.2", + "tap", + "wyz 0.4.0", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty 2.0.0", + "radium 0.7.0", "tap", - "wyz", + "wyz 0.5.1", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.6", ] [[package]] @@ -388,18 +617,18 @@ checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ "arrayref", "arrayvec 0.5.2", - "constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] name = "blake2b_simd" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127" +checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" dependencies = [ "arrayref", "arrayvec 0.7.2", - "constant_time_eq", + "constant_time_eq 0.2.5", ] [[package]] @@ -410,32 +639,32 @@ checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" dependencies = [ "arrayref", "arrayvec 0.5.2", - "constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] name = "blake2s_simd" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db539cc2b5f6003621f1cd9ef92d7ded8ea5232c7de0f9faa2de251cd98730d4" +checksum = "6637f448b9e61dfadbdcbae9a885fadee1f3eaffb1f8d3c1965d3ade8bdfd44f" dependencies = [ "arrayref", "arrayvec 0.7.2", - "constant_time_eq", + "constant_time_eq 0.2.5", ] [[package]] name = "blake3" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08e53fc5a564bb15bfe6fae56bd71522205f1f91893f9c0116edad6496c183f" +checksum = "42ae2468a89544a466886840aa467a25b766499f4f04bf7d9fcd10ecee9fccef" dependencies = [ "arrayref", "arrayvec 0.7.2", "cc", "cfg-if 1.0.0", - "constant_time_eq", - "digest 0.10.5", + "constant_time_eq 0.2.5", + "digest 0.10.6", ] [[package]] @@ -444,15 +673,14 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding", "generic-array", ] [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -473,14 +701,29 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +[[package]] +name = "blocking" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" +dependencies = [ + "async-channel", + "async-lock", + "async-task", + "atomic-waker", + "fastrand", + "futures-lite", + "log", +] + [[package]] name = "bls12_381" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a829c821999c06be34de314eaeb7dd1b42be38661178bc26ad47a4eacebdb0f9" dependencies = [ - "ff", - "group", + "ff 0.11.1", + "group 0.11.0", "pairing", "rand_core 0.6.4", "subtle", @@ -502,9 +745,9 @@ source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", - "proc-macro-crate", + "proc-macro-crate 0.1.5", "proc-macro2", - "syn", + "syn 1.0.109", ] [[package]] @@ -514,7 +757,7 @@ source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -524,34 +767,47 @@ source = "git+https://github.com/heliaxdev/borsh-rs.git?rev=cd5223e5103c4f139e0c dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] +[[package]] +name = "bs58" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" + [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" + +[[package]] +name = "byte-slice-cast" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytecheck" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f" +checksum = "13fe11640a23eb24562225322cd3e452b93a3d4091d62fab69c70542fcd17d1f" dependencies = [ "bytecheck_derive", "ptr_meta", + "simdutf8", ] [[package]] name = "bytecheck_derive" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" +checksum = "e31225543cb46f81a7e224762764f4a6a0f097b1db0b175f69e8065efaa42de5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -568,15 +824,18 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +dependencies = [ + "serde", +] [[package]] name = "camino" -version = "1.1.1" +version = "1.1.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad0e1e3e88dd237a156ab9f571021b8a158caa0ae44b1968a241efb5144c1e" +checksum = "c530edf18f37068ac2d977409ed5cd50d53d73bc653c7647b48eb78976ac9ae2" dependencies = [ "serde", ] @@ -598,16 +857,16 @@ checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" dependencies = [ "camino", "cargo-platform", - "semver 1.0.14", + "semver 1.0.17", "serde", "serde_json", ] [[package]] name = "cc" -version = "1.0.76" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cfg-if" @@ -648,9 +907,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "num-integer", @@ -682,30 +941,29 @@ version = "0.5.0" source = "git+https://github.com/marmeladema/clru-rs.git?rev=71ca566#71ca566915f21f3c308091ca7756a91b0f8b5afc" [[package]] -name = "codespan-reporting" -version = "0.11.1" +name = "concat-idents" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +checksum = "0fe0e1d9f7de897d18e590a7496b5facbe87813f746cf4b8db596ba77e07e832" dependencies = [ - "termcolor", - "unicode-width", + "quote", + "syn 1.0.109", ] [[package]] -name = "concat-idents" -version = "1.1.4" +name = "concurrent-queue" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fe0e1d9f7de897d18e590a7496b5facbe87813f746cf4b8db596ba77e07e832" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ - "quote", - "syn", + "crossbeam-utils 0.8.15", ] [[package]] name = "const-oid" -version = "0.7.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" [[package]] name = "constant_time_eq" @@ -713,6 +971,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "constant_time_eq" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b" + [[package]] name = "contracts" version = "0.6.3" @@ -721,7 +985,7 @@ checksum = "f1d1429e3bd78171c65aa010eabcdf8f863ba3254728dbfb0ad4b1545beac15c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -736,15 +1000,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" dependencies = [ "libc", ] @@ -830,50 +1094,35 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.15", ] [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.11", - "crossbeam-utils 0.8.12", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", + "crossbeam-epoch", + "crossbeam-utils 0.8.15", ] [[package]] name = "crossbeam-epoch" -version = "0.9.11" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", - "memoffset 0.6.5", + "crossbeam-utils 0.8.15", + "memoffset 0.8.0", "scopeguard", ] @@ -890,9 +1139,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.12" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if 1.0.0", ] @@ -911,9 +1160,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.3.2" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -966,13 +1215,29 @@ dependencies = [ "crypto_api", ] +[[package]] +name = "ct-codecs" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3b7eb4404b8195a9abb6356f4ac07d8ba267045c8d6d220ac4dc992e6cc75df" + [[package]] name = "ct-logs" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" dependencies = [ - "sct", + "sct 0.6.1", +] + +[[package]] +name = "ctor" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote", + "syn 1.0.109", ] [[package]] @@ -1001,55 +1266,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "cxx" -version = "1.0.81" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "97abf9f0eca9e52b7f81b945524e76710e6cb2366aead23b7d4fbf72e281f888" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc32cc5fea1d894b77d269ddb9f192110069a8a9c1f1d441195fba90553dea3" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2", - "quote", - "scratch", - "syn", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ca220e4794c934dc6b1207c3b42856ad4c302f2df1712e9f8d2eec5afaacf1f" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b846f081361125bfc8dc9d3940c84e1fd83ba54bbca7b17cd29483c828be0704" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "darling" -version = "0.14.2" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" +checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" dependencies = [ "darling_core", "darling_macro", @@ -1057,43 +1278,49 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.2" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "darling_macro" -version = "0.14.2" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" +checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "data-encoding" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" [[package]] name = "der" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ "const-oid", ] +[[package]] +name = "derivation-path" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e5c37193a1db1d8ed868c03ec7b152175f26160a5b740e5e484143877e0adf0" + [[package]] name = "derivative" version = "2.2.0" @@ -1102,7 +1329,7 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1113,7 +1340,7 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1127,11 +1354,11 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.5" +version = "0.10.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -1177,6 +1404,23 @@ dependencies = [ "winapi", ] +[[package]] +name = "displaydoc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.15", +] + +[[package]] +name = "dyn-clone" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" + [[package]] name = "dynasm" version = "1.2.3" @@ -1189,7 +1433,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1205,9 +1449,9 @@ dependencies = [ [[package]] name = "ecdsa" -version = "0.13.4" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ "der", "elliptic-curve", @@ -1217,10 +1461,11 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ + "serde", "signature", ] @@ -1247,28 +1492,45 @@ checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ "curve25519-dalek", "ed25519", + "merlin", + "rand 0.7.3", + "serde", + "serde_bytes", "sha2 0.9.9", "zeroize", ] +[[package]] +name = "ed25519-dalek-bip32" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d2be62a4061b872c8c0873ee4fc6f101ce7b889d039f019c5fa2af471a59908" +dependencies = [ + "derivation-path", + "ed25519-dalek", + "hmac 0.12.1", + "sha2 0.10.6", +] + [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "elliptic-curve" -version = "0.11.12" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ "base16ct", "crypto-bigint", "der", - "ff", + "digest 0.10.6", + "ff 0.12.1", "generic-array", - "group", + "group 0.12.1", "rand_core 0.6.4", "sec1", "subtle", @@ -1292,28 +1554,28 @@ checksum = "c134c37760b27a871ba422106eedbb8247da973a09e82558bf26d619c882b159" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "enumset" -version = "1.0.12" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19be8061a06ab6f3a6cf21106c873578bf01bd42ad15e0311a9c76161cb1c753" +checksum = "e875f1719c16de097dee81ed675e2d9bb63096823ed3f0ca827b7dea3028bbbb" dependencies = [ "enumset_derive", ] 
[[package]] name = "enumset_derive" -version = "0.6.1" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e7b551eba279bf0fa88b83a46330168c1560a52a94f5126f892f0b364ab3e0" +checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af" dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -1321,10 +1583,40 @@ name = "equihash" version = "0.1.0" source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" dependencies = [ - "blake2b_simd 1.0.0", + "blake2b_simd 1.0.1", "byteorder", ] +[[package]] +name = "erased-serde" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f2b0c2380453a92ea8b6c8e5f64ecaafccddde8ceab55ff7a8ac1029f894569" +dependencies = [ + "serde", +] + +[[package]] +name = "errno" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "error-chain" version = "0.12.4" @@ -1334,6 +1626,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + [[package]] name = "eyre" version = "0.6.8" @@ -1352,17 +1650,54 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] +[[package]] +name = "ferveo" +version = "0.1.1" +source = "git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" +dependencies = [ + "anyhow", + "ark-bls12-381", + "ark-ec", + "ark-ed-on-bls12-381", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "bincode", + "blake2", + "blake2b_simd 1.0.1", + "borsh", + "digest 0.10.6", + "ed25519-dalek", + "either", + "ferveo-common", + "group-threshold-cryptography", + "hex", + "itertools", + "measure_time", + "miracl_core", + "num", + "rand 0.7.3", + "rand 0.8.5", + "serde", + "serde_bytes", + "serde_json", + "subproductdomain", + "subtle", + "zeroize", +] + [[package]] name = "ferveo-common" version = "0.1.0" -source = "git+https://github.com/anoma/ferveo#1022ab2c7ccc689abcc05e5a08df6fb0c2a3fc65" +source = "git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" dependencies = [ "anyhow", "ark-ec", @@ -1378,11 +1713,33 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" dependencies = [ - "bitvec", + "bitvec 0.22.3", "rand_core 0.6.4", "subtle", ] +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -1431,14 +1788,19 @@ dependencies = [ [[package]] name = "funty" version = "1.2.0" +source = "git+https://github.com/bitvecto-rs/funty/?rev=7ef0d890fbcd8b3def1635ac1a877fc298488446#7ef0d890fbcd8b3def1635ac1a877fc298488446" + +[[package]] +name = "funty" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -1451,9 +1813,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -1461,15 +1823,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -1478,38 +1840,53 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" + +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.28" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -1525,9 +1902,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1540,21 +1917,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ "cfg-if 1.0.0", - "js-sys", "libc", "wasi 0.9.0+wasi-snapshot-preview1", - "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if 1.0.0", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -1570,15 +1947,27 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.2" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "gloo-timers" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] [[package]] name = "group" @@ -1587,11 +1976,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" dependencies = [ "byteorder", - "ff", + "ff 0.11.1", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", "rand_core 0.6.4", "subtle", ] +[[package]] +name = "group-threshold-cryptography" +version = "0.1.0" +source = 
"git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" +dependencies = [ + "anyhow", + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "blake2b_simd 1.0.1", + "chacha20", + "hex", + "itertools", + "miracl_core", + "rand 0.8.5", + "rand_core 0.6.4", + "rayon", + "subproductdomain", + "thiserror", +] + [[package]] name = "gumdrop" version = "0.8.1" @@ -1609,14 +2033,14 @@ checksum = "729f9bd3449d77e7831a18abfb7ba2f99ee813dfd15b8c2167c9a54ba20aa99d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "h2" -version = "0.3.15" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ "bytes", "fnv", @@ -1627,7 +2051,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util", "tracing", ] @@ -1644,8 +2068,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f186b85ed81082fb1cf59d52b0111f02915e89a4ac61d292b38d075e570f3a9" dependencies = [ "blake2b_simd 0.5.11", - "ff", - "group", + "ff 0.11.1", + "group 0.11.0", "pasta_curves", "rand 0.8.5", "rayon", @@ -1671,9 +2095,9 @@ dependencies = [ [[package]] name = "hdpath" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dafb09e5d85df264339ad786a147d9de1da13687a3697c52244297e5e7c32d9c" +checksum = "09ae1615f843ce3981b47468f3f7c435ac17deb33c2261e64d7f1e87f5c11acc" dependencies = [ "byteorder", ] @@ -1684,7 +2108,7 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64", + "base64 0.13.1", "bitflags", "bytes", "headers-core", @@ -1705,22 +2129,25 @@ dependencies = [ [[package]] name = "heck" -version = "0.3.3" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.1.19" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" @@ -1747,6 +2174,15 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.6", +] + [[package]] name = "hmac-drbg" version = "0.3.0" @@ -1760,9 +2196,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = 
"bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -1810,9 +2246,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.23" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes", "futures-channel", @@ -1844,11 +2280,11 @@ dependencies = [ "http", "hyper", "hyper-rustls", - "rustls-native-certs", + "rustls-native-certs 0.5.0", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "tower-service", - "webpki", + "webpki 0.21.4", ] [[package]] @@ -1861,11 +2297,11 @@ dependencies = [ "futures-util", "hyper", "log", - "rustls", - "rustls-native-certs", + "rustls 0.19.1", + "rustls-native-certs 0.5.0", "tokio", - "tokio-rustls", - "webpki", + "tokio-rustls 0.22.0", + "webpki 0.21.4", "webpki-roots", ] @@ -1883,113 +2319,138 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] name = "ibc" -version = "0.14.0" -source = "git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2#f4703dfe2c1f25cc431279ab74f10f3e0f6827e2" +version = "0.36.0" +source = "git+https://github.com/heliaxdev/cosmos-ibc-rs.git?rev=2d7edc16412b60cabf78163fe24a6264e11f77a9#2d7edc16412b60cabf78163fe24a6264e11f77a9" dependencies = [ "bytes", + "cfg-if 1.0.0", "derive_more", - "flex-error", - "ibc-proto", + "displaydoc", + "dyn-clone", + "erased-serde", + "ibc-proto 0.26.0", "ics23", "num-traits", + "parking_lot", + "primitive-types", "prost", - "prost-types", "safe-regex", "serde", "serde_derive", "serde_json", "sha2 0.10.6", "subtle-encoding", - "tendermint", - "tendermint-light-client-verifier", + "tendermint 0.23.6", + "tendermint-light-client-verifier 0.23.6", "tendermint-proto 0.23.6", - "tendermint-testgen", + "tendermint-testgen 0.23.6", "time", "tracing", + "uint", ] [[package]] name = "ibc-proto" -version = "0.17.1" -source = "git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2#f4703dfe2c1f25cc431279ab74f10f3e0f6827e2" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b46bcc4540116870cfb184f338b45174a7560ad46dd74e4cb4e81e005e2056" dependencies = [ - "base64", + "base64 0.13.1", "bytes", + "flex-error", "prost", - "prost-types", "serde", - "tendermint-proto 0.23.6", + "subtle-encoding", + "tendermint-proto 0.28.0", "tonic", ] +[[package]] +name = "ibc-proto" +version = "0.26.0" +source = "git+https://github.com/heliaxdev/ibc-proto-rs.git?rev=7e527b5b8c95d83351e93ceafc14ac853224283f#7e527b5b8c95d83351e93ceafc14ac853224283f" +dependencies = [ + "base64 0.13.1", + "bytes", 
+ "flex-error", + "prost", + "serde", + "subtle-encoding", + "tendermint-proto 0.23.6", +] + [[package]] name = "ibc-relayer" -version = "0.14.0" -source = "git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2#f4703dfe2c1f25cc431279ab74f10f3e0f6827e2" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74599e4f602e8487c47955ca9f20aebc0199da3289cc6d5e2b39c6e4b9e65086" dependencies = [ "anyhow", "async-stream", - "bech32", + "bech32 0.9.1", "bitcoin", + "bs58", "bytes", - "crossbeam-channel 0.5.6", + "crossbeam-channel 0.5.8", + "digest 0.10.6", "dirs-next", + "ed25519", + "ed25519-dalek", + "ed25519-dalek-bip32", "flex-error", "futures", + "generic-array", "hdpath", "hex", "http", "humantime", "humantime-serde", - "ibc", - "ibc-proto", + "ibc-proto 0.24.1", + "ibc-relayer-types", "itertools", - "k256", "moka", - "nanoid", "num-bigint", "num-rational", "prost", - "prost-types", "regex", "retry", - "ripemd160", - "semver 1.0.14", + "ripemd", + "secp256k1 0.24.3", + "semver 1.0.17", "serde", "serde_derive", "serde_json", "sha2 0.10.6", "signature", + "strum", "subtle-encoding", - "tendermint", + "tendermint 0.28.0", "tendermint-light-client", - "tendermint-light-client-verifier", - "tendermint-proto 0.23.6", - "tendermint-rpc", + "tendermint-light-client-verifier 0.28.0", + "tendermint-rpc 0.28.0", "thiserror", "tiny-bip39", "tiny-keccak", @@ -1997,23 +2458,53 @@ dependencies = [ "toml", "tonic", "tracing", + "uuid 1.3.2", +] + +[[package]] +name = "ibc-relayer-types" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc9fadabf5846e11b8f9a4093a2cb7d2920b0ef49323b4737739e69ed9bfa2bc" +dependencies = [ + "bytes", + "derive_more", + "dyn-clone", + "erased-serde", + "flex-error", + "ibc-proto 0.24.1", + "ics23", + "itertools", + "num-rational", + "primitive-types", + "prost", + "safe-regex", + "serde", + "serde_derive", + "serde_json", + "subtle-encoding", + "tendermint 0.28.0", + "tendermint-light-client-verifier 0.28.0", + "tendermint-proto 0.28.0", + "tendermint-rpc 0.28.0", + "tendermint-testgen 0.28.0", + "time", "uint", ] [[package]] name = "ics23" -version = "0.7.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d454cc0a22bd556cc3d3c69f9d75a392a36244634840697a4b9eb81bc5c8ae0" +checksum = "ca44b684ce1859cff746ff46f5765ab72e12e3c06f76a8356db8f9a2ecf43f17" dependencies = [ "anyhow", "bytes", "hex", "prost", - "ripemd160", - "sha2 0.9.9", + "ripemd", + "sha2 0.10.6", "sha3", - "sp-std", ] [[package]] @@ -2032,6 +2523,35 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-serde" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "incrementalmerkletree" version = "0.2.0" @@ -2058,9 +2578,9 @@ dependencies = [ [[package]] 
name = "indexmap" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", @@ -2068,21 +2588,26 @@ dependencies = [ ] [[package]] -name = "input_buffer" -version = "0.4.0" +name = "instant" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "bytes", + "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] -name = "instant" -version = "0.1.12" +name = "io-lifetimes" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" dependencies = [ - "cfg-if 1.0.0", + "hermit-abi 0.3.1", + "libc", + "windows-sys 0.48.0", ] [[package]] @@ -2096,15 +2621,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "68c16e1bfd491478ab155fd8b4896b86f9ede344949b641e61501e07c2b8b4d5" dependencies = [ "wasm-bindgen", ] @@ -2115,32 +2640,43 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7baec19d4e83f9145d4891178101a604565edff9645770fc979804138b04c" dependencies = [ - "bitvec", + "bitvec 0.22.3", "bls12_381", - "ff", - "group", + "ff 0.11.1", + "group 0.11.0", "rand_core 0.6.4", "subtle", ] [[package]] name = "k256" -version = "0.10.4" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" +checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ "cfg-if 1.0.0", "ecdsa", "elliptic-curve", - "sec1", - "sha2 0.9.9", + "sha2 0.10.6", ] [[package]] name = "keccak" -version = "0.1.2" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] [[package]] name = "lazy_static" @@ -2156,9 +2692,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.137" +version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" +checksum = 
"2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "libloading" @@ -2182,7 +2718,7 @@ version = "0.7.0" source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" dependencies = [ "arrayref", - "base64", + "base64 0.13.1", "digest 0.9.0", "hmac-drbg", "libsecp256k1-core", @@ -2221,13 +2757,10 @@ dependencies = [ ] [[package]] -name = "link-cplusplus" -version = "1.0.7" +name = "linux-raw-sys" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" -dependencies = [ - "cc", -] +checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f" [[package]] name = "lock_api" @@ -2246,6 +2779,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if 1.0.0", + "value-bag", ] [[package]] @@ -2266,7 +2800,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0fbfc88337168279f2e9ae06e157cfed4efd3316e14dc96ed074d4f2e6c5952" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2285,23 +2819,25 @@ source = "git+https://github.com/anoma/masp?rev=bee40fc465f6afbd10558d12fe96eb17 dependencies = [ "aes", "bip0039", - "bitvec", - "blake2b_simd 1.0.0", - "blake2s_simd 1.0.0", + "bitvec 0.22.3", + "blake2b_simd 1.0.1", + "blake2s_simd 1.0.1", "bls12_381", "borsh", "byteorder", "chacha20poly1305", "crypto_api_chachapoly", - "ff", + "ff 0.11.1", "fpe", - "group", + "group 0.11.0", "hex", "incrementalmerkletree", "jubjub", "lazy_static", "rand 0.8.5", "rand_core 0.6.4", + "ripemd160", + "secp256k1 0.20.3", "serde", "sha2 0.9.9", "subtle", @@ -2315,12 +2851,12 @@ version = "0.5.0" source = "git+https://github.com/anoma/masp?rev=bee40fc465f6afbd10558d12fe96eb1742eee45c#bee40fc465f6afbd10558d12fe96eb1742eee45c" dependencies = [ "bellman", - "blake2b_simd 1.0.0", + "blake2b_simd 1.0.1", "bls12_381", "byteorder", "directories", - "ff", - "group", + "ff 0.11.1", + "group 0.11.0", "itertools", "jubjub", "lazy_static", @@ -2339,12 +2875,28 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "matchit" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" + [[package]] name = "maybe-uninit" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +[[package]] +name = "measure_time" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56220900f1a0923789ecd6bf25fbae8af3b2f1ff3e9e297fc9b6b8674dd4d852" +dependencies = [ + "instant", + "log", +] + [[package]] name = "memchr" version = "2.5.0" @@ -2353,27 +2905,27 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memmap2" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" dependencies = [ "libc", ] [[package]] name = "memoffset" -version = "0.5.6" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] [[package]] name = "memoffset" -version = "0.6.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg", ] @@ -2393,53 +2945,72 @@ dependencies = [ "nonempty", ] +[[package]] +name = "merlin" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miniz_oxide" -version = "0.5.4" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] +[[package]] +name = "miracl_core" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94c7128ba23c81f6471141b90f17654f89ef44a56e14b8a4dd0fddfccd655277" + [[package]] name = "moka" -version = "0.8.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "975fa04238144061e7f8df9746b2e9cd93ef85881da5548d842a7c6a4b614415" +checksum = "19b9268097a2cf211ac9955b1cc95e80fa84fff5c2d13ba292916445dc8a311f" dependencies = [ - "crossbeam-channel 0.5.6", - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.8.12", + "crossbeam-channel 0.5.8", + "crossbeam-epoch", + "crossbeam-utils 0.8.15", "num_cpus", "once_cell", "parking_lot", "quanta", + "rustc_version 0.4.0", "scheduled-thread-pool", "skeptic", "smallvec", "tagptr", "thiserror", "triomphe", - "uuid 1.2.1", + "uuid 1.3.2", ] [[package]] @@ -2456,10 +3027,12 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.14.0" +version = "0.16.0" dependencies = [ + "async-std", "async-trait", "bellman", + "bimap", "bls12_381", "borsh", "circular-queue", @@ -2467,27 +3040,34 @@ dependencies = [ "data-encoding", "derivative", "ibc", - "ibc-proto", + "ibc-proto 0.26.0", "itertools", "loupe", "masp_primitives", "masp_proofs", "namada_core", "namada_proof_of_stake", + "orion", "parity-wasm", "paste", "proptest", "prost", "pwasm-utils", + "rand 0.8.5", + "rand_core 0.6.4", "rayon", "rust_decimal", "rust_decimal_macros", + "serde", "serde_json", "sha2 0.9.9", "tempfile", - "tendermint", + "tendermint 0.23.6", "tendermint-proto 0.23.6", + "tendermint-rpc 0.23.6", "thiserror", + "tokio", + "toml", "tracing", 
"wasmer", "wasmer-cache", @@ -2501,20 +3081,23 @@ dependencies = [ [[package]] name = "namada_core" -version = "0.14.0" +version = "0.16.0" dependencies = [ "ark-bls12-381", + "ark-ec", "ark-serialize", - "bech32", + "bech32 0.8.1", "bellman", "borsh", "chrono", "data-encoding", "derivative", "ed25519-consensus", + "ferveo", "ferveo-common", + "group-threshold-cryptography", "ibc", - "ibc-proto", + "ibc-proto 0.26.0", "ics23", "index-set", "itertools", @@ -2533,7 +3116,7 @@ dependencies = [ "serde_json", "sha2 0.9.9", "sparse-merkle-tree", - "tendermint", + "tendermint 0.23.6", "tendermint-proto 0.23.6", "thiserror", "tonic-build", @@ -2543,16 +3126,16 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.14.0" +version = "0.16.0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "namada_proof_of_stake" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", "data-encoding", @@ -2563,29 +3146,28 @@ dependencies = [ "proptest", "rust_decimal", "rust_decimal_macros", - "tendermint-proto 0.23.5", "thiserror", "tracing", ] [[package]] name = "namada_test_utils" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", "namada_core", + "strum", ] [[package]] name = "namada_tests" -version = "0.14.0" +version = "0.16.0" dependencies = [ "chrono", "concat-idents", "derivative", - "ibc", - "ibc-proto", "ibc-relayer", + "ibc-relayer-types", "namada", "namada_core", "namada_test_utils", @@ -2598,10 +3180,10 @@ dependencies = [ "serde_json", "sha2 0.9.9", "tempfile", - "tendermint", - "tendermint-config", + "tendermint 0.23.6", + "tendermint-config 0.23.6", "tendermint-proto 0.23.6", - "tendermint-rpc", + "tendermint-rpc 0.23.6", "test-log", "tokio", "tracing", @@ -2610,7 +3192,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", "masp_primitives", @@ -2625,7 +3207,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", "hex", @@ -2636,7 +3218,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", "namada_core", @@ -2649,10 +3231,10 @@ dependencies = [ [[package]] name = "namada_wasm_for_tests" -version = "0.14.0" +version = "0.16.0" dependencies = [ "borsh", - "getrandom 0.2.8", + "getrandom 0.2.9", "namada_test_utils", "namada_tests", "namada_tx_prelude", @@ -2660,21 +3242,26 @@ dependencies = [ "wee_alloc", ] -[[package]] -name = "nanoid" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" -dependencies = [ - "rand 0.8.5", -] - [[package]] name = "nonempty" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" +[[package]] +name = "num" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -2687,6 +3274,15 @@ dependencies = [ "serde", ] +[[package]] +name = "num-complex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" +dependencies = [ + "num-traits", +] + [[package]] name = "num-derive" version = "0.3.3" @@ -2695,7 +3291,7 @@ checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2708,6 +3304,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-rational" version = "0.4.1" @@ -2728,15 +3335,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", + "libm", ] [[package]] name = "num_cpus" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ - "hermit-abi", + "hermit-abi 0.2.6", "libc", ] @@ -2754,18 +3362,18 @@ dependencies = [ [[package]] name = "object" -version = "0.29.0" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.16.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "opaque-debug" @@ -2788,11 +3396,11 @@ dependencies = [ "aes", "arrayvec 0.7.2", "bigint", - "bitvec", - "blake2b_simd 1.0.0", - "ff", + "bitvec 0.22.3", + "blake2b_simd 1.0.1", + "ff 0.11.1", "fpe", - "group", + "group 0.11.0", "halo2", "incrementalmerkletree", "lazy_static", @@ -2806,13 +3414,51 @@ dependencies = [ "zcash_note_encryption 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "orion" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6624905ddd92e460ff0685567539ed1ac985b2dee4c92c7edcd64fce905b00c" +dependencies = [ + "ct-codecs", + "getrandom 0.2.9", + "subtle", + "zeroize", +] + [[package]] name = "pairing" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2e415e349a3006dd7d9482cdab1c980a845bed1377777d768cb693a44540b42" dependencies = [ - "group", + "group 0.11.0", +] + +[[package]] +name = "parity-scale-codec" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" +dependencies = [ + "arrayvec 0.7.2", + "bitvec 1.0.1", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] 
@@ -2821,6 +3467,12 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" +[[package]] +name = "parking" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" + [[package]] name = "parking_lot" version = "0.12.1" @@ -2833,15 +3485,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -2862,8 +3514,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d647d91972bad78120fd61e06b225fcda117805c9bbf17676b51bd03a251278b" dependencies = [ "blake2b_simd 0.5.11", - "ff", - "group", + "ff 0.11.1", + "group 0.11.0", "lazy_static", "rand 0.8.5", "static_assertions", @@ -2872,27 +3524,27 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.9" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pbkdf2" -version = "0.4.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +checksum = "f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" dependencies = [ - "crypto-mac 0.8.0", + "crypto-mac 0.11.1", + "password-hash", ] [[package]] name = "pbkdf2" -version = "0.9.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05894bce6a1ba4be299d0c5f29563e08af2bc18bb7d48313113bed71e904739" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "crypto-mac 0.11.1", - "password-hash", + "digest 0.10.6", ] [[package]] @@ -2930,9 +3582,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.4.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a528564cc62c19a7acac4d81e01f39e53e25e17b934878f4c6d25cc2836e62f8" +checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" dependencies = [ "thiserror", "ucd-trie", @@ -2940,9 +3592,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", "indexmap", @@ -2965,7 +3617,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2981,14 +3633,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] -name = "pkcs8" -version = "0.8.0" +name = "polling" +version = 
"2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ - "der", - "spki", - "zeroize", + "autocfg", + "bitflags", + "cfg-if 1.0.0", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", ] [[package]] @@ -3008,6 +3665,28 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "prettyplease" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +dependencies = [ + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "primitive-types" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-serde", + "uint", +] + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -3017,6 +3696,16 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -3026,7 +3715,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -3043,37 +3732,37 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.47" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] [[package]] name = "proptest" -version = "1.0.0" -source = "git+https://github.com/heliaxdev/proptest?branch=tomas/sm#b9517a726c032897a8b41c215147f44588b33dcc" +version = "1.1.0" +source = "git+https://github.com/heliaxdev/proptest?rev=8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1#8f1b4abe7ebd35c0781bf9a00a4ee59833ffa2a1" dependencies = [ "bit-set", "bitflags", "byteorder", "lazy_static", "num-traits", - "quick-error 2.0.1", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax", + "regex-syntax 0.6.29", "rusty-fork", "tempfile", + "unarray", ] [[package]] name = "prost" -version = "0.9.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", "prost-derive", @@ -3081,9 +3770,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.9.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", @@ -3092,33 +3781,34 @@ dependencies = [ "log", "multimap", "petgraph", + "prettyplease", "prost", "prost-types", "regex", + "syn 1.0.109", "tempfile", "which", ] [[package]] name = "prost-derive" -version = "0.9.0" 
+version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "prost-types" -version = "0.9.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ - "bytes", "prost", ] @@ -3139,7 +3829,7 @@ checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3169,7 +3859,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" dependencies = [ - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.15", "libc", "mach", "once_cell", @@ -3185,17 +3875,11 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quote" -version = "1.0.21" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ "proc-macro2", ] @@ -3206,6 +3890,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.7.3" @@ -3265,7 +3955,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -3288,9 +3978,9 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.6.0" +version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6823ea29436221176fe662da99998ad3b4db2c7f31e7b6f5fe43adccd6320bb" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ "bitflags", ] @@ -3309,13 +3999,13 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.3" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ - "crossbeam-channel 0.5.6", + "crossbeam-channel 0.5.8", "crossbeam-deque", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.15", "num_cpus", ] @@ -3328,7 +4018,7 @@ dependencies = [ "blake2b_simd 0.5.11", "byteorder", "digest 0.9.0", - "group", + "group 0.11.0", "jubjub", "pasta_curves", "rand_core 0.6.4", 
@@ -3346,14 +4036,23 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", - "redox_syscall", + "getrandom 0.2.9", + "redox_syscall 0.2.16", "thiserror", ] @@ -3370,13 +4069,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.1", ] [[package]] @@ -3385,14 +4084,20 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" [[package]] name = "region" @@ -3406,38 +4111,29 @@ dependencies = [ "winapi", ] -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - [[package]] name = "rend" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95" +checksum = "581008d2099240d37fb08d77ad713bcaec2c4d89d50b5b21a8bb1996bbab68ab" dependencies = [ "bytecheck", ] [[package]] name = "retry" -version = "1.3.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac95c60a949a63fd2822f4964939662d8f2c16c4fa0624fd954bc6e703b9a3f6" +checksum = "9166d72162de3575f950507683fac47e30f6f2c3836b71b7fbc61aa517c9c5f4" [[package]] name = "rfc6979" -version = "0.1.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ "crypto-bigint", - "hmac 0.11.0", + "hmac 0.12.1", "zeroize", ] @@ -3456,6 +4152,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.6", +] + [[package]] name = "ripemd160" version = "0.9.1" @@ -3469,9 +4174,9 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.39" +version = "0.7.41" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" +checksum = "21499ed91807f07ae081880aabb2ccc0235e9d88011867d984525e9a4c3cfa3e" dependencies = [ "bytecheck", "hashbrown 0.12.3", @@ -3483,13 +4188,13 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.39" +version = "0.7.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" +checksum = "ac1c672430eb41556291981f45ca900a0239ad007242d1cb4b4167af842db666" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3516,9 +4221,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -3526,6 +4231,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + [[package]] name = "rustc_version" version = "0.3.3" @@ -3535,17 +4246,52 @@ dependencies = [ "semver 0.11.0", ] +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.17", +] + +[[package]] +name = "rustix" +version = "0.37.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.48.0", +] + [[package]] name = "rustls" version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64", + "base64 0.13.1", "log", "ring", - "sct", - "webpki", + "sct 0.6.1", + "webpki 0.21.4", +] + +[[package]] +name = "rustls" +version = "0.20.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +dependencies = [ + "log", + "ring", + "sct 0.7.0", + "webpki 0.22.0", ] [[package]] @@ -3555,16 +4301,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" dependencies = [ "openssl-probe", - "rustls", + "rustls 0.19.1", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +dependencies = [ + "openssl-probe", + "rustls-pemfile", "schannel", "security-framework", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +dependencies = [ + "base64 0.21.0", +] + [[package]] name = "rustversion" 
-version = "1.0.9" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "rusty-fork" @@ -3573,16 +4340,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error 1.2.3", + "quick-error", "tempfile", "wait-timeout", ] [[package]] name = "ryu" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safe-proc-macro2" @@ -3642,19 +4409,18 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "lazy_static", - "windows-sys 0.36.1", + "windows-sys 0.42.0", ] [[package]] name = "scheduled-thread-pool" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ "parking_lot", ] @@ -3666,16 +4432,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] -name = "scratch" -version = "1.0.2" +name = "sct" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +dependencies = [ + "ring", + "untrusted", +] [[package]] name = "sct" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", @@ -3689,41 +4459,61 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "sec1" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ + "base16ct", "der", "generic-array", - "pkcs8", "subtle", "zeroize", ] [[package]] name = "secp256k1" -version = "0.22.1" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" +dependencies = [ + "secp256k1-sys 0.4.2", +] + +[[package]] +name = "secp256k1" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26947345339603ae8395f68e2f3d85a6b0a8ddfe6315818e80b8504415099db0" +checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" dependencies = [ - "secp256k1-sys", + "bitcoin_hashes", + 
"rand 0.8.5", + "secp256k1-sys 0.6.1", "serde", ] [[package]] name = "secp256k1-sys" -version = "0.5.2" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" +dependencies = [ + "cc", +] + +[[package]] +name = "secp256k1-sys" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "152e20a0fd0519390fc43ab404663af8a0b794273d2a91d60ad4a39f13ffe110" +checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" dependencies = [ "cc", ] [[package]] name = "security-framework" -version = "2.7.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags", "core-foundation", @@ -3734,9 +4524,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", @@ -3753,9 +4543,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" dependencies = [ "serde", ] @@ -3771,18 +4561,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.147" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.7" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" +checksum = "416bda436f9aab92e02c8e10d49a15ddd339cea90b6e340fe51ed97abb548294" dependencies = [ "serde", ] @@ -3799,20 +4589,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.147" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "serde_json" -version = "1.0.87" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", @@ -3821,26 +4611,24 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.9" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" +checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = 
"sha-1" -version = "0.9.8" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ - "block-buffer 0.9.0", "cfg-if 1.0.0", "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "digest 0.10.6", ] [[package]] @@ -3851,7 +4639,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -3875,19 +4663,17 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] name = "sha3" -version = "0.9.1" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", + "digest 0.10.6", "keccak", - "opaque-debug", ] [[package]] @@ -3901,23 +4687,29 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] [[package]] name = "signature" -version = "1.4.0" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.9.0", + "digest 0.10.6", "rand_core 0.6.4", ] +[[package]] +name = "simdutf8" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + [[package]] name = "simple-error" version = "0.2.3" @@ -3941,9 +4733,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] @@ -3956,24 +4748,18 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", ] -[[package]] -name = "sp-std" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" - [[package]] name = "sparse-merkle-tree" version = "0.3.1-pre" -source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=04ad1eeb28901b57a7599bbe433b3822965dabe8#04ad1eeb28901b57a7599bbe433b3822965dabe8" +source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=e086b235ed6e68929bf73f617dd61cd17b000a56#e086b235ed6e68929bf73f617dd61cd17b000a56" dependencies = 
[ "borsh", "cfg-if 1.0.0", @@ -3987,16 +4773,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" -dependencies = [ - "base64ct", - "der", -] - [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -4009,6 +4785,41 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "strum" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 1.0.109", +] + +[[package]] +name = "subproductdomain" +version = "0.1.0" +source = "git+https://github.com/anoma/ferveo?rev=e5abd0acc938da90140351a65a26472eb495ce4d#e5abd0acc938da90140351a65a26472eb495ce4d" +dependencies = [ + "anyhow", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", +] + [[package]] name = "subtle" version = "2.4.1" @@ -4032,9 +4843,9 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "syn" -version = "1.0.103" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -4042,17 +4853,22 @@ dependencies = [ ] [[package]] -name = "synstructure" -version = "0.12.6" +name = "syn" +version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", - "syn", - "unicode-xid", + "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "tagptr" version = "0.2.0" @@ -4067,30 +4883,57 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.5" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d" +checksum = "fd1ba337640d60c3e96bc6f0638a939b9c9a7f2c316a1598c279828b3d1dc8c5" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if 1.0.0", "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", + "redox_syscall 0.3.5", + "rustix", + "windows-sys 0.45.0", ] 
[[package]] name = "tendermint" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "async-trait", + "bytes", + "ed25519", + "ed25519-dalek", + "flex-error", + "futures", + "num-traits", + "once_cell", + "prost", + "prost-types", + "serde", + "serde_bytes", + "serde_json", + "serde_repr", + "sha2 0.9.9", + "signature", + "subtle", + "subtle-encoding", + "tendermint-proto 0.23.6", + "time", + "zeroize", +] + +[[package]] +name = "tendermint" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c518c082146825f10d6f9a32159ae46edcfd7dae8ac630c8067594bb2a784d72" +dependencies = [ "bytes", "ed25519", "ed25519-dalek", @@ -4110,7 +4953,7 @@ dependencies = [ "signature", "subtle", "subtle-encoding", - "tendermint-proto 0.23.6", + "tendermint-proto 0.28.0", "time", "zeroize", ] @@ -4118,20 +4961,35 @@ dependencies = [ [[package]] name = "tendermint-config" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" +dependencies = [ + "flex-error", + "serde", + "serde_json", + "tendermint 0.23.6", + "toml", + "url", +] + +[[package]] +name = "tendermint-config" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f58b86374e3bcfc8135770a6c55388fce101c00de4a5f03224fa5830c9240b7" dependencies = [ "flex-error", "serde", "serde_json", - "tendermint", + "tendermint 0.28.0", "toml", "url", ] [[package]] name = "tendermint-light-client" -version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ab1450566e4347f3a81e27d3e701d74313f9fc2efb072fc3f49e0a762cb2a0f" dependencies = [ "contracts", "crossbeam-channel 0.4.4", @@ -4142,9 +5000,9 @@ dependencies = [ "serde_cbor", "serde_derive", "static_assertions", - "tendermint", - "tendermint-light-client-verifier", - "tendermint-rpc", + "tendermint 0.28.0", + "tendermint-light-client-verifier 0.28.0", + "tendermint-rpc 0.28.0", "time", "tokio", ] @@ -4152,19 +5010,32 @@ dependencies = [ [[package]] name = "tendermint-light-client-verifier" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" +dependencies = [ + "derive_more", + "flex-error", + "serde", + "tendermint 0.23.6", + "time", +] + +[[package]] +name = "tendermint-light-client-verifier" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c742bb914f9fb025ce0e481fbef9bb59c94d5a4bbd768798102675a2e0fb7440" dependencies = [ "derive_more", "flex-error", "serde", - "tendermint", + "tendermint 0.28.0", "time", ] [[package]] name = "tendermint-proto" -version = "0.23.5" -source = 
"git+https://github.com/heliaxdev/tendermint-rs?rev=95c52476bc37927218374f94ac8e2a19bd35bec9#95c52476bc37927218374f94ac8e2a19bd35bec9" +version = "0.23.6" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "bytes", "flex-error", @@ -4180,8 +5051,9 @@ dependencies = [ [[package]] name = "tendermint-proto" -version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "890f1fb6dee48900c85f0cdf711ebf130e505ac09ad918cee5c34ed477973b05" dependencies = [ "bytes", "flex-error", @@ -4198,14 +5070,13 @@ dependencies = [ [[package]] name = "tendermint-rpc" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "async-trait", - "async-tungstenite", "bytes", "flex-error", "futures", - "getrandom 0.2.8", + "getrandom 0.2.9", "http", "hyper", "hyper-proxy", @@ -4216,8 +5087,8 @@ dependencies = [ "serde_bytes", "serde_json", "subtle-encoding", - "tendermint", - "tendermint-config", + "tendermint 0.23.6", + "tendermint-config 0.23.6", "tendermint-proto 0.23.6", "thiserror", "time", @@ -4228,10 +5099,44 @@ dependencies = [ "walkdir", ] +[[package]] +name = "tendermint-rpc" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06df4715f9452ec0a21885d6da8d804799455ba50d8bc40be1ec1c800afd4bd8" +dependencies = [ + "async-trait", + "async-tungstenite", + "bytes", + "flex-error", + "futures", + "getrandom 0.2.9", + "http", + "hyper", + "hyper-proxy", + "hyper-rustls", + "peg", + "pin-project", + "serde", + "serde_bytes", + "serde_json", + "subtle", + "subtle-encoding", + "tendermint 0.28.0", + "tendermint-config 0.28.0", + "thiserror", + "time", + "tokio", + "tracing", + "url", + "uuid 0.8.2", + "walkdir", +] + [[package]] name = "tendermint-testgen" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=4db3c5ea09fae4057008d22bf9e96bf541b55b35#4db3c5ea09fae4057008d22bf9e96bf541b55b35" dependencies = [ "ed25519-dalek", "gumdrop", @@ -4239,17 +5144,24 @@ dependencies = [ "serde_json", "simple-error", "tempfile", - "tendermint", + "tendermint 0.23.6", "time", ] [[package]] -name = "termcolor" -version = "1.1.3" +name = "tendermint-testgen" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "05912d3072284786c0dec18e82779003724e0da566676fbd90e4fba6845fd81a" dependencies = [ - "winapi-util", + "ed25519-dalek", + "gumdrop", + "serde", + "serde_json", + "simple-error", + "tempfile", + "tendermint 0.28.0", + "time", ] [[package]] @@ -4260,35 +5172,36 @@ checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.40" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if 1.0.0", "once_cell", ] @@ -4320,17 +5233,17 @@ dependencies = [ [[package]] name = "tiny-bip39" -version = "0.8.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" +checksum = "62cc94d358b5a1e84a5cb9109f559aa3c4d634d2b1b4de3d0fa4adc7c78e2861" dependencies = [ "anyhow", - "hmac 0.8.1", + "hmac 0.12.1", "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", + "pbkdf2 0.11.0", + "rand 0.8.5", "rustc-hash", - "sha2 0.9.9", + "sha2 0.10.6", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -4357,20 +5270,19 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.21.2" +version = "1.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" dependencies = [ "autocfg", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot", @@ -4378,7 +5290,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -4393,13 +5305,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -4408,41 +5320,38 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "rustls", + "rustls 0.19.1", "tokio", - "webpki", + "webpki 0.21.4", ] [[package]] -name = "tokio-stream" -version = "0.1.11" +name = "tokio-rustls" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "futures-core", - "pin-project-lite", + "rustls 0.20.8", "tokio", + "webpki 0.22.0", ] [[package]] -name = "tokio-util" 
-version = "0.6.10" +name = "tokio-stream" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ - "bytes", "futures-core", - "futures-sink", - "log", "pin-project-lite", "tokio", ] [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", @@ -4454,22 +5363,40 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" + +[[package]] +name = "toml_edit" +version = "0.19.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" -version = "0.6.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08f4649d10a70ffa3522ca559031285d8e421d727ac85c60825761818f5d0a" +checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" dependencies = [ "async-stream", "async-trait", - "base64", + "axum", + "base64 0.13.1", "bytes", "futures-core", "futures-util", @@ -4482,11 +5409,12 @@ dependencies = [ "pin-project", "prost", "prost-derive", - "rustls-native-certs", + "rustls-native-certs 0.6.2", + "rustls-pemfile", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "tokio-stream", - "tokio-util 0.6.10", + "tokio-util", "tower", "tower-layer", "tower-service", @@ -4496,14 +5424,15 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.6.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" +checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" dependencies = [ + "prettyplease", "proc-macro2", "prost-build", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4520,7 +5449,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -4553,20 +5482,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = 
"0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", ] @@ -4583,9 +5512,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "once_cell", @@ -4604,34 +5533,36 @@ checksum = "f1ee9bd9239c339d714d657fac840c6d2a4f9c45f4f9ec7b0975113458be78db" [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" -version = "0.12.0" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64", + "base64 0.13.1", "byteorder", "bytes", "http", "httparse", - "input_buffer", "log", "rand 0.8.5", + "rustls 0.20.8", "sha-1", + "thiserror", "url", "utf-8", + "webpki 0.22.0", ] [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" @@ -4641,9 +5572,9 @@ checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "uint" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a45526d29728d135c2900b0d30573fe3ee79fceb12ef534c7bb30e810a91b601" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" dependencies = [ "byteorder", "crunchy 0.2.2", @@ -4651,6 +5582,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicase" version = "2.6.0" @@ -4662,15 +5599,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.5" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -4681,12 +5618,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" - [[package]] name = "unicode-width" version = "0.1.10" @@ -4740,11 +5671,21 @@ checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" [[package]] 
name = "uuid" -version = "1.2.1" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" +dependencies = [ + "getrandom 0.2.9", +] + +[[package]] +name = "value-bag" +version = "1.0.0-alpha.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" +checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" dependencies = [ - "getrandom 0.2.8", + "ctor", + "version_check", ] [[package]] @@ -4762,14 +5703,19 @@ dependencies = [ "libc", ] +[[package]] +name = "waker-fn" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" + [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] @@ -4803,9 +5749,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "5b6cb788c4e39112fbe1822277ef6fb3c55cd86b95cb3d3c4c1c9597e4ac74b4" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -4813,24 +5759,36 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "35e522ed4105a9d626d885b35d62501b30d9666283a5c8be12c14a8bdafe7822" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.15", "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "083abe15c5d88556b77bdf7aef403625be9e327ad37c62c4e4129af740168163" +dependencies = [ + "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "358a79a0cb89d21db8120cbfb91392335913e4890665b1a7981d9e956903b434" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4838,28 +5796,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "4783ce29f09b9d93134d41297aded3a712b7b979e9c6f28c32cb88c973a94869" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "a901d592cafaa4d711bc324edfaff879ac700b19c3dfd60058d2b445be2691eb" 
[[package]] name = "wasm-encoder" -version = "0.19.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9424cdab516a16d4ea03c8f4a01b14e7b2d04a129dcc2bcdde5bcc5f68f06c41" +checksum = "d05d0b6fcd0aeb98adf16e7975331b3c17222aa815148f5b976370ce589d80ef" dependencies = [ "leb128", ] @@ -4970,7 +5928,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5102,9 +6060,9 @@ checksum = "718ed7c55c2add6548cca3ddd6383d738cd73b892df400e96b9aa876f0141d7a" [[package]] name = "wast" -version = "49.0.0" +version = "57.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ef81fcd60d244cafffeafac3d17615fdb2fddda6aca18f34a8ae233353587c" +checksum = "6eb0f5ed17ac4421193c7477da05892c2edafd67f9639e3c11a82086416662dc" dependencies = [ "leb128", "memchr", @@ -5114,18 +6072,18 @@ dependencies = [ [[package]] name = "wat" -version = "1.0.51" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c347c4460ffb311e95aafccd8c29e4888f241b9e4b3bb0e0ccbd998de2c8c0d" +checksum = "ab9ab0d87337c3be2bb6fc5cd331c4ba9fd6bcb4ee85048a0dd59ed9ecf92e53" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "16b5f940c7edfdc6d12126d98c9ef4d1b3d470011c47c76a6581df47ad9ba721" dependencies = [ "js-sys", "wasm-bindgen", @@ -5141,13 +6099,23 @@ dependencies = [ "untrusted", ] +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "webpki-roots" version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" dependencies = [ - "webpki", + "webpki 0.21.4", ] [[package]] @@ -5164,9 +6132,9 @@ dependencies = [ [[package]] name = "which" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", "libc", @@ -5205,16 +6173,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows-sys" -version = "0.36.1" +name = "windows" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", + "windows-targets 0.48.0", ] [[package]] @@ -5223,86 +6187,155 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", - 
"windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" -version = "0.36.1" +version = "0.42.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + +[[package]] +name = "winnow" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +dependencies = [ + "memchr", +] [[package]] name = "wyz" @@ -5313,6 +6346,15 @@ dependencies = [ "tap", ] +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "zcash_encoding" version = "0.0.0" @@ -5352,16 +6394,16 @@ source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b dependencies = [ "aes", "bip0039", - "bitvec", - "blake2b_simd 1.0.0", - "blake2s_simd 1.0.0", + "bitvec 0.22.3", + "blake2b_simd 1.0.1", + "blake2s_simd 1.0.1", "bls12_381", "byteorder", "chacha20poly1305", "equihash", - "ff", + "ff 0.11.1", "fpe", - "group", + "group 0.11.0", "hex", "incrementalmerkletree", "jubjub", @@ -5383,12 +6425,12 @@ version = "0.5.0" source = "git+https://github.com/zcash/librustzcash/?rev=2425a08#2425a0869098e3b0588ccd73c42716bcf418612c" dependencies = [ "bellman", - "blake2b_simd 1.0.0", + "blake2b_simd 1.0.1", "bls12_381", "byteorder", "directories", - "ff", - "group", + "ff 
0.11.1", + "group 0.11.0", "jubjub", "lazy_static", "rand_core 0.6.4", @@ -5397,21 +6439,20 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.15", ] diff --git a/wasm_for_tests/wasm_source/Cargo.toml b/wasm_for_tests/wasm_source/Cargo.toml index 7516a5e00d9..e2f9cd4d3ee 100644 --- a/wasm_for_tests/wasm_source/Cargo.toml +++ b/wasm_for_tests/wasm_source/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_wasm_for_tests" resolver = "2" -version = "0.14.0" +version = "0.16.0" [lib] crate-type = ["cdylib"] @@ -39,18 +39,19 @@ borsh-derive = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223 borsh-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} borsh-schema-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} # patched to a commit on the `eth-bridge-integration+consensus-timeout` branch of our fork -tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-config = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-rpc = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-light-client = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} -tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} +tendermint-config = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} +tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} +tendermint-rpc = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} +tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} +tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "4db3c5ea09fae4057008d22bf9e96bf541b55b35"} # patched to a commit on the `eth-bridge-integration` branch of our fork -ibc = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"} -ibc-proto = {git = 
"https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"} -ibc-relayer = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"} +ibc = {git = "https://github.com/heliaxdev/cosmos-ibc-rs.git", rev = "2d7edc16412b60cabf78163fe24a6264e11f77a9"} +ibc-proto = {git = "https://github.com/heliaxdev/ibc-proto-rs.git", rev = "7e527b5b8c95d83351e93ceafc14ac853224283f"} + +# patched to the yanked 1.2.0 until masp updates bitvec +funty = { git = "https://github.com/bitvecto-rs/funty/", rev = "7ef0d890fbcd8b3def1635ac1a877fc298488446" } [dev-dependencies] namada_tests = {path = "../../tests"} diff --git a/wasm_for_tests/wasm_source/src/lib.rs b/wasm_for_tests/wasm_source/src/lib.rs index 7f3584ad442..f271acc97ab 100644 --- a/wasm_for_tests/wasm_source/src/lib.rs +++ b/wasm_for_tests/wasm_source/src/lib.rs @@ -207,9 +207,9 @@ pub mod main { _verifiers: BTreeSet
, ) -> VpResult { use validity_predicate::EvalVp; - let EvalVp { vp_code, input }: EvalVp = + let EvalVp { vp_code_hash, input }: EvalVp = EvalVp::try_from_slice(&tx_data[..]).unwrap(); - ctx.eval(vp_code, input) + ctx.eval(vp_code_hash, input) } }
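A note on the final lib.rs hunk: `ctx.eval` in the test VP now receives a hash of the VP code (`vp_code_hash`) rather than the code bytes themselves, and the `EvalVp` payload is recovered from `tx_data` via Borsh. The sketch below illustrates only that serialize/deserialize round-trip; the `EvalVp` struct here is a hypothetical stand-in, and the `[u8; 32]` hash and `Vec<u8>` input field types are assumptions for illustration, not the exact types from the Namada prelude.

    use borsh::{BorshDeserialize, BorshSerialize};

    // Hypothetical stand-in for the prelude's EvalVp, assuming a 32-byte
    // code hash and raw input bytes; the real Namada types may differ.
    #[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
    struct EvalVp {
        vp_code_hash: [u8; 32],
        input: Vec<u8>,
    }

    fn main() {
        // A caller serializes the payload that becomes the VP's tx_data...
        let payload = EvalVp {
            vp_code_hash: [0u8; 32],
            input: b"inner tx".to_vec(),
        };
        let tx_data = payload.try_to_vec().expect("Borsh serialization failed");

        // ...and the VP recovers it with the same pattern as the patched code:
        let EvalVp { vp_code_hash, input } =
            EvalVp::try_from_slice(&tx_data[..]).unwrap();
        assert_eq!(vp_code_hash, [0u8; 32]);
        assert_eq!(input, b"inner tx".to_vec());
    }

Presumably, passing a hash keeps transaction payloads small and lets the code itself be resolved from storage, which is consistent with the direction of this change.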