diff --git a/.changelog/unreleased/improvements/2355-new-storage-write.md b/.changelog/unreleased/improvements/2355-new-storage-write.md new file mode 100644 index 0000000000..52b59cbc96 --- /dev/null +++ b/.changelog/unreleased/improvements/2355-new-storage-write.md @@ -0,0 +1,2 @@ +- Skip writing some MASP and IBC storage keys to merkle tree and DB diffs. + ([\#2355](https://github.com/anoma/namada/pull/2355)) \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 71515d9ae8..5abd33e1d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -533,9 +533,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "bitvec" @@ -2543,7 +2543,7 @@ version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbf97ba92db08df386e10c8ede66a2a0369bd277090afd8710e19e38de9ec0cd" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "libc", "libgit2-sys", "log", @@ -3757,7 +3757,7 @@ version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "libc", "redox_syscall", ] @@ -4663,6 +4663,7 @@ dependencies = [ name = "namada_storage" version = "0.30.1" dependencies = [ + "bitflags 2.4.2", "borsh", "itertools 0.10.5", "namada_core", @@ -5206,7 +5207,7 @@ version = "0.10.61" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "cfg-if 1.0.0", "foreign-types", "libc", @@ -5727,7 +5728,7 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.1", + "bitflags 2.4.2", "lazy_static", "num-traits 0.2.17", "rand 0.8.5", @@ -6331,7 +6332,7 @@ version = "0.38.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "errno", "libc", "linux-raw-sys", diff --git a/Cargo.toml b/Cargo.toml index b2b053593c..bf3b8b0220 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,6 +67,7 @@ base58 = "0.2.0" base64 = "0.13.0" bech32 = "0.8.0" bimap = {version = "0.6.2", features = ["serde"]} +bitflags = "2.4.2" bit-set = "0.5.2" blake2b-rs = "0.2.0" byte-unit = "4.0.13" diff --git a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs index 9707ee7811..75bc6707cd 100644 --- a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -598,7 +598,7 @@ where // Update the MASP commitment tree anchor if the tree was updated let tree_key = token::storage_key::masp_commitment_tree_key(); - if let Some(StorageModification::Write { value }) = + if let Some(StorageModification::Write { value, action: _ }) = self.wl_storage.write_log.read(&tree_key).0 { let updated_tree = CommitmentTree::::try_from_slice(value) @@ -606,7 +606,8 @@ where let anchor_key = token::storage_key::masp_commitment_anchor_key( updated_tree.root(), ); - 
self.wl_storage.write(&anchor_key, ())?; + self.wl_storage + .write_without_merkle_diffs(&anchor_key, ())?; } if update_for_tendermint { diff --git a/crates/apps/src/lib/node/ledger/shell/init_chain.rs b/crates/apps/src/lib/node/ledger/shell/init_chain.rs index 1b52009eec..815bec90d9 100644 --- a/crates/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/crates/apps/src/lib/node/ledger/shell/init_chain.rs @@ -141,17 +141,20 @@ where let note_commitment_tree_key = token::storage_key::masp_commitment_tree_key(); self.wl_storage - .write(¬e_commitment_tree_key, empty_commitment_tree) + .write_without_merkle_diffs( + ¬e_commitment_tree_key, + empty_commitment_tree, + ) .unwrap(); let commitment_tree_anchor_key = token::storage_key::masp_commitment_anchor_key(anchor); self.wl_storage - .write(&commitment_tree_anchor_key, ()) + .write_without_merkle_diffs(&commitment_tree_anchor_key, ()) .unwrap(); // Init masp convert anchor let convert_anchor_key = token::storage_key::masp_convert_anchor_key(); - self.wl_storage.write( + self.wl_storage.write_without_merkle_diffs( &convert_anchor_key, namada::types::hash::Hash( bls12_381::Scalar::from( @@ -215,9 +218,9 @@ where self.update_eth_oracle(&Default::default()); } else { self.wl_storage - .write_bytes( + .write( &namada::eth_bridge::storage::active_key(), - EthBridgeStatus::Disabled.serialize_to_vec(), + EthBridgeStatus::Disabled, ) .unwrap(); } diff --git a/crates/apps/src/lib/node/ledger/shell/mod.rs b/crates/apps/src/lib/node/ledger/shell/mod.rs index a9cbba5efe..72b3cac6ff 100644 --- a/crates/apps/src/lib/node/ledger/shell/mod.rs +++ b/crates/apps/src/lib/node/ledger/shell/mod.rs @@ -2041,10 +2041,7 @@ mod test_utils { use namada::eth_bridge::storage::eth_bridge_queries::EthBridgeStatus; shell .wl_storage - .write_bytes( - &active_key(), - EthBridgeStatus::Disabled.serialize_to_vec(), - ) + .write(&active_key(), EthBridgeStatus::Disabled) .expect("Test failed"); } diff --git a/crates/apps/src/lib/node/ledger/storage/mod.rs b/crates/apps/src/lib/node/ledger/storage/mod.rs index 6b2b8e461d..be406dbf90 100644 --- a/crates/apps/src/lib/node/ledger/storage/mod.rs +++ b/crates/apps/src/lib/node/ledger/storage/mod.rs @@ -52,6 +52,7 @@ fn new_blake2b() -> Blake2b { mod tests { use std::collections::HashMap; + use borsh::BorshDeserialize; use itertools::Itertools; use namada::eth_bridge::storage::proof::BridgePoolRootProof; use namada::ledger::eth_bridge::storage::bridge_pool; @@ -59,7 +60,9 @@ mod tests { use namada::ledger::ibc::storage::ibc_key; use namada::ledger::parameters::{EpochDuration, Parameters}; use namada::state::write_log::WriteLog; - use namada::state::{self, StorageWrite, StoreType, WlStorage}; + use namada::state::{ + self, StorageRead, StorageWrite, StoreType, WlStorage, WriteOpts, DB, + }; use namada::token::conversion::update_allowed_conversions; use namada::types::chain::ChainId; use namada::types::ethereum_events::Uint; @@ -530,7 +533,11 @@ mod tests { storage.write(&key, value_bytes)?; } 3 => { - storage.batch_delete_subspace_val(&mut batch, &key)?; + storage.batch_delete_subspace_val( + &mut batch, + &key, + WriteOpts::ALL, + )?; } _ => { let value_bytes = types::encode(&storage.block.height); @@ -538,6 +545,7 @@ mod tests { &mut batch, &key, value_bytes, + WriteOpts::ALL, )?; } } @@ -783,4 +791,191 @@ mod tests { .map(Result::unwrap); itertools::assert_equal(iter, expected); } + + #[test] + fn test_persistent_storage_writing_without_merklizing_or_diffs() { + let db_path = + TempDir::new().expect("Unable to create a temporary DB 
directory"); + let storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + None, + ); + let mut wls = WlStorage { + storage, + write_log: Default::default(), + }; + // Start the first block + let first_height = BlockHeight::first(); + wls.storage.block.height = first_height; + + let key1 = Key::parse("testing1").unwrap(); + let val1 = 1u64; + let key2 = Key::parse("testing2").unwrap(); + let val2 = 2u64; + + // Standard write of key-val-1 + wls.write(&key1, val1).unwrap(); + + // Read from WlStorage should return val1 + let res = wls.read::(&key1).unwrap().unwrap(); + assert_eq!(res, val1); + + // Read from Storage shouldn't return val1 because the block hasn't been + // committed + let (res, _) = wls.storage.read(&key1).unwrap(); + assert!(res.is_none()); + + // Write key-val-2 without merklizing or diffs + wls.write_without_merkle_diffs(&key2, val2).unwrap(); + + // Read from WlStorage should return val2 + let res = wls.read::(&key2).unwrap().unwrap(); + assert_eq!(res, val2); + + // Commit block and storage changes + wls.commit_block().unwrap(); + wls.storage.block.height = wls.storage.block.height.next_height(); + let second_height = wls.storage.block.height; + + // Read key1 from Storage should return val1 + let (res1, _) = wls.storage.read(&key1).unwrap(); + let res1 = u64::try_from_slice(&res1.unwrap()).unwrap(); + assert_eq!(res1, val1); + + // Check merkle tree inclusion of key-val-1 explicitly + let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); + assert!(is_merklized1); + + // Key2 should be in storage. Confirm by reading from + // WlStorage and also by reading Storage subspace directly + let res2 = wls.read::(&key2).unwrap().unwrap(); + assert_eq!(res2, val2); + let res2 = wls.storage.db.read_subspace_val(&key2).unwrap().unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + + // Check explicitly that key-val-2 is not in merkle tree + let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + assert!(!is_merklized2); + + // Check that the proper diffs exist for key-val-1 + let res1 = wls + .storage + .db + .read_diffs_val(&key1, first_height, true) + .unwrap(); + assert!(res1.is_none()); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, first_height, false) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + // Check that there are diffs for key-val-2 in block 0, since all keys + // need to have diffs for at least 1 block for rollback purposes + let res2 = wls + .storage + .db + .read_diffs_val(&key2, first_height, true) + .unwrap(); + assert!(res2.is_none()); + let res2 = wls + .storage + .db + .read_diffs_val(&key2, first_height, false) + .unwrap() + .unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + + // Delete the data then commit the block + wls.delete(&key1).unwrap(); + wls.delete_without_diffs(&key2).unwrap(); + wls.commit_block().unwrap(); + wls.storage.block.height = wls.storage.block.height.next_height(); + + // Check the key-vals are removed from the storage subspace + let res1 = wls.read::(&key1).unwrap(); + let res2 = wls.read::(&key2).unwrap(); + assert!(res1.is_none() && res2.is_none()); + let res1 = wls.storage.db.read_subspace_val(&key1).unwrap(); + let res2 = wls.storage.db.read_subspace_val(&key2).unwrap(); + assert!(res1.is_none() && res2.is_none()); + + // Check that the key-vals don't exist in the merkle tree anymore + let is_merklized1 = 
wls.storage.block.tree.has_key(&key1).unwrap(); + let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + assert!(!is_merklized1 && !is_merklized2); + + // Check that key-val-1 diffs are properly updated for blocks 0 and 1 + let res1 = wls + .storage + .db + .read_diffs_val(&key1, first_height, true) + .unwrap(); + assert!(res1.is_none()); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, first_height, false) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, second_height, true) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, second_height, false) + .unwrap(); + assert!(res1.is_none()); + + // Check that key-val-2 diffs don't exist for block 0 anymore + let res2 = wls + .storage + .db + .read_diffs_val(&key2, first_height, true) + .unwrap(); + assert!(res2.is_none()); + let res2 = wls + .storage + .db + .read_diffs_val(&key2, first_height, false) + .unwrap(); + assert!(res2.is_none()); + + // Check that the block 1 diffs for key-val-2 include an "old" value of + // val2 and no "new" value + let res2 = wls + .storage + .db + .read_diffs_val(&key2, second_height, true) + .unwrap() + .unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + let res2 = wls + .storage + .db + .read_diffs_val(&key2, second_height, false) + .unwrap(); + assert!(res2.is_none()); + } } diff --git a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs index b98b60087e..1ac43caff7 100644 --- a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -56,7 +56,7 @@ use namada::state::merkle_tree::{base_tree_key_prefix, subtree_key_prefix}; use namada::state::types::PrefixIterator; use namada::state::{ BlockStateRead, BlockStateWrite, DBIter, DBWriteBatch, DbError as Error, - DbResult as Result, MerkleTreeStoresRead, StoreType, DB, + DbResult as Result, MerkleTreeStoresRead, StoreType, WriteOpts, DB, }; use namada::types; use namada::types::storage::{ @@ -87,6 +87,9 @@ const STATE_CF: &str = "state"; const BLOCK_CF: &str = "block"; const REPLAY_PROTECTION_CF: &str = "replay_protection"; +const OLD_DIFF_PREFIX: &str = "old"; +const NEW_DIFF_PREFIX: &str = "new"; + /// RocksDB handle #[derive(Debug)] pub struct RocksDB(rocksdb::DB); @@ -214,31 +217,55 @@ impl RocksDB { key: &Key, old_value: Option<&[u8]>, new_value: Option<&[u8]>, + persist_diffs: bool, ) -> Result<()> { let cf = self.get_column_family(DIFFS_CF)?; - let key_prefix = Key::from(height.to_db_key()); + let (old_val_key, new_val_key) = old_and_new_diff_key(key, height)?; if let Some(old_value) = old_value { - let old_val_key = key_prefix - .push(&"old".to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); self.0 .put_cf(cf, old_val_key, old_value) .map_err(|e| Error::DBError(e.into_string()))?; } if let Some(new_value) = new_value { - let new_val_key = key_prefix - .push(&"new".to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); self.0 .put_cf(cf, new_val_key, new_value) .map_err(|e| Error::DBError(e.into_string()))?; } + + // If not persisting the diffs, remove the last diffs. 
+ if !persist_diffs && height > BlockHeight::first() { + let mut height = height.prev_height(); + while height >= BlockHeight::first() { + let (old_diff_key, new_diff_key) = + old_and_new_diff_key(key, height)?; + let has_old_diff = self + .0 + .get_cf(cf, &old_diff_key) + .map_err(|e| Error::DBError(e.into_string()))? + .is_some(); + let has_new_diff = self + .0 + .get_cf(cf, &new_diff_key) + .map_err(|e| Error::DBError(e.into_string()))? + .is_some(); + if has_old_diff { + self.0 + .delete_cf(cf, old_diff_key) + .map_err(|e| Error::DBError(e.into_string()))?; + } + if has_new_diff { + self.0 + .delete_cf(cf, new_diff_key) + .map_err(|e| Error::DBError(e.into_string()))?; + } + if has_old_diff || has_new_diff { + break; + } + height = height.prev_height(); + } + } Ok(()) } @@ -251,27 +278,47 @@ impl RocksDB { key: &Key, old_value: Option<&[u8]>, new_value: Option<&[u8]>, + persist_diffs: bool, ) -> Result<()> { let cf = self.get_column_family(DIFFS_CF)?; - let key_prefix = Key::from(height.to_db_key()); + let (old_val_key, new_val_key) = old_and_new_diff_key(key, height)?; if let Some(old_value) = old_value { - let old_val_key = key_prefix - .push(&"old".to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); batch.0.put_cf(cf, old_val_key, old_value); } if let Some(new_value) = new_value { - let new_val_key = key_prefix - .push(&"new".to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); batch.0.put_cf(cf, new_val_key, new_value); } + + // If not persisting the diffs, remove the last diffs. + if !persist_diffs && height > BlockHeight::first() { + let mut height = height.prev_height(); + while height >= BlockHeight::first() { + let (old_diff_key, new_diff_key) = + old_and_new_diff_key(key, height)?; + let has_old_diff = self + .0 + .get_cf(cf, &old_diff_key) + .map_err(|e| Error::DBError(e.into_string()))? + .is_some(); + let has_new_diff = self + .0 + .get_cf(cf, &new_diff_key) + .map_err(|e| Error::DBError(e.into_string()))? + .is_some(); + if has_old_diff { + batch.0.delete_cf(cf, old_diff_key); + } + if has_new_diff { + batch.0.delete_cf(cf, new_diff_key); + } + if has_old_diff || has_new_diff { + break; + } + height = height.prev_height(); + } + } Ok(()) } @@ -541,7 +588,7 @@ impl RocksDB { let diff_new_key_prefix = Key { segments: vec![ last_block.height.to_db_key(), - "new".to_string().to_db_key(), + NEW_DIFF_PREFIX.to_string().to_db_key(), ], }; { @@ -1179,6 +1226,28 @@ impl DB for RocksDB { Ok(false) } + fn read_diffs_val( + &self, + key: &Key, + height: BlockHeight, + is_old: bool, + ) -> Result>> { + let diffs_cf = self.get_column_family(DIFFS_CF)?; + let old_new_seg = if is_old { + OLD_DIFF_PREFIX + } else { + NEW_DIFF_PREFIX + }; + let key = Key::from(height.to_db_key()) + .push(&old_new_seg.to_string().to_db_key()) + .unwrap() + .join(key); + + self.0 + .get_cf(diffs_cf, key.to_string()) + .map_err(|e| Error::DBError(e.into_string())) + } + fn read_subspace_val(&self, key: &Key) -> Result>> { let subspace_cf = self.get_column_family(SUBSPACE_CF)?; self.0 @@ -1194,12 +1263,7 @@ impl DB for RocksDB { ) -> Result>> { // Check if the value changed at this height let diffs_cf = self.get_column_family(DIFFS_CF)?; - let key_prefix = Key::from(height.to_db_key()); - let new_val_key = key_prefix - .push(&"new".to_owned()) - .map_err(Error::KeyError)? 
- .join(key) - .to_string(); + let (old_val_key, new_val_key) = old_and_new_diff_key(key, height)?; // If it has a "new" val, it was written at this height match self @@ -1211,13 +1275,8 @@ impl DB for RocksDB { return Ok(Some(new_val)); } None => { - let old_val_key = key_prefix - .push(&"old".to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); // If it has an "old" val, it was deleted at this height - if self.0.key_may_exist_cf(diffs_cf, old_val_key.clone()) { + if self.0.key_may_exist_cf(diffs_cf, &old_val_key) { // check if it actually exists if self .0 @@ -1236,15 +1295,11 @@ impl DB for RocksDB { let mut raw_height = height.0 + 1; loop { // Try to find the next diff on this key - let key_prefix = Key::from(BlockHeight(raw_height).to_db_key()); - let old_val_key = key_prefix - .push(&"old".to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); + let (old_val_key, new_val_key) = + old_and_new_diff_key(key, BlockHeight(raw_height))?; let old_val = self .0 - .get_cf(diffs_cf, old_val_key) + .get_cf(diffs_cf, &old_val_key) .map_err(|e| Error::DBError(e.into_string()))?; // If it has an "old" val, it's the one we're looking for match old_val { @@ -1252,12 +1307,7 @@ impl DB for RocksDB { None => { // Check if the value was created at this height instead, // which would mean that it wasn't present before - let new_val_key = key_prefix - .push(&"new".to_owned()) - .map_err(Error::KeyError)? - .join(key) - .to_string(); - if self.0.key_may_exist_cf(diffs_cf, new_val_key.clone()) { + if self.0.key_may_exist_cf(diffs_cf, &new_val_key) { // check if it actually exists if self .0 @@ -1285,9 +1335,13 @@ impl DB for RocksDB { height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result { let subspace_cf = self.get_column_family(SUBSPACE_CF)?; let value = value.as_ref(); + + let persist_diffs = action.contains(WriteOpts::WRITE_DIFFS); + let size_diff = match self .0 .get_cf(subspace_cf, key.to_string()) @@ -1300,11 +1354,18 @@ impl DB for RocksDB { key, Some(&prev_value), Some(value), + persist_diffs, )?; size_diff } None => { - self.write_subspace_diff(height, key, None, Some(value))?; + self.write_subspace_diff( + height, + key, + None, + Some(value), + persist_diffs, + )?; value.len() as i64 } }; @@ -1321,9 +1382,12 @@ impl DB for RocksDB { &mut self, height: BlockHeight, key: &Key, + action: WriteOpts, ) -> Result { let subspace_cf = self.get_column_family(SUBSPACE_CF)?; + let persists_diffs = action.contains(WriteOpts::WRITE_DIFFS); + // Check the length of previous value, if any let prev_len = match self .0 @@ -1332,7 +1396,13 @@ impl DB for RocksDB { { Some(prev_value) => { let prev_len = prev_value.len() as i64; - self.write_subspace_diff(height, key, Some(&prev_value), None)?; + self.write_subspace_diff( + height, + key, + Some(&prev_value), + None, + persists_diffs, + )?; prev_len } None => 0, @@ -1360,9 +1430,14 @@ impl DB for RocksDB { height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result { let value = value.as_ref(); let subspace_cf = self.get_column_family(SUBSPACE_CF)?; + + let persist_diffs = action.contains(WriteOpts::WRITE_DIFFS); + + // Diffs let size_diff = match self .0 .get_cf(subspace_cf, key.to_string()) @@ -1377,6 +1452,7 @@ impl DB for RocksDB { key, Some(&old_value), Some(value), + persist_diffs, )?; size_diff } @@ -1387,6 +1463,7 @@ impl DB for RocksDB { key, None, Some(value), + persist_diffs, )?; value.len() as i64 } @@ -1403,9 +1480,12 @@ impl DB for RocksDB { batch: &mut 
Self::WriteBatch, height: BlockHeight, key: &Key, + action: WriteOpts, ) -> Result { let subspace_cf = self.get_column_family(SUBSPACE_CF)?; + let persist_diffs = action.contains(WriteOpts::WRITE_DIFFS); + // Check the length of previous value, if any let prev_len = match self .0 @@ -1421,6 +1501,7 @@ impl DB for RocksDB { key, Some(&prev_value), None, + persist_diffs, )?; prev_len } @@ -1570,7 +1651,11 @@ fn iter_diffs_prefix<'a>( let diffs_cf = db .get_column_family(DIFFS_CF) .expect("{DIFFS_CF} column family should exist"); - let kind = if is_old { "old" } else { "new" }; + let kind = if is_old { + OLD_DIFF_PREFIX + } else { + NEW_DIFF_PREFIX + }; let stripped_prefix = Some( Key::from(height.to_db_key()) .push(&kind.to_string()) @@ -1662,6 +1747,22 @@ fn make_iter_read_opts(prefix: Option) -> ReadOptions { impl DBWriteBatch for RocksDBWriteBatch {} +fn old_and_new_diff_key( + key: &Key, + height: BlockHeight, +) -> Result<(String, String)> { + let key_prefix = Key::from(height.to_db_key()); + let old = key_prefix + .push(&OLD_DIFF_PREFIX.to_owned()) + .map_err(Error::KeyError)? + .join(key); + let new = key_prefix + .push(&NEW_DIFF_PREFIX.to_owned()) + .map_err(Error::KeyError)? + .join(key); + Ok((old.to_string(), new.to_string())) +} + fn unknown_key_error(key: &str) -> Result<()> { Err(Error::UnknownKey { key: key.to_owned(), @@ -1752,6 +1853,7 @@ mod test { last_height, &Key::parse("test").unwrap(), vec![1_u8, 1, 1, 1], + WriteOpts::ALL, ) .unwrap(); @@ -1787,12 +1889,18 @@ mod test { last_height, &batch_key, vec![1_u8, 1, 1, 1], + WriteOpts::ALL, ) .unwrap(); db.exec_batch(batch.0).unwrap(); - db.write_subspace_val(last_height, &key, vec![1_u8, 1, 1, 0]) - .unwrap(); + db.write_subspace_val( + last_height, + &key, + vec![1_u8, 1, 1, 0], + WriteOpts::ALL, + ) + .unwrap(); let mut batch = RocksDB::batch(); let last_height = BlockHeight(111); @@ -1801,12 +1909,18 @@ mod test { last_height, &batch_key, vec![2_u8, 2, 2, 2], + WriteOpts::ALL, ) .unwrap(); db.exec_batch(batch.0).unwrap(); - db.write_subspace_val(last_height, &key, vec![2_u8, 2, 2, 0]) - .unwrap(); + db.write_subspace_val( + last_height, + &key, + vec![2_u8, 2, 2, 0], + WriteOpts::ALL, + ) + .unwrap(); let prev_value = db .read_subspace_val_with_height( @@ -1844,11 +1958,17 @@ mod test { let mut batch = RocksDB::batch(); let last_height = BlockHeight(222); - db.batch_delete_subspace_val(&mut batch, last_height, &batch_key) - .unwrap(); + db.batch_delete_subspace_val( + &mut batch, + last_height, + &batch_key, + WriteOpts::ALL, + ) + .unwrap(); db.exec_batch(batch.0).unwrap(); - db.delete_subspace_val(last_height, &key).unwrap(); + db.delete_subspace_val(last_height, &key, WriteOpts::ALL) + .unwrap(); let deleted_value = db .read_subspace_val_with_height( @@ -1897,8 +2017,14 @@ mod test { let mut batch = RocksDB::batch(); let height = BlockHeight(1); for key in &all_keys { - db.batch_write_subspace_val(&mut batch, height, key, [0_u8]) - .unwrap(); + db.batch_write_subspace_val( + &mut batch, + height, + key, + [0_u8], + WriteOpts::ALL, + ) + .unwrap(); } db.exec_batch(batch.0).unwrap(); @@ -1950,6 +2076,7 @@ mod test { height_0, &delete_key, &to_delete_val, + WriteOpts::ALL, ) .unwrap(); db.batch_write_subspace_val( @@ -1957,6 +2084,7 @@ mod test { height_0, &overwrite_key, &to_overwrite_val, + WriteOpts::ALL, ) .unwrap(); @@ -1981,17 +2109,29 @@ mod test { .insert("dummy2".to_string(), gen_established_address("test")); let add_val = vec![1_u8, 0, 0, 0]; let overwrite_val = vec![1_u8, 1, 1, 1]; - 
db.batch_write_subspace_val(&mut batch, height_1, &add_key, &add_val) - .unwrap(); + db.batch_write_subspace_val( + &mut batch, + height_1, + &add_key, + &add_val, + WriteOpts::ALL, + ) + .unwrap(); db.batch_write_subspace_val( &mut batch, height_1, &overwrite_key, &overwrite_val, + WriteOpts::ALL, + ) + .unwrap(); + db.batch_delete_subspace_val( + &mut batch, + height_1, + &delete_key, + WriteOpts::ALL, ) .unwrap(); - db.batch_delete_subspace_val(&mut batch, height_1, &delete_key) - .unwrap(); add_block_to_batch( &db, @@ -2031,6 +2171,153 @@ mod test { assert_eq!(conversion_state, types::encode(&conversion_state_0)); } + #[test] + fn test_diffs() { + let dir = tempdir().unwrap(); + let mut db = open(dir.path(), None).unwrap(); + + let key_with_diffs = Key::parse("with_diffs").unwrap(); + let key_without_diffs = Key::parse("without_diffs").unwrap(); + + let initial_val = vec![1_u8, 1, 0, 0]; + let overwrite_val = vec![1_u8, 1, 1, 0]; + + // Write first block + let mut batch = RocksDB::batch(); + let height_0 = BlockHeight::first(); + db.batch_write_subspace_val( + &mut batch, + height_0, + &key_with_diffs, + &initial_val, + WriteOpts::ALL, + ) + .unwrap(); + db.batch_write_subspace_val( + &mut batch, + height_0, + &key_without_diffs, + &initial_val, + WriteOpts::NONE, + ) + .unwrap(); + db.exec_batch(batch.0).unwrap(); + + { + let diffs_cf = db.get_column_family(DIFFS_CF).unwrap(); + + // Diffs new key for `key_with_diffs` at height_0 must be present + let (old_with_h0, new_with_h0) = + old_and_new_diff_key(&key_with_diffs, height_0).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_with_h0).unwrap().is_none()); + assert!(db.0.get_cf(diffs_cf, new_with_h0).unwrap().is_some()); + + // Diffs new key for `key_without_diffs` at height_0 must be present + let (old_wo_h0, new_wo_h0) = + old_and_new_diff_key(&key_without_diffs, height_0).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_wo_h0).unwrap().is_none()); + assert!(db.0.get_cf(diffs_cf, new_wo_h0).unwrap().is_some()); + } + + // Write second block + let mut batch = RocksDB::batch(); + let height_1 = height_0 + 10; + db.batch_write_subspace_val( + &mut batch, + height_1, + &key_with_diffs, + &overwrite_val, + WriteOpts::ALL, + ) + .unwrap(); + db.batch_write_subspace_val( + &mut batch, + height_1, + &key_without_diffs, + &overwrite_val, + WriteOpts::NONE, + ) + .unwrap(); + db.exec_batch(batch.0).unwrap(); + + { + let diffs_cf = db.get_column_family(DIFFS_CF).unwrap(); + + // Diffs keys for `key_with_diffs` at height_0 must be present + let (old_with_h0, new_with_h0) = + old_and_new_diff_key(&key_with_diffs, height_0).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_with_h0).unwrap().is_none()); + assert!(db.0.get_cf(diffs_cf, new_with_h0).unwrap().is_some()); + + // Diffs keys for `key_without_diffs` at height_0 must be gone + let (old_wo_h0, new_wo_h0) = + old_and_new_diff_key(&key_without_diffs, height_0).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_wo_h0).unwrap().is_none()); + assert!(db.0.get_cf(diffs_cf, new_wo_h0).unwrap().is_none()); + + // Diffs keys for `key_with_diffs` at height_1 must be present + let (old_with_h1, new_with_h1) = + old_and_new_diff_key(&key_with_diffs, height_1).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_with_h1).unwrap().is_some()); + assert!(db.0.get_cf(diffs_cf, new_with_h1).unwrap().is_some()); + + // Diffs keys for `key_without_diffs` at height_1 must be present + let (old_wo_h1, new_wo_h1) = + old_and_new_diff_key(&key_without_diffs, height_1).unwrap(); + assert!(db.0.get_cf(diffs_cf, 
old_wo_h1).unwrap().is_some()); + assert!(db.0.get_cf(diffs_cf, new_wo_h1).unwrap().is_some()); + } + + // Write third block + let mut batch = RocksDB::batch(); + let height_2 = height_1 + 10; + db.batch_write_subspace_val( + &mut batch, + height_2, + &key_with_diffs, + &initial_val, + WriteOpts::ALL, + ) + .unwrap(); + db.batch_write_subspace_val( + &mut batch, + height_2, + &key_without_diffs, + &initial_val, + WriteOpts::NONE, + ) + .unwrap(); + db.exec_batch(batch.0).unwrap(); + + { + let diffs_cf = db.get_column_family(DIFFS_CF).unwrap(); + + // Diffs keys for `key_with_diffs` at height_1 must be present + let (old_with_h1, new_with_h1) = + old_and_new_diff_key(&key_with_diffs, height_1).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_with_h1).unwrap().is_some()); + assert!(db.0.get_cf(diffs_cf, new_with_h1).unwrap().is_some()); + + // Diffs keys for `key_without_diffs` at height_1 must be gone + let (old_wo_h1, new_wo_h1) = + old_and_new_diff_key(&key_without_diffs, height_1).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_wo_h1).unwrap().is_none()); + assert!(db.0.get_cf(diffs_cf, new_wo_h1).unwrap().is_none()); + + // Diffs keys for `key_with_diffs` at height_2 must be present + let (old_with_h2, new_with_h2) = + old_and_new_diff_key(&key_with_diffs, height_2).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_with_h2).unwrap().is_some()); + assert!(db.0.get_cf(diffs_cf, new_with_h2).unwrap().is_some()); + + // Diffs keys for `key_without_diffs` at height_2 must be present + let (old_wo_h2, new_wo_h2) = + old_and_new_diff_key(&key_without_diffs, height_2).unwrap(); + assert!(db.0.get_cf(diffs_cf, old_wo_h2).unwrap().is_some()); + assert!(db.0.get_cf(diffs_cf, new_wo_h2).unwrap().is_some()); + } + } + /// A test helper to write a block fn add_block_to_batch( db: &RocksDB, diff --git a/crates/benches/host_env.rs b/crates/benches/host_env.rs index ca38170977..47a2c3a54c 100644 --- a/crates/benches/host_env.rs +++ b/crates/benches/host_env.rs @@ -3,7 +3,7 @@ use std::collections::{HashMap, HashSet}; use criterion::{criterion_group, criterion_main, Criterion}; use namada::core::types::account::AccountPublicKeysMap; use namada::core::types::address; -use namada::ledger::storage::DB; +use namada::state::{WriteOpts, DB}; use namada::token::{Amount, Transfer}; use namada::tx::Signature; use namada::vm::wasm::TxCache; @@ -318,7 +318,12 @@ fn storage_write(c: &mut Criterion) { .wl_storage .storage .db - .write_subspace_val(block_height, &key, value) + .write_subspace_val( + block_height, + &key, + value, + WriteOpts::ALL, + ) .unwrap(); }, criterion::BatchSize::SmallInput, diff --git a/crates/benches/native_vps.rs b/crates/benches/native_vps.rs index b1be552bc1..dd64893cd9 100644 --- a/crates/benches/native_vps.rs +++ b/crates/benches/native_vps.rs @@ -515,7 +515,7 @@ fn setup_storage_for_masp_verification( shielded_ctx .shell .wl_storage - .write(&anchor_key, ()) + .write_without_merkle_diffs(&anchor_key, ()) .unwrap(); shielded_ctx.shell.commit_block(); // Cache the masp tx so that it can be returned when queried diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs index 3d9e7cf14f..04335c15a2 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -590,7 +590,7 @@ mod tests { }; use namada_core::types::time::DurationSecs; use namada_core::types::token::Amount; - use 
namada_core::types::{address, encode, eth_bridge_pool}; + use namada_core::types::{address, eth_bridge_pool}; use namada_parameters::{update_epoch_parameter, EpochDuration}; use namada_state::testing::TestWlStorage; use namada_storage::mockdb::MockDBWriteBatch; @@ -611,7 +611,7 @@ mod tests { .expect("Test failed"); // set native ERC20 token wl_storage - .write_bytes(&bridge_storage::native_erc20_key(), encode(&wnam())) + .write(&bridge_storage::native_erc20_key(), wnam()) .expect("Test failed"); } @@ -757,7 +757,7 @@ mod tests { let payer_key = balance_key(&transfer.gas_fee.token, &payer); let payer_balance = Amount::from(0); wl_storage - .write_bytes(&payer_key, payer_balance.serialize_to_vec()) + .write(&payer_key, payer_balance) .expect("Test failed"); let escrow_key = balance_key(&transfer.gas_fee.token, &BRIDGE_POOL_ADDRESS); @@ -772,24 +772,24 @@ mod tests { let sender_key = balance_key(&nam(), &transfer.transfer.sender); let sender_balance = Amount::from(0); wl_storage - .write_bytes(&sender_key, sender_balance.serialize_to_vec()) + .write(&sender_key, sender_balance) .expect("Test failed"); let escrow_key = balance_key(&nam(), &BRIDGE_ADDRESS); let escrow_balance = Amount::from(10); wl_storage - .write_bytes(&escrow_key, escrow_balance.serialize_to_vec()) + .write(&escrow_key, escrow_balance) .expect("Test failed"); } else { let token = transfer.token_address(); let sender_key = balance_key(&token, &transfer.transfer.sender); let sender_balance = Amount::from(0); wl_storage - .write_bytes(&sender_key, sender_balance.serialize_to_vec()) + .write(&sender_key, sender_balance) .expect("Test failed"); let escrow_key = balance_key(&token, &BRIDGE_POOL_ADDRESS); let escrow_balance = Amount::from(10); wl_storage - .write_bytes(&escrow_key, escrow_balance.serialize_to_vec()) + .write(&escrow_key, escrow_balance) .expect("Test failed"); update::amount( wl_storage, diff --git a/crates/ethereum_bridge/src/protocol/transactions/read.rs b/crates/ethereum_bridge/src/protocol/transactions/read.rs index d8da8f6520..c618b0335d 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/read.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/read.rs @@ -54,7 +54,6 @@ where #[cfg(test)] mod tests { use assert_matches::assert_matches; - use namada_core::borsh::BorshSerializeExt; use namada_core::types::storage; use namada_core::types::token::Amount; use namada_state::testing::TestWlStorage; @@ -79,9 +78,7 @@ mod tests { let key = storage::Key::parse("some arbitrary key").unwrap(); let amount = Amount::from(1_000_000); let mut fake_storage = TestWlStorage::default(); - fake_storage - .write_bytes(&key, amount.serialize_to_vec()) - .unwrap(); + fake_storage.write(&key, amount).unwrap(); let amt = read::amount_or_default(&fake_storage, &key).unwrap(); assert_eq!(amt, amount); @@ -92,7 +89,7 @@ mod tests { let key = storage::Key::parse("some arbitrary key").unwrap(); let amount = "not an Amount type"; let mut fake_storage = TestWlStorage::default(); - fake_storage.write_bytes(&key, amount.as_bytes()).unwrap(); + fake_storage.write(&key, amount).unwrap(); assert_matches!(read::amount_or_default(&fake_storage, &key), Err(_)); } diff --git a/crates/ethereum_bridge/src/protocol/transactions/update.rs b/crates/ethereum_bridge/src/protocol/transactions/update.rs index 6d1e1b3e7b..4bb0963e93 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/update.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/update.rs @@ -19,7 +19,7 @@ where { let mut amount = 
super::read::amount_or_default(wl_storage, key)?; update(&mut amount); - wl_storage.write_bytes(key, borsh::to_vec(&amount)?)?; + wl_storage.write(key, amount)?; Ok(amount) } @@ -36,14 +36,13 @@ where { let mut value = super::read::value(wl_storage, key)?; update(&mut value); - wl_storage.write_bytes(key, borsh::to_vec(&value)?)?; + wl_storage.write(key, &value)?; Ok(value) } #[cfg(test)] mod tests { use eyre::{eyre, Result}; - use namada_core::borsh::BorshSerializeExt; use namada_core::types::storage; use namada_state::testing::TestWlStorage; use namada_storage::{StorageRead, StorageWrite}; @@ -57,9 +56,8 @@ mod tests { .expect("could not set up test"); let value = 21i32; let mut wl_storage = TestWlStorage::default(); - let serialized = value.serialize_to_vec(); wl_storage - .write_bytes(&key, serialized) + .write(&key, value) .expect("could not set up test"); super::value(&mut wl_storage, &key, |v: &mut i32| *v *= 2)?; diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs b/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs index 5f4c9e6423..8830059b63 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs @@ -1,5 +1,5 @@ use eyre::{Result, WrapErr}; -use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; +use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::hints; use namada_core::types::storage::Key; use namada_core::types::voting_power::FractionalVotingPower; @@ -21,19 +21,15 @@ where H: 'static + StorageHasher + Sync, T: BorshSerialize, { - wl_storage.write_bytes(&keys.body(), &body.serialize_to_vec())?; - wl_storage.write_bytes(&keys.seen(), &tally.seen.serialize_to_vec())?; - wl_storage - .write_bytes(&keys.seen_by(), &tally.seen_by.serialize_to_vec())?; - wl_storage.write_bytes( - &keys.voting_power(), - &tally.voting_power.serialize_to_vec(), - )?; + wl_storage.write(&keys.body(), body)?; + wl_storage.write(&keys.seen(), tally.seen)?; + wl_storage.write(&keys.seen_by(), tally.seen_by.clone())?; + wl_storage.write(&keys.voting_power(), tally.voting_power.clone())?; if !already_present { // add the current epoch for the inserted event - wl_storage.write_bytes( + wl_storage.write( &keys.voting_started_epoch(), - &wl_storage.storage.get_current_epoch().0.serialize_to_vec(), + wl_storage.storage.get_current_epoch().0, )?; } Ok(()) @@ -138,6 +134,7 @@ mod tests { use std::collections::BTreeMap; use assert_matches::assert_matches; + use namada_core::borsh::BorshSerializeExt; use namada_core::types::ethereum_events::EthereumEvent; use super::*; @@ -239,25 +236,16 @@ mod tests { seen_by: BTreeMap::from([(validator, 10.into())]), seen: false, }; + wl_storage.write(&keys.body(), &event).unwrap(); + wl_storage.write(&keys.seen(), tally.seen).unwrap(); + wl_storage.write(&keys.seen_by(), &tally.seen_by).unwrap(); wl_storage - .write_bytes(&keys.body(), &event.serialize_to_vec()) - .unwrap(); - wl_storage - .write_bytes(&keys.seen(), &tally.seen.serialize_to_vec()) - .unwrap(); - wl_storage - .write_bytes(&keys.seen_by(), &tally.seen_by.serialize_to_vec()) - .unwrap(); - wl_storage - .write_bytes( - &keys.voting_power(), - &tally.voting_power.serialize_to_vec(), - ) + .write(&keys.voting_power(), &tally.voting_power) .unwrap(); wl_storage - .write_bytes( + .write( &keys.voting_started_epoch(), - &wl_storage.storage.get_block_height().0.serialize_to_vec(), + wl_storage.storage.get_block_height().0, ) .unwrap(); 
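[Reading aid, not part of the patch.] The hunks in this diff thread a new `WriteOpts` value (`ALL`, `NONE`, `WRITE_DIFFS`) through the storage and DB layers, and the workspace gains a `bitflags 2.4.2` dependency for it. Below is a minimal sketch of how such a flags type could be declared; the real definition lives in `namada_storage`, and the `MERKLIZE` flag name and the concrete bit values are assumptions for illustration only.

use bitflags::bitflags;

bitflags! {
    /// Sketch of the write options threaded through the storage/DB layers.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub struct WriteOpts: u8 {
        /// Include the key-value pair in the block's Merkle tree
        /// (flag name assumed for illustration).
        const MERKLIZE = 0b01;
        /// Keep old/new value diffs in the DB past the current block.
        const WRITE_DIFFS = 0b10;
        /// Merklize and keep diffs.
        const ALL = Self::MERKLIZE.bits() | Self::WRITE_DIFFS.bits();
        /// Neither merklize nor keep diffs past the current block.
        const NONE = 0b00;
    }
}

fn main() {
    // `tx_write`/`tx_delete` in the host env above receive the options over
    // the WASM boundary as a raw `u8`, so `from_bits` rejects unknown bits
    // (surfaced as `TxRuntimeError::InvalidWriteOptions`).
    let raw: u8 = WriteOpts::ALL.bits();
    let opts = WriteOpts::from_bits(raw).expect("known flags");
    assert!(opts.contains(WriteOpts::WRITE_DIFFS));
    assert!(!WriteOpts::NONE.contains(WriteOpts::WRITE_DIFFS));
}

Under this shape, the plain `write`/`delete` helpers would correspond to `ALL`, while `write_without_merkle_diffs`/`delete_without_diffs` would correspond to `NONE`, which appears to match how the call sites and tests in this diff use the two variants.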
diff --git a/crates/ethereum_bridge/src/storage/parameters.rs b/crates/ethereum_bridge/src/storage/parameters.rs index dd420de67b..ead8b612f4 100644 --- a/crates/ethereum_bridge/src/storage/parameters.rs +++ b/crates/ethereum_bridge/src/storage/parameters.rs @@ -4,9 +4,9 @@ use std::num::NonZeroU64; use eyre::{eyre, Result}; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::types::ethereum_events::EthAddress; +use namada_core::types::ethereum_structs; use namada_core::types::storage::Key; use namada_core::types::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; -use namada_core::types::{encode, ethereum_structs}; use namada_state::{DBIter, StorageHasher, WlStorage, DB}; use namada_storage::{StorageRead, StorageWrite}; use serde::{Deserialize, Serialize}; @@ -188,22 +188,18 @@ impl EthereumBridgeParams { let bridge_contract_key = bridge_storage::bridge_contract_key(); let eth_start_height_key = bridge_storage::eth_start_height_key(); wl_storage - .write_bytes( + .write( &active_key, - encode(&EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis)), + EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis), ) .unwrap(); wl_storage - .write_bytes(&min_confirmations_key, encode(min_confirmations)) + .write(&min_confirmations_key, min_confirmations) .unwrap(); + wl_storage.write(&native_erc20_key, native_erc20).unwrap(); + wl_storage.write(&bridge_contract_key, bridge).unwrap(); wl_storage - .write_bytes(&native_erc20_key, encode(native_erc20)) - .unwrap(); - wl_storage - .write_bytes(&bridge_contract_key, encode(bridge)) - .unwrap(); - wl_storage - .write_bytes(ð_start_height_key, encode(eth_start_height)) + .write(ð_start_height_key, eth_start_height) .unwrap(); for Erc20WhitelistEntry { token_address: addr, @@ -225,21 +221,21 @@ impl EthereumBridgeParams { suffix: whitelist::KeyType::Whitelisted, } .into(); - wl_storage.write_bytes(&key, encode(&true)).unwrap(); + wl_storage.write(&key, true).unwrap(); let key = whitelist::Key { asset: *addr, suffix: whitelist::KeyType::Cap, } .into(); - wl_storage.write_bytes(&key, encode(&cap)).unwrap(); + wl_storage.write(&key, cap).unwrap(); let key = whitelist::Key { asset: *addr, suffix: whitelist::KeyType::Denomination, } .into(); - wl_storage.write_bytes(&key, encode(&denom)).unwrap(); + wl_storage.write(&key, denom).unwrap(); } // Initialize the storage for the Ethereum Bridge VP. 
vp::ethereum_bridge::init_storage(wl_storage); @@ -368,7 +364,6 @@ where #[cfg(test)] mod tests { use eyre::Result; - use namada_core::borsh::BorshSerializeExt; use namada_core::types::ethereum_events::EthAddress; use namada_state::testing::TestWlStorage; @@ -462,16 +457,16 @@ mod tests { fn test_ethereum_bridge_config_storage_partially_configured() { let mut wl_storage = TestWlStorage::default(); wl_storage - .write_bytes( + .write( &bridge_storage::active_key(), - encode(&EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis)), + EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis), ) .unwrap(); // Write a valid min_confirmations value wl_storage - .write_bytes( + .write( &bridge_storage::min_confirmations_key(), - MinimumConfirmations::default().serialize_to_vec(), + MinimumConfirmations::default(), ) .unwrap(); diff --git a/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs b/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs index 4f84b855ab..644555548b 100644 --- a/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs +++ b/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs @@ -1,4 +1,3 @@ -use namada_core::borsh::BorshSerializeExt; use namada_core::ledger::eth_bridge::ADDRESS; use namada_core::types::hash::StorageHasher; use namada_state::{DBIter, WlStorage, DB}; @@ -16,10 +15,8 @@ where H: StorageHasher, { let escrow_key = balance_key(&wl_storage.storage.native_token, &ADDRESS); - wl_storage - .write_bytes(&escrow_key, Amount::default().serialize_to_vec()) - .expect( - "Initializing the escrow balance of the Ethereum Bridge VP \ - shouldn't fail.", - ); + wl_storage.write(&escrow_key, Amount::default()).expect( + "Initializing the escrow balance of the Ethereum Bridge VP shouldn't \ + fail.", + ); } diff --git a/crates/ibc/src/actions.rs b/crates/ibc/src/actions.rs index f56192802b..b8acfc99e1 100644 --- a/crates/ibc/src/actions.rs +++ b/crates/ibc/src/actions.rs @@ -21,7 +21,7 @@ use namada_state::wl_storage::{PrefixIter, WriteLogAndStorage}; use namada_state::write_log::{self, WriteLog}; use namada_state::{ self as storage, iter_prefix_post, DBIter, ResultExt, State, StorageError, - StorageHasher, StorageResult, StorageWrite, WlStorage, DB, + StorageHasher, StorageResult, StorageWrite, WlStorage, WriteOpts, DB, }; use namada_storage::StorageRead; use namada_trans_token as token; diff --git a/crates/ibc/src/context/common.rs b/crates/ibc/src/context/common.rs index b5b761bd86..11fa0693cc 100644 --- a/crates/ibc/src/context/common.rs +++ b/crates/ibc/src/context/common.rs @@ -643,7 +643,8 @@ pub trait IbcCommonContext: IbcStorageContext { u64::checked_add(count, 1).ok_or_else(|| ClientError::Other { description: format!("The counter overflow: Key {key}"), })?; - self.write(key, count).map_err(ContextError::from) + self.write_without_merkle_diffs(key, count) + .map_err(ContextError::from) } /// Write the IBC denom. The given address could be a non-Namada token. 
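[Reading aid, not part of the patch.] The rocksdb.rs changes earlier in this diff centralize diff-key construction in `old_and_new_diff_key`: for a write at a given block height, the previous value is stored in the DIFFS column family under an "old" segment and the new value under a "new" segment, and when diffs are not persisted the entries from the previous height are pruned, so each key keeps diffs for at least one block (the test notes this is needed for rollback). The following self-contained sketch mirrors that key layout with plain strings; the real helper goes through namada's `Key` and `to_db_key` types, so the exact rendered form shown here is an assumption.

// Sketch only: mirrors a "{height}/old/{key}" / "{height}/new/{key}" layout
// with plain strings instead of namada's `Key` type.
const OLD_DIFF_PREFIX: &str = "old";
const NEW_DIFF_PREFIX: &str = "new";

fn old_and_new_diff_key(key: &str, height: u64) -> (String, String) {
    (
        format!("{height}/{OLD_DIFF_PREFIX}/{key}"),
        format!("{height}/{NEW_DIFF_PREFIX}/{key}"),
    )
}

fn main() {
    // An overwrite at height 2 records the previous value under the "old"
    // key and the new value under the "new" key for that height.
    let (old_key, new_key) = old_and_new_diff_key("balance", 2);
    assert_eq!(old_key, "2/old/balance");
    assert_eq!(new_key, "2/new/balance");
}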
diff --git a/crates/ibc/src/storage.rs b/crates/ibc/src/storage.rs index 2defc63ef9..2137a6e6fa 100644 --- a/crates/ibc/src/storage.rs +++ b/crates/ibc/src/storage.rs @@ -19,9 +19,10 @@ use namada_core::types::storage::{DbKeySeg, Key, KeySeg}; use sha2::{Digest, Sha256}; use thiserror::Error; -const CLIENTS_COUNTER: &str = "clients/counter"; -const CONNECTIONS_COUNTER: &str = "connections/counter"; -const CHANNELS_COUNTER: &str = "channelEnds/counter"; +const CLIENTS_COUNTER_PREFIX: &str = "clients"; +const CONNECTIONS_COUNTER_PREFIX: &str = "connections"; +const CHANNELS_COUNTER_PREFIX: &str = "channelEnds"; +const COUNTER_SEG: &str = "counter"; const DENOM: &str = "ibc_denom"; #[allow(missing_docs)] @@ -52,20 +53,20 @@ pub fn ibc_key(path: impl AsRef) -> Result { /// Returns a key of the IBC client counter pub fn client_counter_key() -> Key { - let path = CLIENTS_COUNTER.to_owned(); + let path = format!("{}/{}", CLIENTS_COUNTER_PREFIX, COUNTER_SEG); ibc_key(path).expect("Creating a key for the client counter shouldn't fail") } /// Returns a key of the IBC connection counter pub fn connection_counter_key() -> Key { - let path = CONNECTIONS_COUNTER.to_owned(); + let path = format!("{}/{}", CONNECTIONS_COUNTER_PREFIX, COUNTER_SEG); ibc_key(path) .expect("Creating a key for the connection counter shouldn't fail") } /// Returns a key of the IBC channel counter pub fn channel_counter_key() -> Key { - let path = CHANNELS_COUNTER.to_owned(); + let path = format!("{}/{}", CHANNELS_COUNTER_PREFIX, COUNTER_SEG); ibc_key(path) .expect("Creating a key for the channel counter shouldn't fail") } @@ -449,3 +450,15 @@ pub fn is_ibc_denom_key(key: &Key) -> Option<(String, String)> { _ => None, } } + +/// Returns true if the given key is for an IBC counter for clients, +/// connections, or channelEnds +pub fn is_ibc_counter_key(key: &Key) -> bool { + matches!(&key.segments[..], + [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(prefix), DbKeySeg::StringSeg(counter)] + if addr == &Address::Internal(InternalAddress::Ibc) + && (prefix == CLIENTS_COUNTER_PREFIX + || prefix == CONNECTIONS_COUNTER_PREFIX + || prefix == CHANNELS_COUNTER_PREFIX) && counter == COUNTER_SEG + ) +} diff --git a/crates/namada/src/ledger/ibc/mod.rs b/crates/namada/src/ledger/ibc/mod.rs index 4f0a9970be..606d0133e9 100644 --- a/crates/namada/src/ledger/ibc/mod.rs +++ b/crates/namada/src/ledger/ibc/mod.rs @@ -15,24 +15,23 @@ where // In ibc-go, u64 like a counter is encoded with big-endian: // https://github.com/cosmos/ibc-go/blob/89ffaafb5956a5ea606e1f1bf249c880bea802ed/modules/core/04-channel/keeper/keeper.go#L115 + let init_value = 0_u64; + // the client counter let key = client_counter_key(); - let value = 0_u64.to_be_bytes().to_vec(); storage - .write_bytes(&key, value) + .write_without_merkle_diffs(&key, init_value) .expect("Unable to write the initial client counter"); // the connection counter let key = connection_counter_key(); - let value = 0_u64.to_be_bytes().to_vec(); storage - .write_bytes(&key, value) + .write_without_merkle_diffs(&key, init_value) .expect("Unable to write the initial connection counter"); // the channel counter let key = channel_counter_key(); - let value = 0_u64.to_be_bytes().to_vec(); storage - .write_bytes(&key, value) + .write_without_merkle_diffs(&key, init_value) .expect("Unable to write the initial channel counter"); } diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs index f1d50cad81..c6ea31000b 
100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -1622,16 +1622,13 @@ mod test_bridge_pool_vp { let eb_account_key = balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); wl_storage - .write_bytes(&eb_account_key, Amount::default().serialize_to_vec()) + .write(&eb_account_key, Amount::default()) .expect("Test failed"); // initialize the gas payers account let gas_payer_balance_key = balance_key(&nam(), &established_address_1()); wl_storage - .write_bytes( - &gas_payer_balance_key, - Amount::from(BERTHA_WEALTH).serialize_to_vec(), - ) + .write(&gas_payer_balance_key, Amount::from(BERTHA_WEALTH)) .expect("Test failed"); wl_storage.write_log.commit_tx(); let tx = Tx::from_type(TxType::Raw); diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs index 34b204e2e4..b72fe7d13f 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -219,10 +219,9 @@ mod tests { &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), ); wl_storage - .write_bytes( + .write( &balance_key, - Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE) - .serialize_to_vec(), + Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE), ) .expect("Test failed"); diff --git a/crates/namada/src/ledger/native_vp/ibc/context.rs b/crates/namada/src/ledger/native_vp/ibc/context.rs index 592c578bc2..5cb8e12a99 100644 --- a/crates/namada/src/ledger/native_vp/ibc/context.rs +++ b/crates/namada/src/ledger/native_vp/ibc/context.rs @@ -5,7 +5,7 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use borsh_ext::BorshSerializeExt; use namada_core::types::storage::Epochs; use namada_ibc::{IbcCommonContext, IbcStorageContext}; -use namada_state::{StorageRead, StorageWrite}; +use namada_state::{StorageRead, StorageWrite, WriteOpts}; use crate::ledger::ibc::storage::is_ibc_key; use crate::ledger::native_vp::CtxPreStorageRead; @@ -78,10 +78,11 @@ where fn read_bytes(&self, key: &Key) -> Result>> { match self.store.get(key) { - Some(StorageModification::Write { ref value }) => { - Ok(Some(value.clone())) - } - Some(StorageModification::Delete) => Ok(None), + Some(StorageModification::Write { + ref value, + action: _, + }) => Ok(Some(value.clone())), + Some(StorageModification::Delete { .. }) => Ok(None), Some(StorageModification::Temp { .. 
}) => { unreachable!("Temp shouldn't be inserted") } @@ -152,22 +153,25 @@ where H: 'static + StorageHasher, CA: 'static + WasmCacheAccess, { - fn write_bytes( + fn write_bytes_with_opts( &mut self, key: &Key, value: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result<()> { self.store.insert( key.clone(), StorageModification::Write { value: value.as_ref().to_vec(), + action, }, ); Ok(()) } - fn delete(&mut self, key: &Key) -> Result<()> { - self.store.insert(key.clone(), StorageModification::Delete); + fn delete_with_opts(&mut self, key: &Key, action: WriteOpts) -> Result<()> { + self.store + .insert(key.clone(), StorageModification::Delete { action }); Ok(()) } } @@ -383,15 +387,20 @@ where H: 'static + StorageHasher, CA: 'static + WasmCacheAccess, { - fn write_bytes( + fn write_bytes_with_opts( &mut self, _key: &Key, _val: impl AsRef<[u8]>, + _action: WriteOpts, ) -> Result<()> { unimplemented!("Validation doesn't write any data") } - fn delete(&mut self, _key: &Key) -> Result<()> { + fn delete_with_opts( + &mut self, + _key: &Key, + _action: WriteOpts, + ) -> Result<()> { unimplemented!("Validation doesn't delete any data") } } diff --git a/crates/namada/src/ledger/native_vp/ibc/mod.rs b/crates/namada/src/ledger/native_vp/ibc/mod.rs index 7774c41cf1..607a207eec 100644 --- a/crates/namada/src/ledger/native_vp/ibc/mod.rs +++ b/crates/namada/src/ledger/native_vp/ibc/mod.rs @@ -217,7 +217,7 @@ fn match_value( expected: Option<&StorageModification>, ) -> VpResult<()> { match (actual, expected) { - (Some(v), Some(StorageModification::Write { value })) => { + (Some(v), Some(StorageModification::Write { value, action: _ })) => { if v == *value { Ok(()) } else { @@ -231,7 +231,7 @@ fn match_value( "The value was invalid: Key {}", key ))), - (None, Some(StorageModification::Delete)) => Ok(()), + (None, Some(StorageModification::Delete { action: _ })) => Ok(()), (None, _) => Err(Error::StateChange(format!( "The key was deleted unexpectedly: Key {}", key diff --git a/crates/namada/src/ledger/vp_host_fns.rs b/crates/namada/src/ledger/vp_host_fns.rs index 6b56c68433..a7e2fdce84 100644 --- a/crates/namada/src/ledger/vp_host_fns.rs +++ b/crates/namada/src/ledger/vp_host_fns.rs @@ -75,10 +75,11 @@ where let (log_val, gas) = write_log.read_pre(key); add_gas(gas_meter, gas, sentinel)?; match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - Ok(Some(value.clone())) - } - Some(&write_log::StorageModification::Delete) => { + Some(write_log::StorageModification::Write { + ref value, + action: _, + }) => Ok(Some(value.clone())), + Some(&write_log::StorageModification::Delete { action: _ }) => { // Given key has been deleted Ok(None) } @@ -118,10 +119,11 @@ where let (log_val, gas) = write_log.read(key); add_gas(gas_meter, gas, sentinel)?; match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - Ok(Some(value.clone())) - } - Some(&write_log::StorageModification::Delete) => { + Some(write_log::StorageModification::Write { + ref value, + action: _, + }) => Ok(Some(value.clone())), + Some(&write_log::StorageModification::Delete { action: _ }) => { // Given key has been deleted Ok(None) } @@ -182,7 +184,7 @@ where add_gas(gas_meter, gas, sentinel)?; match log_val { Some(&write_log::StorageModification::Write { .. }) => Ok(true), - Some(&write_log::StorageModification::Delete) => { + Some(&write_log::StorageModification::Delete { .. 
}) => { // The given key has been deleted Ok(false) } @@ -216,7 +218,7 @@ where add_gas(gas_meter, gas, sentinel)?; match log_val { Some(&write_log::StorageModification::Write { .. }) => Ok(true), - Some(&write_log::StorageModification::Delete) => { + Some(&write_log::StorageModification::Delete { .. }) => { // The given key has been deleted Ok(false) } diff --git a/crates/namada/src/vm/host_env.rs b/crates/namada/src/vm/host_env.rs index 908459fcae..9141e39b48 100644 --- a/crates/namada/src/vm/host_env.rs +++ b/crates/namada/src/vm/host_env.rs @@ -72,6 +72,8 @@ pub enum TxRuntimeError { MissingTxData, #[error("IBC: {0}")] Ibc(#[from] namada_ibc::Error), + #[error("Invalid Write Options")] + InvalidWriteOptions, } type TxResult = std::result::Result; @@ -552,7 +554,7 @@ where Some(&write_log::StorageModification::Write { .. }) => { HostEnvResult::Success.to_i64() } - Some(&write_log::StorageModification::Delete) => { + Some(&write_log::StorageModification::Delete { .. }) => { // the given key has been deleted HostEnvResult::Fail.to_i64() } @@ -605,7 +607,10 @@ where let (log_val, gas) = write_log.read(&key); tx_charge_gas(env, gas)?; Ok(match log_val { - Some(write_log::StorageModification::Write { ref value }) => { + Some(write_log::StorageModification::Write { + ref value, + action: _, + }) => { let len: i64 = value .len() .try_into() @@ -614,7 +619,7 @@ where result_buffer.replace(value.clone()); len } - Some(&write_log::StorageModification::Delete) => { + Some(&write_log::StorageModification::Delete { .. }) => { // fail, given key has been deleted HostEnvResult::Fail.to_i64() } @@ -751,7 +756,10 @@ where ); tx_charge_gas(env, iter_gas + log_gas)?; match log_val { - Some(write_log::StorageModification::Write { ref value }) => { + Some(write_log::StorageModification::Write { + ref value, + action: _, + }) => { let key_val = borsh::to_vec(&KeyVal { key, val: value.clone(), @@ -765,7 +773,7 @@ where result_buffer.replace(key_val); return Ok(len); } - Some(&write_log::StorageModification::Delete) => { + Some(&write_log::StorageModification::Delete { .. }) => { // check the next because the key has already deleted continue; } @@ -811,6 +819,7 @@ pub fn tx_write( key_len: u64, val_ptr: u64, val_len: u64, + write_opts: u8, ) -> TxResult<()> where MEM: VmMemory, @@ -838,9 +847,12 @@ where check_address_existence(env, &key)?; + let write_opts = WriteOpts::from_bits(write_opts) + .ok_or_else(|| TxRuntimeError::InvalidWriteOptions)?; + let write_log = unsafe { env.ctx.write_log.get() }; let (gas, _size_diff) = write_log - .write(&key, value) + .write_with_opts(&key, value, write_opts) .map_err(TxRuntimeError::StorageModificationError)?; tx_charge_gas(env, gas) } @@ -944,6 +956,7 @@ pub fn tx_delete( env: &TxVmEnv, key_ptr: u64, key_len: u64, + write_opts: u8, ) -> TxResult<()> where MEM: VmMemory, @@ -964,9 +977,12 @@ where return Err(TxRuntimeError::CannotDeleteVp); } + let write_opts = WriteOpts::from_bits(write_opts) + .ok_or_else(|| TxRuntimeError::InvalidWriteOptions)?; + let write_log = unsafe { env.ctx.write_log.get() }; let (gas, _size_diff) = write_log - .delete(&key) + .delete_with_opts(&key, write_opts) .map_err(TxRuntimeError::StorageModificationError)?; tx_charge_gas(env, gas) } @@ -2344,7 +2360,7 @@ where } // Temp. 
workaround for -use namada_state::StorageRead; +use namada_state::{StorageRead, WriteOpts}; use crate::types::storage::BlockHash; impl<'a, DB, H, CA> StorageRead for TxCtx<'a, DB, H, CA> @@ -2364,10 +2380,11 @@ where let (log_val, gas) = write_log.read(key); ibc_tx_charge_gas(self, gas)?; Ok(match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - Some(value.clone()) - } - Some(&write_log::StorageModification::Delete) => None, + Some(write_log::StorageModification::Write { + ref value, + action: _, + }) => Some(value.clone()), + Some(&write_log::StorageModification::Delete { .. }) => None, Some(write_log::StorageModification::InitAccount { ref vp_code_hash, }) => Some(vp_code_hash.to_vec()), @@ -2391,7 +2408,7 @@ where ibc_tx_charge_gas(self, gas)?; Ok(match log_val { Some(&write_log::StorageModification::Write { .. }) => true, - Some(&write_log::StorageModification::Delete) => false, + Some(&write_log::StorageModification::Delete { .. }) => false, Some(&write_log::StorageModification::InitAccount { .. }) => true, Some(&write_log::StorageModification::Temp { .. }) => true, None => { @@ -2431,10 +2448,13 @@ where write_log.read(&Key::parse(key.clone()).into_storage_result()?); ibc_tx_charge_gas(self, iter_gas + log_gas)?; match log_val { - Some(write_log::StorageModification::Write { ref value }) => { + Some(write_log::StorageModification::Write { + ref value, + action: _, + }) => { return Ok(Some((key, value.clone()))); } - Some(&write_log::StorageModification::Delete) => { + Some(&write_log::StorageModification::Delete { .. }) => { // check the next because the key has already deleted continue; } @@ -2537,25 +2557,32 @@ where H: StorageHasher, CA: WasmCacheAccess, { - fn write_bytes( + fn write_bytes_with_opts( &mut self, key: &Key, data: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result<(), namada_state::StorageError> { let write_log = unsafe { self.write_log.get() }; let (gas, _size_diff) = write_log - .write(key, data.as_ref().to_vec()) + .write_with_opts(key, data.as_ref().to_vec(), action) .into_storage_result()?; ibc_tx_charge_gas(self, gas) } - fn delete(&mut self, key: &Key) -> Result<(), namada_state::StorageError> { + fn delete_with_opts( + &mut self, + key: &Key, + action: WriteOpts, + ) -> Result<(), namada_state::StorageError> { if key.is_validity_predicate().is_some() { return Err(TxRuntimeError::CannotDeleteVp).into_storage_result(); } let write_log = unsafe { self.write_log.get() }; - let (gas, _size_diff) = write_log.delete(key).into_storage_result()?; + let (gas, _size_diff) = write_log + .delete_with_opts(key, action) + .into_storage_result()?; ibc_tx_charge_gas(self, gas) } } diff --git a/crates/namada/src/vm/wasm/run.rs b/crates/namada/src/vm/wasm/run.rs index 8d84185135..a1c5a0d098 100644 --- a/crates/namada/src/vm/wasm/run.rs +++ b/crates/namada/src/vm/wasm/run.rs @@ -524,11 +524,11 @@ where // Gas accounting even if the compiled module is in cache let key = Key::wasm_code_len(code_hash); let tx_len = match write_log.read(&key).0 { - Some(StorageModification::Write { value }) => { - u64::try_from_slice(value).map_err(|e| { - Error::ConversionError(e.to_string()) - }) - } + Some(StorageModification::Write { + value, + action: _, + }) => u64::try_from_slice(value) + .map_err(|e| Error::ConversionError(e.to_string())), _ => match storage .read(&key) .map_err(|e| { @@ -555,9 +555,10 @@ where None => { let key = Key::wasm_code(code_hash); let code = match write_log.read(&key).0 { - Some(StorageModification::Write { value }) => { - value.clone() 
- } + Some(StorageModification::Write { + value, + action: _, + }) => value.clone(), _ => match storage .read(&key) .map_err(|e| { diff --git a/crates/parameters/src/lib.rs b/crates/parameters/src/lib.rs index 9caaec48a6..d64f445c28 100644 --- a/crates/parameters/src/lib.rs +++ b/crates/parameters/src/lib.rs @@ -286,8 +286,6 @@ where S: StorageRead + StorageWrite, { let key = storage::get_max_signatures_per_transaction_key(); - // Using `fn write_bytes` here, because implicit_vp doesn't need to be - // encoded, it's bytes already. storage.write(&key, value) } diff --git a/crates/sdk/src/queries/shell/eth_bridge.rs b/crates/sdk/src/queries/shell/eth_bridge.rs index 334ddbb97f..92ee5f3ca8 100644 --- a/crates/sdk/src/queries/shell/eth_bridge.rs +++ b/crates/sdk/src/queries/shell/eth_bridge.rs @@ -1079,10 +1079,7 @@ mod test_ethbridge_router { client.wl_storage.storage.block.height = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // commit the changes and increase block height @@ -1122,10 +1119,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // commit the changes and increase block height @@ -1141,10 +1135,7 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), &transfer2) .expect("Test failed"); // commit the changes and increase block height @@ -1187,10 +1178,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // create a signed Merkle root for this pool @@ -1209,18 +1197,15 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client .wl_storage - .write_bytes( + .write( &get_signed_root_key(), - (signed_root.clone(), written_height).serialize_to_vec(), + (signed_root.clone(), written_height), ) .expect("Test failed"); @@ -1303,10 +1288,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // create a signed Merkle root for this pool @@ -1328,19 +1310,13 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), &transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client .wl_storage - .write_bytes( - &get_signed_root_key(), - (signed_root, BlockHeight::from(0)).serialize_to_vec(), - ) + .write(&get_signed_root_key(), (signed_root, BlockHeight::from(0))) .expect("Test failed"); // commit the changes and increase block height @@ -1398,10 +1374,7 @@ mod test_ethbridge_router { // 
write a transfer into the bridge pool client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // create a signed Merkle root for this pool @@ -1420,19 +1393,13 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client .wl_storage - .write_bytes( - &get_signed_root_key(), - (signed_root, written_height).serialize_to_vec(), - ) + .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); // commit the changes and increase block height @@ -1473,10 +1440,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); let event_transfer: namada_core::types::ethereum_events::TransferToEthereum @@ -1490,17 +1454,16 @@ mod test_ethbridge_router { let voting_power = FractionalVotingPower::HALF; client .wl_storage - .write_bytes(ð_msg_key.body(), eth_event.serialize_to_vec()) + .write(ð_msg_key.body(), eth_event) .expect("Test failed"); client .wl_storage - .write_bytes( + .write( ð_msg_key.voting_power(), EpochedVotingPower::from([( 0.into(), voting_power * dummy_validator_stake, - )]) - .serialize_to_vec(), + )]), ) .expect("Test failed"); client @@ -1520,10 +1483,7 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // commit the changes and increase block height @@ -1572,10 +1532,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // create a signed Merkle root for this pool @@ -1594,19 +1551,13 @@ mod test_ethbridge_router { transfer2.transfer.amount = 1.into(); client .wl_storage - .write_bytes( - &get_pending_key(&transfer2), - transfer2.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client .wl_storage - .write_bytes( - &get_signed_root_key(), - (signed_root, written_height).serialize_to_vec(), - ) + .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); // commit the changes and increase block height @@ -1739,10 +1690,7 @@ mod test_ethbridge_router { }; client .wl_storage - .write_bytes( - &get_pending_key(&transfer), - transfer.serialize_to_vec(), - ) + .write(&get_pending_key(&transfer), transfer.clone()) .expect("Test failed"); // write transfers into the event log @@ -1781,10 +1729,7 @@ mod test_ethbridge_router { let written_height = client.wl_storage.storage.block.height; client .wl_storage - .write_bytes( - &get_signed_root_key(), - (signed_root, written_height).serialize_to_vec(), - ) + .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); client .wl_storage diff --git a/crates/shielded_token/src/conversion.rs b/crates/shielded_token/src/conversion.rs 
index 48945e13d9..abf62e4502 100644 --- a/crates/shielded_token/src/conversion.rs +++ b/crates/shielded_token/src/conversion.rs @@ -163,9 +163,15 @@ where // but we should make sure the return value's ratio matches // this new inflation rate in 'update_allowed_conversions', // otherwise we will have an inaccurate view of inflation - wl_storage.write(&masp_last_inflation_key(addr), inflation_amount)?; + wl_storage.write_without_merkle_diffs( + &masp_last_inflation_key(addr), + inflation_amount, + )?; - wl_storage.write(&masp_last_locked_ratio_key(addr), locked_ratio)?; + wl_storage.write_without_merkle_diffs( + &masp_last_locked_ratio_key(addr), + locked_ratio, + )?; Ok((noterized_inflation, precision)) } @@ -436,7 +442,7 @@ where wl_storage.storage.conversion_state.tree = FrozenCommitmentTree::merge(&tree_parts); // Update the anchor in storage - wl_storage.write( + wl_storage.write_without_merkle_diffs( &crate::storage_key::masp_convert_anchor_key(), namada_core::types::hash::Hash( bls12_381::Scalar::from( diff --git a/crates/shielded_token/src/storage.rs b/crates/shielded_token/src/storage.rs index 17ebfdf927..bd29634a3e 100644 --- a/crates/shielded_token/src/storage.rs +++ b/crates/shielded_token/src/storage.rs @@ -22,11 +22,25 @@ where kp_gain_nom, locked_ratio_target: locked_target, } = params; - storage.write(&masp_last_inflation_key(address), Amount::zero())?; - storage.write(&masp_last_locked_ratio_key(address), Dec::zero())?; - storage.write(&masp_max_reward_rate_key(address), max_rate)?; - storage.write(&masp_locked_ratio_target_key(address), locked_target)?; - storage.write(&masp_kp_gain_key(address), kp_gain_nom)?; - storage.write(&masp_kd_gain_key(address), kd_gain_nom)?; + storage.write_without_merkle_diffs( + &masp_last_inflation_key(address), + Amount::zero(), + )?; + storage.write_without_merkle_diffs( + &masp_last_locked_ratio_key(address), + Dec::zero(), + )?; + storage.write_without_merkle_diffs( + &masp_max_reward_rate_key(address), + max_rate, + )?; + storage.write_without_merkle_diffs( + &masp_locked_ratio_target_key(address), + locked_target, + )?; + storage + .write_without_merkle_diffs(&masp_kp_gain_key(address), kp_gain_nom)?; + storage + .write_without_merkle_diffs(&masp_kd_gain_key(address), kd_gain_nom)?; Ok(()) } diff --git a/crates/shielded_token/src/utils.rs b/crates/shielded_token/src/utils.rs index 4dfbaa9e89..ffa0068a99 100644 --- a/crates/shielded_token/src/utils.rs +++ b/crates/shielded_token/src/utils.rs @@ -19,7 +19,10 @@ fn reveal_nullifiers( .sapling_bundle() .map_or(&vec![], |description| &description.shielded_spends) { - ctx.write(&masp_nullifier_key(&description.nullifier), ())?; + ctx.write_without_merkle_diffs( + &masp_nullifier_key(&description.nullifier), + (), + )?; } Ok(()) @@ -50,7 +53,7 @@ pub fn update_note_commitment_tree( })?; } - ctx.write(&tree_key, commitment_tree)?; + ctx.write_without_merkle_diffs(&tree_key, commitment_tree)?; } } @@ -71,7 +74,7 @@ pub fn handle_masp_tx( // If storage key has been supplied, then pin this transaction to it if let Some(key) = pin_key { - ctx.write( + ctx.write_without_merkle_diffs( &masp_pin_tx_key(key), IndexedTx { height: ctx.get_block_height()?, diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 5635e8226e..10a4c86da5 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -338,7 +338,7 @@ where /// gas cost. 
pub fn has_key(&self, key: &Key) -> Result<(bool, u64)> { Ok(( - self.block.tree.has_key(key)?, + self.db.read_subspace_val(key)?.is_some(), key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, )) } @@ -346,10 +346,6 @@ where /// Returns a value from the specified subspace and the gas cost pub fn read(&self, key: &Key) -> Result<(Option>, u64)> { tracing::debug!("storage read key {}", key); - let (present, gas) = self.has_key(key)?; - if !present { - return Ok((None, gas)); - } match self.db.read_subspace_val(key)? { Some(v) => { @@ -411,10 +407,11 @@ where /// Write a value to the specified subspace and returns the gas cost and the /// size difference - pub fn write( + pub fn write_with_opts( &mut self, key: &Key, value: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result<(u64, i64)> { // Note that this method is the same as `StorageWrite::write_bytes`, // but with gas and storage bytes len diff accounting @@ -427,32 +424,80 @@ where self.block.tree.update(key, height)?; } else { // Update the merkle tree - self.block.tree.update(key, value)?; + if action.contains(WriteOpts::MERKLIZE) { + self.block.tree.update(key, value)?; + } } let len = value.len(); let gas = (key.len() + len) as u64 * STORAGE_WRITE_GAS_PER_BYTE; - let size_diff = - self.db.write_subspace_val(self.block.height, key, value)?; + let size_diff = self.db.write_subspace_val( + self.block.height, + key, + value, + action, + )?; Ok((gas, size_diff)) } + /// Write with merklization and diffs + pub fn write( + &mut self, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<(u64, i64)> { + self.write_with_opts(key, value, WriteOpts::ALL) + } + + /// Write with diffs but no merklization + pub fn write_without_merkle( + &mut self, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<(u64, i64)> { + self.write_with_opts(key, value, WriteOpts::WRITE_DIFFS) + } + + /// Write without diffs or merklization + pub fn write_without_merkle_diffs( + &mut self, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<(u64, i64)> { + self.write_with_opts(key, value, WriteOpts::NONE) + } + /// Delete the specified subspace and returns the gas cost and the size /// difference - pub fn delete(&mut self, key: &Key) -> Result<(u64, i64)> { + pub fn delete_with_opts( + &mut self, + key: &Key, + action: WriteOpts, + ) -> Result<(u64, i64)> { // Note that this method is the same as `StorageWrite::delete`, // but with gas and storage bytes len diff accounting let mut deleted_bytes_len = 0; if self.has_key(key)?.0 { self.block.tree.delete(key)?; deleted_bytes_len = - self.db.delete_subspace_val(self.block.height, key)?; + self.db + .delete_subspace_val(self.block.height, key, action)?; } let gas = (key.len() + deleted_bytes_len as usize) as u64 * STORAGE_WRITE_GAS_PER_BYTE; Ok((gas, deleted_bytes_len)) } + /// Delete including from the diffs storage + pub fn delete(&mut self, key: &Key) -> Result<(u64, i64)> { + self.delete_with_opts(key, WriteOpts::ALL) + } + + /// Delete without manipulating the diffs storage + pub fn delete_without_diffs(&mut self, key: &Key) -> Result<(u64, i64)> { + self.delete_with_opts(key, WriteOpts::NONE) + } + /// Set the block header. /// The header is not in the Merkle tree as it's tracked by Tendermint. /// Hence, we don't update the tree when this is set. 
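The heart of the change is visible above: `write_with_opts` only updates the merkle tree when the `MERKLIZE` flag is set and forwards the flags to the DB layer, which likewise skips the diffs unless `WRITE_DIFFS` is set. A minimal, self-contained sketch of that gating, using the `bitflags` crate directly with a toy in-memory store (the `ToyStorage` type and its fields are illustrative only, not Namada's `Storage`/`DB` types):

    use std::collections::BTreeMap;

    use bitflags::bitflags;

    bitflags! {
        /// Same flag values as the patch's `WriteOpts`.
        #[derive(Clone, Copy, Debug, PartialEq, Eq)]
        struct WriteOpts: u8 {
            const MERKLIZE = 0b01;
            const WRITE_DIFFS = 0b10;
            const ALL = Self::MERKLIZE.bits() | Self::WRITE_DIFFS.bits();
        }
    }

    #[derive(Default)]
    struct ToyStorage {
        merkle_tree: BTreeMap<String, Vec<u8>>,
        diffs: BTreeMap<String, Vec<u8>>,
        subspace: BTreeMap<String, Vec<u8>>,
    }

    impl ToyStorage {
        /// Mirrors the gating in `write_with_opts`: the subspace value is
        /// always written, the tree and the diffs only when the flags ask.
        fn write_with_opts(&mut self, key: &str, value: &[u8], opts: WriteOpts) {
            if opts.contains(WriteOpts::MERKLIZE) {
                self.merkle_tree.insert(key.to_owned(), value.to_vec());
            }
            if opts.contains(WriteOpts::WRITE_DIFFS) {
                self.diffs.insert(key.to_owned(), value.to_vec());
            }
            self.subspace.insert(key.to_owned(), value.to_vec());
        }
    }

    fn main() {
        let mut s = ToyStorage::default();
        // A regular key keeps the old behaviour.
        s.write_with_opts("balance/alice", b"10", WriteOpts::ALL);
        // A MASP-style key skips both the tree and the diffs
        // (`WriteOpts::empty()` plays the role of the patch's `WriteOpts::NONE`).
        s.write_with_opts("masp/nullifier", b"()", WriteOpts::empty());
        assert!(s.merkle_tree.contains_key("balance/alice"));
        assert!(s.diffs.contains_key("balance/alice"));
        assert!(s.subspace.contains_key("masp/nullifier"));
        assert!(!s.merkle_tree.contains_key("masp/nullifier"));
        assert!(!s.diffs.contains_key("masp/nullifier"));
    }

The patch keeps `write`, `write_without_merkle`, `write_without_merkle_diffs`, `delete` and `delete_without_diffs` as thin wrappers over these `_with_opts` variants, so existing call sites are unchanged unless they opt out explicitly.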
@@ -866,6 +911,7 @@ where batch: &mut D::WriteBatch, key: &Key, value: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result { let value = value.as_ref(); if is_pending_transfer_key(key) { @@ -875,13 +921,16 @@ where self.block.tree.update(key, height)?; } else { // Update the merkle tree - self.block.tree.update(key, value)?; + if action.contains(WriteOpts::MERKLIZE) { + self.block.tree.update(key, value)?; + } } Ok(self.db.batch_write_subspace_val( batch, self.block.height, key, value, + action, )?) } @@ -892,12 +941,16 @@ where &mut self, batch: &mut D::WriteBatch, key: &Key, + action: WriteOpts, ) -> Result { // Update the merkle tree self.block.tree.delete(key)?; - Ok(self - .db - .batch_delete_subspace_val(batch, self.block.height, key)?) + Ok(self.db.batch_delete_subspace_val( + batch, + self.block.height, + key, + action, + )?) } // Prune merkle tree stores. Use after updating self.block.height in the @@ -1128,6 +1181,7 @@ mod tests { use namada_core::types::time::{self, Duration}; use namada_core::types::token; use namada_parameters::Parameters; + use namada_storage::{StorageRead, StorageWrite}; use proptest::prelude::*; use proptest::test_runner::Config; @@ -1342,4 +1396,178 @@ mod tests { assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); } } + + #[test] + fn test_writing_without_merklizing_or_diffs() { + let mut wls = TestWlStorage::default(); + assert_eq!(wls.storage.block.height.0, 0); + + let key1 = Key::parse("testing1").unwrap(); + let val1 = 1u64; + let key2 = Key::parse("testing2").unwrap(); + let val2 = 2u64; + + // Standard write of key-val-1 + wls.write(&key1, val1).unwrap(); + + // Read from WlStorage should return val1 + let res = wls.read::(&key1).unwrap().unwrap(); + assert_eq!(res, val1); + + // Read from Storage shouldn't return val1 bc the block hasn't been + // committed + let (res, _) = wls.storage.read(&key1).unwrap(); + assert!(res.is_none()); + + // Write key-val-2 without merklizing or diffs + wls.write_without_merkle_diffs(&key2, val2).unwrap(); + + // Read from WlStorage should return val2 + let res = wls.read::(&key2).unwrap().unwrap(); + assert_eq!(res, val2); + + // Commit block and storage changes + wls.commit_block().unwrap(); + wls.storage.block.height = wls.storage.block.height.next_height(); + + // Read key1 from Storage should return val1 + let (res1, _) = wls.storage.read(&key1).unwrap(); + let res1 = u64::try_from_slice(&res1.unwrap()).unwrap(); + assert_eq!(res1, val1); + + // Check merkle tree inclusion of key-val-1 explicitly + let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); + assert!(is_merklized1); + + // Key2 should be in storage. 
Confirm by reading from + // WlStorage and also by reading Storage subspace directly + let res2 = wls.read::(&key2).unwrap().unwrap(); + assert_eq!(res2, val2); + let res2 = wls.storage.db.read_subspace_val(&key2).unwrap().unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + + // Check explicitly that key-val-2 is not in merkle tree + let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + assert!(!is_merklized2); + + // Check that the proper diffs exist for key-val-1 + let res1 = wls + .storage + .db + .read_diffs_val(&key1, Default::default(), true) + .unwrap(); + assert!(res1.is_none()); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, Default::default(), false) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + // Check that there are diffs for key-val-2 in block 0, since all keys + // need to have diffs for at least 1 block for rollback purposes + let res2 = wls + .storage + .db + .read_diffs_val(&key2, BlockHeight(0), true) + .unwrap(); + assert!(res2.is_none()); + let res2 = wls + .storage + .db + .read_diffs_val(&key2, BlockHeight(0), false) + .unwrap() + .unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + + // Now delete the keys properly + wls.delete(&key1).unwrap(); + wls.delete_without_diffs(&key2).unwrap(); + + // Commit the block again + wls.commit_block().unwrap(); + wls.storage.block.height = wls.storage.block.height.next_height(); + + // Check the key-vals are removed from the storage subspace + let res1 = wls.read::(&key1).unwrap(); + let res2 = wls.read::(&key2).unwrap(); + assert!(res1.is_none() && res2.is_none()); + let res1 = wls.storage.db.read_subspace_val(&key1).unwrap(); + let res2 = wls.storage.db.read_subspace_val(&key2).unwrap(); + assert!(res1.is_none() && res2.is_none()); + + // Check that the key-vals don't exist in the merkle tree anymore + let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); + let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + assert!(!is_merklized1 && !is_merklized2); + + // Check that key-val-1 diffs are properly updated for blocks 0 and 1 + let res1 = wls + .storage + .db + .read_diffs_val(&key1, BlockHeight(0), true) + .unwrap(); + assert!(res1.is_none()); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, BlockHeight(0), false) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, BlockHeight(1), true) + .unwrap() + .unwrap(); + let res1 = u64::try_from_slice(&res1).unwrap(); + assert_eq!(res1, val1); + + let res1 = wls + .storage + .db + .read_diffs_val(&key1, BlockHeight(1), false) + .unwrap(); + assert!(res1.is_none()); + + // Check that key-val-2 diffs don't exist for block 0 anymore + let res2 = wls + .storage + .db + .read_diffs_val(&key2, BlockHeight(0), true) + .unwrap(); + assert!(res2.is_none()); + let res2 = wls + .storage + .db + .read_diffs_val(&key2, BlockHeight(0), false) + .unwrap(); + assert!(res2.is_none()); + + // Check that the block 1 diffs for key-val-2 include an "old" value of + // val2 and no "new" value + let res2 = wls + .storage + .db + .read_diffs_val(&key2, BlockHeight(1), true) + .unwrap() + .unwrap(); + let res2 = u64::try_from_slice(&res2).unwrap(); + assert_eq!(res2, val2); + let res2 = wls + .storage + .db + .read_diffs_val(&key2, BlockHeight(1), false) + .unwrap(); + assert!(res2.is_none()); + } } 
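The assertions above lean on the diff layout used by the DB implementations later in the patch: for every committed height there can be an "old" and a "new" entry per key, and a key written without `WRITE_DIFFS` keeps at most one block of history before the previous height's entries are pruned. A rough stand-alone model of that behaviour (the `DiffStore` type and the string keys are illustrative only, not Namada's DB):

    use std::collections::BTreeMap;

    /// Toy model of the per-height diff layout: "{height}/old/{key}" holds
    /// the value being replaced, "{height}/new/{key}" the value written at
    /// that height.
    #[derive(Default)]
    struct DiffStore {
        diffs: BTreeMap<String, Vec<u8>>,
        subspace: BTreeMap<String, Vec<u8>>,
    }

    impl DiffStore {
        fn write(&mut self, height: u64, key: &str, value: &[u8], persist_diffs: bool) {
            if let Some(prev) = self.subspace.insert(key.to_owned(), value.to_vec()) {
                self.diffs.insert(format!("{height}/old/{key}"), prev);
            }
            self.diffs.insert(format!("{height}/new/{key}"), value.to_vec());
            // Keys that don't persist diffs keep at most one block of
            // history: the entries recorded at the previous height are pruned.
            if !persist_diffs && height > 0 {
                self.diffs.remove(&format!("{}/old/{key}", height - 1));
                self.diffs.remove(&format!("{}/new/{key}", height - 1));
            }
        }
    }

    fn main() {
        let mut db = DiffStore::default();
        // Height 0: the first write of a "no diffs" key still records a diff
        // entry so the block can be rolled back.
        db.write(0, "k2", b"2", false);
        assert!(db.diffs.contains_key("0/new/k2"));
        // Height 1: overwriting it records the old value there and prunes the
        // height-0 entries, matching the assertions in the test above.
        db.write(1, "k2", b"3", false);
        assert!(!db.diffs.contains_key("0/new/k2"));
        assert_eq!(
            db.diffs.get("1/old/k2").map(Vec::as_slice),
            Some(&b"2"[..])
        );
    }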
diff --git a/crates/state/src/wl_storage.rs b/crates/state/src/wl_storage.rs index 5c1d17a091..26e8de5dc5 100644 --- a/crates/state/src/wl_storage.rs +++ b/crates/state/src/wl_storage.rs @@ -11,7 +11,7 @@ use namada_storage::{ResultExt, StorageRead, StorageWrite}; use super::EPOCH_SWITCH_BLOCKS_DELAY; use crate::write_log::{self, WriteLog}; -use crate::{DBIter, State, DB}; +use crate::{DBIter, State, WriteOpts, DB}; /// Storage with write log that allows to implement prefix iterator that works /// with changes not yet committed to the DB. @@ -402,7 +402,10 @@ where self.write_log_iter.next() { match modification { - write_log::StorageModification::Write { value } + write_log::StorageModification::Write { + value, + action: _, + } | write_log::StorageModification::Temp { value } => { let gas = value.len() as u64; return Some((key, value, gas)); @@ -413,7 +416,9 @@ where let gas = vp_code_hash.len() as u64; return Some((key, vp_code_hash.to_vec(), gas)); } - write_log::StorageModification::Delete => { + write_log::StorageModification::Delete { + action: _, + } => { continue; } } @@ -446,10 +451,10 @@ macro_rules! impl_storage_traits { // try to read from the write log first let (log_val, _gas) = self.write_log().read(key); match log_val { - Some(write_log::StorageModification::Write { ref value }) => { + Some(write_log::StorageModification::Write { ref value, action: _ }) => { Ok(Some(value.clone())) } - Some(write_log::StorageModification::Delete) => Ok(None), + Some(write_log::StorageModification::Delete { action: _ }) => Ok(None), Some(write_log::StorageModification::InitAccount { ref vp_code_hash, }) => Ok(Some(vp_code_hash.to_vec())), @@ -473,7 +478,7 @@ macro_rules! impl_storage_traits { Some(&write_log::StorageModification::Write { .. }) | Some(&write_log::StorageModification::InitAccount { .. }) | Some(&write_log::StorageModification::Temp { .. }) => Ok(true), - Some(&write_log::StorageModification::Delete) => { + Some(&write_log::StorageModification::Delete { .. }) => { // the given key has been deleted Ok(false) } @@ -557,22 +562,27 @@ macro_rules! impl_storage_traits { { // N.B. Calling this when testing pre- and post- reads in // regards to testing native vps is incorrect. 
- fn write_bytes( + fn write_bytes_with_opts( &mut self, key: &storage::Key, val: impl AsRef<[u8]>, + action: WriteOpts, ) -> namada_storage::Result<()> { let _ = self .write_log_mut() - .protocol_write(key, val.as_ref().to_vec()) + .protocol_write(key, val.as_ref().to_vec(), action) .into_storage_result(); Ok(()) } - fn delete(&mut self, key: &storage::Key) -> namada_storage::Result<()> { + fn delete_with_opts( + &mut self, + key: &storage::Key, + action: WriteOpts + ) -> namada_storage::Result<()> { let _ = self .write_log_mut() - .protocol_delete(key) + .protocol_delete(key, action) .into_storage_result(); Ok(()) } @@ -720,12 +730,18 @@ mod tests { | Level::BlockWriteLog(WlMod::Delete | WlMod::DeletePrefix) => { } Level::TxWriteLog(WlMod::Write(val)) => { + // NOTE: Will write to merkle tree and DB diff s.write_log.write(key, val.serialize_to_vec()).unwrap(); } Level::BlockWriteLog(WlMod::Write(val)) => { s.write_log // protocol only writes at block level - .protocol_write(key, val.serialize_to_vec()) + // NOTE: Will write to merkle tree and DB diff + .protocol_write( + key, + val.serialize_to_vec(), + WriteOpts::ALL, + ) .unwrap(); } Level::Storage(val) => { diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index 28ca56cd2d..1aec7a284a 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -17,7 +17,7 @@ use namada_trans_token::storage_key::{ }; use thiserror::Error; -use crate::{DBIter, State, DB}; +use crate::{DBIter, State, WriteOpts, DB}; #[allow(missing_docs)] #[derive(Error, Debug)] @@ -49,9 +49,14 @@ pub enum StorageModification { Write { /// Value bytes value: Vec, + /// Action to determine what data to write + action: WriteOpts, }, /// Delete an existing key-value - Delete, + Delete { + /// Action to determine what data to delete + action: WriteOpts, + }, /// Initialize a new account with established address and a given validity /// predicate hash. The key for `InitAccount` inside the [`WriteLog`] must /// point to its validity predicate. @@ -154,10 +159,11 @@ impl WriteLog { }) { Some(v) => { let gas = match v { - StorageModification::Write { ref value } => { - key.len() + value.len() - } - StorageModification::Delete => key.len(), + StorageModification::Write { + ref value, + action: _, + } => key.len() + value.len(), + StorageModification::Delete { action: _ } => key.len(), StorageModification::InitAccount { ref vp_code_hash } => { key.len() + vp_code_hash.len() } @@ -181,10 +187,11 @@ impl WriteLog { match self.block_write_log.get(key) { Some(v) => { let gas = match v { - StorageModification::Write { ref value } => { - key.len() + value.len() - } - StorageModification::Delete => key.len(), + StorageModification::Write { + ref value, + action: _, + } => key.len() + value.len(), + StorageModification::Delete { action: _ } => key.len(), StorageModification::InitAccount { ref vp_code_hash } => { key.len() + vp_code_hash.len() } @@ -198,27 +205,30 @@ impl WriteLog { } } - /// Write a key and a value and return the gas cost and the size difference - /// Fails with [`Error::UpdateVpOfNewAccount`] when attempting to update a - /// validity predicate of a new account that's not yet committed to storage. - /// Fails with [`Error::UpdateTemporaryValue`] when attempting to update a + /// Write a key and a value, with write options, and return the gas cost and + /// the size difference.
Fails with [`Error::UpdateVpOfNewAccount`] when + /// attempting to update a validity predicate of a new account that's + /// not yet committed to storage. Fails with + /// [`Error::UpdateTemporaryValue`] when attempting to update a /// temporary value. - pub fn write( + pub fn write_with_opts( &mut self, key: &storage::Key, value: Vec, + action: WriteOpts, ) -> Result<(u64, i64)> { let len = value.len(); let gas = key.len() + len; let size_diff = match self .tx_write_log - .insert(key.clone(), StorageModification::Write { value }) + .insert(key.clone(), StorageModification::Write { value, action }) { Some(prev) => match prev { - StorageModification::Write { ref value } => { - len as i64 - value.len() as i64 - } - StorageModification::Delete => len as i64, + StorageModification::Write { + ref value, + action: _, + } => len as i64 - value.len() as i64, + StorageModification::Delete { action: _ } => len as i64, StorageModification::InitAccount { .. } => { return Err(Error::UpdateVpOfNewAccount); } @@ -233,6 +243,16 @@ impl WriteLog { Ok((gas as u64 * STORAGE_WRITE_GAS_PER_BYTE, size_diff)) } + /// Write a key and value using all write options, and return the gas cost + /// and size difference + pub fn write( + &mut self, + key: &storage::Key, + value: Vec, + ) -> Result<(u64, i64)> { + self.write_with_opts(key, value, WriteOpts::ALL) + } + /// Write a key and a value. /// Fails with [`Error::UpdateVpOfNewAccount`] when attempting to update a /// validity predicate of a new account that's not yet committed to storage. @@ -242,10 +262,11 @@ impl WriteLog { &mut self, key: &storage::Key, value: Vec, + action: WriteOpts, ) -> Result<()> { if let Some(prev) = self .block_write_log - .insert(key.clone(), StorageModification::Write { value }) + .insert(key.clone(), StorageModification::Write { value, action }) { match prev { StorageModification::InitAccount { .. } => { @@ -255,7 +276,7 @@ impl WriteLog { return Err(Error::UpdateTemporaryValue); } StorageModification::Write { .. } - | StorageModification::Delete => {} + | StorageModification::Delete { .. } => {} } } Ok(()) @@ -278,10 +299,11 @@ impl WriteLog { .insert(key.clone(), StorageModification::Temp { value }) { Some(prev) => match prev { - StorageModification::Write { ref value } => { - len as i64 - value.len() as i64 - } - StorageModification::Delete => { + StorageModification::Write { + ref value, + action: _, + } => len as i64 - value.len() as i64, + StorageModification::Delete { .. } => { return Err(Error::WriteTempAfterDelete); } StorageModification::InitAccount { .. } => { @@ -300,21 +322,28 @@ impl WriteLog { Ok((gas as u64 * MEMORY_ACCESS_GAS_PER_BYTE, size_diff)) } - /// Delete a key and its value, and return the gas cost and the size - /// difference. + /// Delete a key and its value, with write options, and return the gas cost + /// and the size difference. /// Fails with [`Error::DeleteVp`] for a validity predicate key, which are /// not possible to delete. 
- pub fn delete(&mut self, key: &storage::Key) -> Result<(u64, i64)> { + pub fn delete_with_opts( + &mut self, + key: &storage::Key, + action: WriteOpts, + ) -> Result<(u64, i64)> { if key.is_validity_predicate().is_some() { return Err(Error::DeleteVp); } let size_diff = match self .tx_write_log - .insert(key.clone(), StorageModification::Delete) + .insert(key.clone(), StorageModification::Delete { action }) { Some(prev) => match prev { - StorageModification::Write { ref value } => value.len() as i64, - StorageModification::Delete => 0, + StorageModification::Write { + ref value, + action: _, + } => value.len() as i64, + StorageModification::Delete { .. } => 0, StorageModification::InitAccount { .. } => { return Err(Error::DeleteVp); } @@ -328,23 +357,33 @@ impl WriteLog { Ok((gas as u64 * STORAGE_WRITE_GAS_PER_BYTE, -size_diff)) } + /// Delete a key and its value, with all write options, and return the gas + /// cost and the size difference. + pub fn delete(&mut self, key: &storage::Key) -> Result<(u64, i64)> { + self.delete_with_opts(key, WriteOpts::ALL) + } + /// Delete a key and its value. /// Fails with [`Error::DeleteVp`] for a validity predicate key, which are /// not possible to delete. - pub fn protocol_delete(&mut self, key: &storage::Key) -> Result<()> { + pub fn protocol_delete( + &mut self, + key: &storage::Key, + action: WriteOpts, + ) -> Result<()> { if key.is_validity_predicate().is_some() { return Err(Error::DeleteVp); } if let Some(prev) = self .block_write_log - .insert(key.clone(), StorageModification::Delete) + .insert(key.clone(), StorageModification::Delete { action }) { match prev { StorageModification::InitAccount { .. } => { return Err(Error::DeleteVp); } StorageModification::Write { .. } - | StorageModification::Delete + | StorageModification::Delete { .. } | StorageModification::Temp { .. } => {} } }; @@ -505,19 +544,29 @@ impl WriteLog { { for (key, entry) in self.block_write_log.iter() { match entry { - StorageModification::Write { value } => { + StorageModification::Write { value, action } => { storage - .batch_write_subspace_val(batch, key, value.clone()) + .batch_write_subspace_val( + batch, + key, + value.clone(), + action.clone(), + ) .map_err(Error::StorageError)?; } - StorageModification::Delete => { + StorageModification::Delete { action } => { storage - .batch_delete_subspace_val(batch, key) + .batch_delete_subspace_val(batch, key, action.clone()) .map_err(Error::StorageError)?; } StorageModification::InitAccount { vp_code_hash } => { storage - .batch_write_subspace_val(batch, key, *vp_code_hash) + .batch_write_subspace_val( + batch, + key, + *vp_code_hash, + WriteOpts::ALL, + ) .map_err(Error::StorageError)?; } // temporary value isn't persisted @@ -525,6 +574,7 @@ impl WriteLog { } } + // Replay protections specifically for (hash, entry) in self.replay_protection.iter() { match entry { ReProtStorageModification::Write => storage @@ -763,7 +813,7 @@ mod tests { // read the value let (value, gas) = write_log.read(&key); match value.expect("no read value") { - StorageModification::Write { value } => { + StorageModification::Write { value, action: _ } => { assert_eq!(*value, inserted) } _ => panic!("unexpected read result"), @@ -798,7 +848,7 @@ mod tests { // read the deleted key let (value, gas) = write_log.read(&key); match &value.expect("no read value") { - StorageModification::Delete => {} + StorageModification::Delete { .. 
} => {} _ => panic!("unexpected result"), } assert_eq!(gas, key.len() as u64 * MEMORY_ACCESS_GAS_PER_BYTE); @@ -1139,15 +1189,42 @@ pub mod testing { collection::btree_set(arb_address(), 0..10) } + fn arb_write_actions() -> impl Strategy { + prop_oneof![ + Just(WriteOpts::ALL), + Just(WriteOpts::WRITE_DIFFS), + Just(WriteOpts::NONE), + ] + } + + fn arb_write_data() -> impl Strategy, WriteOpts)> { + let arb_bytes = any::>(); + arb_bytes.prop_flat_map(move |bytes| { + let arb_action = arb_write_actions(); + (Just(bytes), arb_action) + }) + } + /// Generate an arbitrary [`StorageModification`]. + /// + /// NOTE / TODO: not using the arb action here and defaulting to + /// `WriteOpts::ALL`. Proper use of the arb action requires extra logic + /// to ensure that a `Delete` with a particular action only operates on a + /// key that was written with the same action. pub fn arb_storage_modification( can_init_account: bool, ) -> impl Strategy { if can_init_account { prop_oneof![ - any::>() - .prop_map(|value| StorageModification::Write { value }), - Just(StorageModification::Delete), + arb_write_data().prop_map(|(value, _action)| { + StorageModification::Write { + value, + action: WriteOpts::ALL, + } + }), + Just(StorageModification::Delete { + action: WriteOpts::ALL + }), any::<[u8; HASH_LENGTH]>().prop_map(|hash| { StorageModification::InitAccount { vp_code_hash: Hash(hash), @@ -1159,9 +1236,15 @@ pub mod testing { .boxed() } else { prop_oneof![ - any::>() - .prop_map(|value| StorageModification::Write { value }), - Just(StorageModification::Delete), + arb_write_data().prop_map(|(value, _action)| { + StorageModification::Write { + value, + action: WriteOpts::ALL, + } + }), + Just(StorageModification::Delete { + action: WriteOpts::ALL + }), any::>() .prop_map(|value| StorageModification::Temp { value }), ] diff --git a/crates/storage/Cargo.toml b/crates/storage/Cargo.toml index 17ee879e6b..4d1010aad1 100644 --- a/crates/storage/Cargo.toml +++ b/crates/storage/Cargo.toml @@ -24,6 +24,7 @@ namada_gas = { path = "../gas" } namada_merkle_tree = { path = "../merkle_tree" } namada_tx = { path = "../tx" } +bitflags.workspace = true borsh.workspace = true itertools.workspace = true thiserror.workspace = true diff --git a/crates/storage/src/db.rs b/crates/storage/src/db.rs index a7bbf61dba..d812bdafe1 100644 --- a/crates/storage/src/db.rs +++ b/crates/storage/src/db.rs @@ -16,6 +16,7 @@ use namada_merkle_tree::{ use thiserror::Error; use crate::tx_queue::TxQueue; +use crate::WriteOpts; #[allow(missing_docs)] #[derive(Error, Debug)] @@ -173,6 +174,15 @@ pub trait DB: Debug { last_height: BlockHeight, ) -> Result>>; + /// Read the value for the account diffs at the corresponding height from + /// the DB + fn read_diffs_val( + &self, + key: &Key, + height: BlockHeight, + is_old: bool, + ) -> Result>>; + /// Write the value with the given height and account subspace key to the /// DB. Returns the size difference from previous value, if any, or the /// size of the value otherwise. @@ -181,6 +191,7 @@ pub trait DB: Debug { height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result; /// Delete the value with the given height and account subspace key from the @@ -190,6 +201,7 @@ pub trait DB: Debug { &mut self, height: BlockHeight, key: &Key, + action: WriteOpts, ) -> Result; /// Start write batch.
@@ -207,6 +219,7 @@ pub trait DB: Debug { height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result; /// Batch delete the value with the given height and account subspace key @@ -217,6 +230,7 @@ pub trait DB: Debug { batch: &mut Self::WriteBatch, height: BlockHeight, key: &Key, + action: WriteOpts, ) -> Result; /// Prune Merkle tree stores at the given epoch diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index c2d83b6bac..674da8c044 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -8,6 +8,7 @@ pub mod mockdb; pub mod tx_queue; pub mod types; +use bitflags::bitflags; pub use db::{Error as DbError, Result as DbResult, *}; pub use error::{CustomError, Error, OptionExt, Result, ResultExt}; use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; @@ -17,6 +18,22 @@ use namada_core::types::storage::{ self, BlockHash, BlockHeight, Epoch, Epochs, Header, TxIndex, }; +bitflags! { + /// Write options to help commit_block determine what parts of the storage to + /// update + #[derive(Clone, Debug, PartialEq, Eq)] + pub struct WriteOpts: u8 { + /// Modify the merkle tree + const MERKLIZE = 0b01; + /// Write value to the diffs + const WRITE_DIFFS = 0b10; + /// Neither update the merkle tree nor write to the diffs + const NONE = Self::MERKLIZE.bits() & Self::WRITE_DIFFS.bits(); + /// Both modify the merkle tree and write to diffs + const ALL = Self::MERKLIZE.bits() | Self::WRITE_DIFFS.bits(); + } +} + /// Common storage read interface /// /// If you're using this trait and having compiler complaining about needing an @@ -134,6 +151,7 @@ pub trait StorageRead { /// Common storage write interface pub trait StorageWrite { /// Write a value to be encoded with Borsh at the given key to storage. + /// Additionally, write the data to the diffs and add it to the merkle tree. fn write( &mut self, key: &storage::Key, @@ -143,15 +161,74 @@ pub trait StorageWrite { self.write_bytes(key, bytes) } + /// Write a value to be encoded with Borsh at the given key to storage. + fn write_with_opts( + &mut self, + key: &storage::Key, + val: T, + action: WriteOpts, + ) -> Result<()> { + let bytes = val.serialize_to_vec(); + self.write_bytes_with_opts(key, bytes, action) + } + + /// Write a value to be encoded with Borsh at the given key to storage. + /// Additionally, write the data to the diffs. Do not add to the merkle + /// tree. + fn write_without_merkle( + &mut self, + key: &storage::Key, + val: T, + ) -> Result<()> { + self.write_with_opts(key, val, WriteOpts::WRITE_DIFFS) + } + + /// Write a value to be encoded with Borsh at the given key to storage. + /// Do not update the diffs or merkle tree. + fn write_without_merkle_diffs( + &mut self, + key: &storage::Key, + val: T, + ) -> Result<()> { + self.write_with_opts(key, val, WriteOpts::NONE) + } + /// Write a value as bytes at the given key to storage. fn write_bytes( &mut self, key: &storage::Key, val: impl AsRef<[u8]>, + ) -> Result<()> { + self.write_bytes_with_opts(key, val, WriteOpts::ALL) + } + + /// Write a value as bytes at the given key to storage, with provided + /// options. + fn write_bytes_with_opts( + &mut self, + key: &storage::Key, + val: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result<()>; - /// Delete a value at the given key from storage. 
- fn delete(&mut self, key: &storage::Key) -> Result<()>; + /// Delete a value at the given key from storage + fn delete_with_opts( + &mut self, + key: &storage::Key, + action: WriteOpts, + ) -> Result<()>; + + /// Delete a value at the given key from storage, including from the diffs + /// storage. + fn delete(&mut self, key: &storage::Key) -> Result<()> { + self.delete_with_opts(key, WriteOpts::ALL) + } + + /// Delete a value at the given key from storage, excluding the diffs + /// storage. + fn delete_without_diffs(&mut self, key: &storage::Key) -> Result<()> { + self.delete_with_opts(key, WriteOpts::NONE) + } /// Delete all key-vals with a matching prefix. fn delete_prefix(&mut self, prefix: &storage::Key) -> Result<()> @@ -400,20 +477,25 @@ pub mod testing { } impl StorageWrite for TestStorage { - fn write_bytes( + fn write_bytes_with_opts( &mut self, key: &storage::Key, val: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result<()> { self.db - .write_subspace_val(self.height, key, val) + .write_subspace_val(self.height, key, val, action) .into_storage_result()?; Ok(()) } - fn delete(&mut self, key: &storage::Key) -> Result<()> { + fn delete_with_opts( + &mut self, + key: &storage::Key, + action: WriteOpts, + ) -> Result<()> { self.db - .delete_subspace_val(self.height, key) + .delete_subspace_val(self.height, key, action) .into_storage_result()?; Ok(()) } diff --git a/crates/storage/src/mockdb.rs b/crates/storage/src/mockdb.rs index ad643ca341..2ae89496bf 100644 --- a/crates/storage/src/mockdb.rs +++ b/crates/storage/src/mockdb.rs @@ -27,6 +27,12 @@ use crate::db::{ }; use crate::tx_queue::TxQueue; use crate::types::{KVBytes, PrefixIterator}; +use crate::WriteOpts; + +const SUBSPACE_CF: &str = "subspace"; + +const OLD_DIFF_PREFIX: &str = "old"; +const NEW_DIFF_PREFIX: &str = "new"; /// An in-memory DB for testing. #[derive(Debug, Default)] @@ -459,8 +465,28 @@ impl DB for MockDB { Ok(false) } + fn read_diffs_val( + &self, + key: &Key, + height: BlockHeight, + is_old: bool, + ) -> Result>> { + let old_new_seg = if is_old { + OLD_DIFF_PREFIX + } else { + NEW_DIFF_PREFIX + }; + + let prefix = Key::from(height.to_db_key()) + .push(&old_new_seg.to_string().to_db_key()) + .map_err(Error::KeyError)? 
+ .join(key); + + Ok(self.0.borrow().get(&prefix.to_string()).cloned()) + } + fn read_subspace_val(&self, key: &Key) -> Result>> { - let key = Key::parse("subspace").map_err(Error::KeyError)?.join(key); + let key = Key::parse(SUBSPACE_CF).map_err(Error::KeyError)?.join(key); Ok(self.0.borrow().get(&key.to_string()).cloned()) } @@ -482,18 +508,31 @@ impl DB for MockDB { height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result { // batch_write are directly committed - self.batch_write_subspace_val(&mut MockDBWriteBatch, height, key, value) + self.batch_write_subspace_val( + &mut MockDBWriteBatch, + height, + key, + value, + action, + ) } fn delete_subspace_val( &mut self, height: BlockHeight, key: &Key, + action: WriteOpts, ) -> Result { // batch_delete are directly committed - self.batch_delete_subspace_val(&mut MockDBWriteBatch, height, key) + self.batch_delete_subspace_val( + &mut MockDBWriteBatch, + height, + key, + action, + ) } fn batch() -> Self::WriteBatch { @@ -512,6 +551,7 @@ impl DB for MockDB { height: BlockHeight, key: &Key, value: impl AsRef<[u8]>, + action: WriteOpts, ) -> Result { let value = value.as_ref(); let subspace_key = @@ -519,16 +559,20 @@ impl DB for MockDB { let current_len = value.len() as i64; let diff_prefix = Key::from(height.to_db_key()); let mut db = self.0.borrow_mut(); - Ok( + + let persist_diffs = action.contains(WriteOpts::WRITE_DIFFS); + + // Diffs + let size_diff = match db.insert(subspace_key.to_string(), value.to_owned()) { Some(prev_value) => { let old_key = diff_prefix - .push(&"old".to_string().to_db_key()) + .push(&OLD_DIFF_PREFIX.to_string().to_db_key()) .unwrap() .join(key); db.insert(old_key.to_string(), prev_value.clone()); let new_key = diff_prefix - .push(&"new".to_string().to_db_key()) + .push(&NEW_DIFF_PREFIX.to_string().to_db_key()) .unwrap() .join(key); db.insert(new_key.to_string(), value.to_owned()); @@ -536,14 +580,33 @@ impl DB for MockDB { } None => { let new_key = diff_prefix - .push(&"new".to_string().to_db_key()) + .push(&NEW_DIFF_PREFIX.to_string().to_db_key()) .unwrap() .join(key); db.insert(new_key.to_string(), value.to_owned()); current_len } - }, - ) + }; + + if !persist_diffs { + if let Some(pruned_height) = height.0.checked_sub(1) { + let pruned_key_prefix = Key::from(pruned_height.to_db_key()); + let old_val_key = pruned_key_prefix + .push(&NEW_DIFF_PREFIX.to_string().to_db_key()) + .unwrap() + .join(key) + .to_string(); + db.remove(&old_val_key); + let new_val_key = pruned_key_prefix + .push(&NEW_DIFF_PREFIX.to_string().to_db_key()) + .unwrap() + .join(key) + .to_string(); + db.remove(&new_val_key); + } + } + + Ok(size_diff) } fn batch_delete_subspace_val( @@ -551,22 +614,47 @@ impl DB for MockDB { _batch: &mut Self::WriteBatch, height: BlockHeight, key: &Key, + action: WriteOpts, ) -> Result { let subspace_key = - Key::parse("subspace").map_err(Error::KeyError)?.join(key); + Key::parse(SUBSPACE_CF).map_err(Error::KeyError)?.join(key); let diff_prefix = Key::from(height.to_db_key()); let mut db = self.0.borrow_mut(); - Ok(match db.remove(&subspace_key.to_string()) { + + let persist_diffs = action.contains(WriteOpts::WRITE_DIFFS); + + let size_diff = match db.remove(&subspace_key.to_string()) { Some(value) => { let old_key = diff_prefix - .push(&"old".to_string().to_db_key()) + .push(&OLD_DIFF_PREFIX.to_string().to_db_key()) .unwrap() .join(key); db.insert(old_key.to_string(), value.clone()); + + if !persist_diffs { + if let Some(pruned_height) = height.0.checked_sub(1) { + let 
pruned_key_prefix = + Key::from(pruned_height.to_db_key()); + let old_val_key = pruned_key_prefix + .push(&NEW_DIFF_PREFIX.to_string().to_db_key()) + .unwrap() + .join(key) + .to_string(); + db.remove(&old_val_key); + let new_val_key = pruned_key_prefix + .push(&NEW_DIFF_PREFIX.to_string().to_db_key()) + .unwrap() + .join(key) + .to_string(); + db.remove(&new_val_key); + } + } value.len() as i64 } None => 0, - }) + }; + + Ok(size_diff) } fn prune_merkle_tree_store( diff --git a/crates/tests/src/vm_host_env/tx.rs b/crates/tests/src/vm_host_env/tx.rs index fcb6b8bcef..e672d89e29 100644 --- a/crates/tests/src/vm_host_env/tx.rs +++ b/crates/tests/src/vm_host_env/tx.rs @@ -471,7 +471,8 @@ mod native_tx_host_env { key_ptr: u64, key_len: u64, val_ptr: u64, - val_len: u64 + val_len: u64, + write_opts: u8 )); native_host_fn!(tx_write_temp( key_ptr: u64, @@ -479,7 +480,7 @@ mod native_tx_host_env { val_ptr: u64, val_len: u64 )); - native_host_fn!(tx_delete(key_ptr: u64, key_len: u64)); + native_host_fn!(tx_delete(key_ptr: u64, key_len: u64, write_opts: u8)); native_host_fn!(tx_iter_prefix(prefix_ptr: u64, prefix_len: u64) -> u64); native_host_fn!(tx_iter_next(iter_id: u64) -> i64); native_host_fn!(tx_insert_verifier(addr_ptr: u64, addr_len: u64)); diff --git a/crates/tx_prelude/src/lib.rs b/crates/tx_prelude/src/lib.rs index 4d04314a53..e6b91890b6 100644 --- a/crates/tx_prelude/src/lib.rs +++ b/crates/tx_prelude/src/lib.rs @@ -37,7 +37,7 @@ pub use namada_macros::transaction; pub use namada_parameters::storage as parameters_storage; pub use namada_storage::{ collections, iter_prefix, iter_prefix_bytes, Error, OptionExt, ResultExt, - StorageRead, StorageWrite, + StorageRead, StorageWrite, WriteOpts, }; pub use namada_tx::{data as transaction, Section, Tx}; pub use namada_tx_env::TxEnv; @@ -235,10 +235,11 @@ impl StorageRead for Ctx { } impl StorageWrite for Ctx { - fn write_bytes( + fn write_bytes_with_opts( &mut self, key: &storage::Key, val: impl AsRef<[u8]>, + action: WriteOpts, ) -> namada_storage::Result<()> { let key = key.to_string(); unsafe { @@ -247,14 +248,25 @@ impl StorageWrite for Ctx { key.len() as _, val.as_ref().as_ptr() as _, val.as_ref().len() as _, + action.bits() as _, ) }; Ok(()) } - fn delete(&mut self, key: &storage::Key) -> namada_storage::Result<()> { + fn delete_with_opts( + &mut self, + key: &storage::Key, + action: WriteOpts, + ) -> namada_storage::Result<()> { let key = key.to_string(); - unsafe { namada_tx_delete(key.as_ptr() as _, key.len() as _) }; + unsafe { + namada_tx_delete( + key.as_ptr() as _, + key.len() as _, + action.bits() as _, + ) + }; Ok(()) } } diff --git a/crates/vm_env/src/lib.rs b/crates/vm_env/src/lib.rs index 63b5b01bc7..fca034dc03 100644 --- a/crates/vm_env/src/lib.rs +++ b/crates/vm_env/src/lib.rs @@ -34,6 +34,7 @@ pub mod tx { key_len: u64, val_ptr: u64, val_len: u64, + write_opts: u8, ); // Write a temporary key/value @@ -45,7 +46,7 @@ pub mod tx { ); // Delete the given key and its value - pub fn namada_tx_delete(key_ptr: u64, key_len: u64); + pub fn namada_tx_delete(key_ptr: u64, key_len: u64, write_opts: u8); // Get an ID of a data iterator with key prefix, ordered by storage // keys. 
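Because the options cross the wasm boundary, the tx host functions above gain a raw `write_opts: u8` parameter and the guest side sends `action.bits()`. A small sketch of how the flag byte can be round-tripped with the `bitflags` API (the `guest_side`/`host_side` helpers are assumptions for illustration, not the patch's actual host-function plumbing):

    use bitflags::bitflags;

    bitflags! {
        #[derive(Clone, Copy, Debug, PartialEq, Eq)]
        struct WriteOpts: u8 {
            const MERKLIZE = 0b01;
            const WRITE_DIFFS = 0b10;
            const ALL = Self::MERKLIZE.bits() | Self::WRITE_DIFFS.bits();
        }
    }

    /// Guest side: the options are flattened into their raw bits, which is
    /// what `namada_tx_write`/`namada_tx_delete` now receive as `write_opts`.
    fn guest_side(opts: WriteOpts) -> u8 {
        opts.bits()
    }

    /// Host side (assumed shape): rebuild the flags and reject unknown bits
    /// instead of silently truncating them.
    fn host_side(write_opts: u8) -> Result<WriteOpts, String> {
        WriteOpts::from_bits(write_opts)
            .ok_or_else(|| format!("unknown write option bits: {write_opts:#b}"))
    }

    fn main() {
        let raw = guest_side(WriteOpts::ALL);
        assert_eq!(host_side(raw), Ok(WriteOpts::ALL));
        assert!(host_side(0b100).is_err());
    }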
diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index d233c0c8ec..2beec6e8a7 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -393,9 +393,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "bitvec" @@ -3058,7 +3058,7 @@ version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "libc", "redox_syscall", ] @@ -3673,6 +3673,7 @@ dependencies = [ name = "namada_storage" version = "0.30.1" dependencies = [ + "bitflags 2.4.2", "borsh", "itertools 0.10.5", "namada_core", @@ -4516,7 +4517,7 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.1", + "bitflags 2.4.2", "lazy_static", "num-traits", "rand 0.8.5", @@ -5064,7 +5065,7 @@ version = "0.38.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "errno", "libc", "linux-raw-sys", diff --git a/wasm/wasm_source/src/tx_bridge_pool.rs b/wasm/wasm_source/src/tx_bridge_pool.rs index f4826b3d41..47f401f7d9 100644 --- a/wasm/wasm_source/src/tx_bridge_pool.rs +++ b/wasm/wasm_source/src/tx_bridge_pool.rs @@ -60,7 +60,7 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { log_string("Escrow succeeded"); // add transfer into the pool let pending_key = get_pending_key(&transfer); - ctx.write_bytes(&pending_key, transfer.serialize_to_vec()) + ctx.write(&pending_key, transfer) .wrap_err("Could not write transfer to bridge pool")?; Ok(()) } diff --git a/wasm_for_tests/tx_fail.wasm b/wasm_for_tests/tx_fail.wasm index d0d6e8b2d4..a724bf5ff1 100755 Binary files a/wasm_for_tests/tx_fail.wasm and b/wasm_for_tests/tx_fail.wasm differ diff --git a/wasm_for_tests/tx_memory_limit.wasm b/wasm_for_tests/tx_memory_limit.wasm index fa0c6ce860..0e0f6e3b92 100755 Binary files a/wasm_for_tests/tx_memory_limit.wasm and b/wasm_for_tests/tx_memory_limit.wasm differ diff --git a/wasm_for_tests/tx_mint_tokens.wasm b/wasm_for_tests/tx_mint_tokens.wasm index 761e1a14e9..e79d206dfd 100755 Binary files a/wasm_for_tests/tx_mint_tokens.wasm and b/wasm_for_tests/tx_mint_tokens.wasm differ diff --git a/wasm_for_tests/tx_no_op.wasm b/wasm_for_tests/tx_no_op.wasm index 7be3c565a7..2028e6d1b1 100755 Binary files a/wasm_for_tests/tx_no_op.wasm and b/wasm_for_tests/tx_no_op.wasm differ diff --git a/wasm_for_tests/tx_proposal_code.wasm b/wasm_for_tests/tx_proposal_code.wasm index 470473d963..067f6174c7 100755 Binary files a/wasm_for_tests/tx_proposal_code.wasm and b/wasm_for_tests/tx_proposal_code.wasm differ diff --git a/wasm_for_tests/tx_read_storage_key.wasm b/wasm_for_tests/tx_read_storage_key.wasm index 36c0a6b7e0..3f9ece4a36 100755 Binary files a/wasm_for_tests/tx_read_storage_key.wasm and b/wasm_for_tests/tx_read_storage_key.wasm differ diff --git a/wasm_for_tests/tx_write.wasm b/wasm_for_tests/tx_write.wasm index 78aa1a6511..e9d2bc6cdd 100755 Binary files a/wasm_for_tests/tx_write.wasm and b/wasm_for_tests/tx_write.wasm differ diff --git 
a/wasm_for_tests/tx_write_storage_key.wasm b/wasm_for_tests/tx_write_storage_key.wasm index 2f6e3946b4..902d297af7 100755 Binary files a/wasm_for_tests/tx_write_storage_key.wasm and b/wasm_for_tests/tx_write_storage_key.wasm differ diff --git a/wasm_for_tests/vp_always_false.wasm b/wasm_for_tests/vp_always_false.wasm index 5fa26fb17d..15329ff4bd 100755 Binary files a/wasm_for_tests/vp_always_false.wasm and b/wasm_for_tests/vp_always_false.wasm differ diff --git a/wasm_for_tests/vp_always_true.wasm b/wasm_for_tests/vp_always_true.wasm index d1a969e99a..5945fb3d93 100755 Binary files a/wasm_for_tests/vp_always_true.wasm and b/wasm_for_tests/vp_always_true.wasm differ diff --git a/wasm_for_tests/vp_eval.wasm b/wasm_for_tests/vp_eval.wasm index 3e18241622..60d81f0ff3 100755 Binary files a/wasm_for_tests/vp_eval.wasm and b/wasm_for_tests/vp_eval.wasm differ diff --git a/wasm_for_tests/vp_memory_limit.wasm b/wasm_for_tests/vp_memory_limit.wasm index 551b40e67f..b70c75ca95 100755 Binary files a/wasm_for_tests/vp_memory_limit.wasm and b/wasm_for_tests/vp_memory_limit.wasm differ diff --git a/wasm_for_tests/vp_read_storage_key.wasm b/wasm_for_tests/vp_read_storage_key.wasm index a2992674d7..310324c248 100755 Binary files a/wasm_for_tests/vp_read_storage_key.wasm and b/wasm_for_tests/vp_read_storage_key.wasm differ diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 3c37044705..4b795d2e1c 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -393,9 +393,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "bitvec" @@ -3058,7 +3058,7 @@ version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "libc", "redox_syscall", ] @@ -3673,6 +3673,7 @@ dependencies = [ name = "namada_storage" version = "0.30.1" dependencies = [ + "bitflags 2.4.2", "borsh", "itertools 0.10.5", "namada_core", @@ -4510,7 +4511,7 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.1", + "bitflags 2.4.2", "lazy_static", "num-traits", "rand 0.8.5", @@ -5058,7 +5059,7 @@ version = "0.38.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "errno", "libc", "linux-raw-sys",
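Taken together, the `StorageWrite` changes mean an implementor only has to supply `write_bytes_with_opts` and `delete_with_opts`; `write`, `write_without_merkle`, `write_without_merkle_diffs`, `delete` and `delete_without_diffs` are default methods layered on top. A compact sketch of that layering with a toy trait of the same shape (`ToyStorageWrite` and `Logger` are illustrative, not the `namada_storage` trait):

    use bitflags::bitflags;

    bitflags! {
        #[derive(Clone, Copy, Debug, PartialEq, Eq)]
        struct WriteOpts: u8 {
            const MERKLIZE = 0b01;
            const WRITE_DIFFS = 0b10;
            const ALL = Self::MERKLIZE.bits() | Self::WRITE_DIFFS.bits();
        }
    }

    /// Toy version of the write half of the trait: only the `*_with_opts`
    /// methods are required, everything else is a default on top of them.
    trait ToyStorageWrite {
        fn write_bytes_with_opts(&mut self, key: &str, val: &[u8], opts: WriteOpts);
        fn delete_with_opts(&mut self, key: &str, opts: WriteOpts);

        fn write_bytes(&mut self, key: &str, val: &[u8]) {
            self.write_bytes_with_opts(key, val, WriteOpts::ALL)
        }
        fn write_without_merkle(&mut self, key: &str, val: &[u8]) {
            self.write_bytes_with_opts(key, val, WriteOpts::WRITE_DIFFS)
        }
        // `WriteOpts::empty()` stands in for the patch's `WriteOpts::NONE`.
        fn write_without_merkle_diffs(&mut self, key: &str, val: &[u8]) {
            self.write_bytes_with_opts(key, val, WriteOpts::empty())
        }
        fn delete(&mut self, key: &str) {
            self.delete_with_opts(key, WriteOpts::ALL)
        }
        fn delete_without_diffs(&mut self, key: &str) {
            self.delete_with_opts(key, WriteOpts::empty())
        }
    }

    /// An implementor only provides the two required methods.
    struct Logger;

    impl ToyStorageWrite for Logger {
        fn write_bytes_with_opts(&mut self, key: &str, _val: &[u8], opts: WriteOpts) {
            println!("write {key} with {opts:?}");
        }
        fn delete_with_opts(&mut self, key: &str, opts: WriteOpts) {
            println!("delete {key} with {opts:?}");
        }
    }

    fn main() {
        let mut s = Logger;
        // MASP-style keys skip the merkle tree and the diffs ...
        s.write_without_merkle_diffs("masp/nullifier", b"()");
        s.delete_without_diffs("masp/nullifier");
        // ... while ordinary keys keep the old behaviour.
        s.write_bytes("balance/alice", b"10");
        s.delete("balance/alice");
    }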