From e9f5a4607763dfe0ded5696159eb438d8d13d0e2 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Sun, 2 Jun 2024 19:51:41 +0200 Subject: [PATCH 01/40] Vectorizes transparent transfers --- crates/apps_lib/src/cli.rs | 23 ++- crates/apps_lib/src/client/tx.rs | 13 +- crates/benches/host_env.rs | 6 +- crates/benches/native_vps.rs | 6 +- crates/benches/process_wrapper.rs | 8 +- crates/light_sdk/src/transaction/transfer.rs | 2 +- crates/sdk/src/args.rs | 19 +- crates/sdk/src/lib.rs | 19 +- crates/sdk/src/signing.rs | 173 ++++++++++++++----- crates/sdk/src/tx.rs | 119 ++++++++----- crates/tests/src/integration/ledger_tests.rs | 21 +-- crates/token/src/lib.rs | 33 +++- wasm/tx_transparent_transfer/src/lib.rs | 24 +-- 13 files changed, 317 insertions(+), 149 deletions(-) diff --git a/crates/apps_lib/src/cli.rs b/crates/apps_lib/src/cli.rs index eadf78063a..2e59687eb1 100644 --- a/crates/apps_lib/src/cli.rs +++ b/crates/apps_lib/src/cli.rs @@ -4338,14 +4338,21 @@ pub mod args { ctx: &mut Context, ) -> Result, Self::Error> { let tx = self.tx.to_sdk(ctx)?; + let mut data = vec![]; let chain_ctx = ctx.borrow_mut_chain_or_exit(); + for transfer_data in self.data { + data.push(TxTransparentTransferData { + source: chain_ctx.get(&transfer_data.source), + target: chain_ctx.get(&transfer_data.target), + token: chain_ctx.get(&transfer_data.token), + amount: transfer_data.amount, + }); + } + Ok(TxTransparentTransfer:: { tx, - source: chain_ctx.get(&self.source), - target: chain_ctx.get(&self.target), - token: chain_ctx.get(&self.token), - amount: self.amount, + data, tx_code_path: self.tx_code_path.to_path_buf(), }) } @@ -4359,12 +4366,16 @@ pub mod args { let token = TOKEN.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let tx_code_path = PathBuf::from(TX_TRANSPARENT_TRANSFER_WASM); - Self { - tx, + + let data = vec![TxTransparentTransferData { source, target, token, amount, + }]; + Self { + tx, + data, tx_code_path, } } diff --git a/crates/apps_lib/src/client/tx.rs b/crates/apps_lib/src/client/tx.rs index 80e6e52e09..944e204279 100644 --- a/crates/apps_lib/src/client/tx.rs +++ b/crates/apps_lib/src/client/tx.rs @@ -743,7 +743,18 @@ pub async fn submit_transparent_transfer( namada: &impl Namada, args: args::TxTransparentTransfer, ) -> Result<(), error::Error> { - submit_reveal_aux(namada, args.tx.clone(), &args.source).await?; + submit_reveal_aux( + namada, + args.tx.clone(), + &args + .data + .first() + .ok_or_else(|| { + error::Error::Other("Missing transfer data".to_string()) + })? 
+ .source, + ) + .await?; let (mut tx, signing_data) = args.clone().build(namada).await?; diff --git a/crates/benches/host_env.rs b/crates/benches/host_env.rs index 1eee561163..48e521cfd3 100644 --- a/crates/benches/host_env.rs +++ b/crates/benches/host_env.rs @@ -3,7 +3,7 @@ use namada::core::account::AccountPublicKeysMap; use namada::core::address; use namada::core::collections::{HashMap, HashSet}; use namada::ledger::storage::DB; -use namada::token::{Amount, TransparentTransfer}; +use namada::token::{Amount, TransparentTransfer, TransparentTransferData}; use namada::tx::Authorization; use namada::vm::wasm::TxCache; use namada_apps_lib::wallet::defaults; @@ -18,12 +18,12 @@ use namada_node::bench_utils::{ // transaction fn tx_section_signature_validation(c: &mut Criterion) { let shell = BenchShell::default(); - let transfer_data = TransparentTransfer { + let transfer_data = TransparentTransfer(vec![TransparentTransferData { source: defaults::albert_address(), target: defaults::bertha_address(), token: address::testing::nam(), amount: Amount::native_whole(500).native_denominated(), - }; + }]); let tx = shell.generate_tx( TX_TRANSPARENT_TRANSFER_WASM, transfer_data, diff --git a/crates/benches/native_vps.rs b/crates/benches/native_vps.rs index 298c37fcf5..aba3cb9802 100644 --- a/crates/benches/native_vps.rs +++ b/crates/benches/native_vps.rs @@ -55,7 +55,7 @@ use namada::sdk::masp_primitives::merkle_tree::CommitmentTree; use namada::sdk::masp_primitives::transaction::Transaction; use namada::sdk::masp_proofs::sapling::SaplingVerificationContextInner; use namada::state::{Epoch, StorageRead, StorageWrite, TxIndex}; -use namada::token::{Amount, TransparentTransfer}; +use namada::token::{Amount, TransparentTransfer, TransparentTransferData}; use namada::tx::{BatchedTx, Code, Section, Tx}; use namada_apps_lib::wallet::defaults; use namada_node::bench_utils::{ @@ -476,12 +476,12 @@ fn vp_multitoken(c: &mut Criterion) { let transfer = shell.generate_tx( TX_TRANSPARENT_TRANSFER_WASM, - TransparentTransfer { + TransparentTransfer(vec![TransparentTransferData { source: defaults::albert_address(), target: defaults::bertha_address(), token: address::testing::nam(), amount: Amount::native_whole(1000).native_denominated(), - }, + }]), None, None, vec![&defaults::albert_keypair()], diff --git a/crates/benches/process_wrapper.rs b/crates/benches/process_wrapper.rs index 9510ee3125..65edb93467 100644 --- a/crates/benches/process_wrapper.rs +++ b/crates/benches/process_wrapper.rs @@ -3,7 +3,9 @@ use namada::core::address; use namada::core::key::RefTo; use namada::core::storage::BlockHeight; use namada::core::time::DateTimeUtc; -use namada::token::{Amount, DenominatedAmount, TransparentTransfer}; +use namada::token::{ + Amount, DenominatedAmount, TransparentTransfer, TransparentTransferData, +}; use namada::tx::data::{Fee, WrapperTx}; use namada::tx::Authorization; use namada_apps_lib::wallet::defaults; @@ -19,12 +21,12 @@ fn process_tx(c: &mut Criterion) { let mut batched_tx = shell.generate_tx( TX_TRANSPARENT_TRANSFER_WASM, - TransparentTransfer { + TransparentTransfer(vec![TransparentTransferData { source: defaults::albert_address(), target: defaults::bertha_address(), token: address::testing::nam(), amount: Amount::native_whole(1).native_denominated(), - }, + }]), None, None, vec![&defaults::albert_keypair()], diff --git a/crates/light_sdk/src/transaction/transfer.rs b/crates/light_sdk/src/transaction/transfer.rs index 84475660ea..29846ab7e0 100644 --- a/crates/light_sdk/src/transaction/transfer.rs +++ 
b/crates/light_sdk/src/transaction/transfer.rs @@ -25,7 +25,7 @@ impl Transfer { amount: DenominatedAmount, args: GlobalArgs, ) -> Self { - let data = namada_sdk::token::TransparentTransfer { + let data = namada_sdk::token::TransparentTransferData { source, target, token, diff --git a/crates/sdk/src/args.rs b/crates/sdk/src/args.rs index 8db6155e6f..6847bf0694 100644 --- a/crates/sdk/src/args.rs +++ b/crates/sdk/src/args.rs @@ -229,11 +229,9 @@ impl From for InputAmount { } } -/// Transparent transfer transaction arguments +/// Transparent transfer-specific arguments #[derive(Clone, Debug)] -pub struct TxTransparentTransfer { - /// Common tx arguments - pub tx: Tx, +pub struct TxTransparentTransferData { /// Transfer source address pub source: C::Address, /// Transfer target address @@ -242,6 +240,15 @@ pub struct TxTransparentTransfer { pub token: C::Address, /// Transferred token amount pub amount: InputAmount, +} + +/// Transparent transfer transaction arguments +#[derive(Clone, Debug)] +pub struct TxTransparentTransfer { + /// Common tx arguments + pub tx: Tx, + /// The transfer specific data + pub data: Vec>, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -258,7 +265,7 @@ impl TxBuilder for TxTransparentTransfer { } } -impl TxTransparentTransfer { +impl TxTransparentTransferData { /// Transfer source address pub fn source(self, source: C::Address) -> Self { Self { source, ..self } @@ -278,7 +285,9 @@ impl TxTransparentTransfer { pub fn amount(self, amount: InputAmount) -> Self { Self { amount, ..self } } +} +impl TxTransparentTransfer { /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { Self { diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index 7832b23d33..f08c87f2e3 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -175,16 +175,10 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { /// arguments fn new_transparent_transfer( &self, - source: Address, - target: Address, - token: Address, - amount: InputAmount, + data: Vec, ) -> args::TxTransparentTransfer { args::TxTransparentTransfer { - source, - target, - token, - amount, + data, tx_code_path: PathBuf::from(TX_TRANSPARENT_TRANSFER_WASM), tx: self.tx_builder(), } @@ -872,9 +866,7 @@ pub mod testing { }; use namada_governance::{InitProposalData, VoteProposalData}; use namada_ibc::testing::arb_ibc_any; - use namada_token::testing::{ - arb_denominated_amount, arb_transparent_transfer, - }; + use namada_token::testing::arb_denominated_amount; use namada_token::{ ShieldedTransfer, ShieldingTransfer, TransparentTransfer, UnshieldingTransfer, @@ -890,6 +882,9 @@ pub mod testing { use prost::Message; use ripemd::Digest as RipemdDigest; use sha2::Digest; + use token::testing::{ + arb_transparent_transfer, arb_vectorized_transparent_transfer, + }; use super::*; use crate::account::tests::{arb_init_account, arb_update_account}; @@ -1093,7 +1088,7 @@ pub mod testing { pub fn arb_transparent_transfer_tx()( mut header in arb_header(), wrapper in arb_wrapper_tx(), - transfer in arb_transparent_transfer(), + transfer in arb_vectorized_transparent_transfer(10), code_hash in arb_hash(), ) -> (Tx, TxData) { header.tx_type = TxType::Wrapper(Box::new(wrapper)); diff --git a/crates/sdk/src/signing.rs b/crates/sdk/src/signing.rs index a6e1ef5f44..1f652ff544 100644 --- a/crates/sdk/src/signing.rs +++ b/crates/sdk/src/signing.rs @@ -702,6 +702,11 @@ fn format_outputs(output: &mut Vec) { } } +enum TransferSide<'a> { + Source(&'a Address), + Target(&'a Address), +} + 
enum TokenTransfer<'a> { Transparent(&'a token::TransparentTransfer), Shielded, @@ -710,53 +715,128 @@ enum TokenTransfer<'a> { } impl TokenTransfer<'_> { - fn source(&self) -> Option<&Address> { + fn sources(&self) -> Vec<&Address> { match self { - TokenTransfer::Transparent(transfer) => Some(&transfer.source), - TokenTransfer::Shielded => None, - TokenTransfer::Shielding(transfer) => Some(&transfer.source), - TokenTransfer::Unshielding(_) => None, + TokenTransfer::Transparent(transfers) => transfers + .0 + .iter() + .map(|transfer| &transfer.source) + .collect(), + + TokenTransfer::Shielded => Default::default(), + TokenTransfer::Shielding(transfer) => vec![&transfer.source], + TokenTransfer::Unshielding(_) => Default::default(), } } - fn target(&self) -> Option<&Address> { + fn targets(&self) -> Vec<&Address> { match self { - TokenTransfer::Transparent(transfer) => Some(&transfer.target), - TokenTransfer::Shielded => None, - TokenTransfer::Shielding(_) => None, - TokenTransfer::Unshielding(transfer) => Some(&transfer.target), + TokenTransfer::Transparent(transfers) => transfers + .0 + .iter() + .map(|transfer| &transfer.target) + .collect(), + + TokenTransfer::Shielded => Default::default(), + TokenTransfer::Shielding(_) => Default::default(), + TokenTransfer::Unshielding(transfer) => { + vec![&transfer.target] + } } } - fn token_and_amount(&self) -> Option<(&Address, DenominatedAmount)> { - match self { - TokenTransfer::Transparent(transfer) => { - Some((&transfer.token, transfer.amount)) + fn tokens_and_amounts( + &self, + address: TransferSide<'_>, + ) -> Result, Error> { + Ok(match self { + TokenTransfer::Transparent(transfers) => { + let mut map: HashMap<&Address, DenominatedAmount> = + HashMap::new(); + + for transfer in &transfers.0 { + match address { + TransferSide::Source(source) + if source == &transfer.source => + { + match map.get_mut(&transfer.token) { + Some(amount) => { + *amount = amount + .checked_add(transfer.amount) + .ok_or_else(|| { + Error::Other( + "Overflow in amount" + .to_string(), + ) + })?; + } + None => { + map.insert( + &transfer.token, + transfer.amount, + ); + } + } + } + TransferSide::Target(target) + if target == &transfer.target => + { + match map.get_mut(&transfer.token) { + Some(amount) => { + *amount = amount + .checked_add(transfer.amount) + .ok_or_else(|| { + Error::Other( + "Overflow in amount" + .to_string(), + ) + })?; + } + None => { + map.insert( + &transfer.token, + transfer.amount, + ); + } + } + } + _ => (), + } + } + + map } - TokenTransfer::Shielded => None, + TokenTransfer::Shielded => Default::default(), TokenTransfer::Shielding(transfer) => { - Some((&transfer.token, transfer.amount)) + [(&transfer.token, transfer.amount)].into_iter().collect() } TokenTransfer::Unshielding(transfer) => { - Some((&transfer.token, transfer.amount)) + [(&transfer.token, transfer.amount)].into_iter().collect() } - } + }) } } -/// Adds a Ledger output for the sender and destination for transparent and MASP -/// transactions +/// Adds a Ledger output for the senders and destinations for transparent and +/// MASP transactions async fn make_ledger_token_transfer_endpoints( tokens: &HashMap, output: &mut Vec, transfer: TokenTransfer<'_>, builder: Option<&MaspBuilder>, assets: &HashMap, -) { - if let Some(source) = transfer.source() { - output.push(format!("Sender : {}", source)); - if let Some((token, amount)) = transfer.token_and_amount() { - make_ledger_amount_addr(tokens, output, amount, token, "Sending "); +) -> Result<(), Error> { + let sources = 
transfer.sources(); + if !sources.is_empty() { + for source in transfer.sources() { + output.push(format!("Sender : {}", source)); + for (token, amount) in + transfer.tokens_and_amounts(TransferSide::Source(source))? + { + make_ledger_amount_addr( + tokens, output, amount, token, "Sending ", + ); + } } } else if let Some(builder) = builder { for sapling_input in builder.builder.sapling_inputs() { @@ -773,16 +853,21 @@ async fn make_ledger_token_transfer_endpoints( .await; } } - if let Some(target) = transfer.target() { - output.push(format!("Destination : {}", target)); - if let Some((token, amount)) = transfer.token_and_amount() { - make_ledger_amount_addr( - tokens, - output, - amount, - token, - "Receiving ", - ); + let targets = transfer.targets(); + if !targets.is_empty() { + for target in targets { + output.push(format!("Destination : {}", target)); + for (token, amount) in + transfer.tokens_and_amounts(TransferSide::Target(target))? + { + make_ledger_amount_addr( + tokens, + output, + amount, + token, + "Receiving ", + ); + } } } else if let Some(builder) = builder { for sapling_output in builder.builder.sapling_outputs() { @@ -799,6 +884,8 @@ async fn make_ledger_token_transfer_endpoints( .await; } } + + Ok(()) } /// Convert decimal numbers into the format used by Ledger. Specifically remove @@ -1294,7 +1381,7 @@ pub async fn to_ledger_vector( None, &HashMap::default(), ) - .await; + .await?; make_ledger_token_transfer_endpoints( &tokens, &mut tv.output_expert, @@ -1302,7 +1389,7 @@ pub async fn to_ledger_vector( None, &HashMap::default(), ) - .await; + .await?; } else if code_sec.tag == Some(TX_SHIELDED_TRANSFER_WASM.to_string()) { let transfer = token::ShieldedTransfer::try_from_slice( &tx.data(cmt) @@ -1341,7 +1428,7 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; make_ledger_token_transfer_endpoints( &tokens, &mut tv.output_expert, @@ -1349,7 +1436,7 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; } else if code_sec.tag == Some(TX_SHIELDING_TRANSFER_WASM.to_string()) { let transfer = token::ShieldingTransfer::try_from_slice( &tx.data(cmt) @@ -1388,7 +1475,7 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; make_ledger_token_transfer_endpoints( &tokens, &mut tv.output_expert, @@ -1396,7 +1483,7 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; } else if code_sec.tag == Some(TX_UNSHIELDING_TRANSFER_WASM.to_string()) { let transfer = token::UnshieldingTransfer::try_from_slice( @@ -1436,7 +1523,7 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; make_ledger_token_transfer_endpoints( &tokens, &mut tv.output_expert, @@ -1444,7 +1531,7 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; } else if code_sec.tag == Some(TX_IBC_WASM.to_string()) { let any_msg = Any::decode( tx.data(cmt) diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index b9daabeca2..17014589a6 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -66,6 +66,7 @@ pub use namada_tx::{Authorization, *}; use num_traits::Zero; use rand_core::{OsRng, RngCore}; +use crate::args::TxTransparentTransferData; use crate::control_flow::time; use crate::error::{EncodingError, Error, QueryError, Result, TxSubmitError}; use crate::io::Io; @@ -2831,64 +2832,86 @@ pub async fn build_transparent_transfer( context: &N, args: &mut args::TxTransparentTransfer, ) -> Result<(Tx, SigningTxData)> { - let source = &args.source; - let target = 
&args.target; + let mut transfers = vec![]; - let default_signer = Some(source.clone()); - let signing_data = signing::aux_signing_data( - context, - &args.tx, - Some(source.clone()), - default_signer, - ) - .await?; + // Evaluate signer and fees + let (signing_data, fee_amount, updated_balance) = { + let signing_data = signing::aux_signing_data( + context, + &args.tx, + None, + // If signing keys arg is not provided assume a single transfer and + // take the source + args.data + .first() + .map(|transfer_data| transfer_data.source.clone()), + ) + .await?; - // Transparent fee payment - let (fee_amount, updated_balance) = - validate_transparent_fee(context, &args.tx, &signing_data.fee_payer) - .await - .map(|(fee_amount, updated_balance)| { - (fee_amount, Some(updated_balance)) - })?; + // Transparent fee payment + let (fee_amount, updated_balance) = validate_transparent_fee( + context, + &args.tx, + &signing_data.fee_payer, + ) + .await + .map(|(fee_amount, updated_balance)| { + (fee_amount, Some(updated_balance)) + })?; - // Check that the source address exists on chain - source_exists_or_err(source.clone(), args.tx.force, context).await?; - // Check that the target address exists on chain - target_exists_or_err(target.clone(), args.tx.force, context).await?; + (signing_data, fee_amount, updated_balance) + }; - // Validate the amount given - let validated_amount = - validate_amount(context, args.amount, &args.token, args.tx.force) + for TxTransparentTransferData { + source, + target, + token, + amount, + } in &args.data + { + // Check that the source address exists on chain + source_exists_or_err(source.clone(), args.tx.force, context).await?; + // Check that the target address exists on chain + target_exists_or_err(target.clone(), args.tx.force, context).await?; + + // Validate the amount given + let validated_amount = + validate_amount(context, amount.to_owned(), token, args.tx.force) + .await?; + + // Check the balance of the source + if let Some(updated_balance) = &updated_balance { + let check_balance = if &updated_balance.source == source + && &updated_balance.token == token + { + CheckBalance::Balance(updated_balance.post_balance) + } else { + CheckBalance::Query(balance_key(token, source)) + }; + + check_balance_too_low_err( + token, + source, + validated_amount.amount(), + check_balance, + args.tx.force, + context, + ) .await?; + } - // Check the balance of the source - if let Some(updated_balance) = updated_balance { - let check_balance = if &updated_balance.source == source - && updated_balance.token == args.token - { - CheckBalance::Balance(updated_balance.post_balance) - } else { - CheckBalance::Query(balance_key(&args.token, source)) + // Construct the corresponding transparent Transfer object + let transfer_data = token::TransparentTransferData { + source: source.to_owned(), + target: target.to_owned(), + token: token.to_owned(), + amount: validated_amount, }; - check_balance_too_low_err( - &args.token, - source, - validated_amount.amount(), - check_balance, - args.tx.force, - context, - ) - .await?; + transfers.push(transfer_data); } - // Construct the corresponding transparent Transfer object - let transfer = token::TransparentTransfer { - source: source.clone(), - target: target.clone(), - token: args.token.clone(), - amount: validated_amount, - }; + let transfer = token::TransparentTransfer(transfers); let tx = build_pow_flag( context, diff --git a/crates/tests/src/integration/ledger_tests.rs b/crates/tests/src/integration/ledger_tests.rs index 214ab32a1d..3ec0da9b0e 
100644 --- a/crates/tests/src/integration/ledger_tests.rs +++ b/crates/tests/src/integration/ledger_tests.rs @@ -59,16 +59,17 @@ fn ledger_txs_and_queries() -> Result<()> { let validator_one_rpc = "http://127.0.0.1:26567"; let (node, _services) = setup::setup()?; - let transfer = token::TransparentTransfer { - source: defaults::bertha_address(), - target: defaults::albert_address(), - token: node.native_token(), - amount: token::DenominatedAmount::new( - token::Amount::native_whole(10), - token::NATIVE_MAX_DECIMAL_PLACES.into(), - ), - } - .serialize_to_vec(); + let transfer = + token::TransparentTransfer(vec![token::TransparentTransferData { + source: defaults::bertha_address(), + target: defaults::albert_address(), + token: node.native_token(), + amount: token::DenominatedAmount::new( + token::Amount::native_whole(10), + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ), + }]) + .serialize_to_vec(); let tx_data_path = node.test_dir.path().join("tx.data"); std::fs::write(&tx_data_path, transfer).unwrap(); let tx_data_path = tx_data_path.to_string_lossy(); diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index c14d93f430..4a67f36518 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -67,6 +67,23 @@ where Ok(()) } +/// Arguments for a multi-party transparent token transfer +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Hash, + Eq, + PartialOrd, + Serialize, + Deserialize, +)] +pub struct TransparentTransfer(pub Vec); + /// Arguments for a transparent token transfer #[derive( Debug, @@ -82,7 +99,7 @@ where Serialize, Deserialize, )] -pub struct TransparentTransfer { +pub struct TransparentTransferData { /// Source address will spend the tokens pub source: Address, /// Target address will receive the tokens @@ -178,7 +195,7 @@ pub mod testing { pub use namada_trans_token::testing::*; use proptest::prelude::*; - use super::TransparentTransfer; + use super::{TransparentTransfer, TransparentTransferData}; prop_compose! 
{ /// Generate a transparent transfer @@ -187,8 +204,8 @@ pub mod testing { target in arb_non_internal_address(), token in arb_established_address().prop_map(Address::Established), amount in arb_denominated_amount(), - ) -> TransparentTransfer { - TransparentTransfer { + ) -> TransparentTransferData{ + TransparentTransferData { source, target, token, @@ -196,4 +213,12 @@ pub mod testing { } } } + + /// Generate a vectorized transparent transfer + pub fn arb_vectorized_transparent_transfer( + number_of_txs: usize, + ) -> impl Strategy { + proptest::collection::vec(arb_transparent_transfer(), 0..number_of_txs) + .prop_map(TransparentTransfer) + } } diff --git a/wasm/tx_transparent_transfer/src/lib.rs b/wasm/tx_transparent_transfer/src/lib.rs index 76aacc3891..0da40600d8 100644 --- a/wasm/tx_transparent_transfer/src/lib.rs +++ b/wasm/tx_transparent_transfer/src/lib.rs @@ -7,16 +7,20 @@ use namada_tx_prelude::*; #[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { let data = ctx.get_tx_data(&tx_data)?; - let transfer = token::TransparentTransfer::try_from_slice(&data[..]) + let transfers = token::TransparentTransfer::try_from_slice(&data[..]) .wrap_err("Failed to decode token::TransparentTransfer tx data")?; - debug_log!("apply_tx called with transfer: {:#?}", transfer); + debug_log!("apply_tx called with transfer: {:#?}", transfers); - token::transfer( - ctx, - &transfer.source, - &transfer.target, - &transfer.token, - transfer.amount.amount(), - ) - .wrap_err("Token transfer failed") + for transfer in transfers.0 { + token::transfer( + ctx, + &transfer.source, + &transfer.target, + &transfer.token, + transfer.amount.amount(), + ) + .wrap_err("Token transfer failed")?; + } + + Ok(()) } From 36e215bb9ad00d333715f0293c0d449454cfca79 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 3 Jun 2024 17:58:09 +0200 Subject: [PATCH 02/40] Vectorizes masp transfers --- crates/apps_lib/src/cli.rs | 71 ++- crates/light_sdk/src/transaction/transfer.rs | 69 +-- crates/node/src/bench_utils.rs | 31 +- crates/sdk/src/args.rs | 53 +- crates/sdk/src/lib.rs | 76 +-- crates/sdk/src/masp.rs | 540 ++++++++++--------- crates/sdk/src/signing.rs | 127 +++-- crates/sdk/src/tx.rs | 249 +++++---- crates/token/src/lib.rs | 94 +++- crates/tx_prelude/src/token.rs | 3 +- wasm/tx_shielding_transfer/src/lib.rs | 24 +- wasm/tx_unshielding_transfer/src/lib.rs | 24 +- 12 files changed, 833 insertions(+), 528 deletions(-) diff --git a/crates/apps_lib/src/cli.rs b/crates/apps_lib/src/cli.rs index 2e59687eb1..ff0745ffc1 100644 --- a/crates/apps_lib/src/cli.rs +++ b/crates/apps_lib/src/cli.rs @@ -4366,13 +4366,13 @@ pub mod args { let token = TOKEN.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let tx_code_path = PathBuf::from(TX_TRANSPARENT_TRANSFER_WASM); - let data = vec![TxTransparentTransferData { source, target, token, amount, }]; + Self { tx, data, @@ -4404,14 +4404,21 @@ pub mod args { ctx: &mut Context, ) -> Result, Self::Error> { let tx = self.tx.to_sdk(ctx)?; + let mut data = vec![]; let chain_ctx = ctx.borrow_mut_chain_or_exit(); + for transfer_data in self.data { + data.push(TxShieldedTransferData { + source: chain_ctx.get_cached(&transfer_data.source), + target: chain_ctx.get(&transfer_data.target), + token: chain_ctx.get(&transfer_data.token), + amount: transfer_data.amount, + }); + } + Ok(TxShieldedTransfer:: { tx, - source: chain_ctx.get_cached(&self.source), - target: chain_ctx.get(&self.target), - token: chain_ctx.get(&self.token), - amount: self.amount, + 
data, tx_code_path: self.tx_code_path.to_path_buf(), }) } @@ -4425,12 +4432,16 @@ pub mod args { let token = TOKEN.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let tx_code_path = PathBuf::from(TX_SHIELDED_TRANSFER_WASM); - Self { - tx, + let data = vec![TxShieldedTransferData { source, target, token, amount, + }]; + + Self { + tx, + data, tx_code_path, } } @@ -4464,14 +4475,21 @@ pub mod args { ctx: &mut Context, ) -> Result, Self::Error> { let tx = self.tx.to_sdk(ctx)?; + let mut data = vec![]; let chain_ctx = ctx.borrow_mut_chain_or_exit(); + for transfer_data in self.data { + data.push(TxShieldingTransferData { + source: chain_ctx.get(&transfer_data.source), + token: chain_ctx.get(&transfer_data.token), + amount: transfer_data.amount, + }); + } + Ok(TxShieldingTransfer:: { tx, - source: chain_ctx.get(&self.source), + data, target: chain_ctx.get(&self.target), - token: chain_ctx.get(&self.token), - amount: self.amount, tx_code_path: self.tx_code_path.to_path_buf(), }) } @@ -4485,12 +4503,16 @@ pub mod args { let token = TOKEN.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let tx_code_path = PathBuf::from(TX_SHIELDING_TRANSFER_WASM); - Self { - tx, + let data = vec![TxShieldingTransferData { source, - target, token, amount, + }]; + + Self { + tx, + data, + target, tx_code_path, } } @@ -4525,14 +4547,21 @@ pub mod args { ctx: &mut Context, ) -> Result, Self::Error> { let tx = self.tx.to_sdk(ctx)?; + let mut data = vec![]; let chain_ctx = ctx.borrow_mut_chain_or_exit(); + for transfer_data in self.data { + data.push(TxUnshieldingTransferData { + target: chain_ctx.get(&transfer_data.target), + token: chain_ctx.get(&transfer_data.token), + amount: transfer_data.amount, + }); + } + Ok(TxUnshieldingTransfer:: { tx, + data, source: chain_ctx.get_cached(&self.source), - target: chain_ctx.get(&self.target), - token: chain_ctx.get(&self.token), - amount: self.amount, tx_code_path: self.tx_code_path.to_path_buf(), }) } @@ -4546,12 +4575,16 @@ pub mod args { let token = TOKEN.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let tx_code_path = PathBuf::from(TX_UNSHIELDING_TRANSFER_WASM); - Self { - tx, - source, + let data = vec![TxUnshieldingTransferData { target, token, amount, + }]; + + Self { + tx, + source, + data, tx_code_path, } } diff --git a/crates/light_sdk/src/transaction/transfer.rs b/crates/light_sdk/src/transaction/transfer.rs index 29846ab7e0..9dc7aa8e45 100644 --- a/crates/light_sdk/src/transaction/transfer.rs +++ b/crates/light_sdk/src/transaction/transfer.rs @@ -1,7 +1,11 @@ use namada_sdk::address::Address; use namada_sdk::hash::Hash; use namada_sdk::key::common; -use namada_sdk::token::DenominatedAmount; +use namada_sdk::token::transaction::Transaction; +use namada_sdk::token::ShieldingTransferData; +pub use namada_sdk::token::{ + DenominatedAmount, TransparentTransfer, UnshieldingTransferData, +}; use namada_sdk::tx::data::GasLimit; use namada_sdk::tx::{ Authorization, Tx, TxError, TX_SHIELDED_TRANSFER_WASM, @@ -19,81 +23,78 @@ pub struct Transfer(Tx); impl Transfer { /// Build a transparent transfer transaction from the given parameters pub fn transparent( - source: Address, - target: Address, - token: Address, - amount: DenominatedAmount, + transfers: TransparentTransfer, args: GlobalArgs, ) -> Self { - let data = namada_sdk::token::TransparentTransferData { - source, - target, - token, - amount, - }; - Self(transaction::build_tx( args, - data, + transfers, 
TX_TRANSPARENT_TRANSFER_WASM.to_string(), )) } /// Build a shielded transfer transaction from the given parameters - pub fn shielded(shielded_section_hash: Hash, args: GlobalArgs) -> Self { + pub fn shielded( + shielded_section_hash: Hash, + transaction: Transaction, + args: GlobalArgs, + ) -> Self { let data = namada_sdk::token::ShieldedTransfer { section_hash: shielded_section_hash, }; - Self(transaction::build_tx( + let mut tx = transaction::build_tx( args, data, TX_SHIELDED_TRANSFER_WASM.to_string(), - )) + ); + tx.add_masp_tx_section(transaction); + + Self(tx) } /// Build a shielding transfer transaction from the given parameters pub fn shielding( - source: Address, - token: Address, - amount: DenominatedAmount, + transfers: Vec, shielded_section_hash: Hash, + transaction: Transaction, args: GlobalArgs, ) -> Self { - let data = namada_sdk::token::ShieldingTransfer { - source, - token, - amount, + let data = namada_sdk::token::ShieldingMultiTransfer { + data: transfers, shielded_section_hash, }; - Self(transaction::build_tx( + let mut tx = transaction::build_tx( args, data, TX_SHIELDING_TRANSFER_WASM.to_string(), - )) + ); + tx.add_masp_tx_section(transaction); + + Self(tx) } /// Build an unshielding transfer transaction from the given parameters pub fn unshielding( - target: Address, - token: Address, - amount: DenominatedAmount, + transfers: Vec, shielded_section_hash: Hash, + transaction: Transaction, args: GlobalArgs, ) -> Self { - let data = namada_sdk::token::UnshieldingTransfer { - target, - token, - amount, + let data = namada_sdk::token::UnshieldingMultiTransfer { + data: transfers, shielded_section_hash, }; - Self(transaction::build_tx( + let mut tx = transaction::build_tx( args, data, TX_UNSHIELDING_TRANSFER_WASM.to_string(), - )) + ); + tx.add_masp_tx_section(transaction); + + Self(tx) } /// Get the bytes to sign for the given transaction diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index ef80036715..9a910df04e 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -77,7 +77,7 @@ use namada::masp::MaspTxRefs; use namada::state::StorageRead; use namada::token::{ Amount, DenominatedAmount, ShieldedTransfer, ShieldingTransfer, - UnshieldingTransfer, + ShieldingTransferData, UnshieldingTransfer, UnshieldingTransferData, }; use namada::tx::data::pos::Bond; use namada::tx::data::{ @@ -94,7 +94,7 @@ use namada_apps_lib::cli::context::FromContext; use namada_apps_lib::cli::Context; use namada_apps_lib::wallet::{defaults, CliWalletUtils}; use namada_sdk::masp::{ - self, ContextSyncStatus, ShieldedContext, ShieldedUtils, + self, ContextSyncStatus, MaspTransferData, ShieldedContext, ShieldedUtils, }; pub use namada_sdk::tx::{ TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, @@ -1080,14 +1080,17 @@ impl BenchShieldedCtx { StdIo, native_token, ); + let masp_transfer_data = MaspTransferData { + source: source.clone(), + target: target.clone(), + token: address::testing::nam(), + amount: denominated_amount, + }; let shielded = async_runtime .block_on( ShieldedContext::::gen_shielded_transfer( &namada, - &source, - &target, - &address::testing::nam(), - denominated_amount, + vec![masp_transfer_data], true, ), ) @@ -1125,9 +1128,11 @@ impl BenchShieldedCtx { namada.client().generate_tx( TX_SHIELDING_TRANSFER_WASM, ShieldingTransfer { - source: source.effective_address(), - token: address::testing::nam(), - amount: DenominatedAmount::native(amount), + data: ShieldingTransferData { + source: source.effective_address(), + 
token: address::testing::nam(), + amount: DenominatedAmount::native(amount), + }, shielded_section_hash, }, Some(shielded), @@ -1138,9 +1143,11 @@ impl BenchShieldedCtx { namada.client().generate_tx( TX_UNSHIELDING_TRANSFER_WASM, UnshieldingTransfer { - target: target.effective_address(), - token: address::testing::nam(), - amount: DenominatedAmount::native(amount), + data: UnshieldingTransferData { + target: target.effective_address(), + token: address::testing::nam(), + amount: DenominatedAmount::native(amount), + }, shielded_section_hash, }, Some(shielded), diff --git a/crates/sdk/src/args.rs b/crates/sdk/src/args.rs index 6847bf0694..7cc775829a 100644 --- a/crates/sdk/src/args.rs +++ b/crates/sdk/src/args.rs @@ -307,11 +307,9 @@ impl TxTransparentTransfer { } } -/// Shielded transfer transaction arguments +/// Shielded transfer-specific arguments #[derive(Clone, Debug)] -pub struct TxShieldedTransfer { - /// Common tx arguments - pub tx: Tx, +pub struct TxShieldedTransferData { /// Transfer source spending key pub source: C::SpendingKey, /// Transfer target address @@ -320,6 +318,15 @@ pub struct TxShieldedTransfer { pub token: C::Address, /// Transferred token amount pub amount: InputAmount, +} + +/// Shielded transfer transaction arguments +#[derive(Clone, Debug)] +pub struct TxShieldedTransfer { + /// Common tx arguments + pub tx: Tx, + /// Transfer-specific data + pub data: Vec>, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -334,19 +341,26 @@ impl TxShieldedTransfer { } } +/// Shielded transfer-specific arguments +#[derive(Clone, Debug)] +pub struct TxShieldingTransferData { + /// Transfer source spending key + pub source: C::Address, + /// Transferred token address + pub token: C::Address, + /// Transferred token amount + pub amount: InputAmount, +} + /// Shielding transfer transaction arguments #[derive(Clone, Debug)] pub struct TxShieldingTransfer { /// Common tx arguments pub tx: Tx, - /// Transfer source address - pub source: C::Address, /// Transfer target address pub target: C::PaymentAddress, - /// Transferred token address - pub token: C::Address, - /// Transferred token amount - pub amount: InputAmount, + /// Transfer-specific data + pub data: Vec>, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -361,19 +375,26 @@ impl TxShieldingTransfer { } } -/// Unshielding transfer transaction arguments +/// Unshielding transfer-specific arguments #[derive(Clone, Debug)] -pub struct TxUnshieldingTransfer { - /// Common tx arguments - pub tx: Tx, - /// Transfer source spending key - pub source: C::SpendingKey, +pub struct TxUnshieldingTransferData { /// Transfer target address pub target: C::Address, /// Transferred token address pub token: C::Address, /// Transferred token amount pub amount: InputAmount, +} + +/// Unshielding transfer transaction arguments +#[derive(Clone, Debug)] +pub struct TxUnshieldingTransfer { + /// Common tx arguments + pub tx: Tx, + /// Transfer source spending key + pub source: C::SpendingKey, + /// Transfer-specific data + pub data: Vec>, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index f08c87f2e3..c4778e285d 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -188,16 +188,10 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { /// arguments fn new_shielded_transfer( &self, - source: ExtendedSpendingKey, - target: PaymentAddress, - token: Address, - amount: InputAmount, + data: Vec, ) -> args::TxShieldedTransfer { 
args::TxShieldedTransfer { - source, - target, - token, - amount, + data, tx_code_path: PathBuf::from(TX_SHIELDED_TRANSFER_WASM), tx: self.tx_builder(), } @@ -207,16 +201,12 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { /// arguments fn new_shielding_transfer( &self, - source: Address, target: PaymentAddress, - token: Address, - amount: InputAmount, + data: Vec, ) -> args::TxShieldingTransfer { args::TxShieldingTransfer { - source, + data, target, - token, - amount, tx_code_path: PathBuf::from(TX_SHIELDING_TRANSFER_WASM), tx: self.tx_builder(), } @@ -227,15 +217,11 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { fn new_unshielding_transfer( &self, source: ExtendedSpendingKey, - target: Address, - token: Address, - amount: InputAmount, + data: Vec, ) -> args::TxUnshieldingTransfer { args::TxUnshieldingTransfer { source, - target, - token, - amount, + data, tx_code_path: PathBuf::from(TX_UNSHIELDING_TRANSFER_WASM), tx: self.tx_builder(), } @@ -627,8 +613,8 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { args: &args::Tx, signing_data: SigningTxData, with: impl Fn(Tx, common::PublicKey, HashSet, D) -> F - + MaybeSend - + MaybeSync, + + MaybeSend + + MaybeSync, user_data: D, ) -> crate::error::Result<()> where @@ -867,10 +853,7 @@ pub mod testing { use namada_governance::{InitProposalData, VoteProposalData}; use namada_ibc::testing::arb_ibc_any; use namada_token::testing::arb_denominated_amount; - use namada_token::{ - ShieldedTransfer, ShieldingTransfer, TransparentTransfer, - UnshieldingTransfer, - }; + use namada_token::{ShieldedTransfer, TransparentTransfer}; use namada_tx::data::pgf::UpdateStewardCommission; use namada_tx::data::pos::{ BecomeValidator, Bond, CommissionChange, ConsensusKeyChange, @@ -882,8 +865,10 @@ pub mod testing { use prost::Message; use ripemd::Digest as RipemdDigest; use sha2::Digest; - use token::testing::{ - arb_transparent_transfer, arb_vectorized_transparent_transfer, + use token::testing::arb_vectorized_transparent_transfer; + use token::{ + ShieldingMultiTransfer, ShieldingTransferData, + UnshieldingMultiTransfer, UnshieldingTransferData, }; use super::*; @@ -928,8 +913,11 @@ pub mod testing { Withdraw(Withdraw), TransparentTransfer(TransparentTransfer), ShieldedTransfer(ShieldedTransfer, (StoredBuildParams, String)), - ShieldingTransfer(ShieldingTransfer, (StoredBuildParams, String)), - UnshieldingTransfer(UnshieldingTransfer, (StoredBuildParams, String)), + ShieldingTransfer(ShieldingMultiTransfer, (StoredBuildParams, String)), + UnshieldingTransfer( + UnshieldingMultiTransfer, + (StoredBuildParams, String), + ), Bond(Bond), Redelegation(Redelegation), UpdateStewardCommission(UpdateStewardCommission), @@ -1122,17 +1110,17 @@ pub mod testing { } prop_compose! 
{ - /// Generate an arbitrary transfer transaction - pub fn arb_masp_transfer_tx()(transfer in arb_transparent_transfer())( + /// Generate an arbitrary masp transfer transaction + pub fn arb_masp_transfer_tx()(transfers in arb_vectorized_transparent_transfer(5))( mut header in arb_header(), wrapper in arb_wrapper_tx(), code_hash in arb_hash(), (masp_tx_type, (shielded_transfer, asset_types, build_params)) in prop_oneof![ (Just(MaspTxType::Shielded), arb_shielded_transfer(0..MAX_ASSETS)), - (Just(MaspTxType::Shielding), arb_shielding_transfer(encode_address(&transfer.source), 1)), - (Just(MaspTxType::Unshielding), arb_deshielding_transfer(encode_address(&transfer.target), 1)), + (Just(MaspTxType::Shielding), arb_shielding_transfer(encode_address(&transfers.0.first().unwrap().source), 1)), + (Just(MaspTxType::Unshielding), arb_deshielding_transfer(encode_address(&transfers.0.first().unwrap().target), 1)), ], - transfer in Just(transfer), + transfers in Just(transfers), ) -> (Tx, TxData) { header.tx_type = TxType::Wrapper(Box::new(wrapper)); let mut tx = Tx { header, sections: vec![] }; @@ -1155,7 +1143,14 @@ pub mod testing { decoded.denom, ); tx.add_code_from_hash(code_hash, Some(TX_SHIELDING_TRANSFER_WASM.to_owned())); - let data = ShieldingTransfer {source: transfer.source, token, amount, shielded_section_hash }; + let data = transfers.0.into_iter().map(|transfer| + ShieldingTransferData{ + source: transfer.source, + token: token.clone(), + amount + } + ).collect(); + let data = ShieldingMultiTransfer{data, shielded_section_hash }; tx.add_data(data.clone()); TxData::ShieldingTransfer(data, (build_params, build_param_bytes)) }, @@ -1168,7 +1163,14 @@ pub mod testing { decoded.denom, ); tx.add_code_from_hash(code_hash, Some(TX_UNSHIELDING_TRANSFER_WASM.to_owned())); - let data = UnshieldingTransfer {target: transfer.target, token, amount, shielded_section_hash }; + let data = transfers.0.into_iter().map(|transfer| + UnshieldingTransferData{ + target: transfer.target, + token: token.clone(), + amount + } + ).collect(); + let data = UnshieldingMultiTransfer{data, shielded_section_hash }; tx.add_data(data.clone()); TxData::UnshieldingTransfer(data, (build_params, build_param_bytes)) }, diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs index 516a7ad611..0ae8d00e3b 100644 --- a/crates/sdk/src/masp.rs +++ b/crates/sdk/src/masp.rs @@ -121,6 +121,16 @@ pub struct ShieldedTransfer { pub epoch: MaspEpoch, } +/// The data for a single masp transfer +#[allow(missing_docs)] +#[derive(Debug)] +pub struct MaspTransferData { + pub source: TransferSource, + pub target: TransferTarget, + pub token: Address, + pub amount: token::DenominatedAmount, +} + /// Shielded pool data for a token #[allow(missing_docs)] #[derive(Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] @@ -134,11 +144,17 @@ pub struct MaspTokenRewardData { } /// A return type for gen_shielded_transfer +#[allow(clippy::large_enum_variant)] #[derive(Error, Debug)] pub enum TransferErr { /// Build error for masp errors - #[error("{0}")] - Build(#[from] builder::Error), + #[error("{error}")] + Build { + /// The error + error: builder::Error, + /// The optional associated transfer data for logging purposes + data: Option, + }, /// errors #[error("{0}")] General(#[from] Error), @@ -1515,36 +1531,9 @@ impl ShieldedContext { /// amounts and signatures specified by the containing Transfer object. 
pub async fn gen_shielded_transfer( context: &impl Namada, - source: &TransferSource, - target: &TransferTarget, - token: &Address, - amount: token::DenominatedAmount, + data: Vec, update_ctx: bool, ) -> Result, TransferErr> { - // No shielded components are needed when neither source nor destination - // are shielded - - let spending_key = source.spending_key(); - let payment_address = target.payment_address(); - // No shielded components are needed when neither source nor - // destination are shielded - if spending_key.is_none() && payment_address.is_none() { - return Ok(None); - } - // We want to fund our transaction solely from supplied spending key - let spending_key = spending_key.map(|x| x.into()); - { - // Load the current shielded context given the spending key we - // possess - let mut shielded = context.shielded_mut().await; - let _ = shielded.load().await; - } - // Determine epoch in which to submit potential shielded transaction - let epoch = rpc::query_masp_epoch(context.client()).await?; - // Context required for storing which notes are in the source's - // possession - let memo = MemoBytes::empty(); - // Try to get a seed from env var, if any. #[allow(unused_mut)] let mut rng = StdRng::from_rng(OsRng).unwrap(); @@ -1567,7 +1556,6 @@ impl ShieldedContext { rng }; - // Now we build up the transaction within this object // TODO: if the user requested the default expiration, there might be a // small discrepancy between the datetime we calculate here and the one // we set for the transaction. This should be small enough to not cause @@ -1621,235 +1609,305 @@ impl ShieldedContext { // use from the masp crate to specify the expiration better expiration_height.into(), ); + // Determine epoch in which to submit potential shielded transaction + let epoch = rpc::query_masp_epoch(context.client()).await?; - // Convert transaction amount into MASP types - let Some(denom) = query_denom(context.client(), token).await else { - return Err(TransferErr::General(Error::from( - QueryError::General(format!("denomination for token {token}")), - ))); - }; - let (asset_types, masp_amount) = { - let mut shielded = context.shielded_mut().await; - // Do the actual conversion to an asset type - let amount = shielded - .convert_amount( - context.client(), - epoch, - token, - denom, - amount.amount(), - ) - .await?; - // Make sure to save any decodings of the asset types used so that - // balance queries involving them are successful - let _ = shielded.save().await; - amount - }; - - // If there are shielded inputs - if let Some(sk) = spending_key { - // Locate unspent notes that can help us meet the transaction amount - let (_, unspent_notes, used_convs) = context - .shielded_mut() - .await - .collect_unspent_notes( - context, - &to_viewing_key(&sk).vk, - I128Sum::from_sum(masp_amount), - epoch, - ) - .await?; - // Commit the notes found to our transaction - for (diversifier, note, merkle_path) in unspent_notes { - builder - .add_sapling_spend(sk, diversifier, note, merkle_path) - .map_err(builder::Error::SaplingBuild)?; - } - // Commit the conversion notes used during summation - for (conv, wit, value) in used_convs.values() { - if value.is_positive() { - builder - .add_sapling_convert( - conv.clone(), - *value as u64, - wit.clone(), - ) - .map_err(builder::Error::SaplingBuild)?; - } + for MaspTransferData { + source, + target, + token, + amount, + } in data + { + let spending_key = source.spending_key(); + let payment_address = target.payment_address(); + // No shielded components are needed when 
neither source nor + // destination are shielded + if spending_key.is_none() && payment_address.is_none() { + return Ok(None); } - } else { - // We add a dummy UTXO to our transaction, but only the source of - // the parent Transfer object is used to validate fund - // availability - let source_enc = source - .address() - .ok_or_else(|| { - Error::Other( - "source address should be transparent".to_string(), - ) - })? - .serialize_to_vec(); - - let hash = ripemd::Ripemd160::digest(sha2::Sha256::digest( - source_enc.as_ref(), - )); - let script = TransparentAddress(hash.into()); - for (digit, asset_type) in - MaspDigitPos::iter().zip(asset_types.iter()) + // We want to fund our transaction solely from supplied spending key + let spending_key = spending_key.map(|x| x.into()); { - let amount_part = digit.denominate(&amount.amount()); - // Skip adding an input if its value is 0 - if amount_part != 0 { - builder - .add_transparent_input(TxOut { - asset_type: *asset_type, - value: amount_part, - address: script, - }) - .map_err(builder::Error::TransparentBuild)?; - } + // Load the current shielded context given the spending key we + // possess + let mut shielded = context.shielded_mut().await; + let _ = shielded.load().await; } - } + // Context required for storing which notes are in the source's + // possession + let memo = MemoBytes::empty(); - // Anotate the asset type in the value balance with its decoding in - // order to facilitate cross-epoch computations - let value_balance = builder.value_balance(); - let value_balance = context - .shielded_mut() - .await - .decode_sum(context.client(), value_balance) - .await; + // Now we build up the transaction within this object - // If we are sending to a transparent output, then we will need to embed - // the transparent target address into the shielded transaction so that - // it can be signed - let transparent_target_hash = if payment_address.is_none() { - let target_enc = target - .address() - .ok_or_else(|| { - Error::Other( - "target address should be transparent".to_string(), + // Convert transaction amount into MASP types + let Some(denom) = query_denom(context.client(), &token).await + else { + return Err(TransferErr::General(Error::from( + QueryError::General(format!( + "denomination for token {token}" + )), + ))); + }; + let (asset_types, masp_amount) = { + let mut shielded = context.shielded_mut().await; + // Do the actual conversion to an asset type + let amount = shielded + .convert_amount( + context.client(), + epoch, + &token, + denom, + amount.amount(), ) - })? - .serialize_to_vec(); - Some(ripemd::Ripemd160::digest(sha2::Sha256::digest( - target_enc.as_ref(), - ))) - } else { - None - }; - // This indicates how many more assets need to be sent to the receiver - // in order to satisfy the requested transfer amount. - let mut rem_amount = amount.amount().raw_amount().0; - // If we are sending to a shielded address, we may need the outgoing - // viewing key in the following computations. - let ovk_opt = spending_key.map(|x| x.expsk.ovk); - - // Now handle the outputs of this transaction - // Loop through the value balance components and see which - // ones can be given to the receiver - for ((asset_type, decoded), val) in value_balance.components() { - let rem_amount = &mut rem_amount[decoded.position as usize]; - // Only asset types with the correct token can contribute. But - // there must be a demonstrated need for it. 
- if decoded.token == *token - && decoded.denom == denom - && decoded.epoch.map_or(true, |vbal_epoch| vbal_epoch <= epoch) - && *rem_amount > 0 - { - let val = u128::try_from(*val).expect( - "value balance in absence of output descriptors should be \ - non-negative", - ); - // We want to take at most the remaining quota for the - // current denomination to the receiver - let contr = std::cmp::min(*rem_amount as u128, val) as u64; - // Make transaction output tied to the current token, - // denomination, and epoch. - if let Some(pa) = payment_address { - // If there is a shielded output - builder - .add_sapling_output( - ovk_opt, - pa.into(), - *asset_type, - contr, - memo.clone(), - ) - .map_err(builder::Error::SaplingBuild)?; - } else { - // If there is a transparent output - let hash = transparent_target_hash - .expect( - "transparent target hash should have been \ - computed already", - ) - .into(); + .await?; + // Make sure to save any decodings of the asset types used so + // that balance queries involving them are + // successful + let _ = shielded.save().await; + amount + }; + + // If there are shielded inputs + if let Some(sk) = spending_key { + // Locate unspent notes that can help us meet the transaction + // amount + let (_, unspent_notes, used_convs) = context + .shielded_mut() + .await + .collect_unspent_notes( + context, + &to_viewing_key(&sk).vk, + I128Sum::from_sum(masp_amount), + epoch, + ) + .await?; + // Commit the notes found to our transaction + for (diversifier, note, merkle_path) in unspent_notes { builder - .add_transparent_output( - &TransparentAddress(hash), - *asset_type, - contr, + .add_sapling_spend(sk, diversifier, note, merkle_path) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } + // Commit the conversion notes used during summation + for (conv, wit, value) in used_convs.values() { + if value.is_positive() { + builder + .add_sapling_convert( + conv.clone(), + *value as u64, + wit.clone(), + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } + } + } else { + // We add a dummy UTXO to our transaction, but only the source + // of the parent Transfer object is used to + // validate fund availability + let source_enc = source + .address() + .ok_or_else(|| { + Error::Other( + "source address should be transparent".to_string(), ) - .map_err(builder::Error::TransparentBuild)?; + })? 
+ .serialize_to_vec(); + + let hash = ripemd::Ripemd160::digest(sha2::Sha256::digest( + source_enc.as_ref(), + )); + let script = TransparentAddress(hash.into()); + for (digit, asset_type) in + MaspDigitPos::iter().zip(asset_types.iter()) + { + let amount_part = digit.denominate(&amount.amount()); + // Skip adding an input if its value is 0 + if amount_part != 0 { + builder + .add_transparent_input(TxOut { + asset_type: *asset_type, + value: amount_part, + address: script, + }) + .map_err(|e| TransferErr::Build { + error: builder::Error::TransparentBuild(e), + data: None, + })?; + } } - // Lower what is required of the remaining contribution - *rem_amount -= contr; } - } - // Nothing must remain to be included in output - if rem_amount != [0; 4] { - // Convert the shortfall into a I128Sum - let mut shortfall = I128Sum::zero(); - for (asset_type, val) in asset_types.iter().zip(rem_amount) { - shortfall += I128Sum::from_pair(*asset_type, val.into()); - } - // Return an insufficient ffunds error - return Result::Err(TransferErr::from( - builder::Error::InsufficientFunds(shortfall), - )); - } + // Anotate the asset type in the value balance with its decoding in + // order to facilitate cross-epoch computations + let value_balance = builder.value_balance(); + let value_balance = context + .shielded_mut() + .await + .decode_sum(context.client(), value_balance) + .await; - // Now add outputs representing the change from this payment - if let Some(sk) = spending_key { - // Represents the amount of inputs we are short by - let mut additional = I128Sum::zero(); - for (asset_type, amt) in builder.value_balance().components() { - match amt.cmp(&0) { - Ordering::Greater => { - // Send the change in this asset type back to the sender + // If we are sending to a transparent output, then we will need to + // embed the transparent target address into the + // shielded transaction so that it can be signed + let transparent_target_hash = if payment_address.is_none() { + let target_enc = target + .address() + .ok_or_else(|| { + Error::Other( + "target address should be transparent".to_string(), + ) + })? + .serialize_to_vec(); + Some(ripemd::Ripemd160::digest(sha2::Sha256::digest( + target_enc.as_ref(), + ))) + } else { + None + }; + // This indicates how many more assets need to be sent to the + // receiver in order to satisfy the requested transfer + // amount. + let mut rem_amount = amount.amount().raw_amount().0; + // If we are sending to a shielded address, we may need the outgoing + // viewing key in the following computations. + let ovk_opt = spending_key.map(|x| x.expsk.ovk); + + // Now handle the outputs of this transaction + // Loop through the value balance components and see which + // ones can be given to the receiver + for ((asset_type, decoded), val) in value_balance.components() { + let rem_amount = &mut rem_amount[decoded.position as usize]; + // Only asset types with the correct token can contribute. But + // there must be a demonstrated need for it. + if decoded.token == token + && decoded.denom == denom + && decoded + .epoch + .map_or(true, |vbal_epoch| vbal_epoch <= epoch) + && *rem_amount > 0 + { + let val = u128::try_from(*val).expect( + "value balance in absence of output descriptors \ + should be non-negative", + ); + // We want to take at most the remaining quota for the + // current denomination to the receiver + let contr = std::cmp::min(*rem_amount as u128, val) as u64; + // Make transaction output tied to the current token, + // denomination, and epoch. 
+ if let Some(pa) = payment_address { + // If there is a shielded output builder .add_sapling_output( - Some(sk.expsk.ovk), - sk.default_address().1, + ovk_opt, + pa.into(), *asset_type, - *amt as u64, + contr, memo.clone(), ) - .map_err(builder::Error::SaplingBuild)?; + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } else { + // If there is a transparent output + let hash = transparent_target_hash + .expect( + "transparent target hash should have been \ + computed already", + ) + .into(); + builder + .add_transparent_output( + &TransparentAddress(hash), + *asset_type, + contr, + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::TransparentBuild(e), + data: None, + })?; } - Ordering::Less => { - // Record how much of the current asset type we are - // short by - additional += - I128Sum::from_nonnegative(*asset_type, -*amt) - .map_err(|()| { + // Lower what is required of the remaining contribution + *rem_amount -= contr; + } + } + + // Nothing must remain to be included in output + if rem_amount != [0; 4] { + // Convert the shortfall into a I128Sum + let mut shortfall = I128Sum::zero(); + for (asset_type, val) in asset_types.iter().zip(rem_amount) { + shortfall += I128Sum::from_pair(*asset_type, val.into()); + } + // Return an insufficient funds error + return Result::Err(TransferErr::Build { + error: builder::Error::InsufficientFunds(shortfall), + data: Some(MaspTransferData { + source, + target, + token, + amount, + }), + }); + } + + // Now add outputs representing the change from this payment + if let Some(sk) = spending_key { + // Represents the amount of inputs we are short by + let mut additional = I128Sum::zero(); + for (asset_type, amt) in builder.value_balance().components() { + match amt.cmp(&0) { + Ordering::Greater => { + // Send the change in this asset type back to the + // sender + builder + .add_sapling_output( + Some(sk.expsk.ovk), + sk.default_address().1, + *asset_type, + *amt as u64, + memo.clone(), + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } + Ordering::Less => { + // Record how much of the current asset type we are + // short by + additional += + I128Sum::from_nonnegative(*asset_type, -*amt) + .map_err(|()| { Error::Other(format!( "from non negative conversion: {}", line!() )) })?; + } + Ordering::Equal => {} } - Ordering::Equal => {} } - } - // If we are short by a non-zero amount, then we have insufficient - // funds - if !additional.is_zero() { - return Err(TransferErr::from( - builder::Error::InsufficientFunds(additional), - )); + // If we are short by a non-zero amount, then we have + // insufficient funds + if !additional.is_zero() { + return Result::Err(TransferErr::Build { + error: builder::Error::InsufficientFunds(additional), + data: Some(MaspTransferData { + source, + target, + token, + amount, + }), + }); + } } } @@ -1859,12 +1917,14 @@ impl ShieldedContext { let prover = context.shielded().await.utils.local_tx_prover(); #[cfg(feature = "testing")] let prover = testing::MockTxProver(std::sync::Mutex::new(OsRng)); - let (masp_tx, metadata) = builder.build( - &prover, - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut RngBuildParams::new(OsRng), - )?; + let (masp_tx, metadata) = builder + .build( + &prover, + &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut RngBuildParams::new(OsRng), + ) + .map_err(|error| TransferErr::Build { error, data: None })?; if update_ctx { // Cache the generated transfer diff --git 
a/crates/sdk/src/signing.rs b/crates/sdk/src/signing.rs index 1f652ff544..a92370a3b3 100644 --- a/crates/sdk/src/signing.rs +++ b/crates/sdk/src/signing.rs @@ -710,8 +710,8 @@ enum TransferSide<'a> { enum TokenTransfer<'a> { Transparent(&'a token::TransparentTransfer), Shielded, - Shielding(&'a token::ShieldingTransfer), - Unshielding(&'a token::UnshieldingTransfer), + Shielding(&'a token::ShieldingMultiTransfer), + Unshielding(&'a token::UnshieldingMultiTransfer), } impl TokenTransfer<'_> { @@ -722,9 +722,12 @@ impl TokenTransfer<'_> { .iter() .map(|transfer| &transfer.source) .collect(), - TokenTransfer::Shielded => Default::default(), - TokenTransfer::Shielding(transfer) => vec![&transfer.source], + TokenTransfer::Shielding(transfers) => transfers + .data + .iter() + .map(|transfer| &transfer.source) + .collect(), TokenTransfer::Unshielding(_) => Default::default(), } } @@ -739,9 +742,11 @@ impl TokenTransfer<'_> { TokenTransfer::Shielded => Default::default(), TokenTransfer::Shielding(_) => Default::default(), - TokenTransfer::Unshielding(transfer) => { - vec![&transfer.target] - } + TokenTransfer::Unshielding(transfers) => transfers + .data + .iter() + .map(|transfer| &transfer.target) + .collect(), } } @@ -759,46 +764,20 @@ impl TokenTransfer<'_> { TransferSide::Source(source) if source == &transfer.source => { - match map.get_mut(&transfer.token) { - Some(amount) => { - *amount = amount - .checked_add(transfer.amount) - .ok_or_else(|| { - Error::Other( - "Overflow in amount" - .to_string(), - ) - })?; - } - None => { - map.insert( - &transfer.token, - transfer.amount, - ); - } - } + Self::update_token_amount_map( + &mut map, + &transfer.token, + transfer.amount, + )?; } TransferSide::Target(target) if target == &transfer.target => { - match map.get_mut(&transfer.token) { - Some(amount) => { - *amount = amount - .checked_add(transfer.amount) - .ok_or_else(|| { - Error::Other( - "Overflow in amount" - .to_string(), - ) - })?; - } - None => { - map.insert( - &transfer.token, - transfer.amount, - ); - } - } + Self::update_token_amount_map( + &mut map, + &transfer.token, + transfer.amount, + )?; } _ => (), } @@ -807,14 +786,64 @@ impl TokenTransfer<'_> { map } TokenTransfer::Shielded => Default::default(), - TokenTransfer::Shielding(transfer) => { - [(&transfer.token, transfer.amount)].into_iter().collect() + TokenTransfer::Shielding(transfers) => { + let mut map: HashMap<&Address, DenominatedAmount> = + HashMap::new(); + + if let TransferSide::Source(source_addr) = address { + for transfer in &transfers.data { + if &transfer.source == source_addr { + Self::update_token_amount_map( + &mut map, + &transfer.token, + transfer.amount, + )?; + } + } + } + + map } - TokenTransfer::Unshielding(transfer) => { - [(&transfer.token, transfer.amount)].into_iter().collect() + TokenTransfer::Unshielding(transfers) => { + let mut map: HashMap<&Address, DenominatedAmount> = + HashMap::new(); + + if let TransferSide::Target(target_addr) = address { + for transfer in &transfers.data { + if &transfer.target == target_addr { + Self::update_token_amount_map( + &mut map, + &transfer.token, + transfer.amount, + )?; + } + } + } + + map } }) } + + fn update_token_amount_map<'a>( + map: &mut HashMap<&'a Address, DenominatedAmount>, + token: &'a Address, + amount: DenominatedAmount, + ) -> Result<(), Error> { + match map.get_mut(token) { + Some(prev_amount) => { + *prev_amount = + prev_amount.checked_add(amount).ok_or_else(|| { + Error::Other("Overflow in amount".to_string()) + })?; + } + None => { + 
map.insert(token, amount); + } + } + + Ok(()) + } } /// Adds a Ledger output for the senders and destinations for transparent and @@ -1438,7 +1467,7 @@ pub async fn to_ledger_vector( ) .await?; } else if code_sec.tag == Some(TX_SHIELDING_TRANSFER_WASM.to_string()) { - let transfer = token::ShieldingTransfer::try_from_slice( + let transfer = token::ShieldingMultiTransfer::try_from_slice( &tx.data(cmt) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) @@ -1486,7 +1515,7 @@ pub async fn to_ledger_vector( .await?; } else if code_sec.tag == Some(TX_UNSHIELDING_TRANSFER_WASM.to_string()) { - let transfer = token::UnshieldingTransfer::try_from_slice( + let transfer = token::UnshieldingMultiTransfer::try_from_slice( &tx.data(cmt) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index 17014589a6..4100911fb3 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -65,13 +65,17 @@ use namada_tx::data::{pos, BatchedTxResult, ResultCode, TxResult}; pub use namada_tx::{Authorization, *}; use num_traits::Zero; use rand_core::{OsRng, RngCore}; +use token::ShieldingTransferData; -use crate::args::TxTransparentTransferData; +use crate::args::{ + TxShieldedTransferData, TxShieldingTransferData, TxTransparentTransferData, + TxUnshieldingTransferData, +}; use crate::control_flow::time; use crate::error::{EncodingError, Error, QueryError, Result, TxSubmitError}; use crate::io::Io; use crate::masp::TransferErr::Build; -use crate::masp::{ShieldedContext, ShieldedTransfer}; +use crate::masp::{MaspTransferData, ShieldedContext, ShieldedTransfer}; use crate::queries::Client; use crate::rpc::{ self, get_validator_stake, query_wasm_code_hash, validate_amount, @@ -2514,15 +2518,19 @@ pub async fn build_ibc_transfer( query_wasm_code_hash(context, args.tx_code_path.to_str().unwrap()) .await .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let masp_transfer_data = MaspTransferData { + source: args.source.clone(), + target: TransferTarget::Address(Address::Internal( + InternalAddress::Ibc, + )), + token: args.token.clone(), + amount: validated_amount, + }; // For transfer from a spending key let shielded_parts = construct_shielded_parts( context, - &args.source, - // The token will be escrowed to IBC address - &TransferTarget::Address(Address::Internal(InternalAddress::Ibc)), - &args.token, - validated_amount, + vec![masp_transfer_data], !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await?; @@ -2569,10 +2577,12 @@ pub async fn build_ibc_transfer( let masp_tx_hash = tx.add_masp_tx_section(shielded_transfer.masp_tx.clone()).1; let transfer = token::ShieldingTransfer { - // The token will be escrowed to IBC address - source: source.clone(), - token: args.token.clone(), - amount: validated_amount, + data: ShieldingTransferData { + // The token will be escrowed to IBC address + source: source.clone(), + token: args.token.clone(), + amount: validated_amount, + }, // Link the Transfer to the MASP Transaction by hash code shielded_section_hash: masp_tx_hash, }; @@ -2931,31 +2941,39 @@ pub async fn build_shielded_transfer( context: &N, args: &mut args::TxShieldedTransfer, ) -> Result<(Tx, SigningTxData)> { - let default_signer = Some(MASP); - let signing_data = signing::aux_signing_data( - context, - &args.tx, - Some(MASP), - default_signer, - ) - .await?; + let signing_data = + signing::aux_signing_data(context, &args.tx, Some(MASP), Some(MASP)) + .await?; // Shielded fee payment let fee_amount = validate_fee(context, 
&args.tx).await?; - // Validate the amount given - let validated_amount = - validate_amount(context, args.amount, &args.token, args.tx.force) - .await?; + let mut transfer_data = vec![]; + for TxShieldedTransferData { + source, + target, + token, + amount, + } in &args.data + { + // Validate the amount given + let validated_amount = + validate_amount(context, amount.to_owned(), token, args.tx.force) + .await?; + + transfer_data.push(MaspTransferData { + source: TransferSource::ExtendedSpendingKey(source.to_owned()), + target: TransferTarget::PaymentAddress(target.to_owned()), + token: token.to_owned(), + amount: validated_amount, + }); + } // TODO(namada#2597): this function should also take another arg as the fees // token and amount let shielded_parts = construct_shielded_parts( context, - &TransferSource::ExtendedSpendingKey(args.source), - &TransferTarget::PaymentAddress(args.target), - &args.token, - validated_amount, + transfer_data, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await? @@ -3013,13 +3031,13 @@ pub async fn build_shielding_transfer( context: &N, args: &mut args::TxShieldingTransfer, ) -> Result<(Tx, SigningTxData, MaspEpoch)> { - let source = &args.source; - let default_signer = Some(source.clone()); let signing_data = signing::aux_signing_data( context, &args.tx, - Some(source.clone()), - default_signer, + None, + args.data + .first() + .map(|transfer_data| transfer_data.source.clone()), ) .await?; @@ -3031,38 +3049,57 @@ pub async fn build_shielding_transfer( (fee_amount, Some(updated_balance)) })?; - // Validate the amount given - let validated_amount = - validate_amount(context, args.amount, &args.token, args.tx.force) + let mut transfer_data = vec![]; + let mut data = vec![]; + for TxShieldingTransferData { + source, + token, + amount, + } in &args.data + { + // Validate the amount given + let validated_amount = + validate_amount(context, amount.to_owned(), token, args.tx.force) + .await?; + + // Check the balance of the source + if let Some(updated_balance) = &updated_balance { + let check_balance = if &updated_balance.source == source + && &updated_balance.token == token + { + CheckBalance::Balance(updated_balance.post_balance) + } else { + CheckBalance::Query(balance_key(token, source)) + }; + + check_balance_too_low_err( + token, + source, + validated_amount.amount(), + check_balance, + args.tx.force, + context, + ) .await?; + } - // Check the balance of the source - if let Some(updated_balance) = updated_balance { - let check_balance = if &updated_balance.source == source - && updated_balance.token == args.token - { - CheckBalance::Balance(updated_balance.post_balance) - } else { - CheckBalance::Query(balance_key(&args.token, source)) - }; + transfer_data.push(MaspTransferData { + source: TransferSource::Address(source.to_owned()), + target: TransferTarget::PaymentAddress(args.target), + token: token.to_owned(), + amount: validated_amount, + }); - check_balance_too_low_err( - &args.token, - source, - validated_amount.amount(), - check_balance, - args.tx.force, - context, - ) - .await?; + data.push(token::ShieldingTransferData { + source: source.to_owned(), + token: token.to_owned(), + amount: validated_amount, + }); } let shielded_parts = construct_shielded_parts( context, - &TransferSource::Address(source.clone()), - &TransferTarget::PaymentAddress(args.target), - &args.token, - validated_amount, + transfer_data, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await? 
@@ -3070,7 +3107,7 @@ pub async fn build_shielding_transfer( let shielded_tx_epoch = shielded_parts.0.epoch; let add_shielded_parts = - |tx: &mut Tx, data: &mut token::ShieldingTransfer| { + |tx: &mut Tx, data: &mut token::ShieldingMultiTransfer| { // Add the MASP Transaction and its Builder to facilitate validation let ( ShieldedTransfer { @@ -3100,10 +3137,8 @@ pub async fn build_shielding_transfer( }; // Construct the tx data with a placeholder shielded section hash - let data = token::ShieldingTransfer { - source: source.clone(), - token: args.token.clone(), - amount: validated_amount, + let data = token::ShieldingMultiTransfer { + data, shielded_section_hash: Hash::zero(), }; @@ -3125,38 +3160,52 @@ pub async fn build_unshielding_transfer( context: &N, args: &mut args::TxUnshieldingTransfer, ) -> Result<(Tx, SigningTxData)> { - let default_signer = Some(MASP); - let signing_data = signing::aux_signing_data( - context, - &args.tx, - Some(MASP), - default_signer, - ) - .await?; + let signing_data = + signing::aux_signing_data(context, &args.tx, Some(MASP), Some(MASP)) + .await?; // Shielded fee payment let fee_amount = validate_fee(context, &args.tx).await?; - // Validate the amount given - let validated_amount = - validate_amount(context, args.amount, &args.token, args.tx.force) - .await?; + let mut transfer_data = vec![]; + let mut data = vec![]; + for TxUnshieldingTransferData { + target, + token, + amount, + } in &args.data + { + // Validate the amount given + let validated_amount = + validate_amount(context, amount.to_owned(), token, args.tx.force) + .await?; + + transfer_data.push(MaspTransferData { + source: TransferSource::ExtendedSpendingKey(args.source), + target: TransferTarget::Address(target.to_owned()), + token: token.to_owned(), + amount: validated_amount, + }); + + data.push(token::UnshieldingTransferData { + target: target.to_owned(), + token: token.to_owned(), + amount: validated_amount, + }); + } // TODO(namada#2597): this function should also take another arg as the fees // token and amount let shielded_parts = construct_shielded_parts( context, - &TransferSource::ExtendedSpendingKey(args.source), - &TransferTarget::Address(args.target.clone()), - &args.token, - validated_amount, + transfer_data, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await? 
.expect("Shielding transfer must have shielded parts"); let add_shielded_parts = - |tx: &mut Tx, data: &mut token::UnshieldingTransfer| { + |tx: &mut Tx, data: &mut token::UnshieldingMultiTransfer| { // Add the MASP Transaction and its Builder to facilitate validation let ( ShieldedTransfer { @@ -3186,12 +3235,11 @@ pub async fn build_unshielding_transfer( }; // Construct the tx data with a placeholder shielded section hash - let data = token::UnshieldingTransfer { - target: args.target.clone(), - token: args.token.clone(), - amount: validated_amount, + let data = token::UnshieldingMultiTransfer { + data, shielded_section_hash: Hash::zero(), }; + let tx = build_pow_flag( context, &args.tx, @@ -3208,10 +3256,7 @@ pub async fn build_unshielding_transfer( // Construct the shielded part of the transaction, if any async fn construct_shielded_parts( context: &N, - source: &TransferSource, - target: &TransferTarget, - token: &Address, - amount: token::DenominatedAmount, + data: Vec, update_ctx: bool, ) -> Result)>> { // Precompute asset types to increase chances of success in decoding @@ -3224,14 +3269,23 @@ async fn construct_shielded_parts( .await; let stx_result = ShieldedContext::::gen_shielded_transfer( - context, source, target, token, amount, update_ctx, + context, data, update_ctx, ) .await; let shielded_parts = match stx_result { Ok(Some(stx)) => stx, Ok(None) => return Ok(None), - Err(Build(builder::Error::InsufficientFunds(_))) => { + Err(Build { + error: builder::Error::InsufficientFunds(_), + data, + }) => { + let MaspTransferData { + source, + token, + amount, + .. + } = data.unwrap(); return Err(TxSubmitError::NegativeBalanceAfterTransfer( Box::new(source.effective_address()), amount.to_string(), @@ -3549,13 +3603,16 @@ pub async fn gen_ibc_shielding_transfer( .precompute_asset_types(context.client(), tokens) .await; + let masp_transfer_data = MaspTransferData { + source: TransferSource::Address(source.clone()), + target: args.target, + token: token.clone(), + amount: validated_amount, + }; let shielded_transfer = ShieldedContext::::gen_shielded_transfer( context, - &TransferSource::Address(source.clone()), - &args.target, - &token, - validated_amount, + vec![masp_transfer_data], true, ) .await @@ -3565,9 +3622,11 @@ pub async fn gen_ibc_shielding_transfer( let masp_tx_hash = Section::MaspTx(shielded_transfer.masp_tx.clone()).get_hash(); let transfer = token::ShieldingTransfer { - source: source.clone(), - token: token.clone(), - amount: validated_amount, + data: ShieldingTransferData { + source: source.clone(), + token: token.clone(), + amount: validated_amount, + }, shielded_section_hash: masp_tx_hash, }; Ok(Some((transfer, shielded_transfer.masp_tx))) diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index 4a67f36518..4470d08925 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -146,13 +146,57 @@ pub struct ShieldedTransfer { Serialize, Deserialize, )] -pub struct ShieldingTransfer { +pub struct ShieldingTransferData { /// Source address will spend the tokens pub source: Address, /// Token's address pub token: Address, /// The amount of tokens pub amount: DenominatedAmount, +} + +/// Arguments for a shielding transfer (from a transparent token to a shielded +/// token) +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Hash, + Eq, + PartialOrd, + Serialize, + Deserialize, +)] +pub struct ShieldingTransfer { + /// Transfer-specific data + pub data: ShieldingTransferData, + /// Hash 
of tx section that contains the MASP transaction + pub shielded_section_hash: Hash, +} + +/// Arguments for a multi-source shielding transfer (from a transparent token +/// to a shielded token) +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Hash, + Eq, + PartialOrd, + Serialize, + Deserialize, +)] +pub struct ShieldingMultiTransfer { + /// Transfer-specific data + pub data: Vec<ShieldingTransferData>, /// Hash of tx section that contains the MASP transaction pub shielded_section_hash: Hash, } @@ -173,13 +217,57 @@ pub struct ShieldingTransfer { Serialize, Deserialize, )] -pub struct UnshieldingTransfer { +pub struct UnshieldingTransferData { /// Target address will receive the tokens pub target: Address, /// Token's address pub token: Address, /// The amount of tokens pub amount: DenominatedAmount, +} + +/// Arguments for an unshielding transfer (from a shielded token to a +/// transparent token) +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Hash, + Eq, + PartialOrd, + Serialize, + Deserialize, +)] +pub struct UnshieldingTransfer { + /// Transfer-specific data + pub data: UnshieldingTransferData, + /// Hash of tx section that contains the MASP transaction + pub shielded_section_hash: Hash, +} + +/// Arguments for a multi-source unshielding transfer (from a shielded token to +/// a transparent token) +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Hash, + Eq, + PartialOrd, + Serialize, + Deserialize, +)] +pub struct UnshieldingMultiTransfer { + /// Transfer-specific data + pub data: Vec<UnshieldingTransferData>, /// Hash of tx section that contains the MASP transaction pub shielded_section_hash: Hash, } @@ -199,7 +287,7 @@ pub mod testing { prop_compose!
{ /// Generate a transparent transfer - pub fn arb_transparent_transfer()( + fn arb_transparent_transfer()( source in arb_non_internal_address(), target in arb_non_internal_address(), token in arb_established_address().prop_map(Address::Established), diff --git a/crates/tx_prelude/src/token.rs b/crates/tx_prelude/src/token.rs index ab9ecaf571..cd59dcdcd4 100644 --- a/crates/tx_prelude/src/token.rs +++ b/crates/tx_prelude/src/token.rs @@ -6,7 +6,8 @@ use namada_events::{EmitEvents, EventLevel}; pub use namada_token::testing; pub use namada_token::{ storage_key, utils, Amount, DenominatedAmount, ShieldedTransfer, - ShieldingTransfer, TransparentTransfer, UnshieldingTransfer, + ShieldingMultiTransfer, ShieldingTransfer, TransparentTransfer, + UnshieldingMultiTransfer, UnshieldingTransfer, }; use namada_tx_env::TxEnv; diff --git a/wasm/tx_shielding_transfer/src/lib.rs b/wasm/tx_shielding_transfer/src/lib.rs index 389942686b..499f983b3c 100644 --- a/wasm/tx_shielding_transfer/src/lib.rs +++ b/wasm/tx_shielding_transfer/src/lib.rs @@ -6,20 +6,22 @@ use namada_tx_prelude::*; #[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { let data = ctx.get_tx_data(&tx_data)?; - let transfer = token::ShieldingTransfer::try_from_slice(&data[..]) + let transfers = token::ShieldingMultiTransfer::try_from_slice(&data[..]) .wrap_err("Failed to decode token::ShieldingTransfer tx data")?; - debug_log!("apply_tx called with transfer: {:#?}", transfer); + debug_log!("apply_tx called with transfer: {:#?}", transfers); - token::transfer( - ctx, - &transfer.source, - &address::MASP, - &transfer.token, - transfer.amount.amount(), - ) - .wrap_err("Token transfer failed")?; + for transfer in transfers.data { + token::transfer( + ctx, + &transfer.source, + &address::MASP, + &transfer.token, + transfer.amount.amount(), + ) + .wrap_err("Token transfer failed")?; + } - let masp_section_ref = transfer.shielded_section_hash; + let masp_section_ref = transfers.shielded_section_hash; let shielded = tx_data .tx .get_section(&masp_section_ref) diff --git a/wasm/tx_unshielding_transfer/src/lib.rs b/wasm/tx_unshielding_transfer/src/lib.rs index 79bdac0757..f0bf375c73 100644 --- a/wasm/tx_unshielding_transfer/src/lib.rs +++ b/wasm/tx_unshielding_transfer/src/lib.rs @@ -6,20 +6,22 @@ use namada_tx_prelude::*; #[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { let data = ctx.get_tx_data(&tx_data)?; - let transfer = token::UnshieldingTransfer::try_from_slice(&data[..]) + let transfers = token::UnshieldingMultiTransfer::try_from_slice(&data[..]) .wrap_err("Failed to decode token::UnshieldingTransfer tx data")?; - debug_log!("apply_tx called with transfer: {:#?}", transfer); + debug_log!("apply_tx called with transfer: {:#?}", transfers); - token::transfer( - ctx, - &address::MASP, - &transfer.target, - &transfer.token, - transfer.amount.amount(), - ) - .wrap_err("Token transfer failed")?; + for transfer in transfers.data { + token::transfer( + ctx, + &address::MASP, + &transfer.target, + &transfer.token, + transfer.amount.amount(), + ) + .wrap_err("Token transfer failed")?; + } - let masp_section_ref = transfer.shielded_section_hash; + let masp_section_ref = transfers.shielded_section_hash; let shielded = tx_data .tx .get_section(&masp_section_ref) From 3eff3b4fbd2f5a57940b4b3a1d83b134e9aae9c7 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 3 Jun 2024 20:57:58 +0200 Subject: [PATCH 03/40] Fixes benchmarks --- crates/node/src/bench_utils.rs | 23 ++++++++++++++--------- 1 file changed, 
14 insertions(+), 9 deletions(-) diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index 9a910df04e..bd5503f316 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -76,8 +76,9 @@ use namada::ledger::queries::{ use namada::masp::MaspTxRefs; use namada::state::StorageRead; use namada::token::{ - Amount, DenominatedAmount, ShieldedTransfer, ShieldingTransfer, - ShieldingTransferData, UnshieldingTransfer, UnshieldingTransferData, + Amount, DenominatedAmount, ShieldedTransfer, ShieldingMultiTransfer, + ShieldingTransfer, ShieldingTransferData, UnshieldingMultiTransfer, + UnshieldingTransferData, }; use namada::tx::data::pos::Bond; use namada::tx::data::{ @@ -1127,12 +1128,12 @@ impl BenchShieldedCtx { } else if target.effective_address() == MASP { namada.client().generate_tx( TX_SHIELDING_TRANSFER_WASM, - ShieldingTransfer { - data: ShieldingTransferData { + ShieldingMultiTransfer { + data: vec![ShieldingTransferData { source: source.effective_address(), token: address::testing::nam(), amount: DenominatedAmount::native(amount), - }, + }], shielded_section_hash, }, Some(shielded), @@ -1142,12 +1143,12 @@ impl BenchShieldedCtx { } else { namada.client().generate_tx( TX_UNSHIELDING_TRANSFER_WASM, - UnshieldingTransfer { - data: UnshieldingTransferData { + UnshieldingMultiTransfer { + data: vec![UnshieldingTransferData { target: target.effective_address(), token: address::testing::nam(), amount: DenominatedAmount::native(amount), - }, + }], shielded_section_hash, }, Some(shielded), @@ -1213,10 +1214,14 @@ impl BenchShieldedCtx { timeout_timestamp_on_b: timeout_timestamp, }; - let transfer = ShieldingTransfer::deserialize( + let vectorized_transfer = ShieldingMultiTransfer::deserialize( &mut tx.tx.data(&tx.cmt).unwrap().as_slice(), ) .unwrap(); + let transfer = ShieldingTransfer { + data: vectorized_transfer.data.first().unwrap().to_owned(), + shielded_section_hash: vectorized_transfer.shielded_section_hash, + }; let masp_tx = tx .tx .get_section(&transfer.shielded_section_hash) From 453b21196dea8ed7a96c91f614d9351966b39647 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 4 Jun 2024 11:01:44 +0200 Subject: [PATCH 04/40] Fixes signature generation for vectorized transfers --- crates/sdk/src/tx.rs | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index 4100911fb3..60e6c19074 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -2846,15 +2846,22 @@ pub async fn build_transparent_transfer( // Evaluate signer and fees let (signing_data, fee_amount, updated_balance) = { + let source = if args.data.len() == 1 { + // If only one transfer take its source as the signer + args.data + .first() + .map(|transfer_data| transfer_data.source.clone()) + } else { + // Otherwise the caller is required to pass the public keys in the + // argument + None + }; + let signing_data = signing::aux_signing_data( context, &args.tx, - None, - // If signing keys arg is not provided assume a single transfer and - // take the source - args.data - .first() - .map(|transfer_data| transfer_data.source.clone()), + source.clone(), + source, ) .await?; @@ -3031,15 +3038,19 @@ pub async fn build_shielding_transfer( context: &N, args: &mut args::TxShieldingTransfer, ) -> Result<(Tx, SigningTxData, MaspEpoch)> { - let signing_data = signing::aux_signing_data( - context, - &args.tx, - None, + let source = if args.data.len() == 1 { + // If only one transfer take its 
source as the signer args.data .first() - .map(|transfer_data| transfer_data.source.clone()), - ) - .await?; + .map(|transfer_data| transfer_data.source.clone()) + } else { + // Otherwise the caller is required to pass the public keys in the + // argument + None + }; + let signing_data = + signing::aux_signing_data(context, &args.tx, source.clone(), source) + .await?; // Transparent fee payment let (fee_amount, updated_balance) = From 4a94e0e99ee300833776caeba4073c660f5f0f4d Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 6 Jun 2024 10:48:41 +0200 Subject: [PATCH 05/40] Check no vectorized transfers in cli --- crates/apps_lib/src/client/tx.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/apps_lib/src/client/tx.rs b/crates/apps_lib/src/client/tx.rs index 944e204279..12424f5b5c 100644 --- a/crates/apps_lib/src/client/tx.rs +++ b/crates/apps_lib/src/client/tx.rs @@ -743,6 +743,14 @@ pub async fn submit_transparent_transfer( namada: &impl Namada, args: args::TxTransparentTransfer, ) -> Result<(), error::Error> { + if args.data.len() > 1 { + // TODO(namada#3379): Vectorized transfers are not yet supported in the + // CLI + return Err(error::Error::Other( + "Unexpected vectorized transparent transfer".to_string(), + )); + } + submit_reveal_aux( namada, args.tx.clone(), From 9d6c19d1cb421b2bf8f06ebc2f8cbfe50220718c Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 6 Jun 2024 10:49:46 +0200 Subject: [PATCH 06/40] Misc improvements to signing for vectorized transfers --- crates/sdk/src/signing.rs | 44 +++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/crates/sdk/src/signing.rs b/crates/sdk/src/signing.rs index a92370a3b3..aa9417c36f 100644 --- a/crates/sdk/src/signing.rs +++ b/crates/sdk/src/signing.rs @@ -759,27 +759,28 @@ impl TokenTransfer<'_> { let mut map: HashMap<&Address, DenominatedAmount> = HashMap::new(); - for transfer in &transfers.0 { - match address { - TransferSide::Source(source) - if source == &transfer.source => - { - Self::update_token_amount_map( - &mut map, - &transfer.token, - transfer.amount, - )?; + match address { + TransferSide::Source(source) => { + for transfer in &transfers.0 { + if source == &transfer.source { + Self::update_token_amount_map( + &mut map, + &transfer.token, + transfer.amount, + )?; + } } - TransferSide::Target(target) - if target == &transfer.target => - { - Self::update_token_amount_map( - &mut map, - &transfer.token, - transfer.amount, - )?; + } + TransferSide::Target(target) => { + for transfer in &transfers.0 { + if target == &transfer.target { + Self::update_token_amount_map( + &mut map, + &transfer.token, + transfer.amount, + )?; + } } - _ => (), } } @@ -832,10 +833,7 @@ impl TokenTransfer<'_> { ) -> Result<(), Error> { match map.get_mut(token) { Some(prev_amount) => { - *prev_amount = - prev_amount.checked_add(amount).ok_or_else(|| { - Error::Other("Overflow in amount".to_string()) - })?; + *prev_amount = checked!(prev_amount + amount)?; } None => { map.insert(token, amount); From 561e4e89e8ed9b7a843c5b26f6a6bde3e2d35b75 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 6 Jun 2024 10:52:46 +0200 Subject: [PATCH 07/40] Avoids reloading shielded context --- crates/sdk/src/masp.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs index 0ae8d00e3b..bed3d1cf52 100644 --- a/crates/sdk/src/masp.rs +++ b/crates/sdk/src/masp.rs @@ -1612,6 +1612,7 @@ impl ShieldedContext { // Determine 
epoch in which to submit potential shielded transaction let epoch = rpc::query_masp_epoch(context.client()).await?; + let mut is_context_loaded = false; for MaspTransferData { source, target, @@ -1629,10 +1630,13 @@ impl ShieldedContext { // We want to fund our transaction solely from supplied spending key let spending_key = spending_key.map(|x| x.into()); { - // Load the current shielded context given the spending key we - // possess - let mut shielded = context.shielded_mut().await; - let _ = shielded.load().await; + if !is_context_loaded { + // Load the current shielded context (at most once) given + // the spending key we possess + let mut shielded = context.shielded_mut().await; + let _ = shielded.load().await; + is_context_loaded = true; + } } // Context required for storing which notes are in the source's // possession From 3a77b4a600481e32275ab696158e53826c9a2f24 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 6 Jun 2024 11:28:24 +0200 Subject: [PATCH 08/40] Fmt --- crates/sdk/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index c4778e285d..7d83a93895 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -613,8 +613,8 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { args: &args::Tx, signing_data: SigningTxData, with: impl Fn(Tx, common::PublicKey, HashSet, D) -> F - + MaybeSend - + MaybeSync, + + MaybeSend + + MaybeSync, user_data: D, ) -> crate::error::Result<()> where From 1e98403560a297f56f38ff67397a4bb9182068aa Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 4 Jun 2024 11:05:19 +0200 Subject: [PATCH 09/40] Changelog #3356 --- .changelog/unreleased/features/3356-vectorize-transfers.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/features/3356-vectorize-transfers.md diff --git a/.changelog/unreleased/features/3356-vectorize-transfers.md b/.changelog/unreleased/features/3356-vectorize-transfers.md new file mode 100644 index 0000000000..3b7f8e66da --- /dev/null +++ b/.changelog/unreleased/features/3356-vectorize-transfers.md @@ -0,0 +1,2 @@ +- Reworked transparent and masp transfers to allow for multiple sources, targets, + tokens and amounts. 
([\#3356](https://github.com/anoma/namada/pull/3356)) \ No newline at end of file From 69980bec7248a9154dcd4d0d704b17e5ab8cab4d Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 29 May 2024 16:35:38 +0200 Subject: [PATCH 10/40] Introduces masp fee payment --- crates/namada/src/ledger/governance/mod.rs | 42 +- crates/namada/src/ledger/mod.rs | 10 +- .../ledger/native_vp/ethereum_bridge/nut.rs | 2 +- .../ledger/native_vp/ethereum_bridge/vp.rs | 6 +- crates/namada/src/ledger/native_vp/ibc/mod.rs | 38 +- crates/namada/src/ledger/protocol/mod.rs | 317 +++++++++---- crates/namada/src/vm/wasm/run.rs | 108 ++--- crates/node/src/shell/finalize_block.rs | 434 ++++++++---------- crates/node/src/shell/prepare_proposal.rs | 15 +- crates/node/src/shell/process_proposal.rs | 15 +- crates/shielded_token/src/utils.rs | 16 +- crates/tests/src/vm_host_env/mod.rs | 152 +++--- crates/tx/src/types.rs | 9 +- 13 files changed, 639 insertions(+), 525 deletions(-) diff --git a/crates/namada/src/ledger/governance/mod.rs b/crates/namada/src/ledger/governance/mod.rs index 93d566f9c4..ea29733dea 100644 --- a/crates/namada/src/ledger/governance/mod.rs +++ b/crates/namada/src/ledger/governance/mod.rs @@ -1334,7 +1334,7 @@ mod test { [(0, keypair_1())].into_iter().collect(), None, ))); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, @@ -1589,7 +1589,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1685,7 +1685,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1784,7 +1784,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1882,7 +1882,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1960,7 +1960,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2038,7 +2038,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2134,7 +2134,7 @@ mod test { true, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2230,7 +2230,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2308,7 +2308,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2359,7 +2359,7 @@ mod test { verifiers.clear(); verifiers.insert(validator_address); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2437,7 +2437,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2488,7 +2488,7 @@ mod test { verifiers.clear(); verifiers.insert(validator_address); - let batched_tx = tx.batch_ref_first_tx(); + let 
batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2566,7 +2566,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2617,7 +2617,7 @@ mod test { verifiers.clear(); verifiers.insert(validator_address); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2695,7 +2695,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2763,7 +2763,7 @@ mod test { verifiers.clear(); verifiers.insert(delegator_address); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2841,7 +2841,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2909,7 +2909,7 @@ mod test { verifiers.clear(); verifiers.insert(delegator_address); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2987,7 +2987,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -3055,7 +3055,7 @@ mod test { verifiers.clear(); verifiers.insert(validator_address); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, diff --git a/crates/namada/src/ledger/mod.rs b/crates/namada/src/ledger/mod.rs index 2f952e4f99..6174b4a65c 100644 --- a/crates/namada/src/ledger/mod.rs +++ b/crates/namada/src/ledger/mod.rs @@ -60,15 +60,23 @@ mod dry_run_tx { let gas_limit = Gas::try_from(wrapper.gas_limit).into_storage_result()?; let tx_gas_meter = RefCell::new(TxGasMeter::new(gas_limit)); + let mut shell_params = ShellParams::new( + &tx_gas_meter, + &mut temp_state, + &mut ctx.vp_wasm_cache, + &mut ctx.tx_wasm_cache, + ); let tx_result = protocol::apply_wrapper_tx( &tx, &wrapper, &request.data, &tx_gas_meter, - &mut temp_state, + &mut shell_params, None, ) .into_storage_result()?; + // FIXME: if fees were paid with first inner tx skip it when + // executing the batch temp_state.write_log_mut().commit_tx(); let available_gas = tx_gas_meter.borrow().get_available_gas(); diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs index e97a7cd92c..f24c4e0e49 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs @@ -202,7 +202,7 @@ mod test_nuts { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::<_, WasmCacheRwAccess>::new( &Address::Internal(InternalAddress::Nut(DAI_ERC20_ETH_ADDRESS)), &state, diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs index e9d9592331..21e89fcfa2 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -393,7 +393,7 @@ mod tests { let gas_meter = 
RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let vp = EthBridge { ctx: setup_ctx( batched_tx.tx, @@ -447,7 +447,7 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let vp = EthBridge { ctx: setup_ctx( batched_tx.tx, @@ -504,7 +504,7 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let vp = EthBridge { ctx: setup_ctx( batched_tx.tx, diff --git a/crates/namada/src/ledger/native_vp/ibc/mod.rs b/crates/namada/src/ledger/native_vp/ibc/mod.rs index 3f5bc24cc0..75f40e91e2 100644 --- a/crates/namada/src/ledger/native_vp/ibc/mod.rs +++ b/crates/namada/src/ledger/native_vp/ibc/mod.rs @@ -952,7 +952,7 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1028,7 +1028,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1153,7 +1153,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1262,7 +1262,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1356,7 +1356,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1476,7 +1476,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1586,7 +1586,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1681,7 +1681,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1804,7 +1804,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1926,7 +1926,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = 
outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2033,7 +2033,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2135,7 +2135,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2296,7 +2296,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2507,7 +2507,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2662,7 +2662,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2824,7 +2824,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2987,7 +2987,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -3165,7 +3165,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -3399,7 +3399,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index b19098e18d..f4a9e87618 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -12,8 +12,8 @@ use namada_events::extend::{ }; use namada_events::EventLevel; use namada_gas::TxGasMeter; -use namada_state::StorageWrite; use namada_token::event::{TokenEvent, TokenOperation, UserAccount}; +use namada_token::utils::is_masp_transfer; use namada_tx::data::protocol::{ProtocolTx, ProtocolTxType}; use namada_tx::data::{ BatchResults, BatchedTxResult, ExtendedTxResult, TxResult, VpStatusFlags, @@ -199,6 +199,10 @@ pub enum DispatchArgs<'a, CA: 'static + WasmCacheAccess + Sync> { tx_bytes: &'a [u8], /// The block proposer block_proposer: &'a Address, + /// Vp cache + vp_wasm_cache: &'a mut VpCache, + /// Tx cache + tx_wasm_cache: &'a mut TxCache, }, } @@ -292,13 +296,22 @@ where wrapper, tx_bytes, block_proposer, + vp_wasm_cache, + tx_wasm_cache, } => { + let mut shell_params = ShellParams::new( + tx_gas_meter, + state, + vp_wasm_cache, + tx_wasm_cache, + ); + let tx_result = 
apply_wrapper_tx( tx, wrapper, tx_bytes, tx_gas_meter, - state, + &mut shell_params, Some(block_proposer), ) .map_err(|e| Error::WrapperRunnerError(e.to_string()))?; @@ -400,29 +413,30 @@ where /// - gas accounting // TODO(namada#2597): this must signal to the caller if we need masp fee payment // in the first inner tx of the batch -pub(crate) fn apply_wrapper_tx( +pub(crate) fn apply_wrapper_tx( tx: &Tx, wrapper: &WrapperTx, tx_bytes: &[u8], tx_gas_meter: &RefCell, - state: &mut S, + shell_params: &mut ShellParams<'_, S, D, H, CA>, block_proposer: Option<&Address>, ) -> Result> where S: State + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, { - let wrapper_tx_hash = tx.header_hash(); - // Write wrapper tx hash to storage - state + shell_params + .state .write_log_mut() - .write_tx_hash(wrapper_tx_hash) + .write_tx_hash(tx.header_hash()) .expect("Error while writing tx hash to storage"); // Charge fee before performing any fallible operations - charge_fee(wrapper, wrapper_tx_hash, state, block_proposer)?; + charge_fee(shell_params, tx, wrapper, block_proposer)?; + // FIXME: if fees were paid with first inner tx signal it to the caller // Account for gas tx_gas_meter @@ -440,47 +454,59 @@ where /// - Fee amount overflows /// - Not enough funds are available to pay the entire amount of the fee /// - The accumulated fee amount to be credited to the block proposer overflows -fn charge_fee( +fn charge_fee( + shell_params: &mut ShellParams<'_, S, D, H, CA>, + tx: &Tx, wrapper: &WrapperTx, - wrapper_tx_hash: Hash, - state: &mut S, block_proposer: Option<&Address>, ) -> Result<()> where S: State + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, { // Charge or check fees before propagating any possible error let payment_result = match block_proposer { Some(block_proposer) => { - transfer_fee(state, block_proposer, wrapper, wrapper_tx_hash) + transfer_fee(shell_params, block_proposer, tx, wrapper) } None => { - check_fees(state, wrapper)?; + check_fees(shell_params.state, wrapper)?; Ok(()) } }; - // Commit tx write log even in case of subsequent errors - state.write_log_mut().commit_tx(); + // FIXME: where should we commit? To block or batch? + match payment_result { + // Commit tx write log even in case of subsequent errors + Ok(()) | Err(Error::FeeError(_)) => { + shell_params.state.write_log_mut().commit_tx() + } + // FIXME: correct to drop here? I can probably avoid cause I drop later + // anyway + _ => shell_params.state.write_log_mut().drop_tx(), + } payment_result } /// Perform the actual transfer of fees from the fee payer to the block /// proposer. 
-pub fn transfer_fee( - state: &mut S, +pub fn transfer_fee( + shell_params: &mut ShellParams<'_, S, D, H, CA>, block_proposer: &Address, + tx: &Tx, wrapper: &WrapperTx, - wrapper_tx_hash: Hash, ) -> Result<()> where - S: State + StorageRead + StorageWrite, + S: State + StorageRead + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, { let balance = crate::token::read_balance( - state, + shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), ) @@ -491,16 +517,23 @@ where match wrapper.get_tx_fee() { Ok(fees) => { - let fees = - crate::token::denom_to_amount(fees, &wrapper.fee.token, state) - .map_err(|e| Error::FeeError(e.to_string()))?; + let fees = crate::token::denom_to_amount( + fees, + &wrapper.fee.token, + shell_params.state, + ) + .map_err(|e| Error::FeeError(e.to_string()))?; - let current_block_height = - state.in_mem().get_last_block_height().next_height(); + let current_block_height = shell_params + .state + .in_mem() + .get_last_block_height() + .next_height(); - if let Some(post_bal) = balance.checked_sub(fees) { + // FIXME: refactor + let post_bal = if let Some(post_bal) = balance.checked_sub(fees) { token_transfer( - state, + shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), block_proposer, @@ -508,90 +541,96 @@ where ) .map_err(|e| Error::FeeError(e.to_string()))?; - let target_post_balance = Some( - namada_token::read_balance( - state, + Some(post_bal) + } else { + // See if the first inner transaction of the batch pays the fees + // with a masp unshield + let is_valid_masp_transaction = + try_masp_fee_payment(shell_params, tx); + if let Ok(true) = is_valid_masp_transaction { + let balance = crate::token::read_balance( + shell_params.state, &wrapper.fee.token, - block_proposer, + &wrapper.fee_payer(), ) - .map_err(Error::StorageError)? - .into(), - ); + .unwrap(); + + if let Some(post_bal) = balance.checked_sub(fees) { + token_transfer( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + block_proposer, + fees, + ) + .map_err(|e| Error::FeeError(e.to_string()))?; - state.write_log_mut().emit_event( - TokenEvent { - descriptor: FEE_PAYMENT_DESCRIPTOR, - level: EventLevel::Tx, - token: wrapper.fee.token.clone(), - operation: TokenOperation::Transfer { - amount: fees.into(), - source: UserAccount::Internal(wrapper.fee_payer()), - target: UserAccount::Internal( - block_proposer.clone(), - ), - source_post_balance: post_bal.into(), - target_post_balance, - }, + Some(post_bal) + } else { + None } - .with(HeightAttr(current_block_height)) - .with(TxHashAttr(wrapper_tx_hash)), - ); + } else { + None + } + }; - Ok(()) - } else { + if post_bal.is_none() { // Balance was insufficient for fee payment, move all the // available funds in the transparent balance of // the fee payer. tracing::error!( "Transfer of tx fee cannot be applied to due to \ insufficient funds. Falling back to transferring the \ - available balance which is less than the fee." + available balance which is less than the fee. This \ + shouldn't happen." ); token_transfer( - state, + shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), block_proposer, balance, ) .map_err(|e| Error::FeeError(e.to_string()))?; + } - let target_post_balance = Some( - namada_token::read_balance( - state, - &wrapper.fee.token, - block_proposer, - ) - .map_err(Error::StorageError)? 
- .into(), - ); + let target_post_balance = Some( + namada_token::read_balance( + shell_params.state, + &wrapper.fee.token, + block_proposer, + ) + .map_err(Error::StorageError)? + .into(), + ); - state.write_log_mut().emit_event( - TokenEvent { - descriptor: FEE_PAYMENT_DESCRIPTOR, - level: EventLevel::Tx, - token: wrapper.fee.token.clone(), - operation: TokenOperation::Transfer { - amount: balance.into(), - source: UserAccount::Internal(wrapper.fee_payer()), - target: UserAccount::Internal( - block_proposer.clone(), - ), - source_post_balance: namada_core::uint::ZERO, - target_post_balance, - }, - } - .with(HeightAttr(current_block_height)) - .with(TxHashAttr(wrapper_tx_hash)), - ); + shell_params.state.write_log_mut().emit_event( + TokenEvent { + descriptor: FEE_PAYMENT_DESCRIPTOR, + level: EventLevel::Tx, + token: wrapper.fee.token.clone(), + operation: TokenOperation::Transfer { + amount: fees.into(), + source: UserAccount::Internal(wrapper.fee_payer()), + target: UserAccount::Internal(block_proposer.clone()), + source_post_balance: post_bal + .map(namada_core::uint::Uint::from) + .unwrap_or(namada_core::uint::ZERO), + target_post_balance, + }, + } + .with(HeightAttr(current_block_height)) + .with(TxHashAttr(tx.header_hash())), + ); - Err(Error::FeeError( + post_bal.map(|_| ()).ok_or_else(|| { + Error::FeeError( "Transparent balance of wrapper's signer was insufficient \ to pay fee. All the available transparent funds have \ - been moved to the block proposer" + been moved to the block proposer. This shouldn't happen." .to_string(), - )) - } + ) + }) } Err(e) => { // Fee overflow. This shouldn't happen as it should be prevented @@ -606,6 +645,107 @@ where } } +// FIXME: add tests +fn try_masp_fee_payment( + ShellParams { + tx_gas_meter, + state, + vp_wasm_cache, + tx_wasm_cache, + }: &mut ShellParams<'_, S, D, H, CA>, + tx: &Tx, +) -> Result +where + S: State + StorageRead + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, +{ + // The fee payment is subject to a gas limit imposed by a protocol + // parameter. Here we instantiate a custom gas meter for this step and + // initialize it with the already consumed gas. The gas limit should + // actually be the lowest between the protocol parameter and the actual gas + // limit of the transaction + let min_gas_limit = state + .read::( + &namada_parameters::storage::get_fee_unshielding_gas_limit_key(), + ) + .expect("Error reading the storage") + .expect("Missing masp fee payment gas limit in storage") + .min(tx_gas_meter.borrow().tx_gas_limit.into()); + + let mut gas_meter = TxGasMeter::new(min_gas_limit); + gas_meter + .copy_consumed_gas_from(&tx_gas_meter.borrow()) + .map_err(|e| Error::GasError(e.to_string()))?; + let ref_unshield_gas_meter = RefCell::new(gas_meter); + + // FIXME: call dispatch_tx after merge + let is_valid_masp_transaction = { + // FIXME: review if using the batch write log instead of the precommit + // here NOTE: A clean tx write log must be provided to this call + // for a correct vp validation. Block write log, instead, + // should contain any prior changes (if any). This is to simulate + // the unshielding tx (to prevent the already written + // keys from being passed/triggering VPs) but we cannot + // commit the tx write log yet cause the tx could still + // be invalid. 
+ state.write_log_mut().precommit_tx(); + match apply_wasm_tx( + tx.batch_ref_first_tx() + .ok_or_else(|| Error::MissingInnerTxs)?, + // FIXME: pass the correct index here? Probably there's no need but + // it would be better + &TxIndex::default(), + ShellParams { + tx_gas_meter: &ref_unshield_gas_meter, + state: *state, + vp_wasm_cache, + tx_wasm_cache, + }, + ) { + Ok(result) => { + // NOTE: do not commit yet cause this could be + // exploited to get free masp operations. We can commit only + // after the entire wrapper has been deemed valid + // FIXME: could I precommit here instead? + if !result.is_accepted() { + state.write_log_mut().drop_tx_keep_precommit(); + tracing::error!( + "The fee unshielding tx is invalid, some VPs rejected \ it: {:#?}", + result.vps_result.rejected_vps + ); + } + + // Ensure that the transaction is actually a masp one, otherwise + // reject + is_masp_transfer(&result.changed_keys) && result.is_accepted() + } + Err(e) => { + state.write_log_mut().drop_tx_keep_precommit(); + tracing::error!( + "The fee unshielding tx is invalid, wasm run failed: {}", + e + ); + if let Error::GasError(_) = e { + // Propagate only if it is a gas error + return Err(e); + } + + false + } + } + }; + + tx_gas_meter + .borrow_mut() + .copy_consumed_gas_from(&ref_unshield_gas_meter.borrow()) + .map_err(|e| Error::GasError(e.to_string()))?; + + Ok(is_valid_masp_transaction) +} + /// Transfer `token` from `src` to `dest`. Returns an `Err` if `src` has /// insufficient balance or if the transfer the `dest` would overflow (This can /// only happen if the total supply doesn't fit in `token::Amount`). Contrary to @@ -657,6 +797,7 @@ where } /// Check if the fee payer has enough transparent balance to pay fees +// FIXME: here I should check fee unshielding pub fn check_fees(state: &S, wrapper: &WrapperTx) -> Result<()> where S: State + StorageRead, @@ -1364,7 +1505,7 @@ mod tests { // gas meter with no gas left let gas_meter = TxGasMeter::new(0); - let batched_tx = dummy_tx.batch_ref_first_tx(); + let batched_tx = dummy_tx.batch_ref_first_tx().unwrap(); let result = execute_vps( verifiers, changed_keys, diff --git a/crates/namada/src/vm/wasm/run.rs b/crates/namada/src/vm/wasm/run.rs index eb54aa62de..39ad8713e8 100644 --- a/crates/namada/src/vm/wasm/run.rs +++ b/crates/namada/src/vm/wasm/run.rs @@ -1120,7 +1120,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_code.clone(), None)); outer_tx.set_data(Data::new(tx_data)); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let result = tx( &mut state, &gas_meter, @@ -1138,7 +1138,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let error = tx( &mut state, &gas_meter, @@ -1207,20 +1207,18 @@ mod tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); // When the `eval`ed VP doesn't run out of memory, it should return // `true` - assert!( - vp( - code_hash, - &outer_tx.batch_ref_first_tx(), - &tx_index, - &addr, - &state, - &gas_meter, - &keys_changed, - &verifiers, - vp_cache.clone(), - ) - .is_ok() - ); + assert!(vp( + code_hash, + &outer_tx.batch_ref_first_tx().unwrap(), + &tx_index, + &addr, + &state, + &gas_meter, + &keys_changed, + &verifiers, + vp_cache.clone(), + ) + .is_ok()); // Allocating `2^24` (16 MiB)
should be above the memory limit and // should fail @@ -1239,20 +1237,18 @@ mod tests { // When the `eval`ed VP runs out of memory, its result should be // `false`, hence we should also get back `false` from the VP that // called `eval`. - assert!( - vp( - code_hash, - &outer_tx.batch_ref_first_tx(), - &tx_index, - &addr, - &state, - &gas_meter, - &keys_changed, - &verifiers, - vp_cache, - ) - .is_err() - ); + assert!(vp( + code_hash, + &outer_tx.batch_ref_first_tx().unwrap(), + &tx_index, + &addr, + &state, + &gas_meter, + &keys_changed, + &verifiers, + vp_cache, + ) + .is_err()); } /// Test that when a validity predicate wasm goes over the memory limit @@ -1291,7 +1287,7 @@ mod tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1310,7 +1306,7 @@ mod tests { outer_tx.set_data(Data::new(tx_data)); let error = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1359,7 +1355,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_no_op, None)); outer_tx.set_data(Data::new(tx_data)); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let result = tx( &mut state, &gas_meter, @@ -1424,7 +1420,7 @@ mod tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1495,7 +1491,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_read_key, None)); outer_tx.set_data(Data::new(tx_data)); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let error = tx( &mut state, &gas_meter, @@ -1552,7 +1548,7 @@ mod tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let error = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1625,20 +1621,18 @@ mod tests { outer_tx.add_code(vec![], None).add_data(eval_vp); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); - assert!( - vp( - code_hash, - &outer_tx.batch_ref_first_tx(), - &tx_index, - &addr, - &state, - &gas_meter, - &keys_changed, - &verifiers, - vp_cache, - ) - .is_err() - ); + assert!(vp( + code_hash, + &outer_tx.batch_ref_first_tx().unwrap(), + &tx_index, + &addr, + &state, + &gas_meter, + &keys_changed, + &verifiers, + vp_cache, + ) + .is_err()); } #[test] @@ -1672,7 +1666,7 @@ mod tests { wrapper_tx.add_serialized_data(vec![]); let mut raw_tx = wrapper_tx.clone(); raw_tx.update_header(TxType::Raw); - let batched_tx = wrapper_tx.batch_ref_first_tx(); + let batched_tx = wrapper_tx.batch_ref_first_tx().unwrap(); // Check that using a disallowed wrapper tx leads to an error, but a raw // tx is ok even if not allowlisted @@ -1686,7 +1680,7 @@ mod tests { let result = check_tx_allowed(&batched_tx, &state); assert_matches!(result.unwrap_err(), Error::DisallowedTx); - let batched_raw_tx = raw_tx.batch_ref_first_tx(); + let batched_raw_tx = raw_tx.batch_ref_first_tx().unwrap(); let result = check_tx_allowed(&batched_raw_tx, &state); if let Err(result) = result { assert!(!matches!(result, Error::DisallowedTx)); @@ -1737,7 +1731,7 @@ mod tests { let mut outer_tx = 
Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_code.clone(), None)); outer_tx.set_data(Data::new(vec![])); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let result = tx( &mut state, &gas_meter, @@ -1778,7 +1772,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_code.clone(), None)); outer_tx.set_data(Data::new(vec![])); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let result = tx( &mut state, &gas_meter, @@ -1821,7 +1815,7 @@ mod tests { outer_tx.set_data(Data::new(vec![])); let result = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1864,7 +1858,7 @@ mod tests { outer_tx.set_data(Data::new(vec![])); let result = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -2012,7 +2006,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::from_hash(code_hash, None)); outer_tx.set_data(Data::new(tx_data)); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); tx( &mut state, diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index 7adbcd17ba..d807d46366 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -701,6 +701,8 @@ where wrapper, tx_bytes: processed_tx.tx.as_ref(), block_proposer: native_block_proposer_address, + vp_wasm_cache: &mut self.vp_wasm_cache, + tx_wasm_cache: &mut self.tx_wasm_cache, }, tx_gas_meter, ) @@ -1355,7 +1357,11 @@ mod test_finalize_block { .enumerate() .find_map( |(idx, tx_hash)| { - if tx_hash == &hash { Some(idx) } else { None } + if tx_hash == &hash { + Some(idx) + } else { + None + } }, ) .unwrap(); @@ -2970,25 +2976,21 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check transaction's hash in storage - assert!( - shell - .shell - .state - .write_log() - .has_replay_protection_entry(&wrapper_tx.raw_header_hash()) - ); + assert!(shell + .shell + .state + .write_log() + .has_replay_protection_entry(&wrapper_tx.raw_header_hash())); // Check that the hash is not present in the merkle tree shell.state.commit_block().unwrap(); - assert!( - !shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&wrapper_hash_key) - .unwrap() - ); + assert!(!shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&wrapper_hash_key) + .unwrap()); // test that a commitment to replay protection gets added. 
let reprot_key = replay_protection::commitment_key(); @@ -3035,26 +3037,22 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check that the hashes are present in the merkle tree shell.state.commit_block().unwrap(); - assert!( - shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&convert_key) - .unwrap() - ); - assert!( - shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&commitment_key) - .unwrap() - ); + assert!(shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&convert_key) + .unwrap()); + assert!(shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&commitment_key) + .unwrap()); } /// Test that a tx that has already been applied in the same block @@ -3132,34 +3130,26 @@ mod test_finalize_block { assert_eq!(code, ResultCode::WasmRuntimeError); for wrapper in [&wrapper, &new_wrapper] { - assert!( - shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.raw_header_hash()) - ); - assert!( - !shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.header_hash()) - ); + assert!(shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.raw_header_hash())); + assert!(!shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.header_hash())); } // Commit to check the hashes from storage shell.commit(); for wrapper in [&wrapper, &new_wrapper] { - assert!( - shell - .state - .has_replay_protection_entry(&wrapper.raw_header_hash()) - .unwrap() - ); - assert!( - !shell - .state - .has_replay_protection_entry(&wrapper.header_hash()) - .unwrap() - ); + assert!(shell + .state + .has_replay_protection_entry(&wrapper.raw_header_hash()) + .unwrap()); + assert!(!shell + .state + .has_replay_protection_entry(&wrapper.header_hash()) + .unwrap()); } } @@ -3442,29 +3432,23 @@ mod test_finalize_block { &unsigned_wrapper, &wrong_commitment_wrapper, ] { - assert!( - !shell.state.write_log().has_replay_protection_entry( - &valid_wrapper.raw_header_hash() - ) - ); - assert!( - shell - .state - .write_log() - .has_replay_protection_entry(&valid_wrapper.header_hash()) - ); - } - assert!( - shell.state.write_log().has_replay_protection_entry( - &failing_wrapper.raw_header_hash() - ) - ); - assert!( - !shell + assert!(!shell .state .write_log() - .has_replay_protection_entry(&failing_wrapper.header_hash()) - ); + .has_replay_protection_entry(&valid_wrapper.raw_header_hash())); + assert!(shell + .state + .write_log() + .has_replay_protection_entry(&valid_wrapper.header_hash())); + } + assert!(shell + .state + .write_log() + .has_replay_protection_entry(&failing_wrapper.raw_header_hash())); + assert!(!shell + .state + .write_log() + .has_replay_protection_entry(&failing_wrapper.header_hash())); // Commit to check the hashes from storage shell.commit(); @@ -3473,33 +3457,23 @@ mod test_finalize_block { unsigned_wrapper, wrong_commitment_wrapper, ] { - assert!( - !shell - .state - .has_replay_protection_entry( - &valid_wrapper.raw_header_hash() - ) - .unwrap() - ); - assert!( - shell - .state - .has_replay_protection_entry(&valid_wrapper.header_hash()) - .unwrap() - ); - } - assert!( - shell + assert!(!shell .state - .has_replay_protection_entry(&failing_wrapper.raw_header_hash()) - .unwrap() - ); - assert!( - !shell + .has_replay_protection_entry(&valid_wrapper.raw_header_hash()) + .unwrap()); + assert!(shell .state - .has_replay_protection_entry(&failing_wrapper.header_hash()) - .unwrap() - ); + .has_replay_protection_entry(&valid_wrapper.header_hash()) + .unwrap()); + } + assert!(shell + .state + 
.has_replay_protection_entry(&failing_wrapper.raw_header_hash()) + .unwrap()); + assert!(!shell + .state + .has_replay_protection_entry(&failing_wrapper.header_hash()) + .unwrap()); } #[test] @@ -3559,18 +3533,14 @@ mod test_finalize_block { let code = event[0].read_attribute::().expect("Test failed"); assert_eq!(code, ResultCode::InvalidTx); - assert!( - shell - .state - .write_log() - .has_replay_protection_entry(&wrapper_hash) - ); - assert!( - !shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.raw_header_hash()) - ); + assert!(shell + .state + .write_log() + .has_replay_protection_entry(&wrapper_hash)); + assert!(!shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.raw_header_hash())); } // Test that the fees are paid even if the inner transaction fails and its @@ -3968,11 +3938,9 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Consensus) ); - assert!( - enqueued_slashes_handle() - .at(&Epoch::default()) - .is_empty(&shell.state)? - ); + assert!(enqueued_slashes_handle() + .at(&Epoch::default()) + .is_empty(&shell.state)?); assert_eq!( get_num_consensus_validators(&shell.state, Epoch::default()) .unwrap(), @@ -3991,21 +3959,17 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Jailed) ); - assert!( - enqueued_slashes_handle() - .at(&epoch) - .is_empty(&shell.state)? - ); + assert!(enqueued_slashes_handle() + .at(&epoch) + .is_empty(&shell.state)?); assert_eq!( get_num_consensus_validators(&shell.state, epoch).unwrap(), 5_u64 ); } - assert!( - !enqueued_slashes_handle() - .at(&processing_epoch) - .is_empty(&shell.state)? - ); + assert!(!enqueued_slashes_handle() + .at(&processing_epoch) + .is_empty(&shell.state)?); // Advance to the processing epoch loop { @@ -4028,11 +3992,9 @@ mod test_finalize_block { // println!("Reached processing epoch"); break; } else { - assert!( - enqueued_slashes_handle() - .at(&shell.state.in_mem().block.epoch) - .is_empty(&shell.state)? 
- ); + assert!(enqueued_slashes_handle() + .at(&shell.state.in_mem().block.epoch) + .is_empty(&shell.state)?); let stake1 = read_validator_stake( &shell.state, ¶ms, @@ -4516,13 +4478,11 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(misbehavior_epoch)); - assert!( - namada_proof_of_stake::storage::validator_slashes_handle( - &val1.address - ) - .is_empty(&shell.state) - .unwrap() - ); + assert!(namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.state) + .unwrap()); tracing::debug!("Advancing to epoch 7"); @@ -4587,22 +4547,18 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(Epoch(4))); - assert!( - namada_proof_of_stake::is_validator_frozen( - &shell.state, - &val1.address, - current_epoch, - ¶ms - ) - .unwrap() - ); - assert!( - namada_proof_of_stake::storage::validator_slashes_handle( - &val1.address - ) - .is_empty(&shell.state) - .unwrap() - ); + assert!(namada_proof_of_stake::is_validator_frozen( + &shell.state, + &val1.address, + current_epoch, + ¶ms + ) + .unwrap()); + assert!(namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.state) + .unwrap()); let pre_stake_10 = namada_proof_of_stake::storage::read_validator_stake( @@ -5467,7 +5423,7 @@ mod test_finalize_block { )); let keys_changed = BTreeSet::from([min_confirmations_key()]); let verifiers = BTreeSet::default(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = namada::ledger::native_vp::Ctx::new( shell.mode.get_validator_address().expect("Test failed"), shell.state.read_only(), @@ -5480,11 +5436,9 @@ mod test_finalize_block { shell.vp_wasm_cache.clone(), ); let parameters = ParametersVp { ctx }; - assert!( - parameters - .validate_tx(&batched_tx, &keys_changed, &verifiers) - .is_ok() - ); + assert!(parameters + .validate_tx(&batched_tx, &keys_changed, &verifiers) + .is_ok()); // we advance forward to the next epoch let mut req = FinalizeBlock::default(); @@ -5557,13 +5511,11 @@ mod test_finalize_block { let inner_results = inner_tx_result.batch_results.0; for cmt in batch.commitments() { - assert!( - inner_results - .get(&cmt.get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); + assert!(inner_results + .get(&cmt.get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); } // Check storage modifications @@ -5601,24 +5553,18 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!( - inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); - assert!( - inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err() - ); + assert!(inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); + assert!(inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err()); // Assert that the last tx didn't run - assert!( - !inner_results.contains_key(&batch.commitments()[2].get_hash()) - ); + assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); // Check storage modifications are missing for key in ["random_key_1", "random_key_2", "random_key_3"] { @@ -5649,27 +5595,21 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - 
assert!( - inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); - assert!( - inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err() - ); - assert!( - inner_results - .get(&batch.commitments()[2].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); + assert!(inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); + assert!(inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err()); + assert!(inner_results + .get(&batch.commitments()[2].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); // Check storage modifications assert_eq!( @@ -5680,12 +5620,10 @@ mod test_finalize_block { .unwrap(), STORAGE_VALUE ); - assert!( - !shell - .state - .has_key(&"random_key_2".parse().unwrap()) - .unwrap() - ); + assert!(!shell + .state + .has_key(&"random_key_2".parse().unwrap()) + .unwrap()); assert_eq!( shell .state @@ -5717,24 +5655,18 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!( - inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); - assert!( - inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err() - ); + assert!(inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); + assert!(inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err()); // Assert that the last tx didn't run - assert!( - !inner_results.contains_key(&batch.commitments()[2].get_hash()) - ); + assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); // Check storage modifications are missing for key in ["random_key_1", "random_key_2", "random_key_3"] { @@ -5764,24 +5696,18 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!( - inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); - assert!( - inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err() - ); + assert!(inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); + assert!(inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err()); // Assert that the last tx didn't run - assert!( - !inner_results.contains_key(&batch.commitments()[2].get_hash()) - ); + assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); // Check storage modifications assert_eq!( diff --git a/crates/node/src/shell/prepare_proposal.rs b/crates/node/src/shell/prepare_proposal.rs index 6cc2f22392..d3ab15a16b 100644 --- a/crates/node/src/shell/prepare_proposal.rs +++ b/crates/node/src/shell/prepare_proposal.rs @@ -5,7 +5,6 @@ use std::cell::RefCell; use namada::core::address::Address; use namada::core::key::tm_raw_hash_to_string; use namada::gas::{Gas, TxGasMeter}; -use namada::hash::Hash; use namada::ledger::protocol::{self, ShellParams}; use namada::proof_of_stake::storage::find_validator_by_raw_hash; use namada::state::{DBIter, StorageHasher, TempWlState, DB}; @@ -296,9 +295,10 @@ where // Check fees and extract the gas limit of 
this transaction // TODO(namada#2597): check if masp fee payment is required + // FIXME: look at this todo match prepare_proposal_fee_check( &wrapper, - tx.header_hash(), + &tx, block_proposer, proposer_local_config, &mut ShellParams::new( @@ -319,7 +319,7 @@ where #[allow(clippy::too_many_arguments)] fn prepare_proposal_fee_check( wrapper: &WrapperTx, - wrapper_tx_hash: Hash, + tx: &Tx, proposer: &Address, proposer_local_config: Option<&ValidatorLocalConfig>, shell_params: &mut ShellParams<'_, TempWlState<'_, D, H>, D, H, CA>, @@ -337,13 +337,8 @@ where super::fee_data_check(wrapper, minimum_gas_price, shell_params)?; - protocol::transfer_fee( - shell_params.state, - proposer, - wrapper, - wrapper_tx_hash, - ) - .map_err(Error::TxApply) + protocol::transfer_fee(shell_params, proposer, tx, wrapper) + .map_err(Error::TxApply) } fn compute_min_gas_price( diff --git a/crates/node/src/shell/process_proposal.rs b/crates/node/src/shell/process_proposal.rs index f226097c11..4fff2216e9 100644 --- a/crates/node/src/shell/process_proposal.rs +++ b/crates/node/src/shell/process_proposal.rs @@ -2,7 +2,6 @@ //! and [`RevertProposal`] ABCI++ methods for the Shell use data_encoding::HEXUPPER; -use namada::hash::Hash; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::storage::find_validator_by_raw_hash; use namada::tx::data::protocol::ProtocolTxType; @@ -469,7 +468,7 @@ where // Check that the fee payer has sufficient balance. if let Err(e) = process_proposal_fee_check( &wrapper, - tx.header_hash(), + &tx, block_proposer, &mut ShellParams::new( &RefCell::new(tx_gas_meter), @@ -516,9 +515,10 @@ where } // TODO(namada#2597): check masp fee payment if required +// FIXME: check this todo fn process_proposal_fee_check( wrapper: &WrapperTx, - wrapper_tx_hash: Hash, + tx: &Tx, proposer: &Address, shell_params: &mut ShellParams<'_, TempWlState<'_, D, H>, D, H, CA>, ) -> Result<()> @@ -539,13 +539,8 @@ where fee_data_check(wrapper, minimum_gas_price, shell_params)?; - protocol::transfer_fee( - shell_params.state, - proposer, - wrapper, - wrapper_tx_hash, - ) - .map_err(Error::TxApply) + protocol::transfer_fee(shell_params, proposer, tx, wrapper) + .map_err(Error::TxApply) } /// We test the failure cases of [`process_proposal`]. The happy flows diff --git a/crates/shielded_token/src/utils.rs b/crates/shielded_token/src/utils.rs index 3d3240b18d..4820a057aa 100644 --- a/crates/shielded_token/src/utils.rs +++ b/crates/shielded_token/src/utils.rs @@ -1,11 +1,15 @@ //! MASP utilities +use std::collections::BTreeSet; + use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; use masp_primitives::transaction::Transaction; -use namada_storage::{Error, Result, StorageRead, StorageWrite}; +use namada_storage::{Error, Key, Result, StorageRead, StorageWrite}; -use crate::storage_key::{masp_commitment_tree_key, masp_nullifier_key}; +use crate::storage_key::{ + is_masp_transfer_key, masp_commitment_tree_key, masp_nullifier_key, +}; // Writes the nullifiers of the provided masp transaction to storage fn reveal_nullifiers( @@ -67,3 +71,11 @@ pub fn handle_masp_tx( Ok(()) } + +/// Check if a transaction was a MASP transaction. This means +/// that at least one key owned by MASP was changed. We cannot +/// simply check that the MASP VP was triggered, as this can +/// be manually requested to be triggered by users. 
+pub fn is_masp_transfer(changed_keys: &BTreeSet) -> bool { + changed_keys.iter().any(is_masp_transfer_key) +} diff --git a/crates/tests/src/vm_host_env/mod.rs b/crates/tests/src/vm_host_env/mod.rs index da322d3fa0..6ebf1fd10a 100644 --- a/crates/tests/src/vm_host_env/mod.rs +++ b/crates/tests/src/vm_host_env/mod.rs @@ -635,7 +635,7 @@ mod tests { .add_serialized_data(input_data.clone()) .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); - let result = vp::CTX.eval(empty_code, tx.batch_ref_first_tx()); + let result = vp::CTX.eval(empty_code, tx.batch_ref_first_tx().unwrap()); assert!(result.is_err()); // evaluating the VP template which always returns `true` should pass @@ -654,7 +654,7 @@ mod tests { .add_serialized_data(input_data.clone()) .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); - let result = vp::CTX.eval(code_hash, tx.batch_ref_first_tx()); + let result = vp::CTX.eval(code_hash, tx.batch_ref_first_tx().unwrap()); assert!(result.is_ok()); // evaluating the VP template which always returns `false` shouldn't @@ -674,7 +674,7 @@ mod tests { .add_serialized_data(input_data) .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); - let result = vp::CTX.eval(code_hash, tx.batch_ref_first_tx()); + let result = vp::CTX.eval(code_hash, tx.batch_ref_first_tx().unwrap()); assert!(result.is_err()); } @@ -707,8 +707,10 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Commit @@ -738,8 +740,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -779,8 +783,10 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Commit @@ -810,8 +816,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -852,8 +860,10 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Commit @@ -883,8 +893,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -927,8 +939,10 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Commit @@ -958,8 +972,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( 
+ &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -1002,8 +1018,10 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Commit @@ -1034,8 +1052,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -1087,8 +1107,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); // VP should fail because the transfer channel cannot be closed assert!(matches!( result.expect_err("validation succeeded unexpectedly"), @@ -1139,8 +1161,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -1186,8 +1210,10 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was escrowed let escrow = token::storage_key::balance_key( @@ -1196,7 +1222,7 @@ mod tests { ); let token_vp_result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &escrow, ); assert!(token_vp_result.is_ok()); @@ -1234,8 +1260,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check the balance tx_host_env::set(env); @@ -1321,8 +1349,10 @@ mod tests { let mut env = tx_host_env::take(); // The token must be part of the verifier set (checked by MultitokenVp) env.verifiers.insert(ibc_token); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!( result.is_ok(), "Expected VP to accept the tx, got {result:?}" @@ -1330,7 +1360,7 @@ mod tests { // Check if the token was burned let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &minted_key, ); assert!( @@ -1401,16 +1431,20 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was minted // The token must be part of the verifier set (checked by MultitokenVp) let denom = format!("{}/{}/{}", port_id, channel_id, token); let ibc_token = ibc::ibc_token(&denom); env.verifiers.insert(ibc_token.clone()); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!( result.is_ok(), 
"Expected VP to accept the tx, got {result:?}" @@ -1419,7 +1453,7 @@ mod tests { let minted_key = token::storage_key::minted_balance_key(&ibc_token); let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &minted_key, ); assert!( @@ -1492,8 +1526,10 @@ mod tests { // Check if the transaction is valid let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the ack has an error due to the invalid packet data tx_host_env::set(env); @@ -1585,13 +1621,15 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was unescrowed let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &escrow_key, ); assert!(result.is_ok()); @@ -1687,13 +1725,15 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was unescrowed let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &escrow_key, ); assert!(result.is_ok()); @@ -1781,8 +1821,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was refunded let escrow = token::storage_key::balance_key( @@ -1791,7 +1833,7 @@ mod tests { ); let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &escrow, ); assert!(result.is_ok()); @@ -1865,8 +1907,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was refunded let escrow = token::storage_key::balance_key( @@ -1875,7 +1919,7 @@ mod tests { ); let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &escrow, ); assert!(result.is_ok()); diff --git a/crates/tx/src/types.rs b/crates/tx/src/types.rs index 3156b584d5..d814e7e352 100644 --- a/crates/tx/src/types.rs +++ b/crates/tx/src/types.rs @@ -1704,12 +1704,11 @@ impl Tx { } /// Creates a batched tx along with the reference to the first inner tx - #[cfg(any(test, feature = "testing"))] - pub fn batch_ref_first_tx(&self) -> BatchedTxRef<'_> { - BatchedTxRef { + pub fn batch_ref_first_tx(&self) -> Option> { + Some(BatchedTxRef { tx: self, - cmt: self.first_commitments().unwrap(), - } + cmt: self.first_commitments()?, + }) } /// Creates a batched tx along with a copy of the first inner tx From 40401f4717f558b8bab78cda6e4769373e6660c2 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 29 May 2024 17:03:01 +0200 Subject: [PATCH 11/40] Passes the correct tx index to masp fee payment check --- 
crates/benches/process_wrapper.rs | 2 ++ crates/namada/src/ledger/mod.rs | 1 + crates/namada/src/ledger/protocol/mod.rs | 26 +++++++++++++---------- crates/node/src/shell/finalize_block.rs | 1 + crates/node/src/shell/prepare_proposal.rs | 12 +++++++---- crates/node/src/shell/process_proposal.rs | 11 ++++++---- 6 files changed, 34 insertions(+), 19 deletions(-) diff --git a/crates/benches/process_wrapper.rs b/crates/benches/process_wrapper.rs index 65edb93467..98ee806d82 100644 --- a/crates/benches/process_wrapper.rs +++ b/crates/benches/process_wrapper.rs @@ -3,6 +3,7 @@ use namada::core::address; use namada::core::key::RefTo; use namada::core::storage::BlockHeight; use namada::core::time::DateTimeUtc; +use namada::state::TxIndex; use namada::token::{ Amount, DenominatedAmount, TransparentTransfer, TransparentTransferData, }; @@ -80,6 +81,7 @@ fn process_tx(c: &mut Criterion) { shell .check_proposal_tx( &wrapper, + &TxIndex::default(), validation_meta, temp_state, datetime, diff --git a/crates/namada/src/ledger/mod.rs b/crates/namada/src/ledger/mod.rs index 6174b4a65c..8d9effb59e 100644 --- a/crates/namada/src/ledger/mod.rs +++ b/crates/namada/src/ledger/mod.rs @@ -70,6 +70,7 @@ mod dry_run_tx { &tx, &wrapper, &request.data, + &TxIndex::default(), &tx_gas_meter, &mut shell_params, None, diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index f4a9e87618..c1bea73143 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -197,6 +197,8 @@ pub enum DispatchArgs<'a, CA: 'static + WasmCacheAccess + Sync> { wrapper: &'a WrapperTx, /// The transaction bytes for gas accounting tx_bytes: &'a [u8], + /// The tx index + tx_index: TxIndex, /// The block proposer block_proposer: &'a Address, /// Vp cache @@ -295,6 +297,7 @@ where DispatchArgs::Wrapper { wrapper, tx_bytes, + tx_index, block_proposer, vp_wasm_cache, tx_wasm_cache, @@ -310,6 +313,7 @@ where tx, wrapper, tx_bytes, + &tx_index, tx_gas_meter, &mut shell_params, Some(block_proposer), @@ -411,12 +415,11 @@ where /// - replay protection /// - fee payment /// - gas accounting -// TODO(namada#2597): this must signal to the caller if we need masp fee payment -// in the first inner tx of the batch pub(crate) fn apply_wrapper_tx( tx: &Tx, wrapper: &WrapperTx, tx_bytes: &[u8], + tx_index: &TxIndex, tx_gas_meter: &RefCell, shell_params: &mut ShellParams<'_, S, D, H, CA>, block_proposer: Option<&Address>, @@ -435,8 +438,9 @@ where .expect("Error while writing tx hash to storage"); // Charge fee before performing any fallible operations - charge_fee(shell_params, tx, wrapper, block_proposer)?; - // FIXME: if fees were paid with first inner tx signal it to the caller + charge_fee(shell_params, tx, wrapper, tx_index, block_proposer)?; + // FIXME: if fees were paid with first inner tx signal it to the caller -> + // use the ExtendedResult from the other branch // Account for gas tx_gas_meter @@ -458,6 +462,7 @@ fn charge_fee( shell_params: &mut ShellParams<'_, S, D, H, CA>, tx: &Tx, wrapper: &WrapperTx, + tx_index: &TxIndex, block_proposer: Option<&Address>, ) -> Result<()> where @@ -469,7 +474,7 @@ where // Charge or check fees before propagating any possible error let payment_result = match block_proposer { Some(block_proposer) => { - transfer_fee(shell_params, block_proposer, tx, wrapper) + transfer_fee(shell_params, block_proposer, tx, wrapper, tx_index) } None => { check_fees(shell_params.state, wrapper)?; @@ -498,6 +503,7 @@ pub fn transfer_fee( 
block_proposer: &Address, tx: &Tx, wrapper: &WrapperTx, + tx_index: &TxIndex, ) -> Result<()> where S: State + StorageRead + Sync, @@ -546,7 +552,7 @@ where // See if the first inner transaction of the batch pays the fees // with a masp unshield let is_valid_masp_transaction = - try_masp_fee_payment(shell_params, tx); + try_masp_fee_payment(shell_params, tx, tx_index); if let Ok(true) = is_valid_masp_transaction { let balance = crate::token::read_balance( shell_params.state, @@ -654,6 +660,7 @@ fn try_masp_fee_payment( tx_wasm_cache, }: &mut ShellParams<'_, S, D, H, CA>, tx: &Tx, + tx_index: &TxIndex, ) -> Result where S: State + StorageRead + Sync, @@ -682,8 +689,7 @@ where // FIXME: call dispatch_tx after merge let is_valid_masp_transaction = { - // FIXME: review if using the batch write log instead of the precommit - // here NOTE: A clean tx write log must be provided to this call + // NOTE: A clean tx write log must be provided to this call // for a correct vp validation. Block write log, instead, // should contain any prior changes (if any). This is to simulate // the unshielding tx (to prevent the already written @@ -694,9 +700,7 @@ where match apply_wasm_tx( tx.batch_ref_first_tx() .ok_or_else(|| Error::MissingInnerTxs)?, - // FIXME: pass the correct index here? Probably there's no need but - // it would be better - &TxIndex::default(), + tx_index, ShellParams { tx_gas_meter: &ref_unshield_gas_meter, state: *state, diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index d807d46366..4d23a8b7bb 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -700,6 +700,7 @@ where DispatchArgs::Wrapper { wrapper, tx_bytes: processed_tx.tx.as_ref(), + tx_index: TxIndex::must_from_usize(tx_index), block_proposer: native_block_proposer_address, vp_wasm_cache: &mut self.vp_wasm_cache, tx_wasm_cache: &mut self.tx_wasm_cache, diff --git a/crates/node/src/shell/prepare_proposal.rs b/crates/node/src/shell/prepare_proposal.rs index d3ab15a16b..8ea79d6aa8 100644 --- a/crates/node/src/shell/prepare_proposal.rs +++ b/crates/node/src/shell/prepare_proposal.rs @@ -7,7 +7,7 @@ use namada::core::key::tm_raw_hash_to_string; use namada::gas::{Gas, TxGasMeter}; use namada::ledger::protocol::{self, ShellParams}; use namada::proof_of_stake::storage::find_validator_by_raw_hash; -use namada::state::{DBIter, StorageHasher, TempWlState, DB}; +use namada::state::{DBIter, StorageHasher, TempWlState, TxIndex, DB}; use namada::token::{Amount, DenominatedAmount}; use namada::tx::data::{TxType, WrapperTx}; use namada::tx::Tx; @@ -122,8 +122,9 @@ where let txs = txs .iter() - .filter_map(|tx_bytes| { - match validate_wrapper_bytes(tx_bytes, block_time, block_proposer, proposer_local_config, &mut temp_state, &mut vp_wasm_cache, &mut tx_wasm_cache, ) { + .enumerate() + .filter_map(|(tx_index, tx_bytes)| { + match validate_wrapper_bytes(tx_bytes, &TxIndex::must_from_usize(tx_index),block_time, block_proposer, proposer_local_config, &mut temp_state, &mut vp_wasm_cache, &mut tx_wasm_cache, ) { Ok(gas) => { temp_state.write_log_mut().commit_tx(); Some((tx_bytes.to_owned(), gas)) @@ -259,6 +260,7 @@ where #[allow(clippy::too_many_arguments)] fn validate_wrapper_bytes( tx_bytes: &[u8], + tx_index: &TxIndex, block_time: Option, block_proposer: &Address, proposer_local_config: Option<&ValidatorLocalConfig>, @@ -299,6 +301,7 @@ where match prepare_proposal_fee_check( &wrapper, &tx, + tx_index, block_proposer, proposer_local_config, &mut 
ShellParams::new( @@ -320,6 +323,7 @@ where fn prepare_proposal_fee_check( wrapper: &WrapperTx, tx: &Tx, + tx_index: &TxIndex, proposer: &Address, proposer_local_config: Option<&ValidatorLocalConfig>, shell_params: &mut ShellParams<'_, TempWlState<'_, D, H>, D, H, CA>, @@ -337,7 +341,7 @@ where super::fee_data_check(wrapper, minimum_gas_price, shell_params)?; - protocol::transfer_fee(shell_params, proposer, tx, wrapper) + protocol::transfer_fee(shell_params, proposer, tx, wrapper, tx_index) .map_err(Error::TxApply) } diff --git a/crates/node/src/shell/process_proposal.rs b/crates/node/src/shell/process_proposal.rs index 4fff2216e9..6f5e78391f 100644 --- a/crates/node/src/shell/process_proposal.rs +++ b/crates/node/src/shell/process_proposal.rs @@ -136,9 +136,11 @@ where let tx_results: Vec<_> = txs .iter() - .map(|tx_bytes| { + .enumerate() + .map(|(tx_index, tx_bytes)| { let result = self.check_proposal_tx( tx_bytes, + &TxIndex::must_from_usize(tx_index), &mut metadata, &mut temp_state, block_time, @@ -190,6 +192,7 @@ where pub fn check_proposal_tx( &self, tx_bytes: &[u8], + tx_index: &TxIndex, metadata: &mut ValidationMeta, temp_state: &mut TempWlState<'_, D, H>, block_time: DateTimeUtc, @@ -469,6 +472,7 @@ where if let Err(e) = process_proposal_fee_check( &wrapper, &tx, + tx_index, block_proposer, &mut ShellParams::new( &RefCell::new(tx_gas_meter), @@ -514,11 +518,10 @@ where } } -// TODO(namada#2597): check masp fee payment if required -// FIXME: check this todo fn process_proposal_fee_check( wrapper: &WrapperTx, tx: &Tx, + tx_index: &TxIndex, proposer: &Address, shell_params: &mut ShellParams<'_, TempWlState<'_, D, H>, D, H, CA>, ) -> Result<()> @@ -539,7 +542,7 @@ where fee_data_check(wrapper, minimum_gas_price, shell_params)?; - protocol::transfer_fee(shell_params, proposer, tx, wrapper) + protocol::transfer_fee(shell_params, proposer, tx, wrapper, tx_index) .map_err(Error::TxApply) } From bece3de4ff0bfead3bfc51a67aab8b7fe742da28 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 29 May 2024 20:34:58 +0200 Subject: [PATCH 12/40] Reworks masp fee payment to correctly handle errors. 
Misc refactors --- crates/namada/src/ledger/protocol/mod.rs | 240 ++++++++++++---------- crates/node/src/shell/finalize_block.rs | 1 + crates/node/src/shell/prepare_proposal.rs | 2 - 3 files changed, 127 insertions(+), 116 deletions(-) diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index c1bea73143..c270b4876d 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -437,8 +437,25 @@ where .write_tx_hash(tx.header_hash()) .expect("Error while writing tx hash to storage"); - // Charge fee before performing any fallible operations - charge_fee(shell_params, tx, wrapper, tx_index, block_proposer)?; + // Charge or check fees before propagating any possible error + let payment_result = match block_proposer { + Some(block_proposer) => { + transfer_fee(shell_params, block_proposer, tx, wrapper, tx_index) + } + None => { + check_fees(shell_params.state, wrapper)?; + Ok(()) + } + }; + + // FIXME: make sure that both transfer and check fees commit or drop before + // returning, here we call a commit for safety Commit tx write log even + // in case of subsequent errors (if the fee payment failed instead, than the + // previous two functions must have already dropped the write log leading + // this function call to be essentially a no-op) + shell_params.state.write_log_mut().commit_tx(); + payment_result?; + // FIXME: if fees were paid with first inner tx signal it to the caller -> // use the ExtendedResult from the other branch @@ -454,50 +471,9 @@ where }) } -/// Charge fee for the provided wrapper transaction. Returns error if: -/// - Fee amount overflows -/// - Not enough funds are available to pay the entire amount of the fee -/// - The accumulated fee amount to be credited to the block proposer overflows -fn charge_fee( - shell_params: &mut ShellParams<'_, S, D, H, CA>, - tx: &Tx, - wrapper: &WrapperTx, - tx_index: &TxIndex, - block_proposer: Option<&Address>, -) -> Result<()> -where - S: State + Sync, - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, - CA: 'static + WasmCacheAccess + Sync, -{ - // Charge or check fees before propagating any possible error - let payment_result = match block_proposer { - Some(block_proposer) => { - transfer_fee(shell_params, block_proposer, tx, wrapper, tx_index) - } - None => { - check_fees(shell_params.state, wrapper)?; - Ok(()) - } - }; - - // FIXME: where should we commit? To block or batch? - match payment_result { - // Commit tx write log even in case of subsequent errors - Ok(()) | Err(Error::FeeError(_)) => { - shell_params.state.write_log_mut().commit_tx() - } - // FIXME: correct to drop here? I can probably avoid cause I drop later - // anyway - _ => shell_params.state.write_log_mut().drop_tx(), - } - - payment_result -} - /// Perform the actual transfer of fees from the fee payer to the block -/// proposer. +/// proposer. Drops the modifications if errors occur but does not commit since +/// we might want to drop things later. 
pub fn transfer_fee( shell_params: &mut ShellParams<'_, S, D, H, CA>, block_proposer: &Address, @@ -516,10 +492,7 @@ where &wrapper.fee.token, &wrapper.fee_payer(), ) - .unwrap(); - - const FEE_PAYMENT_DESCRIPTOR: std::borrow::Cow<'static, str> = - std::borrow::Cow::Borrowed("wrapper-fee-payment"); + .map_err(Error::StorageError)?; match wrapper.get_tx_fee() { Ok(fees) => { @@ -528,53 +501,51 @@ where &wrapper.fee.token, shell_params.state, ) - .map_err(|e| Error::FeeError(e.to_string()))?; - - let current_block_height = shell_params - .state - .in_mem() - .get_last_block_height() - .next_height(); + .map_err(Error::StorageError)?; - // FIXME: refactor let post_bal = if let Some(post_bal) = balance.checked_sub(fees) { - token_transfer( + fee_token_transfer( shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), block_proposer, fees, - ) - .map_err(|e| Error::FeeError(e.to_string()))?; + )?; Some(post_bal) } else { // See if the first inner transaction of the batch pays the fees // with a masp unshield - let is_valid_masp_transaction = - try_masp_fee_payment(shell_params, tx, tx_index); - if let Ok(true) = is_valid_masp_transaction { + if let Ok(true) = + try_masp_fee_payment(shell_params, tx, tx_index) + { + // NOTE: Even if the unshielding was succesfull we could + // still fail in the transfer (e.g. cause the unshielded + // amount is not enough to cover the fees). In this case we + // want do drop the changes applied by the masp transaction + // and try to drain the fees from the transparent balance. + // Because of this we must NOT propagate errors from within + // this branch let balance = crate::token::read_balance( shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), - ) - .unwrap(); + ); - if let Some(post_bal) = balance.checked_sub(fees) { - token_transfer( + // Ok to unwrap_or_default. In the default case, the only + // way the checked op can return Some is if fees are 0, but + // if that's the case then we would have never reached this + // branch of execution + balance.unwrap_or_default().checked_sub(fees).filter(|_| { + fee_token_transfer( shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), block_proposer, fees, ) - .map_err(|e| Error::FeeError(e.to_string()))?; - - Some(post_bal) - } else { - None - } + .is_ok() + }) } else { None } @@ -590,14 +561,13 @@ where available balance which is less than the fee. This \ shouldn't happen." ); - token_transfer( + fee_token_transfer( shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), block_proposer, balance, - ) - .map_err(|e| Error::FeeError(e.to_string()))?; + )?; } let target_post_balance = Some( @@ -610,6 +580,13 @@ where .into(), ); + const FEE_PAYMENT_DESCRIPTOR: std::borrow::Cow<'static, str> = + std::borrow::Cow::Borrowed("wrapper-fee-payment"); + let current_block_height = shell_params + .state + .in_mem() + .get_last_block_height() + .next_height(); shell_params.state.write_log_mut().emit_event( TokenEvent { descriptor: FEE_PAYMENT_DESCRIPTOR, @@ -630,6 +607,8 @@ where ); post_bal.map(|_| ()).ok_or_else(|| { + // In this case don't drop the state changes because we still + // want to drain the fee payer's balance Error::FeeError( "Transparent balance of wrapper's signer was insufficient \ to pay fee. All the available transparent funds have \ @@ -645,6 +624,7 @@ where "Transfer of tx fee cannot be applied to due to fee overflow. \ This shouldn't happen." 
); + shell_params.state.write_log_mut().drop_tx(); Err(Error::FeeError(format!("{}", e))) } @@ -709,10 +689,12 @@ where }, ) { Ok(result) => { - // NOTE: do not commit yet cause this could be - // exploited to get free masp operations. We can commit only - // after the entire wrapper has been deemed valid - // FIXME: could I precommit here instead? + // NOTE: do not commit yet cause this could be exploited to get + // free masp operations. We can commit only after the entire fee + // payment has been deemed valid. Also, do not precommit cause + // we might need to discard the effects of this valid unshield + // (e.g. if it unshield an amount which is not enough to pay the + // fees) if !result.is_accepted() { state.write_log_mut().drop_tx_keep_precommit(); tracing::error!( @@ -721,6 +703,8 @@ where result.vps_result.rejected_vps ); } + // FIXME: make sure to propagate the bathced tx result to the + // WrapperCache! // Ensure that the transaction is actually a masp one, otherwise // reject @@ -750,12 +734,10 @@ where Ok(is_valid_masp_transaction) } -/// Transfer `token` from `src` to `dest`. Returns an `Err` if `src` has -/// insufficient balance or if the transfer the `dest` would overflow (This can -/// only happen if the total supply doesn't fit in `token::Amount`). Contrary to -/// `crate::token::transfer` this function updates the tx write log and -/// not the block write log. -fn token_transfer( +// Manage the token transfer for the fee payment. If an error is detected the +// write log is dropped to prevent committing an inconsistent state. Propagates +// the result to the caller +fn fee_token_transfer( state: &mut WLS, token: &Address, src: &Address, @@ -765,39 +747,68 @@ fn token_transfer( where WLS: State + StorageRead, { - let src_key = crate::token::storage_key::balance_key(token, src); - let src_balance = crate::token::read_balance(state, token, src) - .expect("Token balance read in protocol must not fail"); - match src_balance.checked_sub(amount) { - Some(new_src_balance) => { - if src == dest { - return Ok(()); - } - let dest_key = crate::token::storage_key::balance_key(token, dest); - let dest_balance = crate::token::read_balance(state, token, dest) - .expect("Token balance read in protocol must not fail"); - match dest_balance.checked_add(amount) { - Some(new_dest_balance) => { - state - .write_log_mut() - .write(&src_key, new_src_balance.serialize_to_vec()) - .map_err(|e| Error::FeeError(e.to_string()))?; - match state - .write_log_mut() - .write(&dest_key, new_dest_balance.serialize_to_vec()) - { - Ok(_) => Ok(()), - Err(e) => Err(Error::FeeError(e.to_string())), + // Transfer `token` from `src` to `dest`. Returns an `Err` if `src` has + // insufficient balance or if the transfer the `dest` would overflow (This + // can only happen if the total supply doesn't fit in `token::Amount`). + // Contrary to `crate::token::transfer` this function updates the tx + // write log and not the block write log. 
+ fn inner_fee_token_transfer( + state: &mut WLS, + token: &Address, + src: &Address, + dest: &Address, + amount: Amount, + ) -> Result<()> + where + WLS: State + StorageRead, + { + if amount.is_zero() { + return Ok(()); + } + let src_key = crate::token::storage_key::balance_key(token, src); + let src_balance = crate::token::read_balance(state, token, src) + .map_err(Error::StorageError)?; + match src_balance.checked_sub(amount) { + Some(new_src_balance) => { + let dest_key = + crate::token::storage_key::balance_key(token, dest); + let dest_balance = + crate::token::read_balance(state, token, dest) + .map_err(Error::StorageError)?; + match dest_balance.checked_add(amount) { + Some(new_dest_balance) => { + state + .write_log_mut() + .write(&src_key, new_src_balance.serialize_to_vec()) + .map_err(|e| Error::FeeError(e.to_string()))?; + match state.write_log_mut().write( + &dest_key, + new_dest_balance.serialize_to_vec(), + ) { + Ok(_) => Ok(()), + Err(e) => Err(Error::FeeError(e.to_string())), + } } + None => Err(Error::StorageError( + namada_state::StorageError::new_alloc(format!( + "The transfer would overflow balance of {dest}" + )), + )), } - None => Err(Error::FeeError( - "The transfer would overflow destination balance" - .to_string(), - )), + } + None => { + Err(Error::StorageError(namada_state::StorageError::new_alloc( + format!("{src} has insufficient balance"), + ))) } } - None => Err(Error::FeeError("Insufficient source balance".to_string())), } + + inner_fee_token_transfer(state, token, src, dest, amount).map_err(|err| { + state.write_log_mut().drop_tx(); + + err + }) } /// Check if the fee payer has enough transparent balance to pay fees @@ -811,6 +822,7 @@ where &wrapper.fee.token, &wrapper.fee_payer(), ) + // FIXME: remove unwraps .unwrap(); let fees = wrapper diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index 4d23a8b7bb..ab9b220c2e 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -357,6 +357,7 @@ where match extended_dispatch_result { Ok(extended_tx_result) => match tx_data.tx.header.tx_type { TxType::Wrapper(_) => { + self.state.write_log_mut().commit_tx(); // Return withouth emitting any events return Some(WrapperCache { tx: tx_data.tx.to_owned(), diff --git a/crates/node/src/shell/prepare_proposal.rs b/crates/node/src/shell/prepare_proposal.rs index 8ea79d6aa8..128787c7d4 100644 --- a/crates/node/src/shell/prepare_proposal.rs +++ b/crates/node/src/shell/prepare_proposal.rs @@ -296,8 +296,6 @@ where super::replay_protection_checks(&tx, temp_state).map_err(|_| ())?; // Check fees and extract the gas limit of this transaction - // TODO(namada#2597): check if masp fee payment is required - // FIXME: look at this todo match prepare_proposal_fee_check( &wrapper, &tx, From 3d9ccc4b36be3932e3966a1ffb394737b687c555 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 30 May 2024 13:24:37 +0200 Subject: [PATCH 13/40] `check_fees` checks masp fee payment --- crates/namada/src/ledger/protocol/mod.rs | 110 ++++++++++++++++------- crates/node/src/shell/mod.rs | 10 ++- 2 files changed, 84 insertions(+), 36 deletions(-) diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index c270b4876d..6b3ff8a235 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -22,6 +22,7 @@ use namada_tx::data::{ use namada_tx::{BatchedTxRef, Tx}; use namada_vote_ext::EthereumTxData; use 
rayon::iter::{IntoParallelRefIterator, ParallelIterator}; +use smooth_operator::checked; use thiserror::Error; use crate::address::{Address, InternalAddress}; @@ -443,16 +444,19 @@ where transfer_fee(shell_params, block_proposer, tx, wrapper, tx_index) } None => { - check_fees(shell_params.state, wrapper)?; + check_fees(shell_params, tx, wrapper)?; Ok(()) } }; // FIXME: make sure that both transfer and check fees commit or drop before - // returning, here we call a commit for safety Commit tx write log even - // in case of subsequent errors (if the fee payment failed instead, than the - // previous two functions must have already dropped the write log leading - // this function call to be essentially a no-op) + // returning, here we call a commit for safety. Actually do I need to drop + // in check? + + // Commit tx write log even in case of subsequent errors (if the fee payment + // failed instead, than the previous two functions must have already + // dropped the write log leading this function call to be essentially a + // no-op) shell_params.state.write_log_mut().commit_tx(); payment_result?; @@ -487,13 +491,6 @@ where H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, { - let balance = crate::token::read_balance( - shell_params.state, - &wrapper.fee.token, - &wrapper.fee_payer(), - ) - .map_err(Error::StorageError)?; - match wrapper.get_tx_fee() { Ok(fees) => { let fees = crate::token::denom_to_amount( @@ -503,6 +500,13 @@ where ) .map_err(Error::StorageError)?; + let balance = crate::token::read_balance( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + ) + .map_err(Error::StorageError)?; + let post_bal = if let Some(post_bal) = balance.checked_sub(fees) { fee_token_transfer( shell_params.state, @@ -812,31 +816,71 @@ where } /// Check if the fee payer has enough transparent balance to pay fees -// FIXME: here I should check fee unshielding -pub fn check_fees(state: &S, wrapper: &WrapperTx) -> Result<()> +pub fn check_fees( + shell_params: &mut ShellParams<'_, S, D, H, CA>, + tx: &Tx, + wrapper: &WrapperTx, +) -> Result<()> where - S: State + StorageRead, + // FIXME: can remove sync? 
+ // FIXME: review these traits + S: State + StorageRead + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, { - let balance = crate::token::read_balance( - state, - &wrapper.fee.token, - &wrapper.fee_payer(), - ) - // FIXME: remove unwraps - .unwrap(); + match wrapper.get_tx_fee() { + Ok(fees) => { + let fees = crate::token::denom_to_amount( + fees, + &wrapper.fee.token, + shell_params.state, + ) + .map_err(Error::StorageError)?; - let fees = wrapper - .get_tx_fee() - .map_err(|e| Error::FeeError(e.to_string()))?; + let balance = crate::token::read_balance( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + ) + .map_err(Error::StorageError)?; - let fees = crate::token::denom_to_amount(fees, &wrapper.fee.token, state) - .map_err(|e| Error::FeeError(e.to_string()))?; - if balance.checked_sub(fees).is_some() { - Ok(()) - } else { - Err(Error::FeeError( - "Insufficient transparent balance to pay fees".to_string(), - )) + checked!(balance - fees).map_or_else( + |_| { + // See if the first inner transaction of the batch pays the + // fees with a masp unshield + if let Ok(true) = try_masp_fee_payment( + shell_params, + tx, + &TxIndex::default(), + ) { + let balance = crate::token::read_balance( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + ) + .map_err(Error::StorageError)?; + + checked!(balance - fees).map_or_else( + |_| { + Err(Error::FeeError( + "Masp fee payment unshielded an \ + insufficient amount" + .to_string(), + )) + }, + |_| Ok(()), + ) + } else { + Err(Error::FeeError( + "Failed masp fee payment".to_string(), + )) + } + }, + |_| Ok(()), + ) + } + Err(e) => Err(Error::FeeError(e.to_string())), } } diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index 2ace0f620a..5d9a22a6a3 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -1132,13 +1132,14 @@ where // TODO(namada#2597): validate masp fee payment if normal fee // payment fails Validate wrapper fees if let Err(e) = mempool_fee_check( - &wrapper, &mut ShellParams::new( &RefCell::new(gas_meter), &mut self.state.with_temp_write_log(), &mut self.vp_wasm_cache.clone(), &mut self.tx_wasm_cache.clone(), ), + &tx, + &wrapper, ) { response.code = ResultCode::FeeError.into(); response.log = format!("{INVALID_MSG}: {e}"); @@ -1281,8 +1282,9 @@ where // Perform the fee check in mempool fn mempool_fee_check( - wrapper: &WrapperTx, shell_params: &mut ShellParams<'_, TempWlState<'_, D, H>, D, H, CA>, + tx: &Tx, + wrapper: &WrapperTx, ) -> Result<()> where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, @@ -1300,10 +1302,12 @@ where ))))?; fee_data_check(wrapper, minimum_gas_price, shell_params)?; - protocol::check_fees(shell_params.state, wrapper).map_err(Error::TxApply) + protocol::check_fees(shell_params, tx, wrapper).map_err(Error::TxApply) } /// Check the validity of the fee data +// FIXME: review the usage of this and if we a re doing this check also when +// paying checking fees pub fn fee_data_check( wrapper: &WrapperTx, minimum_gas_price: token::Amount, From 25dbf818eabadf85acc785cd164682b03559dbf8 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 30 May 2024 13:50:22 +0200 Subject: [PATCH 14/40] `check_fees` drop the storage changes in case of failure --- crates/namada/src/ledger/protocol/mod.rs | 123 ++++++++++++----------- 1 file changed, 67 insertions(+), 56 deletions(-) diff --git a/crates/namada/src/ledger/protocol/mod.rs 
b/crates/namada/src/ledger/protocol/mod.rs index 6b3ff8a235..ede27d4cf1 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -449,10 +449,6 @@ where } }; - // FIXME: make sure that both transfer and check fees commit or drop before - // returning, here we call a commit for safety. Actually do I need to drop - // in check? - // Commit tx write log even in case of subsequent errors (if the fee payment // failed instead, than the previous two functions must have already // dropped the write log leading this function call to be essentially a @@ -822,66 +818,81 @@ pub fn check_fees( wrapper: &WrapperTx, ) -> Result<()> where - // FIXME: can remove sync? - // FIXME: review these traits S: State + StorageRead + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, { - match wrapper.get_tx_fee() { - Ok(fees) => { - let fees = crate::token::denom_to_amount( - fees, - &wrapper.fee.token, - shell_params.state, - ) - .map_err(Error::StorageError)?; - - let balance = crate::token::read_balance( - shell_params.state, - &wrapper.fee.token, - &wrapper.fee_payer(), - ) - .map_err(Error::StorageError)?; - - checked!(balance - fees).map_or_else( - |_| { - // See if the first inner transaction of the batch pays the - // fees with a masp unshield - if let Ok(true) = try_masp_fee_payment( - shell_params, - tx, - &TxIndex::default(), - ) { - let balance = crate::token::read_balance( - shell_params.state, - &wrapper.fee.token, - &wrapper.fee_payer(), - ) - .map_err(Error::StorageError)?; + fn inner_check_fees( + shell_params: &mut ShellParams<'_, S, D, H, CA>, + tx: &Tx, + wrapper: &WrapperTx, + ) -> Result<()> + where + S: State + StorageRead + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, + { + match wrapper.get_tx_fee() { + Ok(fees) => { + let fees = crate::token::denom_to_amount( + fees, + &wrapper.fee.token, + shell_params.state, + ) + .map_err(Error::StorageError)?; - checked!(balance - fees).map_or_else( - |_| { - Err(Error::FeeError( - "Masp fee payment unshielded an \ - insufficient amount" - .to_string(), - )) - }, - |_| Ok(()), - ) - } else { - Err(Error::FeeError( - "Failed masp fee payment".to_string(), - )) - } - }, - |_| Ok(()), - ) + let balance = crate::token::read_balance( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + ) + .map_err(Error::StorageError)?; + + checked!(balance - fees).map_or_else( + |_| { + // See if the first inner transaction of the batch pays + // the fees with a masp unshield + if let Ok(true) = try_masp_fee_payment( + shell_params, + tx, + &TxIndex::default(), + ) { + let balance = crate::token::read_balance( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + ) + .map_err(Error::StorageError)?; + + checked!(balance - fees).map_or_else( + |_| { + Err(Error::FeeError( + "Masp fee payment unshielded an \ + insufficient amount" + .to_string(), + )) + }, + |_| Ok(()), + ) + } else { + Err(Error::FeeError( + "Failed masp fee payment".to_string(), + )) + } + }, + |_| Ok(()), + ) + } + Err(e) => Err(Error::FeeError(e.to_string())), } - Err(e) => Err(Error::FeeError(e.to_string())), } + inner_check_fees(shell_params, tx, wrapper).map_err(|err| { + shell_params.state.write_log_mut().drop_tx(); + + err + }) } /// Apply a transaction going via the wasm environment. 
Gas will be metered and From d5aafd508bc5fd02ccd8e8b0995b6acd2475fdfc Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 30 May 2024 16:01:33 +0200 Subject: [PATCH 15/40] Returns `BatchedTxResult` from masp fee payment --- crates/namada/src/ledger/protocol/mod.rs | 146 ++++++++++++---------- crates/node/src/shell/mod.rs | 6 +- crates/node/src/shell/prepare_proposal.rs | 3 +- crates/node/src/shell/process_proposal.rs | 2 +- 4 files changed, 86 insertions(+), 71 deletions(-) diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index ede27d4cf1..84a417dd90 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -443,10 +443,7 @@ where Some(block_proposer) => { transfer_fee(shell_params, block_proposer, tx, wrapper, tx_index) } - None => { - check_fees(shell_params, tx, wrapper)?; - Ok(()) - } + None => check_fees(shell_params, tx, wrapper), }; // Commit tx write log even in case of subsequent errors (if the fee payment @@ -454,7 +451,17 @@ where // dropped the write log leading this function call to be essentially a // no-op) shell_params.state.write_log_mut().commit_tx(); - payment_result?; + let batch_results = + payment_result?.map_or_else(BatchResults::default, |batched_result| { + let mut batch = BatchResults::default(); + batch.0.insert( + // Ok to unwrap cause if we have a batched result it means + // we've executed the first tx in the batch + tx.first_commitments().unwrap().get_hash(), + Ok(batched_result), + ); + batch + }); // FIXME: if fees were paid with first inner tx signal it to the caller -> // use the ExtendedResult from the other branch @@ -467,7 +474,7 @@ where Ok(TxResult { gas_used: tx_gas_meter.borrow().get_tx_consumed_gas(), - batch_results: BatchResults::default(), + batch_results, }) } @@ -480,7 +487,7 @@ pub fn transfer_fee( tx: &Tx, wrapper: &WrapperTx, tx_index: &TxIndex, -) -> Result<()> +) -> Result> where S: State + StorageRead + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -503,53 +510,61 @@ where ) .map_err(Error::StorageError)?; - let post_bal = if let Some(post_bal) = balance.checked_sub(fees) { - fee_token_transfer( - shell_params.state, - &wrapper.fee.token, - &wrapper.fee_payer(), - block_proposer, - fees, - )?; - - Some(post_bal) - } else { - // See if the first inner transaction of the batch pays the fees - // with a masp unshield - if let Ok(true) = - try_masp_fee_payment(shell_params, tx, tx_index) - { - // NOTE: Even if the unshielding was succesfull we could - // still fail in the transfer (e.g. cause the unshielded - // amount is not enough to cover the fees). In this case we - // want do drop the changes applied by the masp transaction - // and try to drain the fees from the transparent balance. - // Because of this we must NOT propagate errors from within - // this branch - let balance = crate::token::read_balance( + let (post_bal, valid_batched_tx_result) = + if let Some(post_bal) = balance.checked_sub(fees) { + fee_token_transfer( shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), - ); + block_proposer, + fees, + )?; - // Ok to unwrap_or_default. 
In the default case, the only - way the checked op can return Some is if fees are 0, but - if that's the case then we would have never reached this - branch of execution - balance.unwrap_or_default().checked_sub(fees).filter(|_| { - fee_token_transfer( + (Some(post_bal), None) + } else { + // See if the first inner transaction of the batch pays the fees + // with a masp unshield + if let Ok(Some(valid_batched_tx_result)) = + try_masp_fee_payment(shell_params, tx, tx_index) + { + // NOTE: Even if the unshielding was successful we could + // still fail in the transfer (e.g. because the unshielded + // amount is not enough to cover the fees). In this case we + // want to drop the changes applied by the masp transaction + // and try to drain the fees from the transparent balance. + // Because of this we must NOT propagate errors from within + // this branch + let balance = crate::token::read_balance( shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), - ); + ); + + // Ok to unwrap_or_default. In the default case, the only + // way the checked op can return Some is if fees are 0, but + // if that's the case then we would have never reached this + // branch of execution + let post_bal = balance + .unwrap_or_default() + .checked_sub(fees) + .filter(|_| { + fee_token_transfer( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + block_proposer, + fees, + ) + .is_ok() + }); + + // Batched tx result must be returned (and considered) only + // if fee payment was successful + (post_bal, post_bal.map(|_| valid_batched_tx_result)) + } else { + (None, None) + } + }; if post_bal.is_none() { // Balance was insufficient for fee payment, move all the @@ -606,7 +621,7 @@ .with(TxHashAttr(tx.header_hash())), ); - post_bal.map(|_| ()).ok_or_else(|| { + post_bal.map(|_| valid_batched_tx_result).ok_or_else(|| { // In this case don't drop the state changes because we still // want to drain the fee payer's balance Error::FeeError( @@ -641,7 +656,7 @@ fn try_masp_fee_payment( }: &mut ShellParams<'_, S, D, H, CA>, tx: &Tx, tx_index: &TxIndex, -) -> Result +) -> Result> where S: State + StorageRead + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, @@ -667,8 +682,8 @@ .map_err(|e| Error::GasError(e.to_string()))?; let ref_unshield_gas_meter = RefCell::new(gas_meter); - // FIXME: call dispatch_tx after merge - let is_valid_masp_transaction = { + // FIXME: call dispatch_tx after merge? + let valid_batched_tx_result = { // NOTE: A clean tx write log must be provided to this call // for a correct vp validation. Block write log, instead, // should contain any prior changes (if any). This is to simulate @@ -703,12 +718,11 @@ result.vps_result.rejected_vps ); } - // FIXME: make sure to propagate the bathced tx result to the - // WrapperCache! // Ensure that the transaction is actually a masp one, otherwise // reject - is_masp_transfer(&result.changed_keys) && result.is_accepted() + (is_masp_transfer(&result.changed_keys) && result.is_accepted()) + .then_some(result) } Err(e) => { state.write_log_mut().drop_tx_keep_precommit(); @@ -721,7 +735,7 @@ return Err(e); } - false + None } } }; @@ -731,7 +745,7 @@ .copy_consumed_gas_from(&ref_unshield_gas_meter.borrow()) .map_err(|e| Error::GasError(e.to_string()))?; - Ok(is_valid_masp_transaction) + Ok(valid_batched_tx_result) } // Manage the token transfer for the fee payment.
If an error is detected the @@ -816,7 +830,7 @@ pub fn check_fees( shell_params: &mut ShellParams<'_, S, D, H, CA>, tx: &Tx, wrapper: &WrapperTx, -) -> Result<()> +) -> Result> where S: State + StorageRead + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -827,7 +841,7 @@ where shell_params: &mut ShellParams<'_, S, D, H, CA>, tx: &Tx, wrapper: &WrapperTx, - ) -> Result<()> + ) -> Result> where S: State + StorageRead + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -854,11 +868,13 @@ where |_| { // See if the first inner transaction of the batch pays // the fees with a masp unshield - if let Ok(true) = try_masp_fee_payment( - shell_params, - tx, - &TxIndex::default(), - ) { + if let Ok(valid_batched_tx_result @ Some(_)) = + try_masp_fee_payment( + shell_params, + tx, + &TxIndex::default(), + ) + { let balance = crate::token::read_balance( shell_params.state, &wrapper.fee.token, @@ -874,7 +890,7 @@ where .to_string(), )) }, - |_| Ok(()), + |_| Ok(valid_batched_tx_result), ) } else { Err(Error::FeeError( @@ -882,7 +898,7 @@ where )) } }, - |_| Ok(()), + |_| Ok(None), ) } Err(e) => Err(Error::FeeError(e.to_string())), diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index 5d9a22a6a3..7f09831caf 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -1302,12 +1302,12 @@ where ))))?; fee_data_check(wrapper, minimum_gas_price, shell_params)?; - protocol::check_fees(shell_params, tx, wrapper).map_err(Error::TxApply) + protocol::check_fees(shell_params, tx, wrapper) + .map_err(Error::TxApply) + .map(|_| ()) } /// Check the validity of the fee data -// FIXME: review the usage of this and if we a re doing this check also when -// paying checking fees pub fn fee_data_check( wrapper: &WrapperTx, minimum_gas_price: token::Amount, diff --git a/crates/node/src/shell/prepare_proposal.rs b/crates/node/src/shell/prepare_proposal.rs index 128787c7d4..826855569e 100644 --- a/crates/node/src/shell/prepare_proposal.rs +++ b/crates/node/src/shell/prepare_proposal.rs @@ -317,7 +317,6 @@ where } } -#[allow(clippy::too_many_arguments)] fn prepare_proposal_fee_check( wrapper: &WrapperTx, tx: &Tx, @@ -340,7 +339,7 @@ where super::fee_data_check(wrapper, minimum_gas_price, shell_params)?; protocol::transfer_fee(shell_params, proposer, tx, wrapper, tx_index) - .map_err(Error::TxApply) + .map_or_else(|e| Err(Error::TxApply(e)), |_| Ok(())) } fn compute_min_gas_price( diff --git a/crates/node/src/shell/process_proposal.rs b/crates/node/src/shell/process_proposal.rs index 6f5e78391f..f213c5c96f 100644 --- a/crates/node/src/shell/process_proposal.rs +++ b/crates/node/src/shell/process_proposal.rs @@ -543,7 +543,7 @@ where fee_data_check(wrapper, minimum_gas_price, shell_params)?; protocol::transfer_fee(shell_params, proposer, tx, wrapper, tx_index) - .map_err(Error::TxApply) + .map_or_else(|e| Err(Error::TxApply(e)), |_| Ok(())) } /// We test the failure cases of [`process_proposal`]. 
The happy flows From 606a0f03c914edace731c4f81d6960f33d72ba21 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 30 May 2024 16:19:18 +0200 Subject: [PATCH 16/40] Renames fee payment gas limit parameter --- crates/apps_lib/src/client/rpc.rs | 8 +++--- crates/apps_lib/src/config/genesis.rs | 4 +-- crates/apps_lib/src/config/genesis/chain.rs | 4 +-- .../apps_lib/src/config/genesis/templates.rs | 8 +++--- crates/core/src/parameters.rs | 4 +-- crates/namada/src/ledger/protocol/mod.rs | 2 +- crates/node/src/storage/mod.rs | 2 +- crates/parameters/src/lib.rs | 25 ++++++++++--------- crates/parameters/src/storage.rs | 6 ++--- crates/proof_of_stake/src/lib.rs | 2 +- crates/state/src/lib.rs | 2 +- genesis/localnet/parameters.toml | 4 +-- genesis/starter/parameters.toml | 4 +-- 13 files changed, 38 insertions(+), 37 deletions(-) diff --git a/crates/apps_lib/src/client/rpc.rs b/crates/apps_lib/src/client/rpc.rs index 941fa83444..d9b17aa0bd 100644 --- a/crates/apps_lib/src/client/rpc.rs +++ b/crates/apps_lib/src/client/rpc.rs @@ -660,16 +660,16 @@ pub async fn query_protocol_parameters( .expect("Parameter should be defined."); display_line!(context.io(), "{:4}Max block gas: {:?}", "", max_block_gas); - let key = param_storage::get_fee_unshielding_gas_limit_key(); - let fee_unshielding_gas_limit: u64 = + let key = param_storage::get_masp_fee_payment_gas_limit_key(); + let masp_fee_payment_gas_limit: u64 = query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); display_line!( context.io(), - "{:4}Fee unshielding gas limit: {:?}", + "{:4}Masp fee payment gas limit: {:?}", "", - fee_unshielding_gas_limit + masp_fee_payment_gas_limit ); let key = param_storage::get_gas_cost_key(); diff --git a/crates/apps_lib/src/config/genesis.rs b/crates/apps_lib/src/config/genesis.rs index d6e5d97046..cb5e564c9e 100644 --- a/crates/apps_lib/src/config/genesis.rs +++ b/crates/apps_lib/src/config/genesis.rs @@ -313,8 +313,8 @@ pub struct Parameters { pub masp_epoch_multiplier: u64, /// Maximum amount of signatures per transaction pub max_signatures_per_transaction: u8, - /// Fee unshielding gas limit - pub fee_unshielding_gas_limit: u64, + /// The gas limit for a masp transaction paying fees + pub masp_fee_payment_gas_limit: u64, /// Map of the cost per gas unit for every token allowed for fee payment pub minimum_gas_price: BTreeMap, } diff --git a/crates/apps_lib/src/config/genesis/chain.rs b/crates/apps_lib/src/config/genesis/chain.rs index 690d67909f..a5c088f8ee 100644 --- a/crates/apps_lib/src/config/genesis/chain.rs +++ b/crates/apps_lib/src/config/genesis/chain.rs @@ -304,7 +304,7 @@ impl Finalized { epochs_per_year, masp_epoch_multiplier, max_signatures_per_transaction, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, max_block_gas, minimum_gas_price, max_tx_bytes, @@ -350,7 +350,7 @@ impl Finalized { masp_epoch_multiplier, max_proposal_bytes, max_signatures_per_transaction, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, max_block_gas, minimum_gas_price: minimum_gas_price .iter() diff --git a/crates/apps_lib/src/config/genesis/templates.rs b/crates/apps_lib/src/config/genesis/templates.rs index db5ac13772..2b48a6188b 100644 --- a/crates/apps_lib/src/config/genesis/templates.rs +++ b/crates/apps_lib/src/config/genesis/templates.rs @@ -299,8 +299,8 @@ pub struct ChainParams { pub max_signatures_per_transaction: u8, /// Max gas for block pub max_block_gas: u64, - /// Fee unshielding gas limit - pub fee_unshielding_gas_limit: u64, + /// Gas limit of a masp 
transaction paying fees + pub masp_fee_payment_gas_limit: u64, /// Map of the cost per gas unit for every token allowed for fee payment pub minimum_gas_price: T::GasMinimums, } @@ -324,7 +324,7 @@ impl ChainParams { masp_epoch_multiplier, max_signatures_per_transaction, max_block_gas, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, minimum_gas_price, } = self; let mut min_gas_prices = BTreeMap::default(); @@ -370,7 +370,7 @@ impl ChainParams { masp_epoch_multiplier, max_signatures_per_transaction, max_block_gas, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, minimum_gas_price: min_gas_prices, }) } diff --git a/crates/core/src/parameters.rs b/crates/core/src/parameters.rs index aaf2c294f8..8ba72ee660 100644 --- a/crates/core/src/parameters.rs +++ b/crates/core/src/parameters.rs @@ -51,8 +51,8 @@ pub struct Parameters { pub masp_epoch_multiplier: u64, /// Maximum number of signature per transaction pub max_signatures_per_transaction: u8, - /// Fee unshielding gas limit - pub fee_unshielding_gas_limit: u64, + /// The gas limit for a masp transaction paying fees + pub masp_fee_payment_gas_limit: u64, /// Map of the cost per gas unit for every token allowed for fee payment pub minimum_gas_price: BTreeMap, /// Enable the native token transfer if it is true diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index 84a417dd90..c759a8a3a4 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -670,7 +670,7 @@ where // limit of the transaction let min_gas_limit = state .read::( - &namada_parameters::storage::get_fee_unshielding_gas_limit_key(), + &namada_parameters::storage::get_masp_fee_payment_gas_limit_key(), ) .expect("Error reading the storage") .expect("Missing masp fee payment gas limit in storage") diff --git a/crates/node/src/storage/mod.rs b/crates/node/src/storage/mod.rs index 49d37c81a8..f45eeaf639 100644 --- a/crates/node/src/storage/mod.rs +++ b/crates/node/src/storage/mod.rs @@ -175,7 +175,7 @@ mod tests { epochs_per_year: 365, masp_epoch_multiplier: 2, max_signatures_per_transaction: 10, - fee_unshielding_gas_limit: 0, + masp_fee_payment_gas_limit: 0, minimum_gas_price: Default::default(), is_native_token_transferable: true, }; diff --git a/crates/parameters/src/lib.rs b/crates/parameters/src/lib.rs index b8dbe12a88..9b8de510d5 100644 --- a/crates/parameters/src/lib.rs +++ b/crates/parameters/src/lib.rs @@ -77,7 +77,7 @@ where masp_epoch_multiplier, max_signatures_per_transaction, minimum_gas_price, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, is_native_token_transferable, } = parameters; @@ -97,10 +97,11 @@ where let epoch_key = storage::get_epoch_duration_storage_key(); storage.write(&epoch_key, epoch_duration)?; - // write fee unshielding gas limit - let fee_unshielding_gas_limit_key = - storage::get_fee_unshielding_gas_limit_key(); - storage.write(&fee_unshielding_gas_limit_key, fee_unshielding_gas_limit)?; + // write masp fee payment gas limit + let masp_fee_payment_gas_limit_key = + storage::get_masp_fee_payment_gas_limit_key(); + storage + .write(&masp_fee_payment_gas_limit_key, masp_fee_payment_gas_limit)?; // write vp allowlist parameter let vp_allowlist_key = storage::get_vp_allowlist_storage_key(); @@ -371,11 +372,11 @@ where .ok_or(ReadError::ParametersMissing) .into_storage_result()?; - // read fee unshielding gas limit - let fee_unshielding_gas_limit_key = - storage::get_fee_unshielding_gas_limit_key(); - let value = 
storage.read(&fee_unshielding_gas_limit_key)?; - let fee_unshielding_gas_limit: u64 = value + // read masp fee payment gas limit + let masp_fee_payment_gas_limit_key = + storage::get_masp_fee_payment_gas_limit_key(); + let value = storage.read(&masp_fee_payment_gas_limit_key)?; + let masp_fee_payment_gas_limit: u64 = value .ok_or(ReadError::ParametersMissing) .into_storage_result()?; @@ -432,7 +433,7 @@ where masp_epoch_multiplier, max_signatures_per_transaction, minimum_gas_price, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, is_native_token_transferable, }) } @@ -477,7 +478,7 @@ where epochs_per_year: 365, masp_epoch_multiplier: 2, max_signatures_per_transaction: 10, - fee_unshielding_gas_limit: 0, + masp_fee_payment_gas_limit: 0, minimum_gas_price: Default::default(), is_native_token_transferable: true, }; diff --git a/crates/parameters/src/storage.rs b/crates/parameters/src/storage.rs index 27204f9568..f17f7824e2 100644 --- a/crates/parameters/src/storage.rs +++ b/crates/parameters/src/storage.rs @@ -38,7 +38,7 @@ struct Keys { max_tx_bytes: &'static str, max_block_gas: &'static str, minimum_gas_price: &'static str, - fee_unshielding_gas_limit: &'static str, + masp_fee_payment_gas_limit: &'static str, max_signatures_per_transaction: &'static str, native_token_transferable: &'static str, } @@ -117,8 +117,8 @@ pub fn get_tx_allowlist_storage_key() -> Key { } /// Storage key used for the fee unshielding gas limit -pub fn get_fee_unshielding_gas_limit_key() -> Key { - get_fee_unshielding_gas_limit_key_at_addr(ADDRESS) +pub fn get_masp_fee_payment_gas_limit_key() -> Key { + get_masp_fee_payment_gas_limit_key_at_addr(ADDRESS) } /// Storage key used for max_epected_time_per_block parameter. diff --git a/crates/proof_of_stake/src/lib.rs b/crates/proof_of_stake/src/lib.rs index 19b003541f..7b68686934 100644 --- a/crates/proof_of_stake/src/lib.rs +++ b/crates/proof_of_stake/src/lib.rs @@ -2726,7 +2726,7 @@ pub mod test_utils { epochs_per_year: 10000000, masp_epoch_multiplier: 2, max_signatures_per_transaction: 15, - fee_unshielding_gas_limit: 10000, + masp_fee_payment_gas_limit: 10000, minimum_gas_price: BTreeMap::new(), is_native_token_transferable: true, }; diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index f62ab1c862..6165ffc4f7 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -760,7 +760,7 @@ mod tests { epochs_per_year: 100, masp_epoch_multiplier: 2, max_signatures_per_transaction: 15, - fee_unshielding_gas_limit: 20_000, + masp_fee_payment_gas_limit: 20_000, minimum_gas_price: BTreeMap::default(), is_native_token_transferable: true, }; diff --git a/genesis/localnet/parameters.toml b/genesis/localnet/parameters.toml index bef2c1ae2d..6197990bcf 100644 --- a/genesis/localnet/parameters.toml +++ b/genesis/localnet/parameters.toml @@ -24,8 +24,8 @@ masp_epoch_multiplier = 2 max_signatures_per_transaction = 15 # Max gas for block max_block_gas = 20000000 -# Fee unshielding gas limit -fee_unshielding_gas_limit = 20000 +# Masp fee payment gas limit +masp_fee_payment_gas_limit = 20000 # Map of the cost per gas unit for every token allowed for fee payment [parameters.minimum_gas_price] diff --git a/genesis/starter/parameters.toml b/genesis/starter/parameters.toml index dfb01522d6..43ad86b8d4 100644 --- a/genesis/starter/parameters.toml +++ b/genesis/starter/parameters.toml @@ -24,8 +24,8 @@ masp_epoch_multiplier = 2 max_signatures_per_transaction = 15 # Max gas for block max_block_gas = 20000000 -# Fee unshielding gas limit 
-fee_unshielding_gas_limit = 20000 +# Masp fee payment gas limit +masp_fee_payment_gas_limit = 20000 # Map of the cost per gas unit for every token allowed for fee payment [parameters.minimum_gas_price] From 2f072b07e5cf087dd58feccf39f1d1d5be6de36d Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 31 May 2024 16:03:51 +0200 Subject: [PATCH 17/40] Skips the execution of the first inner tx when masp fee payment --- crates/namada/src/ledger/mod.rs | 26 +- crates/namada/src/ledger/protocol/mod.rs | 76 ++-- crates/node/src/shell/finalize_block.rs | 443 ++++++++++++++--------- crates/state/src/wl_state.rs | 24 ++ 4 files changed, 352 insertions(+), 217 deletions(-) diff --git a/crates/namada/src/ledger/mod.rs b/crates/namada/src/ledger/mod.rs index 8d9effb59e..8bccb5e529 100644 --- a/crates/namada/src/ledger/mod.rs +++ b/crates/namada/src/ledger/mod.rs @@ -26,7 +26,7 @@ mod dry_run_tx { use namada_gas::Gas; use namada_sdk::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; use namada_state::{DBIter, ResultExt, StorageHasher, DB}; - use namada_tx::data::{GasLimit, TxResult}; + use namada_tx::data::{ExtendedTxResult, GasLimit, TxResult}; use super::protocol; use crate::vm::wasm::{TxCache, VpCache}; @@ -54,8 +54,9 @@ mod dry_run_tx { let tx = Tx::try_from(&request.data[..]).into_storage_result()?; tx.validate_tx().into_storage_result()?; + // FIXME: can't just call dispatch_tx? // Wrapper dry run to allow estimating the gas cost of a transaction - let (mut tx_result, tx_gas_meter) = match tx.header().tx_type { + let (extended_tx_result, tx_gas_meter) = match tx.header().tx_type { TxType::Wrapper(wrapper) => { let gas_limit = Gas::try_from(wrapper.gas_limit).into_storage_result()?; @@ -76,8 +77,6 @@ mod dry_run_tx { None, ) .into_storage_result()?; - // FIXME: if fees were paid with first inner tx skip it when - // executing the batch temp_state.write_log_mut().commit_tx(); let available_gas = tx_gas_meter.borrow().get_available_gas(); @@ -90,12 +89,27 @@ mod dry_run_tx { namada_parameters::get_max_block_gas(ctx.state)?; let gas_limit = Gas::try_from(GasLimit::from(max_block_gas)) .into_storage_result()?; - (TxResult::default(), TxGasMeter::new(gas_limit)) + ( + TxResult::default().to_extended_result(None), + TxGasMeter::new(gas_limit), + ) } }; + // FIXME: can't just call dispatch_tx? 
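[Editor's note — illustrative sketch, not part of the patch series.] When the first inner transaction of a batch has already been executed to pay fees via MASP, the dry-run path below (and `get_batch_txs_to_execute`, introduced in a later patch) must not run it a second time. The iteration pattern, shown with hypothetical plain types:

    // Sketch only: skip the first commitment if it already ran during fee payment.
    fn txs_to_execute<'a, T>(
        commitments: &'a [T],
        paid_fees_via_masp: bool,
    ) -> impl Iterator<Item = &'a T> {
        let mut iter = commitments.iter();
        if paid_fees_via_masp {
            // The first inner tx was consumed by MASP fee payment; don't re-execute it.
            iter.next();
        }
        iter
    }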
+ let ExtendedTxResult { + mut tx_result, + masp_tx_refs, + } = extended_tx_result; let tx_gas_meter = RefCell::new(tx_gas_meter); - for cmt in tx.commitments() { + // FIXME: improve this + let mut batch_iter = tx.commitments().iter(); + if !masp_tx_refs.0.is_empty() { + // If fees were paid via masp skip the first transaction of the + // batch which has already been executed + batch_iter.next(); + } + for cmt in batch_iter { let batched_tx = tx.batch_ref_tx(cmt); let batched_tx_result = protocol::apply_wasm_tx( batched_tx, diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index c759a8a3a4..d21c88b4e1 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -7,6 +7,7 @@ use borsh_ext::BorshSerializeExt; use eyre::{eyre, WrapErr}; use namada_core::booleans::BoolResultUnitExt; use namada_core::hash::Hash; +use namada_core::masp::MaspTxRefs; use namada_events::extend::{ ComposeEvent, Height as HeightAttr, TxHash as TxHashAttr, }; @@ -14,6 +15,7 @@ use namada_events::EventLevel; use namada_gas::TxGasMeter; use namada_token::event::{TokenEvent, TokenOperation, UserAccount}; use namada_token::utils::is_masp_transfer; +use namada_tx::action::Read; use namada_tx::data::protocol::{ProtocolTx, ProtocolTxType}; use namada_tx::data::{ BatchResults, BatchedTxResult, ExtendedTxResult, TxResult, VpStatusFlags, @@ -186,7 +188,7 @@ pub enum DispatchArgs<'a, CA: 'static + WasmCacheAccess + Sync> { tx_index: TxIndex, /// The result of the corresponding wrapper tx (missing if governance /// transaction) - wrapper_tx_result: Option>, + wrapper_tx_result: Option>, /// Vp cache vp_wasm_cache: &'a mut VpCache, /// Tx cache @@ -310,7 +312,7 @@ where tx_wasm_cache, ); - let tx_result = apply_wrapper_tx( + apply_wrapper_tx( tx, wrapper, tx_bytes, @@ -319,16 +321,14 @@ where &mut shell_params, Some(block_proposer), ) - .map_err(|e| Error::WrapperRunnerError(e.to_string()))?; - - Ok(tx_result.to_extended_result(None)) + .map_err(|e| Error::WrapperRunnerError(e.to_string()).into()) } } } fn dispatch_inner_txs<'a, D, H, CA>( tx: &Tx, - tx_result: TxResult, + mut extended_tx_result: ExtendedTxResult, tx_index: TxIndex, tx_gas_meter: &'a RefCell, state: &'a mut WlState, @@ -340,11 +340,15 @@ where H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, { - let mut extended_tx_result = tx_result.to_extended_result(None); + // FIXME: improve this + let mut batch_iter = tx.commitments().iter(); + if !extended_tx_result.masp_tx_refs.0.is_empty() { + // If fees were paid via masp skip the first transaction of the batch + // which has already been executed + batch_iter.next(); + } - // TODO(namada#2597): handle masp fee payment in the first inner tx - // if necessary - for cmt in tx.commitments() { + for cmt in batch_iter { match apply_wasm_tx( tx.batch_ref_tx(cmt), &tx_index, @@ -424,9 +428,9 @@ pub(crate) fn apply_wrapper_tx( tx_gas_meter: &RefCell, shell_params: &mut ShellParams<'_, S, D, H, CA>, block_proposer: Option<&Address>, -) -> Result> +) -> Result> where - S: State + Sync, + S: State + Read + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, @@ -451,8 +455,9 @@ where // dropped the write log leading this function call to be essentially a // no-op) shell_params.state.write_log_mut().commit_tx(); - let batch_results = - payment_result?.map_or_else(BatchResults::default, |batched_result| { + let (batch_results, masp_tx_refs) = 
payment_result?.map_or_else( + || (BatchResults::default(), None), + |(batched_result, masp_section_ref)| { let mut batch = BatchResults::default(); batch.0.insert( // Ok to unwrap cause if we have a batched result it means @@ -460,11 +465,15 @@ where tx.first_commitments().unwrap().get_hash(), Ok(batched_result), ); - batch - }); + (batch, Some(MaspTxRefs(vec![masp_section_ref]))) + }, + ); - // FIXME: if fees were paid with first inner tx signal it to the caller -> - // use the ExtendedResult from the other branch + // FIXME: shoudld we maybe return a DispatchError and apply the same logic + // we apply for raw transactions? In this case probably I would not need to + // commit inside these functions but only in finalize block + // FIXME: At that point maybe I could share functions between finalize block + // and prepare/process proposal // Account for gas tx_gas_meter @@ -475,7 +484,8 @@ where Ok(TxResult { gas_used: tx_gas_meter.borrow().get_tx_consumed_gas(), batch_results, - }) + } + .to_extended_result(masp_tx_refs)) } /// Perform the actual transfer of fees from the fee payer to the block @@ -487,9 +497,9 @@ pub fn transfer_fee( tx: &Tx, wrapper: &WrapperTx, tx_index: &TxIndex, -) -> Result> +) -> Result> where - S: State + StorageRead + Sync, + S: State + StorageRead + Read + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, @@ -646,7 +656,7 @@ where } } -// FIXME: add tests +// FIXME: search for all the TODOS for 2596 and 2597 and remove them fn try_masp_fee_payment( ShellParams { tx_gas_meter, @@ -656,9 +666,9 @@ fn try_masp_fee_payment( }: &mut ShellParams<'_, S, D, H, CA>, tx: &Tx, tx_index: &TxIndex, -) -> Result> +) -> Result> where - S: State + StorageRead + Sync, + S: State + StorageRead + Read + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, @@ -682,7 +692,6 @@ where .map_err(|e| Error::GasError(e.to_string()))?; let ref_unshield_gas_meter = RefCell::new(gas_meter); - // FIXME: call dispatch_tx after merge? let valid_batched_tx_result = { // NOTE: A clean tx write log must be provided to this call // for a correct vp validation. Block write log, instead, @@ -719,10 +728,17 @@ where ); } + // FIXME: maybe I don't need the is_masp_trasnfer function + // anymore if I get the masp sectio nhash since this is validate + // by the masp vp? 
Not sure double checkt this + // FIXME: handle the unwraps + let masp_ref = namada_tx::action::get_masp_section_ref(*state) + .unwrap() + .unwrap(); // Ensure that the transaction is actually a masp one, otherwise // reject (is_masp_transfer(&result.changed_keys) && result.is_accepted()) - .then_some(result) + .then_some((result, masp_ref)) } Err(e) => { state.write_log_mut().drop_tx_keep_precommit(); @@ -830,9 +846,9 @@ pub fn check_fees( shell_params: &mut ShellParams<'_, S, D, H, CA>, tx: &Tx, wrapper: &WrapperTx, -) -> Result> +) -> Result> where - S: State + StorageRead + Sync, + S: State + StorageRead + Read + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, @@ -841,9 +857,9 @@ where shell_params: &mut ShellParams<'_, S, D, H, CA>, tx: &Tx, wrapper: &WrapperTx, - ) -> Result> + ) -> Result> where - S: State + StorageRead + Sync, + S: State + StorageRead + Read + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index ab9b220c2e..2eafdafbde 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -364,7 +364,7 @@ where tx_index: tx_data.tx_index, gas_meter: tx_data.tx_gas_meter, event: tx_logs.tx_event, - tx_result: extended_tx_result.tx_result, + extended_tx_result, }); } _ => self.handle_inner_tx_results( @@ -374,6 +374,7 @@ where &mut tx_logs, ), }, + // FIXME: look here if we want to use DispatchError for wrappers too Err(DispatchError { error: protocol::Error::WrapperRunnerError(msg), tx_result: _, @@ -391,7 +392,11 @@ where .extend(GasUsed(tx_data.tx_gas_meter.get_tx_consumed_gas())) .extend(Info(msg.to_string())) .extend(Code(ResultCode::InvalidTx)); - // Make sure to clean the write logs for the next transaction + // FIXME: is this correct? what if failed wrapper but I want to + // commit the fee payment? 
It's done in the caleld functions but + // it won't be the case anymore if I want to use the + // DispatchError Make sure to clean the write + // logs for the next transaction self.state.write_log_mut().drop_tx(); } Err(dispatch_error) => { @@ -828,7 +833,7 @@ where tx_index, gas_meter: tx_gas_meter, event: tx_event, - tx_result: wrapper_tx_result, + extended_tx_result: wrapper_tx_result, } in successful_wrappers { let tx_hash = tx.header_hash(); @@ -895,7 +900,7 @@ struct WrapperCache { tx_index: usize, gas_meter: TxGasMeter, event: Event, - tx_result: namada::tx::data::TxResult, + extended_tx_result: namada::tx::data::ExtendedTxResult, } struct TxData<'tx> { @@ -1359,11 +1364,7 @@ mod test_finalize_block { .enumerate() .find_map( |(idx, tx_hash)| { - if tx_hash == &hash { - Some(idx) - } else { - None - } + if tx_hash == &hash { Some(idx) } else { None } }, ) .unwrap(); @@ -2978,21 +2979,25 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check transaction's hash in storage - assert!(shell - .shell - .state - .write_log() - .has_replay_protection_entry(&wrapper_tx.raw_header_hash())); + assert!( + shell + .shell + .state + .write_log() + .has_replay_protection_entry(&wrapper_tx.raw_header_hash()) + ); // Check that the hash is not present in the merkle tree shell.state.commit_block().unwrap(); - assert!(!shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&wrapper_hash_key) - .unwrap()); + assert!( + !shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&wrapper_hash_key) + .unwrap() + ); // test that a commitment to replay protection gets added. let reprot_key = replay_protection::commitment_key(); @@ -3039,22 +3044,26 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check that the hashes are present in the merkle tree shell.state.commit_block().unwrap(); - assert!(shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&convert_key) - .unwrap()); - assert!(shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&commitment_key) - .unwrap()); + assert!( + shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&convert_key) + .unwrap() + ); + assert!( + shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&commitment_key) + .unwrap() + ); } /// Test that a tx that has already been applied in the same block @@ -3132,26 +3141,34 @@ mod test_finalize_block { assert_eq!(code, ResultCode::WasmRuntimeError); for wrapper in [&wrapper, &new_wrapper] { - assert!(shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.raw_header_hash())); - assert!(!shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.header_hash())); + assert!( + shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.raw_header_hash()) + ); + assert!( + !shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.header_hash()) + ); } // Commit to check the hashes from storage shell.commit(); for wrapper in [&wrapper, &new_wrapper] { - assert!(shell - .state - .has_replay_protection_entry(&wrapper.raw_header_hash()) - .unwrap()); - assert!(!shell - .state - .has_replay_protection_entry(&wrapper.header_hash()) - .unwrap()); + assert!( + shell + .state + .has_replay_protection_entry(&wrapper.raw_header_hash()) + .unwrap() + ); + assert!( + !shell + .state + .has_replay_protection_entry(&wrapper.header_hash()) + .unwrap() + ); } } @@ -3434,23 +3451,29 @@ mod test_finalize_block { &unsigned_wrapper, &wrong_commitment_wrapper, ] { - assert!(!shell - .state - .write_log() - 
.has_replay_protection_entry(&valid_wrapper.raw_header_hash())); - assert!(shell + assert!( + !shell.state.write_log().has_replay_protection_entry( + &valid_wrapper.raw_header_hash() + ) + ); + assert!( + shell + .state + .write_log() + .has_replay_protection_entry(&valid_wrapper.header_hash()) + ); + } + assert!( + shell.state.write_log().has_replay_protection_entry( + &failing_wrapper.raw_header_hash() + ) + ); + assert!( + !shell .state .write_log() - .has_replay_protection_entry(&valid_wrapper.header_hash())); - } - assert!(shell - .state - .write_log() - .has_replay_protection_entry(&failing_wrapper.raw_header_hash())); - assert!(!shell - .state - .write_log() - .has_replay_protection_entry(&failing_wrapper.header_hash())); + .has_replay_protection_entry(&failing_wrapper.header_hash()) + ); // Commit to check the hashes from storage shell.commit(); @@ -3459,23 +3482,33 @@ mod test_finalize_block { unsigned_wrapper, wrong_commitment_wrapper, ] { - assert!(!shell + assert!( + !shell + .state + .has_replay_protection_entry( + &valid_wrapper.raw_header_hash() + ) + .unwrap() + ); + assert!( + shell + .state + .has_replay_protection_entry(&valid_wrapper.header_hash()) + .unwrap() + ); + } + assert!( + shell .state - .has_replay_protection_entry(&valid_wrapper.raw_header_hash()) - .unwrap()); - assert!(shell + .has_replay_protection_entry(&failing_wrapper.raw_header_hash()) + .unwrap() + ); + assert!( + !shell .state - .has_replay_protection_entry(&valid_wrapper.header_hash()) - .unwrap()); - } - assert!(shell - .state - .has_replay_protection_entry(&failing_wrapper.raw_header_hash()) - .unwrap()); - assert!(!shell - .state - .has_replay_protection_entry(&failing_wrapper.header_hash()) - .unwrap()); + .has_replay_protection_entry(&failing_wrapper.header_hash()) + .unwrap() + ); } #[test] @@ -3535,14 +3568,18 @@ mod test_finalize_block { let code = event[0].read_attribute::().expect("Test failed"); assert_eq!(code, ResultCode::InvalidTx); - assert!(shell - .state - .write_log() - .has_replay_protection_entry(&wrapper_hash)); - assert!(!shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.raw_header_hash())); + assert!( + shell + .state + .write_log() + .has_replay_protection_entry(&wrapper_hash) + ); + assert!( + !shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.raw_header_hash()) + ); } // Test that the fees are paid even if the inner transaction fails and its @@ -3940,9 +3977,11 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Consensus) ); - assert!(enqueued_slashes_handle() - .at(&Epoch::default()) - .is_empty(&shell.state)?); + assert!( + enqueued_slashes_handle() + .at(&Epoch::default()) + .is_empty(&shell.state)? + ); assert_eq!( get_num_consensus_validators(&shell.state, Epoch::default()) .unwrap(), @@ -3961,17 +4000,21 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Jailed) ); - assert!(enqueued_slashes_handle() - .at(&epoch) - .is_empty(&shell.state)?); + assert!( + enqueued_slashes_handle() + .at(&epoch) + .is_empty(&shell.state)? + ); assert_eq!( get_num_consensus_validators(&shell.state, epoch).unwrap(), 5_u64 ); } - assert!(!enqueued_slashes_handle() - .at(&processing_epoch) - .is_empty(&shell.state)?); + assert!( + !enqueued_slashes_handle() + .at(&processing_epoch) + .is_empty(&shell.state)? 
+ ); // Advance to the processing epoch loop { @@ -3994,9 +4037,11 @@ mod test_finalize_block { // println!("Reached processing epoch"); break; } else { - assert!(enqueued_slashes_handle() - .at(&shell.state.in_mem().block.epoch) - .is_empty(&shell.state)?); + assert!( + enqueued_slashes_handle() + .at(&shell.state.in_mem().block.epoch) + .is_empty(&shell.state)? + ); let stake1 = read_validator_stake( &shell.state, ¶ms, @@ -4480,11 +4525,13 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(misbehavior_epoch)); - assert!(namada_proof_of_stake::storage::validator_slashes_handle( - &val1.address - ) - .is_empty(&shell.state) - .unwrap()); + assert!( + namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.state) + .unwrap() + ); tracing::debug!("Advancing to epoch 7"); @@ -4549,18 +4596,22 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(Epoch(4))); - assert!(namada_proof_of_stake::is_validator_frozen( - &shell.state, - &val1.address, - current_epoch, - ¶ms - ) - .unwrap()); - assert!(namada_proof_of_stake::storage::validator_slashes_handle( - &val1.address - ) - .is_empty(&shell.state) - .unwrap()); + assert!( + namada_proof_of_stake::is_validator_frozen( + &shell.state, + &val1.address, + current_epoch, + ¶ms + ) + .unwrap() + ); + assert!( + namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.state) + .unwrap() + ); let pre_stake_10 = namada_proof_of_stake::storage::read_validator_stake( @@ -5438,9 +5489,11 @@ mod test_finalize_block { shell.vp_wasm_cache.clone(), ); let parameters = ParametersVp { ctx }; - assert!(parameters - .validate_tx(&batched_tx, &keys_changed, &verifiers) - .is_ok()); + assert!( + parameters + .validate_tx(&batched_tx, &keys_changed, &verifiers) + .is_ok() + ); // we advance forward to the next epoch let mut req = FinalizeBlock::default(); @@ -5513,11 +5566,13 @@ mod test_finalize_block { let inner_results = inner_tx_result.batch_results.0; for cmt in batch.commitments() { - assert!(inner_results - .get(&cmt.get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); + assert!( + inner_results + .get(&cmt.get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); } // Check storage modifications @@ -5555,18 +5610,24 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); + assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); // Assert that the last tx didn't run - assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); + assert!( + !inner_results.contains_key(&batch.commitments()[2].get_hash()) + ); // Check storage modifications are missing for key in ["random_key_1", "random_key_2", "random_key_3"] { @@ -5597,21 +5658,27 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| 
res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); - assert!(inner_results - .get(&batch.commitments()[2].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); + assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); + assert!( + inner_results + .get(&batch.commitments()[2].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); // Check storage modifications assert_eq!( @@ -5622,10 +5689,12 @@ mod test_finalize_block { .unwrap(), STORAGE_VALUE ); - assert!(!shell - .state - .has_key(&"random_key_2".parse().unwrap()) - .unwrap()); + assert!( + !shell + .state + .has_key(&"random_key_2".parse().unwrap()) + .unwrap() + ); assert_eq!( shell .state @@ -5657,18 +5726,24 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); + assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); // Assert that the last tx didn't run - assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); + assert!( + !inner_results.contains_key(&batch.commitments()[2].get_hash()) + ); // Check storage modifications are missing for key in ["random_key_1", "random_key_2", "random_key_3"] { @@ -5698,18 +5773,24 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); + assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); // Assert that the last tx didn't run - assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); + assert!( + !inner_results.contains_key(&batch.commitments()[2].get_hash()) + ); // Check storage modifications assert_eq!( diff --git a/crates/state/src/wl_state.rs b/crates/state/src/wl_state.rs index b5103ce723..26e3a20cca 100644 --- a/crates/state/src/wl_state.rs +++ b/crates/state/src/wl_state.rs @@ -1392,3 +1392,27 @@ where } } } + +impl namada_tx::action::Read for TempWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type Err = Error; + + fn read_temp( + &self, + key: &storage::Key, + ) -> Result> { + let (log_val, _) = self.write_log().read_temp(key).unwrap(); + match log_val { + Some(value) => { + let value = + namada_core::borsh::BorshDeserialize::try_from_slice(value) + .map_err(Error::BorshCodingError)?; + Ok(Some(value)) + } + None => Ok(None), + } + } +} From ba943ef513aa4e4e9eacf6c8731d0a92f24cc769 Mon Sep 17 
00:00:00 2001 From: Marco Granelli Date: Fri, 31 May 2024 17:48:03 +0200 Subject: [PATCH 18/40] Refactors batch execution in case of masp fee payment --- crates/namada/src/ledger/mod.rs | 14 ++---- crates/namada/src/ledger/protocol/mod.rs | 61 +++++++++++++++--------- 2 files changed, 43 insertions(+), 32 deletions(-) diff --git a/crates/namada/src/ledger/mod.rs b/crates/namada/src/ledger/mod.rs index 8bccb5e529..b453a70573 100644 --- a/crates/namada/src/ledger/mod.rs +++ b/crates/namada/src/ledger/mod.rs @@ -96,20 +96,14 @@ mod dry_run_tx { } }; - // FIXME: can't just call dispatch_tx? let ExtendedTxResult { mut tx_result, - masp_tx_refs, + ref masp_tx_refs, } = extended_tx_result; let tx_gas_meter = RefCell::new(tx_gas_meter); - // FIXME: improve this - let mut batch_iter = tx.commitments().iter(); - if !masp_tx_refs.0.is_empty() { - // If fees were paid via masp skip the first transaction of the - // batch which has already been executed - batch_iter.next(); - } - for cmt in batch_iter { + for cmt in + crate::ledger::protocol::get_batch_txs_to_execute(&tx, masp_tx_refs) + { let batched_tx = tx.batch_ref_tx(cmt); let batched_tx_result = protocol::apply_wasm_tx( batched_tx, diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index d21c88b4e1..c98d18d6b5 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -21,7 +21,7 @@ use namada_tx::data::{ BatchResults, BatchedTxResult, ExtendedTxResult, TxResult, VpStatusFlags, VpsResult, WrapperTx, }; -use namada_tx::{BatchedTxRef, Tx}; +use namada_tx::{BatchedTxRef, Tx, TxCommitments}; use namada_vote_ext::EthereumTxData; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use smooth_operator::checked; @@ -326,6 +326,20 @@ where } } +pub(crate) fn get_batch_txs_to_execute<'a>( + tx: &'a Tx, + masp_tx_refs: &MaspTxRefs, +) -> impl Iterator { + let mut batch_iter = tx.commitments().iter(); + if !masp_tx_refs.0.is_empty() { + // If fees were paid via masp skip the first transaction of the batch + // which has already been executed + batch_iter.next(); + } + + batch_iter +} + fn dispatch_inner_txs<'a, D, H, CA>( tx: &Tx, mut extended_tx_result: ExtendedTxResult, @@ -340,15 +354,7 @@ where H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, { - // FIXME: improve this - let mut batch_iter = tx.commitments().iter(); - if !extended_tx_result.masp_tx_refs.0.is_empty() { - // If fees were paid via masp skip the first transaction of the batch - // which has already been executed - batch_iter.next(); - } - - for cmt in batch_iter { + for cmt in get_batch_txs_to_execute(tx, &extended_tx_result.masp_tx_refs) { match apply_wasm_tx( tx.batch_ref_tx(cmt), &tx_index, @@ -430,7 +436,7 @@ pub(crate) fn apply_wrapper_tx( block_proposer: Option<&Address>, ) -> Result> where - S: State + Read + Sync, + S: State + Read + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, @@ -499,7 +505,10 @@ pub fn transfer_fee( tx_index: &TxIndex, ) -> Result> where - S: State + StorageRead + Read + Sync, + S: State + + StorageRead + + Read + + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, @@ -668,7 +677,10 @@ fn try_masp_fee_payment( tx_index: &TxIndex, ) -> Result> where - S: State + StorageRead + Read + Sync, + S: State + + StorageRead + + Read + + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + 
Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, @@ -728,17 +740,16 @@ where ); } - // FIXME: maybe I don't need the is_masp_trasnfer function - // anymore if I get the masp sectio nhash since this is validate - // by the masp vp? Not sure double checkt this - // FIXME: handle the unwraps let masp_ref = namada_tx::action::get_masp_section_ref(*state) - .unwrap() - .unwrap(); + .map_err(Error::StateError)?; // Ensure that the transaction is actually a masp one, otherwise // reject (is_masp_transfer(&result.changed_keys) && result.is_accepted()) - .then_some((result, masp_ref)) + .then(|| { + masp_ref + .map(|masp_section_ref| (result, masp_section_ref)) + }) + .flatten() } Err(e) => { state.write_log_mut().drop_tx_keep_precommit(); @@ -848,7 +859,10 @@ pub fn check_fees( wrapper: &WrapperTx, ) -> Result> where - S: State + StorageRead + Read + Sync, + S: State + + StorageRead + + Read + + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, @@ -859,7 +873,10 @@ where wrapper: &WrapperTx, ) -> Result> where - S: State + StorageRead + Read + Sync, + S: State + + StorageRead + + Read + + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, From 07522e225aa96152c4ac2102c83815a109dd3d3e Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Sun, 2 Jun 2024 12:00:29 +0200 Subject: [PATCH 19/40] Adds integration tests for masp fee payment --- crates/namada/src/ledger/mod.rs | 25 +- crates/namada/src/ledger/protocol/mod.rs | 8 +- crates/node/src/shell/finalize_block.rs | 437 +++++++++-------------- crates/state/src/write_log.rs | 5 + crates/tests/src/integration/masp.rs | 319 +++++++++++++++++ 5 files changed, 514 insertions(+), 280 deletions(-) diff --git a/crates/namada/src/ledger/mod.rs b/crates/namada/src/ledger/mod.rs index b453a70573..38c27a5a33 100644 --- a/crates/namada/src/ledger/mod.rs +++ b/crates/namada/src/ledger/mod.rs @@ -54,7 +54,6 @@ mod dry_run_tx { let tx = Tx::try_from(&request.data[..]).into_storage_result()?; tx.validate_tx().into_storage_result()?; - // FIXME: can't just call dispatch_tx? // Wrapper dry run to allow estimating the gas cost of a transaction let (extended_tx_result, tx_gas_meter) = match tx.header().tx_type { TxType::Wrapper(wrapper) => { @@ -275,8 +274,8 @@ mod test { } #[tokio::test] - async fn test_shell_queries_router_with_client() - -> namada_state::StorageResult<()> { + async fn test_shell_queries_router_with_client( + ) -> namada_state::StorageResult<()> { // Initialize the `TestClient` let mut client = TestClient::new(RPC); // store the wasm code @@ -310,17 +309,15 @@ mod test { .dry_run_tx(&client, Some(tx_bytes), None, false) .await .unwrap(); - assert!( - result - .data - .batch_results - .0 - .get(&cmt.get_hash()) - .unwrap() - .as_ref() - .unwrap() - .is_accepted() - ); + assert!(result + .data + .batch_results + .0 + .get(&cmt.get_hash()) + .unwrap() + .as_ref() + .unwrap() + .is_accepted()); // Request storage value for a balance key ... 
let token_addr = address::testing::established_address_1(); diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index c98d18d6b5..75cf2034ff 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -458,7 +458,7 @@ where // Commit tx write log even in case of subsequent errors (if the fee payment // failed instead, than the previous two functions must have already - // dropped the write log leading this function call to be essentially a + // dropped the write log, leading this function call to be essentially a // no-op) shell_params.state.write_log_mut().commit_tx(); let (batch_results, masp_tx_refs) = payment_result?.map_or_else( @@ -475,12 +475,6 @@ where }, ); - // FIXME: shoudld we maybe return a DispatchError and apply the same logic - // we apply for raw transactions? In this case probably I would not need to - // commit inside these functions but only in finalize block - // FIXME: At that point maybe I could share functions between finalize block - // and prepare/process proposal - // Account for gas tx_gas_meter .borrow_mut() diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index 2eafdafbde..7553f466c2 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -374,7 +374,6 @@ where &mut tx_logs, ), }, - // FIXME: look here if we want to use DispatchError for wrappers too Err(DispatchError { error: protocol::Error::WrapperRunnerError(msg), tx_result: _, @@ -392,11 +391,7 @@ where .extend(GasUsed(tx_data.tx_gas_meter.get_tx_consumed_gas())) .extend(Info(msg.to_string())) .extend(Code(ResultCode::InvalidTx)); - // FIXME: is this correct? what if failed wrapper but I want to - // commit the fee payment? It's done in the caleld functions but - // it won't be the case anymore if I want to use the - // DispatchError Make sure to clean the write - // logs for the next transaction + // Make sure to clean the write logs for the next transaction self.state.write_log_mut().drop_tx(); } Err(dispatch_error) => { @@ -1364,7 +1359,11 @@ mod test_finalize_block { .enumerate() .find_map( |(idx, tx_hash)| { - if tx_hash == &hash { Some(idx) } else { None } + if tx_hash == &hash { + Some(idx) + } else { + None + } }, ) .unwrap(); @@ -2979,25 +2978,21 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check transaction's hash in storage - assert!( - shell - .shell - .state - .write_log() - .has_replay_protection_entry(&wrapper_tx.raw_header_hash()) - ); + assert!(shell + .shell + .state + .write_log() + .has_replay_protection_entry(&wrapper_tx.raw_header_hash())); // Check that the hash is not present in the merkle tree shell.state.commit_block().unwrap(); - assert!( - !shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&wrapper_hash_key) - .unwrap() - ); + assert!(!shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&wrapper_hash_key) + .unwrap()); // test that a commitment to replay protection gets added. 
let reprot_key = replay_protection::commitment_key(); @@ -3044,26 +3039,22 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check that the hashes are present in the merkle tree shell.state.commit_block().unwrap(); - assert!( - shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&convert_key) - .unwrap() - ); - assert!( - shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&commitment_key) - .unwrap() - ); + assert!(shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&convert_key) + .unwrap()); + assert!(shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&commitment_key) + .unwrap()); } /// Test that a tx that has already been applied in the same block @@ -3141,34 +3132,26 @@ mod test_finalize_block { assert_eq!(code, ResultCode::WasmRuntimeError); for wrapper in [&wrapper, &new_wrapper] { - assert!( - shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.raw_header_hash()) - ); - assert!( - !shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.header_hash()) - ); + assert!(shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.raw_header_hash())); + assert!(!shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.header_hash())); } // Commit to check the hashes from storage shell.commit(); for wrapper in [&wrapper, &new_wrapper] { - assert!( - shell - .state - .has_replay_protection_entry(&wrapper.raw_header_hash()) - .unwrap() - ); - assert!( - !shell - .state - .has_replay_protection_entry(&wrapper.header_hash()) - .unwrap() - ); + assert!(shell + .state + .has_replay_protection_entry(&wrapper.raw_header_hash()) + .unwrap()); + assert!(!shell + .state + .has_replay_protection_entry(&wrapper.header_hash()) + .unwrap()); } } @@ -3451,29 +3434,23 @@ mod test_finalize_block { &unsigned_wrapper, &wrong_commitment_wrapper, ] { - assert!( - !shell.state.write_log().has_replay_protection_entry( - &valid_wrapper.raw_header_hash() - ) - ); - assert!( - shell - .state - .write_log() - .has_replay_protection_entry(&valid_wrapper.header_hash()) - ); - } - assert!( - shell.state.write_log().has_replay_protection_entry( - &failing_wrapper.raw_header_hash() - ) - ); - assert!( - !shell + assert!(!shell .state .write_log() - .has_replay_protection_entry(&failing_wrapper.header_hash()) - ); + .has_replay_protection_entry(&valid_wrapper.raw_header_hash())); + assert!(shell + .state + .write_log() + .has_replay_protection_entry(&valid_wrapper.header_hash())); + } + assert!(shell + .state + .write_log() + .has_replay_protection_entry(&failing_wrapper.raw_header_hash())); + assert!(!shell + .state + .write_log() + .has_replay_protection_entry(&failing_wrapper.header_hash())); // Commit to check the hashes from storage shell.commit(); @@ -3482,33 +3459,23 @@ mod test_finalize_block { unsigned_wrapper, wrong_commitment_wrapper, ] { - assert!( - !shell - .state - .has_replay_protection_entry( - &valid_wrapper.raw_header_hash() - ) - .unwrap() - ); - assert!( - shell - .state - .has_replay_protection_entry(&valid_wrapper.header_hash()) - .unwrap() - ); - } - assert!( - shell + assert!(!shell .state - .has_replay_protection_entry(&failing_wrapper.raw_header_hash()) - .unwrap() - ); - assert!( - !shell + .has_replay_protection_entry(&valid_wrapper.raw_header_hash()) + .unwrap()); + assert!(shell .state - .has_replay_protection_entry(&failing_wrapper.header_hash()) - .unwrap() - ); + .has_replay_protection_entry(&valid_wrapper.header_hash()) + .unwrap()); + } + assert!(shell + .state + 
.has_replay_protection_entry(&failing_wrapper.raw_header_hash()) + .unwrap()); + assert!(!shell + .state + .has_replay_protection_entry(&failing_wrapper.header_hash()) + .unwrap()); } #[test] @@ -3568,18 +3535,14 @@ mod test_finalize_block { let code = event[0].read_attribute::().expect("Test failed"); assert_eq!(code, ResultCode::InvalidTx); - assert!( - shell - .state - .write_log() - .has_replay_protection_entry(&wrapper_hash) - ); - assert!( - !shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.raw_header_hash()) - ); + assert!(shell + .state + .write_log() + .has_replay_protection_entry(&wrapper_hash)); + assert!(!shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.raw_header_hash())); } // Test that the fees are paid even if the inner transaction fails and its @@ -3977,11 +3940,9 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Consensus) ); - assert!( - enqueued_slashes_handle() - .at(&Epoch::default()) - .is_empty(&shell.state)? - ); + assert!(enqueued_slashes_handle() + .at(&Epoch::default()) + .is_empty(&shell.state)?); assert_eq!( get_num_consensus_validators(&shell.state, Epoch::default()) .unwrap(), @@ -4000,21 +3961,17 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Jailed) ); - assert!( - enqueued_slashes_handle() - .at(&epoch) - .is_empty(&shell.state)? - ); + assert!(enqueued_slashes_handle() + .at(&epoch) + .is_empty(&shell.state)?); assert_eq!( get_num_consensus_validators(&shell.state, epoch).unwrap(), 5_u64 ); } - assert!( - !enqueued_slashes_handle() - .at(&processing_epoch) - .is_empty(&shell.state)? - ); + assert!(!enqueued_slashes_handle() + .at(&processing_epoch) + .is_empty(&shell.state)?); // Advance to the processing epoch loop { @@ -4037,11 +3994,9 @@ mod test_finalize_block { // println!("Reached processing epoch"); break; } else { - assert!( - enqueued_slashes_handle() - .at(&shell.state.in_mem().block.epoch) - .is_empty(&shell.state)? 
- ); + assert!(enqueued_slashes_handle() + .at(&shell.state.in_mem().block.epoch) + .is_empty(&shell.state)?); let stake1 = read_validator_stake( &shell.state, ¶ms, @@ -4525,13 +4480,11 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(misbehavior_epoch)); - assert!( - namada_proof_of_stake::storage::validator_slashes_handle( - &val1.address - ) - .is_empty(&shell.state) - .unwrap() - ); + assert!(namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.state) + .unwrap()); tracing::debug!("Advancing to epoch 7"); @@ -4596,22 +4549,18 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(Epoch(4))); - assert!( - namada_proof_of_stake::is_validator_frozen( - &shell.state, - &val1.address, - current_epoch, - ¶ms - ) - .unwrap() - ); - assert!( - namada_proof_of_stake::storage::validator_slashes_handle( - &val1.address - ) - .is_empty(&shell.state) - .unwrap() - ); + assert!(namada_proof_of_stake::is_validator_frozen( + &shell.state, + &val1.address, + current_epoch, + ¶ms + ) + .unwrap()); + assert!(namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.state) + .unwrap()); let pre_stake_10 = namada_proof_of_stake::storage::read_validator_stake( @@ -5489,11 +5438,9 @@ mod test_finalize_block { shell.vp_wasm_cache.clone(), ); let parameters = ParametersVp { ctx }; - assert!( - parameters - .validate_tx(&batched_tx, &keys_changed, &verifiers) - .is_ok() - ); + assert!(parameters + .validate_tx(&batched_tx, &keys_changed, &verifiers) + .is_ok()); // we advance forward to the next epoch let mut req = FinalizeBlock::default(); @@ -5566,13 +5513,11 @@ mod test_finalize_block { let inner_results = inner_tx_result.batch_results.0; for cmt in batch.commitments() { - assert!( - inner_results - .get(&cmt.get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); + assert!(inner_results + .get(&cmt.get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); } // Check storage modifications @@ -5610,24 +5555,18 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!( - inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); - assert!( - inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err() - ); + assert!(inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); + assert!(inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err()); // Assert that the last tx didn't run - assert!( - !inner_results.contains_key(&batch.commitments()[2].get_hash()) - ); + assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); // Check storage modifications are missing for key in ["random_key_1", "random_key_2", "random_key_3"] { @@ -5658,27 +5597,21 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!( - inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); - assert!( - inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err() - ); - assert!( - inner_results - .get(&batch.commitments()[2].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); + 
assert!(inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); + assert!(inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err()); + assert!(inner_results + .get(&batch.commitments()[2].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); // Check storage modifications assert_eq!( @@ -5689,12 +5622,10 @@ mod test_finalize_block { .unwrap(), STORAGE_VALUE ); - assert!( - !shell - .state - .has_key(&"random_key_2".parse().unwrap()) - .unwrap() - ); + assert!(!shell + .state + .has_key(&"random_key_2".parse().unwrap()) + .unwrap()); assert_eq!( shell .state @@ -5726,24 +5657,18 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!( - inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); - assert!( - inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err() - ); + assert!(inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); + assert!(inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err()); // Assert that the last tx didn't run - assert!( - !inner_results.contains_key(&batch.commitments()[2].get_hash()) - ); + assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); // Check storage modifications are missing for key in ["random_key_1", "random_key_2", "random_key_3"] { @@ -5773,24 +5698,18 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!( - inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted()) - ); - assert!( - inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err() - ); + assert!(inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted())); + assert!(inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err()); // Assert that the last tx didn't run - assert!( - !inner_results.contains_key(&batch.commitments()[2].get_hash()) - ); + assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); // Check storage modifications assert_eq!( diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index 82929e02f4..8d1f7e307c 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -83,6 +83,11 @@ pub(crate) struct TxWriteLog { // write/update/delete should ever happen on this field, this log should // only be populated through a dump of the `tx_write_log` and should be // cleaned either when committing or dumping the `tx_write_log` + // FIXME: remove this and use the batch for process_proposal and + // prepare_proposal? And what about finalize block? I need to execute the + // fee apyment even there, is it safe to use the batch bucket? 
+ // FIXME: maybe expose a function to remove elements from the Batch write + // log in case we needed to delete anything in the future precommit_write_log: HashMap, /// The events emitted by the current transaction events: WriteLogEvents, diff --git a/crates/tests/src/integration/masp.rs b/crates/tests/src/integration/masp.rs index 5f7f6dfd1b..634db5ce63 100644 --- a/crates/tests/src/integration/masp.rs +++ b/crates/tests/src/integration/masp.rs @@ -2171,3 +2171,322 @@ fn dynamic_assets() -> Result<()> { Ok(()) } + +// Test fee payment in masp: +// +// 1. Masp fee payment runs out of gas +// 3. Valid fee payment (also check that the first tx in the batch is executed +// only once) +#[test] +fn masp_fee_payment() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + let (mut node, _services) = setup::initialize_genesis(|mut genesis| { + genesis.parameters.parameters.masp_fee_payment_gas_limit = 20_000; + genesis + })?; + _ = node.next_epoch(); + + // Add the relevant viewing keys to the wallet otherwise the shielded + // context won't precache the masp data + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_a", + "--value", + AA_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_b", + "--value", + AB_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + + // Shield some tokens + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + ALBERT_KEY, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "50000", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 50000")); + + _ = node.next_masp_epoch(); + + // 1. Out of gas for masp fee payment + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + A_SPENDING_KEY, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "1", + "--gas-limit", + "5000", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 50000")); + + _ = node.next_masp_epoch(); + + // 2. 
Valid masp fee payment + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + A_SPENDING_KEY, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "10000", + "--gas-limit", + "20000", + "--gas-price", + "1", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + // Check the exact balance of the tx source to ensure that the masp fee + // payement transaction was executed only once + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 20000")); + + Ok(()) +} + +// Test that when paying gas via masp we select the gas limit as the minimum +// between the transaction's gas limit and the protocol parameter. +#[test] +fn masp_fee_payment_gas_limit() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + let (mut node, _services) = setup::initialize_genesis(|mut genesis| { + // Set an insufficient gas limit for masp fee payment to force all + // transactions to fail + genesis.parameters.parameters.masp_fee_payment_gas_limit = 5_000; + genesis + })?; + _ = node.next_masp_epoch(); + + // Add the relevant viewing keys to the wallet otherwise the shielded + // context won't precache the masp data + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_a", + "--value", + AA_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_b", + "--value", + AB_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + + // Shield some tokens + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + A_SPENDING_KEY, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "1000000", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + + _ = node.next_epoch(); + + // Masp fee payment with huge gas, check that the tx still fails because of + // the protocol param + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + ALBERT_KEY, + "--target", + BERTHA, + "--token", + NAM, + "--amount", + "1", + "--gas-limit", + "100000", + "--gas-price", + "1", + "--ledger-address", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_err()); + node.assert_success(); + + _ = node.next_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + // Check that the balance hasn't changed + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1000000")); + + Ok(()) +} From 5a1b5782f0dbf5bf24a1338c69b88c1ab08b0c58 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 4 Jun 2024 14:57:12 +0200 Subject: [PATCH 20/40] Removes write log precommit and leverages the batch log --- 
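As a rough illustration of the flow this patch moves to (a standalone sketch, not the patched code: `ToyWriteLog` and the string keys are placeholders rather than the crate's actual `WriteLog` API): per-transaction changes are first folded into a per-batch log, and the batch is later committed to the block log or dropped as a whole, replacing the old precommit bucket.

    use std::collections::HashMap;

    #[derive(Default)]
    struct ToyWriteLog {
        tx: HashMap<String, Vec<u8>>,
        batch: Vec<HashMap<String, Vec<u8>>>,
        block: HashMap<String, Vec<u8>>,
    }

    impl ToyWriteLog {
        fn write(&mut self, key: &str, value: Vec<u8>) {
            self.tx.insert(key.to_owned(), value);
        }

        // Plays the role of the removed precommit step: the tx log is
        // pushed onto the batch log and a fresh tx log is started.
        fn commit_tx_to_batch(&mut self) {
            let tx = std::mem::take(&mut self.tx);
            self.batch.push(tx);
        }

        // Flush every entry accumulated by the batch into the block log.
        fn commit_batch(&mut self) {
            for log in std::mem::take(&mut self.batch) {
                self.block.extend(log);
            }
        }

        // Discard only the current tx (e.g. a failing inner tx), keeping
        // whatever was already moved into the batch.
        fn drop_tx(&mut self) {
            self.tx.clear();
        }
    }

    fn main() {
        let mut log = ToyWriteLog::default();
        // Fee payment performed by the first inner tx of the batch.
        log.write("fee_payment", vec![1]);
        log.commit_tx_to_batch();
        // A later inner tx fails: only its own changes are dropped, the
        // already-batched fee payment survives.
        log.write("failing_tx", vec![2]);
        log.drop_tx();
        log.commit_batch();
        assert!(log.block.contains_key("fee_payment"));
        assert!(!log.block.contains_key("failing_tx"));
    }
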
crates/namada/src/ledger/protocol/mod.rs | 21 ++- crates/node/src/shell/finalize_block.rs | 6 +- crates/node/src/shell/prepare_proposal.rs | 3 +- crates/node/src/shell/process_proposal.rs | 7 +- crates/state/src/write_log.rs | 165 ++++------------------ 5 files changed, 47 insertions(+), 155 deletions(-) diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index 75cf2034ff..f79340f786 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -456,11 +456,13 @@ where None => check_fees(shell_params, tx, wrapper), }; - // Commit tx write log even in case of subsequent errors (if the fee payment + // Commit tx and batch write log even in case of subsequent errors (if the fee payment // failed instead, than the previous two functions must have already // dropped the write log, leading this function call to be essentially a // no-op) - shell_params.state.write_log_mut().commit_tx(); + shell_params.state.write_log_mut().commit_tx_to_batch(); + shell_params.state.write_log_mut().commit_batch(); + let (batch_results, masp_tx_refs) = payment_result?.map_or_else( || (BatchResults::default(), None), |(batched_result, masp_section_ref)| { @@ -700,13 +702,10 @@ where let valid_batched_tx_result = { // NOTE: A clean tx write log must be provided to this call - // for a correct vp validation. Block write log, instead, + // for a correct vp validation. Block and batch write logs, instead, // should contain any prior changes (if any). This is to simulate - // the unshielding tx (to prevent the already written - // keys from being passed/triggering VPs) but we cannot - // commit the tx write log yet cause the tx could still - // be invalid. - state.write_log_mut().precommit_tx(); + // the fee-paying tx (to prevent the already written keys from being passed/triggering VPs) but we cannot commit the tx write log yet cause the tx could still be invalid. So we use the batch write log to dump the current modifications. + state.write_log_mut().commit_tx_to_batch(); match apply_wasm_tx( tx.batch_ref_first_tx() .ok_or_else(|| Error::MissingInnerTxs)?, @@ -721,12 +720,12 @@ where Ok(result) => { // NOTE: do not commit yet cause this could be exploited to get // free masp operations. We can commit only after the entire fee - // payment has been deemed valid. Also, do not precommit cause + // payment has been deemed valid. Also, do not commit to batch cause // we might need to discard the effects of this valid unshield // (e.g. 
if it unshield an amount which is not enough to pay the // fees) if !result.is_accepted() { - state.write_log_mut().drop_tx_keep_precommit(); + state.write_log_mut().drop_tx(); tracing::error!( "The fee unshielding tx is invalid, some VPs rejected \ it: {:#?}", @@ -746,7 +745,7 @@ where .flatten() } Err(e) => { - state.write_log_mut().drop_tx_keep_precommit(); + state.write_log_mut().drop_tx(); tracing::error!( "The fee unshielding tx is invalid, wasm run failed: {}", e diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index 7553f466c2..f5985cf09d 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -357,7 +357,9 @@ where match extended_dispatch_result { Ok(extended_tx_result) => match tx_data.tx.header.tx_type { TxType::Wrapper(_) => { - self.state.write_log_mut().commit_tx(); + self.state.write_log_mut().commit_tx_to_batch(); + self.state.write_log_mut().commit_batch(); + // Return withouth emitting any events return Some(WrapperCache { tx: tx_data.tx.to_owned(), @@ -392,7 +394,9 @@ where .extend(Info(msg.to_string())) .extend(Code(ResultCode::InvalidTx)); // Make sure to clean the write logs for the next transaction + //FIXME: can we commit everything to batch when executing the wrapper? self.state.write_log_mut().drop_tx(); + self.state.write_log_mut().drop_batch(); } Err(dispatch_error) => { // This branch represents an error that affects the entire diff --git a/crates/node/src/shell/prepare_proposal.rs b/crates/node/src/shell/prepare_proposal.rs index 826855569e..89eb62784e 100644 --- a/crates/node/src/shell/prepare_proposal.rs +++ b/crates/node/src/shell/prepare_proposal.rs @@ -126,7 +126,8 @@ where .filter_map(|(tx_index, tx_bytes)| { match validate_wrapper_bytes(tx_bytes, &TxIndex::must_from_usize(tx_index),block_time, block_proposer, proposer_local_config, &mut temp_state, &mut vp_wasm_cache, &mut tx_wasm_cache, ) { Ok(gas) => { - temp_state.write_log_mut().commit_tx(); + temp_state.write_log_mut().commit_tx_to_batch(); + temp_state.write_log_mut().commit_batch(); Some((tx_bytes.to_owned(), gas)) }, Err(()) => { diff --git a/crates/node/src/shell/process_proposal.rs b/crates/node/src/shell/process_proposal.rs index f213c5c96f..7abbe7e352 100644 --- a/crates/node/src/shell/process_proposal.rs +++ b/crates/node/src/shell/process_proposal.rs @@ -150,7 +150,8 @@ where ); let error_code = ResultCode::from_u32(result.code).unwrap(); if let ResultCode::Ok = error_code { - temp_state.write_log_mut().commit_tx(); + temp_state.write_log_mut().commit_tx_to_batch(); + temp_state.write_log_mut().commit_batch(); } else { tracing::info!( "Process proposal rejected an invalid tx. Error code: \ @@ -1024,7 +1025,7 @@ mod test_process_proposal { "Error trying to apply a transaction: Error while processing \ transaction's fees: Transparent balance of wrapper's signer \ was insufficient to pay fee. All the available transparent \ - funds have been moved to the block proposer" + funds have been moved to the block proposer. This shouldn't happen." ) ); } @@ -1090,7 +1091,7 @@ mod test_process_proposal { "Error trying to apply a transaction: Error while processing \ transaction's fees: Transparent balance of wrapper's signer \ was insufficient to pay fee. All the available transparent \ - funds have been moved to the block proposer" + funds have been moved to the block proposer. This shouldn't happen." 
) ); } diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index 8d1f7e307c..d065d0aeca 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -75,20 +75,6 @@ pub(crate) struct TxWriteLog { // Temporary key-values for the current transaction that are dropped after // tx and its verifying VPs execution is done tx_temp_log: HashMap>, - // A precommit bucket for the `tx_write_log`. This is useful for - // validation when a clean `tx_write_log` is needed without committing any - // modification already in there. These modifications can be temporarily - // stored here and then discarded or committed to the `block_write_log`, - // together with the content of `tx_write_log`. No direct key - // write/update/delete should ever happen on this field, this log should - // only be populated through a dump of the `tx_write_log` and should be - // cleaned either when committing or dumping the `tx_write_log` - // FIXME: remove this and use the batch for process_proposal and - // prepare_proposal? And what about finalize block? I need to execute the - // fee apyment even there, is it safe to use the batch bucket? - // FIXME: maybe expose a function to remove elements from the Batch write - // log in case we needed to delete anything in the future - precommit_write_log: HashMap, /// The events emitted by the current transaction events: WriteLogEvents, } @@ -99,7 +85,6 @@ impl Default for TxWriteLog { address_gen: None, write_log: HashMap::with_capacity(100), tx_temp_log: HashMap::with_capacity(1), - precommit_write_log: HashMap::with_capacity(100), events: WriteLogEvents { tree: StringPatriciaMap::new(), }, @@ -202,10 +187,6 @@ impl WriteLog { .tx_write_log .write_log .get(key) - .or_else(|| { - // If not found, then try to read from tx precommit write log - self.tx_write_log.precommit_write_log.get(key) - }) .or_else(|| { // If not found, then try to read from batch write log, // following the insertion order @@ -309,12 +290,7 @@ impl WriteLog { } let len_signed = i64::try_from(len).map_err(|_| Error::ValueLenOverflow)?; - let size_diff = match self - .tx_write_log - .write_log - .get(key) - .or_else(|| self.tx_write_log.precommit_write_log.get(key)) - { + let size_diff = match self.tx_write_log.write_log.get(key) { Some(prev) => match prev { StorageModification::Write { ref value } => { let val_len = i64::try_from(value.len()) @@ -327,7 +303,7 @@ impl WriteLog { // wasm environment without the need for cooperation from // the wasm code (tx or vp), so there's no need to return // gas in case of an error because execution will terminate - // anyway and this cannot be exploited to run the vm forever + // anyway and this cannot be exploited to keep the vm running return Err(Error::UpdateVpOfNewAccount); } }, @@ -384,12 +360,7 @@ impl WriteLog { key: &storage::Key, value: Vec, ) -> Result<(u64, i64)> { - if let Some(prev) = self - .tx_write_log - .write_log - .get(key) - .or_else(|| self.tx_write_log.precommit_write_log.get(key)) - { + if let Some(prev) = self.tx_write_log.write_log.get(key) { match prev { StorageModification::Write { .. 
} => { // Cannot overwrite a write request with a temporary one @@ -434,12 +405,7 @@ impl WriteLog { if key.is_validity_predicate().is_some() { return Err(Error::DeleteVp); } - let size_diff = match self - .tx_write_log - .write_log - .get(key) - .or_else(|| self.tx_write_log.precommit_write_log.get(key)) - { + let size_diff = match self.tx_write_log.write_log.get(key) { Some(prev) => match prev { StorageModification::Write { ref value } => value.len(), StorageModification::Delete => 0, @@ -537,8 +503,7 @@ impl WriteLog { /// Get the non-temporary storage keys changed and accounts keys initialized /// in the current transaction. The account keys point to the validity - /// predicates of the newly created accounts. The keys in the precommit are - /// not included in the result of this function. + /// predicates of the newly created accounts. pub fn get_keys(&self) -> BTreeSet { self.tx_write_log .write_log @@ -547,18 +512,6 @@ impl WriteLog { .collect() } - /// Get the storage keys changed and accounts keys initialized in the - /// current transaction and precommit. The account keys point to the - /// validity predicates of the newly created accounts. - pub fn get_keys_with_precommit(&self) -> BTreeSet { - self.tx_write_log - .precommit_write_log - .keys() - .chain(self.tx_write_log.write_log.keys()) - .cloned() - .collect() - } - /// Get the storage keys changed in the current transaction (left) and /// the addresses of accounts initialized in the current transaction /// (right). The first vector excludes keys of validity predicates of @@ -640,50 +593,23 @@ impl WriteLog { self.tx_write_log.events.tree.values().flatten() } - /// Add the entire content of the tx write log to the precommit one. The tx - /// log gets reset in the process. - pub fn precommit_tx(&mut self) { - let tx_log = std::mem::replace( - &mut self.tx_write_log.write_log, - HashMap::with_capacity(100), - ); - - self.tx_write_log.precommit_write_log.extend(tx_log) - } - - /// Commit the current transaction's write log and precommit log to the - /// batch when it's accepted by all the triggered validity predicates. - /// Starts a new transaction write log. + /// Commit the current transaction's write log to the batch when it's accepted by all the triggered validity predicates. Starts a new transaction write log. pub fn commit_tx_to_batch(&mut self) { - // First precommit everything - self.precommit_tx(); - - // Then commit to batch let tx_write_log = std::mem::take(&mut self.tx_write_log); let batched_log = BatchedTxWriteLog { address_gen: tx_write_log.address_gen, - write_log: tx_write_log.precommit_write_log, + write_log: tx_write_log.write_log, }; self.batch_write_log.push(batched_log); } - /// Drop the current transaction's write log and IBC events and precommit - /// when it's declined by any of the triggered validity predicates. + /// Drop the current transaction's write log and IBC events when it's declined by any of the triggered validity predicates. /// Starts a new transaction write log a clears the temp write log. pub fn drop_tx(&mut self) { self.tx_write_log = Default::default(); } - /// Drop the current transaction's write log and temporary log but keep the - /// precommit one. This is useful only when a part of a transaction - /// failed but it can still be valid and we want to keep the changes - /// applied before the failed section. - pub fn drop_tx_keep_precommit(&mut self) { - self.tx_write_log.write_log.clear(); - self.tx_write_log.tx_temp_log.clear(); - } - /// Commit the entire batch to the block log. 
pub fn commit_batch(&mut self) { for log in std::mem::take(&mut self.batch_write_log) { @@ -693,19 +619,17 @@ impl WriteLog { } /// Drop the entire batch log. + //FIXME: should this also drop the tx? pub fn drop_batch(&mut self) { self.batch_write_log = Default::default(); } /// Commit the tx write log to the block write log. + //FIXME: shoul this pass through the batch? pub fn commit_tx(&mut self) { - // First precommit everything - self.precommit_tx(); - - // Then commit to block let tx_write_log = std::mem::take(&mut self.tx_write_log); - self.block_write_log - .extend(tx_write_log.precommit_write_log); + self.block_write_log.extend(tx_write_log.write_log); + self.block_address_gen = tx_write_log.address_gen; } /// Get the verifiers set whose validity predicates should validate the @@ -771,12 +695,7 @@ impl WriteLog { .iter() .rev() .flat_map(|batch_log| batch_log.write_log.iter()) - .chain( - self.tx_write_log - .precommit_write_log - .iter() - .chain(self.tx_write_log.write_log.iter()), - ), + .chain(self.tx_write_log.write_log.iter()), ) { if key.split_prefix(prefix).is_some() { matches.insert(key.to_string(), modification.clone()); @@ -1065,11 +984,9 @@ mod tests { assert!(state.write_log.replay_protection.is_empty()); for tx in ["tx1", "tx2", "tx3"] { let hash = Hash::sha256(tx.as_bytes()); - assert!( - state - .has_replay_protection_entry(&hash) - .expect("read failed") - ); + assert!(state + .has_replay_protection_entry(&hash) + .expect("read failed")); } { @@ -1096,17 +1013,13 @@ mod tests { assert!(state.write_log.replay_protection.is_empty()); for tx in ["tx1", "tx2", "tx3", "tx5", "tx6"] { - assert!( - state - .has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) - .expect("read failed") - ); + assert!(state + .has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) + .expect("read failed")); } - assert!( - !state - .has_replay_protection_entry(&Hash::sha256("tx4".as_bytes())) - .expect("read failed") - ); + assert!(!state + .has_replay_protection_entry(&Hash::sha256("tx4".as_bytes())) + .expect("read failed")); { let write_log = state.write_log_mut(); write_log @@ -1114,12 +1027,10 @@ mod tests { .unwrap(); // mark as redundant a missing hash and check that it fails - assert!( - state - .write_log - .redundant_tx_hash(&Hash::sha256("tx8".as_bytes())) - .is_err() - ); + assert!(state + .write_log + .redundant_tx_hash(&Hash::sha256("tx8".as_bytes())) + .is_err()); // Do not assert the state of replay protection because this // error will actually trigger a shut down of the node. 
Also, since @@ -1142,14 +1053,6 @@ mod tests { state.write_log.write(&key1, val1.clone()), Err(Error::UpdateTemporaryValue) )); - - // Test with a temporary write precommitted - state.write_log.write_temp(&key1, val1.clone()).unwrap(); - state.write_log.precommit_tx(); - assert!(matches!( - state.write_log.write(&key1, val1), - Err(Error::UpdateTemporaryValue) - )); } // Test that a temporary write on top of a write is not allowed @@ -1166,14 +1069,6 @@ mod tests { state.write_log.write_temp(&key1, val1.clone()), Err(Error::WriteTempAfterWrite) )); - - // Test with a temporary write precommitted - state.write_log.write(&key1, val1.clone()).unwrap(); - state.write_log.precommit_tx(); - assert!(matches!( - state.write_log.write_temp(&key1, val1), - Err(Error::WriteTempAfterWrite) - )); } // Test that a temporary write on top of a delete is not allowed @@ -1190,14 +1085,6 @@ mod tests { state.write_log.write_temp(&key1, val1.clone()), Err(Error::WriteTempAfterDelete) )); - - // Test with a temporary write precommitted - state.write_log.delete(&key1).unwrap(); - state.write_log.precommit_tx(); - assert!(matches!( - state.write_log.write_temp(&key1, val1), - Err(Error::WriteTempAfterDelete) - )); } prop_compose! { From 0e2b48824c03c151100a672ee79f6f115905b8b0 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 4 Jun 2024 15:02:19 +0200 Subject: [PATCH 21/40] Different gas cost for storage deletes --- crates/gas/src/lib.rs | 3 +++ crates/state/src/write_log.rs | 7 +++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/gas/src/lib.rs b/crates/gas/src/lib.rs index 89e24209ff..987dee5fab 100644 --- a/crates/gas/src/lib.rs +++ b/crates/gas/src/lib.rs @@ -74,6 +74,9 @@ pub const STORAGE_ACCESS_GAS_PER_BYTE: u64 = /// The cost of writing data to storage, per byte pub const STORAGE_WRITE_GAS_PER_BYTE: u64 = MEMORY_ACCESS_GAS_PER_BYTE + 69_634 + STORAGE_OCCUPATION_GAS_PER_BYTE; +/// The cost of removing data from storage, per byte +pub const STORAGE_DELETE_GAS_PER_BYTE: u64 = + MEMORY_ACCESS_GAS_PER_BYTE + 69_634 + PHYSICAL_STORAGE_LATENCY_PER_BYTE; /// The cost of verifying a single signature of a transaction pub const VERIFY_TX_SIG_GAS: u64 = 594_290; /// The cost for requesting one more page in wasm (64KiB) diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index d065d0aeca..3a29f650e2 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -10,7 +10,10 @@ use namada_core::collections::{HashMap, HashSet}; use namada_core::hash::Hash; use namada_core::{arith, storage}; use namada_events::{Event, EventToEmit, EventType}; -use namada_gas::{MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE}; +use namada_gas::{ + MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_DELETE_GAS_PER_BYTE, + STORAGE_WRITE_GAS_PER_BYTE, +}; use patricia_tree::map::StringPatriciaMap; use thiserror::Error; @@ -426,7 +429,7 @@ impl WriteLog { .ok() .and_then(i64::checked_neg) .ok_or(Error::SizeDiffOverflow)?; - Ok((checked!(gas * STORAGE_WRITE_GAS_PER_BYTE)?, size_diff)) + Ok((checked!(gas * STORAGE_DELETE_GAS_PER_BYTE)?, size_diff)) } /// Delete a key and its value. 
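Rough illustration of the cost split introduced by this patch (a sketch only: the component values below are placeholders rather than the real figures from crates/gas/src/lib.rs, and just the two formulas mirror the patch): a delete keeps paying for memory access and the fixed per-byte cost, but swaps the long-term storage-occupation component for the physical-latency one, presumably because a delete adds no new data to storage.

    // Placeholder component values; the real ones are defined in
    // crates/gas/src/lib.rs and are not reproduced here.
    const MEMORY_ACCESS_GAS_PER_BYTE: u64 = 100;
    const STORAGE_OCCUPATION_GAS_PER_BYTE: u64 = 1_000;
    const PHYSICAL_STORAGE_LATENCY_PER_BYTE: u64 = 10;

    // These two formulas mirror the patch: writes pay for occupying
    // storage, deletes pay for the physical access latency instead.
    const STORAGE_WRITE_GAS_PER_BYTE: u64 =
        MEMORY_ACCESS_GAS_PER_BYTE + 69_634 + STORAGE_OCCUPATION_GAS_PER_BYTE;
    const STORAGE_DELETE_GAS_PER_BYTE: u64 =
        MEMORY_ACCESS_GAS_PER_BYTE + 69_634 + PHYSICAL_STORAGE_LATENCY_PER_BYTE;

    fn main() {
        // Example: charging 64 bytes; the delete constant is what
        // write_log::delete now uses in place of the write constant.
        let bytes: u64 = 64;
        println!("write  gas: {}", bytes * STORAGE_WRITE_GAS_PER_BYTE);
        println!("delete gas: {}", bytes * STORAGE_DELETE_GAS_PER_BYTE);
    }
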
From 92aafe798c4a0ff5e3d5da00d53765e76d437904 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 4 Jun 2024 17:30:41 +0200 Subject: [PATCH 22/40] Refactors the write log api --- crates/namada/src/ledger/governance/mod.rs | 24 +- crates/namada/src/ledger/mod.rs | 28 +- crates/namada/src/ledger/native_vp/ibc/mod.rs | 34 +- crates/namada/src/ledger/protocol/mod.rs | 127 ++--- crates/namada/src/vm/wasm/run.rs | 84 ++-- crates/node/src/bench_utils.rs | 6 +- crates/node/src/shell/finalize_block.rs | 439 ++++++++++-------- crates/node/src/shell/governance.rs | 8 +- crates/node/src/shell/prepare_proposal.rs | 3 +- crates/node/src/shell/process_proposal.rs | 9 +- crates/state/src/wl_state.rs | 15 +- crates/state/src/write_log.rs | 74 +-- crates/tests/src/native_vp/pos.rs | 6 +- .../src/storage_api/collections/lazy_map.rs | 2 +- .../src/storage_api/collections/lazy_set.rs | 2 +- .../src/storage_api/collections/lazy_vec.rs | 2 +- .../collections/nested_lazy_map.rs | 2 +- crates/tests/src/vm_host_env/ibc.rs | 2 +- crates/tests/src/vm_host_env/mod.rs | 4 +- crates/tests/src/vm_host_env/tx.rs | 2 +- 20 files changed, 484 insertions(+), 389 deletions(-) diff --git a/crates/namada/src/ledger/governance/mod.rs b/crates/namada/src/ledger/governance/mod.rs index ea29733dea..0e762b1918 100644 --- a/crates/namada/src/ledger/governance/mod.rs +++ b/crates/namada/src/ledger/governance/mod.rs @@ -1368,7 +1368,7 @@ mod test { .write_log_mut() .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); } #[cfg(test)] @@ -1609,7 +1609,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let governance_balance_key = balance_key(&nam(), &ADDRESS); @@ -1705,9 +1705,9 @@ mod test { assert_matches!(&result, Err(_)); if result.is_err() { - state.write_log_mut().drop_tx(); + state.drop_tx_batch(); } else { - state.write_log_mut().commit_tx(); + state.commit_tx_batch(); } state.commit_block().unwrap(); @@ -1803,9 +1803,9 @@ mod test { assert_matches!(&result, Ok(_)); if result.is_err() { - state.write_log_mut().drop_tx(); + state.drop_tx_batch(); } else { - state.write_log_mut().commit_tx(); + state.commit_tx_batch(); } state.commit_block().unwrap(); @@ -2328,7 +2328,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = state.in_mem().get_block_height().0 + (7 * 2); @@ -2457,7 +2457,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = state.in_mem().get_block_height().0 + (7 * 2); @@ -2586,7 +2586,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = state.in_mem().get_block_height().0 + (7 * 2); @@ -2715,7 +2715,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = state.in_mem().get_block_height().0 + (9 * 2); @@ -2861,7 +2861,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = state.in_mem().get_block_height().0 + (10 * 2); @@ -3007,7 +3007,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = 
state.in_mem().get_block_height().0 + (10 * 2); diff --git a/crates/namada/src/ledger/mod.rs b/crates/namada/src/ledger/mod.rs index 38c27a5a33..ee2a17264d 100644 --- a/crates/namada/src/ledger/mod.rs +++ b/crates/namada/src/ledger/mod.rs @@ -77,7 +77,7 @@ mod dry_run_tx { ) .into_storage_result()?; - temp_state.write_log_mut().commit_tx(); + temp_state.write_log_mut().commit_tx_to_batch(); let available_gas = tx_gas_meter.borrow().get_available_gas(); (tx_result, TxGasMeter::new_from_sub_limit(available_gas)) } @@ -274,8 +274,8 @@ mod test { } #[tokio::test] - async fn test_shell_queries_router_with_client( - ) -> namada_state::StorageResult<()> { + async fn test_shell_queries_router_with_client() + -> namada_state::StorageResult<()> { // Initialize the `TestClient` let mut client = TestClient::new(RPC); // store the wasm code @@ -309,15 +309,17 @@ mod test { .dry_run_tx(&client, Some(tx_bytes), None, false) .await .unwrap(); - assert!(result - .data - .batch_results - .0 - .get(&cmt.get_hash()) - .unwrap() - .as_ref() - .unwrap() - .is_accepted()); + assert!( + result + .data + .batch_results + .0 + .get(&cmt.get_hash()) + .unwrap() + .as_ref() + .unwrap() + .is_accepted() + ); // Request storage value for a balance key ... let token_addr = address::testing::established_address_1(); @@ -352,7 +354,7 @@ mod test { let balance = token::Amount::native_whole(1000); StorageWrite::write(&mut client.state, &balance_key, balance)?; // It has to be committed to be visible in a query - client.state.commit_tx(); + client.state.commit_tx_batch(); client.state.commit_block().unwrap(); // ... there should be the same value now let read_balance = RPC diff --git a/crates/namada/src/ledger/native_vp/ibc/mod.rs b/crates/namada/src/ledger/native_vp/ibc/mod.rs index 75f40e91e2..3c405eeadc 100644 --- a/crates/namada/src/ledger/native_vp/ibc/mod.rs +++ b/crates/namada/src/ledger/native_vp/ibc/mod.rs @@ -628,7 +628,7 @@ mod tests { .write_log_mut() .write(&client_update_height_key, host_height.encode_vec()) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); } fn get_connection_id() -> ConnectionId { @@ -1054,7 +1054,7 @@ mod tests { let mut keys_changed = BTreeSet::new(); let mut state = init_storage(); insert_init_client(&mut state); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block @@ -1178,7 +1178,7 @@ mod tests { let mut keys_changed = BTreeSet::new(); let mut state = init_storage(); insert_init_client(&mut state); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1381,7 +1381,7 @@ mod tests { let mut keys_changed = BTreeSet::new(); let mut state = init_storage(); insert_init_client(&mut state); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1510,7 +1510,7 @@ mod tests { .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1619,7 +1619,7 @@ mod tests { .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1715,7 +1715,7 @@ mod tests { 
.write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1837,7 +1837,7 @@ mod tests { .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1967,7 +1967,7 @@ mod tests { .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2074,7 +2074,7 @@ mod tests { .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2187,7 +2187,7 @@ mod tests { .write_log_mut() .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2337,7 +2337,7 @@ mod tests { .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2582,7 +2582,7 @@ mod tests { .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2745,7 +2745,7 @@ mod tests { .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2907,7 +2907,7 @@ mod tests { .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -3054,7 +3054,7 @@ mod tests { .write(&metadata_key, metadata.serialize_to_vec()) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -3206,7 +3206,7 @@ mod tests { .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index f79340f786..c34cf14e2c 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -456,12 +456,11 @@ where None => check_fees(shell_params, tx, wrapper), }; - // Commit tx and batch write log even in case of subsequent errors (if the fee payment - // failed instead, than the previous two functions must have already - // dropped the write log, leading this function call to be essentially a - // no-op) + // Commit tx to the batch write log even in case of subsequent errors (if + // the fee payment failed instead, than the previous two functions must + // have already dropped the write log, leading this function call to be + // essentially a 
no-op) shell_params.state.write_log_mut().commit_tx_to_batch(); - shell_params.state.write_log_mut().commit_batch(); let (batch_results, masp_tx_refs) = payment_result?.map_or_else( || (BatchResults::default(), None), @@ -525,61 +524,62 @@ where ) .map_err(Error::StorageError)?; - let (post_bal, valid_batched_tx_result) = - if let Some(post_bal) = balance.checked_sub(fees) { - fee_token_transfer( + let (post_bal, valid_batched_tx_result) = if let Some(post_bal) = + balance.checked_sub(fees) + { + fee_token_transfer( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + block_proposer, + fees, + )?; + + (Some(post_bal), None) + } else { + // See if the first inner transaction of the batch pays the fees + // with a masp unshield + if let Ok(Some(valid_batched_tx_result)) = + try_masp_fee_payment(shell_params, tx, tx_index) + { + // NOTE: Even if the unshielding was succesfull we could + // still fail in the transfer (e.g. cause the unshielded + // amount is not enough to cover the fees). In this case we + // want do drop the changes applied by the masp transaction + // and try to drain the fees from the transparent balance. + // Because of this we must NOT propagate errors from within + // this branch + let balance = crate::token::read_balance( shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), - block_proposer, - fees, - )?; + ); - (Some(post_bal), None) + // Ok to unwrap_or_default. In the default case, the only + // way the checked op can return Some is if fees are 0, but + // if that's the case then we would have never reached this + // branch of execution + let post_bal = balance + .unwrap_or_default() + .checked_sub(fees) + .filter(|_| { + fee_token_transfer( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + block_proposer, + fees, + ) + .is_ok() + }); + + // Batched tx result must be returned (and considered) only + // if fee payment was successful + (post_bal, post_bal.map(|_| valid_batched_tx_result)) } else { - // See if the first inner transaction of the batch pays the fees - // with a masp unshield - if let Ok(Some(valid_batched_tx_result)) = - try_masp_fee_payment(shell_params, tx, tx_index) - { - // NOTE: Even if the unshielding was succesfull we could - // still fail in the transfer (e.g. cause the unshielded - // amount is not enough to cover the fees). In this case we - // want do drop the changes applied by the masp transaction - // and try to drain the fees from the transparent balance. - // Because of this we must NOT propagate errors from within - // this branch - let balance = crate::token::read_balance( - shell_params.state, - &wrapper.fee.token, - &wrapper.fee_payer(), - ); - - // Ok to unwrap_or_default. In the default case, the only - // way the checked op can return Some is if fees are 0, but - // if that's the case then we would have never reached this - // branch of execution - let post_bal = balance - .unwrap_or_default() - .checked_sub(fees) - .filter(|_| { - fee_token_transfer( - shell_params.state, - &wrapper.fee.token, - &wrapper.fee_payer(), - block_proposer, - fees, - ) - .is_ok() - }); - - // Batched tx result must be returned (and considered) only - // if fee payment was successful - (post_bal, post_bal.map(|_| valid_batched_tx_result)) - } else { - (None, None) - } - }; + (None, None) + } + }; if post_bal.is_none() { // Balance was insufficient for fee payment, move all the @@ -654,7 +654,7 @@ where "Transfer of tx fee cannot be applied to due to fee overflow. \ This shouldn't happen." 
); - shell_params.state.write_log_mut().drop_tx(); + shell_params.state.write_log_mut().drop_batch(); Err(Error::FeeError(format!("{}", e))) } @@ -704,7 +704,10 @@ where // NOTE: A clean tx write log must be provided to this call // for a correct vp validation. Block and batch write logs, instead, // should contain any prior changes (if any). This is to simulate - // the fee-paying tx (to prevent the already written keys from being passed/triggering VPs) but we cannot commit the tx write log yet cause the tx could still be invalid. So we use the batch write log to dump the current modifications. + // the fee-paying tx (to prevent the already written keys from being + // passed/triggering VPs) but we cannot commit the tx write log yet + // cause the tx could still be invalid. So we use the batch write log to + // dump the current modifications. state.write_log_mut().commit_tx_to_batch(); match apply_wasm_tx( tx.batch_ref_first_tx() @@ -720,10 +723,10 @@ where Ok(result) => { // NOTE: do not commit yet cause this could be exploited to get // free masp operations. We can commit only after the entire fee - // payment has been deemed valid. Also, do not commit to batch cause - // we might need to discard the effects of this valid unshield - // (e.g. if it unshield an amount which is not enough to pay the - // fees) + // payment has been deemed valid. Also, do not commit to batch + // cause we might need to discard the effects of + // this valid unshield (e.g. if it unshield an + // amount which is not enough to pay the fees) if !result.is_accepted() { state.write_log_mut().drop_tx(); tracing::error!( @@ -1570,7 +1573,7 @@ mod tests { // commit storage changes. this will act as the // initial state of the chain - state.commit_tx(); + state.commit_tx_batch(); state.commit_block().unwrap(); // "execute" a dummy tx, by manually performing its state changes diff --git a/crates/namada/src/vm/wasm/run.rs b/crates/namada/src/vm/wasm/run.rs index 39ad8713e8..3cf1560bb7 100644 --- a/crates/namada/src/vm/wasm/run.rs +++ b/crates/namada/src/vm/wasm/run.rs @@ -1207,18 +1207,20 @@ mod tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); // When the `eval`ed VP doesn't run out of memory, it should return // `true` - assert!(vp( - code_hash, - &outer_tx.batch_ref_first_tx().unwrap(), - &tx_index, - &addr, - &state, - &gas_meter, - &keys_changed, - &verifiers, - vp_cache.clone(), - ) - .is_ok()); + assert!( + vp( + code_hash, + &outer_tx.batch_ref_first_tx().unwrap(), + &tx_index, + &addr, + &state, + &gas_meter, + &keys_changed, + &verifiers, + vp_cache.clone(), + ) + .is_ok() + ); // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail @@ -1237,18 +1239,20 @@ mod tests { // When the `eval`ed VP runs out of memory, its result should be // `false`, hence we should also get back `false` from the VP that // called `eval`. 
- assert!(vp( - code_hash, - &outer_tx.batch_ref_first_tx().unwrap(), - &tx_index, - &addr, - &state, - &gas_meter, - &keys_changed, - &verifiers, - vp_cache, - ) - .is_err()); + assert!( + vp( + code_hash, + &outer_tx.batch_ref_first_tx().unwrap(), + &tx_index, + &addr, + &state, + &gas_meter, + &keys_changed, + &verifiers, + vp_cache, + ) + .is_err() + ); } /// Test that when a validity predicate wasm goes over the memory limit @@ -1621,18 +1625,20 @@ mod tests { outer_tx.add_code(vec![], None).add_data(eval_vp); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); - assert!(vp( - code_hash, - &outer_tx.batch_ref_first_tx().unwrap(), - &tx_index, - &addr, - &state, - &gas_meter, - &keys_changed, - &verifiers, - vp_cache, - ) - .is_err()); + assert!( + vp( + code_hash, + &outer_tx.batch_ref_first_tx().unwrap(), + &tx_index, + &addr, + &state, + &gas_meter, + &keys_changed, + &verifiers, + vp_cache, + ) + .is_err() + ); } #[test] @@ -1676,7 +1682,7 @@ mod tests { &mut state, allowlist, ) .unwrap(); - state.commit_tx(); + state.commit_tx_batch(); let result = check_tx_allowed(&batched_tx, &state); assert_matches!(result.unwrap_err(), Error::DisallowedTx); @@ -1695,7 +1701,7 @@ mod tests { &mut state, allowlist, ) .unwrap(); - state.commit_tx(); + state.commit_tx_batch(); let result = check_tx_allowed(&batched_tx, &state); if let Err(result) = result { @@ -1970,7 +1976,7 @@ mod tests { vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index bd5503f316..fa2da91868 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -256,7 +256,7 @@ impl Default for BenchShell { ); bench_shell.execute_tx(&signed_tx.to_ref()); - bench_shell.state.commit_tx(); + bench_shell.state.commit_tx_batch(); // Initialize governance proposal let content_section = Section::ExtraData(Code::new( @@ -281,7 +281,7 @@ impl Default for BenchShell { ); bench_shell.execute_tx(&signed_tx.to_ref()); - bench_shell.state.commit_tx(); + bench_shell.state.commit_tx_batch(); bench_shell.commit_block(); // Advance epoch for pos benches @@ -627,7 +627,7 @@ impl BenchShell { ); self.last_block_masp_txs .push((masp_tx, self.state.write_log().get_keys())); - self.state.commit_tx(); + self.state.commit_tx_batch(); } } diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index f5985cf09d..99c41cb202 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -357,7 +357,6 @@ where match extended_dispatch_result { Ok(extended_tx_result) => match tx_data.tx.header.tx_type { TxType::Wrapper(_) => { - self.state.write_log_mut().commit_tx_to_batch(); self.state.write_log_mut().commit_batch(); // Return withouth emitting any events @@ -393,10 +392,12 @@ where .extend(GasUsed(tx_data.tx_gas_meter.get_tx_consumed_gas())) .extend(Info(msg.to_string())) .extend(Code(ResultCode::InvalidTx)); - // Make sure to clean the write logs for the next transaction - //FIXME: can we commit everything to batch when executing the wrapper? + // Drop the tx write log which could contain invalid data self.state.write_log_mut().drop_tx(); - self.state.write_log_mut().drop_batch(); + // Instead commit the batch write log because it contains data + // that should be persisted even in case of a wrapper failure + // (e.g. 
the fee payment state change) + self.state.write_log_mut().commit_batch(); } Err(dispatch_error) => { // This branch represents an error that affects the entire @@ -1363,11 +1364,7 @@ mod test_finalize_block { .enumerate() .find_map( |(idx, tx_hash)| { - if tx_hash == &hash { - Some(idx) - } else { - None - } + if tx_hash == &hash { Some(idx) } else { None } }, ) .unwrap(); @@ -2982,21 +2979,25 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check transaction's hash in storage - assert!(shell - .shell - .state - .write_log() - .has_replay_protection_entry(&wrapper_tx.raw_header_hash())); + assert!( + shell + .shell + .state + .write_log() + .has_replay_protection_entry(&wrapper_tx.raw_header_hash()) + ); // Check that the hash is not present in the merkle tree shell.state.commit_block().unwrap(); - assert!(!shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&wrapper_hash_key) - .unwrap()); + assert!( + !shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&wrapper_hash_key) + .unwrap() + ); // test that a commitment to replay protection gets added. let reprot_key = replay_protection::commitment_key(); @@ -3043,22 +3044,26 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check that the hashes are present in the merkle tree shell.state.commit_block().unwrap(); - assert!(shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&convert_key) - .unwrap()); - assert!(shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&commitment_key) - .unwrap()); + assert!( + shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&convert_key) + .unwrap() + ); + assert!( + shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&commitment_key) + .unwrap() + ); } /// Test that a tx that has already been applied in the same block @@ -3136,26 +3141,34 @@ mod test_finalize_block { assert_eq!(code, ResultCode::WasmRuntimeError); for wrapper in [&wrapper, &new_wrapper] { - assert!(shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.raw_header_hash())); - assert!(!shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.header_hash())); + assert!( + shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.raw_header_hash()) + ); + assert!( + !shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.header_hash()) + ); } // Commit to check the hashes from storage shell.commit(); for wrapper in [&wrapper, &new_wrapper] { - assert!(shell - .state - .has_replay_protection_entry(&wrapper.raw_header_hash()) - .unwrap()); - assert!(!shell - .state - .has_replay_protection_entry(&wrapper.header_hash()) - .unwrap()); + assert!( + shell + .state + .has_replay_protection_entry(&wrapper.raw_header_hash()) + .unwrap() + ); + assert!( + !shell + .state + .has_replay_protection_entry(&wrapper.header_hash()) + .unwrap() + ); } } @@ -3438,23 +3451,29 @@ mod test_finalize_block { &unsigned_wrapper, &wrong_commitment_wrapper, ] { - assert!(!shell - .state - .write_log() - .has_replay_protection_entry(&valid_wrapper.raw_header_hash())); - assert!(shell + assert!( + !shell.state.write_log().has_replay_protection_entry( + &valid_wrapper.raw_header_hash() + ) + ); + assert!( + shell + .state + .write_log() + .has_replay_protection_entry(&valid_wrapper.header_hash()) + ); + } + assert!( + shell.state.write_log().has_replay_protection_entry( + &failing_wrapper.raw_header_hash() + ) + ); + assert!( + !shell .state .write_log() - 
.has_replay_protection_entry(&valid_wrapper.header_hash())); - } - assert!(shell - .state - .write_log() - .has_replay_protection_entry(&failing_wrapper.raw_header_hash())); - assert!(!shell - .state - .write_log() - .has_replay_protection_entry(&failing_wrapper.header_hash())); + .has_replay_protection_entry(&failing_wrapper.header_hash()) + ); // Commit to check the hashes from storage shell.commit(); @@ -3463,23 +3482,33 @@ mod test_finalize_block { unsigned_wrapper, wrong_commitment_wrapper, ] { - assert!(!shell + assert!( + !shell + .state + .has_replay_protection_entry( + &valid_wrapper.raw_header_hash() + ) + .unwrap() + ); + assert!( + shell + .state + .has_replay_protection_entry(&valid_wrapper.header_hash()) + .unwrap() + ); + } + assert!( + shell .state - .has_replay_protection_entry(&valid_wrapper.raw_header_hash()) - .unwrap()); - assert!(shell + .has_replay_protection_entry(&failing_wrapper.raw_header_hash()) + .unwrap() + ); + assert!( + !shell .state - .has_replay_protection_entry(&valid_wrapper.header_hash()) - .unwrap()); - } - assert!(shell - .state - .has_replay_protection_entry(&failing_wrapper.raw_header_hash()) - .unwrap()); - assert!(!shell - .state - .has_replay_protection_entry(&failing_wrapper.header_hash()) - .unwrap()); + .has_replay_protection_entry(&failing_wrapper.header_hash()) + .unwrap() + ); } #[test] @@ -3539,14 +3568,18 @@ mod test_finalize_block { let code = event[0].read_attribute::().expect("Test failed"); assert_eq!(code, ResultCode::InvalidTx); - assert!(shell - .state - .write_log() - .has_replay_protection_entry(&wrapper_hash)); - assert!(!shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.raw_header_hash())); + assert!( + shell + .state + .write_log() + .has_replay_protection_entry(&wrapper_hash) + ); + assert!( + !shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.raw_header_hash()) + ); } // Test that the fees are paid even if the inner transaction fails and its @@ -3944,9 +3977,11 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Consensus) ); - assert!(enqueued_slashes_handle() - .at(&Epoch::default()) - .is_empty(&shell.state)?); + assert!( + enqueued_slashes_handle() + .at(&Epoch::default()) + .is_empty(&shell.state)? + ); assert_eq!( get_num_consensus_validators(&shell.state, Epoch::default()) .unwrap(), @@ -3965,17 +4000,21 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Jailed) ); - assert!(enqueued_slashes_handle() - .at(&epoch) - .is_empty(&shell.state)?); + assert!( + enqueued_slashes_handle() + .at(&epoch) + .is_empty(&shell.state)? + ); assert_eq!( get_num_consensus_validators(&shell.state, epoch).unwrap(), 5_u64 ); } - assert!(!enqueued_slashes_handle() - .at(&processing_epoch) - .is_empty(&shell.state)?); + assert!( + !enqueued_slashes_handle() + .at(&processing_epoch) + .is_empty(&shell.state)? + ); // Advance to the processing epoch loop { @@ -3998,9 +4037,11 @@ mod test_finalize_block { // println!("Reached processing epoch"); break; } else { - assert!(enqueued_slashes_handle() - .at(&shell.state.in_mem().block.epoch) - .is_empty(&shell.state)?); + assert!( + enqueued_slashes_handle() + .at(&shell.state.in_mem().block.epoch) + .is_empty(&shell.state)? 
+ ); let stake1 = read_validator_stake( &shell.state, ¶ms, @@ -4484,11 +4525,13 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(misbehavior_epoch)); - assert!(namada_proof_of_stake::storage::validator_slashes_handle( - &val1.address - ) - .is_empty(&shell.state) - .unwrap()); + assert!( + namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.state) + .unwrap() + ); tracing::debug!("Advancing to epoch 7"); @@ -4553,18 +4596,22 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(Epoch(4))); - assert!(namada_proof_of_stake::is_validator_frozen( - &shell.state, - &val1.address, - current_epoch, - ¶ms - ) - .unwrap()); - assert!(namada_proof_of_stake::storage::validator_slashes_handle( - &val1.address - ) - .is_empty(&shell.state) - .unwrap()); + assert!( + namada_proof_of_stake::is_validator_frozen( + &shell.state, + &val1.address, + current_epoch, + ¶ms + ) + .unwrap() + ); + assert!( + namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.state) + .unwrap() + ); let pre_stake_10 = namada_proof_of_stake::storage::read_validator_stake( @@ -5442,9 +5489,11 @@ mod test_finalize_block { shell.vp_wasm_cache.clone(), ); let parameters = ParametersVp { ctx }; - assert!(parameters - .validate_tx(&batched_tx, &keys_changed, &verifiers) - .is_ok()); + assert!( + parameters + .validate_tx(&batched_tx, &keys_changed, &verifiers) + .is_ok() + ); // we advance forward to the next epoch let mut req = FinalizeBlock::default(); @@ -5517,11 +5566,13 @@ mod test_finalize_block { let inner_results = inner_tx_result.batch_results.0; for cmt in batch.commitments() { - assert!(inner_results - .get(&cmt.get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); + assert!( + inner_results + .get(&cmt.get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); } // Check storage modifications @@ -5559,18 +5610,24 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); + assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); // Assert that the last tx didn't run - assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); + assert!( + !inner_results.contains_key(&batch.commitments()[2].get_hash()) + ); // Check storage modifications are missing for key in ["random_key_1", "random_key_2", "random_key_3"] { @@ -5601,21 +5658,27 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); - assert!(inner_results - .get(&batch.commitments()[2].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| 
res.is_accepted()) + ); + assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); + assert!( + inner_results + .get(&batch.commitments()[2].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); // Check storage modifications assert_eq!( @@ -5626,10 +5689,12 @@ mod test_finalize_block { .unwrap(), STORAGE_VALUE ); - assert!(!shell - .state - .has_key(&"random_key_2".parse().unwrap()) - .unwrap()); + assert!( + !shell + .state + .has_key(&"random_key_2".parse().unwrap()) + .unwrap() + ); assert_eq!( shell .state @@ -5661,18 +5726,24 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); + assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); // Assert that the last tx didn't run - assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); + assert!( + !inner_results.contains_key(&batch.commitments()[2].get_hash()) + ); // Check storage modifications are missing for key in ["random_key_1", "random_key_2", "random_key_3"] { @@ -5702,18 +5773,24 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); + assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); // Assert that the last tx didn't run - assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); + assert!( + !inner_results.contains_key(&batch.commitments()[2].get_hash()) + ); // Check storage modifications assert_eq!( diff --git a/crates/node/src/shell/governance.rs b/crates/node/src/shell/governance.rs index 758ec735d8..86b301fa50 100644 --- a/crates/node/src/shell/governance.rs +++ b/crates/node/src/shell/governance.rs @@ -426,7 +426,7 @@ where .get(&cmt.get_hash()) { Some(Ok(batched_result)) if batched_result.is_accepted() => { - shell.state.commit_tx(); + shell.state.commit_tx_batch(); Ok(true) } Some(Err(e)) => { @@ -434,12 +434,12 @@ where "Error executing governance proposal {}", e.to_string() ); - shell.state.drop_tx(); + shell.state.drop_tx_batch(); Ok(false) } _ => { tracing::warn!("not sure what happen"); - shell.state.drop_tx(); + shell.state.drop_tx_batch(); Ok(false) } }, @@ -448,7 +448,7 @@ where "Error executing governance proposal {}", e.error.to_string() ); - shell.state.drop_tx(); + shell.state.drop_tx_batch(); Ok(false) } } diff --git a/crates/node/src/shell/prepare_proposal.rs b/crates/node/src/shell/prepare_proposal.rs index 89eb62784e..7498a86111 100644 --- a/crates/node/src/shell/prepare_proposal.rs +++ b/crates/node/src/shell/prepare_proposal.rs @@ -126,12 +126,11 @@ where .filter_map(|(tx_index, 
tx_bytes)| { match validate_wrapper_bytes(tx_bytes, &TxIndex::must_from_usize(tx_index),block_time, block_proposer, proposer_local_config, &mut temp_state, &mut vp_wasm_cache, &mut tx_wasm_cache, ) { Ok(gas) => { - temp_state.write_log_mut().commit_tx_to_batch(); temp_state.write_log_mut().commit_batch(); Some((tx_bytes.to_owned(), gas)) }, Err(()) => { - temp_state.write_log_mut().drop_tx(); + temp_state.write_log_mut().drop_batch(); None } } diff --git a/crates/node/src/shell/process_proposal.rs b/crates/node/src/shell/process_proposal.rs index 7abbe7e352..870eec9afa 100644 --- a/crates/node/src/shell/process_proposal.rs +++ b/crates/node/src/shell/process_proposal.rs @@ -150,7 +150,6 @@ where ); let error_code = ResultCode::from_u32(result.code).unwrap(); if let ResultCode::Ok = error_code { - temp_state.write_log_mut().commit_tx_to_batch(); temp_state.write_log_mut().commit_batch(); } else { tracing::info!( @@ -159,7 +158,7 @@ where error_code, result.info ); - temp_state.write_log_mut().drop_tx(); + temp_state.write_log_mut().drop_batch(); } result }) @@ -1025,7 +1024,8 @@ mod test_process_proposal { "Error trying to apply a transaction: Error while processing \ transaction's fees: Transparent balance of wrapper's signer \ was insufficient to pay fee. All the available transparent \ - funds have been moved to the block proposer. This shouldn't happen." + funds have been moved to the block proposer. This shouldn't \ + happen." ) ); } @@ -1091,7 +1091,8 @@ mod test_process_proposal { "Error trying to apply a transaction: Error while processing \ transaction's fees: Transparent balance of wrapper's signer \ was insufficient to pay fee. All the available transparent \ - funds have been moved to the block proposer. This shouldn't happen." + funds have been moved to the block proposer. This shouldn't \ + happen." ) ); } diff --git a/crates/state/src/wl_state.rs b/crates/state/src/wl_state.rs index 26e3a20cca..e4ca57ea52 100644 --- a/crates/state/src/wl_state.rs +++ b/crates/state/src/wl_state.rs @@ -651,16 +651,17 @@ where } } - /// Commit the current transaction's write log to the block. Starts a new - /// transaction write log. - pub fn commit_tx(&mut self) { - self.write_log.commit_tx() + /// Commit the current transaction's write log and the entire batch to the + /// block. Starts a new transaction and batch write log. + pub fn commit_tx_batch(&mut self) { + self.write_log.commit_batch() } /// Drop the current transaction's write log when it's declined by any of - /// the triggered validity predicates. Starts a new transaction write log. - pub fn drop_tx(&mut self) { - self.write_log.drop_tx() + /// the triggered validity predicates together with the entire batch. Starts + /// new transaction and batch write logs. 
+ pub fn drop_tx_batch(&mut self) { + self.write_log.drop_batch() } /// Mark the provided transaction's hash as redundant to prevent committing diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index 3a29f650e2..1d56c4699f 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -306,7 +306,8 @@ impl WriteLog { // wasm environment without the need for cooperation from // the wasm code (tx or vp), so there's no need to return // gas in case of an error because execution will terminate - // anyway and this cannot be exploited to keep the vm running + // anyway and this cannot be exploited to keep the vm + // running return Err(Error::UpdateVpOfNewAccount); } }, @@ -596,7 +597,9 @@ impl WriteLog { self.tx_write_log.events.tree.values().flatten() } - /// Commit the current transaction's write log to the batch when it's accepted by all the triggered validity predicates. Starts a new transaction write log. + /// Commit the current transaction's write log to the batch when it's + /// accepted by all the triggered validity predicates. Starts a new + /// transaction write log. pub fn commit_tx_to_batch(&mut self) { let tx_write_log = std::mem::take(&mut self.tx_write_log); let batched_log = BatchedTxWriteLog { @@ -607,34 +610,29 @@ impl WriteLog { self.batch_write_log.push(batched_log); } - /// Drop the current transaction's write log and IBC events when it's declined by any of the triggered validity predicates. - /// Starts a new transaction write log a clears the temp write log. + /// Drop the current transaction's write log and IBC events when it's + /// declined by any of the triggered validity predicates. Starts a new + /// transaction write log and clears the temp write log. pub fn drop_tx(&mut self) { self.tx_write_log = Default::default(); } - /// Commit the entire batch to the block log. + /// Commit the current tx and the entire batch to the block log. pub fn commit_batch(&mut self) { + self.commit_tx_to_batch(); + for log in std::mem::take(&mut self.batch_write_log) { self.block_write_log.extend(log.write_log); self.block_address_gen = log.address_gen; } } - /// Drop the entire batch log. - //FIXME: should this also drop the tx? + /// Drop the current tx and the entire batch log. pub fn drop_batch(&mut self) { + self.drop_tx(); self.batch_write_log = Default::default(); } - /// Commit the tx write log to the block write log. - //FIXME: shoul this pass through the batch? - pub fn commit_tx(&mut self) { - let tx_write_log = std::mem::take(&mut self.tx_write_log); - self.block_write_log.extend(tx_write_log.write_log); - self.block_address_gen = tx_write_log.address_gen; - } - /// Get the verifiers set whose validity predicates should validate the /// current transaction changes and the storage keys that have been /// modified created, updated and deleted via the write log. 
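As a reading aid for the write-log changes above, here is a minimal, self-contained model of the new three-layer semantics (commit_tx_to_batch, commit_batch, drop_tx, drop_batch). It is only a sketch: the `Modifications` alias is a placeholder, and the real `WriteLog` additionally tracks IBC events, the address generator and replay-protection entries.

use std::collections::HashMap;

// Simplified stand-in for the storage changes produced by a single inner tx.
type Modifications = HashMap<String, Vec<u8>>;

#[derive(Default)]
struct LayeredWriteLog {
    tx_write_log: Modifications,
    batch_write_log: Vec<Modifications>,
    block_write_log: Modifications,
}

impl LayeredWriteLog {
    // Tx accepted by its validity predicates: move its changes into the batch.
    fn commit_tx_to_batch(&mut self) {
        let tx_log = std::mem::take(&mut self.tx_write_log);
        self.batch_write_log.push(tx_log);
    }

    // Tx rejected: discard only the current tx's changes.
    fn drop_tx(&mut self) {
        self.tx_write_log = Modifications::default();
    }

    // Commit the current tx and then the whole batch to the block layer.
    fn commit_batch(&mut self) {
        self.commit_tx_to_batch();
        for log in std::mem::take(&mut self.batch_write_log) {
            self.block_write_log.extend(log);
        }
    }

    // Drop the current tx together with every previously batched tx.
    fn drop_batch(&mut self) {
        self.drop_tx();
        self.batch_write_log.clear();
    }
}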
@@ -763,7 +761,7 @@ mod tests { // delete a non-existing key let (gas, diff) = write_log.delete(&key).unwrap(); - assert_eq!(gas, key.len() as u64 * STORAGE_WRITE_GAS_PER_BYTE); + assert_eq!(gas, key.len() as u64 * STORAGE_DELETE_GAS_PER_BYTE); assert_eq!(diff, 0); // insert a value @@ -801,13 +799,13 @@ mod tests { let (gas, diff) = write_log.delete(&key).unwrap(); assert_eq!( gas, - (key.len() + updated.len()) as u64 * STORAGE_WRITE_GAS_PER_BYTE + (key.len() + updated.len()) as u64 * STORAGE_DELETE_GAS_PER_BYTE ); assert_eq!(diff, -(updated.len() as i64)); // delete the deleted key again let (gas, diff) = write_log.delete(&key).unwrap(); - assert_eq!(gas, key.len() as u64 * STORAGE_WRITE_GAS_PER_BYTE); + assert_eq!(gas, key.len() as u64 * STORAGE_DELETE_GAS_PER_BYTE); assert_eq!(diff, 0); // read the deleted key @@ -924,7 +922,7 @@ mod tests { // initialize an account let vp1 = Hash::sha256("vp1".as_bytes()); let (addr1, _) = state.write_log.init_account(&address_gen, vp1, &[]); - state.write_log.commit_tx(); + state.write_log.commit_batch(); // write values let val1 = "val1".as_bytes().to_vec(); @@ -932,7 +930,7 @@ mod tests { state.write_log.write(&key2, val1.clone()).unwrap(); state.write_log.write(&key3, val1.clone()).unwrap(); state.write_log.write_temp(&key4, val1.clone()).unwrap(); - state.write_log.commit_tx(); + state.write_log.commit_batch(); // these values are not written due to drop_tx let val2 = "val2".as_bytes().to_vec(); @@ -945,7 +943,7 @@ mod tests { let val3 = "val3".as_bytes().to_vec(); state.write_log.delete(&key2).unwrap(); state.write_log.write(&key3, val3.clone()).unwrap(); - state.write_log.commit_tx(); + state.write_log.commit_batch(); // commit a block state.commit_block().expect("commit failed"); @@ -987,9 +985,11 @@ mod tests { assert!(state.write_log.replay_protection.is_empty()); for tx in ["tx1", "tx2", "tx3"] { let hash = Hash::sha256(tx.as_bytes()); - assert!(state - .has_replay_protection_entry(&hash) - .expect("read failed")); + assert!( + state + .has_replay_protection_entry(&hash) + .expect("read failed") + ); } { @@ -1016,13 +1016,17 @@ mod tests { assert!(state.write_log.replay_protection.is_empty()); for tx in ["tx1", "tx2", "tx3", "tx5", "tx6"] { - assert!(state - .has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) - .expect("read failed")); + assert!( + state + .has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) + .expect("read failed") + ); } - assert!(!state - .has_replay_protection_entry(&Hash::sha256("tx4".as_bytes())) - .expect("read failed")); + assert!( + !state + .has_replay_protection_entry(&Hash::sha256("tx4".as_bytes())) + .expect("read failed") + ); { let write_log = state.write_log_mut(); write_log @@ -1030,10 +1034,12 @@ mod tests { .unwrap(); // mark as redundant a missing hash and check that it fails - assert!(state - .write_log - .redundant_tx_hash(&Hash::sha256("tx8".as_bytes())) - .is_err()); + assert!( + state + .write_log + .redundant_tx_hash(&Hash::sha256("tx8".as_bytes())) + .is_err() + ); // Do not assert the state of replay protection because this // error will actually trigger a shut down of the node. 
Also, since diff --git a/crates/tests/src/native_vp/pos.rs b/crates/tests/src/native_vp/pos.rs index e0d970f11a..926588e594 100644 --- a/crates/tests/src/native_vp/pos.rs +++ b/crates/tests/src/native_vp/pos.rs @@ -268,7 +268,7 @@ mod tests { if !test_state.is_current_tx_valid { // Clear out the changes tx_host_env::with(|env| { - env.state.drop_tx(); + env.state.drop_tx_batch(); }); } @@ -282,7 +282,7 @@ mod tests { tx_host_env::with(|env| { // Clear out the changes if !test_state.is_current_tx_valid { - env.state.drop_tx(); + env.state.drop_tx_batch(); } // Also commit the last transaction(s) changes, if any env.commit_tx_and_block(); @@ -318,7 +318,7 @@ mod tests { // Clear out the invalid changes tx_host_env::with(|env| { - env.state.drop_tx(); + env.state.drop_tx_batch(); }) } } diff --git a/crates/tests/src/storage_api/collections/lazy_map.rs b/crates/tests/src/storage_api/collections/lazy_map.rs index 48d5f64511..2932be34dd 100644 --- a/crates/tests/src/storage_api/collections/lazy_map.rs +++ b/crates/tests/src/storage_api/collections/lazy_map.rs @@ -241,7 +241,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.state.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx_batch()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/lazy_set.rs b/crates/tests/src/storage_api/collections/lazy_set.rs index 4ac16671d9..386bb13a91 100644 --- a/crates/tests/src/storage_api/collections/lazy_set.rs +++ b/crates/tests/src/storage_api/collections/lazy_set.rs @@ -229,7 +229,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.state.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx_batch()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/lazy_vec.rs b/crates/tests/src/storage_api/collections/lazy_vec.rs index a51508dd71..af19ab8afb 100644 --- a/crates/tests/src/storage_api/collections/lazy_vec.rs +++ b/crates/tests/src/storage_api/collections/lazy_vec.rs @@ -234,7 +234,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.state.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx_batch()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/nested_lazy_map.rs b/crates/tests/src/storage_api/collections/nested_lazy_map.rs index 7658a66223..f7b0eef995 100644 --- a/crates/tests/src/storage_api/collections/nested_lazy_map.rs +++ b/crates/tests/src/storage_api/collections/nested_lazy_map.rs @@ -254,7 +254,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.state.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx_batch()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/vm_host_env/ibc.rs b/crates/tests/src/vm_host_env/ibc.rs index 4bee157c12..40a97e70e8 100644 --- a/crates/tests/src/vm_host_env/ibc.rs +++ b/crates/tests/src/vm_host_env/ibc.rs @@ -280,7 +280,7 @@ pub fn init_storage() -> (Address, Address) { // commit the initialized token and account tx_host_env::with(|env| { - env.state.commit_tx(); + env.state.commit_tx_batch(); env.state.commit_block().unwrap(); // block 
header to check timeout timestamp diff --git a/crates/tests/src/vm_host_env/mod.rs b/crates/tests/src/vm_host_env/mod.rs index 6ebf1fd10a..5c81af9299 100644 --- a/crates/tests/src/vm_host_env/mod.rs +++ b/crates/tests/src/vm_host_env/mod.rs @@ -397,7 +397,7 @@ mod tests { let existing_value = vec![2_u8; 1000]; tx_env.state.write(&existing_key, &existing_value).unwrap(); // ... and commit it - tx_env.state.commit_tx(); + tx_env.state.commit_tx_batch(); // In a transaction, write override the existing key's value and add // another key-value @@ -483,7 +483,7 @@ mod tests { tx_env.state.write(&key, i).unwrap(); } // ... and commit them - tx_env.state.commit_tx(); + tx_env.state.commit_tx_batch(); // In a transaction, write override the existing key's value and add // another key-value diff --git a/crates/tests/src/vm_host_env/tx.rs b/crates/tests/src/vm_host_env/tx.rs index 594391024d..ef580dd0ba 100644 --- a/crates/tests/src/vm_host_env/tx.rs +++ b/crates/tests/src/vm_host_env/tx.rs @@ -200,7 +200,7 @@ impl TestTxEnv { } pub fn commit_tx_and_block(&mut self) { - self.state.commit_tx(); + self.state.commit_tx_batch(); self.state .commit_block() .map_err(|err| println!("{:?}", err)) From 120ad8e6ce1889146afc95dabc3d86a915d25980 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 6 Jun 2024 17:48:44 +0200 Subject: [PATCH 23/40] Adds support for masp fee payment in sdk --- crates/apps_lib/src/cli.rs | 33 + crates/core/src/masp.rs | 14 +- crates/core/src/token.rs | 8 +- crates/light_sdk/src/transaction/transfer.rs | 2 + crates/namada/src/ledger/protocol/mod.rs | 3 - crates/node/src/bench_utils.rs | 2 + crates/node/src/shell/mod.rs | 3 +- crates/sdk/src/args.rs | 7 + crates/sdk/src/lib.rs | 6 +- crates/sdk/src/masp.rs | 946 ++++++++++++++----- crates/sdk/src/tx.rs | 119 ++- crates/shielded_token/src/utils.rs | 8 +- crates/token/src/lib.rs | 2 + 13 files changed, 875 insertions(+), 278 deletions(-) diff --git a/crates/apps_lib/src/cli.rs b/crates/apps_lib/src/cli.rs index ff0745ffc1..a0f4e31228 100644 --- a/crates/apps_lib/src/cli.rs +++ b/crates/apps_lib/src/cli.rs @@ -3221,6 +3221,8 @@ pub mod args { "gas-limit", DefaultFn(|| GasLimit::from(DEFAULT_GAS_LIMIT)), ); + pub const GAS_SPENDING_KEY: ArgOpt = + arg_opt("gas-spending-key"); pub const FEE_TOKEN: ArgDefaultFromCtx = arg_default_from_ctx("gas-token", DefaultFn(|| "".parse().unwrap())); pub const FEE_PAYER: Arg = arg("fee-payer"); @@ -4416,9 +4418,16 @@ pub mod args { }); } + let gas_spending_keys = self + .gas_spending_keys + .iter() + .map(|key| chain_ctx.get_cached(key)) + .collect(); + Ok(TxShieldedTransfer:: { tx, data, + gas_spending_keys, tx_code_path: self.tx_code_path.to_path_buf(), }) } @@ -4438,10 +4447,15 @@ pub mod args { token, amount, }]; + let mut gas_spending_keys = vec![]; + if let Some(key) = GAS_SPENDING_KEY.parse(matches) { + gas_spending_keys.push(key); + } Self { tx, data, + gas_spending_keys, tx_code_path, } } @@ -4464,6 +4478,10 @@ pub mod args { .def() .help(wrap!("The amount to transfer in decimal.")), ) + .arg(GAS_SPENDING_KEY.def().help(wrap!( + "The optional spending key that will be used in addition \ + to the source for gas payment." 
+ ))) } } @@ -4557,10 +4575,16 @@ pub mod args { amount: transfer_data.amount, }); } + let gas_spending_keys = self + .gas_spending_keys + .iter() + .map(|key| chain_ctx.get_cached(key)) + .collect(); Ok(TxUnshieldingTransfer:: { tx, data, + gas_spending_keys, source: chain_ctx.get_cached(&self.source), tx_code_path: self.tx_code_path.to_path_buf(), }) @@ -4580,11 +4604,16 @@ pub mod args { token, amount, }]; + let mut gas_spending_keys = vec![]; + if let Some(key) = GAS_SPENDING_KEY.parse(matches) { + gas_spending_keys.push(key); + } Self { tx, source, data, + gas_spending_keys, tx_code_path, } } @@ -4607,6 +4636,10 @@ pub mod args { .def() .help(wrap!("The amount to transfer in decimal.")), ) + .arg(GAS_SPENDING_KEY.def().help(wrap!( + "The optional spending key that will be used in addition \ + to the source for gas payment." + ))) } } diff --git a/crates/core/src/masp.rs b/crates/core/src/masp.rs index c31734a803..fa533f25c4 100644 --- a/crates/core/src/masp.rs +++ b/crates/core/src/masp.rs @@ -368,7 +368,15 @@ impl<'de> serde::Deserialize<'de> for PaymentAddress { /// Wrapper for masp_primitive's ExtendedSpendingKey #[derive( - Clone, Debug, Copy, BorshSerialize, BorshDeserialize, BorshDeserializer, + Clone, + Debug, + Copy, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + Hash, + Eq, + PartialEq, )] pub struct ExtendedSpendingKey(masp_primitives::zip32::ExtendedSpendingKey); @@ -433,7 +441,7 @@ impl<'de> serde::Deserialize<'de> for ExtendedSpendingKey { } /// Represents a source of funds for a transfer -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Hash, Eq, PartialEq)] pub enum TransferSource { /// A transfer coming from a transparent address Address(Address), @@ -479,7 +487,7 @@ impl Display for TransferSource { } /// Represents a target for the funds of a transfer -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Hash, Eq, PartialEq)] pub enum TransferTarget { /// A transfer going to a transparent address Address(Address), diff --git a/crates/core/src/token.rs b/crates/core/src/token.rs index 69588b9518..81f39d17e2 100644 --- a/crates/core/src/token.rs +++ b/crates/core/src/token.rs @@ -231,13 +231,14 @@ impl Amount { Self { raw: Uint(raw) } } - /// Given a u128 and [`MaspDigitPos`], construct the corresponding + /// Given a i128 and [`MaspDigitPos`], construct the corresponding /// amount. - pub fn from_masp_denominated_u128( - val: u128, + pub fn from_masp_denominated_i128( + val: i128, denom: MaspDigitPos, ) -> Option { let lo = u64::try_from(val).ok()?; + #[allow(clippy::cast_sign_loss)] let hi = (val >> 64) as u64; let lo_pos = denom as usize; let hi_pos = lo_pos.checked_add(1)?; @@ -924,6 +925,7 @@ impl MaspDigitPos { } /// Get the corresponding u64 word from the input uint256. + // FIXME: remove if unused? pub fn denominate_i128(&self, amount: &Change) -> i128 { let val = i128::from(amount.abs().0[*self as usize]); if Change::is_negative(amount) { diff --git a/crates/light_sdk/src/transaction/transfer.rs b/crates/light_sdk/src/transaction/transfer.rs index 9dc7aa8e45..efdbfd89e6 100644 --- a/crates/light_sdk/src/transaction/transfer.rs +++ b/crates/light_sdk/src/transaction/transfer.rs @@ -35,11 +35,13 @@ impl Transfer { /// Build a shielded transfer transaction from the given parameters pub fn shielded( + fee_payer: Option
, shielded_section_hash: Hash, transaction: Transaction, args: GlobalArgs, ) -> Self { let data = namada_sdk::token::ShieldedTransfer { + fee_payer, section_hash: shielded_section_hash, }; diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index c34cf14e2c..a0249caca3 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -233,8 +233,6 @@ where tx_wasm_cache, } => { if let Some(tx_result) = wrapper_tx_result { - // TODO(namada#2597): handle masp fee payment in the first inner - // tx if necessary // Replay protection check on the batch let tx_hash = tx.raw_header_hash(); if state.write_log().has_replay_protection_entry(&tx_hash) { @@ -661,7 +659,6 @@ where } } -// FIXME: search for all the TODOS for 2596 and 2597 and remove them fn try_masp_fee_payment( ShellParams { tx_gas_meter, diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index fa2da91868..e166b6a20f 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -1092,6 +1092,7 @@ impl BenchShieldedCtx { ShieldedContext::::gen_shielded_transfer( &namada, vec![masp_transfer_data], + None, true, ), ) @@ -1119,6 +1120,7 @@ impl BenchShieldedCtx { namada.client().generate_tx( TX_SHIELDED_TRANSFER_WASM, ShieldedTransfer { + fee_payer: None, section_hash: shielded_section_hash, }, Some(shielded), diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index 7f09831caf..eb70a72c8a 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -1129,8 +1129,7 @@ where return response; } - // TODO(namada#2597): validate masp fee payment if normal fee - // payment fails Validate wrapper fees + // Validate wrapper fees if let Err(e) = mempool_fee_check( &mut ShellParams::new( &RefCell::new(gas_meter), diff --git a/crates/sdk/src/args.rs b/crates/sdk/src/args.rs index 7cc775829a..a94c6d9724 100644 --- a/crates/sdk/src/args.rs +++ b/crates/sdk/src/args.rs @@ -327,6 +327,8 @@ pub struct TxShieldedTransfer { pub tx: Tx, /// Transfer-specific data pub data: Vec>, + /// Optional additional keys for gas pyament + pub gas_spending_keys: Vec, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -395,6 +397,8 @@ pub struct TxUnshieldingTransfer { pub source: C::SpendingKey, /// Transfer-specific data pub data: Vec>, + /// Optional additional keys for gas payment + pub gas_spending_keys: Vec, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -2134,6 +2138,9 @@ pub struct Tx { pub gas_limit: GasLimit, /// The optional expiration of the transaction pub expiration: TxExpiration, + // FIXME: maybe should move this out of here, it's only needed for txs that + // pay the fees via the masp, so it should go together with the optional + // gas spending keys /// Generate an ephimeral signing key to be used only once to sign a /// wrapper tx pub disposable_signing_key: bool, diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index 7d83a93895..7241da670a 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -189,9 +189,11 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { fn new_shielded_transfer( &self, data: Vec, + gas_spending_keys: Vec, ) -> args::TxShieldedTransfer { args::TxShieldedTransfer { data, + gas_spending_keys, tx_code_path: PathBuf::from(TX_SHIELDED_TRANSFER_WASM), tx: self.tx_builder(), } @@ -218,10 +220,12 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { &self, source: ExtendedSpendingKey, data: Vec, + gas_spending_keys: 
Vec, ) -> args::TxUnshieldingTransfer { args::TxUnshieldingTransfer { source, data, + gas_spending_keys, tx_code_path: PathBuf::from(TX_UNSHIELDING_TRANSFER_WASM), tx: self.tx_builder(), } @@ -1130,7 +1134,7 @@ pub mod testing { let tx_data = match masp_tx_type { MaspTxType::Shielded => { tx.add_code_from_hash(code_hash, Some(TX_SHIELDED_TRANSFER_WASM.to_owned())); - let data = ShieldedTransfer { section_hash: shielded_section_hash }; + let data = ShieldedTransfer { fee_payer: transfers.0.first().map(|transfer| transfer.target.clone()), section_hash: shielded_section_hash }; tx.add_data(data.clone()); TxData::ShieldedTransfer(data, (build_params, build_param_bytes)) }, diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs index bed3d1cf52..66c3d68dc4 100644 --- a/crates/sdk/src/masp.rs +++ b/crates/sdk/src/masp.rs @@ -50,6 +50,7 @@ use masp_proofs::bls12_381::Bls12; use masp_proofs::prover::LocalTxProver; use masp_proofs::sapling::BatchValidator; use namada_core::address::Address; +use namada_core::arith::{CheckedAdd, CheckedSub}; use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; pub use namada_core::masp::{ @@ -121,6 +122,15 @@ pub struct ShieldedTransfer { pub epoch: MaspEpoch, } +/// The data for a masp fee payment +#[allow(missing_docs)] +pub struct MaspFeeData { + pub sources: Vec, + pub target: Address, + pub token: Address, + pub amount: token::DenominatedAmount, +} + /// The data for a single masp transfer #[allow(missing_docs)] #[derive(Debug)] @@ -131,6 +141,36 @@ pub struct MaspTransferData { pub amount: token::DenominatedAmount, } +// The data for a masp transfer relative to a given source +#[derive(Hash, Eq, PartialEq)] +struct MaspSourceTransferData { + source: TransferSource, + token: Address, +} + +// The data for a masp transfer relative to a given target +#[derive(Hash, Eq, PartialEq)] +struct MaspTargetTransferData { + source: TransferSource, + target: TransferTarget, + token: Address, +} + +/// Data to log masp transactions' errors +#[allow(missing_docs)] +#[derive(Debug)] +pub struct MaspDataLog { + pub source: Option, + pub token: Address, + pub amount: token::DenominatedAmount, +} + +struct MaspTxReorderedData { + source_data: HashMap, + target_data: HashMap, + denoms: HashMap, +} + /// Shielded pool data for a token #[allow(missing_docs)] #[derive(Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] @@ -153,7 +193,7 @@ pub enum TransferErr { /// The error error: builder::Error, /// The optional associated transfer data for logging purposes - data: Option, + data: Option, }, /// errors #[error("{0}")] @@ -514,6 +554,11 @@ pub struct MaspChange { /// a masp amount pub type MaspAmount = ValueSum<(Option, Address), token::Change>; +// A type tracking the notes used to construct a shielded transfer. 
Used to +// avoid reusing the same notes multiple times which would lead to an invalid +// transaction +type SpentNotesTracker = HashMap>; + /// An extension of Option's cloned method for pair types fn cloned_pair((a, b): (&T, &U)) -> (T, U) { (a.clone(), b.clone()) @@ -1344,6 +1389,7 @@ impl ShieldedContext { pub async fn collect_unspent_notes( &mut self, context: &impl Namada, + spent_notes: &mut SpentNotesTracker, vk: &ViewingKey, target: I128Sum, target_epoch: MaspEpoch, @@ -1362,15 +1408,24 @@ impl ShieldedContext { let mut val_acc = I128Sum::zero(); let mut normed_val_acc = I128Sum::zero(); let mut notes = Vec::new(); + // Retrieve the notes that can be spent by this key if let Some(avail_notes) = self.pos_map.get(vk).cloned() { for note_idx in &avail_notes { + // Skip spend notes already used in this transaction + if spent_notes + .get(vk) + .is_some_and(|set| set.contains(note_idx)) + { + continue; + } // No more transaction inputs are required once we have met // the target amount if normed_val_acc >= target { break; } - // Spent notes cannot contribute a new transaction's pool + // Spent notes from the shielded context (i.e. from previous + // transactions) cannot contribute a new transaction's pool if self.spents.contains(note_idx) { continue; } @@ -1427,6 +1482,13 @@ impl ShieldedContext { })?; // Commit this note to our transaction notes.push((*diversifier, note, merkle_path)); + // Append the note the list of used ones + spent_notes + .entry(vk.to_owned()) + .and_modify(|set| { + set.insert(*note_idx); + }) + .or_insert([*note_idx].into_iter().collect()); } } } @@ -1532,6 +1594,7 @@ impl ShieldedContext { pub async fn gen_shielded_transfer( context: &impl Namada, data: Vec, + fee_data: Option, update_ctx: bool, ) -> Result, TransferErr> { // Try to get a seed from env var, if any. @@ -1612,7 +1675,134 @@ impl ShieldedContext { // Determine epoch in which to submit potential shielded transaction let epoch = rpc::query_masp_epoch(context.client()).await?; - let mut is_context_loaded = false; + let mut notes_tracker = SpentNotesTracker::new(); + { + // Load the current shielded context given + // the spending key we possess + let mut shielded = context.shielded_mut().await; + let _ = shielded.load().await; + } + + let Some(MaspTxReorderedData { + source_data, + target_data, + mut denoms, + }) = Self::reorder_data_for_masp_transfer(context, data).await? + else { + // No shielded components are needed when neither source nor + // destination are shielded + return Ok(None); + }; + + for (MaspSourceTransferData { source, token }, amount) in &source_data { + Self::add_inputs( + context, + &mut builder, + source, + token, + amount, + epoch, + &denoms, + &mut notes_tracker, + ) + .await?; + } + + for ( + MaspTargetTransferData { + source, + target, + token, + }, + amount, + ) in target_data + { + Self::add_outputs( + context, + &mut builder, + source, + &target, + token, + amount, + epoch, + &denoms, + ) + .await?; + } + + // Collect the fees if needed + if let Some(MaspFeeData { + sources, + target, + token, + amount, + }) = fee_data + { + Self::add_fees( + context, + &mut builder, + &source_data, + sources, + &target, + &token, + &amount, + epoch, + &mut denoms, + &mut notes_tracker, + ) + .await?; + } + + // Finally, add outputs representing the change from this payment. 
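// Reading aid, not part of the patch: the SpentNotesTracker introduced above
// only has to remember which note indices each viewing key has already
// contributed while the current transaction is being assembled, so that
// add_inputs and the later fee collection never pick the same note twice.
// The sketch below uses a String stand-in for the viewing key and selects a
// fixed number of notes instead of a value target, purely for illustration.

use std::collections::{HashMap, HashSet};

type ViewingKeyId = String;
type SpentNotesSketch = HashMap<ViewingKeyId, HashSet<usize>>;

fn select_notes(
    tracker: &mut SpentNotesSketch,
    vk: &ViewingKeyId,
    available: &[usize],
    wanted: usize,
) -> Vec<usize> {
    let mut picked = Vec::new();
    for note_idx in available {
        if picked.len() == wanted {
            break;
        }
        // Skip notes consumed by an earlier call for the same transaction
        if tracker.get(vk).is_some_and(|set| set.contains(note_idx)) {
            continue;
        }
        tracker.entry(vk.clone()).or_default().insert(*note_idx);
        picked.push(*note_idx);
    }
    picked
}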
+ Self::add_changes(&mut builder, &source_data)?; + + let builder_clone = builder.clone().map_builder(WalletMap); + // Build and return the constructed transaction + #[cfg(not(feature = "testing"))] + let prover = context.shielded().await.utils.local_tx_prover(); + #[cfg(feature = "testing")] + let prover = testing::MockTxProver(std::sync::Mutex::new(OsRng)); + let (masp_tx, metadata) = builder + .build( + &prover, + &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut RngBuildParams::new(OsRng), + ) + .map_err(|error| TransferErr::Build { error, data: None })?; + + if update_ctx { + // Cache the generated transfer + let mut shielded_ctx = context.shielded_mut().await; + shielded_ctx + .pre_cache_transaction(context, &[masp_tx.clone()]) + .await?; + } + + Ok(Some(ShieldedTransfer { + builder: builder_clone, + masp_tx, + metadata, + epoch, + })) + } + + // Group all the information for every source/token and target/token couple, + // and extract the denominations for all the tokens involved (expect the one + // involved in the fees if needed). This step is required so that we can + // collect the amount required for every couple and pass it to the + // appropriate function so that notes can be collected based on the correct + // amount. + async fn reorder_data_for_masp_transfer( + context: &impl Namada, + data: Vec, + ) -> Result, TransferErr> { + let mut source_data = + HashMap::::new(); + let mut target_data = + HashMap::::new(); + let mut denoms = HashMap::new(); + for MaspTransferData { source, target, @@ -1627,41 +1817,307 @@ impl ShieldedContext { if spending_key.is_none() && payment_address.is_none() { return Ok(None); } - // We want to fund our transaction solely from supplied spending key - let spending_key = spending_key.map(|x| x.into()); + + if denoms.get(&token).is_none() { + if let Some(denom) = query_denom(context.client(), &token).await + { + denoms.insert(token.clone(), denom); + } else { + return Err(TransferErr::General(Error::from( + QueryError::General(format!( + "denomination for token {token}" + )), + ))); + }; + } + + let key = MaspSourceTransferData { + source: source.clone(), + token: token.clone(), + }; + match source_data.get_mut(&key) { + Some(prev_amount) => { + *prev_amount = checked!(prev_amount.to_owned() + amount) + .map_err(|e| TransferErr::General(e.into()))?; + } + None => { + source_data.insert(key, amount); + } + } + + let key = MaspTargetTransferData { + source, + target, + token, + }; + match target_data.get_mut(&key) { + Some(prev_amount) => { + *prev_amount = checked!(prev_amount.to_owned() + amount) + .map_err(|e| TransferErr::General(e.into()))?; + } + None => { + target_data.insert(key, amount); + } + } + } + + Ok(Some(MaspTxReorderedData { + source_data, + target_data, + denoms, + })) + } + + // Add the necessary transaction inputs to the builder. 
Returns the actual + // amount of inputs added to the transaction if these are shielded, `None` + // if transparent inputs + #[allow(clippy::too_many_arguments)] + async fn add_inputs( + context: &impl Namada, + builder: &mut Builder, + source: &TransferSource, + token: &Address, + amount: &token::DenominatedAmount, + epoch: MaspEpoch, + denoms: &HashMap, + notes_tracker: &mut SpentNotesTracker, + ) -> Result, TransferErr> { + let spending_key = source.spending_key(); + + // We want to fund our transaction solely from supplied spending key + let spending_key = spending_key.map(|x| x.into()); + + // Now we build up the transaction within this object + + // Convert transaction amount into MASP types + // Ok to unwrap cause we've already seen the token before, the + // denomination must be there + let denom = denoms.get(token).unwrap(); + let (asset_types, masp_amount) = { + let mut shielded = context.shielded_mut().await; + // Do the actual conversion to an asset type + let amount = shielded + .convert_namada_amount_to_masp( + context.client(), + epoch, + token, + denom.to_owned(), + amount.amount(), + ) + .await?; + // Make sure to save any decodings of the asset types used so + // that balance queries involving them are + // successful + let _ = shielded.save().await; + amount + }; + + // If there are shielded inputs + let added_amt = if let Some(sk) = spending_key { + // Locate unspent notes that can help us meet the transaction + // amount + let (added_amount, unspent_notes, used_convs) = context + .shielded_mut() + .await + .collect_unspent_notes( + context, + notes_tracker, + &to_viewing_key(&sk).vk, + I128Sum::from_sum(masp_amount), + epoch, + ) + .await?; + // Commit the notes found to our transaction + for (diversifier, note, merkle_path) in unspent_notes { + builder + .add_sapling_spend(sk, diversifier, note, merkle_path) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } + // Commit the conversion notes used during summation + for (conv, wit, value) in used_convs.values() { + if value.is_positive() { + builder + .add_sapling_convert( + conv.clone(), + *value as u64, + wit.clone(), + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } + } + + Some(added_amount) + } else { + // We add a dummy UTXO to our transaction, but only the source + // of the parent Transfer object is used to + // validate fund availability + let source_enc = source + .address() + .ok_or_else(|| { + Error::Other( + "source address should be transparent".to_string(), + ) + })? 
+ .serialize_to_vec(); + + let hash = ripemd::Ripemd160::digest(sha2::Sha256::digest( + source_enc.as_ref(), + )); + let script = TransparentAddress(hash.into()); + for (digit, asset_type) in + MaspDigitPos::iter().zip(asset_types.iter()) { - if !is_context_loaded { - // Load the current shielded context (at most once) given - // the spending key we possess - let mut shielded = context.shielded_mut().await; - let _ = shielded.load().await; - is_context_loaded = true; + let amount_part = digit.denominate(&amount.amount()); + // Skip adding an input if its value is 0 + if amount_part != 0 { + builder + .add_transparent_input(TxOut { + asset_type: *asset_type, + value: amount_part, + address: script, + }) + .map_err(|e| TransferErr::Build { + error: builder::Error::TransparentBuild(e), + data: None, + })?; } } - // Context required for storing which notes are in the source's - // possession - let memo = MemoBytes::empty(); - // Now we build up the transaction within this object + None + }; + + Ok(added_amt) + } - // Convert transaction amount into MASP types - let Some(denom) = query_denom(context.client(), &token).await - else { - return Err(TransferErr::General(Error::from( - QueryError::General(format!( - "denomination for token {token}" - )), - ))); - }; - let (asset_types, masp_amount) = { + // Add the necessary transaction outputs to the builder + #[allow(clippy::too_many_arguments)] + async fn add_outputs( + context: &impl Namada, + builder: &mut Builder, + source: TransferSource, + target: &TransferTarget, + token: Address, + amount: token::DenominatedAmount, + epoch: MaspEpoch, + denoms: &HashMap, + ) -> Result<(), TransferErr> { + // Anotate the asset type in the value balance with its decoding in + // order to facilitate cross-epoch computations + let value_balance = context + .shielded_mut() + .await + .decode_sum(context.client(), builder.value_balance()) + .await; + + let payment_address = target.payment_address(); + // If we are sending to a transparent output, then we will need to + // embed the transparent target address into the + // shielded transaction so that it can be signed + let transparent_target_hash = if payment_address.is_none() { + let target_enc = target + .address() + .ok_or_else(|| { + Error::Other( + "target address should be transparent".to_string(), + ) + })? + .serialize_to_vec(); + Some(ripemd::Ripemd160::digest(sha2::Sha256::digest( + target_enc.as_ref(), + ))) + } else { + None + }; + // This indicates how many more assets need to be sent to the + // receiver in order to satisfy the requested transfer + // amount. + let mut rem_amount = amount.amount().raw_amount().0; + + // Ok to unwrap cause we've already seen the token before, the + // denomination must be there + let denom = denoms.get(&token).unwrap(); + + // Now handle the outputs of this transaction + // Loop through the value balance components and see which + // ones can be given to the receiver + for ((asset_type, decoded), val) in value_balance.components() { + let rem_amount = &mut rem_amount[decoded.position as usize]; + // Only asset types with the correct token can contribute. But + // there must be a demonstrated need for it. 
+ if decoded.token == token + && &decoded.denom == denom + && decoded.epoch.map_or(true, |vbal_epoch| vbal_epoch <= epoch) + && *rem_amount > 0 + { + let val = u128::try_from(*val).expect( + "value balance in absence of output descriptors should be \ + non-negative", + ); + // We want to take at most the remaining quota for the + // current denomination to the receiver + let contr = std::cmp::min(*rem_amount as u128, val) as u64; + // If we are sending to a shielded address, we need the outgoing + // viewing key in the following computations. + let ovk_opt = source + .spending_key() + .map(|x| ExtendedSpendingKey::from(x).expsk.ovk); + // Make transaction output tied to the current token, + // denomination, and epoch. + if let Some(pa) = payment_address { + // If there is a shielded output + builder + .add_sapling_output( + ovk_opt, + pa.into(), + *asset_type, + contr, + MemoBytes::empty(), + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } else { + // If there is a transparent output + let hash = transparent_target_hash + .expect( + "transparent target hash should have been \ + computed already", + ) + .into(); + builder + .add_transparent_output( + &TransparentAddress(hash), + *asset_type, + contr, + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::TransparentBuild(e), + data: None, + })?; + } + // Lower what is required of the remaining contribution + *rem_amount -= contr; + } + } + + // Nothing must remain to be included in output + if rem_amount != [0; 4] { + let (asset_types, _) = { let mut shielded = context.shielded_mut().await; // Do the actual conversion to an asset type let amount = shielded - .convert_amount( + .convert_namada_amount_to_masp( context.client(), epoch, &token, - denom, + denom.to_owned(), amount.amount(), ) .await?; @@ -1672,198 +2128,209 @@ impl ShieldedContext { amount }; - // If there are shielded inputs - if let Some(sk) = spending_key { - // Locate unspent notes that can help us meet the transaction - // amount - let (_, unspent_notes, used_convs) = context - .shielded_mut() - .await - .collect_unspent_notes( - context, - &to_viewing_key(&sk).vk, - I128Sum::from_sum(masp_amount), - epoch, - ) - .await?; - // Commit the notes found to our transaction - for (diversifier, note, merkle_path) in unspent_notes { + // Convert the shortfall into a I128Sum + let mut shortfall = I128Sum::zero(); + for (asset_type, val) in asset_types.iter().zip(rem_amount) { + shortfall += I128Sum::from_pair(*asset_type, val.into()); + } + // Return an insufficient funds error + return Result::Err(TransferErr::Build { + error: builder::Error::InsufficientFunds(shortfall), + data: Some(MaspDataLog { + source: Some(source), + token, + amount, + }), + }); + } + + Ok(()) + } + + // Add the necessary note to include a masp fee payment in the transaction. + // Funds are gathered in the following order: + // + // 1. From the residual values of the already included spend notes (i.e. + // changes) + // 2. From new spend notes of the transaction's sources + // 3. 
From new spend notes of the optional gas spending keys + #[allow(clippy::too_many_arguments)] + async fn add_fees( + context: &impl Namada, + builder: &mut Builder, + source_data: &HashMap, + sources: Vec, + target: &Address, + token: &Address, + amount: &token::DenominatedAmount, + epoch: MaspEpoch, + denoms: &mut HashMap, + notes_tracker: &mut SpentNotesTracker, + ) -> Result<(), TransferErr> { + if denoms.get(token).is_none() { + if let Some(denom) = query_denom(context.client(), token).await { + denoms.insert(token.to_owned(), denom); + } else { + return Err(TransferErr::General(Error::from( + QueryError::General(format!( + "denomination for token {token}" + )), + ))); + }; + } + + let raw_amount = amount.amount().raw_amount().0; + let (asset_types, _) = { + let mut shielded = context.shielded_mut().await; + // Do the actual conversion to an asset type + let (asset_types, amount) = shielded + .convert_namada_amount_to_masp( + context.client(), + epoch, + token, + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + amount.amount(), + ) + .await?; + // Make sure to save any decodings of the asset types used so + // that balance queries involving them are + // successful + let _ = shielded.save().await; + (asset_types, amount) + }; + + let mut fees = I128Sum::zero(); + // Convert the shortfall into a I128Sum + for (asset_type, val) in asset_types.iter().zip(raw_amount) { + fees = + checked!(fees + &I128Sum::from_pair(*asset_type, val.into())) + .map_err(|e| TransferErr::General(e.into()))?; + } + + // 1. Try to use the change to pay fees + for (asset_type, amt) in builder.value_balance().components() { + if let Ordering::Greater = amt.cmp(&0) { + // Look for changes that match the fee asset types + for (fee_asset_type, fee_amt) in fees + .clone() + .components() + .filter(|(axt, _)| *axt == asset_type) + { + let transparent_target_hash = { + ripemd::Ripemd160::digest(sha2::Sha256::digest( + target.serialize_to_vec().as_ref(), + )) + }; + builder - .add_sapling_spend(sk, diversifier, note, merkle_path) + .add_transparent_output( + &TransparentAddress(transparent_target_hash.into()), + *fee_asset_type, + // Get the minimum between the available change and + // the due fee + *amt.min(fee_amt) as u64, + ) .map_err(|e| TransferErr::Build { - error: builder::Error::SaplingBuild(e), + error: builder::Error::TransparentBuild(e), data: None, })?; - } - // Commit the conversion notes used during summation - for (conv, wit, value) in used_convs.values() { - if value.is_positive() { - builder - .add_sapling_convert( - conv.clone(), - *value as u64, - wit.clone(), - ) - .map_err(|e| TransferErr::Build { - error: builder::Error::SaplingBuild(e), - data: None, - })?; - } - } - } else { - // We add a dummy UTXO to our transaction, but only the source - // of the parent Transfer object is used to - // validate fund availability - let source_enc = source - .address() - .ok_or_else(|| { - Error::Other( - "source address should be transparent".to_string(), + + fees = checked!( + fees - &ValueSum::from_pair( + asset_type.to_owned(), + amt.to_owned() ) - })? 
- .serialize_to_vec(); - - let hash = ripemd::Ripemd160::digest(sha2::Sha256::digest( - source_enc.as_ref(), - )); - let script = TransparentAddress(hash.into()); - for (digit, asset_type) in - MaspDigitPos::iter().zip(asset_types.iter()) - { - let amount_part = digit.denominate(&amount.amount()); - // Skip adding an input if its value is 0 - if amount_part != 0 { - builder - .add_transparent_input(TxOut { - asset_type: *asset_type, - value: amount_part, - address: script, - }) - .map_err(|e| TransferErr::Build { - error: builder::Error::TransparentBuild(e), - data: None, - })?; - } + ) + .map_err(|e| TransferErr::General(e.into()))?; } } - // Anotate the asset type in the value balance with its decoding in - // order to facilitate cross-epoch computations - let value_balance = builder.value_balance(); - let value_balance = context - .shielded_mut() - .await - .decode_sum(context.client(), value_balance) - .await; - - // If we are sending to a transparent output, then we will need to - // embed the transparent target address into the - // shielded transaction so that it can be signed - let transparent_target_hash = if payment_address.is_none() { - let target_enc = target - .address() - .ok_or_else(|| { - Error::Other( - "target address should be transparent".to_string(), - ) - })? - .serialize_to_vec(); - Some(ripemd::Ripemd160::digest(sha2::Sha256::digest( - target_enc.as_ref(), - ))) - } else { - None - }; - // This indicates how many more assets need to be sent to the - // receiver in order to satisfy the requested transfer - // amount. - let mut rem_amount = amount.amount().raw_amount().0; - // If we are sending to a shielded address, we may need the outgoing - // viewing key in the following computations. - let ovk_opt = spending_key.map(|x| x.expsk.ovk); - - // Now handle the outputs of this transaction - // Loop through the value balance components and see which - // ones can be given to the receiver - for ((asset_type, decoded), val) in value_balance.components() { - let rem_amount = &mut rem_amount[decoded.position as usize]; - // Only asset types with the correct token can contribute. But - // there must be a demonstrated need for it. - if decoded.token == token - && decoded.denom == denom - && decoded - .epoch - .map_or(true, |vbal_epoch| vbal_epoch <= epoch) - && *rem_amount > 0 - { - let val = u128::try_from(*val).expect( - "value balance in absence of output descriptors \ - should be non-negative", - ); - // We want to take at most the remaining quota for the - // current denomination to the receiver - let contr = std::cmp::min(*rem_amount as u128, val) as u64; - // Make transaction output tied to the current token, - // denomination, and epoch. 
- if let Some(pa) = payment_address { - // If there is a shielded output - builder - .add_sapling_output( - ovk_opt, - pa.into(), - *asset_type, - contr, - memo.clone(), - ) - .map_err(|e| TransferErr::Build { - error: builder::Error::SaplingBuild(e), - data: None, - })?; - } else { - // If there is a transparent output - let hash = transparent_target_hash - .expect( - "transparent target hash should have been \ - computed already", - ) - .into(); - builder - .add_transparent_output( - &TransparentAddress(hash), - *asset_type, - contr, - ) - .map_err(|e| TransferErr::Build { - error: builder::Error::TransparentBuild(e), - data: None, - })?; - } - // Lower what is required of the remaining contribution - *rem_amount -= contr; - } + if fees.is_zero() { + break; } + } - // Nothing must remain to be included in output - if rem_amount != [0; 4] { - // Convert the shortfall into a I128Sum - let mut shortfall = I128Sum::zero(); - for (asset_type, val) in asset_types.iter().zip(rem_amount) { - shortfall += I128Sum::from_pair(*asset_type, val.into()); + if !fees.is_zero() { + // 2. Look for unused spent notes of the sources and the optional + // gas spending keys (sources first) + for fee_source in + source_data.iter().map(|(src, _)| src.source.clone()).chain( + sources + .into_iter() + .map(TransferSource::ExtendedSpendingKey), + ) + { + let Some(found_amt) = Self::add_inputs( + context, + builder, + &fee_source, + token, + amount, + epoch, + denoms, + notes_tracker, + ) + .await? + else { + continue; + }; + let denom_amt = context + .shielded_mut() + .await + .convert_masp_amount_to_namada( + context.client(), + denoms.get(token).unwrap().to_owned(), + found_amt.clone(), + ) + .await?; + + Self::add_outputs( + context, + builder, + fee_source, + &TransferTarget::Address(target.clone()), + token.clone(), + denom_amt, + epoch, + denoms, + ) + .await?; + + fees = checked!(fees - &found_amt) + .map_err(|e| TransferErr::General(e.into()))?; + if fees.is_zero() { + break; } - // Return an insufficient funds error - return Result::Err(TransferErr::Build { - error: builder::Error::InsufficientFunds(shortfall), - data: Some(MaspTransferData { - source, - target, - token, - amount, - }), - }); } + } + + if !fees.is_zero() { + return Result::Err(TransferErr::Build { + error: builder::Error::InsufficientFunds(fees), + data: Some(MaspDataLog { + source: None, + token: token.to_owned(), + amount: *amount, + }), + }); + } + + Ok(()) + } - // Now add outputs representing the change from this payment - if let Some(sk) = spending_key { + // Add the changes back to the sources to balance the transaction. 
This + // function has to be called after `add_fees` cause we might have some + // change coming from there too + #[allow(clippy::result_large_err)] + fn add_changes( + builder: &mut Builder, + source_data: &HashMap, + ) -> Result<(), TransferErr> { + for (MaspSourceTransferData { source, token }, amount) in source_data { + if let Some(sk) = + source.spending_key().map(ExtendedSpendingKey::from) + { // Represents the amount of inputs we are short by let mut additional = I128Sum::zero(); for (asset_type, amt) in builder.value_balance().components() { @@ -1877,7 +2344,7 @@ impl ShieldedContext { sk.default_address().1, *asset_type, *amt as u64, - memo.clone(), + MemoBytes::empty(), ) .map_err(|e| TransferErr::Build { error: builder::Error::SaplingBuild(e), @@ -1904,46 +2371,17 @@ impl ShieldedContext { if !additional.is_zero() { return Result::Err(TransferErr::Build { error: builder::Error::InsufficientFunds(additional), - data: Some(MaspTransferData { - source, - target, - token, - amount, + data: Some(MaspDataLog { + source: Some(source.to_owned()), + token: token.to_owned(), + amount: *amount, }), }); } } } - let builder_clone = builder.clone().map_builder(WalletMap); - // Build and return the constructed transaction - #[cfg(not(feature = "testing"))] - let prover = context.shielded().await.utils.local_tx_prover(); - #[cfg(feature = "testing")] - let prover = testing::MockTxProver(std::sync::Mutex::new(OsRng)); - let (masp_tx, metadata) = builder - .build( - &prover, - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut RngBuildParams::new(OsRng), - ) - .map_err(|error| TransferErr::Build { error, data: None })?; - - if update_ctx { - // Cache the generated transfer - let mut shielded_ctx = context.shielded_mut().await; - shielded_ctx - .pre_cache_transaction(context, &[masp_tx.clone()]) - .await?; - } - - Ok(Some(ShieldedTransfer { - builder: builder_clone, - masp_tx, - metadata, - epoch, - })) + Ok(()) } // Updates the internal state with the data of the newly generated @@ -2010,8 +2448,8 @@ impl ShieldedContext { Ok(asset_type) } - /// Convert Anoma amount and token type to MASP equivalents - async fn convert_amount( + /// Convert Namada amount and token type to MASP equivalents + async fn convert_namada_amount_to_masp( &mut self, client: &C, epoch: MaspEpoch, @@ -2045,6 +2483,28 @@ impl ShieldedContext { amount, )) } + + /// Convert MASP amount to Namada equivalent + async fn convert_masp_amount_to_namada( + &mut self, + client: &C, + denom: Denomination, + amt: I128Sum, + ) -> Result { + let mut amount = token::Amount::zero(); + let value_sum = self.decode_sum(client, amt).await; + + for ((_, decoded), val) in value_sum.components() { + let positioned_amt = token::Amount::from_masp_denominated_i128( + *val, + decoded.position, + ) + .unwrap_or_default(); + amount = checked!(amount + positioned_amt)?; + } + + Ok(token::DenominatedAmount::new(amount, denom)) + } } // Retrieves all the indexes at the specified height which refer diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index 60e6c19074..bfdcfec374 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -37,7 +37,8 @@ use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; use namada_core::ibc::primitives::Timestamp as IbcTimestamp; use namada_core::key::{self, *}; use namada_core::masp::{ - AssetData, MaspEpoch, PaymentAddress, TransferSource, TransferTarget, + AssetData, ExtendedSpendingKey, MaspEpoch, PaymentAddress, TransferSource, + TransferTarget, }; use namada_core::storage; use 
namada_core::storage::Epoch; @@ -68,14 +69,17 @@ use rand_core::{OsRng, RngCore}; use token::ShieldingTransferData; use crate::args::{ - TxShieldedTransferData, TxShieldingTransferData, TxTransparentTransferData, - TxUnshieldingTransferData, + SdkTypes, TxShieldedTransferData, TxShieldingTransferData, + TxTransparentTransferData, TxUnshieldingTransferData, }; use crate::control_flow::time; use crate::error::{EncodingError, Error, QueryError, Result, TxSubmitError}; use crate::io::Io; use crate::masp::TransferErr::Build; -use crate::masp::{MaspTransferData, ShieldedContext, ShieldedTransfer}; +use crate::masp::{ + MaspDataLog, MaspFeeData, MaspTransferData, ShieldedContext, + ShieldedTransfer, +}; use crate::queries::Client; use crate::rpc::{ self, get_validator_stake, query_wasm_code_hash, validate_amount, @@ -2518,19 +2522,20 @@ pub async fn build_ibc_transfer( query_wasm_code_hash(context, args.tx_code_path.to_str().unwrap()) .await .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; - let masp_transfer_data = MaspTransferData { + let masp_transfer_data = vec![MaspTransferData { source: args.source.clone(), target: TransferTarget::Address(Address::Internal( InternalAddress::Ibc, )), token: args.token.clone(), amount: validated_amount, - }; + }]; // For transfer from a spending key let shielded_parts = construct_shielded_parts( context, - vec![masp_transfer_data], + masp_transfer_data, + None, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await?; @@ -2976,11 +2981,23 @@ pub async fn build_shielded_transfer( }); } - // TODO(namada#2597): this function should also take another arg as the fees - // token and amount + // Add masp fee payment if necessary + let masp_fee_data = get_masp_fee_payment_amount( + context, + &args.tx, + fee_amount, + &signing_data.fee_payer, + args.gas_spending_keys.clone(), + ) + .await?; + let data_fee_payer = masp_fee_data + .as_ref() + .map(|fee_data| fee_data.target.to_owned()); + let shielded_parts = construct_shielded_parts( context, transfer_data, + masp_fee_data, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await? 
@@ -3018,6 +3035,7 @@ pub async fn build_shielded_transfer( // Construct the tx data with a placeholder shielded section hash let data = token::ShieldedTransfer { + fee_payer: data_fee_payer, section_hash: Hash::zero(), }; let tx = build_pow_flag( @@ -3033,6 +3051,36 @@ pub async fn build_shielded_transfer( Ok((tx, signing_data)) } +// Check if the transaction will need to pay fees via the masp and extract the +// right masp data +async fn get_masp_fee_payment_amount( + context: &N, + args: &args::Tx, + fee_amount: DenominatedAmount, + fee_payer: &common::PublicKey, + gas_spending_keys: Vec, +) -> Result> { + let fee_payer_address = Address::from(fee_payer); + let balance_key = balance_key(&args.fee_token, &fee_payer_address); + let balance = rpc::query_storage_value::<_, token::Amount>( + context.client(), + &balance_key, + ) + .await + .unwrap_or_default(); + let total_fee = checked!(fee_amount.amount() * u64::from(args.gas_limit))?; + + Ok(match total_fee.checked_sub(balance) { + Some(diff) if !diff.is_zero() => Some(MaspFeeData { + sources: gas_spending_keys, + target: fee_payer_address, + token: args.fee_token.clone(), + amount: DenominatedAmount::new(diff, fee_amount.denom()), + }), + _ => None, + }) +} + /// Build a shielding transfer pub async fn build_shielding_transfer( context: &N, @@ -3111,6 +3159,7 @@ pub async fn build_shielding_transfer( let shielded_parts = construct_shielded_parts( context, transfer_data, + None, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await? @@ -3205,11 +3254,28 @@ pub async fn build_unshielding_transfer( }); } - // TODO(namada#2597): this function should also take another arg as the fees - // token and amount + // Add masp fee payment if necessary + let masp_fee_data = get_masp_fee_payment_amount( + context, + &args.tx, + fee_amount, + &signing_data.fee_payer, + args.gas_spending_keys.clone(), + ) + .await?; + if let Some(fee_data) = &masp_fee_data { + // Add another unshield to the list + data.push(token::UnshieldingTransferData { + target: fee_data.target.to_owned(), + token: fee_data.token.to_owned(), + amount: fee_data.amount, + }); + } + let shielded_parts = construct_shielded_parts( context, transfer_data, + masp_fee_data, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await? @@ -3268,6 +3334,7 @@ pub async fn build_unshielding_transfer( async fn construct_shielded_parts( context: &N, data: Vec, + fee_data: Option, update_ctx: bool, ) -> Result)>> { // Precompute asset types to increase chances of success in decoding @@ -3280,7 +3347,7 @@ async fn construct_shielded_parts( .await; let stx_result = ShieldedContext::::gen_shielded_transfer( - context, data, update_ctx, + context, data, fee_data, update_ctx, ) .await; @@ -3291,16 +3358,28 @@ async fn construct_shielded_parts( error: builder::Error::InsufficientFunds(_), data, }) => { - let MaspTransferData { + if let Some(MaspDataLog { source, token, amount, - .. 
- } = data.unwrap(); - return Err(TxSubmitError::NegativeBalanceAfterTransfer( - Box::new(source.effective_address()), - amount.to_string(), - Box::new(token.clone()), + }) = data + { + if let Some(source) = source { + return Err(TxSubmitError::NegativeBalanceAfterTransfer( + Box::new(source.effective_address()), + amount.to_string(), + Box::new(token.clone()), + ) + .into()); + } + return Err(TxSubmitError::MaspError(format!( + "Insufficient funds: Could not collect enough funds to \ + pay for fees: token {token}, amount: {amount}" + )) + .into()); + } + return Err(TxSubmitError::MaspError( + "Insufficient funds".to_string(), ) .into()); } @@ -3624,6 +3703,8 @@ pub async fn gen_ibc_shielding_transfer( ShieldedContext::::gen_shielded_transfer( context, vec![masp_transfer_data], + // Fees are paid from the transparent balance of the relayer + None, true, ) .await diff --git a/crates/shielded_token/src/utils.rs b/crates/shielded_token/src/utils.rs index 4820a057aa..9396755659 100644 --- a/crates/shielded_token/src/utils.rs +++ b/crates/shielded_token/src/utils.rs @@ -63,10 +63,10 @@ pub fn handle_masp_tx( ctx: &mut (impl StorageRead + StorageWrite), shielded: &Transaction, ) -> Result<()> { - // TODO: temporarily disabled because of the node aggregation issue in WASM. - // Using the host env tx_update_masp_note_commitment_tree or directly the - // update_note_commitment_tree function as a workaround instead - // update_note_commitment_tree(ctx, shielded)?; + // TODO(masp#73): temporarily disabled because of the node aggregation issue + // in WASM. Using the host env tx_update_masp_note_commitment_tree or + // directly the update_note_commitment_tree function as a workaround + // instead update_note_commitment_tree(ctx, shielded)?; reveal_nullifiers(ctx, shielded)?; Ok(()) diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index 4470d08925..f93ec6c90b 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -126,6 +126,8 @@ pub struct TransparentTransferData { Deserialize, )] pub struct ShieldedTransfer { + /// Optional target of unshielding for fee payment + pub fee_payer: Option
, /// Hash of tx section that contains the MASP transaction pub section_hash: Hash, } From 584d2eb5b23af6c603c18e5c2ccc711c4e0fe8d8 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 7 Jun 2024 19:40:03 +0200 Subject: [PATCH 24/40] Removes unused denominate function --- crates/core/src/token.rs | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/crates/core/src/token.rs b/crates/core/src/token.rs index 81f39d17e2..463be7ff60 100644 --- a/crates/core/src/token.rs +++ b/crates/core/src/token.rs @@ -923,19 +923,6 @@ impl MaspDigitPos { let amount = amount.into(); amount.raw.0[*self as usize] } - - /// Get the corresponding u64 word from the input uint256. - // FIXME: remove if unused? - pub fn denominate_i128(&self, amount: &Change) -> i128 { - let val = i128::from(amount.abs().0[*self as usize]); - if Change::is_negative(amount) { - // Cannot panic as the value is limited to `u64` range - #[allow(clippy::arithmetic_side_effects)] - -val - } else { - val - } - } } impl From for IbcAmount { From 9f390a7041cc639313abc65cd3d580a3416922a1 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 7 Jun 2024 20:42:53 +0200 Subject: [PATCH 25/40] Updates shielded wasm code to handle fee unshielding --- crates/light_sdk/src/transaction/transfer.rs | 4 ++-- crates/node/src/bench_utils.rs | 2 +- crates/sdk/src/lib.rs | 2 +- crates/sdk/src/tx.rs | 13 +++++++++---- crates/token/src/lib.rs | 4 ++-- wasm/tx_shielded_transfer/src/lib.rs | 11 +++++++++++ 6 files changed, 26 insertions(+), 10 deletions(-) diff --git a/crates/light_sdk/src/transaction/transfer.rs b/crates/light_sdk/src/transaction/transfer.rs index efdbfd89e6..4e56316ea9 100644 --- a/crates/light_sdk/src/transaction/transfer.rs +++ b/crates/light_sdk/src/transaction/transfer.rs @@ -35,13 +35,13 @@ impl Transfer { /// Build a shielded transfer transaction from the given parameters pub fn shielded( - fee_payer: Option
, + fee_unshield: Option, shielded_section_hash: Hash, transaction: Transaction, args: GlobalArgs, ) -> Self { let data = namada_sdk::token::ShieldedTransfer { - fee_payer, + fee_unshield, section_hash: shielded_section_hash, }; diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index e166b6a20f..9b8f5ccaf1 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -1120,7 +1120,7 @@ impl BenchShieldedCtx { namada.client().generate_tx( TX_SHIELDED_TRANSFER_WASM, ShieldedTransfer { - fee_payer: None, + fee_unshield: None, section_hash: shielded_section_hash, }, Some(shielded), diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index 7241da670a..82be8d364d 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -1134,7 +1134,7 @@ pub mod testing { let tx_data = match masp_tx_type { MaspTxType::Shielded => { tx.add_code_from_hash(code_hash, Some(TX_SHIELDED_TRANSFER_WASM.to_owned())); - let data = ShieldedTransfer { fee_payer: transfers.0.first().map(|transfer| transfer.target.clone()), section_hash: shielded_section_hash }; + let data = ShieldedTransfer { fee_unshield: transfers.0.first().map(|transfer| UnshieldingTransferData { target: transfer.target.to_owned(), token: transfer.token.to_owned(), amount: transfer.amount }), section_hash: shielded_section_hash }; tx.add_data(data.clone()); TxData::ShieldedTransfer(data, (build_params, build_param_bytes)) }, diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index bfdcfec374..16aac6aca5 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -2990,9 +2990,14 @@ pub async fn build_shielded_transfer( args.gas_spending_keys.clone(), ) .await?; - let data_fee_payer = masp_fee_data - .as_ref() - .map(|fee_data| fee_data.target.to_owned()); + let fee_unshield = + masp_fee_data + .as_ref() + .map(|fee_data| token::UnshieldingTransferData { + target: fee_data.target.to_owned(), + token: fee_data.token.to_owned(), + amount: fee_amount, + }); let shielded_parts = construct_shielded_parts( context, @@ -3035,7 +3040,7 @@ pub async fn build_shielded_transfer( // Construct the tx data with a placeholder shielded section hash let data = token::ShieldedTransfer { - fee_payer: data_fee_payer, + fee_unshield, section_hash: Hash::zero(), }; let tx = build_pow_flag( diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index f93ec6c90b..3b4921b73a 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -126,8 +126,8 @@ pub struct TransparentTransferData { Deserialize, )] pub struct ShieldedTransfer { - /// Optional target of unshielding for fee payment - pub fee_payer: Option
, + /// Optional unshield for fee payment + pub fee_unshield: Option, /// Hash of tx section that contains the MASP transaction pub section_hash: Hash, } diff --git a/wasm/tx_shielded_transfer/src/lib.rs b/wasm/tx_shielded_transfer/src/lib.rs index cc9e70a638..11a46790d2 100644 --- a/wasm/tx_shielded_transfer/src/lib.rs +++ b/wasm/tx_shielded_transfer/src/lib.rs @@ -9,6 +9,17 @@ fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { let transfer = token::ShieldedTransfer::try_from_slice(&data[..]) .wrap_err("Failed to decode token::ShieldedTransfer tx data")?; debug_log!("apply_tx called with transfer: {:#?}", transfer); + if let Some(fee_unshield) = transfer.fee_unshield { + // Unshield for fee payment + token::transfer( + ctx, + &address::MASP, + &fee_unshield.target, + &fee_unshield.token, + fee_unshield.amount.amount(), + ) + .wrap_err("Token transfer failed")?; + } let masp_section_ref = transfer.section_hash; let shielded = tx_data From fb0df693402d08c7124a0624eed1f9cc6e1964f9 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Sat, 8 Jun 2024 13:08:55 +0200 Subject: [PATCH 26/40] Fixes masp tx generation and integration tests --- crates/namada/src/ledger/protocol/mod.rs | 6 +- crates/sdk/src/args.rs | 3 - crates/sdk/src/error.rs | 5 +- crates/sdk/src/masp.rs | 408 +++++++---- crates/sdk/src/tx.rs | 18 +- crates/tests/src/integration/masp.rs | 824 +++++++++++++++++++++-- 6 files changed, 1072 insertions(+), 192 deletions(-) diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index a0249caca3..4c18bbb601 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -691,7 +691,11 @@ where .expect("Missing masp fee payment gas limit in storage") .min(tx_gas_meter.borrow().tx_gas_limit.into()); - let mut gas_meter = TxGasMeter::new(min_gas_limit); + let mut gas_meter = TxGasMeter::new( + namada_gas::Gas::from_whole_units(min_gas_limit).ok_or_else(|| { + Error::GasError("Overflow in gas expansion".to_string()) + })?, + ); gas_meter .copy_consumed_gas_from(&tx_gas_meter.borrow()) .map_err(|e| Error::GasError(e.to_string()))?; diff --git a/crates/sdk/src/args.rs b/crates/sdk/src/args.rs index a94c6d9724..67f0c69589 100644 --- a/crates/sdk/src/args.rs +++ b/crates/sdk/src/args.rs @@ -2138,9 +2138,6 @@ pub struct Tx { pub gas_limit: GasLimit, /// The optional expiration of the transaction pub expiration: TxExpiration, - // FIXME: maybe should move this out of here, it's only needed for txs that - // pay the fees via the masp, so it should go together with the optional - // gas spending keys /// Generate an ephimeral signing key to be used only once to sign a /// wrapper tx pub disposable_signing_key: bool, diff --git a/crates/sdk/src/error.rs b/crates/sdk/src/error.rs index a8934b1961..f0c22adfca 100644 --- a/crates/sdk/src/error.rs +++ b/crates/sdk/src/error.rs @@ -264,7 +264,10 @@ pub enum TxSubmitError { #[error("Proposal end epoch is not in the storage.")] EpochNotInStorage, /// Couldn't understand who the fee payer is - #[error("Either --signing-keys or --gas-payer must be available.")] + #[error( + "Either --signing-keys, --gas-payer or --disposable-gas-payer must be \ + available." 
+ )]
 InvalidFeePayer,
 /// Account threshold is not set
 #[error("Account threshold must be set.")]
diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs
index 66c3d68dc4..aff209ed6c 100644
--- a/crates/sdk/src/masp.rs
+++ b/crates/sdk/src/masp.rs
@@ -9,6 +9,7 @@ use std::path::PathBuf;
 use borsh::{BorshDeserialize, BorshSerialize};
 use borsh_ext::BorshSerializeExt;
+use itertools::Itertools;
 use lazy_static::lazy_static;
 use masp_primitives::asset_type::AssetType;
 #[cfg(feature = "mainnet")]
@@ -50,7 +51,7 @@ use masp_proofs::bls12_381::Bls12;
 use masp_proofs::prover::LocalTxProver;
 use masp_proofs::sapling::BatchValidator;
 use namada_core::address::Address;
-use namada_core::arith::{CheckedAdd, CheckedSub};
+use namada_core::arith::CheckedAdd;
 use namada_core::collections::{HashMap, HashSet};
 use namada_core::dec::Dec;
 pub use namada_core::masp::{
@@ -81,7 +82,9 @@ use thiserror::Error;
 use crate::error::{Error, QueryError};
 use crate::io::Io;
 use crate::queries::Client;
-use crate::rpc::{query_block, query_conversion, query_denom};
+use crate::rpc::{
+ query_block, query_conversion, query_denom, query_native_token,
+};
 use crate::{display_line, edisplay_line, rpc, MaybeSend, MaybeSync, Namada};

 /// Env var to point to a dir with MASP parameters. When not specified,
@@ -124,6 +127,7 @@ pub struct ShieldedTransfer {

 /// The data for a masp fee payment
 #[allow(missing_docs)]
+#[derive(Debug)]
 pub struct MaspFeeData {
 pub sources: Vec,
 pub target: Address,
@@ -171,6 +175,11 @@ struct MaspTxReorderedData {
 denoms: HashMap,
 }

+// Data about the unspent amounts for any given shielded source coming from
+// the spent notes in their possession that have been added to the builder.
+// Can be used either to pay fees or to return change
+type Changes = HashMap;
+
 /// Shielded pool data for a token
 #[allow(missing_docs)]
 #[derive(Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)]
@@ -531,15 +540,62 @@ pub fn find_valid_diversifier(
 }

 /// Determine if using the current note would actually bring us closer to our
-/// target
-pub fn is_amount_required(src: I128Sum, dest: I128Sum, delta: I128Sum) -> bool {
- let gap = dest - src;
+/// target. 
Returns the unused amounts (change) of delta if any +pub fn is_amount_required( + src: I128Sum, + dest: I128Sum, + normed_delta: I128Sum, + opt_delta: Option, +) -> Option { + let mut changes = None; + let gap = dest.clone() - src; + for (asset_type, value) in gap.components() { - if *value > 0 && delta[asset_type] > 0 { - return true; - } + if *value > 0 && normed_delta[asset_type] > 0 { + let signed_change_amt = + checked!(normed_delta[asset_type] - *value).unwrap_or_default(); + let unsigned_change_amt = if signed_change_amt > 0 { + signed_change_amt + } else { + // Even if there's no change we still need to set the return + // value of this function to be Some so that the caller sees + // that this note should be used + 0 + }; + + let change_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + unsigned_change_amt, + ) + .expect("Change is guaranteed to be non-negative"); + changes = changes + .map(|prev| prev + change_amt.clone()) + .or(Some(change_amt)); + } + } + + // Because of the way conversions are computed, we need an extra step here + // if the token is not the native one + if let Some(delta) = opt_delta { + // Only if this note is going to be used, handle the assets in delta + // (not normalized) that are not part of dest + changes = changes.map(|mut chngs| { + for (delta_asset_type, delta_amt) in delta.components() { + if !dest.asset_types().contains(delta_asset_type) { + let rmng = I128Sum::from_nonnegative( + delta_asset_type.to_owned(), + *delta_amt, + ) + .expect("Change is guaranteed to be non-negative"); + chngs += rmng; + } + } + + chngs + }); } - false + + changes } /// a masp change @@ -1385,14 +1441,17 @@ impl ShieldedContext { /// Collect enough unspent notes in this context to exceed the given amount /// of the specified asset type. Return the total value accumulated plus /// notes and the corresponding diversifiers/merkle paths that were used to - /// achieve the total value. + /// achieve the total value. Updates the changes map. 
+ #[allow(clippy::too_many_arguments)] pub async fn collect_unspent_notes( &mut self, context: &impl Namada, spent_notes: &mut SpentNotesTracker, - vk: &ViewingKey, + sk: namada_core::masp::ExtendedSpendingKey, + is_native_token: bool, target: I128Sum, target_epoch: MaspEpoch, + changes: &mut Changes, ) -> Result< ( I128Sum, @@ -1401,6 +1460,7 @@ impl ShieldedContext { ), Error, > { + let vk = &to_viewing_key(&sk.into()).vk; // TODO: we should try to use the smallest notes possible to fund the // transaction to allow people to fetch less often // Establish connection with which to do exchange rate queries @@ -1447,16 +1507,29 @@ impl ShieldedContext { ) .await?; + let opt_delta = if is_native_token { + None + } else { + Some(contr.clone()) + }; // Use this note only if it brings us closer to our target - if is_amount_required( + if let Some(change) = is_amount_required( normed_val_acc.clone(), target.clone(), normed_contr.clone(), + opt_delta, ) { // Be sure to record the conversions used in computing // accumulated value val_acc += contr; normed_val_acc += normed_contr; + + // Update the changes + changes + .entry(sk) + .and_modify(|amt| *amt += &change) + .or_insert(change); + // Commit the conversions that were used to exchange conversions = proposed_convs; let merkle_path = self @@ -1693,6 +1766,7 @@ impl ShieldedContext { // destination are shielded return Ok(None); }; + let mut changes = Changes::default(); for (MaspSourceTransferData { source, token }, amount) in &source_data { Self::add_inputs( @@ -1704,6 +1778,7 @@ impl ShieldedContext { epoch, &denoms, &mut notes_tracker, + &mut changes, ) .await?; } @@ -1749,12 +1824,13 @@ impl ShieldedContext { epoch, &mut denoms, &mut notes_tracker, + &mut changes, ) .await?; } // Finally, add outputs representing the change from this payment. - Self::add_changes(&mut builder, &source_data)?; + Self::add_changes(&mut builder, changes)?; let builder_clone = builder.clone().map_builder(WalletMap); // Build and return the constructed transaction @@ -1868,9 +1944,7 @@ impl ShieldedContext { })) } - // Add the necessary transaction inputs to the builder. Returns the actual - // amount of inputs added to the transaction if these are shielded, `None` - // if transparent inputs + // Add the necessary transaction inputs to the builder. #[allow(clippy::too_many_arguments)] async fn add_inputs( context: &impl Namada, @@ -1881,11 +1955,10 @@ impl ShieldedContext { epoch: MaspEpoch, denoms: &HashMap, notes_tracker: &mut SpentNotesTracker, + changes: &mut Changes, ) -> Result, TransferErr> { - let spending_key = source.spending_key(); - // We want to fund our transaction solely from supplied spending key - let spending_key = spending_key.map(|x| x.into()); + let spending_key = source.spending_key(); // Now we build up the transaction within this object @@ -1914,6 +1987,8 @@ impl ShieldedContext { // If there are shielded inputs let added_amt = if let Some(sk) = spending_key { + let is_native_token = + &query_native_token(context.client()).await? 
== token; // Locate unspent notes that can help us meet the transaction // amount let (added_amount, unspent_notes, used_convs) = context @@ -1922,15 +1997,22 @@ impl ShieldedContext { .collect_unspent_notes( context, notes_tracker, - &to_viewing_key(&sk).vk, + sk, + is_native_token, I128Sum::from_sum(masp_amount), epoch, + changes, ) .await?; // Commit the notes found to our transaction for (diversifier, note, merkle_path) in unspent_notes { builder - .add_sapling_spend(sk, diversifier, note, merkle_path) + .add_sapling_spend( + sk.into(), + diversifier, + note, + merkle_path, + ) .map_err(|e| TransferErr::Build { error: builder::Error::SaplingBuild(e), data: None, @@ -2166,6 +2248,7 @@ impl ShieldedContext { epoch: MaspEpoch, denoms: &mut HashMap, notes_tracker: &mut SpentNotesTracker, + changes: &mut Changes, ) -> Result<(), TransferErr> { if denoms.get(token).is_none() { if let Some(denom) = query_denom(context.client(), token).await { @@ -2203,46 +2286,66 @@ impl ShieldedContext { let mut fees = I128Sum::zero(); // Convert the shortfall into a I128Sum for (asset_type, val) in asset_types.iter().zip(raw_amount) { - fees = - checked!(fees + &I128Sum::from_pair(*asset_type, val.into())) - .map_err(|e| TransferErr::General(e.into()))?; + fees += I128Sum::from_nonnegative(*asset_type, val.into()) + .map_err(|()| { + TransferErr::General(Error::Other( + "Fee amount is expected expected to be non-negative" + .to_string(), + )) + })?; } // 1. Try to use the change to pay fees - for (asset_type, amt) in builder.value_balance().components() { - if let Ordering::Greater = amt.cmp(&0) { - // Look for changes that match the fee asset types - for (fee_asset_type, fee_amt) in fees + let mut temp_changes = Changes::default(); + + for (sp, changes) in changes.iter() { + for (asset_type, change) in changes.components() { + for (_, fee_amt) in fees .clone() .components() .filter(|(axt, _)| *axt == asset_type) { - let transparent_target_hash = { - ripemd::Ripemd160::digest(sha2::Sha256::digest( - target.serialize_to_vec().as_ref(), + // Get the minimum between the available change and + // the due fee + let output_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + *change.min(fee_amt), + ) + .map_err(|()| { + TransferErr::General(Error::Other( + "Fee amount is expected to be non-negative" + .to_string(), )) - }; - - builder - .add_transparent_output( - &TransparentAddress(transparent_target_hash.into()), - *fee_asset_type, - // Get the minimum between the available change and - // the due fee - *amt.min(fee_amt) as u64, - ) - .map_err(|e| TransferErr::Build { - error: builder::Error::TransparentBuild(e), - data: None, - })?; - - fees = checked!( - fees - &ValueSum::from_pair( - asset_type.to_owned(), - amt.to_owned() + })?; + let denominated_output_amt = context + .shielded_mut() + .await + .convert_masp_amount_to_namada( + context.client(), + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + output_amt.clone(), ) + .await?; + + Self::add_outputs( + context, + builder, + TransferSource::ExtendedSpendingKey(sp.to_owned()), + &TransferTarget::Address(target.clone()), + token.clone(), + denominated_output_amt, + epoch, + denoms, ) - .map_err(|e| TransferErr::General(e.into()))?; + .await?; + + fees -= &output_amt; + // Update the changes + temp_changes + .entry(*sp) + .and_modify(|amt| *amt += &output_amt) + .or_insert(output_amt); } } @@ -2251,6 +2354,25 @@ impl ShieldedContext { } } + // Decrease the changes by the amounts used for fee payment + for (sp, temp_changes) in 
temp_changes.iter() { + for (asset_type, temp_change) in temp_changes.components() { + let output_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + *temp_change, + ) + .map_err(|()| { + TransferErr::General(Error::Other( + "Fee amount is expected expected to be non-negative" + .to_string(), + )) + })?; + + // Entry is guaranteed to be in the map + changes.entry(*sp).and_modify(|amt| *amt -= &output_amt); + } + } + if !fees.is_zero() { // 2. Look for unused spent notes of the sources and the optional // gas spending keys (sources first) @@ -2261,44 +2383,75 @@ impl ShieldedContext { .map(TransferSource::ExtendedSpendingKey), ) { - let Some(found_amt) = Self::add_inputs( - context, - builder, - &fee_source, - token, - amount, - epoch, - denoms, - notes_tracker, - ) - .await? - else { - continue; - }; - let denom_amt = context - .shielded_mut() - .await - .convert_masp_amount_to_namada( - context.client(), - denoms.get(token).unwrap().to_owned(), - found_amt.clone(), + for (asset_type, fee_amt) in fees.clone().components() { + let input_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + *fee_amt, + ) + .map_err(|()| { + TransferErr::General(Error::Other( + "Fee amount is expected expected to be \ + non-negative" + .to_string(), + )) + })?; + let denominated_fee = context + .shielded_mut() + .await + .convert_masp_amount_to_namada( + context.client(), + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + input_amt.clone(), + ) + .await?; + + let Some(found_amt) = Self::add_inputs( + context, + builder, + &fee_source, + token, + &denominated_fee, + epoch, + denoms, + notes_tracker, + changes, + ) + .await? + else { + continue; + }; + // Pick the minimum between the due fee and the amount found + let output_amt = match found_amt.partial_cmp(&input_amt) { + None | Some(Ordering::Less) => found_amt, + _ => input_amt.clone(), + }; + let denom_amt = context + .shielded_mut() + .await + .convert_masp_amount_to_namada( + context.client(), + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + output_amt.clone(), + ) + .await?; + + Self::add_outputs( + context, + builder, + fee_source.clone(), + &TransferTarget::Address(target.clone()), + token.clone(), + denom_amt, + epoch, + denoms, ) .await?; - Self::add_outputs( - context, - builder, - fee_source, - &TransferTarget::Address(target.clone()), - token.clone(), - denom_amt, - epoch, - denoms, - ) - .await?; + fees -= &output_amt; + } - fees = checked!(fees - &found_amt) - .map_err(|e| TransferErr::General(e.into()))?; if fees.is_zero() { break; } @@ -2319,68 +2472,45 @@ impl ShieldedContext { Ok(()) } - // Add the changes back to the sources to balance the transaction. This - // function has to be called after `add_fees` cause we might have some - // change coming from there too + // Consumes the changes and adds them back to the original sources to + // balance the transaction. 
This function has to be called after + // `add_fees` cause we might have some change coming from there too #[allow(clippy::result_large_err)] fn add_changes( builder: &mut Builder, - source_data: &HashMap, + changes: Changes, ) -> Result<(), TransferErr> { - for (MaspSourceTransferData { source, token }, amount) in source_data { - if let Some(sk) = - source.spending_key().map(ExtendedSpendingKey::from) - { - // Represents the amount of inputs we are short by - let mut additional = I128Sum::zero(); - for (asset_type, amt) in builder.value_balance().components() { - match amt.cmp(&0) { - Ordering::Greater => { - // Send the change in this asset type back to the - // sender - builder - .add_sapling_output( - Some(sk.expsk.ovk), - sk.default_address().1, - *asset_type, - *amt as u64, - MemoBytes::empty(), - ) - .map_err(|e| TransferErr::Build { - error: builder::Error::SaplingBuild(e), - data: None, - })?; - } - Ordering::Less => { - // Record how much of the current asset type we are - // short by - additional += - I128Sum::from_nonnegative(*asset_type, -*amt) - .map_err(|()| { - Error::Other(format!( - "from non negative conversion: {}", - line!() - )) - })?; - } - Ordering::Equal => {} - } - } - // If we are short by a non-zero amount, then we have - // insufficient funds - if !additional.is_zero() { - return Result::Err(TransferErr::Build { - error: builder::Error::InsufficientFunds(additional), - data: Some(MaspDataLog { - source: Some(source.to_owned()), - token: token.to_owned(), - amount: *amount, - }), - }); + for (sp, changes) in changes.into_iter() { + for (asset_type, amt) in changes.components() { + if let Ordering::Greater = amt.cmp(&0) { + let sk = ExtendedSpendingKey::from(sp.to_owned()); + // Send the change in this asset type back to the sender + builder + .add_sapling_output( + Some(sk.expsk.ovk), + sk.default_address().1, + *asset_type, + *amt as u64, + MemoBytes::empty(), + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; } } } + // Final safety check on the value balance to verify that the + // transaction is balanced + let value_balance = builder.value_balance(); + if !value_balance.is_zero() { + return Result::Err(TransferErr::Build { + error: builder::Error::InsufficientFunds(value_balance), + data: None, + }); + } + Ok(()) } diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index 16aac6aca5..b793240c84 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -2466,7 +2466,7 @@ pub async fn build_ibc_transfer( Some(source.clone()), ) .await?; - let (fee_amount, updated_balance) = + let (fee_per_gas_unit, updated_balance) = if let TransferSource::ExtendedSpendingKey(_) = args.source { // MASP fee payment (validate_fee(context, &args.tx).await?, None) @@ -2684,7 +2684,7 @@ pub async fn build_ibc_transfer( prepare_tx( &args.tx, &mut tx, - fee_amount, + fee_per_gas_unit, signing_data.fee_payer.clone(), ) .await?; @@ -2958,7 +2958,7 @@ pub async fn build_shielded_transfer( .await?; // Shielded fee payment - let fee_amount = validate_fee(context, &args.tx).await?; + let fee_per_gas_unit = validate_fee(context, &args.tx).await?; let mut transfer_data = vec![]; for TxShieldedTransferData { @@ -2985,7 +2985,7 @@ pub async fn build_shielded_transfer( let masp_fee_data = get_masp_fee_payment_amount( context, &args.tx, - fee_amount, + fee_per_gas_unit, &signing_data.fee_payer, args.gas_spending_keys.clone(), ) @@ -2996,7 +2996,7 @@ pub async fn build_shielded_transfer( .map(|fee_data| token::UnshieldingTransferData 
{ target: fee_data.target.to_owned(), token: fee_data.token.to_owned(), - amount: fee_amount, + amount: fee_data.amount, }); let shielded_parts = construct_shielded_parts( @@ -3049,7 +3049,7 @@ pub async fn build_shielded_transfer( args.tx_code_path.clone(), data, add_shielded_parts, - fee_amount, + fee_per_gas_unit, &signing_data.fee_payer, ) .await?; @@ -3230,7 +3230,7 @@ pub async fn build_unshielding_transfer( .await?; // Shielded fee payment - let fee_amount = validate_fee(context, &args.tx).await?; + let fee_per_gas_unit = validate_fee(context, &args.tx).await?; let mut transfer_data = vec![]; let mut data = vec![]; @@ -3263,7 +3263,7 @@ pub async fn build_unshielding_transfer( let masp_fee_data = get_masp_fee_payment_amount( context, &args.tx, - fee_amount, + fee_per_gas_unit, &signing_data.fee_payer, args.gas_spending_keys.clone(), ) @@ -3328,7 +3328,7 @@ pub async fn build_unshielding_transfer( args.tx_code_path.clone(), data, add_shielded_parts, - fee_amount, + fee_per_gas_unit, &signing_data.fee_payer, ) .await?; diff --git a/crates/tests/src/integration/masp.rs b/crates/tests/src/integration/masp.rs index 634db5ce63..a26eba96b2 100644 --- a/crates/tests/src/integration/masp.rs +++ b/crates/tests/src/integration/masp.rs @@ -18,9 +18,9 @@ use test_log::test; use super::setup; use crate::e2e::setup::constants::{ AA_PAYMENT_ADDRESS, AA_VIEWING_KEY, AB_PAYMENT_ADDRESS, AB_VIEWING_KEY, - AC_PAYMENT_ADDRESS, ALBERT, ALBERT_KEY, A_SPENDING_KEY, BB_PAYMENT_ADDRESS, - BERTHA, BERTHA_KEY, BTC, B_SPENDING_KEY, CHRISTEL, CHRISTEL_KEY, ETH, MASP, - NAM, + AC_PAYMENT_ADDRESS, AC_VIEWING_KEY, ALBERT, ALBERT_KEY, A_SPENDING_KEY, + BB_PAYMENT_ADDRESS, BERTHA, BERTHA_KEY, BTC, B_SPENDING_KEY, CHRISTEL, + CHRISTEL_KEY, ETH, MASP, NAM, }; use crate::strings::TX_APPLIED_SUCCESS; @@ -2175,7 +2175,7 @@ fn dynamic_assets() -> Result<()> { // Test fee payment in masp: // // 1. Masp fee payment runs out of gas -// 3. Valid fee payment (also check that the first tx in the batch is executed +// 2. Valid fee payment (also check that the first tx in the batch is executed // only once) #[test] fn masp_fee_payment() -> Result<()> { @@ -2187,7 +2187,7 @@ fn masp_fee_payment() -> Result<()> { genesis.parameters.parameters.masp_fee_payment_gas_limit = 20_000; genesis })?; - _ = node.next_epoch(); + _ = node.next_masp_epoch(); // Add the relevant viewing keys to the wallet otherwise the shielded // context won't precache the masp data @@ -2223,7 +2223,7 @@ fn masp_fee_payment() -> Result<()> { &node, Bin::Client, vec![ - "transfer", + "shield", "--source", ALBERT_KEY, "--target", @@ -2237,6 +2237,7 @@ fn masp_fee_payment() -> Result<()> { ], )?; node.assert_success(); + _ = node.next_masp_epoch(); // sync shielded context run( &node, @@ -2262,29 +2263,33 @@ fn masp_fee_payment() -> Result<()> { assert!(captured.result.is_ok()); assert!(captured.contains("nam: 50000")); - _ = node.next_masp_epoch(); - // 1. 
Out of gas for masp fee payment - run( - &node, - Bin::Client, - vec![ - "transfer", - "--source", - A_SPENDING_KEY, - "--target", - AB_PAYMENT_ADDRESS, - "--token", - NAM, - "--amount", - "1", - "--gas-limit", - "5000", - "--ledger-address", - validator_one_rpc, - ], - )?; - node.assert_success(); + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + A_SPENDING_KEY, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "1", + "--gas-limit", + "2000", + "--gas-price", + "1", + "--disposable-gas-payer", + "--ledger-address", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_err()); + _ = node.next_masp_epoch(); // sync shielded context run( &node, @@ -2309,8 +2314,6 @@ fn masp_fee_payment() -> Result<()> { assert!(captured.result.is_ok()); assert!(captured.contains("nam: 50000")); - _ = node.next_masp_epoch(); - // 2. Valid masp fee payment run( &node, @@ -2329,6 +2332,7 @@ fn masp_fee_payment() -> Result<()> { "20000", "--gas-price", "1", + "--disposable-gas-payer", "--ledger-address", validator_one_rpc, ], @@ -2342,7 +2346,7 @@ fn masp_fee_payment() -> Result<()> { )?; node.assert_success(); // Check the exact balance of the tx source to ensure that the masp fee - // payement transaction was executed only once + // payment transaction was executed only once let captured = CapturedOutput::of(|| { run( &node, @@ -2360,6 +2364,23 @@ fn masp_fee_payment() -> Result<()> { }); assert!(captured.result.is_ok()); assert!(captured.contains("nam: 20000")); + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 10000")); Ok(()) } @@ -2375,7 +2396,7 @@ fn masp_fee_payment_gas_limit() -> Result<()> { let (mut node, _services) = setup::initialize_genesis(|mut genesis| { // Set an insufficient gas limit for masp fee payment to force all // transactions to fail - genesis.parameters.parameters.masp_fee_payment_gas_limit = 5_000; + genesis.parameters.parameters.masp_fee_payment_gas_limit = 3_000; genesis })?; _ = node.next_masp_epoch(); @@ -2414,11 +2435,11 @@ fn masp_fee_payment_gas_limit() -> Result<()> { &node, Bin::Client, vec![ - "transfer", + "shield", "--source", - A_SPENDING_KEY, + ALBERT_KEY, "--target", - AB_PAYMENT_ADDRESS, + AA_PAYMENT_ADDRESS, "--token", NAM, "--amount", @@ -2429,7 +2450,34 @@ fn masp_fee_payment_gas_limit() -> Result<()> { )?; node.assert_success(); - _ = node.next_epoch(); + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + // Check that the balance hasn't changed + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1000000")); // Masp fee payment with huge gas, check that the tx still fails because of // the protocol param @@ -2438,9 +2486,9 @@ fn masp_fee_payment_gas_limit() -> Result<()> { &node, Bin::Client, vec![ - "transfer", + "unshield", "--source", - ALBERT_KEY, + A_SPENDING_KEY, "--target", BERTHA, "--token", @@ -2451,6 +2499,7 @@ fn masp_fee_payment_gas_limit() -> Result<()> { "100000", "--gas-price", "1", + "--disposable-gas-payer", 
"--ledger-address", validator_one_rpc, ], @@ -2459,7 +2508,7 @@ fn masp_fee_payment_gas_limit() -> Result<()> { assert!(captured.result.is_err()); node.assert_success(); - _ = node.next_epoch(); + _ = node.next_masp_epoch(); // sync shielded context run( @@ -2490,3 +2539,700 @@ fn masp_fee_payment_gas_limit() -> Result<()> { Ok(()) } + +// Test masp fee payement with an unshield to a non-disposable address with +// already some funds on it. +#[test] +fn masp_fee_payment_with_non_disposable() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + let (mut node, _services) = setup::initialize_genesis(|mut genesis| { + genesis.parameters.parameters.masp_fee_payment_gas_limit = 20_000; + genesis + })?; + _ = node.next_masp_epoch(); + + // Add the relevant viewing keys to the wallet otherwise the shielded + // context won't precache the masp data + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_a", + "--value", + AA_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_b", + "--value", + AB_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + + // Shield some tokens + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT_KEY, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + // Drain balance of fee payer + "1999999", + // Pay gas transparently + "--gas-payer", + BERTHA_KEY, + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1999999")); + + // Masp fee payment to non-disposable address + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "unshield", + "--source", + A_SPENDING_KEY, + "--target", + BERTHA, + "--token", + NAM, + "--amount", + "1", + "--gas-limit", + "20000", + "--gas-price", + "1", + "--gas-payer", + ALBERT_KEY, + "--ledger-address", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1979999")); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + ALBERT_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 0")); + + Ok(()) +} + +// Test masp fee payement with a custom provided spending key. 
Check that fees +// are splitted between the actual source of the payment and this gas spending +// key +#[test] +fn masp_fee_payment_with_custom_spending_key() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + let (mut node, _services) = setup::initialize_genesis(|mut genesis| { + genesis.parameters.parameters.masp_fee_payment_gas_limit = 20_000; + genesis + })?; + _ = node.next_masp_epoch(); + + // Add the relevant viewing keys to the wallet otherwise the shielded + // context won't precache the masp data + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_a", + "--value", + AA_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_b", + "--value", + AB_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_c", + "--value", + AC_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + + // Shield some tokens + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT_KEY, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "10000", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT_KEY, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "30000", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 10000")); + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 30000")); + + // Masp fee payment with custom gas payer + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + A_SPENDING_KEY, + "--target", + AC_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "9000", + "--gas-limit", + "20000", + "--gas-price", + "1", + "--gas-spending-key", + B_SPENDING_KEY, + "--disposable-gas-payer", + "--ledger-address", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 0")); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + NAM, + "--node", + 
validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 11000")); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AC_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 9000")); + + Ok(()) +} + +// Test masp fee payement with a different token from the one used in the +// transaction itself and with the support of a different key for gas payment +#[test] +fn masp_fee_payment_with_different_token() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + let (mut node, _services) = setup::initialize_genesis(|mut genesis| { + genesis.parameters.parameters.masp_fee_payment_gas_limit = 20_000; + // Whitelist BTC for gas payment + genesis.parameters.parameters.minimum_gas_price.insert( + "btc".into(), + DenominatedAmount::new(1.into(), namada::token::Denomination(6)), + ); + genesis + })?; + _ = node.next_masp_epoch(); + + // Add the relevant viewing keys to the wallet otherwise the shielded + // context won't precache the masp data + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_a", + "--value", + AA_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_b", + "--value", + AB_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + + // Shield some tokens + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT_KEY, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "1", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + BTC, + "--amount", + "1000", + "--gas-payer", + ALBERT_KEY, + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + BTC, + "--amount", + "20000", + "--gas-payer", + ALBERT_KEY, + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1")); + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + BTC, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("btc: 1000")); + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + BTC, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("btc: 20000")); + + // Masp fee payment with custom token and gas payer + let captured = 
CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + A_SPENDING_KEY, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "1", + "--gas-token", + BTC, + "--gas-limit", + "20000", + "--gas-price", + "1", + "--gas-spending-key", + B_SPENDING_KEY, + "--disposable-gas-payer", + "--ledger-address", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 0")); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1")); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + BTC, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("btc: 0")); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + BTC, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("btc: 1000")); + + Ok(()) +} From 217afe30c82df2dc0ce3508d7ca1d3dddc4a42bc Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 12 Jun 2024 18:32:59 +0100 Subject: [PATCH 27/40] Masp fee payment for shielded actions --- crates/apps_lib/src/cli.rs | 11 ++++ crates/ibc/src/actions.rs | 1 + crates/ibc/src/lib.rs | 19 +++--- crates/ibc/src/msg.rs | 44 +++++++++++--- crates/namada/src/ledger/native_vp/ibc/mod.rs | 2 + crates/node/src/bench_utils.rs | 2 + crates/sdk/src/args.rs | 15 ++++- crates/sdk/src/lib.rs | 1 + crates/sdk/src/tx.rs | 34 ++++++++++- crates/tests/src/e2e/ibc_tests.rs | 58 ++++++++++++------- crates/tests/src/vm_host_env/ibc.rs | 1 + crates/tx_prelude/src/token.rs | 2 +- wasm/tx_ibc/src/lib.rs | 19 +++++- 13 files changed, 164 insertions(+), 45 deletions(-) diff --git a/crates/apps_lib/src/cli.rs b/crates/apps_lib/src/cli.rs index a0f4e31228..dfcf800ecf 100644 --- a/crates/apps_lib/src/cli.rs +++ b/crates/apps_lib/src/cli.rs @@ -4652,6 +4652,11 @@ pub mod args { ) -> Result, Self::Error> { let tx = self.tx.to_sdk(ctx)?; let chain_ctx = ctx.borrow_mut_chain_or_exit(); + let gas_spending_keys = self + .gas_spending_keys + .iter() + .map(|key| chain_ctx.get_cached(key)) + .collect(); Ok(TxIbcTransfer:: { tx, @@ -4665,6 +4670,7 @@ pub mod args { timeout_sec_offset: self.timeout_sec_offset, refund_target: chain_ctx.get_opt(&self.refund_target), memo: self.memo, + gas_spending_keys, tx_code_path: self.tx_code_path.to_path_buf(), }) } @@ -4686,6 +4692,10 @@ pub mod args { std::fs::read_to_string(path) .expect("Expected a file at given path") }); + let mut gas_spending_keys = vec![]; + if let Some(key) = GAS_SPENDING_KEY.parse(matches) { + gas_spending_keys.push(key); + } let tx_code_path = PathBuf::from(TX_IBC_WASM); Self { tx, @@ -4699,6 +4709,7 @@ pub mod args { timeout_sec_offset, refund_target, memo, + 
gas_spending_keys, tx_code_path, } } diff --git a/crates/ibc/src/actions.rs b/crates/ibc/src/actions.rs index 63696343a0..0269c9879a 100644 --- a/crates/ibc/src/actions.rs +++ b/crates/ibc/src/actions.rs @@ -222,6 +222,7 @@ where let data = MsgTransfer { message, transfer: None, + fee_unshield: None, } .serialize_to_vec(); diff --git a/crates/ibc/src/lib.rs b/crates/ibc/src/lib.rs index 15a673c9c6..6fd9b17969 100644 --- a/crates/ibc/src/lib.rs +++ b/crates/ibc/src/lib.rs @@ -76,7 +76,7 @@ use ibc::primitives::proto::Any; pub use ibc::*; pub use msg::*; use namada_core::address::{self, Address}; -use namada_token::ShieldingTransfer; +use namada_token::{ShieldingTransfer, UnshieldingTransferData}; pub use nft::*; use prost::Message; use thiserror::Error; @@ -152,7 +152,10 @@ where pub fn execute( &mut self, tx_data: &[u8], - ) -> Result, Error> { + ) -> Result< + (Option, Option), + Error, + > { let message = decode_message(tx_data)?; match &message { IbcMessage::Transfer(msg) => { @@ -167,7 +170,7 @@ where msg.message.clone(), ) .map_err(Error::TokenTransfer)?; - Ok(msg.transfer.clone()) + Ok((msg.transfer.clone(), msg.fee_unshield.clone())) } IbcMessage::NftTransfer(msg) => { let mut nft_transfer_ctx = @@ -178,7 +181,7 @@ where msg.message.clone(), ) .map_err(Error::NftTransfer)?; - Ok(msg.transfer.clone()) + Ok((msg.transfer.clone(), msg.fee_unshield.clone())) } IbcMessage::RecvPacket(msg) => { let envelope = @@ -191,7 +194,7 @@ where } else { None }; - Ok(transfer) + Ok((transfer, None)) } IbcMessage::AckPacket(msg) => { let envelope = @@ -205,7 +208,7 @@ where } else { None }; - Ok(transfer) + Ok((transfer, None)) } IbcMessage::Timeout(msg) => { let envelope = MsgEnvelope::Packet(PacketMsg::Timeout( @@ -213,12 +216,12 @@ where )); execute(&mut self.ctx, &mut self.router, envelope) .map_err(|e| Error::Context(Box::new(e)))?; - Ok(msg.transfer.clone()) + Ok((msg.transfer.clone(), None)) } IbcMessage::Envelope(envelope) => { execute(&mut self.ctx, &mut self.router, *envelope.clone()) .map_err(|e| Error::Context(Box::new(e)))?; - Ok(None) + Ok((None, None)) } } } diff --git a/crates/ibc/src/msg.rs b/crates/ibc/src/msg.rs index 7502cf1e31..e300dc56e1 100644 --- a/crates/ibc/src/msg.rs +++ b/crates/ibc/src/msg.rs @@ -7,7 +7,7 @@ use ibc::core::channel::types::msgs::{ }; use ibc::core::handler::types::msgs::MsgEnvelope; use ibc::primitives::proto::Protobuf; -use namada_token::ShieldingTransfer; +use namada_token::{ShieldingTransfer, UnshieldingTransferData}; /// The different variants of an Ibc message pub enum IbcMessage { @@ -32,6 +32,8 @@ pub struct MsgTransfer { pub message: IbcMsgTransfer, /// Shieleded transfer for MASP transaction pub transfer: Option, + /// Optional data for masp fee payment in the source chain + pub fee_unshield: Option, } impl BorshSerialize for MsgTransfer { @@ -40,7 +42,11 @@ impl BorshSerialize for MsgTransfer { writer: &mut W, ) -> std::io::Result<()> { let encoded_msg = self.message.clone().encode_vec(); - let members = (encoded_msg, self.transfer.clone()); + let members = ( + encoded_msg, + self.transfer.clone(), + self.fee_unshield.clone(), + ); BorshSerialize::serialize(&members, writer) } } @@ -50,11 +56,18 @@ impl BorshDeserialize for MsgTransfer { reader: &mut R, ) -> std::io::Result { use std::io::{Error, ErrorKind}; - let (msg, transfer): (Vec, Option) = - BorshDeserialize::deserialize_reader(reader)?; + let (msg, transfer, fee_unshield): ( + Vec, + Option, + Option, + ) = BorshDeserialize::deserialize_reader(reader)?; let message = 
IbcMsgTransfer::decode_vec(&msg) .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; - Ok(Self { message, transfer }) + Ok(Self { + message, + transfer, + fee_unshield, + }) } } @@ -65,6 +78,8 @@ pub struct MsgNftTransfer { pub message: IbcMsgNftTransfer, /// Shieleded transfer for MASP transaction pub transfer: Option, + /// Optional data for masp fee payment in the source chain + pub fee_unshield: Option, } impl BorshSerialize for MsgNftTransfer { @@ -73,7 +88,11 @@ impl BorshSerialize for MsgNftTransfer { writer: &mut W, ) -> std::io::Result<()> { let encoded_msg = self.message.clone().encode_vec(); - let members = (encoded_msg, self.transfer.clone()); + let members = ( + encoded_msg, + self.transfer.clone(), + self.fee_unshield.clone(), + ); BorshSerialize::serialize(&members, writer) } } @@ -83,11 +102,18 @@ impl BorshDeserialize for MsgNftTransfer { reader: &mut R, ) -> std::io::Result { use std::io::{Error, ErrorKind}; - let (msg, transfer): (Vec, Option) = - BorshDeserialize::deserialize_reader(reader)?; + let (msg, transfer, fee_unshield): ( + Vec, + Option, + Option, + ) = BorshDeserialize::deserialize_reader(reader)?; let message = IbcMsgNftTransfer::decode_vec(&msg) .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; - Ok(Self { message, transfer }) + Ok(Self { + message, + transfer, + fee_unshield, + }) } } diff --git a/crates/namada/src/ledger/native_vp/ibc/mod.rs b/crates/namada/src/ledger/native_vp/ibc/mod.rs index 3c405eeadc..53a1861d47 100644 --- a/crates/namada/src/ledger/native_vp/ibc/mod.rs +++ b/crates/namada/src/ledger/native_vp/ibc/mod.rs @@ -2281,6 +2281,7 @@ mod tests { let tx_data = MsgTransfer { message: msg, transfer: None, + fee_unshield: None, } .serialize_to_vec(); @@ -3150,6 +3151,7 @@ mod tests { let tx_data = MsgNftTransfer { message: msg, transfer: None, + fee_unshield: None, } .serialize_to_vec(); diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index 9b8f5ccaf1..85339dc937 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -405,6 +405,7 @@ impl BenchShell { let msg = MsgTransfer { message, transfer: None, + fee_unshield: None, }; self.generate_ibc_tx(TX_IBC_WASM, msg.serialize_to_vec()) @@ -1233,6 +1234,7 @@ impl BenchShieldedCtx { let msg = MsgTransfer { message: msg, transfer: Some(transfer), + fee_unshield: None, }; let mut ibc_tx = ctx diff --git a/crates/sdk/src/args.rs b/crates/sdk/src/args.rs index 67f0c69589..3fdf6ab46e 100644 --- a/crates/sdk/src/args.rs +++ b/crates/sdk/src/args.rs @@ -327,7 +327,7 @@ pub struct TxShieldedTransfer { pub tx: Tx, /// Transfer-specific data pub data: Vec>, - /// Optional additional keys for gas pyament + /// Optional additional keys for gas payment pub gas_spending_keys: Vec, /// Path to the TX WASM code file pub tx_code_path: PathBuf, @@ -438,6 +438,8 @@ pub struct TxIbcTransfer { pub refund_target: Option, /// Memo pub memo: Option, + /// Optional additional keys for gas payment + pub gas_spending_keys: Vec, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -517,6 +519,17 @@ impl TxIbcTransfer { } } + /// Gas spending keys + pub fn gas_spending_keys( + self, + gas_spending_keys: Vec, + ) -> Self { + Self { + gas_spending_keys, + ..self + } + } + /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { Self { diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index 82be8d364d..7439587182 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -327,6 +327,7 @@ pub trait 
Namada: Sized + MaybeSync + MaybeSend { timeout_sec_offset: None, refund_target: None, memo: None, + gas_spending_keys: Default::default(), tx: self.tx_builder(), tx_code_path: PathBuf::from(TX_IBC_WASM), } diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index b793240c84..980c3ad958 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -2531,11 +2531,29 @@ pub async fn build_ibc_transfer( amount: validated_amount, }]; + // Add masp fee payment if necessary + let masp_fee_data = get_masp_fee_payment_amount( + context, + &args.tx, + fee_per_gas_unit, + &signing_data.fee_payer, + args.gas_spending_keys.clone(), + ) + .await?; + let fee_unshield = + masp_fee_data + .as_ref() + .map(|fee_data| token::UnshieldingTransferData { + target: fee_data.target.to_owned(), + token: fee_data.token.to_owned(), + amount: fee_data.amount, + }); + // For transfer from a spending key let shielded_parts = construct_shielded_parts( context, masp_transfer_data, - None, + masp_fee_data, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await?; @@ -2639,7 +2657,12 @@ pub async fn build_ibc_transfer( timeout_height_on_b: timeout_height, timeout_timestamp_on_b: timeout_timestamp, }; - MsgTransfer { message, transfer }.serialize_to_vec() + MsgTransfer { + message, + transfer, + fee_unshield, + } + .serialize_to_vec() } else if let Some((trace_path, base_class_id, token_id)) = is_nft_trace(&ibc_denom) { @@ -2670,7 +2693,12 @@ pub async fn build_ibc_transfer( timeout_height_on_b: timeout_height, timeout_timestamp_on_b: timeout_timestamp, }; - MsgNftTransfer { message, transfer }.serialize_to_vec() + MsgNftTransfer { + message, + transfer, + fee_unshield, + } + .serialize_to_vec() } else { return Err(Error::Other(format!("Invalid IBC denom: {ibc_denom}"))); }; diff --git a/crates/tests/src/e2e/ibc_tests.rs b/crates/tests/src/e2e/ibc_tests.rs index c1781b6295..e581d51ea4 100644 --- a/crates/tests/src/e2e/ibc_tests.rs +++ b/crates/tests/src/e2e/ibc_tests.rs @@ -230,7 +230,7 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { receiver.to_string(), NAM, 100000.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -266,7 +266,7 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { receiver.to_string(), ibc_denom, 50000.0, - BERTHA_KEY, + Some(BERTHA_KEY), &port_id_b, &channel_id_b, None, @@ -286,6 +286,16 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { 100, ALBERT_KEY, )?; + // Send some token for masp fee payment + transfer_on_chain( + &test_a, + "shield", + ALBERT, + AA_PAYMENT_ADDRESS, + NAM, + 10_000, + ALBERT_KEY, + )?; shielded_sync(&test_a, AA_VIEWING_KEY)?; // Shieded transfer from Chain A to Chain B transfer( @@ -294,7 +304,7 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { AB_PAYMENT_ADDRESS, BTC, 10.0, - ALBERT_KEY, + None, &port_id_a, &channel_id_a, None, @@ -311,7 +321,7 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { "invalid_receiver", BTC, 10.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -333,7 +343,7 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { AB_PAYMENT_ADDRESS, BTC, 10.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, Some(Duration::new(10, 0)), @@ -398,7 +408,7 @@ fn ibc_namada_gaia() -> Result<()> { receiver, APFEL, 200.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_namada, &channel_id_namada, None, @@ -453,7 +463,7 @@ fn ibc_namada_gaia() -> Result<()> { &receiver, ibc_denom, 100.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_namada, &channel_id_namada, None, @@ -500,7 +510,7 @@ fn ibc_namada_gaia() -> Result<()> { 
&receiver, &ibc_denom, 10.0, - BERTHA_KEY, + Some(BERTHA_KEY), &port_id_namada, &channel_id_namada, None, @@ -660,7 +670,7 @@ fn proposal_ibc_token_inflation() -> Result<()> { AB_PAYMENT_ADDRESS, APFEL, 1.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -727,7 +737,7 @@ fn ibc_rate_limit() -> Result<()> { receiver.to_string(), NAM, 1.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -742,7 +752,7 @@ fn ibc_rate_limit() -> Result<()> { receiver.to_string(), NAM, 1.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -768,7 +778,7 @@ fn ibc_rate_limit() -> Result<()> { receiver.to_string(), NAM, 1.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -792,7 +802,7 @@ fn ibc_rate_limit() -> Result<()> { receiver.to_string(), NAM, 1.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, Some(Duration::new(20, 0)), @@ -1558,7 +1568,7 @@ fn transfer_token( receiver.to_string(), NAM, 100000.0, - ALBERT_KEY, + Some(ALBERT_KEY), port_id_a, channel_id_a, None, @@ -1633,7 +1643,7 @@ fn try_invalid_transfers( receiver.to_string(), NAM, 10.1, - ALBERT_KEY, + Some(ALBERT_KEY), port_id_a, channel_id_a, None, @@ -1649,7 +1659,7 @@ fn try_invalid_transfers( receiver.to_string(), NAM, 10.0, - ALBERT_KEY, + Some(ALBERT_KEY), &"port".parse().unwrap(), channel_id_a, None, @@ -1665,7 +1675,7 @@ fn try_invalid_transfers( receiver.to_string(), NAM, 10.0, - ALBERT_KEY, + Some(ALBERT_KEY), port_id_a, &"channel-42".parse().unwrap(), None, @@ -1730,7 +1740,7 @@ fn transfer_back( receiver.to_string(), ibc_denom, 50000.0, - BERTHA_KEY, + Some(BERTHA_KEY), port_id_b, channel_id_b, None, @@ -1803,7 +1813,7 @@ fn transfer_timeout( receiver.to_string(), NAM, 100000.0, - ALBERT_KEY, + Some(ALBERT_KEY), port_id_a, channel_id_a, Some(Duration::new(5, 0)), @@ -1937,7 +1947,7 @@ fn transfer( receiver: impl AsRef, token: impl AsRef, amount: f64, - signer: impl AsRef, + signer: Option<&str>, port_id: &PortId, channel_id: &ChannelId, timeout_sec: Option, @@ -1956,8 +1966,6 @@ fn transfer( sender.as_ref(), "--receiver", receiver.as_ref(), - "--signing-keys", - signer.as_ref(), "--token", token.as_ref(), "--amount", @@ -1970,6 +1978,12 @@ fn transfer( &rpc, ]; + if let Some(signer) = signer { + tx_args.extend_from_slice(&["--signing-keys", signer]); + } else { + tx_args.push("--disposable-gas-payer"); + } + let timeout = timeout_sec.unwrap_or_default().as_secs().to_string(); if timeout_sec.is_some() { tx_args.push("--timeout-sec-offset"); diff --git a/crates/tests/src/vm_host_env/ibc.rs b/crates/tests/src/vm_host_env/ibc.rs index 40a97e70e8..30e1e14654 100644 --- a/crates/tests/src/vm_host_env/ibc.rs +++ b/crates/tests/src/vm_host_env/ibc.rs @@ -658,6 +658,7 @@ pub fn msg_transfer( MsgTransfer { message, transfer: None, + fee_unshield: None, } } diff --git a/crates/tx_prelude/src/token.rs b/crates/tx_prelude/src/token.rs index cd59dcdcd4..50b2ad64f5 100644 --- a/crates/tx_prelude/src/token.rs +++ b/crates/tx_prelude/src/token.rs @@ -7,7 +7,7 @@ pub use namada_token::testing; pub use namada_token::{ storage_key, utils, Amount, DenominatedAmount, ShieldedTransfer, ShieldingMultiTransfer, ShieldingTransfer, TransparentTransfer, - UnshieldingMultiTransfer, UnshieldingTransfer, + UnshieldingMultiTransfer, UnshieldingTransfer, UnshieldingTransferData, }; use namada_tx_env::TxEnv; diff --git a/wasm/tx_ibc/src/lib.rs b/wasm/tx_ibc/src/lib.rs index e131a90c9a..f48d93c523 100644 --- a/wasm/tx_ibc/src/lib.rs +++ b/wasm/tx_ibc/src/lib.rs @@ -4,14 
+4,31 @@ //! `key::ed25519::SignedTxData` as its input as declared in `ibc` crate. use namada_tx_prelude::action::{Action, MaspAction, Write}; +use namada_tx_prelude::token::UnshieldingTransferData; use namada_tx_prelude::*; #[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { let data = ctx.get_tx_data(&tx_data)?; - let transfer = + let (transfer, masp_fee_payment) = ibc::ibc_actions(ctx).execute(&data).into_storage_result()?; + if let Some(UnshieldingTransferData { + token, + amount, + target, + }) = &masp_fee_payment + { + // Transparent unshield for fee payment + token::transfer( + ctx, + &Address::Internal(address::InternalAddress::Masp), + target, + token, + amount.amount(), + )?; + } + if let Some(masp_section_ref) = transfer.map(|transfer| transfer.shielded_section_hash) { From 13ea7b6786a9bdbc8d624b2994312df259b171dd Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 13 Jun 2024 19:02:00 +0100 Subject: [PATCH 28/40] Adds missing gas spending key arg to ibc tx --- crates/apps_lib/src/cli.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/apps_lib/src/cli.rs b/crates/apps_lib/src/cli.rs index dfcf800ecf..84c3658254 100644 --- a/crates/apps_lib/src/cli.rs +++ b/crates/apps_lib/src/cli.rs @@ -4746,6 +4746,11 @@ pub mod args { .arg(IBC_TRANSFER_MEMO_PATH.def().help(wrap!( "The path for the memo field of ICS20 transfer." ))) + .arg(GAS_SPENDING_KEY.def().help(wrap!( + "The optional spending key that will be used in addition \ + to the source for gas payment (if this is a shielded \ + action)." + ))) } } From bab5cdeb370fc01a71882e7abc30815d25a9a7c0 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 13 Jun 2024 19:04:57 +0100 Subject: [PATCH 29/40] Changelog #3393 --- .changelog/unreleased/features/3393-masp-fee-payment.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/features/3393-masp-fee-payment.md diff --git a/.changelog/unreleased/features/3393-masp-fee-payment.md b/.changelog/unreleased/features/3393-masp-fee-payment.md new file mode 100644 index 0000000000..d5b9fae6da --- /dev/null +++ b/.changelog/unreleased/features/3393-masp-fee-payment.md @@ -0,0 +1,2 @@ +- Added support for fee payment directly from the MASP pool. + ([\#3393](https://github.com/anoma/namada/pull/3393)) \ No newline at end of file From 9698bb7a0da969ea45b68de6fa601f5689cd7899 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 14 Jun 2024 16:38:54 +0100 Subject: [PATCH 30/40] Panics in fee payment if balance read fails --- crates/namada/src/ledger/protocol/mod.rs | 28 +++++++++++------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index 4c18bbb601..a575d55fcc 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -540,7 +540,7 @@ where if let Ok(Some(valid_batched_tx_result)) = try_masp_fee_payment(shell_params, tx, tx_index) { - // NOTE: Even if the unshielding was succesfull we could + // NOTE: Even if the unshielding was successful we could // still fail in the transfer (e.g. cause the unshielded // amount is not enough to cover the fees). In this case we // want do drop the changes applied by the masp transaction @@ -551,25 +551,23 @@ where shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), - ); + ) + .expect("Could not read balance key from storage"); // Ok to unwrap_or_default. 
In the default case, the only // way the checked op can return Some is if fees are 0, but // if that's the case then we would have never reached this // branch of execution - let post_bal = balance - .unwrap_or_default() - .checked_sub(fees) - .filter(|_| { - fee_token_transfer( - shell_params.state, - &wrapper.fee.token, - &wrapper.fee_payer(), - block_proposer, - fees, - ) - .is_ok() - }); + let post_bal = balance.checked_sub(fees).filter(|_| { + fee_token_transfer( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + block_proposer, + fees, + ) + .is_ok() + }); // Batched tx result must be returned (and considered) only // if fee payment was successful From 70f40a6d1576fa2c60027186cde3263bf9e2e06b Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 14 Jun 2024 16:39:09 +0100 Subject: [PATCH 31/40] Fixes typo --- crates/tests/src/integration/masp.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/tests/src/integration/masp.rs b/crates/tests/src/integration/masp.rs index a26eba96b2..3897ea66ba 100644 --- a/crates/tests/src/integration/masp.rs +++ b/crates/tests/src/integration/masp.rs @@ -2714,7 +2714,7 @@ fn masp_fee_payment_with_non_disposable() -> Result<()> { } // Test masp fee payement with a custom provided spending key. Check that fees -// are splitted between the actual source of the payment and this gas spending +// are split between the actual source of the payment and this gas spending // key #[test] fn masp_fee_payment_with_custom_spending_key() -> Result<()> { From 4f893352e2b1c561c3b0cc81afcfdd0f3ac838d3 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 14 Jun 2024 16:50:27 +0100 Subject: [PATCH 32/40] Reuses token transfer --- crates/namada/src/ledger/protocol/mod.rs | 75 ++++---------------- crates/state/src/lib.rs | 11 +++ crates/state/src/wl_state.rs | 87 +++++++++++++++++++++++- 3 files changed, 110 insertions(+), 63 deletions(-) diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index a575d55fcc..2305d291fd 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -3,7 +3,6 @@ use std::cell::RefCell; use std::collections::BTreeSet; use std::fmt::Debug; -use borsh_ext::BorshSerializeExt; use eyre::{eyre, WrapErr}; use namada_core::booleans::BoolResultUnitExt; use namada_core::hash::Hash; @@ -13,6 +12,7 @@ use namada_events::extend::{ }; use namada_events::EventLevel; use namada_gas::TxGasMeter; +use namada_state::TxWrites; use namada_token::event::{TokenEvent, TokenOperation, UserAccount}; use namada_token::utils::is_masp_transfer; use namada_tx::action::Read; @@ -434,7 +434,7 @@ pub(crate) fn apply_wrapper_tx( block_proposer: Option<&Address>, ) -> Result> where - S: State + Read + Sync, + S: State + Read + TxWrites + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, @@ -500,6 +500,7 @@ pub fn transfer_fee( where S: State + StorageRead + + TxWrites + Read + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -781,69 +782,19 @@ fn fee_token_transfer( amount: Amount, ) -> Result<()> where - WLS: State + StorageRead, + WLS: State + StorageRead + TxWrites, { - // Transfer `token` from `src` to `dest`. Returns an `Err` if `src` has - // insufficient balance or if the transfer the `dest` would overflow (This - // can only happen if the total supply doesn't fit in `token::Amount`). 
- // Contrary to `crate::token::transfer` this function updates the tx - // write log and not the block write log. - fn inner_fee_token_transfer( - state: &mut WLS, - token: &Address, - src: &Address, - dest: &Address, - amount: Amount, - ) -> Result<()> - where - WLS: State + StorageRead, - { - if amount.is_zero() { - return Ok(()); - } - let src_key = crate::token::storage_key::balance_key(token, src); - let src_balance = crate::token::read_balance(state, token, src) - .map_err(Error::StorageError)?; - match src_balance.checked_sub(amount) { - Some(new_src_balance) => { - let dest_key = - crate::token::storage_key::balance_key(token, dest); - let dest_balance = - crate::token::read_balance(state, token, dest) - .map_err(Error::StorageError)?; - match dest_balance.checked_add(amount) { - Some(new_dest_balance) => { - state - .write_log_mut() - .write(&src_key, new_src_balance.serialize_to_vec()) - .map_err(|e| Error::FeeError(e.to_string()))?; - match state.write_log_mut().write( - &dest_key, - new_dest_balance.serialize_to_vec(), - ) { - Ok(_) => Ok(()), - Err(e) => Err(Error::FeeError(e.to_string())), - } - } - None => Err(Error::StorageError( - namada_state::StorageError::new_alloc(format!( - "The transfer would overflow balance of {dest}" - )), - )), - } - } - None => { - Err(Error::StorageError(namada_state::StorageError::new_alloc( - format!("{src} has insufficient balance"), - ))) - } - } - } - - inner_fee_token_transfer(state, token, src, dest, amount).map_err(|err| { + crate::token::transfer( + &mut state.with_tx_writes(), + token, + src, + dest, + amount, + ) + .map_err(|err| { state.write_log_mut().drop_tx(); - err + Error::StorageError(err) }) } diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 6165ffc4f7..45bb3f3814 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -57,6 +57,7 @@ pub use namada_storage::{ StorageWrite, DB, }; use thiserror::Error; +use wl_state::TxWlState; pub use wl_state::{FullAccessState, TempWlState, WlState}; use write_log::WriteLog; @@ -205,6 +206,14 @@ pub trait State: StateRead + StorageWrite { } } +/// Perform storage writes and deletions to write-log at tx level. +pub trait TxWrites: StateRead { + /// Instead of performing protocol writes to block level write-log, + /// [`TxWlState`] storage writes at tx level write-log similarly to + /// [`TxHostEnvState`]. + fn with_tx_writes(&mut self) -> TxWlState<'_, Self::D, Self::H>; +} + /// Implement [`trait StorageRead`] using its [`trait StateRead`] /// implementation. #[macro_export] @@ -411,9 +420,11 @@ macro_rules! 
impl_storage_write_by_protocol { impl_storage_read!(FullAccessState); impl_storage_read!(WlState); impl_storage_read!(TempWlState<'_, D, H>); +impl_storage_read!(TxWlState<'_, D, H>); impl_storage_write_by_protocol!(FullAccessState); impl_storage_write_by_protocol!(WlState); impl_storage_write_by_protocol!(TempWlState<'_, D, H>); +impl_storage_write!(TxWlState<'_, D, H>); impl_storage_read!(TxHostEnvState<'_, D, H>); impl_storage_read!(VpHostEnvState<'_, D, H>); diff --git a/crates/state/src/wl_state.rs b/crates/state/src/wl_state.rs index e4ca57ea52..8f6ebf1984 100644 --- a/crates/state/src/wl_state.rs +++ b/crates/state/src/wl_state.rs @@ -22,7 +22,7 @@ use crate::write_log::{StorageModification, WriteLog}; use crate::{ is_pending_transfer_key, DBIter, Epoch, Error, Hash, Key, KeySeg, LastBlock, MembershipProof, MerkleTree, MerkleTreeError, ProofOps, Result, - State, StateRead, StorageHasher, StorageResult, StoreType, DB, + State, StateRead, StorageHasher, StorageResult, StoreType, TxWrites, DB, EPOCH_SWITCH_BLOCKS_DELAY, STORAGE_ACCESS_GAS_PER_BYTE, }; @@ -52,6 +52,22 @@ where pub diff_key_filter: fn(&storage::Key) -> bool, } +/// State with a temporary write log. This is used for dry-running txs and ABCI +/// prepare and processs proposal, which must not modify the actual state. +#[derive(Debug)] +pub struct TxWlState<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + pub(crate) write_log: &'a mut WriteLog, + // DB + pub(crate) db: &'a D, + /// State + pub(crate) in_mem: &'a InMemory, +} + /// State with a temporary write log. This is used for dry-running txs and ABCI /// prepare and processs proposal, which must not modify the actual state. #[derive(Debug)] @@ -1237,6 +1253,61 @@ where } } +impl TxWrites for WlState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn with_tx_writes(&mut self) -> TxWlState<'_, Self::D, Self::H> { + TxWlState { + write_log: &mut self.write_log, + db: &self.db, + in_mem: &self.in_mem, + } + } +} + +impl StateRead for TxWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + self.write_log + } + + fn db(&self) -> &D { + self.db + } + + fn in_mem(&self) -> &InMemory { + self.in_mem + } + + fn charge_gas(&self, _gas: u64) -> Result<()> { + Ok(()) + } +} + +impl State for TxWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn write_log_mut(&mut self) -> &mut WriteLog { + self.write_log + } + + fn split_borrow( + &mut self, + ) -> (&mut WriteLog, &InMemory, &Self::D) { + (self.write_log, (self.in_mem), (self.db)) + } +} + impl EmitEvents for WlState where D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1302,6 +1373,20 @@ where } } +impl TxWrites for TempWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn with_tx_writes(&mut self) -> TxWlState<'_, Self::D, Self::H> { + TxWlState { + write_log: &mut self.write_log, + db: self.db, + in_mem: self.in_mem, + } + } +} + impl Deref for FullAccessState where D: DB + for<'iter> DBIter<'iter>, From ed74656aab0f31b76e410eff5dc79e52296cef95 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 14 Jun 2024 17:14:49 +0100 Subject: [PATCH 33/40] Fixes broken docs --- crates/state/src/lib.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 
45bb3f3814..b2eb28e252 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -208,9 +208,7 @@ pub trait State: StateRead + StorageWrite { /// Perform storage writes and deletions to write-log at tx level. pub trait TxWrites: StateRead { - /// Instead of performing protocol writes to block level write-log, - /// [`TxWlState`] storage writes at tx level write-log similarly to - /// [`TxHostEnvState`]. + /// Performs storage writes at the tx level of the write-log. fn with_tx_writes(&mut self) -> TxWlState<'_, Self::D, Self::H>; } From 6026c4217ec5d87bdc903b09b5812a36ad58d2ea Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Sun, 16 Jun 2024 11:57:13 +0100 Subject: [PATCH 34/40] Fixes masp amounts conversion --- crates/core/src/token.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/core/src/token.rs b/crates/core/src/token.rs index 463be7ff60..1d87ca2ce0 100644 --- a/crates/core/src/token.rs +++ b/crates/core/src/token.rs @@ -237,7 +237,9 @@ impl Amount { val: i128, denom: MaspDigitPos, ) -> Option { - let lo = u64::try_from(val).ok()?; + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let lo = val as u64; #[allow(clippy::cast_sign_loss)] let hi = (val >> 64) as u64; let lo_pos = denom as usize; From dc0316cdb1cb4f25180981cc97de8fe3b5e02dca Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 18 Jun 2024 10:52:46 +0100 Subject: [PATCH 35/40] Fixes typo --- crates/namada/src/ledger/protocol/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index 2305d291fd..5143b3ba41 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -544,7 +544,7 @@ where // NOTE: Even if the unshielding was successful we could // still fail in the transfer (e.g. cause the unshielded // amount is not enough to cover the fees). In this case we - // want do drop the changes applied by the masp transaction + // want to drop the changes applied by the masp transaction // and try to drain the fees from the transparent balance. // Because of this we must NOT propagate errors from within // this branch @@ -754,7 +754,7 @@ where e ); if let Error::GasError(_) = e { - // Popagate only if it is a gas error + // Propagate only if it is a gas error return Err(e); } From 9e4d4c9693499bf366841db3efb663f81b247c16 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 21 Jun 2024 14:32:09 +0100 Subject: [PATCH 36/40] Removes useless write-log commit in fee payment --- crates/namada/src/ledger/protocol/mod.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index 5143b3ba41..d3bd389ece 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -701,14 +701,6 @@ where let ref_unshield_gas_meter = RefCell::new(gas_meter); let valid_batched_tx_result = { - // NOTE: A clean tx write log must be provided to this call - // for a correct vp validation. Block and batch write logs, instead, - // should contain any prior changes (if any). This is to simulate - // the fee-paying tx (to prevent the already written keys from being - // passed/triggering VPs) but we cannot commit the tx write log yet - // cause the tx could still be invalid. So we use the batch write log to - // dump the current modifications. 
- state.write_log_mut().commit_tx_to_batch(); match apply_wasm_tx( tx.batch_ref_first_tx() .ok_or_else(|| Error::MissingInnerTxs)?, From 13b4bcd098d0288c11c90088c6a7b106818f76a8 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 24 Jun 2024 14:25:26 +0200 Subject: [PATCH 37/40] Renames misleading gas limit variable --- crates/namada/src/ledger/protocol/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index d3bd389ece..ca637de835 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -682,7 +682,7 @@ where // initialize it with the already consumed gas. The gas limit should // actually be the lowest between the protocol parameter and the actual gas // limit of the transaction - let min_gas_limit = state + let max_gas_limit = state .read::( &namada_parameters::storage::get_masp_fee_payment_gas_limit_key(), ) @@ -691,7 +691,7 @@ where .min(tx_gas_meter.borrow().tx_gas_limit.into()); let mut gas_meter = TxGasMeter::new( - namada_gas::Gas::from_whole_units(min_gas_limit).ok_or_else(|| { + namada_gas::Gas::from_whole_units(max_gas_limit).ok_or_else(|| { Error::GasError("Overflow in gas expansion".to_string()) })?, ); From 09df28b197b7b99010f72185abe1c23077717059 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 25 Jun 2024 14:27:31 +0200 Subject: [PATCH 38/40] Early sapling balance check in masp vp --- crates/namada/src/ledger/native_vp/masp.rs | 64 +++++++++++----------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/crates/namada/src/ledger/native_vp/masp.rs b/crates/namada/src/ledger/native_vp/masp.rs index 51e86f018b..c87666dbb4 100644 --- a/crates/namada/src/ledger/native_vp/masp.rs +++ b/crates/namada/src/ledger/native_vp/masp.rs @@ -390,7 +390,7 @@ where } // The Sapling value balance adds to the transparent tx pool - let mut transparent_tx_pool = shielded_tx.sapling_value_balance(); + let transparent_tx_pool = shielded_tx.sapling_value_balance(); // Check the validity of the keys and get the transfer data let mut changed_balances = @@ -436,7 +436,7 @@ where validate_transparent_bundle( &shielded_tx, &mut changed_balances, - &mut transparent_tx_pool, + transparent_tx_pool, masp_epoch, conversion_state, &mut signers, @@ -523,31 +523,6 @@ where } } - // Ensure that the shielded transaction exactly balances - match transparent_tx_pool.partial_cmp(&I128Sum::zero()) { - None | Some(Ordering::Less) => { - let error = native_vp::Error::new_const( - "Transparent transaction value pool must be nonnegative. \ - Violation may be caused by transaction being constructed \ - in previous epoch. Maybe try again.", - ) - .into(); - tracing::debug!("{error}"); - // Section 3.4: The remaining value in the transparent - // transaction value pool MUST be nonnegative. - return Err(error); - } - Some(Ordering::Greater) => { - let error = native_vp::Error::new_const( - "Transaction fees cannot be paid inside MASP transaction.", - ) - .into(); - tracing::debug!("{error}"); - return Err(error); - } - _ => {} - } - // Verify the proofs verify_shielded_tx(&shielded_tx, |gas| self.ctx.charge_gas(gas)) .map_err(Error::NativeVpError) @@ -724,11 +699,12 @@ fn validate_transparent_output( // Update the transaction value pool and also ensure that the Transaction is // consistent with the balance changes. I.e. 
the transparent inputs are not more // than the initial balances and that the transparent outputs are not more than -// the final balances. +// the final balances. Also ensure that the sapling value balance is exactly 0. fn validate_transparent_bundle( shielded_tx: &Transaction, changed_balances: &mut ChangedBalances, - transparent_tx_pool: &mut I128Sum, + // Take ownership to prevent further usage after this call + mut transparent_tx_pool: I128Sum, epoch: MaspEpoch, conversion_state: &ConversionState, signers: &mut BTreeSet, @@ -738,7 +714,7 @@ fn validate_transparent_bundle( validate_transparent_input( vin, changed_balances, - transparent_tx_pool, + &mut transparent_tx_pool, epoch, conversion_state, signers, @@ -749,13 +725,37 @@ fn validate_transparent_bundle( validate_transparent_output( out, changed_balances, - transparent_tx_pool, + &mut transparent_tx_pool, epoch, conversion_state, )?; } } - Ok(()) + + // Ensure that the shielded transaction exactly balances + match transparent_tx_pool.partial_cmp(&I128Sum::zero()) { + None | Some(Ordering::Less) => { + let error = native_vp::Error::new_const( + "Transparent transaction value pool must be nonnegative. \ + Violation may be caused by transaction being constructed in \ + previous epoch. Maybe try again.", + ) + .into(); + tracing::debug!("{error}"); + // The remaining value in the transparent transaction value pool + // MUST be nonnegative. + Err(error) + } + Some(Ordering::Greater) => { + let error = native_vp::Error::new_const( + "Transaction fees cannot be left on the MASP balance.", + ) + .into(); + tracing::debug!("{error}"); + Err(error) + } + _ => Ok(()), + } } // Apply the given Sapling value balance component to the accumulator From dd907d551a4c6d3ad8fd4b71b8058a50b55f4236 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 25 Jun 2024 14:35:02 +0200 Subject: [PATCH 39/40] Changelog #2721 --- .../unreleased/improvements/2721-early-sapling-balance-check.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/2721-early-sapling-balance-check.md diff --git a/.changelog/unreleased/improvements/2721-early-sapling-balance-check.md b/.changelog/unreleased/improvements/2721-early-sapling-balance-check.md new file mode 100644 index 0000000000..2bde17c756 --- /dev/null +++ b/.changelog/unreleased/improvements/2721-early-sapling-balance-check.md @@ -0,0 +1,2 @@ +- Moved up the check on the sapling value balance in the masp vp. 
+ ([\#2721](https://github.com/anoma/namada/issues/2721)) \ No newline at end of file From a3bb632a1f40700abae9027a80cc9fcd88725f70 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 25 Jun 2024 15:53:18 +0200 Subject: [PATCH 40/40] Extracts the sapling value balance directly in `validate_transparent_bundle` --- crates/namada/src/ledger/native_vp/masp.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/crates/namada/src/ledger/native_vp/masp.rs b/crates/namada/src/ledger/native_vp/masp.rs index c87666dbb4..5f0d15abef 100644 --- a/crates/namada/src/ledger/native_vp/masp.rs +++ b/crates/namada/src/ledger/native_vp/masp.rs @@ -389,9 +389,6 @@ where return Err(error); } - // The Sapling value balance adds to the transparent tx pool - let transparent_tx_pool = shielded_tx.sapling_value_balance(); - // Check the validity of the keys and get the transfer data let mut changed_balances = self.validate_state_and_get_transfer_data(keys_changed)?; @@ -436,7 +433,6 @@ where validate_transparent_bundle( &shielded_tx, &mut changed_balances, - transparent_tx_pool, masp_epoch, conversion_state, &mut signers, @@ -703,12 +699,13 @@ fn validate_transparent_output( fn validate_transparent_bundle( shielded_tx: &Transaction, changed_balances: &mut ChangedBalances, - // Take ownership to prevent further usage after this call - mut transparent_tx_pool: I128Sum, epoch: MaspEpoch, conversion_state: &ConversionState, signers: &mut BTreeSet, ) -> Result<()> { + // The Sapling value balance adds to the transparent tx pool + let mut transparent_tx_pool = shielded_tx.sapling_value_balance(); + if let Some(transp_bundle) = shielded_tx.transparent_bundle() { for vin in transp_bundle.vin.iter() { validate_transparent_input(
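
Context for the amounts-conversion fix in [PATCH 34/40] above: replacing `u64::try_from(val).ok()?` with a truncating cast means a positive `i128` larger than `u64::MAX` is now split across two 64-bit limbs instead of making the whole conversion return `None`. Below is a minimal, self-contained sketch of that limb split only; the helper function and `main` are illustrative assumptions and not part of the patch, and the surrounding denomination/position handling from the patched method is elided.

fn split_i128_limbs(val: i128) -> (u64, u64) {
    // Keep the low 64 bits; with the previous `u64::try_from` this path
    // returned `None` for any value above `u64::MAX`.
    #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
    let lo = val as u64;
    // High 64 bits of the value, as in the patched code.
    #[allow(clippy::cast_sign_loss)]
    let hi = (val >> 64) as u64;
    (lo, hi)
}

fn main() {
    // (1 << 64) + 5 does not fit in a u64, but it is a valid two-limb amount:
    // low limb 5, high limb 1.
    assert_eq!(split_i128_limbs((1i128 << 64) + 5), (5, 1));
}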