diff --git a/.changelog/unreleased/features/3356-vectorize-transfers.md b/.changelog/unreleased/features/3356-vectorize-transfers.md new file mode 100644 index 0000000000..3b7f8e66da --- /dev/null +++ b/.changelog/unreleased/features/3356-vectorize-transfers.md @@ -0,0 +1,2 @@ +- Reworked transparent and masp transfers to allow for multiple sources, targets, + tokens and amounts. ([\#3356](https://github.com/anoma/namada/pull/3356)) \ No newline at end of file diff --git a/.changelog/unreleased/features/3393-masp-fee-payment.md b/.changelog/unreleased/features/3393-masp-fee-payment.md new file mode 100644 index 0000000000..d5b9fae6da --- /dev/null +++ b/.changelog/unreleased/features/3393-masp-fee-payment.md @@ -0,0 +1,2 @@ +- Added support for fee payment directly from the MASP pool. + ([\#3393](https://github.com/anoma/namada/pull/3393)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2721-early-sapling-balance-check.md b/.changelog/unreleased/improvements/2721-early-sapling-balance-check.md new file mode 100644 index 0000000000..2bde17c756 --- /dev/null +++ b/.changelog/unreleased/improvements/2721-early-sapling-balance-check.md @@ -0,0 +1,2 @@ +- Moved up the check on the sapling value balance in the masp vp. + ([\#2721](https://github.com/anoma/namada/issues/2721)) \ No newline at end of file diff --git a/crates/apps_lib/src/cli.rs b/crates/apps_lib/src/cli.rs index eadf78063a..84c3658254 100644 --- a/crates/apps_lib/src/cli.rs +++ b/crates/apps_lib/src/cli.rs @@ -3221,6 +3221,8 @@ pub mod args { "gas-limit", DefaultFn(|| GasLimit::from(DEFAULT_GAS_LIMIT)), ); + pub const GAS_SPENDING_KEY: ArgOpt = + arg_opt("gas-spending-key"); pub const FEE_TOKEN: ArgDefaultFromCtx = arg_default_from_ctx("gas-token", DefaultFn(|| "".parse().unwrap())); pub const FEE_PAYER: Arg = arg("fee-payer"); @@ -4338,14 +4340,21 @@ pub mod args { ctx: &mut Context, ) -> Result, Self::Error> { let tx = self.tx.to_sdk(ctx)?; + let mut data = vec![]; let chain_ctx = ctx.borrow_mut_chain_or_exit(); + for transfer_data in self.data { + data.push(TxTransparentTransferData { + source: chain_ctx.get(&transfer_data.source), + target: chain_ctx.get(&transfer_data.target), + token: chain_ctx.get(&transfer_data.token), + amount: transfer_data.amount, + }); + } + Ok(TxTransparentTransfer:: { tx, - source: chain_ctx.get(&self.source), - target: chain_ctx.get(&self.target), - token: chain_ctx.get(&self.token), - amount: self.amount, + data, tx_code_path: self.tx_code_path.to_path_buf(), }) } @@ -4359,12 +4368,16 @@ pub mod args { let token = TOKEN.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let tx_code_path = PathBuf::from(TX_TRANSPARENT_TRANSFER_WASM); - Self { - tx, + let data = vec![TxTransparentTransferData { source, target, token, amount, + }]; + + Self { + tx, + data, tx_code_path, } } @@ -4393,14 +4406,28 @@ pub mod args { ctx: &mut Context, ) -> Result, Self::Error> { let tx = self.tx.to_sdk(ctx)?; + let mut data = vec![]; let chain_ctx = ctx.borrow_mut_chain_or_exit(); + for transfer_data in self.data { + data.push(TxShieldedTransferData { + source: chain_ctx.get_cached(&transfer_data.source), + target: chain_ctx.get(&transfer_data.target), + token: chain_ctx.get(&transfer_data.token), + amount: transfer_data.amount, + }); + } + + let gas_spending_keys = self + .gas_spending_keys + .iter() + .map(|key| chain_ctx.get_cached(key)) + .collect(); + Ok(TxShieldedTransfer:: { tx, - source: chain_ctx.get_cached(&self.source), - target: 
chain_ctx.get(&self.target), - token: chain_ctx.get(&self.token), - amount: self.amount, + data, + gas_spending_keys, tx_code_path: self.tx_code_path.to_path_buf(), }) } @@ -4414,12 +4441,21 @@ pub mod args { let token = TOKEN.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let tx_code_path = PathBuf::from(TX_SHIELDED_TRANSFER_WASM); - Self { - tx, + let data = vec![TxShieldedTransferData { source, target, token, amount, + }]; + let mut gas_spending_keys = vec![]; + if let Some(key) = GAS_SPENDING_KEY.parse(matches) { + gas_spending_keys.push(key); + } + + Self { + tx, + data, + gas_spending_keys, tx_code_path, } } @@ -4442,6 +4478,10 @@ pub mod args { .def() .help(wrap!("The amount to transfer in decimal.")), ) + .arg(GAS_SPENDING_KEY.def().help(wrap!( + "The optional spending key that will be used in addition \ + to the source for gas payment." + ))) } } @@ -4453,14 +4493,21 @@ pub mod args { ctx: &mut Context, ) -> Result, Self::Error> { let tx = self.tx.to_sdk(ctx)?; + let mut data = vec![]; let chain_ctx = ctx.borrow_mut_chain_or_exit(); + for transfer_data in self.data { + data.push(TxShieldingTransferData { + source: chain_ctx.get(&transfer_data.source), + token: chain_ctx.get(&transfer_data.token), + amount: transfer_data.amount, + }); + } + Ok(TxShieldingTransfer:: { tx, - source: chain_ctx.get(&self.source), + data, target: chain_ctx.get(&self.target), - token: chain_ctx.get(&self.token), - amount: self.amount, tx_code_path: self.tx_code_path.to_path_buf(), }) } @@ -4474,12 +4521,16 @@ pub mod args { let token = TOKEN.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let tx_code_path = PathBuf::from(TX_SHIELDING_TRANSFER_WASM); - Self { - tx, + let data = vec![TxShieldingTransferData { source, - target, token, amount, + }]; + + Self { + tx, + data, + target, tx_code_path, } } @@ -4514,14 +4565,27 @@ pub mod args { ctx: &mut Context, ) -> Result, Self::Error> { let tx = self.tx.to_sdk(ctx)?; + let mut data = vec![]; let chain_ctx = ctx.borrow_mut_chain_or_exit(); + for transfer_data in self.data { + data.push(TxUnshieldingTransferData { + target: chain_ctx.get(&transfer_data.target), + token: chain_ctx.get(&transfer_data.token), + amount: transfer_data.amount, + }); + } + let gas_spending_keys = self + .gas_spending_keys + .iter() + .map(|key| chain_ctx.get_cached(key)) + .collect(); + Ok(TxUnshieldingTransfer:: { tx, + data, + gas_spending_keys, source: chain_ctx.get_cached(&self.source), - target: chain_ctx.get(&self.target), - token: chain_ctx.get(&self.token), - amount: self.amount, tx_code_path: self.tx_code_path.to_path_buf(), }) } @@ -4535,12 +4599,21 @@ pub mod args { let token = TOKEN.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let tx_code_path = PathBuf::from(TX_UNSHIELDING_TRANSFER_WASM); - Self { - tx, - source, + let data = vec![TxUnshieldingTransferData { target, token, amount, + }]; + let mut gas_spending_keys = vec![]; + if let Some(key) = GAS_SPENDING_KEY.parse(matches) { + gas_spending_keys.push(key); + } + + Self { + tx, + source, + data, + gas_spending_keys, tx_code_path, } } @@ -4563,6 +4636,10 @@ pub mod args { .def() .help(wrap!("The amount to transfer in decimal.")), ) + .arg(GAS_SPENDING_KEY.def().help(wrap!( + "The optional spending key that will be used in addition \ + to the source for gas payment." 
+ ))) } } @@ -4575,6 +4652,11 @@ pub mod args { ) -> Result, Self::Error> { let tx = self.tx.to_sdk(ctx)?; let chain_ctx = ctx.borrow_mut_chain_or_exit(); + let gas_spending_keys = self + .gas_spending_keys + .iter() + .map(|key| chain_ctx.get_cached(key)) + .collect(); Ok(TxIbcTransfer:: { tx, @@ -4588,6 +4670,7 @@ pub mod args { timeout_sec_offset: self.timeout_sec_offset, refund_target: chain_ctx.get_opt(&self.refund_target), memo: self.memo, + gas_spending_keys, tx_code_path: self.tx_code_path.to_path_buf(), }) } @@ -4609,6 +4692,10 @@ pub mod args { std::fs::read_to_string(path) .expect("Expected a file at given path") }); + let mut gas_spending_keys = vec![]; + if let Some(key) = GAS_SPENDING_KEY.parse(matches) { + gas_spending_keys.push(key); + } let tx_code_path = PathBuf::from(TX_IBC_WASM); Self { tx, @@ -4622,6 +4709,7 @@ pub mod args { timeout_sec_offset, refund_target, memo, + gas_spending_keys, tx_code_path, } } @@ -4658,6 +4746,11 @@ pub mod args { .arg(IBC_TRANSFER_MEMO_PATH.def().help(wrap!( "The path for the memo field of ICS20 transfer." ))) + .arg(GAS_SPENDING_KEY.def().help(wrap!( + "The optional spending key that will be used in addition \ + to the source for gas payment (if this is a shielded \ + action)." + ))) } } diff --git a/crates/apps_lib/src/client/rpc.rs b/crates/apps_lib/src/client/rpc.rs index 941fa83444..d9b17aa0bd 100644 --- a/crates/apps_lib/src/client/rpc.rs +++ b/crates/apps_lib/src/client/rpc.rs @@ -660,16 +660,16 @@ pub async fn query_protocol_parameters( .expect("Parameter should be defined."); display_line!(context.io(), "{:4}Max block gas: {:?}", "", max_block_gas); - let key = param_storage::get_fee_unshielding_gas_limit_key(); - let fee_unshielding_gas_limit: u64 = + let key = param_storage::get_masp_fee_payment_gas_limit_key(); + let masp_fee_payment_gas_limit: u64 = query_storage_value(context.client(), &key) .await .expect("Parameter should be defined."); display_line!( context.io(), - "{:4}Fee unshielding gas limit: {:?}", + "{:4}Masp fee payment gas limit: {:?}", "", - fee_unshielding_gas_limit + masp_fee_payment_gas_limit ); let key = param_storage::get_gas_cost_key(); diff --git a/crates/apps_lib/src/client/tx.rs b/crates/apps_lib/src/client/tx.rs index 80e6e52e09..12424f5b5c 100644 --- a/crates/apps_lib/src/client/tx.rs +++ b/crates/apps_lib/src/client/tx.rs @@ -743,7 +743,26 @@ pub async fn submit_transparent_transfer( namada: &impl Namada, args: args::TxTransparentTransfer, ) -> Result<(), error::Error> { - submit_reveal_aux(namada, args.tx.clone(), &args.source).await?; + if args.data.len() > 1 { + // TODO(namada#3379): Vectorized transfers are not yet supported in the + // CLI + return Err(error::Error::Other( + "Unexpected vectorized transparent transfer".to_string(), + )); + } + + submit_reveal_aux( + namada, + args.tx.clone(), + &args + .data + .first() + .ok_or_else(|| { + error::Error::Other("Missing transfer data".to_string()) + })? 
+ .source, + ) + .await?; let (mut tx, signing_data) = args.clone().build(namada).await?; diff --git a/crates/apps_lib/src/config/genesis.rs b/crates/apps_lib/src/config/genesis.rs index d6e5d97046..cb5e564c9e 100644 --- a/crates/apps_lib/src/config/genesis.rs +++ b/crates/apps_lib/src/config/genesis.rs @@ -313,8 +313,8 @@ pub struct Parameters { pub masp_epoch_multiplier: u64, /// Maximum amount of signatures per transaction pub max_signatures_per_transaction: u8, - /// Fee unshielding gas limit - pub fee_unshielding_gas_limit: u64, + /// The gas limit for a masp transaction paying fees + pub masp_fee_payment_gas_limit: u64, /// Map of the cost per gas unit for every token allowed for fee payment pub minimum_gas_price: BTreeMap, } diff --git a/crates/apps_lib/src/config/genesis/chain.rs b/crates/apps_lib/src/config/genesis/chain.rs index 690d67909f..a5c088f8ee 100644 --- a/crates/apps_lib/src/config/genesis/chain.rs +++ b/crates/apps_lib/src/config/genesis/chain.rs @@ -304,7 +304,7 @@ impl Finalized { epochs_per_year, masp_epoch_multiplier, max_signatures_per_transaction, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, max_block_gas, minimum_gas_price, max_tx_bytes, @@ -350,7 +350,7 @@ impl Finalized { masp_epoch_multiplier, max_proposal_bytes, max_signatures_per_transaction, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, max_block_gas, minimum_gas_price: minimum_gas_price .iter() diff --git a/crates/apps_lib/src/config/genesis/templates.rs b/crates/apps_lib/src/config/genesis/templates.rs index db5ac13772..2b48a6188b 100644 --- a/crates/apps_lib/src/config/genesis/templates.rs +++ b/crates/apps_lib/src/config/genesis/templates.rs @@ -299,8 +299,8 @@ pub struct ChainParams { pub max_signatures_per_transaction: u8, /// Max gas for block pub max_block_gas: u64, - /// Fee unshielding gas limit - pub fee_unshielding_gas_limit: u64, + /// Gas limit of a masp transaction paying fees + pub masp_fee_payment_gas_limit: u64, /// Map of the cost per gas unit for every token allowed for fee payment pub minimum_gas_price: T::GasMinimums, } @@ -324,7 +324,7 @@ impl ChainParams { masp_epoch_multiplier, max_signatures_per_transaction, max_block_gas, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, minimum_gas_price, } = self; let mut min_gas_prices = BTreeMap::default(); @@ -370,7 +370,7 @@ impl ChainParams { masp_epoch_multiplier, max_signatures_per_transaction, max_block_gas, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, minimum_gas_price: min_gas_prices, }) } diff --git a/crates/benches/host_env.rs b/crates/benches/host_env.rs index 1eee561163..48e521cfd3 100644 --- a/crates/benches/host_env.rs +++ b/crates/benches/host_env.rs @@ -3,7 +3,7 @@ use namada::core::account::AccountPublicKeysMap; use namada::core::address; use namada::core::collections::{HashMap, HashSet}; use namada::ledger::storage::DB; -use namada::token::{Amount, TransparentTransfer}; +use namada::token::{Amount, TransparentTransfer, TransparentTransferData}; use namada::tx::Authorization; use namada::vm::wasm::TxCache; use namada_apps_lib::wallet::defaults; @@ -18,12 +18,12 @@ use namada_node::bench_utils::{ // transaction fn tx_section_signature_validation(c: &mut Criterion) { let shell = BenchShell::default(); - let transfer_data = TransparentTransfer { + let transfer_data = TransparentTransfer(vec![TransparentTransferData { source: defaults::albert_address(), target: defaults::bertha_address(), token: address::testing::nam(), amount: Amount::native_whole(500).native_denominated(), - }; + 
}]); let tx = shell.generate_tx( TX_TRANSPARENT_TRANSFER_WASM, transfer_data, diff --git a/crates/benches/native_vps.rs b/crates/benches/native_vps.rs index 298c37fcf5..aba3cb9802 100644 --- a/crates/benches/native_vps.rs +++ b/crates/benches/native_vps.rs @@ -55,7 +55,7 @@ use namada::sdk::masp_primitives::merkle_tree::CommitmentTree; use namada::sdk::masp_primitives::transaction::Transaction; use namada::sdk::masp_proofs::sapling::SaplingVerificationContextInner; use namada::state::{Epoch, StorageRead, StorageWrite, TxIndex}; -use namada::token::{Amount, TransparentTransfer}; +use namada::token::{Amount, TransparentTransfer, TransparentTransferData}; use namada::tx::{BatchedTx, Code, Section, Tx}; use namada_apps_lib::wallet::defaults; use namada_node::bench_utils::{ @@ -476,12 +476,12 @@ fn vp_multitoken(c: &mut Criterion) { let transfer = shell.generate_tx( TX_TRANSPARENT_TRANSFER_WASM, - TransparentTransfer { + TransparentTransfer(vec![TransparentTransferData { source: defaults::albert_address(), target: defaults::bertha_address(), token: address::testing::nam(), amount: Amount::native_whole(1000).native_denominated(), - }, + }]), None, None, vec![&defaults::albert_keypair()], diff --git a/crates/benches/process_wrapper.rs b/crates/benches/process_wrapper.rs index 9510ee3125..98ee806d82 100644 --- a/crates/benches/process_wrapper.rs +++ b/crates/benches/process_wrapper.rs @@ -3,7 +3,10 @@ use namada::core::address; use namada::core::key::RefTo; use namada::core::storage::BlockHeight; use namada::core::time::DateTimeUtc; -use namada::token::{Amount, DenominatedAmount, TransparentTransfer}; +use namada::state::TxIndex; +use namada::token::{ + Amount, DenominatedAmount, TransparentTransfer, TransparentTransferData, +}; use namada::tx::data::{Fee, WrapperTx}; use namada::tx::Authorization; use namada_apps_lib::wallet::defaults; @@ -19,12 +22,12 @@ fn process_tx(c: &mut Criterion) { let mut batched_tx = shell.generate_tx( TX_TRANSPARENT_TRANSFER_WASM, - TransparentTransfer { + TransparentTransfer(vec![TransparentTransferData { source: defaults::albert_address(), target: defaults::bertha_address(), token: address::testing::nam(), amount: Amount::native_whole(1).native_denominated(), - }, + }]), None, None, vec![&defaults::albert_keypair()], @@ -78,6 +81,7 @@ fn process_tx(c: &mut Criterion) { shell .check_proposal_tx( &wrapper, + &TxIndex::default(), validation_meta, temp_state, datetime, diff --git a/crates/core/src/masp.rs b/crates/core/src/masp.rs index c31734a803..fa533f25c4 100644 --- a/crates/core/src/masp.rs +++ b/crates/core/src/masp.rs @@ -368,7 +368,15 @@ impl<'de> serde::Deserialize<'de> for PaymentAddress { /// Wrapper for masp_primitive's ExtendedSpendingKey #[derive( - Clone, Debug, Copy, BorshSerialize, BorshDeserialize, BorshDeserializer, + Clone, + Debug, + Copy, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + Hash, + Eq, + PartialEq, )] pub struct ExtendedSpendingKey(masp_primitives::zip32::ExtendedSpendingKey); @@ -433,7 +441,7 @@ impl<'de> serde::Deserialize<'de> for ExtendedSpendingKey { } /// Represents a source of funds for a transfer -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Hash, Eq, PartialEq)] pub enum TransferSource { /// A transfer coming from a transparent address Address(Address), @@ -479,7 +487,7 @@ impl Display for TransferSource { } /// Represents a target for the funds of a transfer -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Hash, Eq, PartialEq)] pub enum TransferTarget { /// A transfer going to a transparent address 
Address(Address), diff --git a/crates/core/src/parameters.rs b/crates/core/src/parameters.rs index aaf2c294f8..8ba72ee660 100644 --- a/crates/core/src/parameters.rs +++ b/crates/core/src/parameters.rs @@ -51,8 +51,8 @@ pub struct Parameters { pub masp_epoch_multiplier: u64, /// Maximum number of signature per transaction pub max_signatures_per_transaction: u8, - /// Fee unshielding gas limit - pub fee_unshielding_gas_limit: u64, + /// The gas limit for a masp transaction paying fees + pub masp_fee_payment_gas_limit: u64, /// Map of the cost per gas unit for every token allowed for fee payment pub minimum_gas_price: BTreeMap, /// Enable the native token transfer if it is true diff --git a/crates/core/src/token.rs b/crates/core/src/token.rs index 69588b9518..1d87ca2ce0 100644 --- a/crates/core/src/token.rs +++ b/crates/core/src/token.rs @@ -231,13 +231,16 @@ impl Amount { Self { raw: Uint(raw) } } - /// Given a u128 and [`MaspDigitPos`], construct the corresponding + /// Given a i128 and [`MaspDigitPos`], construct the corresponding /// amount. - pub fn from_masp_denominated_u128( - val: u128, + pub fn from_masp_denominated_i128( + val: i128, denom: MaspDigitPos, ) -> Option { - let lo = u64::try_from(val).ok()?; + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let lo = val as u64; + #[allow(clippy::cast_sign_loss)] let hi = (val >> 64) as u64; let lo_pos = denom as usize; let hi_pos = lo_pos.checked_add(1)?; @@ -922,18 +925,6 @@ impl MaspDigitPos { let amount = amount.into(); amount.raw.0[*self as usize] } - - /// Get the corresponding u64 word from the input uint256. - pub fn denominate_i128(&self, amount: &Change) -> i128 { - let val = i128::from(amount.abs().0[*self as usize]); - if Change::is_negative(amount) { - // Cannot panic as the value is limited to `u64` range - #[allow(clippy::arithmetic_side_effects)] - -val - } else { - val - } - } } impl From for IbcAmount { diff --git a/crates/gas/src/lib.rs b/crates/gas/src/lib.rs index 89e24209ff..987dee5fab 100644 --- a/crates/gas/src/lib.rs +++ b/crates/gas/src/lib.rs @@ -74,6 +74,9 @@ pub const STORAGE_ACCESS_GAS_PER_BYTE: u64 = /// The cost of writing data to storage, per byte pub const STORAGE_WRITE_GAS_PER_BYTE: u64 = MEMORY_ACCESS_GAS_PER_BYTE + 69_634 + STORAGE_OCCUPATION_GAS_PER_BYTE; +/// The cost of removing data from storage, per byte +pub const STORAGE_DELETE_GAS_PER_BYTE: u64 = + MEMORY_ACCESS_GAS_PER_BYTE + 69_634 + PHYSICAL_STORAGE_LATENCY_PER_BYTE; /// The cost of verifying a single signature of a transaction pub const VERIFY_TX_SIG_GAS: u64 = 594_290; /// The cost for requesting one more page in wasm (64KiB) diff --git a/crates/ibc/src/actions.rs b/crates/ibc/src/actions.rs index 63696343a0..0269c9879a 100644 --- a/crates/ibc/src/actions.rs +++ b/crates/ibc/src/actions.rs @@ -222,6 +222,7 @@ where let data = MsgTransfer { message, transfer: None, + fee_unshield: None, } .serialize_to_vec(); diff --git a/crates/ibc/src/lib.rs b/crates/ibc/src/lib.rs index 15a673c9c6..6fd9b17969 100644 --- a/crates/ibc/src/lib.rs +++ b/crates/ibc/src/lib.rs @@ -76,7 +76,7 @@ use ibc::primitives::proto::Any; pub use ibc::*; pub use msg::*; use namada_core::address::{self, Address}; -use namada_token::ShieldingTransfer; +use namada_token::{ShieldingTransfer, UnshieldingTransferData}; pub use nft::*; use prost::Message; use thiserror::Error; @@ -152,7 +152,10 @@ where pub fn execute( &mut self, tx_data: &[u8], - ) -> Result, Error> { + ) -> Result< + (Option, Option), + Error, + > { let message = 
decode_message(tx_data)?; match &message { IbcMessage::Transfer(msg) => { @@ -167,7 +170,7 @@ where msg.message.clone(), ) .map_err(Error::TokenTransfer)?; - Ok(msg.transfer.clone()) + Ok((msg.transfer.clone(), msg.fee_unshield.clone())) } IbcMessage::NftTransfer(msg) => { let mut nft_transfer_ctx = @@ -178,7 +181,7 @@ where msg.message.clone(), ) .map_err(Error::NftTransfer)?; - Ok(msg.transfer.clone()) + Ok((msg.transfer.clone(), msg.fee_unshield.clone())) } IbcMessage::RecvPacket(msg) => { let envelope = @@ -191,7 +194,7 @@ where } else { None }; - Ok(transfer) + Ok((transfer, None)) } IbcMessage::AckPacket(msg) => { let envelope = @@ -205,7 +208,7 @@ where } else { None }; - Ok(transfer) + Ok((transfer, None)) } IbcMessage::Timeout(msg) => { let envelope = MsgEnvelope::Packet(PacketMsg::Timeout( @@ -213,12 +216,12 @@ where )); execute(&mut self.ctx, &mut self.router, envelope) .map_err(|e| Error::Context(Box::new(e)))?; - Ok(msg.transfer.clone()) + Ok((msg.transfer.clone(), None)) } IbcMessage::Envelope(envelope) => { execute(&mut self.ctx, &mut self.router, *envelope.clone()) .map_err(|e| Error::Context(Box::new(e)))?; - Ok(None) + Ok((None, None)) } } } diff --git a/crates/ibc/src/msg.rs b/crates/ibc/src/msg.rs index 7502cf1e31..e300dc56e1 100644 --- a/crates/ibc/src/msg.rs +++ b/crates/ibc/src/msg.rs @@ -7,7 +7,7 @@ use ibc::core::channel::types::msgs::{ }; use ibc::core::handler::types::msgs::MsgEnvelope; use ibc::primitives::proto::Protobuf; -use namada_token::ShieldingTransfer; +use namada_token::{ShieldingTransfer, UnshieldingTransferData}; /// The different variants of an Ibc message pub enum IbcMessage { @@ -32,6 +32,8 @@ pub struct MsgTransfer { pub message: IbcMsgTransfer, /// Shieleded transfer for MASP transaction pub transfer: Option, + /// Optional data for masp fee payment in the source chain + pub fee_unshield: Option, } impl BorshSerialize for MsgTransfer { @@ -40,7 +42,11 @@ impl BorshSerialize for MsgTransfer { writer: &mut W, ) -> std::io::Result<()> { let encoded_msg = self.message.clone().encode_vec(); - let members = (encoded_msg, self.transfer.clone()); + let members = ( + encoded_msg, + self.transfer.clone(), + self.fee_unshield.clone(), + ); BorshSerialize::serialize(&members, writer) } } @@ -50,11 +56,18 @@ impl BorshDeserialize for MsgTransfer { reader: &mut R, ) -> std::io::Result { use std::io::{Error, ErrorKind}; - let (msg, transfer): (Vec, Option) = - BorshDeserialize::deserialize_reader(reader)?; + let (msg, transfer, fee_unshield): ( + Vec, + Option, + Option, + ) = BorshDeserialize::deserialize_reader(reader)?; let message = IbcMsgTransfer::decode_vec(&msg) .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; - Ok(Self { message, transfer }) + Ok(Self { + message, + transfer, + fee_unshield, + }) } } @@ -65,6 +78,8 @@ pub struct MsgNftTransfer { pub message: IbcMsgNftTransfer, /// Shieleded transfer for MASP transaction pub transfer: Option, + /// Optional data for masp fee payment in the source chain + pub fee_unshield: Option, } impl BorshSerialize for MsgNftTransfer { @@ -73,7 +88,11 @@ impl BorshSerialize for MsgNftTransfer { writer: &mut W, ) -> std::io::Result<()> { let encoded_msg = self.message.clone().encode_vec(); - let members = (encoded_msg, self.transfer.clone()); + let members = ( + encoded_msg, + self.transfer.clone(), + self.fee_unshield.clone(), + ); BorshSerialize::serialize(&members, writer) } } @@ -83,11 +102,18 @@ impl BorshDeserialize for MsgNftTransfer { reader: &mut R, ) -> std::io::Result { use std::io::{Error, 
ErrorKind}; - let (msg, transfer): (Vec, Option) = - BorshDeserialize::deserialize_reader(reader)?; + let (msg, transfer, fee_unshield): ( + Vec, + Option, + Option, + ) = BorshDeserialize::deserialize_reader(reader)?; let message = IbcMsgNftTransfer::decode_vec(&msg) .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; - Ok(Self { message, transfer }) + Ok(Self { + message, + transfer, + fee_unshield, + }) } } diff --git a/crates/light_sdk/src/transaction/transfer.rs b/crates/light_sdk/src/transaction/transfer.rs index 84475660ea..4e56316ea9 100644 --- a/crates/light_sdk/src/transaction/transfer.rs +++ b/crates/light_sdk/src/transaction/transfer.rs @@ -1,7 +1,11 @@ use namada_sdk::address::Address; use namada_sdk::hash::Hash; use namada_sdk::key::common; -use namada_sdk::token::DenominatedAmount; +use namada_sdk::token::transaction::Transaction; +use namada_sdk::token::ShieldingTransferData; +pub use namada_sdk::token::{ + DenominatedAmount, TransparentTransfer, UnshieldingTransferData, +}; use namada_sdk::tx::data::GasLimit; use namada_sdk::tx::{ Authorization, Tx, TxError, TX_SHIELDED_TRANSFER_WASM, @@ -19,81 +23,80 @@ pub struct Transfer(Tx); impl Transfer { /// Build a transparent transfer transaction from the given parameters pub fn transparent( - source: Address, - target: Address, - token: Address, - amount: DenominatedAmount, + transfers: TransparentTransfer, args: GlobalArgs, ) -> Self { - let data = namada_sdk::token::TransparentTransfer { - source, - target, - token, - amount, - }; - Self(transaction::build_tx( args, - data, + transfers, TX_TRANSPARENT_TRANSFER_WASM.to_string(), )) } /// Build a shielded transfer transaction from the given parameters - pub fn shielded(shielded_section_hash: Hash, args: GlobalArgs) -> Self { + pub fn shielded( + fee_unshield: Option, + shielded_section_hash: Hash, + transaction: Transaction, + args: GlobalArgs, + ) -> Self { let data = namada_sdk::token::ShieldedTransfer { + fee_unshield, section_hash: shielded_section_hash, }; - Self(transaction::build_tx( + let mut tx = transaction::build_tx( args, data, TX_SHIELDED_TRANSFER_WASM.to_string(), - )) + ); + tx.add_masp_tx_section(transaction); + + Self(tx) } /// Build a shielding transfer transaction from the given parameters pub fn shielding( - source: Address, - token: Address, - amount: DenominatedAmount, + transfers: Vec, shielded_section_hash: Hash, + transaction: Transaction, args: GlobalArgs, ) -> Self { - let data = namada_sdk::token::ShieldingTransfer { - source, - token, - amount, + let data = namada_sdk::token::ShieldingMultiTransfer { + data: transfers, shielded_section_hash, }; - Self(transaction::build_tx( + let mut tx = transaction::build_tx( args, data, TX_SHIELDING_TRANSFER_WASM.to_string(), - )) + ); + tx.add_masp_tx_section(transaction); + + Self(tx) } /// Build an unshielding transfer transaction from the given parameters pub fn unshielding( - target: Address, - token: Address, - amount: DenominatedAmount, + transfers: Vec, shielded_section_hash: Hash, + transaction: Transaction, args: GlobalArgs, ) -> Self { - let data = namada_sdk::token::UnshieldingTransfer { - target, - token, - amount, + let data = namada_sdk::token::UnshieldingMultiTransfer { + data: transfers, shielded_section_hash, }; - Self(transaction::build_tx( + let mut tx = transaction::build_tx( args, data, TX_UNSHIELDING_TRANSFER_WASM.to_string(), - )) + ); + tx.add_masp_tx_section(transaction); + + Self(tx) } /// Get the bytes to sign for the given transaction diff --git 
a/crates/namada/src/ledger/governance/mod.rs b/crates/namada/src/ledger/governance/mod.rs index 93d566f9c4..0e762b1918 100644 --- a/crates/namada/src/ledger/governance/mod.rs +++ b/crates/namada/src/ledger/governance/mod.rs @@ -1334,7 +1334,7 @@ mod test { [(0, keypair_1())].into_iter().collect(), None, ))); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, @@ -1368,7 +1368,7 @@ mod test { .write_log_mut() .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); } #[cfg(test)] @@ -1589,7 +1589,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1609,7 +1609,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let governance_balance_key = balance_key(&nam(), &ADDRESS); @@ -1685,7 +1685,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1705,9 +1705,9 @@ mod test { assert_matches!(&result, Err(_)); if result.is_err() { - state.write_log_mut().drop_tx(); + state.drop_tx_batch(); } else { - state.write_log_mut().commit_tx(); + state.commit_tx_batch(); } state.commit_block().unwrap(); @@ -1784,7 +1784,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1803,9 +1803,9 @@ mod test { assert_matches!(&result, Ok(_)); if result.is_err() { - state.write_log_mut().drop_tx(); + state.drop_tx_batch(); } else { - state.write_log_mut().commit_tx(); + state.commit_tx_batch(); } state.commit_block().unwrap(); @@ -1882,7 +1882,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1960,7 +1960,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2038,7 +2038,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2134,7 +2134,7 @@ mod test { true, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2230,7 +2230,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2308,7 +2308,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2328,7 +2328,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = state.in_mem().get_block_height().0 + (7 * 2); @@ -2359,7 +2359,7 @@ mod test { verifiers.clear(); verifiers.insert(validator_address); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2437,7 +2437,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = 
Ctx::new( &ADDRESS, &state, @@ -2457,7 +2457,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = state.in_mem().get_block_height().0 + (7 * 2); @@ -2488,7 +2488,7 @@ mod test { verifiers.clear(); verifiers.insert(validator_address); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2566,7 +2566,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2586,7 +2586,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = state.in_mem().get_block_height().0 + (7 * 2); @@ -2617,7 +2617,7 @@ mod test { verifiers.clear(); verifiers.insert(validator_address); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2695,7 +2695,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2715,7 +2715,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = state.in_mem().get_block_height().0 + (9 * 2); @@ -2763,7 +2763,7 @@ mod test { verifiers.clear(); verifiers.insert(delegator_address); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2841,7 +2841,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2861,7 +2861,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = state.in_mem().get_block_height().0 + (10 * 2); @@ -2909,7 +2909,7 @@ mod test { verifiers.clear(); verifiers.insert(delegator_address); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2987,7 +2987,7 @@ mod test { false, ); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -3007,7 +3007,7 @@ mod test { Ok(_) ); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().unwrap(); let height = state.in_mem().get_block_height().0 + (10 * 2); @@ -3055,7 +3055,7 @@ mod test { verifiers.clear(); verifiers.insert(validator_address); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, diff --git a/crates/namada/src/ledger/mod.rs b/crates/namada/src/ledger/mod.rs index 2f952e4f99..ee2a17264d 100644 --- a/crates/namada/src/ledger/mod.rs +++ b/crates/namada/src/ledger/mod.rs @@ -26,7 +26,7 @@ mod dry_run_tx { use namada_gas::Gas; use namada_sdk::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; use namada_state::{DBIter, ResultExt, StorageHasher, DB}; - use namada_tx::data::{GasLimit, TxResult}; + use namada_tx::data::{ExtendedTxResult, GasLimit, TxResult}; use super::protocol; use crate::vm::wasm::{TxCache, VpCache}; @@ -55,22 +55,29 @@ mod dry_run_tx { tx.validate_tx().into_storage_result()?; // Wrapper dry 
run to allow estimating the gas cost of a transaction - let (mut tx_result, tx_gas_meter) = match tx.header().tx_type { + let (extended_tx_result, tx_gas_meter) = match tx.header().tx_type { TxType::Wrapper(wrapper) => { let gas_limit = Gas::try_from(wrapper.gas_limit).into_storage_result()?; let tx_gas_meter = RefCell::new(TxGasMeter::new(gas_limit)); + let mut shell_params = ShellParams::new( + &tx_gas_meter, + &mut temp_state, + &mut ctx.vp_wasm_cache, + &mut ctx.tx_wasm_cache, + ); let tx_result = protocol::apply_wrapper_tx( &tx, &wrapper, &request.data, + &TxIndex::default(), &tx_gas_meter, - &mut temp_state, + &mut shell_params, None, ) .into_storage_result()?; - temp_state.write_log_mut().commit_tx(); + temp_state.write_log_mut().commit_tx_to_batch(); let available_gas = tx_gas_meter.borrow().get_available_gas(); (tx_result, TxGasMeter::new_from_sub_limit(available_gas)) } @@ -81,12 +88,21 @@ mod dry_run_tx { namada_parameters::get_max_block_gas(ctx.state)?; let gas_limit = Gas::try_from(GasLimit::from(max_block_gas)) .into_storage_result()?; - (TxResult::default(), TxGasMeter::new(gas_limit)) + ( + TxResult::default().to_extended_result(None), + TxGasMeter::new(gas_limit), + ) } }; + let ExtendedTxResult { + mut tx_result, + ref masp_tx_refs, + } = extended_tx_result; let tx_gas_meter = RefCell::new(tx_gas_meter); - for cmt in tx.commitments() { + for cmt in + crate::ledger::protocol::get_batch_txs_to_execute(&tx, masp_tx_refs) + { let batched_tx = tx.batch_ref_tx(cmt); let batched_tx_result = protocol::apply_wasm_tx( batched_tx, @@ -338,7 +354,7 @@ mod test { let balance = token::Amount::native_whole(1000); StorageWrite::write(&mut client.state, &balance_key, balance)?; // It has to be committed to be visible in a query - client.state.commit_tx(); + client.state.commit_tx_batch(); client.state.commit_block().unwrap(); // ... 
there should be the same value now let read_balance = RPC diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs index e97a7cd92c..f24c4e0e49 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs @@ -202,7 +202,7 @@ mod test_nuts { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::<_, WasmCacheRwAccess>::new( &Address::Internal(InternalAddress::Nut(DAI_ERC20_ETH_ADDRESS)), &state, diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs index e9d9592331..21e89fcfa2 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -393,7 +393,7 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let vp = EthBridge { ctx: setup_ctx( batched_tx.tx, @@ -447,7 +447,7 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let vp = EthBridge { ctx: setup_ctx( batched_tx.tx, @@ -504,7 +504,7 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let vp = EthBridge { ctx: setup_ctx( batched_tx.tx, diff --git a/crates/namada/src/ledger/native_vp/ibc/mod.rs b/crates/namada/src/ledger/native_vp/ibc/mod.rs index 3f5bc24cc0..53a1861d47 100644 --- a/crates/namada/src/ledger/native_vp/ibc/mod.rs +++ b/crates/namada/src/ledger/native_vp/ibc/mod.rs @@ -628,7 +628,7 @@ mod tests { .write_log_mut() .write(&client_update_height_key, host_height.encode_vec()) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); } fn get_connection_id() -> ConnectionId { @@ -952,7 +952,7 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1028,7 +1028,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1054,7 +1054,7 @@ mod tests { let mut keys_changed = BTreeSet::new(); let mut state = init_storage(); insert_init_client(&mut state); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block @@ -1153,7 +1153,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1178,7 +1178,7 @@ mod tests { let mut keys_changed = BTreeSet::new(); let mut state = init_storage(); insert_init_client(&mut state); - 
state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1262,7 +1262,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1356,7 +1356,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1381,7 +1381,7 @@ mod tests { let mut keys_changed = BTreeSet::new(); let mut state = init_storage(); insert_init_client(&mut state); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1476,7 +1476,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1510,7 +1510,7 @@ mod tests { .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1586,7 +1586,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1619,7 +1619,7 @@ mod tests { .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1681,7 +1681,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1715,7 +1715,7 @@ mod tests { .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1804,7 +1804,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1837,7 +1837,7 @@ mod tests { .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -1926,7 +1926,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -1967,7 +1967,7 @@ mod tests { .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2033,7 +2033,7 @@ mod tests { 
wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2074,7 +2074,7 @@ mod tests { .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2135,7 +2135,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2187,7 +2187,7 @@ mod tests { .write_log_mut() .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2281,6 +2281,7 @@ mod tests { let tx_data = MsgTransfer { message: msg, transfer: None, + fee_unshield: None, } .serialize_to_vec(); @@ -2296,7 +2297,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2337,7 +2338,7 @@ mod tests { .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2507,7 +2508,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2582,7 +2583,7 @@ mod tests { .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2662,7 +2663,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2745,7 +2746,7 @@ mod tests { .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2824,7 +2825,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -2907,7 +2908,7 @@ mod tests { .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -2987,7 +2988,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -3054,7 +3055,7 @@ mod tests { .write(&metadata_key, metadata.serialize_to_vec()) .expect("write failed"); - 
state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -3150,6 +3151,7 @@ mod tests { let tx_data = MsgNftTransfer { message: msg, transfer: None, + fee_unshield: None, } .serialize_to_vec(); @@ -3165,7 +3167,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, @@ -3206,7 +3208,7 @@ mod tests { .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - state.write_log_mut().commit_tx(); + state.write_log_mut().commit_batch(); state.commit_block().expect("commit failed"); // for next block state @@ -3399,7 +3401,7 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = Ctx::new( &ADDRESS, &state, diff --git a/crates/namada/src/ledger/native_vp/masp.rs b/crates/namada/src/ledger/native_vp/masp.rs index 51e86f018b..5f0d15abef 100644 --- a/crates/namada/src/ledger/native_vp/masp.rs +++ b/crates/namada/src/ledger/native_vp/masp.rs @@ -389,9 +389,6 @@ where return Err(error); } - // The Sapling value balance adds to the transparent tx pool - let mut transparent_tx_pool = shielded_tx.sapling_value_balance(); - // Check the validity of the keys and get the transfer data let mut changed_balances = self.validate_state_and_get_transfer_data(keys_changed)?; @@ -436,7 +433,6 @@ where validate_transparent_bundle( &shielded_tx, &mut changed_balances, - &mut transparent_tx_pool, masp_epoch, conversion_state, &mut signers, @@ -523,31 +519,6 @@ where } } - // Ensure that the shielded transaction exactly balances - match transparent_tx_pool.partial_cmp(&I128Sum::zero()) { - None | Some(Ordering::Less) => { - let error = native_vp::Error::new_const( - "Transparent transaction value pool must be nonnegative. \ - Violation may be caused by transaction being constructed \ - in previous epoch. Maybe try again.", - ) - .into(); - tracing::debug!("{error}"); - // Section 3.4: The remaining value in the transparent - // transaction value pool MUST be nonnegative. - return Err(error); - } - Some(Ordering::Greater) => { - let error = native_vp::Error::new_const( - "Transaction fees cannot be paid inside MASP transaction.", - ) - .into(); - tracing::debug!("{error}"); - return Err(error); - } - _ => {} - } - // Verify the proofs verify_shielded_tx(&shielded_tx, |gas| self.ctx.charge_gas(gas)) .map_err(Error::NativeVpError) @@ -724,21 +695,23 @@ fn validate_transparent_output( // Update the transaction value pool and also ensure that the Transaction is // consistent with the balance changes. I.e. the transparent inputs are not more // than the initial balances and that the transparent outputs are not more than -// the final balances. +// the final balances. Also ensure that the sapling value balance is exactly 0. 
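// A minimal, self-contained sketch of the zero-balance check that this hunk
// moves into `validate_transparent_bundle`. The pool is flattened here to a
// single `i128`; the actual code seeds an `I128Sum` with the sapling value
// balance, adjusts it by the transparent vins/vouts, and compares it against
// `I128Sum::zero()`. The helper name is illustrative only.
use std::cmp::Ordering;

fn check_tx_pool_balanced(transparent_tx_pool: i128) -> Result<(), &'static str> {
    match transparent_tx_pool.cmp(&0) {
        // Section 3.4: the remaining value in the transparent transaction
        // value pool must be nonnegative.
        Ordering::Less => {
            Err("Transparent transaction value pool must be nonnegative")
        }
        // Any surplus is rejected as well: the shielded transaction must
        // balance exactly.
        Ordering::Greater => {
            Err("Transaction fees cannot be left on the MASP balance")
        }
        Ordering::Equal => Ok(()),
    }
}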
fn validate_transparent_bundle( shielded_tx: &Transaction, changed_balances: &mut ChangedBalances, - transparent_tx_pool: &mut I128Sum, epoch: MaspEpoch, conversion_state: &ConversionState, signers: &mut BTreeSet, ) -> Result<()> { + // The Sapling value balance adds to the transparent tx pool + let mut transparent_tx_pool = shielded_tx.sapling_value_balance(); + if let Some(transp_bundle) = shielded_tx.transparent_bundle() { for vin in transp_bundle.vin.iter() { validate_transparent_input( vin, changed_balances, - transparent_tx_pool, + &mut transparent_tx_pool, epoch, conversion_state, signers, @@ -749,13 +722,37 @@ fn validate_transparent_bundle( validate_transparent_output( out, changed_balances, - transparent_tx_pool, + &mut transparent_tx_pool, epoch, conversion_state, )?; } } - Ok(()) + + // Ensure that the shielded transaction exactly balances + match transparent_tx_pool.partial_cmp(&I128Sum::zero()) { + None | Some(Ordering::Less) => { + let error = native_vp::Error::new_const( + "Transparent transaction value pool must be nonnegative. \ + Violation may be caused by transaction being constructed in \ + previous epoch. Maybe try again.", + ) + .into(); + tracing::debug!("{error}"); + // The remaining value in the transparent transaction value pool + // MUST be nonnegative. + Err(error) + } + Some(Ordering::Greater) => { + let error = native_vp::Error::new_const( + "Transaction fees cannot be left on the MASP balance.", + ) + .into(); + tracing::debug!("{error}"); + Err(error) + } + _ => Ok(()), + } } // Apply the given Sapling value balance component to the accumulator diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index b19098e18d..ca637de835 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -3,25 +3,28 @@ use std::cell::RefCell; use std::collections::BTreeSet; use std::fmt::Debug; -use borsh_ext::BorshSerializeExt; use eyre::{eyre, WrapErr}; use namada_core::booleans::BoolResultUnitExt; use namada_core::hash::Hash; +use namada_core::masp::MaspTxRefs; use namada_events::extend::{ ComposeEvent, Height as HeightAttr, TxHash as TxHashAttr, }; use namada_events::EventLevel; use namada_gas::TxGasMeter; -use namada_state::StorageWrite; +use namada_state::TxWrites; use namada_token::event::{TokenEvent, TokenOperation, UserAccount}; +use namada_token::utils::is_masp_transfer; +use namada_tx::action::Read; use namada_tx::data::protocol::{ProtocolTx, ProtocolTxType}; use namada_tx::data::{ BatchResults, BatchedTxResult, ExtendedTxResult, TxResult, VpStatusFlags, VpsResult, WrapperTx, }; -use namada_tx::{BatchedTxRef, Tx}; +use namada_tx::{BatchedTxRef, Tx, TxCommitments}; use namada_vote_ext::EthereumTxData; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; +use smooth_operator::checked; use thiserror::Error; use crate::address::{Address, InternalAddress}; @@ -185,7 +188,7 @@ pub enum DispatchArgs<'a, CA: 'static + WasmCacheAccess + Sync> { tx_index: TxIndex, /// The result of the corresponding wrapper tx (missing if governance /// transaction) - wrapper_tx_result: Option>, + wrapper_tx_result: Option>, /// Vp cache vp_wasm_cache: &'a mut VpCache, /// Tx cache @@ -197,8 +200,14 @@ pub enum DispatchArgs<'a, CA: 'static + WasmCacheAccess + Sync> { wrapper: &'a WrapperTx, /// The transaction bytes for gas accounting tx_bytes: &'a [u8], + /// The tx index + tx_index: TxIndex, /// The block proposer block_proposer: &'a Address, + /// Vp cache + vp_wasm_cache: &'a mut VpCache, + /// 
Tx cache + tx_wasm_cache: &'a mut TxCache, }, } @@ -224,8 +233,6 @@ where tx_wasm_cache, } => { if let Some(tx_result) = wrapper_tx_result { - // TODO(namada#2597): handle masp fee payment in the first inner - // tx if necessary // Replay protection check on the batch let tx_hash = tx.raw_header_hash(); if state.write_log().has_replay_protection_entry(&tx_hash) { @@ -291,26 +298,49 @@ where DispatchArgs::Wrapper { wrapper, tx_bytes, + tx_index, block_proposer, + vp_wasm_cache, + tx_wasm_cache, } => { - let tx_result = apply_wrapper_tx( + let mut shell_params = ShellParams::new( + tx_gas_meter, + state, + vp_wasm_cache, + tx_wasm_cache, + ); + + apply_wrapper_tx( tx, wrapper, tx_bytes, + &tx_index, tx_gas_meter, - state, + &mut shell_params, Some(block_proposer), ) - .map_err(|e| Error::WrapperRunnerError(e.to_string()))?; - - Ok(tx_result.to_extended_result(None)) + .map_err(|e| Error::WrapperRunnerError(e.to_string()).into()) } } } +pub(crate) fn get_batch_txs_to_execute<'a>( + tx: &'a Tx, + masp_tx_refs: &MaspTxRefs, +) -> impl Iterator { + let mut batch_iter = tx.commitments().iter(); + if !masp_tx_refs.0.is_empty() { + // If fees were paid via masp skip the first transaction of the batch + // which has already been executed + batch_iter.next(); + } + + batch_iter +} + fn dispatch_inner_txs<'a, D, H, CA>( tx: &Tx, - tx_result: TxResult, + mut extended_tx_result: ExtendedTxResult, tx_index: TxIndex, tx_gas_meter: &'a RefCell, state: &'a mut WlState, @@ -322,11 +352,7 @@ where H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, { - let mut extended_tx_result = tx_result.to_extended_result(None); - - // TODO(namada#2597): handle masp fee payment in the first inner tx - // if necessary - for cmt in tx.commitments() { + for cmt in get_batch_txs_to_execute(tx, &extended_tx_result.masp_tx_refs) { match apply_wasm_tx( tx.batch_ref_tx(cmt), &tx_index, @@ -398,31 +424,55 @@ where /// - replay protection /// - fee payment /// - gas accounting -// TODO(namada#2597): this must signal to the caller if we need masp fee payment -// in the first inner tx of the batch -pub(crate) fn apply_wrapper_tx( +pub(crate) fn apply_wrapper_tx( tx: &Tx, wrapper: &WrapperTx, tx_bytes: &[u8], + tx_index: &TxIndex, tx_gas_meter: &RefCell, - state: &mut S, + shell_params: &mut ShellParams<'_, S, D, H, CA>, block_proposer: Option<&Address>, -) -> Result> +) -> Result> where - S: State + Sync, + S: State + Read + TxWrites + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, { - let wrapper_tx_hash = tx.header_hash(); - // Write wrapper tx hash to storage - state + shell_params + .state .write_log_mut() - .write_tx_hash(wrapper_tx_hash) + .write_tx_hash(tx.header_hash()) .expect("Error while writing tx hash to storage"); - // Charge fee before performing any fallible operations - charge_fee(wrapper, wrapper_tx_hash, state, block_proposer)?; + // Charge or check fees before propagating any possible error + let payment_result = match block_proposer { + Some(block_proposer) => { + transfer_fee(shell_params, block_proposer, tx, wrapper, tx_index) + } + None => check_fees(shell_params, tx, wrapper), + }; + + // Commit tx to the batch write log even in case of subsequent errors (if + // the fee payment failed instead, than the previous two functions must + // have already dropped the write log, leading this function call to be + // essentially a no-op) + shell_params.state.write_log_mut().commit_tx_to_batch(); + + let 
(batch_results, masp_tx_refs) = payment_result?.map_or_else( + || (BatchResults::default(), None), + |(batched_result, masp_section_ref)| { + let mut batch = BatchResults::default(); + batch.0.insert( + // Ok to unwrap cause if we have a batched result it means + // we've executed the first tx in the batch + tx.first_commitments().unwrap().get_hash(), + Ok(batched_result), + ); + (batch, Some(MaspTxRefs(vec![masp_section_ref]))) + }, + ); // Account for gas tx_gas_meter @@ -432,166 +482,167 @@ where Ok(TxResult { gas_used: tx_gas_meter.borrow().get_tx_consumed_gas(), - batch_results: BatchResults::default(), - }) -} - -/// Charge fee for the provided wrapper transaction. Returns error if: -/// - Fee amount overflows -/// - Not enough funds are available to pay the entire amount of the fee -/// - The accumulated fee amount to be credited to the block proposer overflows -fn charge_fee( - wrapper: &WrapperTx, - wrapper_tx_hash: Hash, - state: &mut S, - block_proposer: Option<&Address>, -) -> Result<()> -where - S: State + Sync, - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - // Charge or check fees before propagating any possible error - let payment_result = match block_proposer { - Some(block_proposer) => { - transfer_fee(state, block_proposer, wrapper, wrapper_tx_hash) - } - None => { - check_fees(state, wrapper)?; - Ok(()) - } - }; - - // Commit tx write log even in case of subsequent errors - state.write_log_mut().commit_tx(); - - payment_result + batch_results, + } + .to_extended_result(masp_tx_refs)) } /// Perform the actual transfer of fees from the fee payer to the block -/// proposer. -pub fn transfer_fee( - state: &mut S, +/// proposer. Drops the modifications if errors occur but does not commit since +/// we might want to drop things later. 
+pub fn transfer_fee( + shell_params: &mut ShellParams<'_, S, D, H, CA>, block_proposer: &Address, + tx: &Tx, wrapper: &WrapperTx, - wrapper_tx_hash: Hash, -) -> Result<()> + tx_index: &TxIndex, +) -> Result> where - S: State + StorageRead + StorageWrite, + S: State + + StorageRead + + TxWrites + + Read + + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, { - let balance = crate::token::read_balance( - state, - &wrapper.fee.token, - &wrapper.fee_payer(), - ) - .unwrap(); - - const FEE_PAYMENT_DESCRIPTOR: std::borrow::Cow<'static, str> = - std::borrow::Cow::Borrowed("wrapper-fee-payment"); - match wrapper.get_tx_fee() { Ok(fees) => { - let fees = - crate::token::denom_to_amount(fees, &wrapper.fee.token, state) - .map_err(|e| Error::FeeError(e.to_string()))?; + let fees = crate::token::denom_to_amount( + fees, + &wrapper.fee.token, + shell_params.state, + ) + .map_err(Error::StorageError)?; - let current_block_height = - state.in_mem().get_last_block_height().next_height(); + let balance = crate::token::read_balance( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + ) + .map_err(Error::StorageError)?; - if let Some(post_bal) = balance.checked_sub(fees) { - token_transfer( - state, + let (post_bal, valid_batched_tx_result) = if let Some(post_bal) = + balance.checked_sub(fees) + { + fee_token_transfer( + shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), block_proposer, fees, - ) - .map_err(|e| Error::FeeError(e.to_string()))?; + )?; - let target_post_balance = Some( - namada_token::read_balance( - state, + (Some(post_bal), None) + } else { + // See if the first inner transaction of the batch pays the fees + // with a masp unshield + if let Ok(Some(valid_batched_tx_result)) = + try_masp_fee_payment(shell_params, tx, tx_index) + { + // NOTE: Even if the unshielding was successful we could + // still fail in the transfer (e.g. cause the unshielded + // amount is not enough to cover the fees). In this case we + // want to drop the changes applied by the masp transaction + // and try to drain the fees from the transparent balance. + // Because of this we must NOT propagate errors from within + // this branch + let balance = crate::token::read_balance( + shell_params.state, &wrapper.fee.token, - block_proposer, + &wrapper.fee_payer(), ) - .map_err(Error::StorageError)? - .into(), - ); + .expect("Could not read balance key from storage"); + + // Ok to unwrap_or_default. 
In the default case, the only + // way the checked op can return Some is if fees are 0, but + // if that's the case then we would have never reached this + // branch of execution + let post_bal = balance.checked_sub(fees).filter(|_| { + fee_token_transfer( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + block_proposer, + fees, + ) + .is_ok() + }); - state.write_log_mut().emit_event( - TokenEvent { - descriptor: FEE_PAYMENT_DESCRIPTOR, - level: EventLevel::Tx, - token: wrapper.fee.token.clone(), - operation: TokenOperation::Transfer { - amount: fees.into(), - source: UserAccount::Internal(wrapper.fee_payer()), - target: UserAccount::Internal( - block_proposer.clone(), - ), - source_post_balance: post_bal.into(), - target_post_balance, - }, - } - .with(HeightAttr(current_block_height)) - .with(TxHashAttr(wrapper_tx_hash)), - ); + // Batched tx result must be returned (and considered) only + // if fee payment was successful + (post_bal, post_bal.map(|_| valid_batched_tx_result)) + } else { + (None, None) + } + }; - Ok(()) - } else { + if post_bal.is_none() { // Balance was insufficient for fee payment, move all the // available funds in the transparent balance of // the fee payer. tracing::error!( "Transfer of tx fee cannot be applied to due to \ insufficient funds. Falling back to transferring the \ - available balance which is less than the fee." + available balance which is less than the fee. This \ + shouldn't happen." ); - token_transfer( - state, + fee_token_transfer( + shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), block_proposer, balance, - ) - .map_err(|e| Error::FeeError(e.to_string()))?; + )?; + } - let target_post_balance = Some( - namada_token::read_balance( - state, - &wrapper.fee.token, - block_proposer, - ) - .map_err(Error::StorageError)? - .into(), - ); + let target_post_balance = Some( + namada_token::read_balance( + shell_params.state, + &wrapper.fee.token, + block_proposer, + ) + .map_err(Error::StorageError)? 
+ .into(), + ); - state.write_log_mut().emit_event( - TokenEvent { - descriptor: FEE_PAYMENT_DESCRIPTOR, - level: EventLevel::Tx, - token: wrapper.fee.token.clone(), - operation: TokenOperation::Transfer { - amount: balance.into(), - source: UserAccount::Internal(wrapper.fee_payer()), - target: UserAccount::Internal( - block_proposer.clone(), - ), - source_post_balance: namada_core::uint::ZERO, - target_post_balance, - }, - } - .with(HeightAttr(current_block_height)) - .with(TxHashAttr(wrapper_tx_hash)), - ); + const FEE_PAYMENT_DESCRIPTOR: std::borrow::Cow<'static, str> = + std::borrow::Cow::Borrowed("wrapper-fee-payment"); + let current_block_height = shell_params + .state + .in_mem() + .get_last_block_height() + .next_height(); + shell_params.state.write_log_mut().emit_event( + TokenEvent { + descriptor: FEE_PAYMENT_DESCRIPTOR, + level: EventLevel::Tx, + token: wrapper.fee.token.clone(), + operation: TokenOperation::Transfer { + amount: fees.into(), + source: UserAccount::Internal(wrapper.fee_payer()), + target: UserAccount::Internal(block_proposer.clone()), + source_post_balance: post_bal + .map(namada_core::uint::Uint::from) + .unwrap_or(namada_core::uint::ZERO), + target_post_balance, + }, + } + .with(HeightAttr(current_block_height)) + .with(TxHashAttr(tx.header_hash())), + ); - Err(Error::FeeError( + post_bal.map(|_| valid_batched_tx_result).ok_or_else(|| { + // In this case don't drop the state changes because we still + // want to drain the fee payer's balance + Error::FeeError( "Transparent balance of wrapper's signer was insufficient \ to pay fee. All the available transparent funds have \ - been moved to the block proposer" + been moved to the block proposer. This shouldn't happen." .to_string(), - )) - } + ) + }) } Err(e) => { // Fee overflow. This shouldn't happen as it should be prevented @@ -600,18 +651,122 @@ where "Transfer of tx fee cannot be applied to due to fee overflow. \ This shouldn't happen." ); + shell_params.state.write_log_mut().drop_batch(); Err(Error::FeeError(format!("{}", e))) } } } -/// Transfer `token` from `src` to `dest`. Returns an `Err` if `src` has -/// insufficient balance or if the transfer the `dest` would overflow (This can -/// only happen if the total supply doesn't fit in `token::Amount`). Contrary to -/// `crate::token::transfer` this function updates the tx write log and -/// not the block write log. -fn token_transfer( +fn try_masp_fee_payment( + ShellParams { + tx_gas_meter, + state, + vp_wasm_cache, + tx_wasm_cache, + }: &mut ShellParams<'_, S, D, H, CA>, + tx: &Tx, + tx_index: &TxIndex, +) -> Result> +where + S: State + + StorageRead + + Read + + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, +{ + // The fee payment is subject to a gas limit imposed by a protocol + // parameter. Here we instantiate a custom gas meter for this step and + // initialize it with the already consumed gas. 
The gas limit should + // actually be the lower of the protocol parameter and the actual gas + // limit of the transaction + let max_gas_limit = state + .read::( + &namada_parameters::storage::get_masp_fee_payment_gas_limit_key(), + ) + .expect("Error reading the storage") + .expect("Missing masp fee payment gas limit in storage") + .min(tx_gas_meter.borrow().tx_gas_limit.into()); + + let mut gas_meter = TxGasMeter::new( + namada_gas::Gas::from_whole_units(max_gas_limit).ok_or_else(|| { + Error::GasError("Overflow in gas expansion".to_string()) + })?, + ); + gas_meter + .copy_consumed_gas_from(&tx_gas_meter.borrow()) + .map_err(|e| Error::GasError(e.to_string()))?; + let ref_unshield_gas_meter = RefCell::new(gas_meter); + + let valid_batched_tx_result = { + match apply_wasm_tx( + tx.batch_ref_first_tx() + .ok_or_else(|| Error::MissingInnerTxs)?, + tx_index, + ShellParams { + tx_gas_meter: &ref_unshield_gas_meter, + state: *state, + vp_wasm_cache, + tx_wasm_cache, + }, + ) { + Ok(result) => { + // NOTE: do not commit yet because this could be exploited to get + // free masp operations. We can commit only after the entire fee + // payment has been deemed valid. Also, do not commit to batch + // because we might need to discard the effects of + // this valid unshield (e.g. if it unshields an + // amount which is not enough to pay the fees) + if !result.is_accepted() { + state.write_log_mut().drop_tx(); + tracing::error!( + "The fee unshielding tx is invalid, some VPs rejected \ + it: {:#?}", + result.vps_result.rejected_vps + ); + } + + let masp_ref = namada_tx::action::get_masp_section_ref(*state) + .map_err(Error::StateError)?; + // Ensure that the transaction is actually a masp one, otherwise + // reject + (is_masp_transfer(&result.changed_keys) && result.is_accepted()) + .then(|| { + masp_ref + .map(|masp_section_ref| (result, masp_section_ref)) + }) + .flatten() + } + Err(e) => { + state.write_log_mut().drop_tx(); + tracing::error!( + "The fee unshielding tx is invalid, wasm run failed: {}", + e + ); + if let Error::GasError(_) = e { + // Propagate only if it is a gas error + return Err(e); + } + + None + } + } + }; + + tx_gas_meter + .borrow_mut() + .copy_consumed_gas_from(&ref_unshield_gas_meter.borrow()) + .map_err(|e| Error::GasError(e.to_string()))?; + + Ok(valid_batched_tx_result) +} + +// Manage the token transfer for the fee payment. If an error is detected, the +// write log is dropped to prevent committing an inconsistent state.
Propagates +// the result to the caller +fn fee_token_transfer( state: &mut WLS, token: &Address, src: &Address, @@ -619,68 +774,112 @@ fn token_transfer( amount: Amount, ) -> Result<()> where - WLS: State + StorageRead, + WLS: State + StorageRead + TxWrites, { - let src_key = crate::token::storage_key::balance_key(token, src); - let src_balance = crate::token::read_balance(state, token, src) - .expect("Token balance read in protocol must not fail"); - match src_balance.checked_sub(amount) { - Some(new_src_balance) => { - if src == dest { - return Ok(()); - } - let dest_key = crate::token::storage_key::balance_key(token, dest); - let dest_balance = crate::token::read_balance(state, token, dest) - .expect("Token balance read in protocol must not fail"); - match dest_balance.checked_add(amount) { - Some(new_dest_balance) => { - state - .write_log_mut() - .write(&src_key, new_src_balance.serialize_to_vec()) - .map_err(|e| Error::FeeError(e.to_string()))?; - match state - .write_log_mut() - .write(&dest_key, new_dest_balance.serialize_to_vec()) - { - Ok(_) => Ok(()), - Err(e) => Err(Error::FeeError(e.to_string())), - } - } - None => Err(Error::FeeError( - "The transfer would overflow destination balance" - .to_string(), - )), - } - } - None => Err(Error::FeeError("Insufficient source balance".to_string())), - } + crate::token::transfer( + &mut state.with_tx_writes(), + token, + src, + dest, + amount, + ) + .map_err(|err| { + state.write_log_mut().drop_tx(); + + Error::StorageError(err) + }) } /// Check if the fee payer has enough transparent balance to pay fees -pub fn check_fees(state: &S, wrapper: &WrapperTx) -> Result<()> +pub fn check_fees( + shell_params: &mut ShellParams<'_, S, D, H, CA>, + tx: &Tx, + wrapper: &WrapperTx, +) -> Result> where - S: State + StorageRead, + S: State + + StorageRead + + Read + + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, { - let balance = crate::token::read_balance( - state, - &wrapper.fee.token, - &wrapper.fee_payer(), - ) - .unwrap(); - - let fees = wrapper - .get_tx_fee() - .map_err(|e| Error::FeeError(e.to_string()))?; + fn inner_check_fees( + shell_params: &mut ShellParams<'_, S, D, H, CA>, + tx: &Tx, + wrapper: &WrapperTx, + ) -> Result> + where + S: State + + StorageRead + + Read + + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + CA: 'static + WasmCacheAccess + Sync, + { + match wrapper.get_tx_fee() { + Ok(fees) => { + let fees = crate::token::denom_to_amount( + fees, + &wrapper.fee.token, + shell_params.state, + ) + .map_err(Error::StorageError)?; - let fees = crate::token::denom_to_amount(fees, &wrapper.fee.token, state) - .map_err(|e| Error::FeeError(e.to_string()))?; - if balance.checked_sub(fees).is_some() { - Ok(()) - } else { - Err(Error::FeeError( - "Insufficient transparent balance to pay fees".to_string(), - )) + let balance = crate::token::read_balance( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + ) + .map_err(Error::StorageError)?; + + checked!(balance - fees).map_or_else( + |_| { + // See if the first inner transaction of the batch pays + // the fees with a masp unshield + if let Ok(valid_batched_tx_result @ Some(_)) = + try_masp_fee_payment( + shell_params, + tx, + &TxIndex::default(), + ) + { + let balance = crate::token::read_balance( + shell_params.state, + &wrapper.fee.token, + &wrapper.fee_payer(), + ) + .map_err(Error::StorageError)?; + + checked!(balance - 
fees).map_or_else( + |_| { + Err(Error::FeeError( + "Masp fee payment unshielded an \ + insufficient amount" + .to_string(), + )) + }, + |_| Ok(valid_batched_tx_result), + ) + } else { + Err(Error::FeeError( + "Failed masp fee payment".to_string(), + )) + } + }, + |_| Ok(None), + ) + } + Err(e) => Err(Error::FeeError(e.to_string())), + } } + inner_check_fees(shell_params, tx, wrapper).map_err(|err| { + shell_params.state.write_log_mut().drop_tx(); + + err + }) } /// Apply a transaction going via the wasm environment. Gas will be metered and @@ -1316,7 +1515,7 @@ mod tests { // commit storage changes. this will act as the // initial state of the chain - state.commit_tx(); + state.commit_tx_batch(); state.commit_block().unwrap(); // "execute" a dummy tx, by manually performing its state changes @@ -1364,7 +1563,7 @@ mod tests { // gas meter with no gas left let gas_meter = TxGasMeter::new(0); - let batched_tx = dummy_tx.batch_ref_first_tx(); + let batched_tx = dummy_tx.batch_ref_first_tx().unwrap(); let result = execute_vps( verifiers, changed_keys, diff --git a/crates/namada/src/vm/wasm/run.rs b/crates/namada/src/vm/wasm/run.rs index eb54aa62de..3cf1560bb7 100644 --- a/crates/namada/src/vm/wasm/run.rs +++ b/crates/namada/src/vm/wasm/run.rs @@ -1120,7 +1120,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_code.clone(), None)); outer_tx.set_data(Data::new(tx_data)); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let result = tx( &mut state, &gas_meter, @@ -1138,7 +1138,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let error = tx( &mut state, &gas_meter, @@ -1210,7 +1210,7 @@ mod tests { assert!( vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1242,7 +1242,7 @@ mod tests { assert!( vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1291,7 +1291,7 @@ mod tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1310,7 +1310,7 @@ mod tests { outer_tx.set_data(Data::new(tx_data)); let error = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1359,7 +1359,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_no_op, None)); outer_tx.set_data(Data::new(tx_data)); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let result = tx( &mut state, &gas_meter, @@ -1424,7 +1424,7 @@ mod tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1495,7 +1495,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_read_key, None)); outer_tx.set_data(Data::new(tx_data)); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let error = tx( &mut state, &gas_meter, @@ -1552,7 +1552,7 @@ mod 
tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let error = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1628,7 +1628,7 @@ mod tests { assert!( vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1672,7 +1672,7 @@ mod tests { wrapper_tx.add_serialized_data(vec![]); let mut raw_tx = wrapper_tx.clone(); raw_tx.update_header(TxType::Raw); - let batched_tx = wrapper_tx.batch_ref_first_tx(); + let batched_tx = wrapper_tx.batch_ref_first_tx().unwrap(); // Check that using a disallowed wrapper tx leads to an error, but a raw // tx is ok even if not allowlisted @@ -1682,11 +1682,11 @@ mod tests { &mut state, allowlist, ) .unwrap(); - state.commit_tx(); + state.commit_tx_batch(); let result = check_tx_allowed(&batched_tx, &state); assert_matches!(result.unwrap_err(), Error::DisallowedTx); - let batched_raw_tx = raw_tx.batch_ref_first_tx(); + let batched_raw_tx = raw_tx.batch_ref_first_tx().unwrap(); let result = check_tx_allowed(&batched_raw_tx, &state); if let Err(result) = result { assert!(!matches!(result, Error::DisallowedTx)); @@ -1701,7 +1701,7 @@ mod tests { &mut state, allowlist, ) .unwrap(); - state.commit_tx(); + state.commit_tx_batch(); let result = check_tx_allowed(&batched_tx, &state); if let Err(result) = result { @@ -1737,7 +1737,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_code.clone(), None)); outer_tx.set_data(Data::new(vec![])); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let result = tx( &mut state, &gas_meter, @@ -1778,7 +1778,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::new(tx_code.clone(), None)); outer_tx.set_data(Data::new(vec![])); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); let result = tx( &mut state, &gas_meter, @@ -1821,7 +1821,7 @@ mod tests { outer_tx.set_data(Data::new(vec![])); let result = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1864,7 +1864,7 @@ mod tests { outer_tx.set_data(Data::new(vec![])); let result = vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -1976,7 +1976,7 @@ mod tests { vp( code_hash, - &outer_tx.batch_ref_first_tx(), + &outer_tx.batch_ref_first_tx().unwrap(), &tx_index, &addr, &state, @@ -2012,7 +2012,7 @@ mod tests { let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::from_hash(code_hash, None)); outer_tx.set_data(Data::new(tx_data)); - let batched_tx = outer_tx.batch_ref_first_tx(); + let batched_tx = outer_tx.batch_ref_first_tx().unwrap(); tx( &mut state, diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index ef80036715..85339dc937 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -76,8 +76,9 @@ use namada::ledger::queries::{ use namada::masp::MaspTxRefs; use namada::state::StorageRead; use namada::token::{ - Amount, DenominatedAmount, ShieldedTransfer, ShieldingTransfer, - UnshieldingTransfer, + Amount, DenominatedAmount, ShieldedTransfer, ShieldingMultiTransfer, + ShieldingTransfer, ShieldingTransferData, UnshieldingMultiTransfer, + UnshieldingTransferData, }; use namada::tx::data::pos::Bond; use namada::tx::data::{ 
@@ -94,7 +95,7 @@ use namada_apps_lib::cli::context::FromContext; use namada_apps_lib::cli::Context; use namada_apps_lib::wallet::{defaults, CliWalletUtils}; use namada_sdk::masp::{ - self, ContextSyncStatus, ShieldedContext, ShieldedUtils, + self, ContextSyncStatus, MaspTransferData, ShieldedContext, ShieldedUtils, }; pub use namada_sdk::tx::{ TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, @@ -255,7 +256,7 @@ impl Default for BenchShell { ); bench_shell.execute_tx(&signed_tx.to_ref()); - bench_shell.state.commit_tx(); + bench_shell.state.commit_tx_batch(); // Initialize governance proposal let content_section = Section::ExtraData(Code::new( @@ -280,7 +281,7 @@ impl Default for BenchShell { ); bench_shell.execute_tx(&signed_tx.to_ref()); - bench_shell.state.commit_tx(); + bench_shell.state.commit_tx_batch(); bench_shell.commit_block(); // Advance epoch for pos benches @@ -404,6 +405,7 @@ impl BenchShell { let msg = MsgTransfer { message, transfer: None, + fee_unshield: None, }; self.generate_ibc_tx(TX_IBC_WASM, msg.serialize_to_vec()) @@ -626,7 +628,7 @@ impl BenchShell { ); self.last_block_masp_txs .push((masp_tx, self.state.write_log().get_keys())); - self.state.commit_tx(); + self.state.commit_tx_batch(); } } @@ -1080,14 +1082,18 @@ impl BenchShieldedCtx { StdIo, native_token, ); + let masp_transfer_data = MaspTransferData { + source: source.clone(), + target: target.clone(), + token: address::testing::nam(), + amount: denominated_amount, + }; let shielded = async_runtime .block_on( ShieldedContext::::gen_shielded_transfer( &namada, - &source, - &target, - &address::testing::nam(), - denominated_amount, + vec![masp_transfer_data], + None, true, ), ) @@ -1115,6 +1121,7 @@ impl BenchShieldedCtx { namada.client().generate_tx( TX_SHIELDED_TRANSFER_WASM, ShieldedTransfer { + fee_unshield: None, section_hash: shielded_section_hash, }, Some(shielded), @@ -1124,10 +1131,12 @@ impl BenchShieldedCtx { } else if target.effective_address() == MASP { namada.client().generate_tx( TX_SHIELDING_TRANSFER_WASM, - ShieldingTransfer { - source: source.effective_address(), - token: address::testing::nam(), - amount: DenominatedAmount::native(amount), + ShieldingMultiTransfer { + data: vec![ShieldingTransferData { + source: source.effective_address(), + token: address::testing::nam(), + amount: DenominatedAmount::native(amount), + }], shielded_section_hash, }, Some(shielded), @@ -1137,10 +1146,12 @@ impl BenchShieldedCtx { } else { namada.client().generate_tx( TX_UNSHIELDING_TRANSFER_WASM, - UnshieldingTransfer { - target: target.effective_address(), - token: address::testing::nam(), - amount: DenominatedAmount::native(amount), + UnshieldingMultiTransfer { + data: vec![UnshieldingTransferData { + target: target.effective_address(), + token: address::testing::nam(), + amount: DenominatedAmount::native(amount), + }], shielded_section_hash, }, Some(shielded), @@ -1206,10 +1217,14 @@ impl BenchShieldedCtx { timeout_timestamp_on_b: timeout_timestamp, }; - let transfer = ShieldingTransfer::deserialize( + let vectorized_transfer = ShieldingMultiTransfer::deserialize( &mut tx.tx.data(&tx.cmt).unwrap().as_slice(), ) .unwrap(); + let transfer = ShieldingTransfer { + data: vectorized_transfer.data.first().unwrap().to_owned(), + shielded_section_hash: vectorized_transfer.shielded_section_hash, + }; let masp_tx = tx .tx .get_section(&transfer.shielded_section_hash) @@ -1219,6 +1234,7 @@ impl BenchShieldedCtx { let msg = MsgTransfer { message: msg, transfer: Some(transfer), + fee_unshield: None, }; let mut 
ibc_tx = ctx diff --git a/crates/node/src/shell/finalize_block.rs index 7adbcd17ba..99c41cb202 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -357,13 +357,15 @@ where match extended_dispatch_result { Ok(extended_tx_result) => match tx_data.tx.header.tx_type { TxType::Wrapper(_) => { + self.state.write_log_mut().commit_batch(); + // Return without emitting any events return Some(WrapperCache { tx: tx_data.tx.to_owned(), tx_index: tx_data.tx_index, gas_meter: tx_data.tx_gas_meter, event: tx_logs.tx_event, - tx_result: extended_tx_result.tx_result, + extended_tx_result, }); } _ => self.handle_inner_tx_results( @@ -390,8 +392,12 @@ where .extend(GasUsed(tx_data.tx_gas_meter.get_tx_consumed_gas())) .extend(Info(msg.to_string())) .extend(Code(ResultCode::InvalidTx)); - // Make sure to clean the write logs for the next transaction + // Drop the tx write log which could contain invalid data self.state.write_log_mut().drop_tx(); + // Instead commit the batch write log because it contains data + // that should be persisted even in case of a wrapper failure + // (e.g. the fee payment state change) + self.state.write_log_mut().commit_batch(); } Err(dispatch_error) => { // This branch represents an error that affects the entire @@ -700,7 +706,10 @@ where DispatchArgs::Wrapper { wrapper, tx_bytes: processed_tx.tx.as_ref(), + tx_index: TxIndex::must_from_usize(tx_index), block_proposer: native_block_proposer_address, + vp_wasm_cache: &mut self.vp_wasm_cache, + tx_wasm_cache: &mut self.tx_wasm_cache, }, tx_gas_meter, ) @@ -824,7 +833,7 @@ where tx_index, gas_meter: tx_gas_meter, event: tx_event, - tx_result: wrapper_tx_result, + extended_tx_result: wrapper_tx_result, } in successful_wrappers { let tx_hash = tx.header_hash(); @@ -891,7 +900,7 @@ struct WrapperCache { tx_index: usize, gas_meter: TxGasMeter, event: Event, - tx_result: namada::tx::data::TxResult, + extended_tx_result: namada::tx::data::ExtendedTxResult, } struct TxData<'tx> { @@ -5467,7 +5476,7 @@ mod test_finalize_block { )); let keys_changed = BTreeSet::from([min_confirmations_key()]); let verifiers = BTreeSet::default(); - let batched_tx = tx.batch_ref_first_tx(); + let batched_tx = tx.batch_ref_first_tx().unwrap(); let ctx = namada::ledger::native_vp::Ctx::new( shell.mode.get_validator_address().expect("Test failed"), shell.state.read_only(), diff --git a/crates/node/src/shell/governance.rs index 758ec735d8..86b301fa50 100644 --- a/crates/node/src/shell/governance.rs +++ b/crates/node/src/shell/governance.rs @@ -426,7 +426,7 @@ where .get(&cmt.get_hash()) { Some(Ok(batched_result)) if batched_result.is_accepted() => { - shell.state.commit_tx(); + shell.state.commit_tx_batch(); Ok(true) } Some(Err(e)) => { @@ -434,12 +434,12 @@ where "Error executing governance proposal {}", e.to_string() ); - shell.state.drop_tx(); + shell.state.drop_tx_batch(); Ok(false) } _ => { tracing::warn!("not sure what happen"); - shell.state.drop_tx(); + shell.state.drop_tx_batch(); Ok(false) } }, @@ -448,7 +448,7 @@ where "Error executing governance proposal {}", e.error.to_string() ); - shell.state.drop_tx(); + shell.state.drop_tx_batch(); Ok(false) } } diff --git a/crates/node/src/shell/mod.rs index 2ace0f620a..eb70a72c8a 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -1129,16 +1129,16 @@ where return response; } - // TODO(namada#2597): validate masp fee payment 
if normal fee - // payment fails Validate wrapper fees + // Validate wrapper fees if let Err(e) = mempool_fee_check( - &wrapper, &mut ShellParams::new( &RefCell::new(gas_meter), &mut self.state.with_temp_write_log(), &mut self.vp_wasm_cache.clone(), &mut self.tx_wasm_cache.clone(), ), + &tx, + &wrapper, ) { response.code = ResultCode::FeeError.into(); response.log = format!("{INVALID_MSG}: {e}"); @@ -1281,8 +1281,9 @@ where // Perform the fee check in mempool fn mempool_fee_check( - wrapper: &WrapperTx, shell_params: &mut ShellParams<'_, TempWlState<'_, D, H>, D, H, CA>, + tx: &Tx, + wrapper: &WrapperTx, ) -> Result<()> where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, @@ -1300,7 +1301,9 @@ where ))))?; fee_data_check(wrapper, minimum_gas_price, shell_params)?; - protocol::check_fees(shell_params.state, wrapper).map_err(Error::TxApply) + protocol::check_fees(shell_params, tx, wrapper) + .map_err(Error::TxApply) + .map(|_| ()) } /// Check the validity of the fee data diff --git a/crates/node/src/shell/prepare_proposal.rs b/crates/node/src/shell/prepare_proposal.rs index 6cc2f22392..7498a86111 100644 --- a/crates/node/src/shell/prepare_proposal.rs +++ b/crates/node/src/shell/prepare_proposal.rs @@ -5,10 +5,9 @@ use std::cell::RefCell; use namada::core::address::Address; use namada::core::key::tm_raw_hash_to_string; use namada::gas::{Gas, TxGasMeter}; -use namada::hash::Hash; use namada::ledger::protocol::{self, ShellParams}; use namada::proof_of_stake::storage::find_validator_by_raw_hash; -use namada::state::{DBIter, StorageHasher, TempWlState, DB}; +use namada::state::{DBIter, StorageHasher, TempWlState, TxIndex, DB}; use namada::token::{Amount, DenominatedAmount}; use namada::tx::data::{TxType, WrapperTx}; use namada::tx::Tx; @@ -123,14 +122,15 @@ where let txs = txs .iter() - .filter_map(|tx_bytes| { - match validate_wrapper_bytes(tx_bytes, block_time, block_proposer, proposer_local_config, &mut temp_state, &mut vp_wasm_cache, &mut tx_wasm_cache, ) { + .enumerate() + .filter_map(|(tx_index, tx_bytes)| { + match validate_wrapper_bytes(tx_bytes, &TxIndex::must_from_usize(tx_index),block_time, block_proposer, proposer_local_config, &mut temp_state, &mut vp_wasm_cache, &mut tx_wasm_cache, ) { Ok(gas) => { - temp_state.write_log_mut().commit_tx(); + temp_state.write_log_mut().commit_batch(); Some((tx_bytes.to_owned(), gas)) }, Err(()) => { - temp_state.write_log_mut().drop_tx(); + temp_state.write_log_mut().drop_batch(); None } } @@ -260,6 +260,7 @@ where #[allow(clippy::too_many_arguments)] fn validate_wrapper_bytes( tx_bytes: &[u8], + tx_index: &TxIndex, block_time: Option, block_proposer: &Address, proposer_local_config: Option<&ValidatorLocalConfig>, @@ -295,10 +296,10 @@ where super::replay_protection_checks(&tx, temp_state).map_err(|_| ())?; // Check fees and extract the gas limit of this transaction - // TODO(namada#2597): check if masp fee payment is required match prepare_proposal_fee_check( &wrapper, - tx.header_hash(), + &tx, + tx_index, block_proposer, proposer_local_config, &mut ShellParams::new( @@ -316,10 +317,10 @@ where } } -#[allow(clippy::too_many_arguments)] fn prepare_proposal_fee_check( wrapper: &WrapperTx, - wrapper_tx_hash: Hash, + tx: &Tx, + tx_index: &TxIndex, proposer: &Address, proposer_local_config: Option<&ValidatorLocalConfig>, shell_params: &mut ShellParams<'_, TempWlState<'_, D, H>, D, H, CA>, @@ -337,13 +338,8 @@ where super::fee_data_check(wrapper, minimum_gas_price, shell_params)?; - protocol::transfer_fee( - shell_params.state, - proposer, - wrapper, 
- wrapper_tx_hash, - ) - .map_err(Error::TxApply) + protocol::transfer_fee(shell_params, proposer, tx, wrapper, tx_index) + .map_or_else(|e| Err(Error::TxApply(e)), |_| Ok(())) } fn compute_min_gas_price( diff --git a/crates/node/src/shell/process_proposal.rs b/crates/node/src/shell/process_proposal.rs index f226097c11..870eec9afa 100644 --- a/crates/node/src/shell/process_proposal.rs +++ b/crates/node/src/shell/process_proposal.rs @@ -2,7 +2,6 @@ //! and [`RevertProposal`] ABCI++ methods for the Shell use data_encoding::HEXUPPER; -use namada::hash::Hash; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::storage::find_validator_by_raw_hash; use namada::tx::data::protocol::ProtocolTxType; @@ -137,9 +136,11 @@ where let tx_results: Vec<_> = txs .iter() - .map(|tx_bytes| { + .enumerate() + .map(|(tx_index, tx_bytes)| { let result = self.check_proposal_tx( tx_bytes, + &TxIndex::must_from_usize(tx_index), &mut metadata, &mut temp_state, block_time, @@ -149,7 +150,7 @@ where ); let error_code = ResultCode::from_u32(result.code).unwrap(); if let ResultCode::Ok = error_code { - temp_state.write_log_mut().commit_tx(); + temp_state.write_log_mut().commit_batch(); } else { tracing::info!( "Process proposal rejected an invalid tx. Error code: \ @@ -157,7 +158,7 @@ where error_code, result.info ); - temp_state.write_log_mut().drop_tx(); + temp_state.write_log_mut().drop_batch(); } result }) @@ -191,6 +192,7 @@ where pub fn check_proposal_tx( &self, tx_bytes: &[u8], + tx_index: &TxIndex, metadata: &mut ValidationMeta, temp_state: &mut TempWlState<'_, D, H>, block_time: DateTimeUtc, @@ -469,7 +471,8 @@ where // Check that the fee payer has sufficient balance. if let Err(e) = process_proposal_fee_check( &wrapper, - tx.header_hash(), + &tx, + tx_index, block_proposer, &mut ShellParams::new( &RefCell::new(tx_gas_meter), @@ -515,10 +518,10 @@ where } } -// TODO(namada#2597): check masp fee payment if required fn process_proposal_fee_check( wrapper: &WrapperTx, - wrapper_tx_hash: Hash, + tx: &Tx, + tx_index: &TxIndex, proposer: &Address, shell_params: &mut ShellParams<'_, TempWlState<'_, D, H>, D, H, CA>, ) -> Result<()> @@ -539,13 +542,8 @@ where fee_data_check(wrapper, minimum_gas_price, shell_params)?; - protocol::transfer_fee( - shell_params.state, - proposer, - wrapper, - wrapper_tx_hash, - ) - .map_err(Error::TxApply) + protocol::transfer_fee(shell_params, proposer, tx, wrapper, tx_index) + .map_or_else(|e| Err(Error::TxApply(e)), |_| Ok(())) } /// We test the failure cases of [`process_proposal`]. The happy flows @@ -1026,7 +1024,8 @@ mod test_process_proposal { "Error trying to apply a transaction: Error while processing \ transaction's fees: Transparent balance of wrapper's signer \ was insufficient to pay fee. All the available transparent \ - funds have been moved to the block proposer" + funds have been moved to the block proposer. This shouldn't \ + happen." ) ); } @@ -1092,7 +1091,8 @@ mod test_process_proposal { "Error trying to apply a transaction: Error while processing \ transaction's fees: Transparent balance of wrapper's signer \ was insufficient to pay fee. All the available transparent \ - funds have been moved to the block proposer" + funds have been moved to the block proposer. This shouldn't \ + happen." 
) ); } diff --git a/crates/node/src/storage/mod.rs b/crates/node/src/storage/mod.rs index 49d37c81a8..f45eeaf639 100644 --- a/crates/node/src/storage/mod.rs +++ b/crates/node/src/storage/mod.rs @@ -175,7 +175,7 @@ mod tests { epochs_per_year: 365, masp_epoch_multiplier: 2, max_signatures_per_transaction: 10, - fee_unshielding_gas_limit: 0, + masp_fee_payment_gas_limit: 0, minimum_gas_price: Default::default(), is_native_token_transferable: true, }; diff --git a/crates/parameters/src/lib.rs b/crates/parameters/src/lib.rs index b8dbe12a88..9b8de510d5 100644 --- a/crates/parameters/src/lib.rs +++ b/crates/parameters/src/lib.rs @@ -77,7 +77,7 @@ where masp_epoch_multiplier, max_signatures_per_transaction, minimum_gas_price, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, is_native_token_transferable, } = parameters; @@ -97,10 +97,11 @@ where let epoch_key = storage::get_epoch_duration_storage_key(); storage.write(&epoch_key, epoch_duration)?; - // write fee unshielding gas limit - let fee_unshielding_gas_limit_key = - storage::get_fee_unshielding_gas_limit_key(); - storage.write(&fee_unshielding_gas_limit_key, fee_unshielding_gas_limit)?; + // write masp fee payment gas limit + let masp_fee_payment_gas_limit_key = + storage::get_masp_fee_payment_gas_limit_key(); + storage + .write(&masp_fee_payment_gas_limit_key, masp_fee_payment_gas_limit)?; // write vp allowlist parameter let vp_allowlist_key = storage::get_vp_allowlist_storage_key(); @@ -371,11 +372,11 @@ where .ok_or(ReadError::ParametersMissing) .into_storage_result()?; - // read fee unshielding gas limit - let fee_unshielding_gas_limit_key = - storage::get_fee_unshielding_gas_limit_key(); - let value = storage.read(&fee_unshielding_gas_limit_key)?; - let fee_unshielding_gas_limit: u64 = value + // read masp fee payment gas limit + let masp_fee_payment_gas_limit_key = + storage::get_masp_fee_payment_gas_limit_key(); + let value = storage.read(&masp_fee_payment_gas_limit_key)?; + let masp_fee_payment_gas_limit: u64 = value .ok_or(ReadError::ParametersMissing) .into_storage_result()?; @@ -432,7 +433,7 @@ where masp_epoch_multiplier, max_signatures_per_transaction, minimum_gas_price, - fee_unshielding_gas_limit, + masp_fee_payment_gas_limit, is_native_token_transferable, }) } @@ -477,7 +478,7 @@ where epochs_per_year: 365, masp_epoch_multiplier: 2, max_signatures_per_transaction: 10, - fee_unshielding_gas_limit: 0, + masp_fee_payment_gas_limit: 0, minimum_gas_price: Default::default(), is_native_token_transferable: true, }; diff --git a/crates/parameters/src/storage.rs b/crates/parameters/src/storage.rs index 27204f9568..f17f7824e2 100644 --- a/crates/parameters/src/storage.rs +++ b/crates/parameters/src/storage.rs @@ -38,7 +38,7 @@ struct Keys { max_tx_bytes: &'static str, max_block_gas: &'static str, minimum_gas_price: &'static str, - fee_unshielding_gas_limit: &'static str, + masp_fee_payment_gas_limit: &'static str, max_signatures_per_transaction: &'static str, native_token_transferable: &'static str, } @@ -117,8 +117,8 @@ pub fn get_tx_allowlist_storage_key() -> Key { } /// Storage key used for the fee unshielding gas limit -pub fn get_fee_unshielding_gas_limit_key() -> Key { - get_fee_unshielding_gas_limit_key_at_addr(ADDRESS) +pub fn get_masp_fee_payment_gas_limit_key() -> Key { + get_masp_fee_payment_gas_limit_key_at_addr(ADDRESS) } /// Storage key used for max_epected_time_per_block parameter. 
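The renamed masp_fee_payment_gas_limit parameter written above is the cap applied to the fee-paying inner transaction: try_masp_fee_payment reads it from storage, takes the lower of that parameter and the wrapper's own gas limit, and starts its dedicated gas meter from the gas already consumed. A minimal standalone sketch of that capping arithmetic, using plain u64 values rather than the ledger's gas types (the function names here are illustrative only, not part of the codebase):

// Illustrative sketch only: plain integers stand in for the ledger's gas types.
// The cap for the masp fee payment step is the lower of the protocol parameter
// and the transaction's declared gas limit.
fn masp_fee_payment_gas_cap(masp_fee_payment_gas_limit: u64, tx_gas_limit: u64) -> u64 {
    masp_fee_payment_gas_limit.min(tx_gas_limit)
}

// The dedicated meter then starts from whatever gas the wrapper checks have
// already consumed, so only the remainder is available to the unshielding tx.
fn remaining_fee_payment_gas(cap: u64, already_consumed: u64) -> u64 {
    cap.saturating_sub(already_consumed)
}

fn main() {
    // e.g. the parameter allows 200_000 gas, but the wrapper only declared 150_000
    let cap = masp_fee_payment_gas_cap(200_000, 150_000);
    assert_eq!(cap, 150_000);
    // 30_000 gas was already burned before this step
    assert_eq!(remaining_fee_payment_gas(cap, 30_000), 120_000);
}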
diff --git a/crates/proof_of_stake/src/lib.rs b/crates/proof_of_stake/src/lib.rs index 19b003541f..7b68686934 100644 --- a/crates/proof_of_stake/src/lib.rs +++ b/crates/proof_of_stake/src/lib.rs @@ -2726,7 +2726,7 @@ pub mod test_utils { epochs_per_year: 10000000, masp_epoch_multiplier: 2, max_signatures_per_transaction: 15, - fee_unshielding_gas_limit: 10000, + masp_fee_payment_gas_limit: 10000, minimum_gas_price: BTreeMap::new(), is_native_token_transferable: true, }; diff --git a/crates/sdk/src/args.rs b/crates/sdk/src/args.rs index 8db6155e6f..3fdf6ab46e 100644 --- a/crates/sdk/src/args.rs +++ b/crates/sdk/src/args.rs @@ -229,11 +229,9 @@ impl From for InputAmount { } } -/// Transparent transfer transaction arguments +/// Transparent transfer-specific arguments #[derive(Clone, Debug)] -pub struct TxTransparentTransfer { - /// Common tx arguments - pub tx: Tx, +pub struct TxTransparentTransferData { /// Transfer source address pub source: C::Address, /// Transfer target address @@ -242,6 +240,15 @@ pub struct TxTransparentTransfer { pub token: C::Address, /// Transferred token amount pub amount: InputAmount, +} + +/// Transparent transfer transaction arguments +#[derive(Clone, Debug)] +pub struct TxTransparentTransfer { + /// Common tx arguments + pub tx: Tx, + /// The transfer specific data + pub data: Vec>, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -258,7 +265,7 @@ impl TxBuilder for TxTransparentTransfer { } } -impl TxTransparentTransfer { +impl TxTransparentTransferData { /// Transfer source address pub fn source(self, source: C::Address) -> Self { Self { source, ..self } @@ -278,7 +285,9 @@ impl TxTransparentTransfer { pub fn amount(self, amount: InputAmount) -> Self { Self { amount, ..self } } +} +impl TxTransparentTransfer { /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { Self { @@ -298,11 +307,9 @@ impl TxTransparentTransfer { } } -/// Shielded transfer transaction arguments +/// Shielded transfer-specific arguments #[derive(Clone, Debug)] -pub struct TxShieldedTransfer { - /// Common tx arguments - pub tx: Tx, +pub struct TxShieldedTransferData { /// Transfer source spending key pub source: C::SpendingKey, /// Transfer target address @@ -311,6 +318,17 @@ pub struct TxShieldedTransfer { pub token: C::Address, /// Transferred token amount pub amount: InputAmount, +} + +/// Shielded transfer transaction arguments +#[derive(Clone, Debug)] +pub struct TxShieldedTransfer { + /// Common tx arguments + pub tx: Tx, + /// Transfer-specific data + pub data: Vec>, + /// Optional additional keys for gas payment + pub gas_spending_keys: Vec, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -325,19 +343,26 @@ impl TxShieldedTransfer { } } +/// Shielded transfer-specific arguments +#[derive(Clone, Debug)] +pub struct TxShieldingTransferData { + /// Transfer source spending key + pub source: C::Address, + /// Transferred token address + pub token: C::Address, + /// Transferred token amount + pub amount: InputAmount, +} + /// Shielding transfer transaction arguments #[derive(Clone, Debug)] pub struct TxShieldingTransfer { /// Common tx arguments pub tx: Tx, - /// Transfer source address - pub source: C::Address, /// Transfer target address pub target: C::PaymentAddress, - /// Transferred token address - pub token: C::Address, - /// Transferred token amount - pub amount: InputAmount, + /// Transfer-specific data + pub data: Vec>, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -352,19 +377,28 
@@ impl TxShieldingTransfer { } } -/// Unshielding transfer transaction arguments +/// Unshielding transfer-specific arguments #[derive(Clone, Debug)] -pub struct TxUnshieldingTransfer { - /// Common tx arguments - pub tx: Tx, - /// Transfer source spending key - pub source: C::SpendingKey, +pub struct TxUnshieldingTransferData { /// Transfer target address pub target: C::Address, /// Transferred token address pub token: C::Address, /// Transferred token amount pub amount: InputAmount, +} + +/// Unshielding transfer transaction arguments +#[derive(Clone, Debug)] +pub struct TxUnshieldingTransfer { + /// Common tx arguments + pub tx: Tx, + /// Transfer source spending key + pub source: C::SpendingKey, + /// Transfer-specific data + pub data: Vec>, + /// Optional additional keys for gas payment + pub gas_spending_keys: Vec, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -404,6 +438,8 @@ pub struct TxIbcTransfer { pub refund_target: Option, /// Memo pub memo: Option, + /// Optional additional keys for gas payment + pub gas_spending_keys: Vec, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -483,6 +519,17 @@ impl TxIbcTransfer { } } + /// Gas spending keys + pub fn gas_spending_keys( + self, + gas_spending_keys: Vec, + ) -> Self { + Self { + gas_spending_keys, + ..self + } + } + /// Path to the TX WASM code file pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { Self { diff --git a/crates/sdk/src/error.rs b/crates/sdk/src/error.rs index a8934b1961..f0c22adfca 100644 --- a/crates/sdk/src/error.rs +++ b/crates/sdk/src/error.rs @@ -264,7 +264,10 @@ pub enum TxSubmitError { #[error("Proposal end epoch is not in the storage.")] EpochNotInStorage, /// Couldn't understand who the fee payer is - #[error("Either --signing-keys or --gas-payer must be available.")] + #[error( + "Either --signing-keys, --gas-payer or --disposable-gas-payer must be \ + available." 
+ )] InvalidFeePayer, /// Account threshold is not set #[error("Account threshold must be set.")] diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index 7832b23d33..7439587182 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -175,16 +175,10 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { /// arguments fn new_transparent_transfer( &self, - source: Address, - target: Address, - token: Address, - amount: InputAmount, + data: Vec, ) -> args::TxTransparentTransfer { args::TxTransparentTransfer { - source, - target, - token, - amount, + data, tx_code_path: PathBuf::from(TX_TRANSPARENT_TRANSFER_WASM), tx: self.tx_builder(), } @@ -194,16 +188,12 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { /// arguments fn new_shielded_transfer( &self, - source: ExtendedSpendingKey, - target: PaymentAddress, - token: Address, - amount: InputAmount, + data: Vec, + gas_spending_keys: Vec, ) -> args::TxShieldedTransfer { args::TxShieldedTransfer { - source, - target, - token, - amount, + data, + gas_spending_keys, tx_code_path: PathBuf::from(TX_SHIELDED_TRANSFER_WASM), tx: self.tx_builder(), } @@ -213,16 +203,12 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { /// arguments fn new_shielding_transfer( &self, - source: Address, target: PaymentAddress, - token: Address, - amount: InputAmount, + data: Vec, ) -> args::TxShieldingTransfer { args::TxShieldingTransfer { - source, + data, target, - token, - amount, tx_code_path: PathBuf::from(TX_SHIELDING_TRANSFER_WASM), tx: self.tx_builder(), } @@ -233,15 +219,13 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { fn new_unshielding_transfer( &self, source: ExtendedSpendingKey, - target: Address, - token: Address, - amount: InputAmount, + data: Vec, + gas_spending_keys: Vec, ) -> args::TxUnshieldingTransfer { args::TxUnshieldingTransfer { source, - target, - token, - amount, + data, + gas_spending_keys, tx_code_path: PathBuf::from(TX_UNSHIELDING_TRANSFER_WASM), tx: self.tx_builder(), } @@ -343,6 +327,7 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { timeout_sec_offset: None, refund_target: None, memo: None, + gas_spending_keys: Default::default(), tx: self.tx_builder(), tx_code_path: PathBuf::from(TX_IBC_WASM), } @@ -872,13 +857,8 @@ pub mod testing { }; use namada_governance::{InitProposalData, VoteProposalData}; use namada_ibc::testing::arb_ibc_any; - use namada_token::testing::{ - arb_denominated_amount, arb_transparent_transfer, - }; - use namada_token::{ - ShieldedTransfer, ShieldingTransfer, TransparentTransfer, - UnshieldingTransfer, - }; + use namada_token::testing::arb_denominated_amount; + use namada_token::{ShieldedTransfer, TransparentTransfer}; use namada_tx::data::pgf::UpdateStewardCommission; use namada_tx::data::pos::{ BecomeValidator, Bond, CommissionChange, ConsensusKeyChange, @@ -890,6 +870,11 @@ pub mod testing { use prost::Message; use ripemd::Digest as RipemdDigest; use sha2::Digest; + use token::testing::arb_vectorized_transparent_transfer; + use token::{ + ShieldingMultiTransfer, ShieldingTransferData, + UnshieldingMultiTransfer, UnshieldingTransferData, + }; use super::*; use crate::account::tests::{arb_init_account, arb_update_account}; @@ -933,8 +918,11 @@ pub mod testing { Withdraw(Withdraw), TransparentTransfer(TransparentTransfer), ShieldedTransfer(ShieldedTransfer, (StoredBuildParams, String)), - ShieldingTransfer(ShieldingTransfer, (StoredBuildParams, String)), - UnshieldingTransfer(UnshieldingTransfer, (StoredBuildParams, String)), + ShieldingTransfer(ShieldingMultiTransfer, 
(StoredBuildParams, String)), + UnshieldingTransfer( + UnshieldingMultiTransfer, + (StoredBuildParams, String), + ), Bond(Bond), Redelegation(Redelegation), UpdateStewardCommission(UpdateStewardCommission), @@ -1093,7 +1081,7 @@ pub mod testing { pub fn arb_transparent_transfer_tx()( mut header in arb_header(), wrapper in arb_wrapper_tx(), - transfer in arb_transparent_transfer(), + transfer in arb_vectorized_transparent_transfer(10), code_hash in arb_hash(), ) -> (Tx, TxData) { header.tx_type = TxType::Wrapper(Box::new(wrapper)); @@ -1127,17 +1115,17 @@ pub mod testing { } prop_compose! { - /// Generate an arbitrary transfer transaction - pub fn arb_masp_transfer_tx()(transfer in arb_transparent_transfer())( + /// Generate an arbitrary masp transfer transaction + pub fn arb_masp_transfer_tx()(transfers in arb_vectorized_transparent_transfer(5))( mut header in arb_header(), wrapper in arb_wrapper_tx(), code_hash in arb_hash(), (masp_tx_type, (shielded_transfer, asset_types, build_params)) in prop_oneof![ (Just(MaspTxType::Shielded), arb_shielded_transfer(0..MAX_ASSETS)), - (Just(MaspTxType::Shielding), arb_shielding_transfer(encode_address(&transfer.source), 1)), - (Just(MaspTxType::Unshielding), arb_deshielding_transfer(encode_address(&transfer.target), 1)), + (Just(MaspTxType::Shielding), arb_shielding_transfer(encode_address(&transfers.0.first().unwrap().source), 1)), + (Just(MaspTxType::Unshielding), arb_deshielding_transfer(encode_address(&transfers.0.first().unwrap().target), 1)), ], - transfer in Just(transfer), + transfers in Just(transfers), ) -> (Tx, TxData) { header.tx_type = TxType::Wrapper(Box::new(wrapper)); let mut tx = Tx { header, sections: vec![] }; @@ -1147,7 +1135,7 @@ pub mod testing { let tx_data = match masp_tx_type { MaspTxType::Shielded => { tx.add_code_from_hash(code_hash, Some(TX_SHIELDED_TRANSFER_WASM.to_owned())); - let data = ShieldedTransfer { section_hash: shielded_section_hash }; + let data = ShieldedTransfer { fee_unshield: transfers.0.first().map(|transfer| UnshieldingTransferData { target: transfer.target.to_owned(), token: transfer.token.to_owned(), amount: transfer.amount }), section_hash: shielded_section_hash }; tx.add_data(data.clone()); TxData::ShieldedTransfer(data, (build_params, build_param_bytes)) }, @@ -1160,7 +1148,14 @@ pub mod testing { decoded.denom, ); tx.add_code_from_hash(code_hash, Some(TX_SHIELDING_TRANSFER_WASM.to_owned())); - let data = ShieldingTransfer {source: transfer.source, token, amount, shielded_section_hash }; + let data = transfers.0.into_iter().map(|transfer| + ShieldingTransferData{ + source: transfer.source, + token: token.clone(), + amount + } + ).collect(); + let data = ShieldingMultiTransfer{data, shielded_section_hash }; tx.add_data(data.clone()); TxData::ShieldingTransfer(data, (build_params, build_param_bytes)) }, @@ -1173,7 +1168,14 @@ pub mod testing { decoded.denom, ); tx.add_code_from_hash(code_hash, Some(TX_UNSHIELDING_TRANSFER_WASM.to_owned())); - let data = UnshieldingTransfer {target: transfer.target, token, amount, shielded_section_hash }; + let data = transfers.0.into_iter().map(|transfer| + UnshieldingTransferData{ + target: transfer.target, + token: token.clone(), + amount + } + ).collect(); + let data = UnshieldingMultiTransfer{data, shielded_section_hash }; tx.add_data(data.clone()); TxData::UnshieldingTransfer(data, (build_params, build_param_bytes)) }, diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs index 516a7ad611..aff209ed6c 100644 --- a/crates/sdk/src/masp.rs +++ 
b/crates/sdk/src/masp.rs @@ -9,6 +9,7 @@ use std::path::PathBuf; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; +use itertools::Itertools; use lazy_static::lazy_static; use masp_primitives::asset_type::AssetType; #[cfg(feature = "mainnet")] @@ -50,6 +51,7 @@ use masp_proofs::bls12_381::Bls12; use masp_proofs::prover::LocalTxProver; use masp_proofs::sapling::BatchValidator; use namada_core::address::Address; +use namada_core::arith::CheckedAdd; use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; pub use namada_core::masp::{ @@ -80,7 +82,9 @@ use thiserror::Error; use crate::error::{Error, QueryError}; use crate::io::Io; use crate::queries::Client; -use crate::rpc::{query_block, query_conversion, query_denom}; +use crate::rpc::{ + query_block, query_conversion, query_denom, query_native_token, +}; use crate::{display_line, edisplay_line, rpc, MaybeSend, MaybeSync, Namada}; /// Env var to point to a dir with MASP parameters. When not specified, @@ -121,6 +125,61 @@ pub struct ShieldedTransfer { pub epoch: MaspEpoch, } +/// The data for a masp fee payment +#[allow(missing_docs)] +#[derive(Debug)] +pub struct MaspFeeData { + pub sources: Vec, + pub target: Address, + pub token: Address, + pub amount: token::DenominatedAmount, +} + +/// The data for a single masp transfer +#[allow(missing_docs)] +#[derive(Debug)] +pub struct MaspTransferData { + pub source: TransferSource, + pub target: TransferTarget, + pub token: Address, + pub amount: token::DenominatedAmount, +} + +// The data for a masp transfer relative to a given source +#[derive(Hash, Eq, PartialEq)] +struct MaspSourceTransferData { + source: TransferSource, + token: Address, +} + +// The data for a masp transfer relative to a given target +#[derive(Hash, Eq, PartialEq)] +struct MaspTargetTransferData { + source: TransferSource, + target: TransferTarget, + token: Address, +} + +/// Data to log masp transactions' errors +#[allow(missing_docs)] +#[derive(Debug)] +pub struct MaspDataLog { + pub source: Option, + pub token: Address, + pub amount: token::DenominatedAmount, +} + +struct MaspTxReorderedData { + source_data: HashMap, + target_data: HashMap, + denoms: HashMap, +} + +// Data about the unspent amounts for any given shielded source coming from the +// spent notes in their possession that have been added to the builder. Can be used +// to either pay fees or to return change +type Changes = HashMap; + /// Shielded pool data for a token #[allow(missing_docs)] #[derive(Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] @@ -134,11 +193,17 @@ pub struct MaspTokenRewardData { } /// A return type for gen_shielded_transfer +#[allow(clippy::large_enum_variant)] #[derive(Error, Debug)] pub enum TransferErr { /// Build error for masp errors - #[error("{0}")] - Build(#[from] builder::Error), + #[error("{error}")] + Build { + /// The error + error: builder::Error, + /// The optional associated transfer data for logging purposes + data: Option, + }, /// errors #[error("{0}")] General(#[from] Error), @@ -475,15 +540,62 @@ pub fn find_valid_diversifier( } /// Determine if using the current note would actually bring us closer to our -/// target
Returns the unused amounts (change) of delta if any +pub fn is_amount_required( + src: I128Sum, + dest: I128Sum, + normed_delta: I128Sum, + opt_delta: Option, +) -> Option { + let mut changes = None; + let gap = dest.clone() - src; + for (asset_type, value) in gap.components() { - if *value > 0 && delta[asset_type] > 0 { - return true; - } + if *value > 0 && normed_delta[asset_type] > 0 { + let signed_change_amt = + checked!(normed_delta[asset_type] - *value).unwrap_or_default(); + let unsigned_change_amt = if signed_change_amt > 0 { + signed_change_amt + } else { + // Even if there's no change we still need to set the return + // value of this function to be Some so that the caller sees + // that this note should be used + 0 + }; + + let change_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + unsigned_change_amt, + ) + .expect("Change is guaranteed to be non-negative"); + changes = changes + .map(|prev| prev + change_amt.clone()) + .or(Some(change_amt)); + } + } + + // Because of the way conversions are computed, we need an extra step here + // if the token is not the native one + if let Some(delta) = opt_delta { + // Only if this note is going to be used, handle the assets in delta + // (not normalized) that are not part of dest + changes = changes.map(|mut chngs| { + for (delta_asset_type, delta_amt) in delta.components() { + if !dest.asset_types().contains(delta_asset_type) { + let rmng = I128Sum::from_nonnegative( + delta_asset_type.to_owned(), + *delta_amt, + ) + .expect("Change is guaranteed to be non-negative"); + chngs += rmng; + } + } + + chngs + }); } - false + + changes } /// a masp change @@ -498,6 +610,11 @@ pub struct MaspChange { /// a masp amount pub type MaspAmount = ValueSum<(Option, Address), token::Change>; +// A type tracking the notes used to construct a shielded transfer. Used to +// avoid reusing the same notes multiple times which would lead to an invalid +// transaction +type SpentNotesTracker = HashMap>; + /// An extension of Option's cloned method for pair types fn cloned_pair((a, b): (&T, &U)) -> (T, U) { (a.clone(), b.clone()) @@ -1324,13 +1441,17 @@ impl ShieldedContext { /// Collect enough unspent notes in this context to exceed the given amount /// of the specified asset type. Return the total value accumulated plus /// notes and the corresponding diversifiers/merkle paths that were used to - /// achieve the total value. + /// achieve the total value. Updates the changes map. 
+ #[allow(clippy::too_many_arguments)] pub async fn collect_unspent_notes( &mut self, context: &impl Namada, - vk: &ViewingKey, + spent_notes: &mut SpentNotesTracker, + sk: namada_core::masp::ExtendedSpendingKey, + is_native_token: bool, target: I128Sum, target_epoch: MaspEpoch, + changes: &mut Changes, ) -> Result< ( I128Sum, @@ -1339,6 +1460,7 @@ impl ShieldedContext { ), Error, > { + let vk = &to_viewing_key(&sk.into()).vk; // TODO: we should try to use the smallest notes possible to fund the // transaction to allow people to fetch less often // Establish connection with which to do exchange rate queries @@ -1346,15 +1468,24 @@ impl ShieldedContext { let mut val_acc = I128Sum::zero(); let mut normed_val_acc = I128Sum::zero(); let mut notes = Vec::new(); + // Retrieve the notes that can be spent by this key if let Some(avail_notes) = self.pos_map.get(vk).cloned() { for note_idx in &avail_notes { + // Skip spend notes already used in this transaction + if spent_notes + .get(vk) + .is_some_and(|set| set.contains(note_idx)) + { + continue; + } // No more transaction inputs are required once we have met // the target amount if normed_val_acc >= target { break; } - // Spent notes cannot contribute a new transaction's pool + // Spent notes from the shielded context (i.e. from previous + // transactions) cannot contribute a new transaction's pool if self.spents.contains(note_idx) { continue; } @@ -1376,16 +1507,29 @@ impl ShieldedContext { ) .await?; + let opt_delta = if is_native_token { + None + } else { + Some(contr.clone()) + }; // Use this note only if it brings us closer to our target - if is_amount_required( + if let Some(change) = is_amount_required( normed_val_acc.clone(), target.clone(), normed_contr.clone(), + opt_delta, ) { // Be sure to record the conversions used in computing // accumulated value val_acc += contr; normed_val_acc += normed_contr; + + // Update the changes + changes + .entry(sk) + .and_modify(|amt| *amt += &change) + .or_insert(change); + // Commit the conversions that were used to exchange conversions = proposed_convs; let merkle_path = self @@ -1411,6 +1555,13 @@ impl ShieldedContext { })?; // Commit this note to our transaction notes.push((*diversifier, note, merkle_path)); + // Append the note the list of used ones + spent_notes + .entry(vk.to_owned()) + .and_modify(|set| { + set.insert(*note_idx); + }) + .or_insert([*note_idx].into_iter().collect()); } } } @@ -1515,36 +1666,10 @@ impl ShieldedContext { /// amounts and signatures specified by the containing Transfer object. 
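Because gen_shielded_transfer (below) now accepts a vector of transfers plus optional fee data rather than a single source/target/token/amount, a caller assembles its inputs roughly as in the sketch below. The helper itself, its import paths and the element type of MaspFeeData::sources are assumptions for illustration, not part of this diff.

use namada_core::address::Address;
use namada_core::masp::{
    ExtendedSpendingKey, PaymentAddress, TransferSource, TransferTarget,
};
use namada_sdk::masp::{MaspFeeData, MaspTransferData};
use namada_sdk::token::DenominatedAmount;

// Hypothetical helper: one shielded transfer plus a MASP fee payment funded by
// a dedicated gas spending key.
fn example_inputs(
    source: ExtendedSpendingKey,
    gas_key: ExtendedSpendingKey,
    target: PaymentAddress,
    token: Address,
    amount: DenominatedAmount,
    fee_payer: Address,
    fee_token: Address,
    missing_fees: DenominatedAmount,
) -> (Vec<MaspTransferData>, Option<MaspFeeData>) {
    let data = vec![MaspTransferData {
        source: TransferSource::ExtendedSpendingKey(source),
        target: TransferTarget::PaymentAddress(target),
        token,
        amount,
    }];
    // `None` here would mean fees are paid transparently, as before this change.
    let fee_data = Some(MaspFeeData {
        sources: vec![gas_key],
        target: fee_payer,
        token: fee_token,
        amount: missing_fees,
    });
    (data, fee_data)
}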
pub async fn gen_shielded_transfer( context: &impl Namada, - source: &TransferSource, - target: &TransferTarget, - token: &Address, - amount: token::DenominatedAmount, + data: Vec, + fee_data: Option, update_ctx: bool, ) -> Result, TransferErr> { - // No shielded components are needed when neither source nor destination - // are shielded - - let spending_key = source.spending_key(); - let payment_address = target.payment_address(); - // No shielded components are needed when neither source nor - // destination are shielded - if spending_key.is_none() && payment_address.is_none() { - return Ok(None); - } - // We want to fund our transaction solely from supplied spending key - let spending_key = spending_key.map(|x| x.into()); - { - // Load the current shielded context given the spending key we - // possess - let mut shielded = context.shielded_mut().await; - let _ = shielded.load().await; - } - // Determine epoch in which to submit potential shielded transaction - let epoch = rpc::query_masp_epoch(context.client()).await?; - // Context required for storing which notes are in the source's - // possession - let memo = MemoBytes::empty(); - // Try to get a seed from env var, if any. #[allow(unused_mut)] let mut rng = StdRng::from_rng(OsRng).unwrap(); @@ -1567,7 +1692,6 @@ impl ShieldedContext { rng }; - // Now we build up the transaction within this object // TODO: if the user requested the default expiration, there might be a // small discrepancy between the datetime we calculate here and the one // we set for the transaction. This should be small enough to not cause @@ -1621,49 +1745,278 @@ impl ShieldedContext { // use from the masp crate to specify the expiration better expiration_height.into(), ); + // Determine epoch in which to submit potential shielded transaction + let epoch = rpc::query_masp_epoch(context.client()).await?; - // Convert transaction amount into MASP types - let Some(denom) = query_denom(context.client(), token).await else { - return Err(TransferErr::General(Error::from( - QueryError::General(format!("denomination for token {token}")), - ))); + let mut notes_tracker = SpentNotesTracker::new(); + { + // Load the current shielded context given + // the spending key we possess + let mut shielded = context.shielded_mut().await; + let _ = shielded.load().await; + } + + let Some(MaspTxReorderedData { + source_data, + target_data, + mut denoms, + }) = Self::reorder_data_for_masp_transfer(context, data).await? + else { + // No shielded components are needed when neither source nor + // destination are shielded + return Ok(None); }; + let mut changes = Changes::default(); + + for (MaspSourceTransferData { source, token }, amount) in &source_data { + Self::add_inputs( + context, + &mut builder, + source, + token, + amount, + epoch, + &denoms, + &mut notes_tracker, + &mut changes, + ) + .await?; + } + + for ( + MaspTargetTransferData { + source, + target, + token, + }, + amount, + ) in target_data + { + Self::add_outputs( + context, + &mut builder, + source, + &target, + token, + amount, + epoch, + &denoms, + ) + .await?; + } + + // Collect the fees if needed + if let Some(MaspFeeData { + sources, + target, + token, + amount, + }) = fee_data + { + Self::add_fees( + context, + &mut builder, + &source_data, + sources, + &target, + &token, + &amount, + epoch, + &mut denoms, + &mut notes_tracker, + &mut changes, + ) + .await?; + } + + // Finally, add outputs representing the change from this payment. 
+ Self::add_changes(&mut builder, changes)?; + + let builder_clone = builder.clone().map_builder(WalletMap); + // Build and return the constructed transaction + #[cfg(not(feature = "testing"))] + let prover = context.shielded().await.utils.local_tx_prover(); + #[cfg(feature = "testing")] + let prover = testing::MockTxProver(std::sync::Mutex::new(OsRng)); + let (masp_tx, metadata) = builder + .build( + &prover, + &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut RngBuildParams::new(OsRng), + ) + .map_err(|error| TransferErr::Build { error, data: None })?; + + if update_ctx { + // Cache the generated transfer + let mut shielded_ctx = context.shielded_mut().await; + shielded_ctx + .pre_cache_transaction(context, &[masp_tx.clone()]) + .await?; + } + + Ok(Some(ShieldedTransfer { + builder: builder_clone, + masp_tx, + metadata, + epoch, + })) + } + + // Group all the information for every source/token and target/token couple, + // and extract the denominations for all the tokens involved (expect the one + // involved in the fees if needed). This step is required so that we can + // collect the amount required for every couple and pass it to the + // appropriate function so that notes can be collected based on the correct + // amount. + async fn reorder_data_for_masp_transfer( + context: &impl Namada, + data: Vec, + ) -> Result, TransferErr> { + let mut source_data = + HashMap::::new(); + let mut target_data = + HashMap::::new(); + let mut denoms = HashMap::new(); + + for MaspTransferData { + source, + target, + token, + amount, + } in data + { + let spending_key = source.spending_key(); + let payment_address = target.payment_address(); + // No shielded components are needed when neither source nor + // destination are shielded + if spending_key.is_none() && payment_address.is_none() { + return Ok(None); + } + + if denoms.get(&token).is_none() { + if let Some(denom) = query_denom(context.client(), &token).await + { + denoms.insert(token.clone(), denom); + } else { + return Err(TransferErr::General(Error::from( + QueryError::General(format!( + "denomination for token {token}" + )), + ))); + }; + } + + let key = MaspSourceTransferData { + source: source.clone(), + token: token.clone(), + }; + match source_data.get_mut(&key) { + Some(prev_amount) => { + *prev_amount = checked!(prev_amount.to_owned() + amount) + .map_err(|e| TransferErr::General(e.into()))?; + } + None => { + source_data.insert(key, amount); + } + } + + let key = MaspTargetTransferData { + source, + target, + token, + }; + match target_data.get_mut(&key) { + Some(prev_amount) => { + *prev_amount = checked!(prev_amount.to_owned() + amount) + .map_err(|e| TransferErr::General(e.into()))?; + } + None => { + target_data.insert(key, amount); + } + } + } + + Ok(Some(MaspTxReorderedData { + source_data, + target_data, + denoms, + })) + } + + // Add the necessary transaction inputs to the builder. 
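The reordering step above can be pictured with a small self-contained model that uses string and u64 stand-ins for the real TransferSource/Address/DenominatedAmount types: amounts for the same (source, token) couple are folded together with checked arithmetic so that notes are later collected once per couple rather than once per transfer. In the real function the same folding is done a second time keyed by (source, target, token) so that outputs can likewise be added once per couple.

use std::collections::HashMap;

// Illustrative stand-in for the (source, token) grouping performed by
// reorder_data_for_masp_transfer; overflow surfaces as None, mirroring the
// checked!() error path.
fn group_by_source_and_token(
    transfers: &[(String, String, u64)], // (source, token, amount)
) -> Option<HashMap<(String, String), u64>> {
    let mut grouped: HashMap<(String, String), u64> = HashMap::new();
    for (source, token, amount) in transfers {
        let entry = grouped.entry((source.clone(), token.clone())).or_insert(0);
        *entry = entry.checked_add(*amount)?;
    }
    Some(grouped)
}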
+ #[allow(clippy::too_many_arguments)] + async fn add_inputs( + context: &impl Namada, + builder: &mut Builder, + source: &TransferSource, + token: &Address, + amount: &token::DenominatedAmount, + epoch: MaspEpoch, + denoms: &HashMap, + notes_tracker: &mut SpentNotesTracker, + changes: &mut Changes, + ) -> Result, TransferErr> { + // We want to fund our transaction solely from supplied spending key + let spending_key = source.spending_key(); + + // Now we build up the transaction within this object + + // Convert transaction amount into MASP types + // Ok to unwrap cause we've already seen the token before, the + // denomination must be there + let denom = denoms.get(token).unwrap(); let (asset_types, masp_amount) = { let mut shielded = context.shielded_mut().await; // Do the actual conversion to an asset type let amount = shielded - .convert_amount( + .convert_namada_amount_to_masp( context.client(), epoch, token, - denom, + denom.to_owned(), amount.amount(), ) .await?; - // Make sure to save any decodings of the asset types used so that - // balance queries involving them are successful + // Make sure to save any decodings of the asset types used so + // that balance queries involving them are + // successful let _ = shielded.save().await; amount }; // If there are shielded inputs - if let Some(sk) = spending_key { - // Locate unspent notes that can help us meet the transaction amount - let (_, unspent_notes, used_convs) = context + let added_amt = if let Some(sk) = spending_key { + let is_native_token = + &query_native_token(context.client()).await? == token; + // Locate unspent notes that can help us meet the transaction + // amount + let (added_amount, unspent_notes, used_convs) = context .shielded_mut() .await .collect_unspent_notes( context, - &to_viewing_key(&sk).vk, + notes_tracker, + sk, + is_native_token, I128Sum::from_sum(masp_amount), epoch, + changes, ) .await?; // Commit the notes found to our transaction for (diversifier, note, merkle_path) in unspent_notes { builder - .add_sapling_spend(sk, diversifier, note, merkle_path) - .map_err(builder::Error::SaplingBuild)?; + .add_sapling_spend( + sk.into(), + diversifier, + note, + merkle_path, + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; } // Commit the conversion notes used during summation for (conv, wit, value) in used_convs.values() { @@ -1674,13 +2027,18 @@ impl ShieldedContext { *value as u64, wit.clone(), ) - .map_err(builder::Error::SaplingBuild)?; + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; } } + + Some(added_amount) } else { - // We add a dummy UTXO to our transaction, but only the source of - // the parent Transfer object is used to validate fund - // availability + // We add a dummy UTXO to our transaction, but only the source + // of the parent Transfer object is used to + // validate fund availability let source_enc = source .address() .ok_or_else(|| { @@ -1706,23 +2064,43 @@ impl ShieldedContext { value: amount_part, address: script, }) - .map_err(builder::Error::TransparentBuild)?; + .map_err(|e| TransferErr::Build { + error: builder::Error::TransparentBuild(e), + data: None, + })?; } } - } + None + }; + + Ok(added_amt) + } + + // Add the necessary transaction outputs to the builder + #[allow(clippy::too_many_arguments)] + async fn add_outputs( + context: &impl Namada, + builder: &mut Builder, + source: TransferSource, + target: &TransferTarget, + token: Address, + amount: token::DenominatedAmount, + 
epoch: MaspEpoch, + denoms: &HashMap, + ) -> Result<(), TransferErr> { // Anotate the asset type in the value balance with its decoding in // order to facilitate cross-epoch computations - let value_balance = builder.value_balance(); let value_balance = context .shielded_mut() .await - .decode_sum(context.client(), value_balance) + .decode_sum(context.client(), builder.value_balance()) .await; - // If we are sending to a transparent output, then we will need to embed - // the transparent target address into the shielded transaction so that - // it can be signed + let payment_address = target.payment_address(); + // If we are sending to a transparent output, then we will need to + // embed the transparent target address into the + // shielded transaction so that it can be signed let transparent_target_hash = if payment_address.is_none() { let target_enc = target .address() @@ -1738,12 +2116,14 @@ impl ShieldedContext { } else { None }; - // This indicates how many more assets need to be sent to the receiver - // in order to satisfy the requested transfer amount. + // This indicates how many more assets need to be sent to the + // receiver in order to satisfy the requested transfer + // amount. let mut rem_amount = amount.amount().raw_amount().0; - // If we are sending to a shielded address, we may need the outgoing - // viewing key in the following computations. - let ovk_opt = spending_key.map(|x| x.expsk.ovk); + + // Ok to unwrap cause we've already seen the token before, the + // denomination must be there + let denom = denoms.get(&token).unwrap(); // Now handle the outputs of this transaction // Loop through the value balance components and see which @@ -1752,8 +2132,8 @@ impl ShieldedContext { let rem_amount = &mut rem_amount[decoded.position as usize]; // Only asset types with the correct token can contribute. But // there must be a demonstrated need for it. - if decoded.token == *token - && decoded.denom == denom + if decoded.token == token + && &decoded.denom == denom && decoded.epoch.map_or(true, |vbal_epoch| vbal_epoch <= epoch) && *rem_amount > 0 { @@ -1764,6 +2144,11 @@ impl ShieldedContext { // We want to take at most the remaining quota for the // current denomination to the receiver let contr = std::cmp::min(*rem_amount as u128, val) as u64; + // If we are sending to a shielded address, we need the outgoing + // viewing key in the following computations. + let ovk_opt = source + .spending_key() + .map(|x| ExtendedSpendingKey::from(x).expsk.ovk); // Make transaction output tied to the current token, // denomination, and epoch. 
if let Some(pa) = payment_address { @@ -1774,9 +2159,12 @@ impl ShieldedContext { pa.into(), *asset_type, contr, - memo.clone(), + MemoBytes::empty(), ) - .map_err(builder::Error::SaplingBuild)?; + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; } else { // If there is a transparent output let hash = transparent_target_hash @@ -1791,7 +2179,10 @@ impl ShieldedContext { *asset_type, contr, ) - .map_err(builder::Error::TransparentBuild)?; + .map_err(|e| TransferErr::Build { + error: builder::Error::TransparentBuild(e), + data: None, + })?; } // Lower what is required of the remaining contribution *rem_amount -= contr; @@ -1800,86 +2191,327 @@ impl ShieldedContext { // Nothing must remain to be included in output if rem_amount != [0; 4] { + let (asset_types, _) = { + let mut shielded = context.shielded_mut().await; + // Do the actual conversion to an asset type + let amount = shielded + .convert_namada_amount_to_masp( + context.client(), + epoch, + &token, + denom.to_owned(), + amount.amount(), + ) + .await?; + // Make sure to save any decodings of the asset types used so + // that balance queries involving them are + // successful + let _ = shielded.save().await; + amount + }; + // Convert the shortfall into a I128Sum let mut shortfall = I128Sum::zero(); for (asset_type, val) in asset_types.iter().zip(rem_amount) { shortfall += I128Sum::from_pair(*asset_type, val.into()); } - // Return an insufficient ffunds error - return Result::Err(TransferErr::from( - builder::Error::InsufficientFunds(shortfall), - )); + // Return an insufficient funds error + return Result::Err(TransferErr::Build { + error: builder::Error::InsufficientFunds(shortfall), + data: Some(MaspDataLog { + source: Some(source), + token, + amount, + }), + }); } - // Now add outputs representing the change from this payment - if let Some(sk) = spending_key { - // Represents the amount of inputs we are short by - let mut additional = I128Sum::zero(); - for (asset_type, amt) in builder.value_balance().components() { - match amt.cmp(&0) { - Ordering::Greater => { - // Send the change in this asset type back to the sender - builder - .add_sapling_output( - Some(sk.expsk.ovk), - sk.default_address().1, - *asset_type, - *amt as u64, - memo.clone(), - ) - .map_err(builder::Error::SaplingBuild)?; - } - Ordering::Less => { - // Record how much of the current asset type we are - // short by - additional += - I128Sum::from_nonnegative(*asset_type, -*amt) - .map_err(|()| { - Error::Other(format!( - "from non negative conversion: {}", - line!() - )) - })?; - } - Ordering::Equal => {} + Ok(()) + } + + // Add the necessary note to include a masp fee payment in the transaction. + // Funds are gathered in the following order: + // + // 1. From the residual values of the already included spend notes (i.e. + // changes) + // 2. From new spend notes of the transaction's sources + // 3. 
From new spend notes of the optional gas spending keys + #[allow(clippy::too_many_arguments)] + async fn add_fees( + context: &impl Namada, + builder: &mut Builder, + source_data: &HashMap, + sources: Vec, + target: &Address, + token: &Address, + amount: &token::DenominatedAmount, + epoch: MaspEpoch, + denoms: &mut HashMap, + notes_tracker: &mut SpentNotesTracker, + changes: &mut Changes, + ) -> Result<(), TransferErr> { + if denoms.get(token).is_none() { + if let Some(denom) = query_denom(context.client(), token).await { + denoms.insert(token.to_owned(), denom); + } else { + return Err(TransferErr::General(Error::from( + QueryError::General(format!( + "denomination for token {token}" + )), + ))); + }; + } + + let raw_amount = amount.amount().raw_amount().0; + let (asset_types, _) = { + let mut shielded = context.shielded_mut().await; + // Do the actual conversion to an asset type + let (asset_types, amount) = shielded + .convert_namada_amount_to_masp( + context.client(), + epoch, + token, + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + amount.amount(), + ) + .await?; + // Make sure to save any decodings of the asset types used so + // that balance queries involving them are + // successful + let _ = shielded.save().await; + (asset_types, amount) + }; + + let mut fees = I128Sum::zero(); + // Convert the shortfall into a I128Sum + for (asset_type, val) in asset_types.iter().zip(raw_amount) { + fees += I128Sum::from_nonnegative(*asset_type, val.into()) + .map_err(|()| { + TransferErr::General(Error::Other( + "Fee amount is expected expected to be non-negative" + .to_string(), + )) + })?; + } + + // 1. Try to use the change to pay fees + let mut temp_changes = Changes::default(); + + for (sp, changes) in changes.iter() { + for (asset_type, change) in changes.components() { + for (_, fee_amt) in fees + .clone() + .components() + .filter(|(axt, _)| *axt == asset_type) + { + // Get the minimum between the available change and + // the due fee + let output_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + *change.min(fee_amt), + ) + .map_err(|()| { + TransferErr::General(Error::Other( + "Fee amount is expected to be non-negative" + .to_string(), + )) + })?; + let denominated_output_amt = context + .shielded_mut() + .await + .convert_masp_amount_to_namada( + context.client(), + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + output_amt.clone(), + ) + .await?; + + Self::add_outputs( + context, + builder, + TransferSource::ExtendedSpendingKey(sp.to_owned()), + &TransferTarget::Address(target.clone()), + token.clone(), + denominated_output_amt, + epoch, + denoms, + ) + .await?; + + fees -= &output_amt; + // Update the changes + temp_changes + .entry(*sp) + .and_modify(|amt| *amt += &output_amt) + .or_insert(output_amt); } } - // If we are short by a non-zero amount, then we have insufficient - // funds - if !additional.is_zero() { - return Err(TransferErr::from( - builder::Error::InsufficientFunds(additional), - )); + + if fees.is_zero() { + break; } } - let builder_clone = builder.clone().map_builder(WalletMap); - // Build and return the constructed transaction - #[cfg(not(feature = "testing"))] - let prover = context.shielded().await.utils.local_tx_prover(); - #[cfg(feature = "testing")] - let prover = testing::MockTxProver(std::sync::Mutex::new(OsRng)); - let (masp_tx, metadata) = builder.build( - &prover, - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut RngBuildParams::new(OsRng), - )?; + // Decrease the changes by the amounts used for fee payment 
+ for (sp, temp_changes) in temp_changes.iter() { + for (asset_type, temp_change) in temp_changes.components() { + let output_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + *temp_change, + ) + .map_err(|()| { + TransferErr::General(Error::Other( + "Fee amount is expected expected to be non-negative" + .to_string(), + )) + })?; - if update_ctx { - // Cache the generated transfer - let mut shielded_ctx = context.shielded_mut().await; - shielded_ctx - .pre_cache_transaction(context, &[masp_tx.clone()]) - .await?; + // Entry is guaranteed to be in the map + changes.entry(*sp).and_modify(|amt| *amt -= &output_amt); + } } - Ok(Some(ShieldedTransfer { - builder: builder_clone, - masp_tx, - metadata, - epoch, - })) + if !fees.is_zero() { + // 2. Look for unused spent notes of the sources and the optional + // gas spending keys (sources first) + for fee_source in + source_data.iter().map(|(src, _)| src.source.clone()).chain( + sources + .into_iter() + .map(TransferSource::ExtendedSpendingKey), + ) + { + for (asset_type, fee_amt) in fees.clone().components() { + let input_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + *fee_amt, + ) + .map_err(|()| { + TransferErr::General(Error::Other( + "Fee amount is expected expected to be \ + non-negative" + .to_string(), + )) + })?; + let denominated_fee = context + .shielded_mut() + .await + .convert_masp_amount_to_namada( + context.client(), + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + input_amt.clone(), + ) + .await?; + + let Some(found_amt) = Self::add_inputs( + context, + builder, + &fee_source, + token, + &denominated_fee, + epoch, + denoms, + notes_tracker, + changes, + ) + .await? + else { + continue; + }; + // Pick the minimum between the due fee and the amount found + let output_amt = match found_amt.partial_cmp(&input_amt) { + None | Some(Ordering::Less) => found_amt, + _ => input_amt.clone(), + }; + let denom_amt = context + .shielded_mut() + .await + .convert_masp_amount_to_namada( + context.client(), + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + output_amt.clone(), + ) + .await?; + + Self::add_outputs( + context, + builder, + fee_source.clone(), + &TransferTarget::Address(target.clone()), + token.clone(), + denom_amt, + epoch, + denoms, + ) + .await?; + + fees -= &output_amt; + } + + if fees.is_zero() { + break; + } + } + } + + if !fees.is_zero() { + return Result::Err(TransferErr::Build { + error: builder::Error::InsufficientFunds(fees), + data: Some(MaspDataLog { + source: None, + token: token.to_owned(), + amount: *amount, + }), + }); + } + + Ok(()) + } + + // Consumes the changes and adds them back to the original sources to + // balance the transaction. 
This function has to be called after + // `add_fees` cause we might have some change coming from there too + #[allow(clippy::result_large_err)] + fn add_changes( + builder: &mut Builder, + changes: Changes, + ) -> Result<(), TransferErr> { + for (sp, changes) in changes.into_iter() { + for (asset_type, amt) in changes.components() { + if let Ordering::Greater = amt.cmp(&0) { + let sk = ExtendedSpendingKey::from(sp.to_owned()); + // Send the change in this asset type back to the sender + builder + .add_sapling_output( + Some(sk.expsk.ovk), + sk.default_address().1, + *asset_type, + *amt as u64, + MemoBytes::empty(), + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } + } + } + + // Final safety check on the value balance to verify that the + // transaction is balanced + let value_balance = builder.value_balance(); + if !value_balance.is_zero() { + return Result::Err(TransferErr::Build { + error: builder::Error::InsufficientFunds(value_balance), + data: None, + }); + } + + Ok(()) } // Updates the internal state with the data of the newly generated @@ -1946,8 +2578,8 @@ impl ShieldedContext { Ok(asset_type) } - /// Convert Anoma amount and token type to MASP equivalents - async fn convert_amount( + /// Convert Namada amount and token type to MASP equivalents + async fn convert_namada_amount_to_masp( &mut self, client: &C, epoch: MaspEpoch, @@ -1981,6 +2613,28 @@ impl ShieldedContext { amount, )) } + + /// Convert MASP amount to Namada equivalent + async fn convert_masp_amount_to_namada( + &mut self, + client: &C, + denom: Denomination, + amt: I128Sum, + ) -> Result { + let mut amount = token::Amount::zero(); + let value_sum = self.decode_sum(client, amt).await; + + for ((_, decoded), val) in value_sum.components() { + let positioned_amt = token::Amount::from_masp_denominated_i128( + *val, + decoded.position, + ) + .unwrap_or_default(); + amount = checked!(amount + positioned_amt)?; + } + + Ok(token::DenominatedAmount::new(amount, denom)) + } } // Retrieves all the indexes at the specified height which refer diff --git a/crates/sdk/src/signing.rs b/crates/sdk/src/signing.rs index a6e1ef5f44..aa9417c36f 100644 --- a/crates/sdk/src/signing.rs +++ b/crates/sdk/src/signing.rs @@ -702,61 +702,168 @@ fn format_outputs(output: &mut Vec) { } } +enum TransferSide<'a> { + Source(&'a Address), + Target(&'a Address), +} + enum TokenTransfer<'a> { Transparent(&'a token::TransparentTransfer), Shielded, - Shielding(&'a token::ShieldingTransfer), - Unshielding(&'a token::UnshieldingTransfer), + Shielding(&'a token::ShieldingMultiTransfer), + Unshielding(&'a token::UnshieldingMultiTransfer), } impl TokenTransfer<'_> { - fn source(&self) -> Option<&Address> { + fn sources(&self) -> Vec<&Address> { match self { - TokenTransfer::Transparent(transfer) => Some(&transfer.source), - TokenTransfer::Shielded => None, - TokenTransfer::Shielding(transfer) => Some(&transfer.source), - TokenTransfer::Unshielding(_) => None, + TokenTransfer::Transparent(transfers) => transfers + .0 + .iter() + .map(|transfer| &transfer.source) + .collect(), + TokenTransfer::Shielded => Default::default(), + TokenTransfer::Shielding(transfers) => transfers + .data + .iter() + .map(|transfer| &transfer.source) + .collect(), + TokenTransfer::Unshielding(_) => Default::default(), } } - fn target(&self) -> Option<&Address> { + fn targets(&self) -> Vec<&Address> { match self { - TokenTransfer::Transparent(transfer) => Some(&transfer.target), - TokenTransfer::Shielded => None, - 
TokenTransfer::Shielding(_) => None, - TokenTransfer::Unshielding(transfer) => Some(&transfer.target), + TokenTransfer::Transparent(transfers) => transfers + .0 + .iter() + .map(|transfer| &transfer.target) + .collect(), + + TokenTransfer::Shielded => Default::default(), + TokenTransfer::Shielding(_) => Default::default(), + TokenTransfer::Unshielding(transfers) => transfers + .data + .iter() + .map(|transfer| &transfer.target) + .collect(), } } - fn token_and_amount(&self) -> Option<(&Address, DenominatedAmount)> { - match self { - TokenTransfer::Transparent(transfer) => { - Some((&transfer.token, transfer.amount)) + fn tokens_and_amounts( + &self, + address: TransferSide<'_>, + ) -> Result, Error> { + Ok(match self { + TokenTransfer::Transparent(transfers) => { + let mut map: HashMap<&Address, DenominatedAmount> = + HashMap::new(); + + match address { + TransferSide::Source(source) => { + for transfer in &transfers.0 { + if source == &transfer.source { + Self::update_token_amount_map( + &mut map, + &transfer.token, + transfer.amount, + )?; + } + } + } + TransferSide::Target(target) => { + for transfer in &transfers.0 { + if target == &transfer.target { + Self::update_token_amount_map( + &mut map, + &transfer.token, + transfer.amount, + )?; + } + } + } + } + + map + } + TokenTransfer::Shielded => Default::default(), + TokenTransfer::Shielding(transfers) => { + let mut map: HashMap<&Address, DenominatedAmount> = + HashMap::new(); + + if let TransferSide::Source(source_addr) = address { + for transfer in &transfers.data { + if &transfer.source == source_addr { + Self::update_token_amount_map( + &mut map, + &transfer.token, + transfer.amount, + )?; + } + } + } + + map + } + TokenTransfer::Unshielding(transfers) => { + let mut map: HashMap<&Address, DenominatedAmount> = + HashMap::new(); + + if let TransferSide::Target(target_addr) = address { + for transfer in &transfers.data { + if &transfer.target == target_addr { + Self::update_token_amount_map( + &mut map, + &transfer.token, + transfer.amount, + )?; + } + } + } + + map } - TokenTransfer::Shielded => None, - TokenTransfer::Shielding(transfer) => { - Some((&transfer.token, transfer.amount)) + }) + } + + fn update_token_amount_map<'a>( + map: &mut HashMap<&'a Address, DenominatedAmount>, + token: &'a Address, + amount: DenominatedAmount, + ) -> Result<(), Error> { + match map.get_mut(token) { + Some(prev_amount) => { + *prev_amount = checked!(prev_amount + amount)?; } - TokenTransfer::Unshielding(transfer) => { - Some((&transfer.token, transfer.amount)) + None => { + map.insert(token, amount); } } + + Ok(()) } } -/// Adds a Ledger output for the sender and destination for transparent and MASP -/// transactions +/// Adds a Ledger output for the senders and destinations for transparent and +/// MASP transactions async fn make_ledger_token_transfer_endpoints( tokens: &HashMap, output: &mut Vec, transfer: TokenTransfer<'_>, builder: Option<&MaspBuilder>, assets: &HashMap, -) { - if let Some(source) = transfer.source() { - output.push(format!("Sender : {}", source)); - if let Some((token, amount)) = transfer.token_and_amount() { - make_ledger_amount_addr(tokens, output, amount, token, "Sending "); +) -> Result<(), Error> { + let sources = transfer.sources(); + if !sources.is_empty() { + for source in transfer.sources() { + output.push(format!("Sender : {}", source)); + for (token, amount) in + transfer.tokens_and_amounts(TransferSide::Source(source))? 
+ { + make_ledger_amount_addr( + tokens, output, amount, token, "Sending ", + ); + } } } else if let Some(builder) = builder { for sapling_input in builder.builder.sapling_inputs() { @@ -773,16 +880,21 @@ async fn make_ledger_token_transfer_endpoints( .await; } } - if let Some(target) = transfer.target() { - output.push(format!("Destination : {}", target)); - if let Some((token, amount)) = transfer.token_and_amount() { - make_ledger_amount_addr( - tokens, - output, - amount, - token, - "Receiving ", - ); + let targets = transfer.targets(); + if !targets.is_empty() { + for target in targets { + output.push(format!("Destination : {}", target)); + for (token, amount) in + transfer.tokens_and_amounts(TransferSide::Target(target))? + { + make_ledger_amount_addr( + tokens, + output, + amount, + token, + "Receiving ", + ); + } } } else if let Some(builder) = builder { for sapling_output in builder.builder.sapling_outputs() { @@ -799,6 +911,8 @@ async fn make_ledger_token_transfer_endpoints( .await; } } + + Ok(()) } /// Convert decimal numbers into the format used by Ledger. Specifically remove @@ -1294,7 +1408,7 @@ pub async fn to_ledger_vector( None, &HashMap::default(), ) - .await; + .await?; make_ledger_token_transfer_endpoints( &tokens, &mut tv.output_expert, @@ -1302,7 +1416,7 @@ pub async fn to_ledger_vector( None, &HashMap::default(), ) - .await; + .await?; } else if code_sec.tag == Some(TX_SHIELDED_TRANSFER_WASM.to_string()) { let transfer = token::ShieldedTransfer::try_from_slice( &tx.data(cmt) @@ -1341,7 +1455,7 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; make_ledger_token_transfer_endpoints( &tokens, &mut tv.output_expert, @@ -1349,9 +1463,9 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; } else if code_sec.tag == Some(TX_SHIELDING_TRANSFER_WASM.to_string()) { - let transfer = token::ShieldingTransfer::try_from_slice( + let transfer = token::ShieldingMultiTransfer::try_from_slice( &tx.data(cmt) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) @@ -1388,7 +1502,7 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; make_ledger_token_transfer_endpoints( &tokens, &mut tv.output_expert, @@ -1396,10 +1510,10 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; } else if code_sec.tag == Some(TX_UNSHIELDING_TRANSFER_WASM.to_string()) { - let transfer = token::UnshieldingTransfer::try_from_slice( + let transfer = token::UnshieldingMultiTransfer::try_from_slice( &tx.data(cmt) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) @@ -1436,7 +1550,7 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; make_ledger_token_transfer_endpoints( &tokens, &mut tv.output_expert, @@ -1444,7 +1558,7 @@ pub async fn to_ledger_vector( builder, &asset_types, ) - .await; + .await?; } else if code_sec.tag == Some(TX_IBC_WASM.to_string()) { let any_msg = Any::decode( tx.data(cmt) diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index b9daabeca2..980c3ad958 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -37,7 +37,8 @@ use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; use namada_core::ibc::primitives::Timestamp as IbcTimestamp; use namada_core::key::{self, *}; use namada_core::masp::{ - AssetData, MaspEpoch, PaymentAddress, TransferSource, TransferTarget, + AssetData, ExtendedSpendingKey, MaspEpoch, PaymentAddress, TransferSource, + TransferTarget, }; use namada_core::storage; use 
namada_core::storage::Epoch; @@ -65,12 +66,20 @@ use namada_tx::data::{pos, BatchedTxResult, ResultCode, TxResult}; pub use namada_tx::{Authorization, *}; use num_traits::Zero; use rand_core::{OsRng, RngCore}; +use token::ShieldingTransferData; +use crate::args::{ + SdkTypes, TxShieldedTransferData, TxShieldingTransferData, + TxTransparentTransferData, TxUnshieldingTransferData, +}; use crate::control_flow::time; use crate::error::{EncodingError, Error, QueryError, Result, TxSubmitError}; use crate::io::Io; use crate::masp::TransferErr::Build; -use crate::masp::{ShieldedContext, ShieldedTransfer}; +use crate::masp::{ + MaspDataLog, MaspFeeData, MaspTransferData, ShieldedContext, + ShieldedTransfer, +}; use crate::queries::Client; use crate::rpc::{ self, get_validator_stake, query_wasm_code_hash, validate_amount, @@ -2457,7 +2466,7 @@ pub async fn build_ibc_transfer( Some(source.clone()), ) .await?; - let (fee_amount, updated_balance) = + let (fee_per_gas_unit, updated_balance) = if let TransferSource::ExtendedSpendingKey(_) = args.source { // MASP fee payment (validate_fee(context, &args.tx).await?, None) @@ -2513,15 +2522,38 @@ pub async fn build_ibc_transfer( query_wasm_code_hash(context, args.tx_code_path.to_str().unwrap()) .await .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let masp_transfer_data = vec![MaspTransferData { + source: args.source.clone(), + target: TransferTarget::Address(Address::Internal( + InternalAddress::Ibc, + )), + token: args.token.clone(), + amount: validated_amount, + }]; + + // Add masp fee payment if necessary + let masp_fee_data = get_masp_fee_payment_amount( + context, + &args.tx, + fee_per_gas_unit, + &signing_data.fee_payer, + args.gas_spending_keys.clone(), + ) + .await?; + let fee_unshield = + masp_fee_data + .as_ref() + .map(|fee_data| token::UnshieldingTransferData { + target: fee_data.target.to_owned(), + token: fee_data.token.to_owned(), + amount: fee_data.amount, + }); // For transfer from a spending key let shielded_parts = construct_shielded_parts( context, - &args.source, - // The token will be escrowed to IBC address - &TransferTarget::Address(Address::Internal(InternalAddress::Ibc)), - &args.token, - validated_amount, + masp_transfer_data, + masp_fee_data, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await?; @@ -2568,10 +2600,12 @@ pub async fn build_ibc_transfer( let masp_tx_hash = tx.add_masp_tx_section(shielded_transfer.masp_tx.clone()).1; let transfer = token::ShieldingTransfer { - // The token will be escrowed to IBC address - source: source.clone(), - token: args.token.clone(), - amount: validated_amount, + data: ShieldingTransferData { + // The token will be escrowed to IBC address + source: source.clone(), + token: args.token.clone(), + amount: validated_amount, + }, // Link the Transfer to the MASP Transaction by hash code shielded_section_hash: masp_tx_hash, }; @@ -2623,7 +2657,12 @@ pub async fn build_ibc_transfer( timeout_height_on_b: timeout_height, timeout_timestamp_on_b: timeout_timestamp, }; - MsgTransfer { message, transfer }.serialize_to_vec() + MsgTransfer { + message, + transfer, + fee_unshield, + } + .serialize_to_vec() } else if let Some((trace_path, base_class_id, token_id)) = is_nft_trace(&ibc_denom) { @@ -2654,7 +2693,12 @@ pub async fn build_ibc_transfer( timeout_height_on_b: timeout_height, timeout_timestamp_on_b: timeout_timestamp, }; - MsgNftTransfer { message, transfer }.serialize_to_vec() + MsgNftTransfer { + message, + transfer, + fee_unshield, + } + .serialize_to_vec() } else { return 
Err(Error::Other(format!("Invalid IBC denom: {ibc_denom}"))); }; @@ -2668,7 +2712,7 @@ pub async fn build_ibc_transfer( prepare_tx( &args.tx, &mut tx, - fee_amount, + fee_per_gas_unit, signing_data.fee_payer.clone(), ) .await?; @@ -2831,64 +2875,93 @@ pub async fn build_transparent_transfer( context: &N, args: &mut args::TxTransparentTransfer, ) -> Result<(Tx, SigningTxData)> { - let source = &args.source; - let target = &args.target; + let mut transfers = vec![]; + + // Evaluate signer and fees + let (signing_data, fee_amount, updated_balance) = { + let source = if args.data.len() == 1 { + // If only one transfer take its source as the signer + args.data + .first() + .map(|transfer_data| transfer_data.source.clone()) + } else { + // Otherwise the caller is required to pass the public keys in the + // argument + None + }; - let default_signer = Some(source.clone()); - let signing_data = signing::aux_signing_data( - context, - &args.tx, - Some(source.clone()), - default_signer, - ) - .await?; + let signing_data = signing::aux_signing_data( + context, + &args.tx, + source.clone(), + source, + ) + .await?; - // Transparent fee payment - let (fee_amount, updated_balance) = - validate_transparent_fee(context, &args.tx, &signing_data.fee_payer) - .await - .map(|(fee_amount, updated_balance)| { - (fee_amount, Some(updated_balance)) - })?; + // Transparent fee payment + let (fee_amount, updated_balance) = validate_transparent_fee( + context, + &args.tx, + &signing_data.fee_payer, + ) + .await + .map(|(fee_amount, updated_balance)| { + (fee_amount, Some(updated_balance)) + })?; - // Check that the source address exists on chain - source_exists_or_err(source.clone(), args.tx.force, context).await?; - // Check that the target address exists on chain - target_exists_or_err(target.clone(), args.tx.force, context).await?; + (signing_data, fee_amount, updated_balance) + }; - // Validate the amount given - let validated_amount = - validate_amount(context, args.amount, &args.token, args.tx.force) + for TxTransparentTransferData { + source, + target, + token, + amount, + } in &args.data + { + // Check that the source address exists on chain + source_exists_or_err(source.clone(), args.tx.force, context).await?; + // Check that the target address exists on chain + target_exists_or_err(target.clone(), args.tx.force, context).await?; + + // Validate the amount given + let validated_amount = + validate_amount(context, amount.to_owned(), token, args.tx.force) + .await?; + + // Check the balance of the source + if let Some(updated_balance) = &updated_balance { + let check_balance = if &updated_balance.source == source + && &updated_balance.token == token + { + CheckBalance::Balance(updated_balance.post_balance) + } else { + CheckBalance::Query(balance_key(token, source)) + }; + + check_balance_too_low_err( + token, + source, + validated_amount.amount(), + check_balance, + args.tx.force, + context, + ) .await?; + } - // Check the balance of the source - if let Some(updated_balance) = updated_balance { - let check_balance = if &updated_balance.source == source - && updated_balance.token == args.token - { - CheckBalance::Balance(updated_balance.post_balance) - } else { - CheckBalance::Query(balance_key(&args.token, source)) + // Construct the corresponding transparent Transfer object + let transfer_data = token::TransparentTransferData { + source: source.to_owned(), + target: target.to_owned(), + token: token.to_owned(), + amount: validated_amount, }; - check_balance_too_low_err( - &args.token, - source, - 
validated_amount.amount(), - check_balance, - args.tx.force, - context, - ) - .await?; + transfers.push(transfer_data); } - // Construct the corresponding transparent Transfer object - let transfer = token::TransparentTransfer { - source: source.clone(), - target: target.clone(), - token: args.token.clone(), - amount: validated_amount, - }; + let transfer = token::TransparentTransfer(transfers); let tx = build_pow_flag( context, @@ -2908,31 +2981,56 @@ pub async fn build_shielded_transfer( context: &N, args: &mut args::TxShieldedTransfer, ) -> Result<(Tx, SigningTxData)> { - let default_signer = Some(MASP); - let signing_data = signing::aux_signing_data( + let signing_data = + signing::aux_signing_data(context, &args.tx, Some(MASP), Some(MASP)) + .await?; + + // Shielded fee payment + let fee_per_gas_unit = validate_fee(context, &args.tx).await?; + + let mut transfer_data = vec![]; + for TxShieldedTransferData { + source, + target, + token, + amount, + } in &args.data + { + // Validate the amount given + let validated_amount = + validate_amount(context, amount.to_owned(), token, args.tx.force) + .await?; + + transfer_data.push(MaspTransferData { + source: TransferSource::ExtendedSpendingKey(source.to_owned()), + target: TransferTarget::PaymentAddress(target.to_owned()), + token: token.to_owned(), + amount: validated_amount, + }); + } + + // Add masp fee payment if necessary + let masp_fee_data = get_masp_fee_payment_amount( context, &args.tx, - Some(MASP), - default_signer, + fee_per_gas_unit, + &signing_data.fee_payer, + args.gas_spending_keys.clone(), ) .await?; + let fee_unshield = + masp_fee_data + .as_ref() + .map(|fee_data| token::UnshieldingTransferData { + target: fee_data.target.to_owned(), + token: fee_data.token.to_owned(), + amount: fee_data.amount, + }); - // Shielded fee payment - let fee_amount = validate_fee(context, &args.tx).await?; - - // Validate the amount given - let validated_amount = - validate_amount(context, args.amount, &args.token, args.tx.force) - .await?; - - // TODO(namada#2597): this function should also take another arg as the fees - // token and amount let shielded_parts = construct_shielded_parts( context, - &TransferSource::ExtendedSpendingKey(args.source), - &TransferTarget::PaymentAddress(args.target), - &args.token, - validated_amount, + transfer_data, + masp_fee_data, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await? 
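A minimal sketch of the decision made by get_masp_fee_payment_amount, using plain u128 arithmetic instead of token::Amount and saturating where the real code reports an arithmetic error: only the portion of the total fee that the fee payer's transparent balance cannot cover is unshielded from the MASP pool.

// Returns the amount to unshield from the MASP pool for fees; 0 means the
// transparent balance is sufficient and fees are paid as before.
fn masp_fee_shortfall(
    fee_per_gas_unit: u128,
    gas_limit: u128,
    transparent_balance: u128,
) -> u128 {
    let total_fee = fee_per_gas_unit.saturating_mul(gas_limit);
    total_fee.saturating_sub(transparent_balance)
}

// e.g. masp_fee_shortfall(1, 50_000, 20_000) == 30_000: 30_000 units would be
// unshielded to the fee payer's address before fees are charged.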
@@ -2970,6 +3068,7 @@ pub async fn build_shielded_transfer( // Construct the tx data with a placeholder shielded section hash let data = token::ShieldedTransfer { + fee_unshield, section_hash: Hash::zero(), }; let tx = build_pow_flag( @@ -2978,27 +3077,61 @@ pub async fn build_shielded_transfer( args.tx_code_path.clone(), data, add_shielded_parts, - fee_amount, + fee_per_gas_unit, &signing_data.fee_payer, ) .await?; Ok((tx, signing_data)) } +// Check if the transaction will need to pay fees via the masp and extract the +// right masp data +async fn get_masp_fee_payment_amount( + context: &N, + args: &args::Tx, + fee_amount: DenominatedAmount, + fee_payer: &common::PublicKey, + gas_spending_keys: Vec, +) -> Result> { + let fee_payer_address = Address::from(fee_payer); + let balance_key = balance_key(&args.fee_token, &fee_payer_address); + let balance = rpc::query_storage_value::<_, token::Amount>( + context.client(), + &balance_key, + ) + .await + .unwrap_or_default(); + let total_fee = checked!(fee_amount.amount() * u64::from(args.gas_limit))?; + + Ok(match total_fee.checked_sub(balance) { + Some(diff) if !diff.is_zero() => Some(MaspFeeData { + sources: gas_spending_keys, + target: fee_payer_address, + token: args.fee_token.clone(), + amount: DenominatedAmount::new(diff, fee_amount.denom()), + }), + _ => None, + }) +} + /// Build a shielding transfer pub async fn build_shielding_transfer( context: &N, args: &mut args::TxShieldingTransfer, ) -> Result<(Tx, SigningTxData, MaspEpoch)> { - let source = &args.source; - let default_signer = Some(source.clone()); - let signing_data = signing::aux_signing_data( - context, - &args.tx, - Some(source.clone()), - default_signer, - ) - .await?; + let source = if args.data.len() == 1 { + // If only one transfer take its source as the signer + args.data + .first() + .map(|transfer_data| transfer_data.source.clone()) + } else { + // Otherwise the caller is required to pass the public keys in the + // argument + None + }; + let signing_data = + signing::aux_signing_data(context, &args.tx, source.clone(), source) + .await?; // Transparent fee payment let (fee_amount, updated_balance) = @@ -3008,38 +3141,58 @@ pub async fn build_shielding_transfer( (fee_amount, Some(updated_balance)) })?; - // Validate the amount given - let validated_amount = - validate_amount(context, args.amount, &args.token, args.tx.force) + let mut transfer_data = vec![]; + let mut data = vec![]; + for TxShieldingTransferData { + source, + token, + amount, + } in &args.data + { + // Validate the amount given + let validated_amount = + validate_amount(context, amount.to_owned(), token, args.tx.force) + .await?; + + // Check the balance of the source + if let Some(updated_balance) = &updated_balance { + let check_balance = if &updated_balance.source == source + && &updated_balance.token == token + { + CheckBalance::Balance(updated_balance.post_balance) + } else { + CheckBalance::Query(balance_key(token, source)) + }; + + check_balance_too_low_err( + token, + source, + validated_amount.amount(), + check_balance, + args.tx.force, + context, + ) .await?; + } - // Check the balance of the source - if let Some(updated_balance) = updated_balance { - let check_balance = if &updated_balance.source == source - && updated_balance.token == args.token - { - CheckBalance::Balance(updated_balance.post_balance) - } else { - CheckBalance::Query(balance_key(&args.token, source)) - }; + transfer_data.push(MaspTransferData { + source: TransferSource::Address(source.to_owned()), + target: 
TransferTarget::PaymentAddress(args.target), + token: token.to_owned(), + amount: validated_amount, + }); - check_balance_too_low_err( - &args.token, - source, - validated_amount.amount(), - check_balance, - args.tx.force, - context, - ) - .await?; + data.push(token::ShieldingTransferData { + source: source.to_owned(), + token: token.to_owned(), + amount: validated_amount, + }); } let shielded_parts = construct_shielded_parts( context, - &TransferSource::Address(source.clone()), - &TransferTarget::PaymentAddress(args.target), - &args.token, - validated_amount, + transfer_data, + None, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await? @@ -3047,7 +3200,7 @@ pub async fn build_shielding_transfer( let shielded_tx_epoch = shielded_parts.0.epoch; let add_shielded_parts = - |tx: &mut Tx, data: &mut token::ShieldingTransfer| { + |tx: &mut Tx, data: &mut token::ShieldingMultiTransfer| { // Add the MASP Transaction and its Builder to facilitate validation let ( ShieldedTransfer { @@ -3077,10 +3230,8 @@ pub async fn build_shielding_transfer( }; // Construct the tx data with a placeholder shielded section hash - let data = token::ShieldingTransfer { - source: source.clone(), - token: args.token.clone(), - amount: validated_amount, + let data = token::ShieldingMultiTransfer { + data, shielded_section_hash: Hash::zero(), }; @@ -3102,38 +3253,69 @@ pub async fn build_unshielding_transfer( context: &N, args: &mut args::TxUnshieldingTransfer, ) -> Result<(Tx, SigningTxData)> { - let default_signer = Some(MASP); - let signing_data = signing::aux_signing_data( + let signing_data = + signing::aux_signing_data(context, &args.tx, Some(MASP), Some(MASP)) + .await?; + + // Shielded fee payment + let fee_per_gas_unit = validate_fee(context, &args.tx).await?; + + let mut transfer_data = vec![]; + let mut data = vec![]; + for TxUnshieldingTransferData { + target, + token, + amount, + } in &args.data + { + // Validate the amount given + let validated_amount = + validate_amount(context, amount.to_owned(), token, args.tx.force) + .await?; + + transfer_data.push(MaspTransferData { + source: TransferSource::ExtendedSpendingKey(args.source), + target: TransferTarget::Address(target.to_owned()), + token: token.to_owned(), + amount: validated_amount, + }); + + data.push(token::UnshieldingTransferData { + target: target.to_owned(), + token: token.to_owned(), + amount: validated_amount, + }); + } + + // Add masp fee payment if necessary + let masp_fee_data = get_masp_fee_payment_amount( context, &args.tx, - Some(MASP), - default_signer, + fee_per_gas_unit, + &signing_data.fee_payer, + args.gas_spending_keys.clone(), ) .await?; + if let Some(fee_data) = &masp_fee_data { + // Add another unshield to the list + data.push(token::UnshieldingTransferData { + target: fee_data.target.to_owned(), + token: fee_data.token.to_owned(), + amount: fee_data.amount, + }); + } - // Shielded fee payment - let fee_amount = validate_fee(context, &args.tx).await?; - - // Validate the amount given - let validated_amount = - validate_amount(context, args.amount, &args.token, args.tx.force) - .await?; - - // TODO(namada#2597): this function should also take another arg as the fees - // token and amount let shielded_parts = construct_shielded_parts( context, - &TransferSource::ExtendedSpendingKey(args.source), - &TransferTarget::Address(args.target.clone()), - &args.token, - validated_amount, + transfer_data, + masp_fee_data, !(args.tx.dry_run || args.tx.dry_run_wrapper), ) .await? 
.expect("Shielding transfer must have shielded parts"); let add_shielded_parts = - |tx: &mut Tx, data: &mut token::UnshieldingTransfer| { + |tx: &mut Tx, data: &mut token::UnshieldingMultiTransfer| { // Add the MASP Transaction and its Builder to facilitate validation let ( ShieldedTransfer { @@ -3163,19 +3345,18 @@ pub async fn build_unshielding_transfer( }; // Construct the tx data with a placeholder shielded section hash - let data = token::UnshieldingTransfer { - target: args.target.clone(), - token: args.token.clone(), - amount: validated_amount, + let data = token::UnshieldingMultiTransfer { + data, shielded_section_hash: Hash::zero(), }; + let tx = build_pow_flag( context, &args.tx, args.tx_code_path.clone(), data, add_shielded_parts, - fee_amount, + fee_per_gas_unit, &signing_data.fee_payer, ) .await?; @@ -3185,10 +3366,8 @@ pub async fn build_unshielding_transfer( // Construct the shielded part of the transaction, if any async fn construct_shielded_parts( context: &N, - source: &TransferSource, - target: &TransferTarget, - token: &Address, - amount: token::DenominatedAmount, + data: Vec, + fee_data: Option, update_ctx: bool, ) -> Result)>> { // Precompute asset types to increase chances of success in decoding @@ -3201,18 +3380,39 @@ async fn construct_shielded_parts( .await; let stx_result = ShieldedContext::::gen_shielded_transfer( - context, source, target, token, amount, update_ctx, + context, data, fee_data, update_ctx, ) .await; let shielded_parts = match stx_result { Ok(Some(stx)) => stx, Ok(None) => return Ok(None), - Err(Build(builder::Error::InsufficientFunds(_))) => { - return Err(TxSubmitError::NegativeBalanceAfterTransfer( - Box::new(source.effective_address()), - amount.to_string(), - Box::new(token.clone()), + Err(Build { + error: builder::Error::InsufficientFunds(_), + data, + }) => { + if let Some(MaspDataLog { + source, + token, + amount, + }) = data + { + if let Some(source) = source { + return Err(TxSubmitError::NegativeBalanceAfterTransfer( + Box::new(source.effective_address()), + amount.to_string(), + Box::new(token.clone()), + ) + .into()); + } + return Err(TxSubmitError::MaspError(format!( + "Insufficient funds: Could not collect enough funds to \ + pay for fees: token {token}, amount: {amount}" + )) + .into()); + } + return Err(TxSubmitError::MaspError( + "Insufficient funds".to_string(), ) .into()); } @@ -3526,13 +3726,18 @@ pub async fn gen_ibc_shielding_transfer( .precompute_asset_types(context.client(), tokens) .await; + let masp_transfer_data = MaspTransferData { + source: TransferSource::Address(source.clone()), + target: args.target, + token: token.clone(), + amount: validated_amount, + }; let shielded_transfer = ShieldedContext::::gen_shielded_transfer( context, - &TransferSource::Address(source.clone()), - &args.target, - &token, - validated_amount, + vec![masp_transfer_data], + // Fees are paid from the transparent balance of the relayer + None, true, ) .await @@ -3542,9 +3747,11 @@ pub async fn gen_ibc_shielding_transfer( let masp_tx_hash = Section::MaspTx(shielded_transfer.masp_tx.clone()).get_hash(); let transfer = token::ShieldingTransfer { - source: source.clone(), - token: token.clone(), - amount: validated_amount, + data: ShieldingTransferData { + source: source.clone(), + token: token.clone(), + amount: validated_amount, + }, shielded_section_hash: masp_tx_hash, }; Ok(Some((transfer, shielded_transfer.masp_tx))) diff --git a/crates/shielded_token/src/utils.rs b/crates/shielded_token/src/utils.rs index 3d3240b18d..9396755659 100644 --- 
a/crates/shielded_token/src/utils.rs +++ b/crates/shielded_token/src/utils.rs @@ -1,11 +1,15 @@ //! MASP utilities +use std::collections::BTreeSet; + use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; use masp_primitives::transaction::Transaction; -use namada_storage::{Error, Result, StorageRead, StorageWrite}; +use namada_storage::{Error, Key, Result, StorageRead, StorageWrite}; -use crate::storage_key::{masp_commitment_tree_key, masp_nullifier_key}; +use crate::storage_key::{ + is_masp_transfer_key, masp_commitment_tree_key, masp_nullifier_key, +}; // Writes the nullifiers of the provided masp transaction to storage fn reveal_nullifiers( @@ -59,11 +63,19 @@ pub fn handle_masp_tx( ctx: &mut (impl StorageRead + StorageWrite), shielded: &Transaction, ) -> Result<()> { - // TODO: temporarily disabled because of the node aggregation issue in WASM. - // Using the host env tx_update_masp_note_commitment_tree or directly the - // update_note_commitment_tree function as a workaround instead - // update_note_commitment_tree(ctx, shielded)?; + // TODO(masp#73): temporarily disabled because of the node aggregation issue + // in WASM. Using the host env tx_update_masp_note_commitment_tree or + // directly the update_note_commitment_tree function as a workaround + // instead update_note_commitment_tree(ctx, shielded)?; reveal_nullifiers(ctx, shielded)?; Ok(()) } + +/// Check if a transaction was a MASP transaction. This means +/// that at least one key owned by MASP was changed. We cannot +/// simply check that the MASP VP was triggered, as this can +/// be manually requested to be triggered by users. +pub fn is_masp_transfer(changed_keys: &BTreeSet) -> bool { + changed_keys.iter().any(is_masp_transfer_key) +} diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index f62ab1c862..b2eb28e252 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -57,6 +57,7 @@ pub use namada_storage::{ StorageWrite, DB, }; use thiserror::Error; +use wl_state::TxWlState; pub use wl_state::{FullAccessState, TempWlState, WlState}; use write_log::WriteLog; @@ -205,6 +206,12 @@ pub trait State: StateRead + StorageWrite { } } +/// Perform storage writes and deletions to write-log at tx level. +pub trait TxWrites: StateRead { + /// Performs storage writes at the tx level of the write-log. + fn with_tx_writes(&mut self) -> TxWlState<'_, Self::D, Self::H>; +} + /// Implement [`trait StorageRead`] using its [`trait StateRead`] /// implementation. #[macro_export] @@ -411,9 +418,11 @@ macro_rules! 
impl_storage_write_by_protocol { impl_storage_read!(FullAccessState); impl_storage_read!(WlState); impl_storage_read!(TempWlState<'_, D, H>); +impl_storage_read!(TxWlState<'_, D, H>); impl_storage_write_by_protocol!(FullAccessState); impl_storage_write_by_protocol!(WlState); impl_storage_write_by_protocol!(TempWlState<'_, D, H>); +impl_storage_write!(TxWlState<'_, D, H>); impl_storage_read!(TxHostEnvState<'_, D, H>); impl_storage_read!(VpHostEnvState<'_, D, H>); @@ -760,7 +769,7 @@ mod tests { epochs_per_year: 100, masp_epoch_multiplier: 2, max_signatures_per_transaction: 15, - fee_unshielding_gas_limit: 20_000, + masp_fee_payment_gas_limit: 20_000, minimum_gas_price: BTreeMap::default(), is_native_token_transferable: true, }; diff --git a/crates/state/src/wl_state.rs b/crates/state/src/wl_state.rs index b5103ce723..8f6ebf1984 100644 --- a/crates/state/src/wl_state.rs +++ b/crates/state/src/wl_state.rs @@ -22,7 +22,7 @@ use crate::write_log::{StorageModification, WriteLog}; use crate::{ is_pending_transfer_key, DBIter, Epoch, Error, Hash, Key, KeySeg, LastBlock, MembershipProof, MerkleTree, MerkleTreeError, ProofOps, Result, - State, StateRead, StorageHasher, StorageResult, StoreType, DB, + State, StateRead, StorageHasher, StorageResult, StoreType, TxWrites, DB, EPOCH_SWITCH_BLOCKS_DELAY, STORAGE_ACCESS_GAS_PER_BYTE, }; @@ -52,6 +52,22 @@ where pub diff_key_filter: fn(&storage::Key) -> bool, } +/// State with write access to the tx-level write log, used to apply protocol +/// storage writes and deletions scoped to the current transaction. +#[derive(Debug)] +pub struct TxWlState<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + pub(crate) write_log: &'a mut WriteLog, + // DB + pub(crate) db: &'a D, + /// State + pub(crate) in_mem: &'a InMemory, +} + /// State with a temporary write log. This is used for dry-running txs and ABCI /// prepare and process proposal, which must not modify the actual state. #[derive(Debug)] @@ -651,16 +667,17 @@ where } } - /// Commit the current transaction's write log to the block. Starts a new - /// transaction write log. - pub fn commit_tx(&mut self) { - self.write_log.commit_tx() + /// Commit the current transaction's write log and the entire batch to the + /// block. Starts a new transaction and batch write log. + pub fn commit_tx_batch(&mut self) { + self.write_log.commit_batch() } /// Drop the current transaction's write log when it's declined by any of - /// the triggered validity predicates. Starts a new transaction write log. - pub fn drop_tx(&mut self) { - self.write_log.drop_tx() + /// the triggered validity predicates together with the entire batch. Starts + /// new transaction and batch write logs.
+ pub fn drop_tx_batch(&mut self) { + self.write_log.drop_batch() } /// Mark the provided transaction's hash as redundant to prevent committing @@ -1236,6 +1253,61 @@ where } } +impl TxWrites for WlState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn with_tx_writes(&mut self) -> TxWlState<'_, Self::D, Self::H> { + TxWlState { + write_log: &mut self.write_log, + db: &self.db, + in_mem: &self.in_mem, + } + } +} + +impl StateRead for TxWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + self.write_log + } + + fn db(&self) -> &D { + self.db + } + + fn in_mem(&self) -> &InMemory { + self.in_mem + } + + fn charge_gas(&self, _gas: u64) -> Result<()> { + Ok(()) + } +} + +impl State for TxWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn write_log_mut(&mut self) -> &mut WriteLog { + self.write_log + } + + fn split_borrow( + &mut self, + ) -> (&mut WriteLog, &InMemory, &Self::D) { + (self.write_log, (self.in_mem), (self.db)) + } +} + impl EmitEvents for WlState where D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1301,6 +1373,20 @@ where } } +impl TxWrites for TempWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn with_tx_writes(&mut self) -> TxWlState<'_, Self::D, Self::H> { + TxWlState { + write_log: &mut self.write_log, + db: self.db, + in_mem: self.in_mem, + } + } +} + impl Deref for FullAccessState where D: DB + for<'iter> DBIter<'iter>, @@ -1392,3 +1478,27 @@ where } } } + +impl namada_tx::action::Read for TempWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type Err = Error; + + fn read_temp( + &self, + key: &storage::Key, + ) -> Result> { + let (log_val, _) = self.write_log().read_temp(key).unwrap(); + match log_val { + Some(value) => { + let value = + namada_core::borsh::BorshDeserialize::try_from_slice(value) + .map_err(Error::BorshCodingError)?; + Ok(Some(value)) + } + None => Ok(None), + } + } +} diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index 82929e02f4..1d56c4699f 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -10,7 +10,10 @@ use namada_core::collections::{HashMap, HashSet}; use namada_core::hash::Hash; use namada_core::{arith, storage}; use namada_events::{Event, EventToEmit, EventType}; -use namada_gas::{MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE}; +use namada_gas::{ + MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_DELETE_GAS_PER_BYTE, + STORAGE_WRITE_GAS_PER_BYTE, +}; use patricia_tree::map::StringPatriciaMap; use thiserror::Error; @@ -75,15 +78,6 @@ pub(crate) struct TxWriteLog { // Temporary key-values for the current transaction that are dropped after // tx and its verifying VPs execution is done tx_temp_log: HashMap>, - // A precommit bucket for the `tx_write_log`. This is useful for - // validation when a clean `tx_write_log` is needed without committing any - // modification already in there. These modifications can be temporarily - // stored here and then discarded or committed to the `block_write_log`, - // together with the content of `tx_write_log`. 
No direct key - // write/update/delete should ever happen on this field, this log should - // only be populated through a dump of the `tx_write_log` and should be - // cleaned either when committing or dumping the `tx_write_log` - precommit_write_log: HashMap, /// The events emitted by the current transaction events: WriteLogEvents, } @@ -94,7 +88,6 @@ impl Default for TxWriteLog { address_gen: None, write_log: HashMap::with_capacity(100), tx_temp_log: HashMap::with_capacity(1), - precommit_write_log: HashMap::with_capacity(100), events: WriteLogEvents { tree: StringPatriciaMap::new(), }, @@ -197,10 +190,6 @@ impl WriteLog { .tx_write_log .write_log .get(key) - .or_else(|| { - // If not found, then try to read from tx precommit write log - self.tx_write_log.precommit_write_log.get(key) - }) .or_else(|| { // If not found, then try to read from batch write log, // following the insertion order @@ -304,12 +293,7 @@ impl WriteLog { } let len_signed = i64::try_from(len).map_err(|_| Error::ValueLenOverflow)?; - let size_diff = match self - .tx_write_log - .write_log - .get(key) - .or_else(|| self.tx_write_log.precommit_write_log.get(key)) - { + let size_diff = match self.tx_write_log.write_log.get(key) { Some(prev) => match prev { StorageModification::Write { ref value } => { let val_len = i64::try_from(value.len()) @@ -322,7 +306,8 @@ impl WriteLog { // wasm environment without the need for cooperation from // the wasm code (tx or vp), so there's no need to return // gas in case of an error because execution will terminate - // anyway and this cannot be exploited to run the vm forever + // anyway and this cannot be exploited to keep the vm + // running return Err(Error::UpdateVpOfNewAccount); } }, @@ -379,12 +364,7 @@ impl WriteLog { key: &storage::Key, value: Vec, ) -> Result<(u64, i64)> { - if let Some(prev) = self - .tx_write_log - .write_log - .get(key) - .or_else(|| self.tx_write_log.precommit_write_log.get(key)) - { + if let Some(prev) = self.tx_write_log.write_log.get(key) { match prev { StorageModification::Write { .. } => { // Cannot overwrite a write request with a temporary one @@ -429,12 +409,7 @@ impl WriteLog { if key.is_validity_predicate().is_some() { return Err(Error::DeleteVp); } - let size_diff = match self - .tx_write_log - .write_log - .get(key) - .or_else(|| self.tx_write_log.precommit_write_log.get(key)) - { + let size_diff = match self.tx_write_log.write_log.get(key) { Some(prev) => match prev { StorageModification::Write { ref value } => value.len(), StorageModification::Delete => 0, @@ -455,7 +430,7 @@ impl WriteLog { .ok() .and_then(i64::checked_neg) .ok_or(Error::SizeDiffOverflow)?; - Ok((checked!(gas * STORAGE_WRITE_GAS_PER_BYTE)?, size_diff)) + Ok((checked!(gas * STORAGE_DELETE_GAS_PER_BYTE)?, size_diff)) } /// Delete a key and its value. @@ -532,8 +507,7 @@ impl WriteLog { /// Get the non-temporary storage keys changed and accounts keys initialized /// in the current transaction. The account keys point to the validity - /// predicates of the newly created accounts. The keys in the precommit are - /// not included in the result of this function. + /// predicates of the newly created accounts. pub fn get_keys(&self) -> BTreeSet { self.tx_write_log .write_log @@ -542,18 +516,6 @@ impl WriteLog { .collect() } - /// Get the storage keys changed and accounts keys initialized in the - /// current transaction and precommit. The account keys point to the - /// validity predicates of the newly created accounts. 
- pub fn get_keys_with_precommit(&self) -> BTreeSet { - self.tx_write_log - .precommit_write_log - .keys() - .chain(self.tx_write_log.write_log.keys()) - .cloned() - .collect() - } - /// Get the storage keys changed in the current transaction (left) and /// the addresses of accounts initialized in the current transaction /// (right). The first vector excludes keys of validity predicates of @@ -635,74 +597,42 @@ impl WriteLog { self.tx_write_log.events.tree.values().flatten() } - /// Add the entire content of the tx write log to the precommit one. The tx - /// log gets reset in the process. - pub fn precommit_tx(&mut self) { - let tx_log = std::mem::replace( - &mut self.tx_write_log.write_log, - HashMap::with_capacity(100), - ); - - self.tx_write_log.precommit_write_log.extend(tx_log) - } - - /// Commit the current transaction's write log and precommit log to the - /// batch when it's accepted by all the triggered validity predicates. - /// Starts a new transaction write log. + /// Commit the current transaction's write log to the batch when it's + /// accepted by all the triggered validity predicates. Starts a new + /// transaction write log. pub fn commit_tx_to_batch(&mut self) { - // First precommit everything - self.precommit_tx(); - - // Then commit to batch let tx_write_log = std::mem::take(&mut self.tx_write_log); let batched_log = BatchedTxWriteLog { address_gen: tx_write_log.address_gen, - write_log: tx_write_log.precommit_write_log, + write_log: tx_write_log.write_log, }; self.batch_write_log.push(batched_log); } - /// Drop the current transaction's write log and IBC events and precommit - /// when it's declined by any of the triggered validity predicates. - /// Starts a new transaction write log a clears the temp write log. + /// Drop the current transaction's write log and IBC events when it's + /// declined by any of the triggered validity predicates. Starts a new + /// transaction write log and clears the temp write log. pub fn drop_tx(&mut self) { self.tx_write_log = Default::default(); } - /// Drop the current transaction's write log and temporary log but keep the - /// precommit one. This is useful only when a part of a transaction - /// failed but it can still be valid and we want to keep the changes - /// applied before the failed section. - pub fn drop_tx_keep_precommit(&mut self) { - self.tx_write_log.write_log.clear(); - self.tx_write_log.tx_temp_log.clear(); - } - - /// Commit the entire batch to the block log. + /// Commit the current tx and the entire batch to the block log. pub fn commit_batch(&mut self) { + self.commit_tx_to_batch(); + for log in std::mem::take(&mut self.batch_write_log) { self.block_write_log.extend(log.write_log); self.block_address_gen = log.address_gen; } } - /// Drop the entire batch log. + /// Drop the current tx and the entire batch log. pub fn drop_batch(&mut self) { + self.drop_tx(); self.batch_write_log = Default::default(); } - /// Commit the tx write log to the block write log. - pub fn commit_tx(&mut self) { - // First precommit everything - self.precommit_tx(); - - // Then commit to block - let tx_write_log = std::mem::take(&mut self.tx_write_log); - self.block_write_log - .extend(tx_write_log.precommit_write_log); - } - /// Get the verifiers set whose validity predicates should validate the /// current transaction changes and the storage keys that have been /// modified created, updated and deleted via the write log. 
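// Minimal standalone sketch of the reworked write-log flow described in the
// doc comments above: per-tx writes are folded into a batch log when an inner
// tx is accepted, `commit_batch` now also commits the pending tx before
// flushing the whole batch to the block log, and `drop_batch` discards the
// pending tx along with the batch. `SketchWriteLog` and its fields are
// illustrative stand-ins only, not the actual namada `WriteLog` API.
use std::collections::HashMap;

#[derive(Default)]
struct SketchWriteLog {
    tx_log: HashMap<String, Vec<u8>>,
    batch_logs: Vec<HashMap<String, Vec<u8>>>,
    block_log: HashMap<String, Vec<u8>>,
}

impl SketchWriteLog {
    fn write(&mut self, key: &str, value: Vec<u8>) {
        self.tx_log.insert(key.to_string(), value);
    }

    // Accepted inner tx: move its writes into the batch, start a fresh tx log.
    fn commit_tx_to_batch(&mut self) {
        self.batch_logs.push(std::mem::take(&mut self.tx_log));
    }

    // Rejected inner tx: discard only the pending tx writes.
    fn drop_tx(&mut self) {
        self.tx_log.clear();
    }

    // Commit the pending tx and then the entire batch to the block log.
    fn commit_batch(&mut self) {
        self.commit_tx_to_batch();
        for log in std::mem::take(&mut self.batch_logs) {
            self.block_log.extend(log);
        }
    }

    // Drop the pending tx together with the entire batch.
    fn drop_batch(&mut self) {
        self.drop_tx();
        self.batch_logs.clear();
    }
}

fn main() {
    let mut log = SketchWriteLog::default();
    log.write("balance/alice", vec![1]);
    log.commit_tx_to_batch(); // first inner tx of the batch accepted
    log.write("balance/bob", vec![2]);
    log.commit_batch(); // commits the pending tx and the whole batch
    assert_eq!(log.block_log.len(), 2);
}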
@@ -766,12 +696,7 @@ impl WriteLog { .iter() .rev() .flat_map(|batch_log| batch_log.write_log.iter()) - .chain( - self.tx_write_log - .precommit_write_log - .iter() - .chain(self.tx_write_log.write_log.iter()), - ), + .chain(self.tx_write_log.write_log.iter()), ) { if key.split_prefix(prefix).is_some() { matches.insert(key.to_string(), modification.clone()); @@ -836,7 +761,7 @@ mod tests { // delete a non-existing key let (gas, diff) = write_log.delete(&key).unwrap(); - assert_eq!(gas, key.len() as u64 * STORAGE_WRITE_GAS_PER_BYTE); + assert_eq!(gas, key.len() as u64 * STORAGE_DELETE_GAS_PER_BYTE); assert_eq!(diff, 0); // insert a value @@ -874,13 +799,13 @@ mod tests { let (gas, diff) = write_log.delete(&key).unwrap(); assert_eq!( gas, - (key.len() + updated.len()) as u64 * STORAGE_WRITE_GAS_PER_BYTE + (key.len() + updated.len()) as u64 * STORAGE_DELETE_GAS_PER_BYTE ); assert_eq!(diff, -(updated.len() as i64)); // delete the deleted key again let (gas, diff) = write_log.delete(&key).unwrap(); - assert_eq!(gas, key.len() as u64 * STORAGE_WRITE_GAS_PER_BYTE); + assert_eq!(gas, key.len() as u64 * STORAGE_DELETE_GAS_PER_BYTE); assert_eq!(diff, 0); // read the deleted key @@ -997,7 +922,7 @@ mod tests { // initialize an account let vp1 = Hash::sha256("vp1".as_bytes()); let (addr1, _) = state.write_log.init_account(&address_gen, vp1, &[]); - state.write_log.commit_tx(); + state.write_log.commit_batch(); // write values let val1 = "val1".as_bytes().to_vec(); @@ -1005,7 +930,7 @@ mod tests { state.write_log.write(&key2, val1.clone()).unwrap(); state.write_log.write(&key3, val1.clone()).unwrap(); state.write_log.write_temp(&key4, val1.clone()).unwrap(); - state.write_log.commit_tx(); + state.write_log.commit_batch(); // these values are not written due to drop_tx let val2 = "val2".as_bytes().to_vec(); @@ -1018,7 +943,7 @@ mod tests { let val3 = "val3".as_bytes().to_vec(); state.write_log.delete(&key2).unwrap(); state.write_log.write(&key3, val3.clone()).unwrap(); - state.write_log.commit_tx(); + state.write_log.commit_batch(); // commit a block state.commit_block().expect("commit failed"); @@ -1137,14 +1062,6 @@ mod tests { state.write_log.write(&key1, val1.clone()), Err(Error::UpdateTemporaryValue) )); - - // Test with a temporary write precommitted - state.write_log.write_temp(&key1, val1.clone()).unwrap(); - state.write_log.precommit_tx(); - assert!(matches!( - state.write_log.write(&key1, val1), - Err(Error::UpdateTemporaryValue) - )); } // Test that a temporary write on top of a write is not allowed @@ -1161,14 +1078,6 @@ mod tests { state.write_log.write_temp(&key1, val1.clone()), Err(Error::WriteTempAfterWrite) )); - - // Test with a temporary write precommitted - state.write_log.write(&key1, val1.clone()).unwrap(); - state.write_log.precommit_tx(); - assert!(matches!( - state.write_log.write_temp(&key1, val1), - Err(Error::WriteTempAfterWrite) - )); } // Test that a temporary write on top of a delete is not allowed @@ -1185,14 +1094,6 @@ mod tests { state.write_log.write_temp(&key1, val1.clone()), Err(Error::WriteTempAfterDelete) )); - - // Test with a temporary write precommitted - state.write_log.delete(&key1).unwrap(); - state.write_log.precommit_tx(); - assert!(matches!( - state.write_log.write_temp(&key1, val1), - Err(Error::WriteTempAfterDelete) - )); } prop_compose! 
{ diff --git a/crates/tests/src/e2e/ibc_tests.rs b/crates/tests/src/e2e/ibc_tests.rs index c1781b6295..e581d51ea4 100644 --- a/crates/tests/src/e2e/ibc_tests.rs +++ b/crates/tests/src/e2e/ibc_tests.rs @@ -230,7 +230,7 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { receiver.to_string(), NAM, 100000.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -266,7 +266,7 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { receiver.to_string(), ibc_denom, 50000.0, - BERTHA_KEY, + Some(BERTHA_KEY), &port_id_b, &channel_id_b, None, @@ -286,6 +286,16 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { 100, ALBERT_KEY, )?; + // Send some token for masp fee payment + transfer_on_chain( + &test_a, + "shield", + ALBERT, + AA_PAYMENT_ADDRESS, + NAM, + 10_000, + ALBERT_KEY, + )?; shielded_sync(&test_a, AA_VIEWING_KEY)?; // Shieded transfer from Chain A to Chain B transfer( @@ -294,7 +304,7 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { AB_PAYMENT_ADDRESS, BTC, 10.0, - ALBERT_KEY, + None, &port_id_a, &channel_id_a, None, @@ -311,7 +321,7 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { "invalid_receiver", BTC, 10.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -333,7 +343,7 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { AB_PAYMENT_ADDRESS, BTC, 10.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, Some(Duration::new(10, 0)), @@ -398,7 +408,7 @@ fn ibc_namada_gaia() -> Result<()> { receiver, APFEL, 200.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_namada, &channel_id_namada, None, @@ -453,7 +463,7 @@ fn ibc_namada_gaia() -> Result<()> { &receiver, ibc_denom, 100.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_namada, &channel_id_namada, None, @@ -500,7 +510,7 @@ fn ibc_namada_gaia() -> Result<()> { &receiver, &ibc_denom, 10.0, - BERTHA_KEY, + Some(BERTHA_KEY), &port_id_namada, &channel_id_namada, None, @@ -660,7 +670,7 @@ fn proposal_ibc_token_inflation() -> Result<()> { AB_PAYMENT_ADDRESS, APFEL, 1.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -727,7 +737,7 @@ fn ibc_rate_limit() -> Result<()> { receiver.to_string(), NAM, 1.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -742,7 +752,7 @@ fn ibc_rate_limit() -> Result<()> { receiver.to_string(), NAM, 1.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -768,7 +778,7 @@ fn ibc_rate_limit() -> Result<()> { receiver.to_string(), NAM, 1.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, None, @@ -792,7 +802,7 @@ fn ibc_rate_limit() -> Result<()> { receiver.to_string(), NAM, 1.0, - ALBERT_KEY, + Some(ALBERT_KEY), &port_id_a, &channel_id_a, Some(Duration::new(20, 0)), @@ -1558,7 +1568,7 @@ fn transfer_token( receiver.to_string(), NAM, 100000.0, - ALBERT_KEY, + Some(ALBERT_KEY), port_id_a, channel_id_a, None, @@ -1633,7 +1643,7 @@ fn try_invalid_transfers( receiver.to_string(), NAM, 10.1, - ALBERT_KEY, + Some(ALBERT_KEY), port_id_a, channel_id_a, None, @@ -1649,7 +1659,7 @@ fn try_invalid_transfers( receiver.to_string(), NAM, 10.0, - ALBERT_KEY, + Some(ALBERT_KEY), &"port".parse().unwrap(), channel_id_a, None, @@ -1665,7 +1675,7 @@ fn try_invalid_transfers( receiver.to_string(), NAM, 10.0, - ALBERT_KEY, + Some(ALBERT_KEY), port_id_a, &"channel-42".parse().unwrap(), None, @@ -1730,7 +1740,7 @@ fn transfer_back( receiver.to_string(), ibc_denom, 50000.0, - BERTHA_KEY, + Some(BERTHA_KEY), port_id_b, channel_id_b, None, @@ -1803,7 +1813,7 @@ fn transfer_timeout( receiver.to_string(), NAM, 100000.0, - ALBERT_KEY, + 
Some(ALBERT_KEY), port_id_a, channel_id_a, Some(Duration::new(5, 0)), @@ -1937,7 +1947,7 @@ fn transfer( receiver: impl AsRef, token: impl AsRef, amount: f64, - signer: impl AsRef, + signer: Option<&str>, port_id: &PortId, channel_id: &ChannelId, timeout_sec: Option, @@ -1956,8 +1966,6 @@ fn transfer( sender.as_ref(), "--receiver", receiver.as_ref(), - "--signing-keys", - signer.as_ref(), "--token", token.as_ref(), "--amount", @@ -1970,6 +1978,12 @@ fn transfer( &rpc, ]; + if let Some(signer) = signer { + tx_args.extend_from_slice(&["--signing-keys", signer]); + } else { + tx_args.push("--disposable-gas-payer"); + } + let timeout = timeout_sec.unwrap_or_default().as_secs().to_string(); if timeout_sec.is_some() { tx_args.push("--timeout-sec-offset"); diff --git a/crates/tests/src/integration/ledger_tests.rs b/crates/tests/src/integration/ledger_tests.rs index 214ab32a1d..3ec0da9b0e 100644 --- a/crates/tests/src/integration/ledger_tests.rs +++ b/crates/tests/src/integration/ledger_tests.rs @@ -59,16 +59,17 @@ fn ledger_txs_and_queries() -> Result<()> { let validator_one_rpc = "http://127.0.0.1:26567"; let (node, _services) = setup::setup()?; - let transfer = token::TransparentTransfer { - source: defaults::bertha_address(), - target: defaults::albert_address(), - token: node.native_token(), - amount: token::DenominatedAmount::new( - token::Amount::native_whole(10), - token::NATIVE_MAX_DECIMAL_PLACES.into(), - ), - } - .serialize_to_vec(); + let transfer = + token::TransparentTransfer(vec![token::TransparentTransferData { + source: defaults::bertha_address(), + target: defaults::albert_address(), + token: node.native_token(), + amount: token::DenominatedAmount::new( + token::Amount::native_whole(10), + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ), + }]) + .serialize_to_vec(); let tx_data_path = node.test_dir.path().join("tx.data"); std::fs::write(&tx_data_path, transfer).unwrap(); let tx_data_path = tx_data_path.to_string_lossy(); diff --git a/crates/tests/src/integration/masp.rs b/crates/tests/src/integration/masp.rs index 5f7f6dfd1b..3897ea66ba 100644 --- a/crates/tests/src/integration/masp.rs +++ b/crates/tests/src/integration/masp.rs @@ -18,9 +18,9 @@ use test_log::test; use super::setup; use crate::e2e::setup::constants::{ AA_PAYMENT_ADDRESS, AA_VIEWING_KEY, AB_PAYMENT_ADDRESS, AB_VIEWING_KEY, - AC_PAYMENT_ADDRESS, ALBERT, ALBERT_KEY, A_SPENDING_KEY, BB_PAYMENT_ADDRESS, - BERTHA, BERTHA_KEY, BTC, B_SPENDING_KEY, CHRISTEL, CHRISTEL_KEY, ETH, MASP, - NAM, + AC_PAYMENT_ADDRESS, AC_VIEWING_KEY, ALBERT, ALBERT_KEY, A_SPENDING_KEY, + BB_PAYMENT_ADDRESS, BERTHA, BERTHA_KEY, BTC, B_SPENDING_KEY, CHRISTEL, + CHRISTEL_KEY, ETH, MASP, NAM, }; use crate::strings::TX_APPLIED_SUCCESS; @@ -2171,3 +2171,1068 @@ fn dynamic_assets() -> Result<()> { Ok(()) } + +// Test fee payment in masp: +// +// 1. Masp fee payment runs out of gas +// 2. Valid fee payment (also check that the first tx in the batch is executed +// only once) +#[test] +fn masp_fee_payment() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. 
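    // With --disposable-gas-payer the wrapper fee is covered by masp fee
    // payment: the shielded transaction carries an optional fee unshield that
    // moves the fee amount from the MASP pool to the gas payer (here a freshly
    // generated, ephemeral address), so the source needs no pre-funded
    // transparent balance for gas.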
+ let validator_one_rpc = "http://127.0.0.1:26567"; + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + let (mut node, _services) = setup::initialize_genesis(|mut genesis| { + genesis.parameters.parameters.masp_fee_payment_gas_limit = 20_000; + genesis + })?; + _ = node.next_masp_epoch(); + + // Add the relevant viewing keys to the wallet otherwise the shielded + // context won't precache the masp data + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_a", + "--value", + AA_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_b", + "--value", + AB_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + + // Shield some tokens + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT_KEY, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "50000", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + _ = node.next_masp_epoch(); + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 50000")); + + // 1. Out of gas for masp fee payment + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + A_SPENDING_KEY, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "1", + "--gas-limit", + "2000", + "--gas-price", + "1", + "--disposable-gas-payer", + "--ledger-address", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_err()); + _ = node.next_masp_epoch(); + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 50000")); + + // 2. 
Valid masp fee payment + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + A_SPENDING_KEY, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "10000", + "--gas-limit", + "20000", + "--gas-price", + "1", + "--disposable-gas-payer", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + // Check the exact balance of the tx source to ensure that the masp fee + // payment transaction was executed only once + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 20000")); + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 10000")); + + Ok(()) +} + +// Test that when paying gas via masp we select the gas limit as the minimum +// between the transaction's gas limit and the protocol parameter. +#[test] +fn masp_fee_payment_gas_limit() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + let (mut node, _services) = setup::initialize_genesis(|mut genesis| { + // Set an insufficient gas limit for masp fee payment to force all + // transactions to fail + genesis.parameters.parameters.masp_fee_payment_gas_limit = 3_000; + genesis + })?; + _ = node.next_masp_epoch(); + + // Add the relevant viewing keys to the wallet otherwise the shielded + // context won't precache the masp data + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_a", + "--value", + AA_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_b", + "--value", + AB_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + + // Shield some tokens + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT_KEY, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "1000000", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + // Check that the balance hasn't changed + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1000000")); + + // Masp fee payment with huge gas, check that the tx still fails because of + // the protocol param + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "unshield", + "--source", + A_SPENDING_KEY, + "--target", + BERTHA, + "--token", + NAM, + "--amount", + "1", + "--gas-limit", + "100000", + "--gas-price", + "1", + "--disposable-gas-payer", + "--ledger-address", + validator_one_rpc, + ], + ) + }); + 
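    // The gas available for the masp fee payment is capped at the minimum of
    // the transaction's --gas-limit and the masp_fee_payment_gas_limit
    // protocol parameter (3_000 in this genesis), so the fee payment runs out
    // of gas and the transaction is rejected despite the large --gas-limit.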
assert!(captured.result.is_err()); + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + // Check that the balance hasn't changed + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1000000")); + + Ok(()) +} + +// Test masp fee payement with an unshield to a non-disposable address with +// already some funds on it. +#[test] +fn masp_fee_payment_with_non_disposable() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + let (mut node, _services) = setup::initialize_genesis(|mut genesis| { + genesis.parameters.parameters.masp_fee_payment_gas_limit = 20_000; + genesis + })?; + _ = node.next_masp_epoch(); + + // Add the relevant viewing keys to the wallet otherwise the shielded + // context won't precache the masp data + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_a", + "--value", + AA_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_b", + "--value", + AB_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + + // Shield some tokens + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT_KEY, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + // Drain balance of fee payer + "1999999", + // Pay gas transparently + "--gas-payer", + BERTHA_KEY, + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1999999")); + + // Masp fee payment to non-disposable address + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "unshield", + "--source", + A_SPENDING_KEY, + "--target", + BERTHA, + "--token", + NAM, + "--amount", + "1", + "--gas-limit", + "20000", + "--gas-price", + "1", + "--gas-payer", + ALBERT_KEY, + "--ledger-address", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1979999")); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + ALBERT_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + 
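    // The gas payer here is Albert's regular (non-disposable) address, which
    // still holds a small transparent balance: the masp fee payment only
    // unshields the difference needed to cover the 20_000 fee (gas limit times
    // gas price), so his balance is expected to end at exactly zero.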
assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 0")); + + Ok(()) +} + +// Test masp fee payement with a custom provided spending key. Check that fees +// are split between the actual source of the payment and this gas spending +// key +#[test] +fn masp_fee_payment_with_custom_spending_key() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + let (mut node, _services) = setup::initialize_genesis(|mut genesis| { + genesis.parameters.parameters.masp_fee_payment_gas_limit = 20_000; + genesis + })?; + _ = node.next_masp_epoch(); + + // Add the relevant viewing keys to the wallet otherwise the shielded + // context won't precache the masp data + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_a", + "--value", + AA_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_b", + "--value", + AB_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_c", + "--value", + AC_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + + // Shield some tokens + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT_KEY, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "10000", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT_KEY, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "30000", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 10000")); + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 30000")); + + // Masp fee payment with custom gas payer + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + A_SPENDING_KEY, + "--target", + AC_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "9000", + "--gas-limit", + "20000", + "--gas-price", + "1", + "--gas-spending-key", + B_SPENDING_KEY, + "--disposable-gas-payer", + "--ledger-address", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 0")); + + let 
captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 11000")); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AC_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 9000")); + + Ok(()) +} + +// Test masp fee payement with a different token from the one used in the +// transaction itself and with the support of a different key for gas payment +#[test] +fn masp_fee_payment_with_different_token() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + let (mut node, _services) = setup::initialize_genesis(|mut genesis| { + genesis.parameters.parameters.masp_fee_payment_gas_limit = 20_000; + // Whitelist BTC for gas payment + genesis.parameters.parameters.minimum_gas_price.insert( + "btc".into(), + DenominatedAmount::new(1.into(), namada::token::Denomination(6)), + ); + genesis + })?; + _ = node.next_masp_epoch(); + + // Add the relevant viewing keys to the wallet otherwise the shielded + // context won't precache the masp data + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_a", + "--value", + AA_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + run( + &node, + Bin::Wallet, + vec![ + "add", + "--alias", + "alias_b", + "--value", + AB_VIEWING_KEY, + "--unsafe-dont-encrypt", + ], + )?; + node.assert_success(); + + // Shield some tokens + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT_KEY, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "1", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT, + "--target", + AA_PAYMENT_ADDRESS, + "--token", + BTC, + "--amount", + "1000", + "--gas-payer", + ALBERT_KEY, + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + run( + &node, + Bin::Client, + vec![ + "shield", + "--source", + ALBERT, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + BTC, + "--amount", + "20000", + "--gas-payer", + ALBERT_KEY, + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1")); + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + BTC, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("btc: 1000")); + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + BTC, + "--node", + validator_one_rpc, + ], + ) + }); + 
assert!(captured.result.is_ok()); + assert!(captured.contains("btc: 20000")); + + // Masp fee payment with custom token and gas payer + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + A_SPENDING_KEY, + "--target", + AB_PAYMENT_ADDRESS, + "--token", + NAM, + "--amount", + "1", + "--gas-token", + BTC, + "--gas-limit", + "20000", + "--gas-price", + "1", + "--gas-spending-key", + B_SPENDING_KEY, + "--disposable-gas-payer", + "--ledger-address", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + node.assert_success(); + + _ = node.next_masp_epoch(); + + // sync shielded context + run( + &node, + Bin::Client, + vec!["shielded-sync", "--node", validator_one_rpc], + )?; + node.assert_success(); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 0")); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + NAM, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("nam: 1")); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AA_VIEWING_KEY, + "--token", + BTC, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("btc: 0")); + + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + vec![ + "balance", + "--owner", + AB_VIEWING_KEY, + "--token", + BTC, + "--node", + validator_one_rpc, + ], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("btc: 1000")); + + Ok(()) +} diff --git a/crates/tests/src/native_vp/pos.rs b/crates/tests/src/native_vp/pos.rs index e0d970f11a..926588e594 100644 --- a/crates/tests/src/native_vp/pos.rs +++ b/crates/tests/src/native_vp/pos.rs @@ -268,7 +268,7 @@ mod tests { if !test_state.is_current_tx_valid { // Clear out the changes tx_host_env::with(|env| { - env.state.drop_tx(); + env.state.drop_tx_batch(); }); } @@ -282,7 +282,7 @@ mod tests { tx_host_env::with(|env| { // Clear out the changes if !test_state.is_current_tx_valid { - env.state.drop_tx(); + env.state.drop_tx_batch(); } // Also commit the last transaction(s) changes, if any env.commit_tx_and_block(); @@ -318,7 +318,7 @@ mod tests { // Clear out the invalid changes tx_host_env::with(|env| { - env.state.drop_tx(); + env.state.drop_tx_batch(); }) } } diff --git a/crates/tests/src/storage_api/collections/lazy_map.rs b/crates/tests/src/storage_api/collections/lazy_map.rs index 48d5f64511..2932be34dd 100644 --- a/crates/tests/src/storage_api/collections/lazy_map.rs +++ b/crates/tests/src/storage_api/collections/lazy_map.rs @@ -241,7 +241,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.state.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx_batch()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/lazy_set.rs b/crates/tests/src/storage_api/collections/lazy_set.rs index 4ac16671d9..386bb13a91 100644 --- a/crates/tests/src/storage_api/collections/lazy_set.rs +++ b/crates/tests/src/storage_api/collections/lazy_set.rs @@ -229,7 +229,7 @@ mod tests { match 
&transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.state.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx_batch()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/lazy_vec.rs b/crates/tests/src/storage_api/collections/lazy_vec.rs index a51508dd71..af19ab8afb 100644 --- a/crates/tests/src/storage_api/collections/lazy_vec.rs +++ b/crates/tests/src/storage_api/collections/lazy_vec.rs @@ -234,7 +234,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.state.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx_batch()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/nested_lazy_map.rs b/crates/tests/src/storage_api/collections/nested_lazy_map.rs index 7658a66223..f7b0eef995 100644 --- a/crates/tests/src/storage_api/collections/nested_lazy_map.rs +++ b/crates/tests/src/storage_api/collections/nested_lazy_map.rs @@ -254,7 +254,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.state.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx_batch()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/vm_host_env/ibc.rs b/crates/tests/src/vm_host_env/ibc.rs index 4bee157c12..30e1e14654 100644 --- a/crates/tests/src/vm_host_env/ibc.rs +++ b/crates/tests/src/vm_host_env/ibc.rs @@ -280,7 +280,7 @@ pub fn init_storage() -> (Address, Address) { // commit the initialized token and account tx_host_env::with(|env| { - env.state.commit_tx(); + env.state.commit_tx_batch(); env.state.commit_block().unwrap(); // block header to check timeout timestamp @@ -658,6 +658,7 @@ pub fn msg_transfer( MsgTransfer { message, transfer: None, + fee_unshield: None, } } diff --git a/crates/tests/src/vm_host_env/mod.rs b/crates/tests/src/vm_host_env/mod.rs index da322d3fa0..5c81af9299 100644 --- a/crates/tests/src/vm_host_env/mod.rs +++ b/crates/tests/src/vm_host_env/mod.rs @@ -397,7 +397,7 @@ mod tests { let existing_value = vec![2_u8; 1000]; tx_env.state.write(&existing_key, &existing_value).unwrap(); // ... and commit it - tx_env.state.commit_tx(); + tx_env.state.commit_tx_batch(); // In a transaction, write override the existing key's value and add // another key-value @@ -483,7 +483,7 @@ mod tests { tx_env.state.write(&key, i).unwrap(); } // ... 
and commit them - tx_env.state.commit_tx(); + tx_env.state.commit_tx_batch(); // In a transaction, write override the existing key's value and add // another key-value @@ -635,7 +635,7 @@ mod tests { .add_serialized_data(input_data.clone()) .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); - let result = vp::CTX.eval(empty_code, tx.batch_ref_first_tx()); + let result = vp::CTX.eval(empty_code, tx.batch_ref_first_tx().unwrap()); assert!(result.is_err()); // evaluating the VP template which always returns `true` should pass @@ -654,7 +654,7 @@ mod tests { .add_serialized_data(input_data.clone()) .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); - let result = vp::CTX.eval(code_hash, tx.batch_ref_first_tx()); + let result = vp::CTX.eval(code_hash, tx.batch_ref_first_tx().unwrap()); assert!(result.is_ok()); // evaluating the VP template which always returns `false` shouldn't @@ -674,7 +674,7 @@ mod tests { .add_serialized_data(input_data) .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); - let result = vp::CTX.eval(code_hash, tx.batch_ref_first_tx()); + let result = vp::CTX.eval(code_hash, tx.batch_ref_first_tx().unwrap()); assert!(result.is_err()); } @@ -707,8 +707,10 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Commit @@ -738,8 +740,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -779,8 +783,10 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Commit @@ -810,8 +816,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -852,8 +860,10 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Commit @@ -883,8 +893,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -927,8 +939,10 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Commit @@ -958,8 +972,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -1002,8 +1018,10 @@ mod tests { // Check let mut env = 
tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Commit @@ -1034,8 +1052,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -1087,8 +1107,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); // VP should fail because the transfer channel cannot be closed assert!(matches!( result.expect_err("validation succeeded unexpectedly"), @@ -1139,8 +1161,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); } @@ -1186,8 +1210,10 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was escrowed let escrow = token::storage_key::balance_key( @@ -1196,7 +1222,7 @@ mod tests { ); let token_vp_result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &escrow, ); assert!(token_vp_result.is_ok()); @@ -1234,8 +1260,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check the balance tx_host_env::set(env); @@ -1321,8 +1349,10 @@ mod tests { let mut env = tx_host_env::take(); // The token must be part of the verifier set (checked by MultitokenVp) env.verifiers.insert(ibc_token); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!( result.is_ok(), "Expected VP to accept the tx, got {result:?}" @@ -1330,7 +1360,7 @@ mod tests { // Check if the token was burned let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &minted_key, ); assert!( @@ -1401,16 +1431,20 @@ mod tests { // Check let mut env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was minted // The token must be part of the verifier set (checked by MultitokenVp) let denom = format!("{}/{}/{}", port_id, channel_id, token); let ibc_token = ibc::ibc_token(&denom); env.verifiers.insert(ibc_token.clone()); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!( result.is_ok(), "Expected VP to accept the tx, got {result:?}" @@ -1419,7 +1453,7 @@ mod tests { let minted_key = 
token::storage_key::minted_balance_key(&ibc_token); let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &minted_key, ); assert!( @@ -1492,8 +1526,10 @@ mod tests { // Check if the transaction is valid let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the ack has an error due to the invalid packet data tx_host_env::set(env); @@ -1585,13 +1621,15 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was unescrowed let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &escrow_key, ); assert!(result.is_ok()); @@ -1687,13 +1725,15 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was unescrowed let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &escrow_key, ); assert!(result.is_ok()); @@ -1781,8 +1821,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was refunded let escrow = token::storage_key::balance_key( @@ -1791,7 +1833,7 @@ mod tests { ); let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &escrow, ); assert!(result.is_ok()); @@ -1865,8 +1907,10 @@ mod tests { // Check let env = tx_host_env::take(); - let result = - ibc::validate_ibc_vp_from_tx(&env, &tx.batch_ref_first_tx()); + let result = ibc::validate_ibc_vp_from_tx( + &env, + &tx.batch_ref_first_tx().unwrap(), + ); assert!(result.is_ok()); // Check if the token was refunded let escrow = token::storage_key::balance_key( @@ -1875,7 +1919,7 @@ mod tests { ); let result = ibc::validate_multitoken_vp_from_tx( &env, - &tx.batch_ref_first_tx(), + &tx.batch_ref_first_tx().unwrap(), &escrow, ); assert!(result.is_ok()); diff --git a/crates/tests/src/vm_host_env/tx.rs b/crates/tests/src/vm_host_env/tx.rs index 594391024d..ef580dd0ba 100644 --- a/crates/tests/src/vm_host_env/tx.rs +++ b/crates/tests/src/vm_host_env/tx.rs @@ -200,7 +200,7 @@ impl TestTxEnv { } pub fn commit_tx_and_block(&mut self) { - self.state.commit_tx(); + self.state.commit_tx_batch(); self.state .commit_block() .map_err(|err| println!("{:?}", err)) diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index c14d93f430..3b4921b73a 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -67,6 +67,23 @@ where Ok(()) } +/// Arguments for a multi-party transparent token transfer +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Hash, + Eq, + PartialOrd, + Serialize, + Deserialize, +)] +pub struct TransparentTransfer(pub Vec); + /// Arguments for a transparent token 
transfer #[derive( Debug, @@ -82,7 +99,7 @@ where Serialize, Deserialize, )] -pub struct TransparentTransfer { +pub struct TransparentTransferData { /// Source address will spend the tokens pub source: Address, /// Target address will receive the tokens @@ -109,6 +126,8 @@ pub struct TransparentTransfer { Deserialize, )] pub struct ShieldedTransfer { + /// Optional unshield for fee payment + pub fee_unshield: Option, /// Hash of tx section that contains the MASP transaction pub section_hash: Hash, } @@ -129,13 +148,57 @@ pub struct ShieldedTransfer { Serialize, Deserialize, )] -pub struct ShieldingTransfer { +pub struct ShieldingTransferData { /// Source address will spend the tokens pub source: Address, /// Token's address pub token: Address, /// The amount of tokens pub amount: DenominatedAmount, +} + +/// Arguments for a shielding transfer (from a transparent token to a shielded +/// token) +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Hash, + Eq, + PartialOrd, + Serialize, + Deserialize, +)] +pub struct ShieldingTransfer { + /// Transfer-specific data + pub data: ShieldingTransferData, + /// Hash of tx section that contains the MASP transaction + pub shielded_section_hash: Hash, +} + +/// Arguments for a shielding transfer (from a transparent token to a shielded +/// token) +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Hash, + Eq, + PartialOrd, + Serialize, + Deserialize, +)] +pub struct ShieldingMultiTransfer { + /// Transfer-specific data + pub data: Vec, /// Hash of tx section that contains the MASP transaction pub shielded_section_hash: Hash, } @@ -156,13 +219,57 @@ pub struct ShieldingTransfer { Serialize, Deserialize, )] -pub struct UnshieldingTransfer { +pub struct UnshieldingTransferData { /// Target address will receive the tokens pub target: Address, /// Token's address pub token: Address, /// The amount of tokens pub amount: DenominatedAmount, +} + +/// Arguments for an unshielding transfer (from a shielded token to a +/// transparent token) +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Hash, + Eq, + PartialOrd, + Serialize, + Deserialize, +)] +pub struct UnshieldingTransfer { + /// Transfer-specific data + pub data: UnshieldingTransferData, + /// Hash of tx section that contains the MASP transaction + pub shielded_section_hash: Hash, +} + +/// Arguments for a multi-source unshielding transfer (from a shielded token to +/// a transparent token) +#[derive( + Debug, + Clone, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Hash, + Eq, + PartialOrd, + Serialize, + Deserialize, +)] +pub struct UnshieldingMultiTransfer { + /// Transfer-specific data + pub data: Vec, /// Hash of tx section that contains the MASP transaction pub shielded_section_hash: Hash, } @@ -178,17 +285,17 @@ pub mod testing { pub use namada_trans_token::testing::*; use proptest::prelude::*; - use super::TransparentTransfer; + use super::{TransparentTransfer, TransparentTransferData}; prop_compose! 
diff --git a/crates/tx_prelude/src/token.rs b/crates/tx_prelude/src/token.rs
index ab9ecaf571..50b2ad64f5 100644
--- a/crates/tx_prelude/src/token.rs
+++ b/crates/tx_prelude/src/token.rs
@@ -6,7 +6,8 @@ use namada_events::{EmitEvents, EventLevel};
 pub use namada_token::testing;
 pub use namada_token::{
     storage_key, utils, Amount, DenominatedAmount, ShieldedTransfer,
-    ShieldingTransfer, TransparentTransfer, UnshieldingTransfer,
+    ShieldingMultiTransfer, ShieldingTransfer, TransparentTransfer,
+    UnshieldingMultiTransfer, UnshieldingTransfer, UnshieldingTransferData,
 };
 use namada_tx_env::TxEnv;
diff --git a/genesis/localnet/parameters.toml b/genesis/localnet/parameters.toml
index bef2c1ae2d..6197990bcf 100644
--- a/genesis/localnet/parameters.toml
+++ b/genesis/localnet/parameters.toml
@@ -24,8 +24,8 @@ masp_epoch_multiplier = 2
 max_signatures_per_transaction = 15
 # Max gas for block
 max_block_gas = 20000000
-# Fee unshielding gas limit
-fee_unshielding_gas_limit = 20000
+# Masp fee payment gas limit
+masp_fee_payment_gas_limit = 20000

 # Map of the cost per gas unit for every token allowed for fee payment
 [parameters.minimum_gas_price]
diff --git a/genesis/starter/parameters.toml b/genesis/starter/parameters.toml
index dfb01522d6..43ad86b8d4 100644
--- a/genesis/starter/parameters.toml
+++ b/genesis/starter/parameters.toml
@@ -24,8 +24,8 @@ masp_epoch_multiplier = 2
 max_signatures_per_transaction = 15
 # Max gas for block
 max_block_gas = 20000000
-# Fee unshielding gas limit
-fee_unshielding_gas_limit = 20000
+# Masp fee payment gas limit
+masp_fee_payment_gas_limit = 20000

 # Map of the cost per gas unit for every token allowed for fee payment
 [parameters.minimum_gas_price]
diff --git a/wasm/tx_ibc/src/lib.rs b/wasm/tx_ibc/src/lib.rs
index e131a90c9a..f48d93c523 100644
--- a/wasm/tx_ibc/src/lib.rs
+++ b/wasm/tx_ibc/src/lib.rs
@@ -4,14 +4,31 @@
 //! `key::ed25519::SignedTxData` as its input as declared in `ibc` crate.

 use namada_tx_prelude::action::{Action, MaspAction, Write};
+use namada_tx_prelude::token::UnshieldingTransferData;
 use namada_tx_prelude::*;

 #[transaction]
 fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult {
     let data = ctx.get_tx_data(&tx_data)?;
-    let transfer =
+    let (transfer, masp_fee_payment) =
         ibc::ibc_actions(ctx).execute(&data).into_storage_result()?;

+    if let Some(UnshieldingTransferData {
+        token,
+        amount,
+        target,
+    }) = &masp_fee_payment
+    {
+        // Transparent unshield for fee payment
+        token::transfer(
+            ctx,
+            &Address::Internal(address::InternalAddress::Masp),
+            target,
+            token,
+            amount.amount(),
+        )?;
+    }
+
     if let Some(masp_section_ref) =
         transfer.map(|transfer| transfer.shielded_section_hash)
     {
diff --git a/wasm/tx_shielded_transfer/src/lib.rs b/wasm/tx_shielded_transfer/src/lib.rs
index cc9e70a638..11a46790d2 100644
--- a/wasm/tx_shielded_transfer/src/lib.rs
+++ b/wasm/tx_shielded_transfer/src/lib.rs
@@ -9,6 +9,17 @@ fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult {
     let transfer = token::ShieldedTransfer::try_from_slice(&data[..])
         .wrap_err("Failed to decode token::ShieldedTransfer tx data")?;
     debug_log!("apply_tx called with transfer: {:#?}", transfer);
+    if let Some(fee_unshield) = transfer.fee_unshield {
+        // Unshield for fee payment
+        token::transfer(
+            ctx,
+            &address::MASP,
+            &fee_unshield.target,
+            &fee_unshield.token,
+            fee_unshield.amount.amount(),
+        )
+        .wrap_err("Token transfer failed")?;
+    }

     let masp_section_ref = transfer.section_hash;
     let shielded = tx_data
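Not part of the patch: the `fee_unshield` field added to `ShieldedTransfer` is how a client asks for fees to be paid from the MASP pool; when it is set, the tx wasm above performs the transparent leg from the MASP internal address to the fee payer before handling the shielded bundle. A minimal construction sketch, assuming the MASP section hash, addresses and amount are computed elsewhere (import paths abbreviated and assumed):

use namada_core::address::Address;
use namada_core::hash::Hash;
use namada_token::{DenominatedAmount, ShieldedTransfer, UnshieldingTransferData};

// Hypothetical helper: attach an unshield-to-the-fee-payer record to a
// shielded transfer so that fees can be taken from the MASP pool.
fn shielded_transfer_with_fee_unshield(
    masp_tx_section_hash: Hash,
    fee_payer: Address,
    fee_token: Address,
    fee_amount: DenominatedAmount,
) -> ShieldedTransfer {
    ShieldedTransfer {
        fee_unshield: Some(UnshieldingTransferData {
            target: fee_payer,
            token: fee_token,
            amount: fee_amount,
        }),
        section_hash: masp_tx_section_hash,
    }
}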
diff --git a/wasm/tx_shielding_transfer/src/lib.rs b/wasm/tx_shielding_transfer/src/lib.rs
index 389942686b..499f983b3c 100644
--- a/wasm/tx_shielding_transfer/src/lib.rs
+++ b/wasm/tx_shielding_transfer/src/lib.rs
@@ -6,20 +6,22 @@ use namada_tx_prelude::*;
 #[transaction]
 fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult {
     let data = ctx.get_tx_data(&tx_data)?;
-    let transfer = token::ShieldingTransfer::try_from_slice(&data[..])
+    let transfers = token::ShieldingMultiTransfer::try_from_slice(&data[..])
         .wrap_err("Failed to decode token::ShieldingTransfer tx data")?;
-    debug_log!("apply_tx called with transfer: {:#?}", transfer);
+    debug_log!("apply_tx called with transfer: {:#?}", transfers);

-    token::transfer(
-        ctx,
-        &transfer.source,
-        &address::MASP,
-        &transfer.token,
-        transfer.amount.amount(),
-    )
-    .wrap_err("Token transfer failed")?;
+    for transfer in transfers.data {
+        token::transfer(
+            ctx,
+            &transfer.source,
+            &address::MASP,
+            &transfer.token,
+            transfer.amount.amount(),
+        )
+        .wrap_err("Token transfer failed")?;
+    }

-    let masp_section_ref = transfer.shielded_section_hash;
+    let masp_section_ref = transfers.shielded_section_hash;
     let shielded = tx_data
         .tx
         .get_section(&masp_section_ref)
diff --git a/wasm/tx_transparent_transfer/src/lib.rs b/wasm/tx_transparent_transfer/src/lib.rs
index 76aacc3891..0da40600d8 100644
--- a/wasm/tx_transparent_transfer/src/lib.rs
+++ b/wasm/tx_transparent_transfer/src/lib.rs
@@ -7,16 +7,20 @@ use namada_tx_prelude::*;
 #[transaction]
 fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult {
     let data = ctx.get_tx_data(&tx_data)?;
-    let transfer = token::TransparentTransfer::try_from_slice(&data[..])
+    let transfers = token::TransparentTransfer::try_from_slice(&data[..])
         .wrap_err("Failed to decode token::TransparentTransfer tx data")?;
-    debug_log!("apply_tx called with transfer: {:#?}", transfer);
+    debug_log!("apply_tx called with transfer: {:#?}", transfers);

-    token::transfer(
-        ctx,
-        &transfer.source,
-        &transfer.target,
-        &transfer.token,
-        transfer.amount.amount(),
-    )
-    .wrap_err("Token transfer failed")
+    for transfer in transfers.0 {
+        token::transfer(
+            ctx,
+            &transfer.source,
+            &transfer.target,
+            &transfer.token,
+            transfer.amount.amount(),
+        )
+        .wrap_err("Token transfer failed")?;
+    }
+
+    Ok(())
 }
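Not part of the patch: the shielding and unshielding payloads follow the same vectorized shape, several transparent records settled against a single MASP transaction section, and the loops above propagate the first failing sub-transfer with `?`, so one bad entry aborts the whole inner tx. A minimal sketch for assembling the shielding side (import paths abbreviated and assumed):

use namada_core::hash::Hash;
use namada_token::{ShieldingMultiTransfer, ShieldingTransferData};

// Hypothetical helper: bundle one record per transparent source under the
// shared MASP section hash.
fn shielding_payload(
    sources: Vec<ShieldingTransferData>,
    shielded_section_hash: Hash,
) -> ShieldingMultiTransfer {
    ShieldingMultiTransfer {
        data: sources,
        shielded_section_hash,
    }
}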
diff --git a/wasm/tx_unshielding_transfer/src/lib.rs b/wasm/tx_unshielding_transfer/src/lib.rs
index 79bdac0757..f0bf375c73 100644
--- a/wasm/tx_unshielding_transfer/src/lib.rs
+++ b/wasm/tx_unshielding_transfer/src/lib.rs
@@ -6,20 +6,22 @@ use namada_tx_prelude::*;
 #[transaction]
 fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult {
     let data = ctx.get_tx_data(&tx_data)?;
-    let transfer = token::UnshieldingTransfer::try_from_slice(&data[..])
+    let transfers = token::UnshieldingMultiTransfer::try_from_slice(&data[..])
         .wrap_err("Failed to decode token::UnshieldingTransfer tx data")?;
-    debug_log!("apply_tx called with transfer: {:#?}", transfer);
+    debug_log!("apply_tx called with transfer: {:#?}", transfers);

-    token::transfer(
-        ctx,
-        &address::MASP,
-        &transfer.target,
-        &transfer.token,
-        transfer.amount.amount(),
-    )
-    .wrap_err("Token transfer failed")?;
+    for transfer in transfers.data {
+        token::transfer(
+            ctx,
+            &address::MASP,
+            &transfer.target,
+            &transfer.token,
+            transfer.amount.amount(),
+        )
+        .wrap_err("Token transfer failed")?;
+    }

-    let masp_section_ref = transfer.shielded_section_hash;
+    let masp_section_ref = transfers.shielded_section_hash;
     let shielded = tx_data
         .tx
         .get_section(&masp_section_ref)