From f697611b10e56a20fee36abf03339ca174d80493 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 1 Dec 2023 09:00:16 -0500 Subject: [PATCH 01/41] Create a boot contract to initialize pre-pox-4 aggregate key Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/stacks/boot/mod.rs | 5 +++ testnet/stacks-node/src/mockamoto.rs | 46 +++++++++++++++++++-- 2 files changed, 47 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 2f2cc637c7..2dfcef0b53 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -81,6 +81,11 @@ const POX_4_BODY: &'static str = std::include_str!("pox-4.clar"); pub const COSTS_1_NAME: &'static str = "costs"; pub const COSTS_2_NAME: &'static str = "costs-2"; pub const COSTS_3_NAME: &'static str = "costs-3"; +/// This contract name is used in testnet **only** to lookup an initial +/// setting for the pox-4 aggregate key. 
This contract should contain a `define-read-only` +/// function called `aggregate-key` with zero arguments which returns a (buff 33) +pub const BOOT_TEST_POX_4_AGG_KEY_CONTRACT: &'static str = "pox-4-agg-test-booter"; +pub const BOOT_TEST_POX_4_AGG_KEY_FNAME: &'static str = "aggregate-key"; pub mod docs; diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 20bd7106b8..114f6c0418 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -5,8 +5,10 @@ use std::thread; use std::thread::{sleep, JoinHandle}; use std::time::Duration; +use clarity::boot_util::boot_code_id; use clarity::vm::ast::ASTRules; -use clarity::vm::Value as ClarityValue; +use clarity::vm::clarity::TransactionConnection; +use clarity::vm::{ClarityVersion, Value as ClarityValue}; use lazy_static::lazy_static; use stacks::burnchains::bitcoin::address::{ BitcoinAddress, LegacyBitcoinAddress, LegacyBitcoinAddressType, @@ -33,6 +35,9 @@ use stacks::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, SetupBlockResult, }; use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::boot::{ + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, +}; use stacks::chainstate::stacks::db::{ChainStateBootData, ClarityTx, StacksChainState}; use stacks::chainstate::stacks::miner::{ BlockBuilder, BlockBuilderSettings, BlockLimitFunction, MinerStatus, TransactionResult, @@ -64,7 +69,7 @@ use stacks_common::types::chainstate::{ StacksPrivateKey, VRFSeed, }; use stacks_common::types::{PrivateKey, StacksEpochId}; -use stacks_common::util::hash::{Hash160, MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::hash::{to_hex, Hash160, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, SchnorrSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; @@ -405,7 +410,40 @@ impl MockamotoNode { 
initial_balances.push((stacker.into(), 100_000_000_000_000)); - let mut boot_data = ChainStateBootData::new(&burnchain, initial_balances, None); + // Create a boot contract to initialize the aggregate public key prior to Pox-4 activation + let self_signer = SelfSigner::single_signer(); + let agg_pub_key = to_hex(&self_signer.aggregate_public_key.compress().data); + info!("Mockamoto node setting agg public key"; "agg_pub_key" => &agg_pub_key); + let callback = move |clarity_tx: &mut ClarityTx| { + let contract_content = format!( + "(define-read-only ({}) 0x{})", + BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key + ); + let contract_id = boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false); + clarity_tx.connection().as_transaction(|clarity| { + let (ast, analysis) = clarity + .analyze_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &contract_content, + ASTRules::PrecheckSize, + ) + .unwrap(); + clarity + .initialize_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &ast, + &contract_content, + None, + |_, _| false, + ) + .unwrap(); + clarity.save_analysis(&contract_id, &analysis).unwrap(); + }) + }; + let mut boot_data = + ChainStateBootData::new(&burnchain, initial_balances, Some(Box::new(callback))); let (chainstate, boot_receipts) = StacksChainState::open_and_exec( config.is_mainnet(), config.burnchain.chain_id, @@ -446,7 +484,7 @@ impl MockamotoNode { Ok(MockamotoNode { sortdb, - self_signer: SelfSigner::single_signer(), + self_signer, chainstate, miner_key, vrf_key, From 8783eab434aca97e81719d5dc57c62bad23e280e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 1 Dec 2023 09:04:22 -0500 Subject: [PATCH 02/41] Retrieve boot contract init agg key and set all pre-pox-4 cycles to it Signed-off-by: Jacinta Ferrant --- stackslib/src/clarity_vm/clarity.rs | 75 +++++++++++++++++++++++++++-- 1 file changed, 71 insertions(+), 4 deletions(-) diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index 
aed3bb9947..59b5463d79 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -38,16 +38,17 @@ use clarity::vm::types::{ use clarity::vm::{analysis, ast, ClarityVersion, ContractName}; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksBlockId, TrieHash, + BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, TrieHash, }; use stacks_common::util::secp256k1::MessageSignature; use crate::burnchains::{Burnchain, PoxConstants}; use crate::chainstate::stacks::boot::{ BOOT_CODE_COSTS, BOOT_CODE_COSTS_2, BOOT_CODE_COSTS_2_TESTNET, BOOT_CODE_COSTS_3, - BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, COSTS_2_NAME, - COSTS_3_NAME, POX_2_MAINNET_CODE, POX_2_NAME, POX_2_TESTNET_CODE, POX_3_MAINNET_CODE, - POX_3_NAME, POX_3_TESTNET_CODE, POX_4_MAINNET_CODE, POX_4_NAME, POX_4_TESTNET_CODE, + BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, COSTS_2_NAME, COSTS_3_NAME, + POX_2_MAINNET_CODE, POX_2_NAME, POX_2_TESTNET_CODE, POX_3_MAINNET_CODE, POX_3_NAME, + POX_3_TESTNET_CODE, POX_4_MAINNET_CODE, POX_4_NAME, POX_4_TESTNET_CODE, }; use crate::chainstate::stacks::db::{StacksAccount, StacksChainState}; use crate::chainstate::stacks::events::{StacksTransactionEvent, StacksTransactionReceipt}; @@ -1343,6 +1344,32 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { let pox_4_contract_tx = StacksTransaction::new(tx_version.clone(), boot_code_auth.clone(), payload); + let initialized_agg_key = if !mainnet { + self.with_readonly_clarity_env( + false, + self.chain_id, + ClarityVersion::Clarity2, + StacksAddress::burn_address(false).into(), + None, + LimitedCostTracker::Free, + |vm_env| { + vm_env.execute_contract_allow_private( + &boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false), + 
BOOT_TEST_POX_4_AGG_KEY_FNAME, + &[], + true, + ) + }, + ) + .ok() + .map(|agg_key_value| { + Value::buff_from(agg_key_value.expect_buff(33)) + .expect("failed to reconstruct buffer") + }) + } else { + None + }; + let pox_4_initialization_receipt = self.as_transaction(|tx_conn| { // initialize with a synthetic transaction debug!("Instantiate {} contract", &pox_4_contract_id); @@ -1375,6 +1402,46 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { ) .expect("Failed to set burnchain parameters in PoX-3 contract"); + // set the aggregate public key for all pre-pox-4 cycles, if in testnet, and can fetch a boot-setting + if !mainnet { + if let Some(ref agg_pub_key) = initialized_agg_key { + for set_in_reward_cycle in 0..pox_4_first_cycle { + info!( + "Setting initial aggregate-public-key in PoX-4"; + "agg_pub_key" => %agg_pub_key, + "reward_cycle" => set_in_reward_cycle, + ); + tx_conn + .with_abort_callback( + |vm_env| { + vm_env.execute_in_env( + StacksAddress::burn_address(false).into(), + None, + None, + |env| { + env.execute_contract_allow_private( + &pox_4_contract_id, + "set-aggregate-public-key", + &[ + SymbolicExpression::atom_value( + Value::UInt(set_in_reward_cycle.into()), + ), + SymbolicExpression::atom_value( + agg_pub_key.clone(), + ), + ], + false, + ) + }, + ) + }, + |_, _| false, + ) + .unwrap(); + } + } + } + receipt }); From 24a16cdd3a309d9a845aea3b6bdcb9d8508f918a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 1 Dec 2023 09:25:20 -0500 Subject: [PATCH 03/41] Only use the self_signer aggregate pub key for genesis blocks Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto.rs | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 114f6c0418..14f538ab4d 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -998,7 +998,28 @@ impl MockamotoNode { let config = 
self.chainstate.config(); let chain_length = block.header.chain_length; let sortition_handle = self.sortdb.index_handle_at_tip(); - let aggregate_public_key = self.self_signer.aggregate_public_key; + let aggregate_public_key = if chain_length <= 1 { + self.self_signer.aggregate_public_key + } else { + let block_sn = SortitionDB::get_block_snapshot_consensus( + sortition_handle.conn(), + &block.header.consensus_hash, + )? + .ok_or(ChainstateError::DBError(DBError::NotFoundError))?; + // TODO: https://github.com/stacks-network/stacks-core/issues/4109 + // Update this to retrieve the last block in the last reward cycle rather than chain tip + let aggregate_key_block_header = + NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &self.sortdb)? + .unwrap(); + let aggregate_public_key = NakamotoChainState::get_aggregate_public_key( + &self.sortdb, + &sortition_handle, + &mut self.chainstate, + block_sn.block_height, + &aggregate_key_block_header.index_block_hash(), + )?; + aggregate_public_key + }; self.self_signer.sign_nakamoto_block(&mut block); let staging_tx = self.chainstate.staging_db_tx_begin()?; NakamotoChainState::accept_block( From cad57dc91abbedcac3719547dde6fd92b2047fd3 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 1 Dec 2023 09:57:16 -0500 Subject: [PATCH 04/41] Set the aggregate public key for the NEXT reward cycle in every block Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto.rs | 45 +++++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 14f538ab4d..c3f9511ba1 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -904,7 +904,50 @@ impl MockamotoNode { parent_chain_length + 1, )?; - let txs = vec![tenure_tx, coinbase_tx, stacks_stx_tx]; + // Set the aggregate public key for the NEXT reward cycle hence +1 + let reward_cycle = self + .sortdb + 
.pox_constants + .block_height_to_reward_cycle( + self.sortdb.first_block_height, + sortition_tip.block_height, + ) + .expect( + format!( + "Failed to determine reward cycle of block height: {}", + sortition_tip.block_height + ) + .as_str(), + ) + + 1; + let aggregate_payload = TransactionPayload::ContractCall(TransactionContractCall { + address: StacksAddress::burn_address(false), + contract_name: "pox-4".try_into().unwrap(), + function_name: "set-aggregate-public-key".try_into().unwrap(), + function_args: vec![ + ClarityValue::UInt(u128::from(reward_cycle)), + ClarityValue::buff_from( + self.self_signer + .aggregate_public_key + .compress() + .data + .to_vec(), + ) + .expect("Failed to serialize aggregate public key"), + ], + }); + let mut aggregate_tx: StacksTransaction = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), + aggregate_payload, + ); + aggregate_tx.chain_id = chain_id; + aggregate_tx.set_origin_nonce(miner_nonce + 3); + let mut aggregate_tx_signer = StacksTransactionSigner::new(&aggregate_tx); + aggregate_tx_signer.sign_origin(&self.miner_key).unwrap(); + let aggregate_tx = aggregate_tx_signer.get_tx().unwrap(); + + let txs = vec![tenure_tx, coinbase_tx, stacks_stx_tx, aggregate_tx]; let _ = match StacksChainState::process_block_transactions( &mut clarity_tx, From 76ccefa8250ee2ec8e717fb8aea0308be59477bc Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 1 Dec 2023 10:41:45 -0500 Subject: [PATCH 05/41] Cleanup mine_stacks_block by pulling transaction construction into sep functions Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto.rs | 270 +++++++++++++++------------ 1 file changed, 150 insertions(+), 120 deletions(-) diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index c3f9511ba1..dee2af6049 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -36,7 +36,7 @@ use 
stacks::chainstate::nakamoto::{ }; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::{ - BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, POX_4_NAME, }; use stacks::chainstate::stacks::db::{ChainStateBootData, ClarityTx, StacksChainState}; use stacks::chainstate::stacks::miner::{ @@ -72,6 +72,7 @@ use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::{to_hex, Hash160, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, SchnorrSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; +use wsts::curve::point::Point; use self::signer::SelfSigner; use crate::neon::Counters; @@ -800,86 +801,46 @@ impl MockamotoNode { "chain_tip_ch" => %chain_tip_ch, "miner_account" => %miner_principal, "miner_nonce" => %miner_nonce, ); - let vrf_proof = VRF::prove(&self.vrf_key, sortition_tip.sortition_hash.as_bytes()); - let coinbase_tx_payload = - TransactionPayload::Coinbase(CoinbasePayload([1; 32]), None, Some(vrf_proof)); - let mut coinbase_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), - coinbase_tx_payload, - ); - coinbase_tx.chain_id = chain_id; - coinbase_tx.set_origin_nonce(miner_nonce + 1); - let mut coinbase_tx_signer = StacksTransactionSigner::new(&coinbase_tx); - coinbase_tx_signer.sign_origin(&self.miner_key).unwrap(); - let coinbase_tx = coinbase_tx_signer.get_tx().unwrap(); - // Add a tenure change transaction to the block: // as of now every mockamoto block is a tenure-change. // If mockamoto mode changes to support non-tenure-changing blocks, this will have // to be gated. 
- let tenure_change_tx_payload = TransactionPayload::TenureChange( - TenureChangePayload { - previous_tenure_end: parent_block_id, - previous_tenure_blocks: 1, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0; 20]), - - signers: vec![], - }, - ThresholdSignature::mock(), - ); - let mut tenure_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), - tenure_change_tx_payload, - ); - tenure_tx.chain_id = chain_id; - tenure_tx.set_origin_nonce(miner_nonce); - let mut tenure_tx_signer = StacksTransactionSigner::new(&tenure_tx); - tenure_tx_signer.sign_origin(&self.miner_key).unwrap(); - let tenure_tx = tenure_tx_signer.get_tx().unwrap(); - - let pox_address = PoxAddress::Standard( - StacksAddress::burn_address(false), - Some(AddressHashMode::SerializeP2PKH), + let tenure_tx = + make_tenure_change_tx(&self.miner_key, miner_nonce, chain_id, parent_block_id); + let vrf_proof = VRF::prove(&self.vrf_key, sortition_tip.sortition_hash.as_bytes()); + let coinbase_tx = + make_coinbase_tx(&self.miner_key, miner_nonce + 1, chain_id, Some(vrf_proof)); + let stacks_stx_tx = make_stacks_stx_tx( + &self.miner_key, + miner_nonce + 2, + chain_id, + parent_chain_length, + parent_burn_height, ); - - let stack_stx_payload = if parent_chain_length < 2 { - TransactionPayload::ContractCall(TransactionContractCall { - address: StacksAddress::burn_address(false), - contract_name: "pox-4".try_into().unwrap(), - function_name: "stack-stx".try_into().unwrap(), - function_args: vec![ - ClarityValue::UInt(99_000_000_000_000), - pox_address.as_clarity_tuple().unwrap().into(), - ClarityValue::UInt(u128::from(parent_burn_height)), - ClarityValue::UInt(12), - ], - }) - } else { - // NOTE: stack-extend doesn't currently work, because the PoX-4 lockup - // special functions have not been implemented. 
- TransactionPayload::ContractCall(TransactionContractCall { - address: StacksAddress::burn_address(false), - contract_name: "pox-4".try_into().unwrap(), - function_name: "stack-extend".try_into().unwrap(), - function_args: vec![ - ClarityValue::UInt(5), - pox_address.as_clarity_tuple().unwrap().into(), - ], - }) - }; - let mut stack_stx_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), - stack_stx_payload, + // Set the aggregate public key for the NEXT reward cycle hence +1 + let reward_cycle = self + .sortdb + .pox_constants + .block_height_to_reward_cycle( + self.sortdb.first_block_height, + sortition_tip.block_height, + ) + .expect( + format!( + "Failed to determine reward cycle of block height: {}", + sortition_tip.block_height + ) + .as_str(), + ) + + 1; + let aggregate_tx = make_aggregate_tx( + &self.miner_key, + miner_nonce + 3, + chain_id, + &self.self_signer.aggregate_public_key, + reward_cycle, ); - stack_stx_tx.chain_id = chain_id; - stack_stx_tx.set_origin_nonce(miner_nonce + 2); - let mut stack_stx_tx_signer = StacksTransactionSigner::new(&stack_stx_tx); - stack_stx_tx_signer.sign_origin(&self.miner_key).unwrap(); - let stacks_stx_tx = stack_stx_tx_signer.get_tx().unwrap(); + let txs = vec![tenure_tx, coinbase_tx, stacks_stx_tx, aggregate_tx]; let sortdb_handle = self.sortdb.index_conn(); let SetupBlockResult { @@ -904,51 +865,6 @@ impl MockamotoNode { parent_chain_length + 1, )?; - // Set the aggregate public key for the NEXT reward cycle hence +1 - let reward_cycle = self - .sortdb - .pox_constants - .block_height_to_reward_cycle( - self.sortdb.first_block_height, - sortition_tip.block_height, - ) - .expect( - format!( - "Failed to determine reward cycle of block height: {}", - sortition_tip.block_height - ) - .as_str(), - ) - + 1; - let aggregate_payload = TransactionPayload::ContractCall(TransactionContractCall { - address: StacksAddress::burn_address(false), - contract_name: 
"pox-4".try_into().unwrap(), - function_name: "set-aggregate-public-key".try_into().unwrap(), - function_args: vec![ - ClarityValue::UInt(u128::from(reward_cycle)), - ClarityValue::buff_from( - self.self_signer - .aggregate_public_key - .compress() - .data - .to_vec(), - ) - .expect("Failed to serialize aggregate public key"), - ], - }); - let mut aggregate_tx: StacksTransaction = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), - aggregate_payload, - ); - aggregate_tx.chain_id = chain_id; - aggregate_tx.set_origin_nonce(miner_nonce + 3); - let mut aggregate_tx_signer = StacksTransactionSigner::new(&aggregate_tx); - aggregate_tx_signer.sign_origin(&self.miner_key).unwrap(); - let aggregate_tx = aggregate_tx_signer.get_tx().unwrap(); - - let txs = vec![tenure_tx, coinbase_tx, stacks_stx_tx, aggregate_tx]; - let _ = match StacksChainState::process_block_transactions( &mut clarity_tx, &txs, @@ -1076,3 +992,117 @@ impl MockamotoNode { Ok(chain_length) } } + +// Helper function to make a signed tenure change transaction +fn make_tenure_change_tx( + key: &StacksPrivateKey, + miner_nonce: u64, + chain_id: u32, + parent_block_id: StacksBlockId, +) -> StacksTransaction { + let tenure_change_tx_payload = TransactionPayload::TenureChange( + TenureChangePayload { + previous_tenure_end: parent_block_id, + previous_tenure_blocks: 1, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0; 20]), + signers: vec![], + }, + ThresholdSignature::mock(), + ); + make_tx(key, miner_nonce, tenure_change_tx_payload, chain_id) +} + +// Helper function to make a signed coinbase transaction +fn make_coinbase_tx( + key: &StacksPrivateKey, + miner_nonce: u64, + chain_id: u32, + vrf_proof: Option, +) -> StacksTransaction { + let coinbase_tx_payload = + TransactionPayload::Coinbase(CoinbasePayload([1; 32]), None, vrf_proof); + make_tx(key, miner_nonce, coinbase_tx_payload, chain_id) +} + +// Helper function to make a signed 
stacks-stx transaction +fn make_stacks_stx_tx( + key: &StacksPrivateKey, + miner_nonce: u64, + chain_id: u32, + parent_chain_length: u64, + parent_burn_height: u32, +) -> StacksTransaction { + let pox_address = PoxAddress::Standard( + StacksAddress::burn_address(false), + Some(AddressHashMode::SerializeP2PKH), + ); + + let stack_stx_payload = if parent_chain_length < 2 { + TransactionPayload::ContractCall(TransactionContractCall { + address: StacksAddress::burn_address(false), + contract_name: POX_4_NAME.into(), + function_name: "stack-stx".try_into().unwrap(), + function_args: vec![ + ClarityValue::UInt(99_000_000_000_000), + pox_address.as_clarity_tuple().unwrap().into(), + ClarityValue::UInt(u128::from(parent_burn_height)), + ClarityValue::UInt(12), + ], + }) + } else { + // NOTE: stack-extend doesn't currently work, because the PoX-4 lockup + // special functions have not been implemented. + TransactionPayload::ContractCall(TransactionContractCall { + address: StacksAddress::burn_address(false), + contract_name: POX_4_NAME.into(), + function_name: "stack-extend".try_into().unwrap(), + function_args: vec![ + ClarityValue::UInt(5), + pox_address.as_clarity_tuple().unwrap().into(), + ], + }) + }; + make_tx(key, miner_nonce, stack_stx_payload, chain_id) +} + +/// Helper function to make a set-aggregate-public-key transaction +fn make_aggregate_tx( + key: &StacksPrivateKey, + nonce: u64, + chain_id: u32, + aggregate_public_key: &Point, + reward_cycle: u64, +) -> StacksTransaction { + let aggregate_payload = TransactionPayload::ContractCall(TransactionContractCall { + address: StacksAddress::burn_address(false), + contract_name: POX_4_NAME.into(), + function_name: "set-aggregate-public-key".try_into().unwrap(), + function_args: vec![ + ClarityValue::UInt(u128::from(reward_cycle)), + ClarityValue::buff_from(aggregate_public_key.compress().data.to_vec()) + .expect("Failed to serialize aggregate public key"), + ], + }); + make_tx(&key, nonce, aggregate_payload, 
chain_id) +} + +/// Helper function to create a zero fee transaction +/// TODO: this is duplicated in so many places. We should have a utils fn for this +fn make_tx( + key: &StacksPrivateKey, + nonce: u64, + tx_payload: TransactionPayload, + chain_id: u32, +) -> StacksTransaction { + let mut tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&key).unwrap(), + tx_payload, + ); + tx.chain_id = chain_id; + tx.set_origin_nonce(nonce); + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&key).unwrap(); + tx_signer.get_tx().unwrap() +} From 03848a3341597a68954178a2af1fcc54ac44fec5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 4 Dec 2023 10:38:44 -0500 Subject: [PATCH 06/41] CRC: add test to set and get the aggregate public key Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto/tests.rs | 162 ++++++++++++++++++++- 1 file changed, 161 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs index d1f3696418..91325431c2 100644 --- a/testnet/stacks-node/src/mockamoto/tests.rs +++ b/testnet/stacks-node/src/mockamoto/tests.rs @@ -1,18 +1,25 @@ use std::thread; use std::time::{Duration, Instant}; +use clarity::boot_util::boot_code_addr; use clarity::vm::costs::ExecutionCost; +use clarity::vm::Value; +use rand_core::OsRng; +use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::nakamoto::NakamotoChainState; +use stacks::chainstate::stacks::boot::POX_4_NAME; use stacks::chainstate::stacks::db::StacksChainState; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey}; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; +use wsts::curve::point::Point; +use wsts::curve::scalar::Scalar; use super::MockamotoNode; use crate::config::{EventKeyType, EventObserverConfig}; use crate::neon_node::PeerThread; use 
crate::tests::neon_integrations::test_observer; -use crate::tests::{make_stacks_transfer, to_addr}; +use crate::tests::{make_contract_call, make_stacks_transfer, to_addr}; use crate::{Config, ConfigFile}; #[test] @@ -120,3 +127,156 @@ fn observe_100_blocks() { .join() .expect("Failed to join node thread to exit"); } + +#[test] +fn observe_set_aggregate_tx() { + let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap(); + conf.node.mockamoto_time_ms = 10; + + let submitter_sk = StacksPrivateKey::from_seed(&[1]); + let submitter_addr = to_addr(&submitter_sk); + conf.add_initial_balance(submitter_addr.to_string(), 1_000); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut mockamoto = MockamotoNode::new(&conf).unwrap(); + + let globals = mockamoto.globals.clone(); + + let mut mempool = PeerThread::connect_mempool_db(&conf); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(mockamoto.sortdb.conn()).unwrap(); + + let start = Instant::now(); + // Get a reward cycle to compare against + let reward_cycle = mockamoto + .sortdb + .pox_constants + .block_height_to_reward_cycle( + mockamoto.sortdb.first_block_height, + sortition_tip.block_height, + ) + .expect( + format!( + "Failed to determine reward cycle of block height: {}", + sortition_tip.block_height + ) + .as_str(), + ); + + let node_thread = thread::Builder::new() + .name("mockamoto-main".into()) + .spawn(move || { + mockamoto.run(); + let aggregate_key_block_header = NakamotoChainState::get_canonical_block_header( + 
mockamoto.chainstate.db(), + &mockamoto.sortdb, + ) + .unwrap() + .unwrap(); + // Get the aggregate public key to later verify that it was set correctly + mockamoto + .chainstate + .get_aggregate_public_key_pox_4( + &mockamoto.sortdb, + &aggregate_key_block_header.index_block_hash(), + reward_cycle, + ) + .unwrap() + }) + .expect("FATAL: failed to start mockamoto main thread"); + + // Create a "set-aggregate-public-key" tx to verify it sets correctly + let mut rng = OsRng::default(); + let x = Scalar::random(&mut rng); + let random_key = Point::from(x); + + let aggregate_public_key = Value::buff_from(random_key.compress().data.to_vec()) + .expect("Failed to serialize aggregate public key"); + let aggregate_tx = make_contract_call( + &submitter_sk, + 0, + 10, + &boot_code_addr(false), + POX_4_NAME, + "set-aggregate-public-key", + &[Value::UInt(u128::from(reward_cycle)), aggregate_public_key], + ); + let aggregate_tx_hex = format!("0x{}", to_hex(&aggregate_tx)); + + // complete within 5 seconds or abort (we are only observing one block) + let completed = loop { + if Instant::now().duration_since(start) > Duration::from_secs(5) { + break false; + } + let latest_block = test_observer::get_blocks().pop(); + thread::sleep(Duration::from_secs(1)); + let Some(ref latest_block) = latest_block else { + info!("No block observed yet!"); + continue; + }; + let stacks_block_height = latest_block.get("block_height").unwrap().as_u64().unwrap(); + info!("Block height observed: {stacks_block_height}"); + + // Submit the aggregate tx for processing to update the aggregate public key + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + mempool + .submit_raw( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + aggregate_tx.clone(), + &ExecutionCost::max_value(), + &StacksEpochId::Epoch30, + ) + .unwrap(); + break true; + }; + + globals.signal_stop(); + + let aggregate_key = node_thread + 
.join() + .expect("Failed to join node thread to exit"); + + // Did we set and retrieve the aggregate key correctly? + assert_eq!(aggregate_key.unwrap(), random_key); + + let aggregate_tx_included = test_observer::get_blocks() + .into_iter() + .find(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&aggregate_tx_hex)) + .is_some() + }) + .is_some(); + + assert!( + aggregate_tx_included, + "Mockamoto node failed to include the aggregate tx" + ); + + assert!( + completed, + "Mockamoto node failed to produce and announce its block before timeout" + ); +} From 90dce75a6dc27670fc48b02dce7ae41d0eaac155 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 4 Dec 2023 16:59:00 -0500 Subject: [PATCH 07/41] CRC: check that the initial aggregate key was set correctly Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto/tests.rs | 34 ++++++++++++++++------ 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs index 91325431c2..aab08337ac 100644 --- a/testnet/stacks-node/src/mockamoto/tests.rs +++ b/testnet/stacks-node/src/mockamoto/tests.rs @@ -145,6 +145,8 @@ fn observe_set_aggregate_tx() { }); let mut mockamoto = MockamotoNode::new(&conf).unwrap(); + // Get the aggregate public key of the original reward cycle to compare against + let orig_key = mockamoto.self_signer.aggregate_public_key; let globals = mockamoto.globals.clone(); @@ -161,7 +163,7 @@ fn observe_set_aggregate_tx() { let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(mockamoto.sortdb.conn()).unwrap(); let start = Instant::now(); - // Get a reward cycle to compare against + // Get the reward cycle of the sortition tip let reward_cycle = mockamoto .sortdb .pox_constants @@ -187,15 +189,25 @@ fn observe_set_aggregate_tx() { ) .unwrap() .unwrap(); - // Get the aggregate public key to later verify that it was 
set correctly - mockamoto + // Get the aggregate public key of the original reward cycle + let orig_aggregate_key = mockamoto .chainstate .get_aggregate_public_key_pox_4( &mockamoto.sortdb, &aggregate_key_block_header.index_block_hash(), reward_cycle, ) - .unwrap() + .unwrap(); + // Get the aggregate public key of the next reward cycle that we manually overwrote + let new_aggregate_key = mockamoto + .chainstate + .get_aggregate_public_key_pox_4( + &mockamoto.sortdb, + &aggregate_key_block_header.index_block_hash(), + reward_cycle + 1, + ) + .unwrap(); + (orig_aggregate_key, new_aggregate_key) }) .expect("FATAL: failed to start mockamoto main thread"); @@ -213,7 +225,10 @@ fn observe_set_aggregate_tx() { &boot_code_addr(false), POX_4_NAME, "set-aggregate-public-key", - &[Value::UInt(u128::from(reward_cycle)), aggregate_public_key], + &[ + Value::UInt(u128::from(reward_cycle + 1)), + aggregate_public_key, + ], ); let aggregate_tx_hex = format!("0x{}", to_hex(&aggregate_tx)); @@ -251,13 +266,10 @@ fn observe_set_aggregate_tx() { globals.signal_stop(); - let aggregate_key = node_thread + let (orig_aggregate_key, new_aggregate_key) = node_thread .join() .expect("Failed to join node thread to exit"); - // Did we set and retrieve the aggregate key correctly? - assert_eq!(aggregate_key.unwrap(), random_key); - let aggregate_tx_included = test_observer::get_blocks() .into_iter() .find(|block_json| { @@ -279,4 +291,8 @@ fn observe_set_aggregate_tx() { completed, "Mockamoto node failed to produce and announce its block before timeout" ); + + // Did we set and retrieve the aggregate key correctly? 
+ assert_eq!(orig_aggregate_key.unwrap(), orig_key); + assert_eq!(new_aggregate_key.unwrap(), random_key); } From b79762aabe65ba95d7af82b69b8703dcd0c9607a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 7 Dec 2023 07:53:37 -0500 Subject: [PATCH 08/41] Fix failed rebase by updating mockamoto tests.rs to use insert instead of push Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs index aab08337ac..915833ea20 100644 --- a/testnet/stacks-node/src/mockamoto/tests.rs +++ b/testnet/stacks-node/src/mockamoto/tests.rs @@ -139,7 +139,7 @@ fn observe_set_aggregate_tx() { test_observer::spawn(); let observer_port = test_observer::EVENT_OBSERVER_PORT; - conf.events_observers.push(EventObserverConfig { + conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], }); From 287c19b8398dcbd770638aaa58d7e5ddc1d2cfb5 Mon Sep 17 00:00:00 2001 From: Joey Yandle Date: Fri, 1 Dec 2023 13:15:40 -0500 Subject: [PATCH 09/41] make set-aggregate-public-key private in the pox4 contract call set-aggregate-public-key during append_block pass reward cycle and agg pubkey as symbolic expressions to private contract call read aggregate public key from parent reward cycle then set it in the following remove TODO comment referencing the issue being fixed use from instead of as for explicit cast fmt fixes --- stackslib/src/chainstate/nakamoto/mod.rs | 83 ++++++++++++++++++- .../src/chainstate/stacks/boot/pox-4.clar | 3 +- 2 files changed, 81 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index a7ca6e6a79..1560106a07 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -18,10 +18,11 @@ use 
std::collections::HashSet; use std::ops::DerefMut; use clarity::vm::ast::ASTRules; -use clarity::vm::costs::ExecutionCost; +use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::database::BurnStateDB; use clarity::vm::events::StacksTransactionEvent; use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::{ClarityVersion, SymbolicExpression, Value}; use lazy_static::{__Deref, lazy_static}; use rusqlite::types::{FromSql, FromSqlError}; use rusqlite::{params, Connection, OptionalExtension, ToSql, NO_PARAMS}; @@ -33,8 +34,8 @@ use stacks_common::consts::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, MINER_REWARD_MATURITY, }; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksBlockId, StacksPrivateKey, - StacksPublicKey, TrieHash, VRFSeed, + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, + StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed, }; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::get_epoch_time_secs; @@ -62,13 +63,16 @@ use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::operations::{LeaderBlockCommitOp, LeaderKeyRegisterOp}; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; +use crate::chainstate::stacks::boot::POX_4_NAME; use crate::chainstate::stacks::db::{DBConfig as ChainstateConfig, StacksChainState}; use crate::chainstate::stacks::{MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH}; +use crate::clarity::vm::clarity::ClarityConnection; use crate::clarity_vm::clarity::{ClarityInstance, PreCommitClarityBlock}; use crate::clarity_vm::database::SortitionDBRef; use crate::core::BOOT_BLOCK_HASH; use crate::monitoring; use crate::net::Error as net_error; +use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::{ query_row, query_row_panic, query_rows, u64_to_sql, DBConn, Error as DBError, 
FromRow, }; @@ -2380,6 +2384,7 @@ impl NakamotoChainState { let ast_rules = ASTRules::PrecheckSize; let mainnet = chainstate_tx.get_config().mainnet; + let chain_id = chainstate_tx.get_config().chain_id; let next_block_height = block.header.chain_length; let (parent_ch, parent_block_hash) = if block.is_first_mined() { @@ -2514,6 +2519,78 @@ impl NakamotoChainState { tenure_height, )?; + if !block.is_first_mined() { + let parent_reward_cycle = pox_constants + .block_height_to_reward_cycle( + burn_dbconn.context.first_block_height, + parent_chain_tip + .burn_header_height + .try_into() + .expect("Burn block height exceeded u32"), + ) + .unwrap(); + let my_reward_cycle = pox_constants + .block_height_to_reward_cycle( + burn_dbconn.context.first_block_height, + burn_header_height, + ) + .unwrap(); + if parent_reward_cycle != my_reward_cycle { + // execute `set-aggregate-public-key` using `clarity-tx` + let aggregate_public_key = clarity_tx + .connection() + .with_readonly_clarity_env( + false, + chain_id, + ClarityVersion::Clarity2, + StacksAddress::burn_address(false).into(), + None, + LimitedCostTracker::Free, + |vm_env| { + vm_env.execute_contract_allow_private( + &boot_code_id(POX_4_NAME, false), + "get-aggregate-public-key", + &vec![SymbolicExpression::atom_value(Value::UInt(u128::from( + parent_reward_cycle, + )))], + true, + ) + }, + ) + .ok() + .map(|agg_key_value| { + Value::buff_from(agg_key_value.expect_buff(33)) + .expect("failed to reconstruct buffer") + }) + .expect("get-aggregate-public-key returned None"); + + clarity_tx + .connection() + .with_readonly_clarity_env( + false, + chain_id, + ClarityVersion::Clarity2, + StacksAddress::burn_address(false).into(), + None, + LimitedCostTracker::Free, + |vm_env| { + vm_env.execute_contract_allow_private( + &boot_code_id(POX_4_NAME, false), + "set-aggregate-public-key", + &vec![ + SymbolicExpression::atom_value(Value::UInt(u128::from( + my_reward_cycle, + ))), + 
SymbolicExpression::atom_value(aggregate_public_key), + ], + false, + ) + }, + ) + .ok(); + } + } + let starting_cost = clarity_tx.cost_so_far(); debug!( diff --git a/stackslib/src/chainstate/stacks/boot/pox-4.clar b/stackslib/src/chainstate/stacks/boot/pox-4.clar index 6766e4022e..ffb4bc7f0c 100644 --- a/stackslib/src/chainstate/stacks/boot/pox-4.clar +++ b/stackslib/src/chainstate/stacks/boot/pox-4.clar @@ -1330,9 +1330,8 @@ ) ;; Set the aggregate public key to the provided value -;; TODO: https://github.com/stacks-network/stacks-core/issues/4101 ;; *New in Stacks 3.0* -(define-public (set-aggregate-public-key (reward-cycle uint) (aggregate-public-key (buff 33))) +(define-private (set-aggregate-public-key (reward-cycle uint) (aggregate-public-key (buff 33))) (begin (ok (map-set aggregate-public-keys reward-cycle aggregate-public-key)) ) From 5998f3b0884bd2fb9797499b059972535ae3a66b Mon Sep 17 00:00:00 2001 From: Joey Yandle Date: Thu, 7 Dec 2023 04:50:30 -0500 Subject: [PATCH 10/41] replace manual set-aggregate-public-key test with one that checks to see if the key is set automatically for a new reward cycle --- testnet/stacks-node/src/mockamoto/tests.rs | 57 +--------------------- 1 file changed, 2 insertions(+), 55 deletions(-) diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs index 915833ea20..f9376c9b71 100644 --- a/testnet/stacks-node/src/mockamoto/tests.rs +++ b/testnet/stacks-node/src/mockamoto/tests.rs @@ -129,7 +129,7 @@ fn observe_100_blocks() { } #[test] -fn observe_set_aggregate_tx() { +fn observe_set_aggregate_key() { let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap(); conf.node.mockamoto_time_ms = 10; @@ -211,27 +211,6 @@ fn observe_set_aggregate_tx() { }) .expect("FATAL: failed to start mockamoto main thread"); - // Create a "set-aggregate-public-key" tx to verify it sets correctly - let mut rng = OsRng::default(); - let x = Scalar::random(&mut rng); - let random_key = 
Point::from(x); - - let aggregate_public_key = Value::buff_from(random_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - let aggregate_tx = make_contract_call( - &submitter_sk, - 0, - 10, - &boot_code_addr(false), - POX_4_NAME, - "set-aggregate-public-key", - &[ - Value::UInt(u128::from(reward_cycle + 1)), - aggregate_public_key, - ], - ); - let aggregate_tx_hex = format!("0x{}", to_hex(&aggregate_tx)); - // complete within 5 seconds or abort (we are only observing one block) let completed = loop { if Instant::now().duration_since(start) > Duration::from_secs(5) { @@ -246,21 +225,6 @@ fn observe_set_aggregate_tx() { let stacks_block_height = latest_block.get("block_height").unwrap().as_u64().unwrap(); info!("Block height observed: {stacks_block_height}"); - // Submit the aggregate tx for processing to update the aggregate public key - let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() - .unwrap(); - mempool - .submit_raw( - &mut chainstate, - &sortdb, - &tip.consensus_hash, - &tip.anchored_header.block_hash(), - aggregate_tx.clone(), - &ExecutionCost::max_value(), - &StacksEpochId::Epoch30, - ) - .unwrap(); break true; }; @@ -270,23 +234,6 @@ fn observe_set_aggregate_tx() { .join() .expect("Failed to join node thread to exit"); - let aggregate_tx_included = test_observer::get_blocks() - .into_iter() - .find(|block_json| { - block_json["transactions"] - .as_array() - .unwrap() - .iter() - .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&aggregate_tx_hex)) - .is_some() - }) - .is_some(); - - assert!( - aggregate_tx_included, - "Mockamoto node failed to include the aggregate tx" - ); - assert!( completed, "Mockamoto node failed to produce and announce its block before timeout" @@ -294,5 +241,5 @@ fn observe_set_aggregate_tx() { // Did we set and retrieve the aggregate key correctly? 
assert_eq!(orig_aggregate_key.unwrap(), orig_key); - assert_eq!(new_aggregate_key.unwrap(), random_key); + assert_eq!(new_aggregate_key.unwrap(), orig_key); } From 9865cb6845b768e4be9d2cb7ea1554598bb9353d Mon Sep 17 00:00:00 2001 From: Joey Yandle Date: Mon, 11 Dec 2023 17:19:46 -0500 Subject: [PATCH 11/41] execute private contract call using as_transaction/with_abort_callback/execute_in_env context chain --- stackslib/src/chainstate/nakamoto/mod.rs | 44 +++++++++++----------- testnet/stacks-node/src/mockamoto/tests.rs | 7 ++-- 2 files changed, 27 insertions(+), 24 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 1560106a07..f6eaf1651b 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -66,7 +66,7 @@ use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; use crate::chainstate::stacks::boot::POX_4_NAME; use crate::chainstate::stacks::db::{DBConfig as ChainstateConfig, StacksChainState}; use crate::chainstate::stacks::{MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH}; -use crate::clarity::vm::clarity::ClarityConnection; +use crate::clarity::vm::clarity::{ClarityConnection, TransactionConnection}; use crate::clarity_vm::clarity::{ClarityInstance, PreCommitClarityBlock}; use crate::clarity_vm::database::SortitionDBRef; use crate::core::BOOT_BLOCK_HASH; @@ -2564,30 +2564,32 @@ impl NakamotoChainState { }) .expect("get-aggregate-public-key returned None"); - clarity_tx - .connection() - .with_readonly_clarity_env( - false, - chain_id, - ClarityVersion::Clarity2, - StacksAddress::burn_address(false).into(), - None, - LimitedCostTracker::Free, + clarity_tx.connection().as_transaction(|tx| { + tx.with_abort_callback( |vm_env| { - vm_env.execute_contract_allow_private( - &boot_code_id(POX_4_NAME, false), - "set-aggregate-public-key", - &vec![ - SymbolicExpression::atom_value(Value::UInt(u128::from( - my_reward_cycle, - ))), - 
SymbolicExpression::atom_value(aggregate_public_key), - ], - false, + vm_env.execute_in_env( + StacksAddress::burn_address(false).into(), + None, + None, + |vm_env| { + vm_env.execute_contract_allow_private( + &boot_code_id(POX_4_NAME, false), + "set-aggregate-public-key", + &vec![ + SymbolicExpression::atom_value(Value::UInt( + u128::from(my_reward_cycle), + )), + SymbolicExpression::atom_value(aggregate_public_key), + ], + false, + ) + }, ) }, + |_, _| false, ) - .ok(); + .expect("FATAL: `ust-liquid-supply` overflowed") + }); } } diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs index f9376c9b71..a1a1b3cdf8 100644 --- a/testnet/stacks-node/src/mockamoto/tests.rs +++ b/testnet/stacks-node/src/mockamoto/tests.rs @@ -213,7 +213,7 @@ fn observe_set_aggregate_key() { // complete within 5 seconds or abort (we are only observing one block) let completed = loop { - if Instant::now().duration_since(start) > Duration::from_secs(5) { + if Instant::now().duration_since(start) > Duration::from_secs(120) { break false; } let latest_block = test_observer::get_blocks().pop(); @@ -224,8 +224,9 @@ fn observe_set_aggregate_key() { }; let stacks_block_height = latest_block.get("block_height").unwrap().as_u64().unwrap(); info!("Block height observed: {stacks_block_height}"); - - break true; + if stacks_block_height >= 100 { + break true; + } }; globals.signal_stop(); From 18fafdd43ad943324f751c816805247c0cde6898 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 1 Dec 2023 09:00:16 -0500 Subject: [PATCH 12/41] Create a boot contract to initialize pre-pox-4 aggregate key Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/stacks/boot/mod.rs | 5 +++ testnet/stacks-node/src/mockamoto.rs | 46 +++++++++++++++++++-- 2 files changed, 47 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 2f2cc637c7..2dfcef0b53 100644 --- 
a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -81,6 +81,11 @@ const POX_4_BODY: &'static str = std::include_str!("pox-4.clar"); pub const COSTS_1_NAME: &'static str = "costs"; pub const COSTS_2_NAME: &'static str = "costs-2"; pub const COSTS_3_NAME: &'static str = "costs-3"; +/// This contract name is used in testnet **only** to lookup an initial +/// setting for the pox-4 aggregate key. This contract should contain a `define-read-only` +/// function called `aggregate-key` with zero arguments which returns a (buff 33) +pub const BOOT_TEST_POX_4_AGG_KEY_CONTRACT: &'static str = "pox-4-agg-test-booter"; +pub const BOOT_TEST_POX_4_AGG_KEY_FNAME: &'static str = "aggregate-key"; pub mod docs; diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 20bd7106b8..114f6c0418 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -5,8 +5,10 @@ use std::thread; use std::thread::{sleep, JoinHandle}; use std::time::Duration; +use clarity::boot_util::boot_code_id; use clarity::vm::ast::ASTRules; -use clarity::vm::Value as ClarityValue; +use clarity::vm::clarity::TransactionConnection; +use clarity::vm::{ClarityVersion, Value as ClarityValue}; use lazy_static::lazy_static; use stacks::burnchains::bitcoin::address::{ BitcoinAddress, LegacyBitcoinAddress, LegacyBitcoinAddressType, @@ -33,6 +35,9 @@ use stacks::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, SetupBlockResult, }; use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::boot::{ + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, +}; use stacks::chainstate::stacks::db::{ChainStateBootData, ClarityTx, StacksChainState}; use stacks::chainstate::stacks::miner::{ BlockBuilder, BlockBuilderSettings, BlockLimitFunction, MinerStatus, TransactionResult, @@ -64,7 +69,7 @@ use stacks_common::types::chainstate::{ 
StacksPrivateKey, VRFSeed, }; use stacks_common::types::{PrivateKey, StacksEpochId}; -use stacks_common::util::hash::{Hash160, MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::hash::{to_hex, Hash160, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, SchnorrSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; @@ -405,7 +410,40 @@ impl MockamotoNode { initial_balances.push((stacker.into(), 100_000_000_000_000)); - let mut boot_data = ChainStateBootData::new(&burnchain, initial_balances, None); + // Create a boot contract to initialize the aggregate public key prior to Pox-4 activation + let self_signer = SelfSigner::single_signer(); + let agg_pub_key = to_hex(&self_signer.aggregate_public_key.compress().data); + info!("Mockamoto node setting agg public key"; "agg_pub_key" => &agg_pub_key); + let callback = move |clarity_tx: &mut ClarityTx| { + let contract_content = format!( + "(define-read-only ({}) 0x{})", + BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key + ); + let contract_id = boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false); + clarity_tx.connection().as_transaction(|clarity| { + let (ast, analysis) = clarity + .analyze_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &contract_content, + ASTRules::PrecheckSize, + ) + .unwrap(); + clarity + .initialize_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &ast, + &contract_content, + None, + |_, _| false, + ) + .unwrap(); + clarity.save_analysis(&contract_id, &analysis).unwrap(); + }) + }; + let mut boot_data = + ChainStateBootData::new(&burnchain, initial_balances, Some(Box::new(callback))); let (chainstate, boot_receipts) = StacksChainState::open_and_exec( config.is_mainnet(), config.burnchain.chain_id, @@ -446,7 +484,7 @@ impl MockamotoNode { Ok(MockamotoNode { sortdb, - self_signer: SelfSigner::single_signer(), + self_signer, chainstate, miner_key, vrf_key, From 
f2772d22b29102e208dfa7259e48316ac591af52 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 1 Dec 2023 09:04:22 -0500 Subject: [PATCH 13/41] Retrieve boot contract init agg key and set all pre-pox-4 cycles to it Signed-off-by: Jacinta Ferrant --- stackslib/src/clarity_vm/clarity.rs | 75 +++++++++++++++++++++++++++-- 1 file changed, 71 insertions(+), 4 deletions(-) diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index aed3bb9947..59b5463d79 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -38,16 +38,17 @@ use clarity::vm::types::{ use clarity::vm::{analysis, ast, ClarityVersion, ContractName}; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksBlockId, TrieHash, + BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, TrieHash, }; use stacks_common::util::secp256k1::MessageSignature; use crate::burnchains::{Burnchain, PoxConstants}; use crate::chainstate::stacks::boot::{ BOOT_CODE_COSTS, BOOT_CODE_COSTS_2, BOOT_CODE_COSTS_2_TESTNET, BOOT_CODE_COSTS_3, - BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, COSTS_2_NAME, - COSTS_3_NAME, POX_2_MAINNET_CODE, POX_2_NAME, POX_2_TESTNET_CODE, POX_3_MAINNET_CODE, - POX_3_NAME, POX_3_TESTNET_CODE, POX_4_MAINNET_CODE, POX_4_NAME, POX_4_TESTNET_CODE, + BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, COSTS_2_NAME, COSTS_3_NAME, + POX_2_MAINNET_CODE, POX_2_NAME, POX_2_TESTNET_CODE, POX_3_MAINNET_CODE, POX_3_NAME, + POX_3_TESTNET_CODE, POX_4_MAINNET_CODE, POX_4_NAME, POX_4_TESTNET_CODE, }; use crate::chainstate::stacks::db::{StacksAccount, StacksChainState}; use crate::chainstate::stacks::events::{StacksTransactionEvent, StacksTransactionReceipt}; @@ -1343,6 +1344,32 @@ impl<'a, 'b> 
ClarityBlockConnection<'a, 'b> { let pox_4_contract_tx = StacksTransaction::new(tx_version.clone(), boot_code_auth.clone(), payload); + let initialized_agg_key = if !mainnet { + self.with_readonly_clarity_env( + false, + self.chain_id, + ClarityVersion::Clarity2, + StacksAddress::burn_address(false).into(), + None, + LimitedCostTracker::Free, + |vm_env| { + vm_env.execute_contract_allow_private( + &boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false), + BOOT_TEST_POX_4_AGG_KEY_FNAME, + &[], + true, + ) + }, + ) + .ok() + .map(|agg_key_value| { + Value::buff_from(agg_key_value.expect_buff(33)) + .expect("failed to reconstruct buffer") + }) + } else { + None + }; + let pox_4_initialization_receipt = self.as_transaction(|tx_conn| { // initialize with a synthetic transaction debug!("Instantiate {} contract", &pox_4_contract_id); @@ -1375,6 +1402,46 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { ) .expect("Failed to set burnchain parameters in PoX-3 contract"); + // set the aggregate public key for all pre-pox-4 cycles, if in testnet, and can fetch a boot-setting + if !mainnet { + if let Some(ref agg_pub_key) = initialized_agg_key { + for set_in_reward_cycle in 0..pox_4_first_cycle { + info!( + "Setting initial aggregate-public-key in PoX-4"; + "agg_pub_key" => %agg_pub_key, + "reward_cycle" => set_in_reward_cycle, + ); + tx_conn + .with_abort_callback( + |vm_env| { + vm_env.execute_in_env( + StacksAddress::burn_address(false).into(), + None, + None, + |env| { + env.execute_contract_allow_private( + &pox_4_contract_id, + "set-aggregate-public-key", + &[ + SymbolicExpression::atom_value( + Value::UInt(set_in_reward_cycle.into()), + ), + SymbolicExpression::atom_value( + agg_pub_key.clone(), + ), + ], + false, + ) + }, + ) + }, + |_, _| false, + ) + .unwrap(); + } + } + } + receipt }); From 949e0fd5be478251b0117551ed87881cea72bdca Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 1 Dec 2023 09:25:20 -0500 Subject: [PATCH 14/41] Only use the self_signer 
aggregate pub key for genesis blocks Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto.rs | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 114f6c0418..14f538ab4d 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -998,7 +998,28 @@ impl MockamotoNode { let config = self.chainstate.config(); let chain_length = block.header.chain_length; let sortition_handle = self.sortdb.index_handle_at_tip(); - let aggregate_public_key = self.self_signer.aggregate_public_key; + let aggregate_public_key = if chain_length <= 1 { + self.self_signer.aggregate_public_key + } else { + let block_sn = SortitionDB::get_block_snapshot_consensus( + sortition_handle.conn(), + &block.header.consensus_hash, + )? + .ok_or(ChainstateError::DBError(DBError::NotFoundError))?; + // TODO: https://github.com/stacks-network/stacks-core/issues/4109 + // Update this to retrieve the last block in the last reward cycle rather than chain tip + let aggregate_key_block_header = + NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &self.sortdb)? 
+ .unwrap(); + let aggregate_public_key = NakamotoChainState::get_aggregate_public_key( + &self.sortdb, + &sortition_handle, + &mut self.chainstate, + block_sn.block_height, + &aggregate_key_block_header.index_block_hash(), + )?; + aggregate_public_key + }; self.self_signer.sign_nakamoto_block(&mut block); let staging_tx = self.chainstate.staging_db_tx_begin()?; NakamotoChainState::accept_block( From c2f3c5f5ba77ee9bf1598ac7bdc6adb9eb4466ed Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 1 Dec 2023 09:57:16 -0500 Subject: [PATCH 15/41] Set the aggregate public key for the NEXT reward cycle in every block Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto.rs | 45 +++++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 14f538ab4d..c3f9511ba1 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -904,7 +904,50 @@ impl MockamotoNode { parent_chain_length + 1, )?; - let txs = vec![tenure_tx, coinbase_tx, stacks_stx_tx]; + // Set the aggregate public key for the NEXT reward cycle hence +1 + let reward_cycle = self + .sortdb + .pox_constants + .block_height_to_reward_cycle( + self.sortdb.first_block_height, + sortition_tip.block_height, + ) + .expect( + format!( + "Failed to determine reward cycle of block height: {}", + sortition_tip.block_height + ) + .as_str(), + ) + + 1; + let aggregate_payload = TransactionPayload::ContractCall(TransactionContractCall { + address: StacksAddress::burn_address(false), + contract_name: "pox-4".try_into().unwrap(), + function_name: "set-aggregate-public-key".try_into().unwrap(), + function_args: vec![ + ClarityValue::UInt(u128::from(reward_cycle)), + ClarityValue::buff_from( + self.self_signer + .aggregate_public_key + .compress() + .data + .to_vec(), + ) + .expect("Failed to serialize aggregate public key"), + ], + }); + let mut aggregate_tx: 
StacksTransaction = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), + aggregate_payload, + ); + aggregate_tx.chain_id = chain_id; + aggregate_tx.set_origin_nonce(miner_nonce + 3); + let mut aggregate_tx_signer = StacksTransactionSigner::new(&aggregate_tx); + aggregate_tx_signer.sign_origin(&self.miner_key).unwrap(); + let aggregate_tx = aggregate_tx_signer.get_tx().unwrap(); + + let txs = vec![tenure_tx, coinbase_tx, stacks_stx_tx, aggregate_tx]; let _ = match StacksChainState::process_block_transactions( &mut clarity_tx, From ef6b7f4b18db94dd8751157a569955b3dcdf1eb7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 1 Dec 2023 10:41:45 -0500 Subject: [PATCH 16/41] Cleanup mine_stacks_block by pulling transaction construction into sep functions Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto.rs | 270 +++++++++++++++------------ 1 file changed, 150 insertions(+), 120 deletions(-) diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index c3f9511ba1..dee2af6049 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -36,7 +36,7 @@ use stacks::chainstate::nakamoto::{ }; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::{ - BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, POX_4_NAME, }; use stacks::chainstate::stacks::db::{ChainStateBootData, ClarityTx, StacksChainState}; use stacks::chainstate::stacks::miner::{ @@ -72,6 +72,7 @@ use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::{to_hex, Hash160, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, SchnorrSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; +use wsts::curve::point::Point; use 
self::signer::SelfSigner; use crate::neon::Counters; @@ -800,86 +801,46 @@ impl MockamotoNode { "chain_tip_ch" => %chain_tip_ch, "miner_account" => %miner_principal, "miner_nonce" => %miner_nonce, ); - let vrf_proof = VRF::prove(&self.vrf_key, sortition_tip.sortition_hash.as_bytes()); - let coinbase_tx_payload = - TransactionPayload::Coinbase(CoinbasePayload([1; 32]), None, Some(vrf_proof)); - let mut coinbase_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), - coinbase_tx_payload, - ); - coinbase_tx.chain_id = chain_id; - coinbase_tx.set_origin_nonce(miner_nonce + 1); - let mut coinbase_tx_signer = StacksTransactionSigner::new(&coinbase_tx); - coinbase_tx_signer.sign_origin(&self.miner_key).unwrap(); - let coinbase_tx = coinbase_tx_signer.get_tx().unwrap(); - // Add a tenure change transaction to the block: // as of now every mockamoto block is a tenure-change. // If mockamoto mode changes to support non-tenure-changing blocks, this will have // to be gated. 
- let tenure_change_tx_payload = TransactionPayload::TenureChange( - TenureChangePayload { - previous_tenure_end: parent_block_id, - previous_tenure_blocks: 1, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0; 20]), - - signers: vec![], - }, - ThresholdSignature::mock(), - ); - let mut tenure_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), - tenure_change_tx_payload, - ); - tenure_tx.chain_id = chain_id; - tenure_tx.set_origin_nonce(miner_nonce); - let mut tenure_tx_signer = StacksTransactionSigner::new(&tenure_tx); - tenure_tx_signer.sign_origin(&self.miner_key).unwrap(); - let tenure_tx = tenure_tx_signer.get_tx().unwrap(); - - let pox_address = PoxAddress::Standard( - StacksAddress::burn_address(false), - Some(AddressHashMode::SerializeP2PKH), + let tenure_tx = + make_tenure_change_tx(&self.miner_key, miner_nonce, chain_id, parent_block_id); + let vrf_proof = VRF::prove(&self.vrf_key, sortition_tip.sortition_hash.as_bytes()); + let coinbase_tx = + make_coinbase_tx(&self.miner_key, miner_nonce + 1, chain_id, Some(vrf_proof)); + let stacks_stx_tx = make_stacks_stx_tx( + &self.miner_key, + miner_nonce + 2, + chain_id, + parent_chain_length, + parent_burn_height, ); - - let stack_stx_payload = if parent_chain_length < 2 { - TransactionPayload::ContractCall(TransactionContractCall { - address: StacksAddress::burn_address(false), - contract_name: "pox-4".try_into().unwrap(), - function_name: "stack-stx".try_into().unwrap(), - function_args: vec![ - ClarityValue::UInt(99_000_000_000_000), - pox_address.as_clarity_tuple().unwrap().into(), - ClarityValue::UInt(u128::from(parent_burn_height)), - ClarityValue::UInt(12), - ], - }) - } else { - // NOTE: stack-extend doesn't currently work, because the PoX-4 lockup - // special functions have not been implemented. 
- TransactionPayload::ContractCall(TransactionContractCall { - address: StacksAddress::burn_address(false), - contract_name: "pox-4".try_into().unwrap(), - function_name: "stack-extend".try_into().unwrap(), - function_args: vec![ - ClarityValue::UInt(5), - pox_address.as_clarity_tuple().unwrap().into(), - ], - }) - }; - let mut stack_stx_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), - stack_stx_payload, + // Set the aggregate public key for the NEXT reward cycle hence +1 + let reward_cycle = self + .sortdb + .pox_constants + .block_height_to_reward_cycle( + self.sortdb.first_block_height, + sortition_tip.block_height, + ) + .expect( + format!( + "Failed to determine reward cycle of block height: {}", + sortition_tip.block_height + ) + .as_str(), + ) + + 1; + let aggregate_tx = make_aggregate_tx( + &self.miner_key, + miner_nonce + 3, + chain_id, + &self.self_signer.aggregate_public_key, + reward_cycle, ); - stack_stx_tx.chain_id = chain_id; - stack_stx_tx.set_origin_nonce(miner_nonce + 2); - let mut stack_stx_tx_signer = StacksTransactionSigner::new(&stack_stx_tx); - stack_stx_tx_signer.sign_origin(&self.miner_key).unwrap(); - let stacks_stx_tx = stack_stx_tx_signer.get_tx().unwrap(); + let txs = vec![tenure_tx, coinbase_tx, stacks_stx_tx, aggregate_tx]; let sortdb_handle = self.sortdb.index_conn(); let SetupBlockResult { @@ -904,51 +865,6 @@ impl MockamotoNode { parent_chain_length + 1, )?; - // Set the aggregate public key for the NEXT reward cycle hence +1 - let reward_cycle = self - .sortdb - .pox_constants - .block_height_to_reward_cycle( - self.sortdb.first_block_height, - sortition_tip.block_height, - ) - .expect( - format!( - "Failed to determine reward cycle of block height: {}", - sortition_tip.block_height - ) - .as_str(), - ) - + 1; - let aggregate_payload = TransactionPayload::ContractCall(TransactionContractCall { - address: StacksAddress::burn_address(false), - contract_name: 
"pox-4".try_into().unwrap(), - function_name: "set-aggregate-public-key".try_into().unwrap(), - function_args: vec![ - ClarityValue::UInt(u128::from(reward_cycle)), - ClarityValue::buff_from( - self.self_signer - .aggregate_public_key - .compress() - .data - .to_vec(), - ) - .expect("Failed to serialize aggregate public key"), - ], - }); - let mut aggregate_tx: StacksTransaction = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), - aggregate_payload, - ); - aggregate_tx.chain_id = chain_id; - aggregate_tx.set_origin_nonce(miner_nonce + 3); - let mut aggregate_tx_signer = StacksTransactionSigner::new(&aggregate_tx); - aggregate_tx_signer.sign_origin(&self.miner_key).unwrap(); - let aggregate_tx = aggregate_tx_signer.get_tx().unwrap(); - - let txs = vec![tenure_tx, coinbase_tx, stacks_stx_tx, aggregate_tx]; - let _ = match StacksChainState::process_block_transactions( &mut clarity_tx, &txs, @@ -1076,3 +992,117 @@ impl MockamotoNode { Ok(chain_length) } } + +// Helper function to make a signed tenure change transaction +fn make_tenure_change_tx( + key: &StacksPrivateKey, + miner_nonce: u64, + chain_id: u32, + parent_block_id: StacksBlockId, +) -> StacksTransaction { + let tenure_change_tx_payload = TransactionPayload::TenureChange( + TenureChangePayload { + previous_tenure_end: parent_block_id, + previous_tenure_blocks: 1, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0; 20]), + signers: vec![], + }, + ThresholdSignature::mock(), + ); + make_tx(key, miner_nonce, tenure_change_tx_payload, chain_id) +} + +// Helper function to make a signed coinbase transaction +fn make_coinbase_tx( + key: &StacksPrivateKey, + miner_nonce: u64, + chain_id: u32, + vrf_proof: Option, +) -> StacksTransaction { + let coinbase_tx_payload = + TransactionPayload::Coinbase(CoinbasePayload([1; 32]), None, vrf_proof); + make_tx(key, miner_nonce, coinbase_tx_payload, chain_id) +} + +// Helper function to make a signed 
stacks-stx transaction +fn make_stacks_stx_tx( + key: &StacksPrivateKey, + miner_nonce: u64, + chain_id: u32, + parent_chain_length: u64, + parent_burn_height: u32, +) -> StacksTransaction { + let pox_address = PoxAddress::Standard( + StacksAddress::burn_address(false), + Some(AddressHashMode::SerializeP2PKH), + ); + + let stack_stx_payload = if parent_chain_length < 2 { + TransactionPayload::ContractCall(TransactionContractCall { + address: StacksAddress::burn_address(false), + contract_name: POX_4_NAME.into(), + function_name: "stack-stx".try_into().unwrap(), + function_args: vec![ + ClarityValue::UInt(99_000_000_000_000), + pox_address.as_clarity_tuple().unwrap().into(), + ClarityValue::UInt(u128::from(parent_burn_height)), + ClarityValue::UInt(12), + ], + }) + } else { + // NOTE: stack-extend doesn't currently work, because the PoX-4 lockup + // special functions have not been implemented. + TransactionPayload::ContractCall(TransactionContractCall { + address: StacksAddress::burn_address(false), + contract_name: POX_4_NAME.into(), + function_name: "stack-extend".try_into().unwrap(), + function_args: vec![ + ClarityValue::UInt(5), + pox_address.as_clarity_tuple().unwrap().into(), + ], + }) + }; + make_tx(key, miner_nonce, stack_stx_payload, chain_id) +} + +/// Helper function to make a set-aggregate-public-key transaction +fn make_aggregate_tx( + key: &StacksPrivateKey, + nonce: u64, + chain_id: u32, + aggregate_public_key: &Point, + reward_cycle: u64, +) -> StacksTransaction { + let aggregate_payload = TransactionPayload::ContractCall(TransactionContractCall { + address: StacksAddress::burn_address(false), + contract_name: POX_4_NAME.into(), + function_name: "set-aggregate-public-key".try_into().unwrap(), + function_args: vec![ + ClarityValue::UInt(u128::from(reward_cycle)), + ClarityValue::buff_from(aggregate_public_key.compress().data.to_vec()) + .expect("Failed to serialize aggregate public key"), + ], + }); + make_tx(&key, nonce, aggregate_payload, 
chain_id) +} + +/// Helper function to create a zero fee transaction +/// TODO: this is duplicated in so many places. We should have a utils fn for this +fn make_tx( + key: &StacksPrivateKey, + nonce: u64, + tx_payload: TransactionPayload, + chain_id: u32, +) -> StacksTransaction { + let mut tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&key).unwrap(), + tx_payload, + ); + tx.chain_id = chain_id; + tx.set_origin_nonce(nonce); + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&key).unwrap(); + tx_signer.get_tx().unwrap() +} From e9f99740bc91e460697ece110f7fab74946caefb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 4 Dec 2023 10:38:44 -0500 Subject: [PATCH 17/41] CRC: add test to set and get the aggregate public key Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto/tests.rs | 166 ++++++++++++++++++++- 1 file changed, 164 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs index 99c7d34cc8..a1f3785111 100644 --- a/testnet/stacks-node/src/mockamoto/tests.rs +++ b/testnet/stacks-node/src/mockamoto/tests.rs @@ -1,18 +1,25 @@ use std::thread; use std::time::{Duration, Instant}; +use clarity::boot_util::boot_code_addr; use clarity::vm::costs::ExecutionCost; +use clarity::vm::Value; +use rand_core::OsRng; +use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::nakamoto::NakamotoChainState; +use stacks::chainstate::stacks::boot::POX_4_NAME; use stacks::chainstate::stacks::db::StacksChainState; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey}; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; +use wsts::curve::point::Point; +use wsts::curve::scalar::Scalar; use super::MockamotoNode; use crate::config::{EventKeyType, EventObserverConfig}; use crate::neon_node::PeerThread; use crate::tests::neon_integrations::{submit_tx, 
test_observer}; -use crate::tests::{make_stacks_transfer, to_addr}; +use crate::tests::{make_contract_call, make_stacks_transfer, to_addr}; use crate::{Config, ConfigFile}; #[test] @@ -54,7 +61,8 @@ fn observe_100_blocks() { .expect("FATAL: failed to start mockamoto main thread"); // make a transfer tx to test that the mockamoto miner picks up txs from the mempool - let transfer_tx = make_stacks_transfer(&submitter_sk, 0, 10, &recipient_addr, 100); + let tx_fee = 200; + let transfer_tx = make_stacks_transfer(&submitter_sk, 0, tx_fee, &recipient_addr, 100); let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); // complete within 2 minutes or abort @@ -122,6 +130,160 @@ fn observe_100_blocks() { .expect("Failed to join node thread to exit"); } +#[test] +fn observe_set_aggregate_tx() { + let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap(); + conf.node.mockamoto_time_ms = 10; + + let submitter_sk = StacksPrivateKey::from_seed(&[1]); + let submitter_addr = to_addr(&submitter_sk); + conf.add_initial_balance(submitter_addr.to_string(), 1_000); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut mockamoto = MockamotoNode::new(&conf).unwrap(); + + let globals = mockamoto.globals.clone(); + + let mut mempool = PeerThread::connect_mempool_db(&conf); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(mockamoto.sortdb.conn()).unwrap(); + + let start = Instant::now(); + // Get a reward cycle to compare against + let reward_cycle = mockamoto + .sortdb + .pox_constants + 
.block_height_to_reward_cycle( + mockamoto.sortdb.first_block_height, + sortition_tip.block_height, + ) + .expect( + format!( + "Failed to determine reward cycle of block height: {}", + sortition_tip.block_height + ) + .as_str(), + ); + + let node_thread = thread::Builder::new() + .name("mockamoto-main".into()) + .spawn(move || { + mockamoto.run(); + let aggregate_key_block_header = NakamotoChainState::get_canonical_block_header( + mockamoto.chainstate.db(), + &mockamoto.sortdb, + ) + .unwrap() + .unwrap(); + // Get the aggregate public key to later verify that it was set correctly + mockamoto + .chainstate + .get_aggregate_public_key_pox_4( + &mockamoto.sortdb, + &aggregate_key_block_header.index_block_hash(), + reward_cycle, + ) + .unwrap() + }) + .expect("FATAL: failed to start mockamoto main thread"); + + // Create a "set-aggregate-public-key" tx to verify it sets correctly + let mut rng = OsRng::default(); + let x = Scalar::random(&mut rng); + let random_key = Point::from(x); + + let tx_fee = 200; + let aggregate_public_key = Value::buff_from(random_key.compress().data.to_vec()) + .expect("Failed to serialize aggregate public key"); + let aggregate_tx = make_contract_call( + &submitter_sk, + 0, + tx_fee, + &boot_code_addr(false), + POX_4_NAME, + "set-aggregate-public-key", + &[Value::UInt(u128::from(reward_cycle)), aggregate_public_key], + ); + let aggregate_tx_hex = format!("0x{}", to_hex(&aggregate_tx)); + + // complete within 5 seconds or abort (we are only observing one block) + let completed = loop { + if Instant::now().duration_since(start) > Duration::from_secs(5) { + break false; + } + let latest_block = test_observer::get_blocks().pop(); + thread::sleep(Duration::from_secs(1)); + let Some(ref latest_block) = latest_block else { + info!("No block observed yet!"); + continue; + }; + let stacks_block_height = latest_block.get("block_height").unwrap().as_u64().unwrap(); + info!("Block height observed: {stacks_block_height}"); + + // Submit the aggregate 
tx for processing to update the aggregate public key + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + mempool + .submit_raw( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + aggregate_tx.clone(), + &ExecutionCost::max_value(), + &StacksEpochId::Epoch30, + ) + .unwrap(); + break true; + }; + + globals.signal_stop(); + + let aggregate_key = node_thread + .join() + .expect("Failed to join node thread to exit"); + + // Did we set and retrieve the aggregate key correctly? + assert_eq!(aggregate_key.unwrap(), random_key); + + let aggregate_tx_included = test_observer::get_blocks() + .into_iter() + .find(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&aggregate_tx_hex)) + .is_some() + }) + .is_some(); + + assert!( + aggregate_tx_included, + "Mockamoto node failed to include the aggregate tx" + ); + + assert!( + completed, + "Mockamoto node failed to produce and announce its block before timeout" + ); +} + #[test] fn mempool_rpc_submit() { let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap(); From 76eea0a0865e76ca52fd1d65669bedc86af5e49e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 4 Dec 2023 16:59:00 -0500 Subject: [PATCH 18/41] CRC: check that the initial aggregate key was set correctly Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto/tests.rs | 34 ++++++++++++++++------ 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs index a1f3785111..38e4976f14 100644 --- a/testnet/stacks-node/src/mockamoto/tests.rs +++ b/testnet/stacks-node/src/mockamoto/tests.rs @@ -147,6 +147,8 @@ fn observe_set_aggregate_tx() { }); let mut mockamoto = MockamotoNode::new(&conf).unwrap(); + // Get the aggregate public key of the original reward cycle to 
compare against + let orig_key = mockamoto.self_signer.aggregate_public_key; let globals = mockamoto.globals.clone(); @@ -163,7 +165,7 @@ fn observe_set_aggregate_tx() { let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(mockamoto.sortdb.conn()).unwrap(); let start = Instant::now(); - // Get a reward cycle to compare against + // Get the reward cycle of the sortition tip let reward_cycle = mockamoto .sortdb .pox_constants @@ -189,15 +191,25 @@ fn observe_set_aggregate_tx() { ) .unwrap() .unwrap(); - // Get the aggregate public key to later verify that it was set correctly - mockamoto + // Get the aggregate public key of the original reward cycle + let orig_aggregate_key = mockamoto .chainstate .get_aggregate_public_key_pox_4( &mockamoto.sortdb, &aggregate_key_block_header.index_block_hash(), reward_cycle, ) - .unwrap() + .unwrap(); + // Get the aggregate public key of the next reward cycle that we manually overwrote + let new_aggregate_key = mockamoto + .chainstate + .get_aggregate_public_key_pox_4( + &mockamoto.sortdb, + &aggregate_key_block_header.index_block_hash(), + reward_cycle + 1, + ) + .unwrap(); + (orig_aggregate_key, new_aggregate_key) }) .expect("FATAL: failed to start mockamoto main thread"); @@ -216,7 +228,10 @@ fn observe_set_aggregate_tx() { &boot_code_addr(false), POX_4_NAME, "set-aggregate-public-key", - &[Value::UInt(u128::from(reward_cycle)), aggregate_public_key], + &[ + Value::UInt(u128::from(reward_cycle + 1)), + aggregate_public_key, + ], ); let aggregate_tx_hex = format!("0x{}", to_hex(&aggregate_tx)); @@ -254,13 +269,10 @@ fn observe_set_aggregate_tx() { globals.signal_stop(); - let aggregate_key = node_thread + let (orig_aggregate_key, new_aggregate_key) = node_thread .join() .expect("Failed to join node thread to exit"); - // Did we set and retrieve the aggregate key correctly? 
- assert_eq!(aggregate_key.unwrap(), random_key); - let aggregate_tx_included = test_observer::get_blocks() .into_iter() .find(|block_json| { @@ -282,6 +294,10 @@ fn observe_set_aggregate_tx() { completed, "Mockamoto node failed to produce and announce its block before timeout" ); + + // Did we set and retrieve the aggregate key correctly? + assert_eq!(orig_aggregate_key.unwrap(), orig_key); + assert_eq!(new_aggregate_key.unwrap(), random_key); } #[test] From 96911d0b39e3dea15ca96528c66e72df6c8d04f9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 11 Dec 2023 19:45:06 -0500 Subject: [PATCH 19/41] Make sure thread exits before looking for transfer tx Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/mockamoto/tests.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs index 38e4976f14..a93cd887a9 100644 --- a/testnet/stacks-node/src/mockamoto/tests.rs +++ b/testnet/stacks-node/src/mockamoto/tests.rs @@ -104,6 +104,10 @@ fn observe_100_blocks() { globals.signal_stop(); + node_thread + .join() + .expect("Failed to join node thread to exit"); + let transfer_tx_included = test_observer::get_blocks() .into_iter() .find(|block_json| { @@ -125,9 +129,6 @@ fn observe_100_blocks() { completed, "Mockamoto node failed to produce and announce 100 blocks before timeout" ); - node_thread - .join() - .expect("Failed to join node thread to exit"); } #[test] @@ -360,6 +361,10 @@ fn mempool_rpc_submit() { globals.signal_stop(); + node_thread + .join() + .expect("Failed to join node thread to exit"); + let transfer_tx_included = test_observer::get_blocks() .into_iter() .find(|block_json| { @@ -381,7 +386,4 @@ fn mempool_rpc_submit() { completed, "Mockamoto node failed to produce and announce 100 blocks before timeout" ); - node_thread - .join() - .expect("Failed to join node thread to exit"); } From dcd522fc813c19a1f903118c19905d320f2a8107 Mon Sep 17 
00:00:00 2001 From: Aaron Blankstein Date: Mon, 4 Dec 2023 16:21:33 -0600 Subject: [PATCH 20/41] feat: add nakamoto_node, nakamoto-neon mode * Refactor some of the reused structs from `neon_node` * Fix a logic-bug in `nakamoto::coordinator`: the first prepare phase information will be a Epoch2x block, so the reward set calculation has to handle that. * Add `nakamoto_node` module based on `neon_node` * Add simple integration test for `nakamoto_node` --- .../chainstate/nakamoto/coordinator/mod.rs | 40 +- stackslib/src/chainstate/nakamoto/miner.rs | 8 +- stackslib/src/chainstate/stacks/miner.rs | 17 +- .../burnchains/bitcoin_regtest_controller.rs | 9 +- testnet/stacks-node/src/config.rs | 17 +- testnet/stacks-node/src/globals.rs | 266 +++++ testnet/stacks-node/src/keychain.rs | 24 +- testnet/stacks-node/src/main.rs | 6 + testnet/stacks-node/src/mockamoto.rs | 8 +- testnet/stacks-node/src/nakamoto_node.rs | 683 +++++++++++ .../stacks-node/src/nakamoto_node/miner.rs | 645 +++++++++++ testnet/stacks-node/src/nakamoto_node/peer.rs | 418 +++++++ .../stacks-node/src/nakamoto_node/relayer.rs | 961 +++++++++++++++ testnet/stacks-node/src/neon_node.rs | 266 +---- testnet/stacks-node/src/run_loop/mod.rs | 1 + testnet/stacks-node/src/run_loop/nakamoto.rs | 1029 +++++++++++++++++ testnet/stacks-node/src/run_loop/neon.rs | 55 +- .../stacks-node/src/tests/bitcoin_regtest.rs | 1 + testnet/stacks-node/src/tests/mod.rs | 1 + .../src/tests/nakamoto_integrations.rs | 322 ++++++ .../src/tests/neon_integrations.rs | 4 +- 21 files changed, 4480 insertions(+), 301 deletions(-) create mode 100644 testnet/stacks-node/src/globals.rs create mode 100644 testnet/stacks-node/src/nakamoto_node.rs create mode 100644 testnet/stacks-node/src/nakamoto_node/miner.rs create mode 100644 testnet/stacks-node/src/nakamoto_node/peer.rs create mode 100644 testnet/stacks-node/src/nakamoto_node/relayer.rs create mode 100644 testnet/stacks-node/src/run_loop/nakamoto.rs create mode 100644 
testnet/stacks-node/src/tests/nakamoto_integrations.rs diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 462662d4d9..6dde267bc2 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -169,7 +169,7 @@ pub fn get_nakamoto_reward_cycle_info( .epoch_id; assert!( - epoch_at_height >= StacksEpochId::Epoch30, + epoch_at_height >= StacksEpochId::Epoch25, "FATAL: called a nakamoto function outside of epoch 3" ); @@ -216,22 +216,40 @@ pub fn get_nakamoto_reward_cycle_info( } // find the first Stacks block processed in the prepare phase - let Some(prepare_start_block_header) = + let parent_block_id = if let Some(nakamoto_start_block) = NakamotoChainState::get_nakamoto_tenure_start_block_header( chain_state.db(), &sn.consensus_hash, + )? { + nakamoto_start_block + .anchored_header + .as_stacks_nakamoto() + // TODO: maybe `get_nakamoto_tenure_start_block_header` should + // return a type that doesn't require this unwrapping? + .expect("FATAL: queried non-Nakamoto tenure start header") + .parent_block_id + } else { + let Some(block_header) = + StacksChainState::get_stacks_block_header_info_by_consensus_hash( + chain_state.db(), + &sn.consensus_hash, + )? + else { + // no header for this snapshot (possibly invalid) + debug!("Failed to find block by consensus hash"; "consensus_hash" => %sn.consensus_hash); + continue; + }; + let Some(parent_block_id) = StacksChainState::get_parent_block_id( + chain_state.db(), + &block_header.index_block_hash(), )? 
- else { - // no header for this snapshot (possibly invalid) - continue; + else { + debug!("Failed to get parent block"; "block_id" => %block_header.index_block_hash()); + continue; + }; + parent_block_id }; - let parent_block_id = &prepare_start_block_header - .anchored_header - .as_stacks_nakamoto() - .expect("FATAL: queried non-Nakamoto tenure start header") - .parent_block_id; - // find the tenure-start block of the tenure of the parent of this Stacks block. // in epoch 2, this is the preceding anchor block // in nakamoto, this is the tenure-start block of the preceding tenure diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 82b6d34b93..1f75cd55ac 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -498,7 +498,7 @@ impl NakamotoBlockBuilder { state_root_hash ); - info!( + debug!( "Miner: mined Nakamoto block"; "consensus_hash" => %block.header.consensus_hash, "block_hash" => %block.header.block_hash(), @@ -570,13 +570,15 @@ impl NakamotoBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); + let initial_txs: Vec<_> = + [new_tenure_info.tenure_change_tx.cloned(), + new_tenure_info.coinbase_tx.cloned()].into_iter().filter_map(|x| x).collect(); let (blocked, tx_events) = match StacksBlockBuilder::select_and_apply_transactions( &mut tenure_tx, &mut builder, mempool, parent_stacks_header.stacks_block_height, - tenure_info.tenure_change_tx(), - tenure_info.coinbase_tx(), + &initial_txs, settings, event_observer, ASTRules::PrecheckSize, diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index a9cfacf929..3eb1ea36cc 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2139,8 +2139,7 @@ impl StacksBlockBuilder { builder: &mut B, mempool: &mut MemPoolDB, tip_height: u64, - tenure_change_tx: 
Option<&StacksTransaction>, - coinbase_tx: Option<&StacksTransaction>, + initial_txs: &[StacksTransaction], settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, ast_rules: ASTRules, @@ -2155,17 +2154,10 @@ impl StacksBlockBuilder { let mut tx_events = Vec::new(); - if let Some(tenure_tx) = tenure_change_tx { + for initial_tx in initial_txs.iter() { tx_events.push( builder - .try_mine_tx(epoch_tx, tenure_tx, ast_rules.clone())? - .convert_to_event(), - ); - } - if let Some(coinbase_tx) = coinbase_tx { - tx_events.push( - builder - .try_mine_tx(epoch_tx, coinbase_tx, ast_rules.clone())? + .try_mine_tx(epoch_tx, initial_tx, ast_rules.clone())? .convert_to_event(), ); } @@ -2442,8 +2434,7 @@ impl StacksBlockBuilder { &mut builder, mempool, parent_stacks_header.stacks_block_height, - None, - Some(coinbase_tx), + &[coinbase_tx.clone()], settings, event_observer, ast_rules, diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index d70fca1c02..ad83dd6f57 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -8,7 +8,7 @@ use async_h1::client; use async_std::io::ReadExt; use async_std::net::TcpStream; use base64::encode; -use clarity::vm::types::PrincipalData; + use http_types::{Method, Request, Url}; use serde::Serialize; use serde_json::json; @@ -50,11 +50,16 @@ use stacks_common::deps_common::bitcoin::network::encodable::ConsensusEncodable; use stacks_common::deps_common::bitcoin::network::serialize::deserialize as btc_deserialize; use stacks_common::deps_common::bitcoin::network::serialize::RawEncoder; use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; -use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksAddress}; +use stacks_common::types::chainstate::BurnchainHeaderHash; use stacks_common::util::hash::{hex_bytes, 
Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; +#[cfg(test)] +use clarity::vm::types::PrincipalData; +#[cfg(test)] +use stacks_common::types::chainstate::StacksAddress; + use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f634f526c8..feaa0208ac 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -35,6 +35,8 @@ use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use crate::mockamoto::signer::SelfSigner; + pub const DEFAULT_SATS_PER_VB: u64 = 50; const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; @@ -491,6 +493,13 @@ lazy_static! { } impl Config { + pub fn self_signing(&self) -> Option { + if !(self.burnchain.mode == "nakamoto-neon" || self.burnchain.mode == "mockamoto") { + return None; + } + self.miner.self_signing_key.clone() + } + /// get the up-to-date burnchain from the config pub fn get_burnchain_config(&self) -> Result { if let Some(path) = &self.config_path { @@ -1095,6 +1104,7 @@ impl Config { .as_ref() .map(|x| Secp256k1PrivateKey::from_hex(x)) .transpose()?, + self_signing_key: None, }, None => miner_default_config, }; @@ -1108,6 +1118,7 @@ impl Config { "xenon", "mainnet", "mockamoto", + "nakamoto-neon", ]; if !supported_modes.contains(&burnchain.mode.as_str()) { @@ -1629,10 +1640,10 @@ impl BurnchainConfig { match self.mode.as_str() { "mainnet" => ("mainnet".to_string(), BitcoinNetworkType::Mainnet), "xenon" => ("testnet".to_string(), BitcoinNetworkType::Testnet), - "helium" | "neon" | "argon" | "krypton" | "mocknet" | "mockamoto" => { + "helium" | "neon" | "argon" | "krypton" | "mocknet" | "mockamoto" | 
"nakamoto-neon" => { ("regtest".to_string(), BitcoinNetworkType::Regtest) } - _ => panic!("Invalid bitcoin mode -- expected mainnet, testnet, or regtest"), + other => panic!("Invalid stacks-node mode: {other}"), } } } @@ -2116,6 +2127,7 @@ pub struct MinerConfig { pub candidate_retry_cache_size: u64, pub unprocessed_block_deadline_secs: u64, pub mining_key: Option, + pub self_signing_key: Option, } impl MinerConfig { @@ -2133,6 +2145,7 @@ impl MinerConfig { candidate_retry_cache_size: 10_000, unprocessed_block_deadline_secs: 30, mining_key: None, + self_signing_key: None, } } } diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs new file mode 100644 index 0000000000..acace012f8 --- /dev/null +++ b/testnet/stacks-node/src/globals.rs @@ -0,0 +1,266 @@ +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; +use std::sync::mpsc::SyncSender; +use std::sync::Arc; +use std::sync::Mutex; + +use stacks::burnchains::Txid; +use stacks::chainstate::burn::operations::LeaderKeyRegisterOp; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::coordinator::comm::CoordinatorChannels; +use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::miner::MinerStatus; +use stacks::net::NetworkResult; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::chainstate::ConsensusHash; + +use crate::neon::Counters; +use crate::run_loop::RegisteredKey; +use crate::syncctl::PoxSyncWatchdogComms; + +use crate::neon_node::LeaderKeyRegistrationState; + +/// Command types for the relayer thread, issued to it by other threads +pub enum RelayerDirective { + /// Handle some new data that arrived on the network (such as blocks, transactions, and + HandleNetResult(NetworkResult), + /// Announce a new sortition. Process and broadcast the block if we won. 
+ ProcessTenure(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash), + /// Try to mine a block + RunTenure(RegisteredKey, BlockSnapshot, u128), // (vrf key, chain tip, time of issuance in ms) + /// A nakamoto tenure's first block has been processed. + NakamotoTenureStartProcessed(ConsensusHash, BlockHeaderHash), + /// Try to register a VRF public key + RegisterKey(BlockSnapshot), + /// Stop the relayer thread + Exit, +} + +/// Inter-thread communication structure, shared between threads +#[derive(Clone)] +pub struct Globals { + /// Last sortition processed + last_sortition: Arc>>, + /// Status of the miner + miner_status: Arc>, + /// Communication link to the coordinator thread + pub(crate) coord_comms: CoordinatorChannels, + /// Unconfirmed transactions (shared between the relayer and p2p threads) + unconfirmed_txs: Arc>, + /// Writer endpoint to the relayer thread + pub relay_send: SyncSender, + /// Cointer state in the main thread + pub counters: Counters, + /// Connection to the PoX sync watchdog + pub sync_comms: PoxSyncWatchdogComms, + /// Global flag to see if we should keep running + pub should_keep_running: Arc, + /// Status of our VRF key registration state (shared between the main thread and the relayer) + leader_key_registration_state: Arc>, +} + +impl Globals { + pub fn new( + coord_comms: CoordinatorChannels, + miner_status: Arc>, + relay_send: SyncSender, + counters: Counters, + sync_comms: PoxSyncWatchdogComms, + should_keep_running: Arc, + ) -> Globals { + Globals { + last_sortition: Arc::new(Mutex::new(None)), + miner_status, + coord_comms, + unconfirmed_txs: Arc::new(Mutex::new(UnconfirmedTxMap::new())), + relay_send, + counters, + sync_comms, + should_keep_running, + leader_key_registration_state: Arc::new(Mutex::new( + LeaderKeyRegistrationState::Inactive, + )), + } + } + + /// Get the last sortition processed by the relayer thread + pub fn get_last_sortition(&self) -> Option { + self.last_sortition + .lock() + .unwrap_or_else(|_| { + 
error!("Sortition mutex poisoned!"); + panic!(); + }) + .clone() + } + + /// Set the last sortition processed + pub fn set_last_sortition(&self, block_snapshot: BlockSnapshot) { + let mut last_sortition = self.last_sortition.lock().unwrap_or_else(|_| { + error!("Sortition mutex poisoned!"); + panic!(); + }); + last_sortition.replace(block_snapshot); + } + + /// Get the status of the miner (blocked or ready) + pub fn get_miner_status(&self) -> Arc> { + self.miner_status.clone() + } + + pub fn block_miner(&self) { + self.miner_status + .lock() + .expect("FATAL: mutex poisoned") + .add_blocked() + } + + pub fn unblock_miner(&self) { + self.miner_status + .lock() + .expect("FATAL: mutex poisoned") + .remove_blocked() + } + + /// Get the main thread's counters + pub fn get_counters(&self) -> Counters { + self.counters.clone() + } + + /// Called by the relayer to pass unconfirmed txs to the p2p thread, so the p2p thread doesn't + /// need to do the disk I/O needed to instantiate the unconfirmed state trie they represent. + /// Clears the unconfirmed transactions, and replaces them with the chainstate's. + pub fn send_unconfirmed_txs(&self, chainstate: &StacksChainState) { + let Some(ref unconfirmed) = chainstate.unconfirmed_state else { + return; + }; + let mut txs = self.unconfirmed_txs.lock().unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: unconfirmed tx arc mutex is poisoned: {e:?}"); + panic!(); + }); + txs.clear(); + txs.extend(unconfirmed.mined_txs.clone()); + } + + /// Called by the p2p thread to accept the unconfirmed tx state processed by the relayer. + /// Puts the shared unconfirmed transactions to chainstate. 
+ pub fn recv_unconfirmed_txs(&self, chainstate: &mut StacksChainState) { + let Some(ref mut unconfirmed) = chainstate.unconfirmed_state else { + return; + }; + let txs = self.unconfirmed_txs.lock().unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: unconfirmed tx arc mutex is poisoned: {e:?}"); + panic!(); + }); + unconfirmed.mined_txs.clear(); + unconfirmed.mined_txs.extend(txs.clone()); + } + + /// Signal system-wide stop + pub fn signal_stop(&self) { + self.should_keep_running.store(false, Ordering::SeqCst); + } + + /// Should we keep running? + pub fn keep_running(&self) -> bool { + self.should_keep_running.load(Ordering::SeqCst) + } + + /// Get the handle to the coordinator + pub fn coord(&self) -> &CoordinatorChannels { + &self.coord_comms + } + + /// Get the current leader key registration state. + /// Called from the runloop thread and relayer thread. + pub fn get_leader_key_registration_state(&self) -> LeaderKeyRegistrationState { + let key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: leader key registration mutex is poisoned: {e:?}"); + panic!(); + }); + key_state.clone() + } + + /// Set the initial leader key registration state. + /// Called from the runloop thread when booting up. + pub fn set_initial_leader_key_registration_state(&self, new_state: LeaderKeyRegistrationState) { + let mut key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: leader key registration mutex is poisoned: {e:?}"); + panic!(); + }); + *key_state = new_state; + } + + /// Advance the leader key registration state to pending, given a txid we just sent. + /// Only the relayer thread calls this. 
+ pub fn set_pending_leader_key_registration(&self, target_block_height: u64, txid: Txid) { + let mut key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|_e| { + error!("FATAL: failed to lock leader key registration state mutex"); + panic!(); + }); + *key_state = LeaderKeyRegistrationState::Pending(target_block_height, txid); + } + + /// Advance the leader key registration state to active, given the VRF key registration ops + /// we've discovered in a given snapshot. + /// The runloop thread calls this whenever it processes a sortition. + pub fn try_activate_leader_key_registration( + &self, + burn_block_height: u64, + key_registers: Vec, + ) -> bool { + let mut activated = false; + let mut key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: leader key registration mutex is poisoned: {e:?}"); + panic!(); + }); + // if key_state is anything but pending, then we don't activate + let LeaderKeyRegistrationState::Pending(target_block_height, txid) = *key_state else { + return false; + }; + for op in key_registers.into_iter() { + info!( + "Processing burnchain block with key_register_op"; + "burn_block_height" => burn_block_height, + "txid" => %op.txid, + "checking_txid" => %txid, + ); + + if txid == op.txid { + *key_state = LeaderKeyRegistrationState::Active(RegisteredKey { + target_block_height, + vrf_public_key: op.public_key, + block_height: u64::from(op.block_height), + op_vtxindex: u32::from(op.vtxindex), + }); + activated = true; + } else { + debug!( + "key_register_op {} does not match our pending op {}", + txid, &op.txid + ); + } + } + + activated + } +} diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index 7ea3b90556..712fa0b662 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -7,7 +7,7 @@ use stacks_common::address::{ }; use 
stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::{Hash160, Sha256Sum}; -use stacks_common::util::secp256k1::Secp256k1PublicKey; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; use super::operations::BurnchainOpSigner; @@ -16,6 +16,7 @@ use super::operations::BurnchainOpSigner; #[derive(Clone)] pub struct Keychain { secret_state: Vec, + nakamoto_mining_key: Secp256k1PrivateKey, } impl Keychain { @@ -44,10 +45,27 @@ impl Keychain { StacksPrivateKey::from_slice(&sk_bytes[..]).expect("FATAL: Keychain::make_secret_key_bytes() returned bytes that could not be parsed into a secp256k1 secret key!") } - /// Create a default keychain from the seed + /// Get the public key hash of the nakamoto mining key (i.e., Hash160(pubkey)) + pub fn get_nakamoto_pkh(&self) -> Hash160 { + let pk = Secp256k1PublicKey::from_private(&self.nakamoto_mining_key); + Hash160::from_node_public_key(&pk) + } + + /// Get the secret key of the nakamoto mining key + pub fn get_nakamoto_sk(&self) -> &Secp256k1PrivateKey { + &self.nakamoto_mining_key + } + + /// Create a default keychain from the seed, with a default nakamoto mining key derived + /// from the same seed pub fn default(seed: Vec) -> Keychain { + let secret_state = Self::make_secret_key_bytes(&seed); + // re-hash secret_state to use as a default seed for the nakamoto mining key + let nakamoto_mining_key = + Secp256k1PrivateKey::from_seed(Sha256Sum::from_data(&secret_state).as_bytes()); Keychain { - secret_state: Keychain::make_secret_key_bytes(&seed), + secret_state, + nakamoto_mining_key, + } } diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 6addce37a1..8675b43132 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -19,8 +19,10 @@ pub mod burnchains; pub mod config; pub mod event_dispatcher; pub mod genesis_data; +pub mod globals;
pub mod keychain; pub mod mockamoto; +pub mod nakamoto_node; pub mod neon_node; pub mod node; pub mod operations; @@ -44,6 +46,7 @@ pub use self::node::{ChainTip, Node}; pub use self::run_loop::{helium, neon}; pub use self::tenure::Tenure; use crate::mockamoto::MockamotoNode; +use crate::run_loop::nakamoto; fn main() { panic::set_hook(Box::new(|panic_info| { @@ -209,6 +212,9 @@ fn main() { } else if conf.burnchain.mode == "mockamoto" { let mut mockamoto = MockamotoNode::new(&conf).unwrap(); mockamoto.run(); + } else if conf.burnchain.mode == "nakamoto-neon" { + let mut run_loop = nakamoto::RunLoop::new(conf); + run_loop.start(None, 0); } else { println!("Burnchain mode '{}' not supported", conf.burnchain.mode); } diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 6c5e7ca878..78bc2ae491 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -69,10 +69,9 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; use self::signer::SelfSigner; +use crate::globals::{Globals, RelayerDirective}; use crate::neon::Counters; -use crate::neon_node::{ - Globals, PeerThread, RelayerDirective, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, -}; +use crate::neon_node::{PeerThread, StacksNode, BLOCK_PROCESSOR_STACK_SIZE}; use crate::syncctl::PoxSyncWatchdogComms; use crate::{Config, EventDispatcher}; @@ -891,8 +890,7 @@ impl MockamotoNode { &mut builder, &mut self.mempool, parent_chain_length, - None, - None, + &[], BlockBuilderSettings { max_miner_time_ms: 15_000, mempool_settings: MemPoolWalkSettings::default(), diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs new file mode 100644 index 0000000000..1c71b09045 --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -0,0 +1,683 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// 
Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use std::collections::HashMap; +use std::convert::TryFrom; +use std::net::SocketAddr; +use std::sync::mpsc::Receiver; +use std::thread; +use std::thread::JoinHandle; + +use super::{Config, EventDispatcher, Keychain}; +use crate::burnchains::bitcoin_regtest_controller::addr2str; +use crate::globals::Globals; +use crate::globals::RelayerDirective; +use crate::neon_node::LeaderKeyRegistrationState; +use crate::run_loop::nakamoto::RunLoop; +use crate::run_loop::RegisteredKey; +use clarity::vm::ast::ASTRules; +use clarity::vm::types::QualifiedContractIdentifier; +use stacks::burnchains::{Burnchain, BurnchainSigner, Txid}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::core::mempool::MemPoolDB; +use stacks::cost_estimates::metrics::UnitMetric; +use stacks::cost_estimates::UnitEstimator; +use stacks::monitoring; +use stacks::monitoring::update_active_miners_count_gauge; +use stacks::net::atlas::{AtlasConfig, AtlasDB}; +use stacks::net::db::PeerDB; +use stacks::net::p2p::PeerNetwork; +use stacks::net::relay::Relayer; +use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs}; +use stacks::net::{Error as 
NetError, PeerNetworkComms, ServiceFlags}; +use stacks::util_lib::strings::{UrlString, VecDisplay}; +use stacks_common::types::chainstate::SortitionId; +use stacks_common::types::net::PeerAddress; +use stacks_common::types::StacksEpochId; +use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::secp256k1::Secp256k1PrivateKey; + +pub mod miner; +pub mod peer; +pub mod relayer; + +use self::peer::PeerThread; +use self::relayer::RelayerThread; + +pub const RELAYER_MAX_BUFFER: usize = 100; +const VRF_MOCK_MINER_KEY: u64 = 1; + +pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB + +pub type BlockCommits = HashMap; + +/// Node implementation for both miners and followers. +/// This struct is used to set up the node proper and launch the p2p thread and relayer thread. +/// It is further used by the main thread to communicate with these two threads. +pub struct StacksNode { + /// Atlas network configuration + pub atlas_config: AtlasConfig, + /// Global inter-thread communication handle + pub globals: Globals, + /// True if we're a miner + is_miner: bool, + /// handle to the p2p thread + pub p2p_thread_handle: JoinHandle<()>, + /// handle to the relayer thread + pub relayer_thread_handle: JoinHandle<()>, +} + +/// Fault injection logic to artificially increase the length of a tenure. 
+/// Only used in testing +#[cfg(test)] +fn fault_injection_long_tenure() { + // simulated slow block + match std::env::var("STX_TEST_SLOW_TENURE") { + Ok(tenure_str) => match tenure_str.parse::() { + Ok(tenure_time) => { + info!( + "Fault injection: sleeping for {} milliseconds to simulate a long tenure", + tenure_time + ); + stacks_common::util::sleep_ms(tenure_time); + } + Err(_) => { + error!("Parse error for STX_TEST_SLOW_TENURE"); + panic!(); + } + }, + _ => {} + } +} + +#[cfg(not(test))] +fn fault_injection_long_tenure() {} + +/// Fault injection to skip mining in this bitcoin block height +/// Only used in testing +#[cfg(test)] +fn fault_injection_skip_mining(rpc_bind: &str, target_burn_height: u64) -> bool { + match std::env::var("STACKS_DISABLE_MINER") { + Ok(disable_heights) => { + let disable_schedule: serde_json::Value = + serde_json::from_str(&disable_heights).unwrap(); + let disable_schedule = disable_schedule.as_array().unwrap(); + for disabled in disable_schedule { + let target_miner_rpc_bind = disabled + .get("rpc_bind") + .unwrap() + .as_str() + .unwrap() + .to_string(); + if target_miner_rpc_bind != rpc_bind { + continue; + } + let target_block_heights = disabled.get("blocks").unwrap().as_array().unwrap(); + for target_block_value in target_block_heights { + let target_block = target_block_value.as_i64().unwrap() as u64; + if target_block == target_burn_height { + return true; + } + } + } + return false; + } + Err(_) => { + return false; + } + } +} + +#[cfg(not(test))] +fn fault_injection_skip_mining(_rpc_bind: &str, _target_burn_height: u64) -> bool { + false +} + +/// Open the chainstate, and inject faults from the config file +pub(crate) fn open_chainstate_with_faults( + config: &Config, +) -> Result { + let stacks_chainstate_path = config.get_chainstate_path_str(); + let (mut chainstate, _) = StacksChainState::open( + config.is_mainnet(), + config.burnchain.chain_id, + &stacks_chainstate_path, + Some(config.node.get_marf_opts()), + )?; + + 
chainstate.fault_injection.hide_blocks = config.node.fault_injection_hide_blocks; + Ok(chainstate) +} + +/// Types of errors that can arise during mining +#[derive(Debug)] +enum Error { + /// Can't find the block sortition snapshot for the chain tip + SnapshotNotFoundForChainTip, + /// The burnchain tip changed while this operation was in progress + BurnchainTipChanged, + SpawnError(std::io::Error), + FaultInjection, + MissedMiningOpportunity, + /// Attempted to mine while there was no active VRF key + NoVRFKeyActive, + /// The parent block or tenure could not be found + ParentNotFound, + /// Something unexpected happened (e.g., hash mismatches) + UnexpectedChainState, + /// A burnchain operation failed when submitting it to the burnchain + BurnchainSubmissionFailed, + NewParentDiscovered, +} + +impl StacksNode { + /// Set up the AST size-precheck height, if configured + fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { + if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { + info!( + "Override burnchain height of {:?} to {}", + ASTRules::PrecheckSize, + ast_precheck_size_height + ); + let mut tx = sortdb + .tx_begin() + .expect("FATAL: failed to begin tx on sortition DB"); + SortitionDB::override_ast_rule_height( + &mut tx, + ASTRules::PrecheckSize, + ast_precheck_size_height, + ) + .expect("FATAL: failed to override AST PrecheckSize rule height"); + tx.commit() + .expect("FATAL: failed to commit sortition DB transaction"); + } + } + + /// Set up the mempool DB by making sure it exists. + /// Panics on failure. 
+ fn setup_mempool_db(config: &Config) -> MemPoolDB { + // force early mempool instantiation + let cost_estimator = config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let mempool = MemPoolDB::open( + config.is_mainnet(), + config.burnchain.chain_id, + &config.get_chainstate_path_str(), + cost_estimator, + metric, + ) + .expect("BUG: failed to instantiate mempool"); + + mempool + } + + /// Set up the Peer DB and update any soft state from the config file. This includes: + /// * blacklisted/whitelisted nodes + /// * node keys + /// * bootstrap nodes + /// Returns the instantiated PeerDB + /// Panics on failure. + fn setup_peer_db( + config: &Config, + burnchain: &Burnchain, + stackerdb_contract_ids: &[QualifiedContractIdentifier], + ) -> PeerDB { + let data_url = UrlString::try_from(format!("{}", &config.node.data_url)).unwrap(); + let initial_neighbors = config.node.bootstrap_node.clone(); + if initial_neighbors.len() > 0 { + info!( + "Will bootstrap from peers {}", + VecDisplay(&initial_neighbors) + ); + } else { + warn!("Without a peer to bootstrap from, the node will start mining a new chain"); + } + + let p2p_sock: SocketAddr = config.node.p2p_bind.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.p2p_bind + )); + let p2p_addr: SocketAddr = config.node.p2p_address.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.p2p_address + )); + let node_privkey = Secp256k1PrivateKey::from_seed(&config.node.local_peer_seed); + + let mut peerdb = PeerDB::connect( + &config.get_peer_db_file_path(), + true, + config.burnchain.chain_id, + burnchain.network_id, + Some(node_privkey), + config.connection_options.private_key_lifetime.clone(), + PeerAddress::from_socketaddr(&p2p_addr), + p2p_sock.port(), + data_url, + &[], + Some(&initial_neighbors), + stackerdb_contract_ids, + ) + .map_err(|e| { + eprintln!( + "Failed to 
open {}: {:?}", + &config.get_peer_db_file_path(), + &e + ); + panic!(); + }) + .unwrap(); + + // allow all bootstrap nodes + { + let mut tx = peerdb.tx_begin().unwrap(); + for initial_neighbor in initial_neighbors.iter() { + // update peer in case public key changed + PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap(); + PeerDB::set_allow_peer( + &mut tx, + initial_neighbor.addr.network_id, + &initial_neighbor.addr.addrbytes, + initial_neighbor.addr.port, + -1, + ) + .unwrap(); + } + tx.commit().unwrap(); + } + + if !config.node.deny_nodes.is_empty() { + warn!("Will ignore nodes {:?}", &config.node.deny_nodes); + } + + // deny all config-denied peers + { + let mut tx = peerdb.tx_begin().unwrap(); + for denied in config.node.deny_nodes.iter() { + PeerDB::set_deny_peer( + &mut tx, + denied.addr.network_id, + &denied.addr.addrbytes, + denied.addr.port, + get_epoch_time_secs() + 24 * 365 * 3600, + ) + .unwrap(); + } + tx.commit().unwrap(); + } + + // update services to indicate we can support mempool sync + { + let mut tx = peerdb.tx_begin().unwrap(); + PeerDB::set_local_services( + &mut tx, + (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16), + ) + .unwrap(); + tx.commit().unwrap(); + } + + peerdb + } + + /// Set up the PeerNetwork, but do not bind it. 
+ pub fn setup_peer_network( + config: &Config, + atlas_config: &AtlasConfig, + burnchain: Burnchain, + ) -> PeerNetwork { + let sortdb = SortitionDB::open( + &config.get_burn_db_file_path(), + true, + burnchain.pox_constants.clone(), + ) + .expect("Error while instantiating sor/tition db"); + + let epochs = SortitionDB::get_stacks_epochs(sortdb.conn()) + .expect("Error while loading stacks epochs"); + + let view = { + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + .expect("Failed to get sortition tip"); + SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) + .unwrap() + }; + + let atlasdb = + AtlasDB::connect(atlas_config.clone(), &config.get_atlas_db_file_path(), true).unwrap(); + + let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true).unwrap(); + + let mut chainstate = + open_chainstate_with_faults(config).expect("FATAL: could not open chainstate DB"); + + let mut stackerdb_machines = HashMap::new(); + for stackerdb_contract_id in config.node.stacker_dbs.iter() { + // attempt to load the config + let (instantiate, stacker_db_config) = match StackerDBConfig::from_smart_contract( + &mut chainstate, + &sortdb, + stackerdb_contract_id, + ) { + Ok(c) => (true, c), + Err(e) => { + warn!( + "Failed to load StackerDB config for {}: {:?}", + stackerdb_contract_id, &e + ); + (false, StackerDBConfig::noop()) + } + }; + let mut stackerdbs = + StackerDBs::connect(&config.get_stacker_db_file_path(), true).unwrap(); + + if instantiate { + match stackerdbs.get_stackerdb_id(stackerdb_contract_id) { + Ok(..) 
=> { + // reconfigure + let tx = stackerdbs.tx_begin(stacker_db_config.clone()).unwrap(); + tx.reconfigure_stackerdb(stackerdb_contract_id, &stacker_db_config.signers) + .expect(&format!( + "FATAL: failed to reconfigure StackerDB replica {}", + stackerdb_contract_id + )); + tx.commit().unwrap(); + } + Err(NetError::NoSuchStackerDB(..)) => { + // instantiate replica + let tx = stackerdbs.tx_begin(stacker_db_config.clone()).unwrap(); + tx.create_stackerdb(stackerdb_contract_id, &stacker_db_config.signers) + .expect(&format!( + "FATAL: failed to instantiate StackerDB replica {}", + stackerdb_contract_id + )); + tx.commit().unwrap(); + } + Err(e) => { + panic!("FATAL: failed to query StackerDB state: {:?}", &e); + } + } + } + let stacker_db_sync = match StackerDBSync::new( + stackerdb_contract_id.clone(), + &stacker_db_config, + PeerNetworkComms::new(), + stackerdbs, + ) { + Ok(s) => s, + Err(e) => { + warn!( + "Failed to instantiate StackerDB sync machine for {}: {:?}", + stackerdb_contract_id, &e + ); + continue; + } + }; + + stackerdb_machines.insert( + stackerdb_contract_id.clone(), + (stacker_db_config, stacker_db_sync), + ); + } + + let stackerdb_contract_ids: Vec<_> = + stackerdb_machines.keys().map(|sc| sc.clone()).collect(); + let peerdb = Self::setup_peer_db(config, &burnchain, &stackerdb_contract_ids); + + let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { + Ok(local_peer) => local_peer, + _ => panic!("Unable to retrieve local peer"), + }; + + let p2p_net = PeerNetwork::new( + peerdb, + atlasdb, + stackerdbs, + local_peer, + config.burnchain.peer_version, + burnchain, + view, + config.connection_options.clone(), + stackerdb_machines, + epochs, + ); + + p2p_net + } + + /// This function sets the global var `GLOBAL_BURNCHAIN_SIGNER`. + /// + /// This variable is used for prometheus monitoring (which only + /// runs when the feature flag `monitoring_prom` is activated). 
+ /// The address is set using the single-signature BTC address + /// associated with `keychain`'s public key. This address always + /// assumes Epoch-2.1 rules for the miner address: if the + /// node is configured for segwit, then the miner address generated + /// is a segwit address, otherwise it is a p2pkh. + /// + fn set_monitoring_miner_address(keychain: &Keychain, relayer_thread: &RelayerThread) { + let public_key = keychain.get_pub_key(); + let miner_addr = relayer_thread + .bitcoin_controller + .get_miner_address(StacksEpochId::Epoch21, &public_key); + let miner_addr_str = addr2str(&miner_addr); + let _ = monitoring::set_burnchain_signer(BurnchainSigner(miner_addr_str)).map_err(|e| { + warn!("Failed to set global burnchain signer: {:?}", &e); + e + }); + } + + pub fn spawn( + runloop: &RunLoop, + globals: Globals, + // relay receiver endpoint for the p2p thread, so the relayer can feed it data to push + relay_recv: Receiver, + ) -> StacksNode { + let config = runloop.config().clone(); + let is_miner = runloop.is_miner(); + let burnchain = runloop.get_burnchain(); + let atlas_config = config.atlas.clone(); + let keychain = Keychain::default(config.node.seed.clone()); + + // we can call _open_ here rather than _connect_, since connect is first called in + // make_genesis_block + let mut sortdb = SortitionDB::open( + &config.get_burn_db_file_path(), + true, + burnchain.pox_constants.clone(), + ) + .expect("Error while instantiating sortition db"); + + Self::setup_ast_size_precheck(&config, &mut sortdb); + + let _ = Self::setup_mempool_db(&config); + + let mut p2p_net = Self::setup_peer_network(&config, &atlas_config, burnchain.clone()); + + let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true) + .expect("FATAL: failed to connect to stacker DB"); + + let relayer = Relayer::from_p2p(&mut p2p_net, stackerdbs); + + let local_peer = p2p_net.local_peer.clone(); + + // setup initial key registration + let leader_key_registration_state = if 
config.node.mock_mining { + // mock mining, pretend to have a registered key + let (vrf_public_key, _) = keychain.make_vrf_keypair(VRF_MOCK_MINER_KEY); + LeaderKeyRegistrationState::Active(RegisteredKey { + target_block_height: VRF_MOCK_MINER_KEY, + block_height: 1, + op_vtxindex: 1, + vrf_public_key, + }) + } else { + LeaderKeyRegistrationState::Inactive + }; + globals.set_initial_leader_key_registration_state(leader_key_registration_state); + + let relayer_thread = RelayerThread::new(runloop, local_peer.clone(), relayer); + + StacksNode::set_monitoring_miner_address(&keychain, &relayer_thread); + + let relayer_thread_handle = thread::Builder::new() + .name(format!("relayer-{}", &local_peer.data_url)) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .spawn(move || { + relayer_thread.main(relay_recv); + }) + .expect("FATAL: failed to start relayer thread"); + + let p2p_event_dispatcher = runloop.get_event_dispatcher(); + let p2p_thread = PeerThread::new(runloop, p2p_net); + let p2p_thread_handle = thread::Builder::new() + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .name(format!( + "p2p-({},{})", + &config.node.p2p_bind, &config.node.rpc_bind + )) + .spawn(move || { + p2p_thread.main(p2p_event_dispatcher); + }) + .expect("FATAL: failed to start p2p thread"); + + info!("Start HTTP server on: {}", &config.node.rpc_bind); + info!("Start P2P server on: {}", &config.node.p2p_bind); + + StacksNode { + atlas_config, + globals, + is_miner, + p2p_thread_handle, + relayer_thread_handle, + } + } + + /// Notify the relayer that a new burn block has been processed by the sortition db, + /// telling it to process the block and begin mining if this miner won. + /// returns _false_ if the relayer hung up the channel. + /// Called from the main thread. + pub fn relayer_burnchain_notify(&self) -> bool { + if !self.is_miner { + // node is a follower, don't try to process my own tenure. 
+ return true; + } + + let Some(snapshot) = self.globals.get_last_sortition() else { + debug!("Tenure: Notify sortition! No last burn block"); + return true; + }; + + debug!( + "Tenure: Notify sortition!"; + "consensus_hash" => %snapshot.consensus_hash, + "burn_block_hash" => %snapshot.burn_header_hash, + "winning_stacks_block_hash" => %snapshot.winning_stacks_block_hash, + "burn_block_height" => &snapshot.block_height, + "sortition_id" => %snapshot.sortition_id + ); + + // unlike in neon_node, the nakamoto node should *always* notify the relayer of + // a new burnchain block + + return self + .globals + .relay_send + .send(RelayerDirective::ProcessTenure( + snapshot.consensus_hash.clone(), + snapshot.parent_burn_header_hash.clone(), + snapshot.winning_stacks_block_hash.clone(), + )) + .is_ok(); + } + + /// Process a state coming from the burnchain, by extracting the validated KeyRegisterOp + /// and inspecting if a sortition was won. + /// `ibd`: boolean indicating whether or not we are in the initial block download + /// Called from the main thread. 
+ pub fn process_burnchain_state( + &mut self, + sortdb: &SortitionDB, + sort_id: &SortitionId, + ibd: bool, + ) -> Option { + let mut last_sortitioned_block = None; + + let ic = sortdb.index_conn(); + + let block_snapshot = SortitionDB::get_block_snapshot(&ic, sort_id) + .expect("Failed to obtain block snapshot for processed burn block.") + .expect("Failed to obtain block snapshot for processed burn block."); + let block_height = block_snapshot.block_height; + + let block_commits = + SortitionDB::get_block_commits_by_block(&ic, &block_snapshot.sortition_id) + .expect("Unexpected SortitionDB error fetching block commits"); + + let num_block_commits = block_commits.len(); + + update_active_miners_count_gauge(block_commits.len() as i64); + + for op in block_commits.into_iter() { + if op.txid == block_snapshot.winning_block_txid { + info!( + "Received burnchain block #{} including block_commit_op (winning) - {} ({})", + block_height, op.apparent_sender, &op.block_header_hash + ); + last_sortitioned_block = Some((block_snapshot.clone(), op.vtxindex)); + } else { + if self.is_miner { + info!( + "Received burnchain block #{} including block_commit_op - {} ({})", + block_height, op.apparent_sender, &op.block_header_hash + ); + } + } + } + + let key_registers = + SortitionDB::get_leader_keys_by_block(&ic, &block_snapshot.sortition_id) + .expect("Unexpected SortitionDB error fetching key registers"); + + let num_key_registers = key_registers.len(); + + self.globals + .try_activate_leader_key_registration(block_height, key_registers); + + debug!( + "Processed burnchain state"; + "burn_height" => block_height, + "leader_keys_count" => num_key_registers, + "block_commits_count" => num_block_commits, + "in_initial_block_download?" 
=> ibd, + ); + + self.globals.set_last_sortition(block_snapshot); + last_sortitioned_block.map(|x| x.0) + } + + /// Join all inner threads + pub fn join(self) { + self.relayer_thread_handle.join().unwrap(); + self.p2p_thread_handle.join().unwrap(); + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs new file mode 100644 index 0000000000..cb9942d451 --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -0,0 +1,645 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use std::convert::TryFrom; +use std::thread; +use std::thread::JoinHandle; +use std::time::Instant; + +use super::relayer::RelayerThread; +use super::Error as NakamotoNodeError; +use super::{Config, EventDispatcher, Keychain}; +use crate::globals::Globals; +use crate::mockamoto::signer::SelfSigner; +use crate::nakamoto_node::VRF_MOCK_MINER_KEY; +use crate::run_loop::RegisteredKey; +use crate::ChainTip; +use clarity::vm::types::PrincipalData; +use stacks::burnchains::{Burnchain, BurnchainParameters}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureStart}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; +use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::chainstate::stacks::TenureChangeCause; +use stacks::chainstate::stacks::TenureChangePayload; +use stacks::chainstate::stacks::ThresholdSignature; +use stacks::chainstate::stacks::{ + CoinbasePayload, StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, + TransactionPayload, TransactionVersion, +}; +use stacks::core::mempool::MemPoolDB; +use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; +use stacks::cost_estimates::metrics::UnitMetric; +use stacks::cost_estimates::UnitEstimator; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::PrivateKey; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::Hash160; +use stacks_common::util::vrf::VRFProof; + +pub enum MinerDirective { + /// The miner won sortition so they should begin a new tenure + BeginTenure { + parent_tenure_start: StacksBlockId, + burnchain_tip: BlockSnapshot, + }, + /// The miner should try to continue their tenure if they are the active miner + ContinueTenure { new_burn_view: ConsensusHash }, + /// The miner did 
not win sortition + StopTenure, +} + +struct ParentTenureInfo { + #[allow(dead_code)] + parent_tenure_start: StacksBlockId, + parent_tenure_blocks: u64, +} + +/// Metadata required for beginning a new tenure +struct ParentStacksBlockInfo { + /// Header metadata for the Stacks block we're going to build on top of + stacks_parent_header: StacksHeaderInfo, + /// the total amount burned in the sortition that selected the Stacks block parent + parent_block_total_burn: u64, + /// nonce to use for this new block's coinbase transaction + coinbase_nonce: u64, + parent_tenure: Option, +} + +pub struct BlockMinerThread { + /// node config struct + config: Config, + /// handle to global state + globals: Globals, + /// copy of the node's keychain + keychain: Keychain, + /// burnchain configuration + burnchain: Burnchain, + /// Set of blocks that we have mined, but are still potentially-broadcastable + /// (copied from RelayerThread since we need the info to determine the strategy for mining the + /// next block during this tenure). 
+ last_mined_blocks: Vec, + /// Copy of the node's registered VRF key + registered_key: RegisteredKey, + /// Burnchain block snapshot which elected this miner + burn_block: BlockSnapshot, + /// The start of the parent tenure for this tenure + parent_tenure_id: StacksBlockId, + /// Handle to the node's event dispatcher + event_dispatcher: EventDispatcher, +} + +impl BlockMinerThread { + /// Instantiate the miner thread + pub fn new( + rt: &RelayerThread, + registered_key: RegisteredKey, + burn_block: BlockSnapshot, + parent_tenure_id: StacksBlockId, + ) -> BlockMinerThread { + BlockMinerThread { + config: rt.config.clone(), + globals: rt.globals.clone(), + keychain: rt.keychain.clone(), + burnchain: rt.burnchain.clone(), + last_mined_blocks: vec![], + registered_key, + burn_block, + event_dispatcher: rt.event_dispatcher.clone(), + parent_tenure_id, + } + } + + /// Stop a miner tenure by blocking the miner and then joining the tenure thread + pub fn stop_miner(globals: &Globals, prior_miner: JoinHandle<()>) { + globals.block_miner(); + prior_miner + .join() + .expect("FATAL: IO failure joining prior mining thread"); + globals.unblock_miner(); + } + + pub fn run_miner(mut self, prior_miner: Option>) { + // when starting a new tenure, block the mining thread if its currently running. 
+ // the new mining thread will join it (so that the new mining thread stalls, not the relayer) + if let Some(prior_miner) = prior_miner { + Self::stop_miner(&self.globals, prior_miner); + } + + // now, actually run this tenure + let Some(new_block) = self.mine_block() else { + warn!("Failed to mine block"); + return; + }; + + if let Some(self_signer) = self.config.self_signing() { + if let Err(e) = self.self_sign_and_broadcast(self_signer, new_block.clone()) { + warn!("Error self-signing block: {e:?}"); + } else { + self.globals.coord().announce_new_stacks_block(); + } + } else { + warn!("Not self-signing: nakamoto node does not support stacker-signer-protocol yet"); + } + + self.globals.counters.bump_naka_mined_blocks(); + self.last_mined_blocks.push(new_block); + } + + fn self_sign_and_broadcast( + &self, + mut signer: SelfSigner, + mut block: NakamotoBlock, + ) -> Result<(), ChainstateError> { + signer.sign_nakamoto_block(&mut block); + let mut chain_state = super::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + let chainstate_config = chain_state.config(); + let sort_db = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + let sortition_handle = sort_db.index_handle_at_tip(); + let staging_tx = chain_state.staging_db_tx_begin()?; + NakamotoChainState::accept_block( + &chainstate_config, + block, + &sortition_handle, + &staging_tx, + &signer.aggregate_public_key, + )?; + staging_tx.commit()?; + Ok(()) + } + + /// Get the coinbase recipient address, if set in the config and if allowed in this epoch + fn get_coinbase_recipient(&self, epoch_id: StacksEpochId) -> Option { + if epoch_id < StacksEpochId::Epoch21 && self.config.miner.block_reward_recipient.is_some() { + warn!("Coinbase pay-to-contract is not supported in the current epoch"); + None + } else { + self.config.miner.block_reward_recipient.clone() + } + 
} + + fn generate_tenure_change_tx( + &mut self, + nonce: u64, + parent_block_id: StacksBlockId, + parent_tenure_blocks: u64, + miner_pkh: Hash160, + ) -> Option { + if self.config.self_signing().is_none() { + // if we're not self-signing, then we can't generate a tenure change tx: it has to come from the signers. + return None; + } + let is_mainnet = self.config.is_mainnet(); + let chain_id = self.config.burnchain.chain_id; + let tenure_change_tx_payload = TransactionPayload::TenureChange( + TenureChangePayload { + previous_tenure_end: parent_block_id, + previous_tenure_blocks: u32::try_from(parent_tenure_blocks) + .expect("FATAL: more than u32 blocks in a tenure"), + cause: TenureChangeCause::BlockFound, + pubkey_hash: miner_pkh, + signers: vec![], + }, + ThresholdSignature::mock(), + ); + + let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); + tx_auth.set_origin_nonce(nonce); + + let version = if is_mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + let mut tx = StacksTransaction::new(version, tx_auth, tenure_change_tx_payload); + + tx.chain_id = chain_id; + tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&tx); + self.keychain.sign_as_origin(&mut tx_signer); + + Some(tx_signer.get_tx().unwrap()) + } + + /// Create a coinbase transaction. 
+ fn generate_coinbase_tx( + &mut self, + nonce: u64, + epoch_id: StacksEpochId, + vrf_proof: VRFProof, + ) -> StacksTransaction { + let is_mainnet = self.config.is_mainnet(); + let chain_id = self.config.burnchain.chain_id; + let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); + tx_auth.set_origin_nonce(nonce); + + let version = if is_mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + let recipient_opt = self.get_coinbase_recipient(epoch_id); + + let mut tx = StacksTransaction::new( + version, + tx_auth, + TransactionPayload::Coinbase( + CoinbasePayload([0u8; 32]), + recipient_opt, + Some(vrf_proof), + ), + ); + tx.chain_id = chain_id; + tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&tx); + self.keychain.sign_as_origin(&mut tx_signer); + + tx_signer.get_tx().unwrap() + } + + /// Load up the parent block info for mining. + /// If there's no parent because this is the first block, then return the genesis block's info. + /// If we can't find the parent in the DB but we expect one, return None. 
+ fn load_block_parent_info( + &self, + burn_db: &mut SortitionDB, + chain_state: &mut StacksChainState, + ) -> Option { + let Some(stacks_tip) = + NakamotoChainState::get_canonical_block_header(chain_state.db(), burn_db) + .expect("FATAL: could not query chain tip") + else { + debug!("No Stacks chain tip known, will return a genesis block"); + let (network, _) = self.config.burnchain.get_bitcoin_network(); + let burnchain_params = + BurnchainParameters::from_params(&self.config.burnchain.chain, &network) + .expect("Bitcoin network unsupported"); + + let chain_tip = ChainTip::genesis( + &burnchain_params.first_block_hash, + burnchain_params.first_block_height.into(), + burnchain_params.first_block_timestamp.into(), + ); + + return Some(ParentStacksBlockInfo { + parent_tenure: Some(ParentTenureInfo { + parent_tenure_start: chain_tip.metadata.index_block_hash(), + parent_tenure_blocks: 0, + }), + stacks_parent_header: chain_tip.metadata, + parent_block_total_burn: 0, + coinbase_nonce: 0, + }); + }; + + let miner_address = self + .keychain + .origin_address(self.config.is_mainnet()) + .unwrap(); + match ParentStacksBlockInfo::lookup( + chain_state, + burn_db, + &self.burn_block, + miner_address, + &self.parent_tenure_id, + stacks_tip, + ) { + Ok(parent_info) => Some(parent_info), + Err(NakamotoNodeError::BurnchainTipChanged) => { + self.globals.counters.bump_missed_tenures(); + None + } + Err(..) => None, + } + } + + /// Generate the VRF proof for the block we're going to build. 
+ /// Returns Some(proof) if we could make the proof + /// Return None if we could not make the proof + fn make_vrf_proof(&mut self) -> Option { + // if we're a mock miner, then make sure that the keychain has a keypair for the mocked VRF + // key + let vrf_proof = if self.config.node.mock_mining { + self.keychain.generate_proof( + VRF_MOCK_MINER_KEY, + self.burn_block.sortition_hash.as_bytes(), + ) + } else { + self.keychain.generate_proof( + self.registered_key.target_block_height, + self.burn_block.sortition_hash.as_bytes(), + ) + }; + + debug!( + "Generated VRF Proof: {} over {} ({},{}) with key {}", + vrf_proof.to_hex(), + &self.burn_block.sortition_hash, + &self.burn_block.block_height, + &self.burn_block.burn_header_hash, + &self.registered_key.vrf_public_key.to_hex() + ); + Some(vrf_proof) + } + + /// Try to mine a Stacks block by assembling one from mempool transactions and sending a + /// burnchain block-commit transaction. If we succeed, then return the assembled block data as + /// well as the microblock private key to use to produce microblocks. + /// Return None if we couldn't build a block for whatever reason. + fn mine_block(&mut self) -> Option { + debug!("block miner thread ID is {:?}", thread::current().id()); + super::fault_injection_long_tenure(); + + let burn_db_path = self.config.get_burn_db_file_path(); + let stacks_chainstate_path = self.config.get_chainstate_path_str(); + + let cost_estimator = self + .config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = self + .config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + // NOTE: read-write access is needed in order to be able to query the recipient set. 
+ // This is an artifact of the way the MARF is built (see #1449) + let mut burn_db = + SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + + let mut chain_state = super::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + + let mut mem_pool = MemPoolDB::open( + self.config.is_mainnet(), + self.config.burnchain.chain_id, + &stacks_chainstate_path, + cost_estimator, + metric, + ) + .expect("Database failure opening mempool"); + + let assembly_start = Instant::now(); + + let target_epoch_id = + SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1) + .ok()? + .expect("FATAL: no epoch defined") + .epoch_id; + let mut parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; + let vrf_proof = self.make_vrf_proof()?; + + if self.last_mined_blocks.is_empty() { + if parent_block_info.parent_tenure.is_none() { + warn!( + "Miner should be starting a new tenure, but failed to load parent tenure info" + ); + return None; + } + } + + // create our coinbase if this is the first block we've mined this tenure + let tenure_start_info = if let Some(ref par_tenure_info) = parent_block_info.parent_tenure { + let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); + let current_miner_nonce = parent_block_info.coinbase_nonce; + let tenure_change_tx = self.generate_tenure_change_tx( + current_miner_nonce, + parent_block_id, + par_tenure_info.parent_tenure_blocks, + self.keychain.get_nakamoto_pkh(), + )?; + let coinbase_tx = self.generate_coinbase_tx( + current_miner_nonce + 1, + target_epoch_id, + vrf_proof.clone(), + ); + Some(NakamotoTenureStart { + coinbase_tx, + // TODO (refactor): the nakamoto block builder doesn't use this VRF proof, + // it has to be included in the coinbase tx, which is an arg to the builder. + // we should probably just remove this from the nakamoto block builder. 
+                vrf_proof: vrf_proof.clone(),
+                tenure_change_tx,
+            })
+        } else {
+            None
+        };
+
+        parent_block_info.stacks_parent_header.microblock_tail = None;
+
+        // build the block itself
+        let (mut block, _, _) = match NakamotoBlockBuilder::build_nakamoto_block(
+            &chain_state,
+            &burn_db.index_conn(),
+            &mut mem_pool,
+            // TODO (refactor): the nakamoto block builder doesn't use the parent tenure ID,
+            // it has to be included in the tenure change tx, which is an arg to the builder.
+            // we should probably just remove this from the nakamoto block builder, so that
+            // there isn't duplicated or unused logic here
+            &self.parent_tenure_id,
+            &parent_block_info.stacks_parent_header,
+            &self.burn_block.consensus_hash,
+            self.burn_block.total_burn,
+            tenure_start_info,
+            self.config.make_block_builder_settings(
+                // TODO: the attempt counter needs a different configuration approach in nakamoto
+                1,
+                false,
+                self.globals.get_miner_status(),
+            ),
+            Some(&self.event_dispatcher),
+        ) {
+            Ok(block) => block,
+            Err(e) => {
+                error!("Relayer: Failure mining anchored block: {}", e);
+                return None;
+            }
+        };
+
+        let mining_key = self.keychain.get_nakamoto_sk();
+        let miner_signature = mining_key
+            .sign(block.header.signature_hash().ok()?.as_bytes())
+            .ok()?;
+        block.header.miner_signature = miner_signature;
+
+        info!(
+            "Miner: Succeeded assembling {} block #{}: {}, with {} txs",
+            if parent_block_info.parent_block_total_burn == 0 {
+                "Genesis"
+            } else {
+                "Stacks"
+            },
+            block.header.chain_length,
+            block.header.block_hash(),
+            block.txs.len(),
+        );
+
+        // last chance -- confirm that the stacks tip is unchanged (since it could have taken long
+        // enough to build this block that another block could have arrived), and confirm that all
+        // Stacks blocks with heights higher than the canonical tip are processed.
+ let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if cur_burn_chain_tip.consensus_hash != block.header.consensus_hash { + info!("Miner: Cancel block assembly; burnchain tip has changed"); + self.globals.counters.bump_missed_tenures(); + return None; + } + + Some(block) + } +} + +impl ParentStacksBlockInfo { + /// Determine where in the set of forks to attempt to mine the next anchored block. + /// `mine_tip_ch` and `mine_tip_bhh` identify the parent block on top of which to mine. + /// `check_burn_block` identifies what we believe to be the burn chain's sortition history tip. + /// This is used to mitigate (but not eliminate) a TOCTTOU issue with mining: the caller's + /// conception of the sortition history tip may have become stale by the time they call this + /// method, in which case, mining should *not* happen (since the block will be invalid). + pub fn lookup( + chain_state: &mut StacksChainState, + burn_db: &mut SortitionDB, + check_burn_block: &BlockSnapshot, + miner_address: StacksAddress, + parent_tenure_id: &StacksBlockId, + stacks_tip_header: StacksHeaderInfo, + ) -> Result { + // the stacks block I'm mining off of's burn header hash and vtxindex: + let parent_snapshot = SortitionDB::get_block_snapshot_consensus( + burn_db.conn(), + &stacks_tip_header.consensus_hash, + ) + .expect("Failed to look up block's parent snapshot") + .expect("Failed to look up block's parent snapshot"); + + let parent_sortition_id = &parent_snapshot.sortition_id; + + let parent_block_total_burn = + if &stacks_tip_header.consensus_hash == &FIRST_BURNCHAIN_CONSENSUS_HASH { + 0 + } else { + let parent_burn_block = + SortitionDB::get_block_snapshot(burn_db.conn(), parent_sortition_id) + .expect("SortitionDB failure.") + .ok_or_else(|| { + error!( + "Failed to find block snapshot for the parent sortition"; + "parent_sortition_id" => %parent_sortition_id + ); + 
NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + parent_burn_block.total_burn + }; + + // don't mine off of an old burnchain block + let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { + info!( + "New canonical burn chain tip detected. Will not try to mine."; + "new_consensus_hash" => %burn_chain_tip.consensus_hash, + "old_consensus_hash" => %check_burn_block.consensus_hash, + "new_burn_height" => burn_chain_tip.block_height, + "old_burn_height" => check_burn_block.block_height + ); + return Err(NakamotoNodeError::BurnchainTipChanged); + } + + let Ok(Some(parent_tenure_header)) = + NakamotoChainState::get_block_header(chain_state.db(), &parent_tenure_id) + else { + warn!("Failed loading parent tenure ID"; "parent_tenure_id" => %parent_tenure_id); + return Err(NakamotoNodeError::ParentNotFound); + }; + + // check if we're mining a first tenure block (by checking if our parent block is in the tenure of parent_tenure_id) + // and if so, figure out how many blocks there were in the parent tenure + let parent_tenure_info = if stacks_tip_header.consensus_hash + == parent_tenure_header.consensus_hash + { + let parent_tenure_blocks = if parent_tenure_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + let Ok(Some(last_parent_tenure_header)) = + NakamotoChainState::get_nakamoto_tenure_finish_block_header( + chain_state.db(), + &parent_tenure_header.consensus_hash, + ) + else { + warn!("Failed loading last block of parent tenure"; "parent_tenure_id" => %parent_tenure_id); + return Err(NakamotoNodeError::ParentNotFound); + }; + // the last known tenure block of our parent should be the stacks_tip. if not, error. 
+ if stacks_tip_header.index_block_hash() + != last_parent_tenure_header.index_block_hash() + { + return Err(NakamotoNodeError::NewParentDiscovered); + } + 1 + last_parent_tenure_header.stacks_block_height + - parent_tenure_header.stacks_block_height + } else { + 1 + }; + Some(ParentTenureInfo { + parent_tenure_start: parent_tenure_id.clone(), + parent_tenure_blocks, + }) + } else { + None + }; + + debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {} (height {} hash {})", + &check_burn_block.consensus_hash, check_burn_block.block_height, &check_burn_block.burn_header_hash, + &parent_snapshot.consensus_hash, parent_snapshot.block_height, &parent_snapshot.burn_header_hash); + + let coinbase_nonce = { + let principal = miner_address.into(); + let account = chain_state + .with_read_only_clarity_tx( + &burn_db.index_conn(), + &stacks_tip_header.index_block_hash(), + |conn| StacksChainState::get_account(conn, &principal), + ) + .expect(&format!( + "BUG: stacks tip block {} no longer exists after we queried it", + &stacks_tip_header.index_block_hash(), + )); + account.nonce + }; + + Ok(ParentStacksBlockInfo { + stacks_parent_header: stacks_tip_header, + parent_block_total_burn, + coinbase_nonce, + parent_tenure: parent_tenure_info, + }) + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs new file mode 100644 index 0000000000..8fe688972e --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -0,0 +1,418 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use std::cmp; +use std::collections::VecDeque; + +use std::default::Default; +use std::net::SocketAddr; +use std::sync::mpsc::TrySendError; + +use std::thread; +use std::time::Duration; + +use stacks::burnchains::db::BurnchainHeaderReader; +use stacks::burnchains::PoxConstants; +use stacks::chainstate::burn::db::sortdb::SortitionDB; + +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::miner::signal_mining_blocked; + +use stacks::core::mempool::MemPoolDB; + +use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; +use stacks::cost_estimates::{CostEstimator, FeeEstimator, UnitEstimator}; + +use stacks::net::dns::{DNSClient, DNSResolver}; +use stacks::net::p2p::PeerNetwork; + +use stacks::net::RPCHandlerArgs; + +use stacks_common::util::hash::Sha256Sum; + +use crate::burnchains::make_bitcoin_indexer; +use crate::globals::Globals; +use crate::globals::RelayerDirective; + +use crate::run_loop::nakamoto::RunLoop; + +use crate::{Config, EventDispatcher}; + +use super::open_chainstate_with_faults; + +/// Thread that runs the network state machine, handling both p2p and http requests. +pub struct PeerThread { + /// Node config + config: Config, + /// instance of the peer network. Made optional in order to trick the borrow checker. 
+ net: Option, + /// handle to global inter-thread comms + globals: Globals, + /// how long to wait for network messages on each poll, in millis + poll_timeout: u64, + /// handle to the sortition DB (optional so we can take/replace it) + sortdb: Option, + /// handle to the chainstate DB (optional so we can take/replace it) + chainstate: Option, + /// handle to the mempool DB (optional so we can take/replace it) + mempool: Option, + /// buffer of relayer commands with block data that couldn't be sent to the relayer just yet + /// (i.e. due to backpressure). We track this separately, instead of just using a bigger + /// channel, because we need to know when backpressure occurs in order to throttle the p2p + /// thread's downloader. + results_with_data: VecDeque, + /// total number of p2p state-machine passes so far. Used to signal when to download the next + /// reward cycle of blocks + num_p2p_state_machine_passes: u64, + /// total number of inventory state-machine passes so far. Used to signal when to download the + /// next reward cycle of blocks. + num_inv_sync_passes: u64, + /// total number of download state-machine passes so far. Used to signal when to download the + /// next reward cycle of blocks. + num_download_passes: u64, + /// last burnchain block seen in the PeerNetwork's chain view since the last run + last_burn_block_height: u64, +} + +impl PeerThread { + /// Main loop of the p2p thread. + /// Runs in a separate thread. + /// Continuously receives, until told otherwise. + pub fn main(mut self, event_dispatcher: EventDispatcher) { + debug!("p2p thread ID is {:?}", thread::current().id()); + let should_keep_running = self.globals.should_keep_running.clone(); + let (mut dns_resolver, mut dns_client) = DNSResolver::new(10); + + // spawn a daemon thread that runs the DNS resolver. + // It will die when the rest of the system dies. 
+ { + let _jh = thread::Builder::new() + .name("dns-resolver".to_string()) + .spawn(move || { + debug!("DNS resolver thread ID is {:?}", thread::current().id()); + dns_resolver.thread_main(); + }) + .unwrap(); + } + + // NOTE: these must be instantiated in the thread context, since it can't be safely sent + // between threads + let fee_estimator_opt = self.config.make_fee_estimator(); + let cost_estimator = self + .config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let cost_metric = self + .config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let indexer = make_bitcoin_indexer(&self.config, Some(should_keep_running)); + + // receive until we can't reach the receiver thread + loop { + if !self.globals.keep_running() { + break; + } + if !self.run_one_pass( + &indexer, + Some(&mut dns_client), + &event_dispatcher, + &cost_estimator, + &cost_metric, + fee_estimator_opt.as_ref(), + ) { + break; + } + } + + // kill miner + signal_mining_blocked(self.globals.get_miner_status()); + + // set termination flag so other threads die + self.globals.signal_stop(); + + // thread exited, so signal to the relayer thread to die. 
+ while let Err(TrySendError::Full(_)) = + self.globals.relay_send.try_send(RelayerDirective::Exit) + { + warn!("Failed to direct relayer thread to exit, sleeping and trying again"); + thread::sleep(Duration::from_secs(5)); + } + info!("P2P thread exit!"); + } + + /// set up the mempool DB connection + pub fn connect_mempool_db(config: &Config) -> MemPoolDB { + // create estimators, metric instances for RPC handler + let cost_estimator = config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let mempool = MemPoolDB::open( + config.is_mainnet(), + config.burnchain.chain_id, + &config.get_chainstate_path_str(), + cost_estimator, + metric, + ) + .expect("Database failure opening mempool"); + + mempool + } + + /// Instantiate the p2p thread. + /// Binds the addresses in the config (which may panic if the port is blocked). + /// This is so the node will crash "early" before any new threads start if there's going to be + /// a bind error anyway. 
+ pub fn new(runloop: &RunLoop, net: PeerNetwork) -> PeerThread { + Self::new_all( + runloop.get_globals(), + runloop.config(), + runloop.get_burnchain().pox_constants, + net, + ) + } + + pub fn new_all( + globals: Globals, + config: &Config, + pox_constants: PoxConstants, + mut net: PeerNetwork, + ) -> Self { + let config = config.clone(); + let mempool = Self::connect_mempool_db(&config); + let burn_db_path = config.get_burn_db_file_path(); + + let sortdb = SortitionDB::open(&burn_db_path, false, pox_constants) + .expect("FATAL: could not open sortition DB"); + + let chainstate = + open_chainstate_with_faults(&config).expect("FATAL: could not open chainstate DB"); + + let p2p_sock: SocketAddr = config.node.p2p_bind.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.p2p_bind + )); + let rpc_sock = config.node.rpc_bind.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.rpc_bind + )); + + net.bind(&p2p_sock, &rpc_sock) + .expect("BUG: PeerNetwork could not bind or is already bound"); + + let poll_timeout = cmp::min(5000, config.miner.first_attempt_time_ms / 2); + + PeerThread { + config, + net: Some(net), + globals, + poll_timeout, + sortdb: Some(sortdb), + chainstate: Some(chainstate), + mempool: Some(mempool), + results_with_data: VecDeque::new(), + num_p2p_state_machine_passes: 0, + num_inv_sync_passes: 0, + num_download_passes: 0, + last_burn_block_height: 0, + } + } + + /// Do something with mutable references to the mempool, sortdb, and chainstate + /// Fools the borrow checker. 
+    /// NOT COMPOSABLE
+    fn with_chainstate<F, R>(&mut self, func: F) -> R
+    where
+        F: FnOnce(&mut PeerThread, &mut SortitionDB, &mut StacksChainState, &mut MemPoolDB) -> R,
+    {
+        let mut sortdb = self.sortdb.take().expect("BUG: sortdb already taken");
+        let mut chainstate = self
+            .chainstate
+            .take()
+            .expect("BUG: chainstate already taken");
+        let mut mempool = self.mempool.take().expect("BUG: mempool already taken");
+
+        let res = func(self, &mut sortdb, &mut chainstate, &mut mempool);
+
+        self.sortdb = Some(sortdb);
+        self.chainstate = Some(chainstate);
+        self.mempool = Some(mempool);
+
+        res
+    }
+
+    /// Get an immutable ref to the inner network.
+    /// DO NOT USE WITHIN with_network()
+    fn get_network(&self) -> &PeerNetwork {
+        self.net.as_ref().expect("BUG: did not replace net")
+    }
+
+    /// Do something with mutable references to the network.
+    /// Fools the borrow checker.
+    /// NOT COMPOSABLE. DO NOT CALL THIS OR get_network() IN func
+    fn with_network<F, R>(&mut self, func: F) -> R
+    where
+        F: FnOnce(&mut PeerThread, &mut PeerNetwork) -> R,
+    {
+        let mut net = self.net.take().expect("BUG: net already taken");
+
+        let res = func(self, &mut net);
+
+        self.net = Some(net);
+        res
+    }
+
+    /// Run one pass of the p2p/http state machine
+    /// Return true if we should continue running passes; false if not
+    pub fn run_one_pass<B: BurnchainHeaderReader>(
+        &mut self,
+        indexer: &B,
+        dns_client_opt: Option<&mut DNSClient>,
+        event_dispatcher: &EventDispatcher,
+        cost_estimator: &Box<dyn CostEstimator>,
+        cost_metric: &Box<dyn CostMetric>,
+        fee_estimator: Option<&Box<dyn FeeEstimator>>,
+    ) -> bool {
+        // initial block download?
+ let ibd = self.globals.sync_comms.get_ibd(); + let download_backpressure = self.results_with_data.len() > 0; + let poll_ms = if !download_backpressure && self.get_network().has_more_downloads() { + // keep getting those blocks -- drive the downloader state-machine + debug!( + "P2P: backpressure: {}, more downloads: {}", + download_backpressure, + self.get_network().has_more_downloads() + ); + 1 + } else { + self.poll_timeout + }; + + // do one pass + let p2p_res = self.with_chainstate(|p2p_thread, sortdb, chainstate, mempool| { + // NOTE: handler_args must be created such that it outlives the inner net.run() call and + // doesn't ref anything within p2p_thread. + let handler_args = RPCHandlerArgs { + exit_at_block_height: p2p_thread + .config + .burnchain + .process_exit_at_block_height + .clone(), + genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) + .unwrap(), + event_observer: Some(event_dispatcher), + cost_estimator: Some(cost_estimator.as_ref()), + cost_metric: Some(cost_metric.as_ref()), + fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), + ..RPCHandlerArgs::default() + }; + p2p_thread.with_network(|_, net| { + net.run( + indexer, + sortdb, + chainstate, + mempool, + dns_client_opt, + download_backpressure, + ibd, + poll_ms, + &handler_args, + ) + }) + }); + + match p2p_res { + Ok(network_result) => { + let mut have_update = false; + if self.num_p2p_state_machine_passes < network_result.num_state_machine_passes { + // p2p state-machine did a full pass. Notify anyone listening. + self.globals.sync_comms.notify_p2p_state_pass(); + self.num_p2p_state_machine_passes = network_result.num_state_machine_passes; + } + + if self.num_inv_sync_passes < network_result.num_inv_sync_passes { + // inv-sync state-machine did a full pass. Notify anyone listening. 
+ self.globals.sync_comms.notify_inv_sync_pass(); + self.num_inv_sync_passes = network_result.num_inv_sync_passes; + + // the relayer cares about the number of inventory passes, so pass this along + have_update = true; + } + + if self.num_download_passes < network_result.num_download_passes { + // download state-machine did a full pass. Notify anyone listening. + self.globals.sync_comms.notify_download_pass(); + self.num_download_passes = network_result.num_download_passes; + + // the relayer cares about the number of download passes, so pass this along + have_update = true; + } + + if network_result.has_data_to_store() + || self.last_burn_block_height != network_result.burn_height + || have_update + { + // pass along if we have blocks, microblocks, or transactions, or a status + // update on the network's view of the burnchain + self.last_burn_block_height = network_result.burn_height; + self.results_with_data + .push_back(RelayerDirective::HandleNetResult(network_result)); + } + } + Err(e) => { + // this is only reachable if the network is not instantiated correctly -- + // i.e. you didn't connect it + panic!("P2P: Failed to process network dispatch: {:?}", &e); + } + }; + + while let Some(next_result) = self.results_with_data.pop_front() { + // have blocks, microblocks, and/or transactions (don't care about anything else), + // or a directive to mine microblocks + if let Err(e) = self.globals.relay_send.try_send(next_result) { + debug!( + "P2P: {:?}: download backpressure detected (bufferred {})", + &self.get_network().local_peer, + self.results_with_data.len() + ); + match e { + TrySendError::Full(directive) => { + if let RelayerDirective::RunTenure(..) 
= directive { + // can drop this + } else { + // don't lose this data -- just try it again + self.results_with_data.push_front(directive); + } + break; + } + TrySendError::Disconnected(_) => { + info!("P2P: Relayer hang up with p2p channel"); + self.globals.signal_stop(); + return false; + } + } + } else { + debug!("P2P: Dispatched result to Relayer!"); + } + } + + true + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs new file mode 100644 index 0000000000..a90b17866f --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -0,0 +1,961 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use stacks::burnchains::{Burnchain, Txid}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::operations::leader_block_commit::{ + RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS, +}; +use stacks::chainstate::burn::operations::{ + BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, +}; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; +use stacks::chainstate::nakamoto::NakamotoChainState; +use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::miner::{ + get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, +}; +use stacks::core::mempool::MemPoolDB; +use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; +use stacks::core::FIRST_STACKS_BLOCK_HASH; +use stacks::core::STACKS_EPOCH_3_0_MARKER; +use stacks::cost_estimates::metrics::UnitMetric; +use stacks::cost_estimates::UnitEstimator; +use stacks::monitoring::increment_stx_blocks_mined_counter; +use stacks::net::db::LocalPeer; +use stacks::net::relay::Relayer; +use stacks::net::NetworkResult; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, VRFSeed, +}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::get_epoch_time_ms; +use stacks_common::util::hash::Hash160; +use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; +use std::collections::HashMap; +use std::sync::mpsc::Receiver; +use std::sync::mpsc::RecvTimeoutError; +use std::thread::JoinHandle; +use std::time::Duration; +use std::time::Instant; + +use super::Error as NakamotoNodeError; +use super::{ + fault_injection_skip_mining, open_chainstate_with_faults, BlockCommits, Config, + EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE, +}; +use crate::burnchains::BurnchainController; +use crate::globals::Globals; +use crate::globals::RelayerDirective; +use 
crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective};
+use crate::neon_node::LeaderKeyRegistrationState;
+use crate::run_loop::nakamoto::RunLoop;
+use crate::run_loop::RegisteredKey;
+use crate::BitcoinRegtestController;
+
+/// Relayer thread
+/// * accepts network results and stores blocks and microblocks
+/// * forwards new blocks, microblocks, and transactions to the p2p thread
+/// * processes burnchain state
+/// * if mining, runs the miner and broadcasts blocks (via a subordinate MinerThread)
+pub struct RelayerThread {
+    /// Node config
+    pub(crate) config: Config,
+    /// Handle to the sortition DB (optional so we can take/replace it)
+    sortdb: Option<SortitionDB>,
+    /// Handle to the chainstate DB (optional so we can take/replace it)
+    chainstate: Option<StacksChainState>,
+    /// Handle to the mempool DB (optional so we can take/replace it)
+    mempool: Option<MemPoolDB>,
+    /// Handle to global state and inter-thread communication channels
+    pub(crate) globals: Globals,
+    /// Authoritative copy of the keychain state
+    pub(crate) keychain: Keychain,
+    /// Burnchain configuration
+    pub(crate) burnchain: Burnchain,
+    /// height of last VRF key registration request
+    last_vrf_key_burn_height: Option<u64>,
+    /// Set of blocks that we have mined, but are still potentially-broadcastable
+    // TODO: this field is a slow leak!
+ pub(crate) last_commits: BlockCommits, + /// client to the burnchain (used only for sending block-commits) + pub(crate) bitcoin_controller: BitcoinRegtestController, + /// client to the event dispatcher + pub(crate) event_dispatcher: EventDispatcher, + /// copy of the local peer state + local_peer: LocalPeer, + /// last observed burnchain block height from the p2p thread (obtained from network results) + last_network_block_height: u64, + /// time at which we observed a change in the network block height (epoch time in millis) + last_network_block_height_ts: u128, + /// last observed number of downloader state-machine passes from the p2p thread (obtained from + /// network results) + last_network_download_passes: u64, + /// last observed number of inventory state-machine passes from the p2p thread (obtained from + /// network results) + last_network_inv_passes: u64, + /// minimum number of downloader state-machine passes that must take place before mining (this + /// is used to ensure that the p2p thread attempts to download new Stacks block data before + /// this thread tries to mine a block) + min_network_download_passes: u64, + /// minimum number of inventory state-machine passes that must take place before mining (this + /// is used to ensure that the p2p thread attempts to download new Stacks block data before + /// this thread tries to mine a block) + min_network_inv_passes: u64, + + /// Inner relayer instance for forwarding broadcasted data back to the p2p thread for dispatch + /// to neighbors + relayer: Relayer, + + /// handle to the subordinate miner thread + miner_thread: Option>, + /// The relayer thread reads directives from the relay_rcv, but it also periodically wakes up + /// to check if it should issue a block commit or try to register a VRF key + next_initiative: Instant, + is_miner: bool, + /// This is the last snapshot in which the relayer committed + last_committed_at: Option, +} + +impl RelayerThread { + /// Instantiate off of a StacksNode, a 
runloop, and a relayer. + pub fn new(runloop: &RunLoop, local_peer: LocalPeer, relayer: Relayer) -> RelayerThread { + let config = runloop.config().clone(); + let globals = runloop.get_globals(); + let burn_db_path = config.get_burn_db_file_path(); + let stacks_chainstate_path = config.get_chainstate_path_str(); + let is_mainnet = config.is_mainnet(); + let chain_id = config.burnchain.chain_id; + let is_miner = runloop.is_miner(); + + let sortdb = SortitionDB::open(&burn_db_path, true, runloop.get_burnchain().pox_constants) + .expect("FATAL: failed to open burnchain DB"); + + let chainstate = + open_chainstate_with_faults(&config).expect("FATAL: failed to open chainstate DB"); + + let cost_estimator = config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let mempool = MemPoolDB::open( + is_mainnet, + chain_id, + &stacks_chainstate_path, + cost_estimator, + metric, + ) + .expect("Database failure opening mempool"); + + let keychain = Keychain::default(config.node.seed.clone()); + let bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone()); + + RelayerThread { + config: config.clone(), + sortdb: Some(sortdb), + chainstate: Some(chainstate), + mempool: Some(mempool), + globals, + keychain, + burnchain: runloop.get_burnchain(), + last_vrf_key_burn_height: None, + last_commits: HashMap::new(), + bitcoin_controller, + event_dispatcher: runloop.get_event_dispatcher(), + local_peer, + + last_network_block_height: 0, + last_network_block_height_ts: 0, + last_network_download_passes: 0, + min_network_download_passes: 0, + last_network_inv_passes: 0, + min_network_inv_passes: 0, + + relayer, + + miner_thread: None, + is_miner, + next_initiative: Instant::now() + Duration::from_secs(10), + last_committed_at: None, + } + } + + /// Get an immutible ref to the sortdb + pub fn sortdb_ref(&self) -> &SortitionDB { + self.sortdb + .as_ref() + 
.expect("FATAL: tried to access sortdb while taken") + } + + /// Get an immutable ref to the chainstate + pub fn chainstate_ref(&self) -> &StacksChainState { + self.chainstate + .as_ref() + .expect("FATAL: tried to access chainstate while it was taken") + } + + /// Fool the borrow checker into letting us do something with the chainstate databases. + /// DOES NOT COMPOSE -- do NOT call this, or self.sortdb_ref(), or self.chainstate_ref(), within + /// `func`. You will get a runtime panic. + pub fn with_chainstate(&mut self, func: F) -> R + where + F: FnOnce(&mut RelayerThread, &mut SortitionDB, &mut StacksChainState, &mut MemPoolDB) -> R, + { + let mut sortdb = self + .sortdb + .take() + .expect("FATAL: tried to take sortdb while taken"); + let mut chainstate = self + .chainstate + .take() + .expect("FATAL: tried to take chainstate while taken"); + let mut mempool = self + .mempool + .take() + .expect("FATAL: tried to take mempool while taken"); + let res = func(self, &mut sortdb, &mut chainstate, &mut mempool); + self.sortdb = Some(sortdb); + self.chainstate = Some(chainstate); + self.mempool = Some(mempool); + res + } + + /// have we waited for the right conditions under which to start mining a block off of our + /// chain tip?
+ pub fn has_waited_for_latest_blocks(&self) -> bool { + // a network download pass took place + (self.min_network_download_passes <= self.last_network_download_passes + // a network inv pass took place + && self.min_network_inv_passes <= self.last_network_inv_passes) + // we waited long enough for a download pass, but timed out waiting + || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms() + // we're not supposed to wait at all + || !self.config.miner.wait_for_block_download + } + + /// Return debug string for waiting for latest blocks + pub fn debug_waited_for_latest_blocks(&self) -> String { + format!( + "({} <= {} && {} <= {}) || {} + {} < {} || {}", + self.min_network_download_passes, + self.last_network_download_passes, + self.min_network_inv_passes, + self.last_network_inv_passes, + self.last_network_block_height_ts, + self.config.node.wait_time_for_blocks, + get_epoch_time_ms(), + self.config.miner.wait_for_block_download + ) + } + + /// Handle a NetworkResult from the p2p/http state machine. Usually this is the act of + /// * preprocessing and storing new blocks and microblocks + /// * relaying blocks, microblocks, and transactions + /// * updating unconfirmed state views + pub fn process_network_result(&mut self, mut net_result: NetworkResult) { + debug!( + "Relayer: Handle network result (from {})", + net_result.burn_height + ); + + if self.last_network_block_height != net_result.burn_height { + // burnchain advanced; disable mining until we also do a download pass.
+ self.last_network_block_height = net_result.burn_height; + self.min_network_download_passes = net_result.num_download_passes + 1; + self.min_network_inv_passes = net_result.num_inv_sync_passes + 1; + self.last_network_block_height_ts = get_epoch_time_ms(); + debug!( + "Relayer: block mining until the next download pass {}", + self.min_network_download_passes + ); + signal_mining_blocked(self.globals.get_miner_status()); + } + + let net_receipts = self.with_chainstate(|relayer_thread, sortdb, chainstate, mempool| { + relayer_thread + .relayer + .process_network_result( + &relayer_thread.local_peer, + &mut net_result, + sortdb, + chainstate, + mempool, + relayer_thread.globals.sync_comms.get_ibd(), + Some(&relayer_thread.globals.coord_comms), + Some(&relayer_thread.event_dispatcher), + ) + .expect("BUG: failure processing network results") + }); + + if net_receipts.num_new_blocks > 0 || net_receipts.num_new_confirmed_microblocks > 0 { + // if we received any new block data that could invalidate our view of the chain tip, + // then stop mining until we process it + debug!("Relayer: block mining to process newly-arrived blocks or microblocks"); + signal_mining_blocked(self.globals.get_miner_status()); + } + + let mempool_txs_added = net_receipts.mempool_txs_added.len(); + if mempool_txs_added > 0 { + self.event_dispatcher + .process_new_mempool_txs(net_receipts.mempool_txs_added); + } + + let num_unconfirmed_microblock_tx_receipts = + net_receipts.processed_unconfirmed_state.receipts.len(); + if num_unconfirmed_microblock_tx_receipts > 0 { + if let Some(unconfirmed_state) = self.chainstate_ref().unconfirmed_state.as_ref() { + let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); + self.event_dispatcher.process_new_microblocks( + canonical_tip, + net_receipts.processed_unconfirmed_state, + ); + } else { + warn!("Relayer: oops, unconfirmed state is uninitialized but there are microblock events"); + } + } + + // Dispatch retrieved attachments, if any. 
+ if net_result.has_attachments() { + self.event_dispatcher + .process_new_attachments(&net_result.attachments); + } + + // synchronize unconfirmed tx index to p2p thread + self.with_chainstate(|relayer_thread, _sortdb, chainstate, _mempool| { + relayer_thread.globals.send_unconfirmed_txs(chainstate); + }); + + // resume mining if we blocked it, and if we've done the requisite download + // passes + self.last_network_download_passes = net_result.num_download_passes; + self.last_network_inv_passes = net_result.num_inv_sync_passes; + if self.has_waited_for_latest_blocks() { + debug!("Relayer: did a download pass, so unblocking mining"); + signal_mining_ready(self.globals.get_miner_status()); + } + } + + /// Given the pointer to a recently processed sortition, see if we won the sortition. + /// + /// Returns `true` if we won this last sortition. + pub fn process_sortition( + &mut self, + consensus_hash: ConsensusHash, + burn_hash: BurnchainHeaderHash, + committed_index_hash: StacksBlockId, + ) -> MinerDirective { + let sn = + SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), &consensus_hash) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: unknown consensus hash"); + + self.globals.set_last_sortition(sn.clone()); + + let won_sortition = + sn.sortition && self.last_commits.remove(&sn.winning_block_txid).is_some(); + + info!( + "Relayer: Process sortition"; + "sortition_ch" => %consensus_hash, + "burn_hash" => %burn_hash, + "burn_height" => sn.block_height, + "winning_txid" => %sn.winning_block_txid, + "committed_parent" => %committed_index_hash, + "won_sortition?" 
=> won_sortition, + ); + + if won_sortition { + increment_stx_blocks_mined_counter(); + } + + if sn.sortition { + if won_sortition { + MinerDirective::BeginTenure { + parent_tenure_start: committed_index_hash, + burnchain_tip: sn, + } + } else { + MinerDirective::StopTenure + } + } else { + MinerDirective::ContinueTenure { + new_burn_view: consensus_hash, + } + } + } + + /// Constructs and returns a LeaderKeyRegisterOp out of the provided params + fn make_key_register_op( + vrf_public_key: VRFPublicKey, + consensus_hash: &ConsensusHash, + miner_pkh: &Hash160, + ) -> BlockstackOperationType { + BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { + public_key: vrf_public_key, + memo: miner_pkh.as_bytes().to_vec(), + consensus_hash: consensus_hash.clone(), + vtxindex: 0, + txid: Txid([0u8; 32]), + block_height: 0, + burn_header_hash: BurnchainHeaderHash::zero(), + }) + } + + /// Create and broadcast a VRF public key registration transaction. + /// Returns true if we succeed in doing so; false if not. 
+ pub fn rotate_vrf_and_register(&mut self, burn_block: &BlockSnapshot) { + if self.last_vrf_key_burn_height.is_some() { + // already in-flight + return; + } + let cur_epoch = + SortitionDB::get_stacks_epoch(self.sortdb_ref().conn(), burn_block.block_height) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: no epoch defined") + .epoch_id; + let (vrf_pk, _) = self.keychain.make_vrf_keypair(burn_block.block_height); + let burnchain_tip_consensus_hash = &burn_block.consensus_hash; + let miner_pkh = self.keychain.get_nakamoto_pkh(); + + debug!( + "Submitting LeaderKeyRegister"; + "vrf_pk" => vrf_pk.to_hex(), + "burn_block_height" => burn_block.block_height, + "miner_pkh" => miner_pkh.to_hex(), + ); + + let op = Self::make_key_register_op(vrf_pk, burnchain_tip_consensus_hash, &miner_pkh); + + let mut op_signer = self.keychain.generate_op_signer(); + if let Some(txid) = + self.bitcoin_controller + .submit_operation(cur_epoch, op, &mut op_signer, 1) + { + // advance key registration state + self.last_vrf_key_burn_height = Some(burn_block.block_height); + self.globals + .set_pending_leader_key_registration(burn_block.block_height, txid); + self.globals.counters.bump_naka_submitted_vrfs(); + } + } + + /// Produce the block-commit for this anchored block, if we can. + /// `target_ch` is the consensus-hash of the Tenure we will build off + /// `target_bh` is the block hash of the Tenure we will build off + /// Returns the (the most recent burn snapshot, the expected epoch, the commit-op) on success + /// Returns None if we fail somehow. 
+ fn make_block_commit( + &mut self, + target_ch: &ConsensusHash, + target_bh: &BlockHeaderHash, + ) -> Result<(BlockSnapshot, StacksEpochId, LeaderBlockCommitOp), NakamotoNodeError> { + let chain_state = self + .chainstate + .as_mut() + .expect("FATAL: Failed to load chain state"); + let sort_db = self.sortdb.as_mut().expect("FATAL: Failed to load sortdb"); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) + .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?; + + let parent_vrf_proof = + NakamotoChainState::get_block_vrf_proof(chain_state.db(), &target_ch) + .map_err(|_e| NakamotoNodeError::ParentNotFound)? + .unwrap_or_else(|| VRFProof::empty()); + + // let's figure out the recipient set! + let recipients = get_next_recipients( + &sort_tip, + chain_state, + sort_db, + &self.burnchain, + &OnChainRewardSetProvider(), + self.config.node.always_use_affirmation_maps, + ) + .map_err(|e| { + error!("Relayer: Failure fetching recipient set: {:?}", e); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + let block_header = + NakamotoChainState::get_block_header_by_consensus_hash(chain_state.db(), target_ch) + .map_err(|e| { + error!("Relayer: Failed to get block header for parent tenure: {e:?}"); + NakamotoNodeError::ParentNotFound + })? 
+ .ok_or_else(|| { + error!("Relayer: Failed to find block header for parent tenure"); + NakamotoNodeError::ParentNotFound + })?; + + let parent_block_id = block_header.index_block_hash(); + if parent_block_id != StacksBlockId::new(target_ch, target_bh) { + error!("Relayer: Found block header for parent tenure, but mismatched block id"; + "expected_block_id" => %StacksBlockId::new(target_ch, target_bh), + "found_block_id" => %parent_block_id); + return Err(NakamotoNodeError::UnexpectedChainState); + } + + let Ok(Some(parent_sortition)) = + SortitionDB::get_block_snapshot_consensus(sort_db.conn(), target_ch) + else { + error!("Relayer: Failed to lookup the block snapshot of parent tenure ID"; "tenure_consensus_hash" => %target_ch); + return Err(NakamotoNodeError::ParentNotFound); + }; + + let Ok(Some(target_epoch)) = + SortitionDB::get_stacks_epoch(sort_db.conn(), sort_tip.block_height + 1) + else { + error!("Relayer: Failed to lookup its epoch"; "target_height" => sort_tip.block_height + 1); + return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); + }; + + let parent_block_burn_height = parent_sortition.block_height; + let Ok(Some(parent_winning_tx)) = SortitionDB::get_block_commit( + sort_db.conn(), + &parent_sortition.winning_block_txid, + &parent_sortition.sortition_id, + ) else { + error!("Relayer: Failed to lookup the block commit of parent tenure ID"; "tenure_consensus_hash" => %target_ch); + return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); + }; + + let parent_winning_vtxindex = parent_winning_tx.vtxindex; + + // let burn_fee_cap = self.config.burnchain.burn_fee_cap; + let burn_fee_cap = get_mining_spend_amount(self.globals.get_miner_status()); + let sunset_burn = self.burnchain.expected_sunset_burn( + sort_tip.block_height + 1, + burn_fee_cap, + target_epoch.epoch_id, + ); + let rest_commit = burn_fee_cap - sunset_burn; + + let commit_outs = if !self + .burnchain + .pox_constants + .is_after_pox_sunset_end(sort_tip.block_height, 
target_epoch.epoch_id) + && !self + .burnchain + .is_in_prepare_phase(sort_tip.block_height + 1) + { + RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet()) + } else { + vec![PoxAddress::standard_burn_address(self.config.is_mainnet())] + }; + + // let's commit, but target the current burnchain tip with our modulus + let burn_parent_modulus = u8::try_from(sort_tip.block_height % BURN_BLOCK_MINED_AT_MODULUS) + .map_err(|_| { + error!("Relayer: Block mining modulus is not u8"); + NakamotoNodeError::UnexpectedChainState + })?; + let sender = self.keychain.get_burnchain_signer(); + let key = self + .globals + .get_leader_key_registration_state() + .get_active() + .ok_or_else(|| NakamotoNodeError::NoVRFKeyActive)?; + let op = LeaderBlockCommitOp { + sunset_burn, + block_header_hash: BlockHeaderHash(parent_block_id.0), + burn_fee: rest_commit, + input: (Txid([0; 32]), 0), + apparent_sender: sender, + key_block_ptr: u32::try_from(key.block_height) + .expect("FATAL: burn block height exceeded u32"), + key_vtxindex: u16::try_from(key.op_vtxindex).expect("FATAL: vtxindex exceeded u16"), + memo: vec![STACKS_EPOCH_3_0_MARKER], + new_seed: VRFSeed::from_proof(&parent_vrf_proof), + parent_block_ptr: u32::try_from(parent_block_burn_height) + .expect("FATAL: burn block height exceeded u32"), + parent_vtxindex: u16::try_from(parent_winning_vtxindex) + .expect("FATAL: vtxindex exceeded u16"), + vtxindex: 0, + txid: Txid([0u8; 32]), + block_height: 0, + burn_header_hash: BurnchainHeaderHash::zero(), + burn_parent_modulus, + commit_outs, + }; + + Ok((sort_tip, target_epoch.epoch_id, op)) + } + + /// Create the block miner thread state. 
+ /// Only proceeds if all of the following are true: + /// * the miner is not blocked + /// * last_burn_block corresponds to the canonical sortition DB's chain tip + /// * the time of issuance is sufficiently recent + /// * there are no unprocessed stacks blocks in the staging DB + /// * the relayer has already tried a download scan that included this sortition (which, if a + /// block was found, would have placed it into the staging DB and marked it as + /// unprocessed) + /// * a miner thread is not running already + fn create_block_miner( + &mut self, + registered_key: RegisteredKey, + last_burn_block: BlockSnapshot, + parent_tenure_id: StacksBlockId, + ) -> Result { + if fault_injection_skip_mining(&self.config.node.rpc_bind, last_burn_block.block_height) { + debug!( + "Relayer: fault injection skip mining at block height {}", + last_burn_block.block_height + ); + return Err(NakamotoNodeError::FaultInjection); + } + + let burn_header_hash = last_burn_block.burn_header_hash.clone(); + let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); + + if burn_chain_tip != burn_header_hash { + debug!( + "Relayer: Drop stale RunTenure for {}: current sortition is for {}", + &burn_header_hash, &burn_chain_tip + ); + self.globals.counters.bump_missed_tenures(); + return Err(NakamotoNodeError::MissedMiningOpportunity); + } + + debug!( + "Relayer: Spawn tenure thread"; + "height" => last_burn_block.block_height, + "burn_header_hash" => %burn_header_hash, + ); + + let miner_thread_state = + BlockMinerThread::new(self, registered_key, last_burn_block, parent_tenure_id); + Ok(miner_thread_state) + } + + fn start_new_tenure( + &mut self, + parent_tenure_start: StacksBlockId, + burn_tip: BlockSnapshot, + ) -> Result<(), NakamotoNodeError> { + // when starting a new tenure, block the mining thread if its 
currently running. + // the new mining thread will join it (so that the new mining thread stalls, not the relayer) + let prior_tenure_thread = self.miner_thread.take(); + let vrf_key = self + .globals + .get_leader_key_registration_state() + .get_active() + .ok_or_else(|| { + warn!("Trying to start new tenure, but no VRF key active"); + NakamotoNodeError::NoVRFKeyActive + })?; + let new_miner_state = self.create_block_miner(vrf_key, burn_tip, parent_tenure_start)?; + + let new_miner_handle = std::thread::Builder::new() + .name(format!("miner-{}", self.local_peer.data_url)) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .spawn(move || new_miner_state.run_miner(prior_tenure_thread)) + .map_err(|e| { + error!("Relayer: Failed to start tenure thread: {:?}", &e); + NakamotoNodeError::SpawnError(e) + })?; + + self.miner_thread.replace(new_miner_handle); + + Ok(()) + } + + fn stop_tenure(&mut self) -> Result<(), NakamotoNodeError> { + // when stopping a tenure, block the mining thread if its currently running, then join it. 
+ // do this in a new thread will (so that the new thread stalls, not the relayer) + let Some(prior_tenure_thread) = self.miner_thread.take() else { + return Ok(()); + }; + let globals = self.globals.clone(); + + let stop_handle = std::thread::Builder::new() + .name(format!("tenure-stop-{}", self.local_peer.data_url)) + .spawn(move || BlockMinerThread::stop_miner(&globals, prior_tenure_thread)) + .map_err(|e| { + error!("Relayer: Failed to spawn a stop-tenure thread: {:?}", &e); + NakamotoNodeError::SpawnError(e) + })?; + + self.miner_thread.replace(stop_handle); + + Ok(()) + } + + fn handle_sortition( + &mut self, + consensus_hash: ConsensusHash, + burn_hash: BurnchainHeaderHash, + committed_index_hash: StacksBlockId, + ) -> bool { + let miner_instruction = + self.process_sortition(consensus_hash, burn_hash, committed_index_hash); + + match miner_instruction { + MinerDirective::BeginTenure { + parent_tenure_start, + burnchain_tip, + } => { + let _ = self.start_new_tenure(parent_tenure_start, burnchain_tip); + } + MinerDirective::ContinueTenure { new_burn_view: _ } => { + // TODO: in this case, we eventually want to undergo a tenure + // change to switch to the new burn view, but right now, we will + // simply end our current tenure if it exists + let _ = self.stop_tenure(); + } + MinerDirective::StopTenure => { + let _ = self.stop_tenure(); + } + } + + true + } + + fn issue_block_commit( + &mut self, + tenure_start_ch: ConsensusHash, + tenure_start_bh: BlockHeaderHash, + ) -> Result<(), NakamotoNodeError> { + let (last_committed_at, target_epoch_id, commit) = + self.make_block_commit(&tenure_start_ch, &tenure_start_bh)?; + let mut op_signer = self.keychain.generate_op_signer(); + let txid = self + .bitcoin_controller + .submit_operation( + target_epoch_id, + BlockstackOperationType::LeaderBlockCommit(commit), + &mut op_signer, + 1, + ) + .ok_or_else(|| { + warn!("Failed to submit block-commit bitcoin transaction"); + NakamotoNodeError::BurnchainSubmissionFailed + 
})?; + info!( + "Relayer: Submitted block-commit"; + "parent_consensus_hash" => %tenure_start_ch, + "parent_block_hash" => %tenure_start_bh, + "txid" => %txid, + ); + + self.last_commits.insert(txid, ()); + self.last_committed_at = Some(last_committed_at); + self.globals.counters.bump_naka_submitted_commits(); + + Ok(()) + } + + fn initiative(&mut self) -> Option { + if !self.is_miner { + return None; + } + + // TODO (nakamoto): the miner shouldn't issue either of these directives + // if we're still in IBD! + + // do we need a VRF key registration? + if matches!( + self.globals.get_leader_key_registration_state(), + LeaderKeyRegistrationState::Inactive + ) { + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) + else { + warn!("Failed to fetch sortition tip while needing to register VRF key"); + return None; + }; + return Some(RelayerDirective::RegisterKey(sort_tip)); + } + + // are we still waiting on a pending registration? + if !matches!( + self.globals.get_leader_key_registration_state(), + LeaderKeyRegistrationState::Active(_) + ) { + return None; + } + + // has there been a new sortition + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) + else { + return None; + }; + + let should_commit = if let Some(last_committed_at) = self.last_committed_at.as_ref() { + // if the new sortition tip has a different consesus hash than the last commit, + // issue a new commit + sort_tip.consensus_hash != last_committed_at.consensus_hash + } else { + // if there was no last commit, issue a new commit + true + }; + + let Ok(Some(chain_tip_header)) = NakamotoChainState::get_canonical_block_header( + self.chainstate_ref().db(), + self.sortdb_ref(), + ) else { + info!("No known canonical tip, will issue a genesis block commit"); + return Some(RelayerDirective::NakamotoTenureStartProcessed( + FIRST_BURNCHAIN_CONSENSUS_HASH, + FIRST_STACKS_BLOCK_HASH, + )); + }; + + if should_commit { + // TODO: just use 
`get_block_header_by_consensus_hash`? + let first_block_hash = if chain_tip_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + // if the parent block is a nakamoto block, find the starting block of its tenure + let Ok(Some(first_block)) = + NakamotoChainState::get_nakamoto_tenure_start_block_header( + self.chainstate_ref().db(), + &chain_tip_header.consensus_hash, + ) + else { + warn!("Failure getting the first block of tenure in order to assemble block commit"; + "tenure_consensus_hash" => %chain_tip_header.consensus_hash, + "tip_block_hash" => %chain_tip_header.anchored_header.block_hash()); + return None; + }; + first_block.anchored_header.block_hash() + } else { + // otherwise the parent block is a epoch2 block, just return its hash directly + chain_tip_header.anchored_header.block_hash() + }; + return Some(RelayerDirective::NakamotoTenureStartProcessed( + chain_tip_header.consensus_hash, + first_block_hash, + )); + } + + return None; + } + + /// Main loop of the relayer. + /// Runs in a separate thread. + /// Continuously receives + pub fn main(mut self, relay_rcv: Receiver) { + debug!("relayer thread ID is {:?}", std::thread::current().id()); + + self.next_initiative = Instant::now() + Duration::from_secs(10); + while self.globals.keep_running() { + let directive = if Instant::now() >= self.next_initiative { + self.next_initiative = Instant::now() + Duration::from_secs(10); + self.initiative() + } else { + None + }; + + let Some(timeout) = self.next_initiative.checked_duration_since(Instant::now()) else { + // next_initiative timeout occurred, so go to next loop iteration. 
+ continue; + }; + + let directive = if let Some(directive) = directive { + directive + } else { + match relay_rcv.recv_timeout(timeout) { + Ok(directive) => directive, + // timed out, so go to next loop iteration + Err(RecvTimeoutError::Timeout) => continue, + Err(RecvTimeoutError::Disconnected) => break, + } + }; + + if !self.handle_directive(directive) { + break; + } + } + + // kill miner if it's running + signal_mining_blocked(self.globals.get_miner_status()); + + // set termination flag so other threads die + self.globals.signal_stop(); + + debug!("Relayer exit!"); + } + + /// Top-level dispatcher + pub fn handle_directive(&mut self, directive: RelayerDirective) -> bool { + let continue_running = match directive { + RelayerDirective::HandleNetResult(net_result) => { + debug!("Relayer: directive Handle network result"); + self.process_network_result(net_result); + debug!("Relayer: directive Handled network result"); + true + } + // RegisterKey directives mean that the relayer should try to register a new VRF key. + // These are triggered by the relayer waking up without an active VRF key. + RelayerDirective::RegisterKey(last_burn_block) => { + if !self.is_miner { + return true; + } + debug!("Relayer: directive Register VRF key"); + self.rotate_vrf_and_register(&last_burn_block); + self.globals.counters.bump_blocks_processed(); + debug!("Relayer: directive Registered VRF key"); + true + } + // ProcessTenure directives correspond to a new sortition occurring. 
+ // relayer should invoke `handle_sortition` to determine if they won the sortition, + // and to start their miner, or stop their miner if an active tenure is now ending + RelayerDirective::ProcessTenure(consensus_hash, burn_hash, block_header_hash) => { + if !self.is_miner { + return true; + } + info!("Relayer: directive Process tenures"); + let res = self.handle_sortition( + consensus_hash, + burn_hash, + StacksBlockId(block_header_hash.0), + ); + info!("Relayer: directive Processed tenures"); + res + } + // NakamotoTenureStartProcessed directives mean that a new tenure start has been processed + // These are triggered by the relayer waking up, seeing a new consensus hash *and* a new first tenure block + RelayerDirective::NakamotoTenureStartProcessed(consensus_hash, block_hash) => { + if !self.is_miner { + return true; + } + debug!("Relayer: Nakamoto Tenure Start"); + if let Err(e) = self.issue_block_commit(consensus_hash, block_hash) { + warn!("Relayer failed to issue block commit"; "err" => ?e); + } + debug!("Relayer: Nakamoto Tenure Start"); + true + } + RelayerDirective::RunTenure(..) => { + // No Op: the nakamoto node does not use the RunTenure directive to control its + // miner thread. 
+ true + } + RelayerDirective::Exit => false, + }; + + continue_running + } +} diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 5ef68a4c28..c23bf1fc19 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -142,9 +142,7 @@ use std::collections::{HashMap, VecDeque}; use std::convert::{TryFrom, TryInto}; use std::default::Default; use std::net::SocketAddr; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::mpsc::{Receiver, SyncSender, TrySendError}; -use std::sync::{Arc, Mutex}; +use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; use std::time::Duration; use std::{mem, thread}; @@ -162,15 +160,13 @@ use stacks::chainstate::burn::operations::{ BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; -use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::address::PoxAddress; -use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::miner::{ get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, - MinerStatus, StacksMicroblockBuilder, + StacksMicroblockBuilder, }; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockBuilder, StacksBlockHeader, @@ -210,9 +206,10 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::make_bitcoin_indexer; -use crate::run_loop::neon::{Counters, RunLoop}; +use crate::globals::Globals; +use crate::globals::RelayerDirective; +use crate::run_loop::neon::RunLoop; use 
crate::run_loop::RegisteredKey; -use crate::syncctl::PoxSyncWatchdogComms; use crate::ChainTip; pub const RELAYER_MAX_BUFFER: usize = 100; @@ -256,44 +253,6 @@ struct AssembledAnchorBlock { tenure_begin: u128, } -/// Command types for the relayer thread, issued to it by other threads -pub enum RelayerDirective { - /// Handle some new data that arrived on the network (such as blocks, transactions, and - /// microblocks) - HandleNetResult(NetworkResult), - /// Announce a new sortition. Process and broadcast the block if we won. - ProcessTenure(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash), - /// Try to mine a block - RunTenure(RegisteredKey, BlockSnapshot, u128), // (vrf key, chain tip, time of issuance in ms) - /// Try to register a VRF public key - RegisterKey(BlockSnapshot), - /// Stop the relayer thread - Exit, -} - -/// Inter-thread communication structure, shared between threads -#[derive(Clone)] -pub struct Globals { - /// Last sortition processed - last_sortition: Arc>>, - /// Status of the miner - miner_status: Arc>, - /// Communication link to the coordinator thread - coord_comms: CoordinatorChannels, - /// Unconfirmed transactions (shared between the relayer and p2p threads) - unconfirmed_txs: Arc>, - /// Writer endpoint to the relayer thread - relay_send: SyncSender, - /// Cointer state in the main thread - counters: Counters, - /// Connection to the PoX sync watchdog - sync_comms: PoxSyncWatchdogComms, - /// Global flag to see if we should keep running - pub should_keep_running: Arc, - /// Status of our VRF key registration state (shared between the main thread and the relayer) - leader_key_registration_state: Arc>, -} - /// Miner chain tip, on top of which to build microblocks #[derive(Debug, Clone, PartialEq)] pub struct MinerTip { @@ -327,205 +286,6 @@ impl MinerTip { } } -impl Globals { - pub fn new( - coord_comms: CoordinatorChannels, - miner_status: Arc>, - relay_send: SyncSender, - counters: Counters, - sync_comms: PoxSyncWatchdogComms, - 
should_keep_running: Arc, - ) -> Globals { - Globals { - last_sortition: Arc::new(Mutex::new(None)), - miner_status, - coord_comms, - unconfirmed_txs: Arc::new(Mutex::new(UnconfirmedTxMap::new())), - relay_send, - counters, - sync_comms, - should_keep_running, - leader_key_registration_state: Arc::new(Mutex::new( - LeaderKeyRegistrationState::Inactive, - )), - } - } - - /// Get the last sortition processed by the relayer thread - pub fn get_last_sortition(&self) -> Option { - match self.last_sortition.lock() { - Ok(sort_opt) => sort_opt.clone(), - Err(_) => { - error!("Sortition mutex poisoned!"); - panic!(); - } - } - } - - /// Set the last sortition processed - pub fn set_last_sortition(&self, block_snapshot: BlockSnapshot) { - match self.last_sortition.lock() { - Ok(mut sortition_opt) => { - sortition_opt.replace(block_snapshot); - } - Err(_) => { - error!("Sortition mutex poisoned!"); - panic!(); - } - }; - } - - /// Get the status of the miner (blocked or ready) - pub fn get_miner_status(&self) -> Arc> { - self.miner_status.clone() - } - - /// Get the main thread's counters - pub fn get_counters(&self) -> Counters { - self.counters.clone() - } - - /// Called by the relayer to pass unconfirmed txs to the p2p thread, so the p2p thread doesn't - /// need to do the disk I/O needed to instantiate the unconfirmed state trie they represent. - /// Clears the unconfirmed transactions, and replaces them with the chainstate's. - pub fn send_unconfirmed_txs(&self, chainstate: &StacksChainState) { - if let Some(ref unconfirmed) = chainstate.unconfirmed_state { - match self.unconfirmed_txs.lock() { - Ok(mut txs) => { - txs.clear(); - txs.extend(unconfirmed.mined_txs.clone()); - } - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: unconfirmed tx arc mutex is poisoned: {:?}", &e); - panic!(); - } - }; - } - } - - /// Called by the p2p thread to accept the unconfirmed tx state processed by the relayer. 
- /// Puts the shared unconfirmed transactions to chainstate. - pub fn recv_unconfirmed_txs(&self, chainstate: &mut StacksChainState) { - if let Some(ref mut unconfirmed) = chainstate.unconfirmed_state { - match self.unconfirmed_txs.lock() { - Ok(txs) => { - unconfirmed.mined_txs.clear(); - unconfirmed.mined_txs.extend(txs.clone()); - } - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: unconfirmed arc mutex is poisoned: {:?}", &e); - panic!(); - } - }; - } - } - - /// Signal system-wide stop - pub fn signal_stop(&self) { - self.should_keep_running.store(false, Ordering::SeqCst); - } - - /// Should we keep running? - pub fn keep_running(&self) -> bool { - self.should_keep_running.load(Ordering::SeqCst) - } - - /// Get the handle to the coordinator - pub fn coord(&self) -> &CoordinatorChannels { - &self.coord_comms - } - - /// Get the current leader key registration state. - /// Called from the runloop thread and relayer thread. - fn get_leader_key_registration_state(&self) -> LeaderKeyRegistrationState { - match self.leader_key_registration_state.lock() { - Ok(state) => (*state).clone(), - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: leader key registration mutex is poisoned: {:?}", &e); - panic!(); - } - } - } - - /// Set the initial leader key registration state. - /// Called from the runloop thread when booting up. - fn set_initial_leader_key_registration_state(&self, new_state: LeaderKeyRegistrationState) { - match self.leader_key_registration_state.lock() { - Ok(mut state) => { - *state = new_state; - } - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: leader key registration mutex is poisoned: {:?}", &e); - panic!(); - } - } - } - - /// Advance the leader key registration state to pending, given a txid we just sent. - /// Only the relayer thread calls this. 
- fn set_pending_leader_key_registration(&self, target_block_height: u64, txid: Txid) { - match self.leader_key_registration_state.lock() { - Ok(ref mut leader_key_registration_state) => { - **leader_key_registration_state = - LeaderKeyRegistrationState::Pending(target_block_height, txid); - } - Err(_e) => { - error!("FATAL: failed to lock leader key registration state mutex"); - panic!(); - } - } - } - - /// Advance the leader key registration state to active, given the VRF key registration ops - /// we've discovered in a given snapshot. - /// The runloop thread calls this whenever it processes a sortition. - pub fn try_activate_leader_key_registration( - &self, - burn_block_height: u64, - key_registers: Vec, - ) -> bool { - let mut activated = false; - match self.leader_key_registration_state.lock() { - Ok(ref mut leader_key_registration_state) => { - for op in key_registers.into_iter() { - if let LeaderKeyRegistrationState::Pending(target_block_height, txid) = - **leader_key_registration_state - { - info!( - "Received burnchain block #{} including key_register_op - {}", - burn_block_height, txid - ); - if txid == op.txid { - **leader_key_registration_state = - LeaderKeyRegistrationState::Active(RegisteredKey { - target_block_height, - vrf_public_key: op.public_key, - block_height: u64::from(op.block_height), - op_vtxindex: u32::from(op.vtxindex), - }); - activated = true; - } else { - debug!( - "key_register_op {} does not match our pending op {}", - txid, &op.txid - ); - } - } - } - } - Err(_e) => { - error!("FATAL: failed to lock leader key registration state mutex"); - panic!(); - } - } - activated - } -} - /// Node implementation for both miners and followers. /// This struct is used to set up the node proper and launch the p2p thread and relayer thread. /// It is further used by the main thread to communicate with these two threads. 
@@ -653,7 +413,7 @@ struct ParentStacksBlockInfo { } #[derive(Clone)] -enum LeaderKeyRegistrationState { +pub enum LeaderKeyRegistrationState { /// Not started yet Inactive, /// Waiting for burnchain confirmation @@ -664,6 +424,16 @@ enum LeaderKeyRegistrationState { Active(RegisteredKey), } +impl LeaderKeyRegistrationState { + pub fn get_active(&self) -> Option { + if let Self::Active(registered_key) = self { + Some(registered_key.clone()) + } else { + None + } + } +} + /// Relayer thread /// * accepts network results and stores blocks and microblocks /// * forwards new blocks, microblocks, and transactions to the p2p thread @@ -3407,6 +3177,10 @@ impl RelayerThread { debug!("Relayer: directive Ran tenure"); true } + RelayerDirective::NakamotoTenureStartProcessed(_, _) => { + warn!("Relayer: Nakamoto tenure start notification received while still operating 2.x neon node"); + true + } RelayerDirective::Exit => false, }; if !continue_running { diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index c7aaf87b56..abfbe37c37 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -1,4 +1,5 @@ pub mod helium; +pub mod nakamoto; pub mod neon; use clarity::vm::costs::ExecutionCost; diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs new file mode 100644 index 0000000000..f758a65d33 --- /dev/null +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -0,0 +1,1029 @@ +use std::sync::atomic::AtomicBool; +use std::sync::mpsc::sync_channel; +use std::sync::{Arc, Mutex}; +use std::thread::JoinHandle; +use std::{cmp, thread}; + +use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers}; 
+use stacks::chainstate::coordinator::{ + static_get_canonical_affirmation_map, static_get_heaviest_affirmation_map, + static_get_stacks_tip_affirmation_map, ChainsCoordinator, ChainsCoordinatorConfig, + CoordinatorCommunication, +}; +use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; +use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; +use stacks::core::StacksEpochId; +use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; +use stacks_common::types::PublicKey; +use stacks_common::util::hash::Hash160; +use stacks_common::util::{get_epoch_time_secs, sleep_ms}; +use stx_genesis::GenesisData; + +use super::RunLoopCallbacks; +use crate::burnchains::make_bitcoin_indexer; +use crate::globals::Globals; +use crate::monitoring::start_serving_monitoring_metrics; +use crate::nakamoto_node::{StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; +use crate::neon::RunLoopCounter; +use crate::node::{ + get_account_balances, get_account_lockups, get_names, get_namespaces, + use_test_genesis_chainstate, +}; +use crate::run_loop::neon; +use crate::run_loop::neon::Counters; +use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms}; +use crate::{ + run_loop, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain, +}; + +pub const STDERR: i32 = 2; + +#[cfg(test)] +const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 30; + +#[cfg(not(test))] +const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 300; + +/// Coordinating a node running in neon mode. 
+pub struct RunLoop { + config: Config, + pub callbacks: RunLoopCallbacks, + globals: Option, + counters: Counters, + coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>, + should_keep_running: Arc, + event_dispatcher: EventDispatcher, + pox_watchdog: Option, // can't be instantiated until .start() is called + is_miner: Option, // not known until .start() is called + burnchain: Option, // not known until .start() is called + pox_watchdog_comms: PoxSyncWatchdogComms, + /// NOTE: this is duplicated in self.globals, but it needs to be accessible before globals is + /// instantiated (namely, so the test framework can access it). + miner_status: Arc>, +} + +impl RunLoop { + /// Sets up a runloop and node, given a config. + pub fn new(config: Config) -> Self { + let channels = CoordinatorCommunication::instantiate(); + let should_keep_running = Arc::new(AtomicBool::new(true)); + let pox_watchdog_comms = PoxSyncWatchdogComms::new(should_keep_running.clone()); + let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready( + config.burnchain.burn_fee_cap, + ))); + + let mut event_dispatcher = EventDispatcher::new(); + for observer in config.events_observers.iter() { + event_dispatcher.register_observer(observer); + } + + Self { + config, + globals: None, + coordinator_channels: Some(channels), + callbacks: RunLoopCallbacks::new(), + counters: Counters::new(), + should_keep_running, + event_dispatcher, + pox_watchdog: None, + is_miner: None, + burnchain: None, + pox_watchdog_comms, + miner_status, + } + } + + pub fn get_globals(&self) -> Globals { + self.globals + .clone() + .expect("FATAL: globals not instantiated") + } + + fn set_globals(&mut self, globals: Globals) { + self.globals = Some(globals); + } + + pub fn get_coordinator_channel(&self) -> Option { + self.coordinator_channels.as_ref().map(|x| x.1.clone()) + } + + pub fn get_blocks_processed_arc(&self) -> RunLoopCounter { + self.counters.blocks_processed.clone() + } + + pub fn 
submitted_commits(&self) -> RunLoopCounter { + self.counters.naka_submitted_commits.clone() + } + + pub fn submitted_vrfs(&self) -> RunLoopCounter { + self.counters.naka_submitted_vrfs.clone() + } + + pub fn mined_blocks(&self) -> RunLoopCounter { + self.counters.naka_mined_blocks.clone() + } + + pub fn get_counters(&self) -> Counters { + self.counters.clone() + } + + pub fn config(&self) -> &Config { + &self.config + } + + pub fn get_event_dispatcher(&self) -> EventDispatcher { + self.event_dispatcher.clone() + } + + pub fn is_miner(&self) -> bool { + self.is_miner.unwrap_or(false) + } + + pub fn get_pox_sync_comms(&self) -> PoxSyncWatchdogComms { + self.pox_watchdog_comms.clone() + } + + pub fn get_termination_switch(&self) -> Arc { + self.should_keep_running.clone() + } + + pub fn get_burnchain(&self) -> Burnchain { + self.burnchain + .clone() + .expect("FATAL: tried to get runloop burnchain before calling .start()") + } + + pub fn get_pox_watchdog(&mut self) -> &mut PoxSyncWatchdog { + self.pox_watchdog + .as_mut() + .expect("FATAL: tried to get PoX watchdog before calling .start()") + } + + pub fn get_miner_status(&self) -> Arc> { + self.miner_status.clone() + } + + /// Determine if we're the miner. + /// If there's a network error, then assume that we're not a miner. 
+ fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool { + if self.config.node.miner { + let keychain = Keychain::default(self.config.node.seed.clone()); + let mut op_signer = keychain.generate_op_signer(); + match burnchain.create_wallet_if_dne() { + Err(e) => warn!("Error when creating wallet: {:?}", e), + _ => {} + } + let mut btc_addrs = vec![( + StacksEpochId::Epoch2_05, + // legacy + BitcoinAddress::from_bytes_legacy( + self.config.burnchain.get_bitcoin_network().1, + LegacyBitcoinAddressType::PublicKeyHash, + &Hash160::from_data(&op_signer.get_public_key().to_bytes()).0, + ) + .expect("FATAL: failed to construct legacy bitcoin address"), + )]; + if self.config.miner.segwit { + btc_addrs.push(( + StacksEpochId::Epoch21, + // segwit p2wpkh + BitcoinAddress::from_bytes_segwit_p2wpkh( + self.config.burnchain.get_bitcoin_network().1, + &Hash160::from_data(&op_signer.get_public_key().to_bytes_compressed()).0, + ) + .expect("FATAL: failed to construct segwit p2wpkh address"), + )); + } + + for (epoch_id, btc_addr) in btc_addrs.into_iter() { + info!("Miner node: checking UTXOs at address: {}", &btc_addr); + let utxos = burnchain.get_utxos(epoch_id, &op_signer.get_public_key(), 1, None, 0); + if utxos.is_none() { + warn!("UTXOs not found for {}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {} (importaddress)", btc_addr, btc_addr); + } else { + info!("UTXOs found - will run as a Miner node"); + return true; + } + } + if self.config.node.mock_mining { + info!("No UTXOs found, but configured to mock mine"); + return true; + } else { + return false; + } + } else { + info!("Will run as a Follower node"); + false + } + } + + /// Boot up the stacks chainstate. + /// Instantiate the chainstate and push out the boot receipts to observers + /// This is only public so we can test it. 
+ pub fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState { + let use_test_genesis_data = use_test_genesis_chainstate(&self.config); + + // load up genesis balances + let initial_balances = self + .config + .initial_balances + .iter() + .map(|e| (e.address.clone(), e.amount)) + .collect(); + + // TODO (nakamoto-neon): check if we're trying to setup a self-signing network + // and set the right genesis data + + // instantiate chainstate + let mut boot_data = ChainStateBootData { + initial_balances, + post_flight_callback: None, + first_burnchain_block_hash: burnchain_config.first_block_hash, + first_burnchain_block_height: burnchain_config.first_block_height as u32, + first_burnchain_block_timestamp: burnchain_config.first_block_timestamp, + pox_constants: burnchain_config.pox_constants.clone(), + get_bulk_initial_lockups: Some(Box::new(move || { + get_account_lockups(use_test_genesis_data) + })), + get_bulk_initial_balances: Some(Box::new(move || { + get_account_balances(use_test_genesis_data) + })), + get_bulk_initial_namespaces: Some(Box::new(move || { + get_namespaces(use_test_genesis_data) + })), + get_bulk_initial_names: Some(Box::new(move || get_names(use_test_genesis_data))), + }; + + let (chain_state_db, receipts) = StacksChainState::open_and_exec( + self.config.is_mainnet(), + self.config.burnchain.chain_id, + &self.config.get_chainstate_path_str(), + Some(&mut boot_data), + Some(self.config.node.get_marf_opts()), + ) + .unwrap(); + run_loop::announce_boot_receipts( + &mut self.event_dispatcher, + &chain_state_db, + &burnchain_config.pox_constants, + &receipts, + ); + chain_state_db + } + + /// Instantiate the Stacks chain state and start the chains coordinator thread. + /// Returns the coordinator thread handle, and the receiving end of the coordinator's atlas + /// attachment channel. 
+ fn spawn_chains_coordinator( + &mut self, + burnchain_config: &Burnchain, + coordinator_receivers: CoordinatorReceivers, + miner_status: Arc>, + ) -> JoinHandle<()> { + let use_test_genesis_data = use_test_genesis_chainstate(&self.config); + + // load up genesis Atlas attachments + let mut atlas_config = AtlasConfig::new(self.config.is_mainnet()); + let genesis_attachments = GenesisData::new(use_test_genesis_data) + .read_name_zonefiles() + .into_iter() + .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec())) + .collect(); + atlas_config.genesis_attachments = Some(genesis_attachments); + + let chain_state_db = self.boot_chainstate(burnchain_config); + + // NOTE: re-instantiate AtlasConfig so we don't have to keep the genesis attachments around + let moved_atlas_config = self.config.atlas.clone(); + let moved_config = self.config.clone(); + let moved_burnchain_config = burnchain_config.clone(); + let mut coordinator_dispatcher = self.event_dispatcher.clone(); + let atlas_db = AtlasDB::connect( + moved_atlas_config.clone(), + &self.config.get_atlas_db_file_path(), + true, + ) + .expect("Failed to connect Atlas DB during startup"); + let coordinator_indexer = + make_bitcoin_indexer(&self.config, Some(self.should_keep_running.clone())); + + let coordinator_thread_handle = thread::Builder::new() + .name(format!( + "chains-coordinator-{}", + &moved_config.node.rpc_bind + )) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .spawn(move || { + debug!( + "chains-coordinator thread ID is {:?}", + thread::current().id() + ); + let mut cost_estimator = moved_config.make_cost_estimator(); + let mut fee_estimator = moved_config.make_fee_estimator(); + + let coord_config = ChainsCoordinatorConfig { + always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, + require_affirmed_anchor_blocks: moved_config + .node + .require_affirmed_anchor_blocks, + ..ChainsCoordinatorConfig::new() + }; + ChainsCoordinator::run( + coord_config, + chain_state_db, + 
moved_burnchain_config, + &mut coordinator_dispatcher, + coordinator_receivers, + moved_atlas_config, + cost_estimator.as_deref_mut(), + fee_estimator.as_deref_mut(), + miner_status, + coordinator_indexer, + atlas_db, + ); + }) + .expect("FATAL: failed to start chains coordinator thread"); + + coordinator_thread_handle + } + + /// Start Prometheus logging + fn start_prometheus(&mut self) { + let prometheus_bind = self.config.node.prometheus_bind.clone(); + if let Some(prometheus_bind) = prometheus_bind { + thread::Builder::new() + .name("prometheus".to_string()) + .spawn(move || { + debug!("prometheus thread ID is {:?}", thread::current().id()); + start_serving_monitoring_metrics(prometheus_bind); + }) + .unwrap(); + } + } + + /// Get the sortition DB's highest block height, aligned to a reward cycle boundary, and the + /// highest sortition. + /// Returns (height at rc start, sortition) + fn get_reward_cycle_sortition_db_height( + sortdb: &SortitionDB, + burnchain_config: &Burnchain, + ) -> (u64, BlockSnapshot) { + let (stacks_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) + .expect("BUG: failed to load canonical stacks chain tip hash"); + + let sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_ch) + .expect("BUG: failed to query sortition DB") + { + Some(sn) => sn, + None => { + debug!("No canonical stacks chain tip hash present"); + let sn = SortitionDB::get_first_block_snapshot(&sortdb.conn()) + .expect("BUG: failed to get first-ever block snapshot"); + sn + } + }; + + ( + burnchain_config.reward_cycle_to_block_height( + burnchain_config + .block_height_to_reward_cycle(sn.block_height) + .expect("BUG: snapshot preceeds first reward cycle"), + ), + sn, + ) + } + + /// Wake up and drive stacks block processing if there's been a PoX reorg. 
+ /// Be careful not to saturate calls to announce new stacks blocks, because that will disable + /// mining (which would prevent a miner attempting to fix a hidden PoX anchor block from making + /// progress). + fn drive_pox_reorg_stacks_block_processing( + globals: &Globals, + config: &Config, + burnchain: &Burnchain, + sortdb: &SortitionDB, + last_stacks_pox_reorg_recover_time: &mut u128, + ) { + let delay = cmp::max( + config.node.chain_liveness_poll_time_secs, + cmp::max( + config.miner.first_attempt_time_ms, + config.miner.subsequent_attempt_time_ms, + ) / 1000, + ); + + if *last_stacks_pox_reorg_recover_time + (delay as u128) >= get_epoch_time_secs().into() { + // too soon + return; + } + + // compare stacks and heaviest AMs + let burnchain_db = burnchain + .open_burnchain_db(false) + .expect("FATAL: failed to open burnchain DB"); + + let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: could not read sortition DB"); + + let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); + + let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( + &burnchain, + &indexer, + &burnchain_db, + sortdb, + &sn.sortition_id, + ) { + Ok(am) => am, + Err(e) => { + warn!("Failed to find heaviest affirmation map: {:?}", &e); + return; + } + }; + + let highest_sn = SortitionDB::get_highest_known_burn_chain_tip(sortdb.conn()) + .expect("FATAL: could not read sortition DB"); + + let canonical_burnchain_tip = burnchain_db + .get_canonical_chain_tip() + .expect("FATAL: could not read burnchain DB"); + + let sortition_tip_affirmation_map = + match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { + Ok(am) => am, + Err(e) => { + warn!("Failed to find sortition affirmation map: {:?}", &e); + return; + } + }; + + let stacks_tip_affirmation_map = static_get_stacks_tip_affirmation_map( + &burnchain_db, + sortdb, + &sn.sortition_id, + &sn.canonical_stacks_tip_consensus_hash, + 
&sn.canonical_stacks_tip_hash, + ) + .expect("FATAL: could not query stacks DB"); + + if stacks_tip_affirmation_map.len() < heaviest_affirmation_map.len() + || stacks_tip_affirmation_map + .find_divergence(&heaviest_affirmation_map) + .is_some() + { + // the sortition affirmation map might also be inconsistent, so we'll need to fix that + // (i.e. the underlying sortitions) before we can fix the stacks fork + if sortition_tip_affirmation_map.len() < heaviest_affirmation_map.len() + || sortition_tip_affirmation_map + .find_divergence(&heaviest_affirmation_map) + .is_some() + { + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map); + globals.coord().announce_new_burn_block(); + } else if highest_sn.block_height == sn.block_height + && sn.block_height == canonical_burnchain_tip.block_height + { + // need to force an affirmation reorg because there will be no more burn block + // announcements. + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, burn height {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, sn.block_height); + globals.coord().announce_new_burn_block(); + } + + debug!( + "Drive stacks block processing: possible PoX reorg (stacks tip: {}, heaviest: {})", + &stacks_tip_affirmation_map, &heaviest_affirmation_map + ); + globals.coord().announce_new_stacks_block(); + } else { + debug!( + "Drive stacks block processing: no need (stacks tip: {}, heaviest: {})", + &stacks_tip_affirmation_map, &heaviest_affirmation_map + ); + + // announce a new stacks block to force the chains coordinator + // to wake up anyways. this isn't free, so we have to make sure + // the chain-liveness thread doesn't wake up too often + globals.coord().announce_new_stacks_block(); + } + + *last_stacks_pox_reorg_recover_time = get_epoch_time_secs().into(); + } + + /// Wake up and drive sortition processing if there's been a PoX reorg. 
+ /// Be careful not to saturate calls to announce new burn blocks, because that will disable + /// mining (which would prevent a miner attempting to fix a hidden PoX anchor block from making + /// progress). + /// + /// only call if no in ibd + fn drive_pox_reorg_burn_block_processing( + globals: &Globals, + config: &Config, + burnchain: &Burnchain, + sortdb: &SortitionDB, + chain_state_db: &StacksChainState, + last_burn_pox_reorg_recover_time: &mut u128, + last_announce_time: &mut u128, + ) { + let delay = cmp::max( + config.node.chain_liveness_poll_time_secs, + cmp::max( + config.miner.first_attempt_time_ms, + config.miner.subsequent_attempt_time_ms, + ) / 1000, + ); + + if *last_burn_pox_reorg_recover_time + (delay as u128) >= get_epoch_time_secs().into() { + // too soon + return; + } + + // compare sortition and heaviest AMs + let burnchain_db = burnchain + .open_burnchain_db(false) + .expect("FATAL: failed to open burnchain DB"); + + let highest_sn = SortitionDB::get_highest_known_burn_chain_tip(sortdb.conn()) + .expect("FATAL: could not read sortition DB"); + + let canonical_burnchain_tip = burnchain_db + .get_canonical_chain_tip() + .expect("FATAL: could not read burnchain DB"); + + if canonical_burnchain_tip.block_height > highest_sn.block_height { + // still processing sortitions + test_debug!( + "Drive burn block processing: still processing sortitions ({} > {})", + canonical_burnchain_tip.block_height, + highest_sn.block_height + ); + return; + } + + // NOTE: this could be lower than the highest_sn + let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: could not read sortition DB"); + + let sortition_tip_affirmation_map = + match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { + Ok(am) => am, + Err(e) => { + warn!("Failed to find sortition affirmation map: {:?}", &e); + return; + } + }; + + let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); + + let 
heaviest_affirmation_map = match static_get_heaviest_affirmation_map( + &burnchain, + &indexer, + &burnchain_db, + sortdb, + &sn.sortition_id, + ) { + Ok(am) => am, + Err(e) => { + warn!("Failed to find heaviest affirmation map: {:?}", &e); + return; + } + }; + + let canonical_affirmation_map = match static_get_canonical_affirmation_map( + &burnchain, + &indexer, + &burnchain_db, + sortdb, + &chain_state_db, + &sn.sortition_id, + ) { + Ok(am) => am, + Err(e) => { + warn!("Failed to find canonical affirmation map: {:?}", &e); + return; + } + }; + + if sortition_tip_affirmation_map.len() < heaviest_affirmation_map.len() + || sortition_tip_affirmation_map + .find_divergence(&heaviest_affirmation_map) + .is_some() + || sn.block_height < highest_sn.block_height + { + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, {} = heaviest_affirmation_map.len() + && sortition_tip_affirmation_map.len() <= canonical_affirmation_map.len() + { + if let Some(divergence_rc) = + canonical_affirmation_map.find_divergence(&sortition_tip_affirmation_map) + { + if divergence_rc + 1 >= (heaviest_affirmation_map.len() as u64) { + // we have unaffirmed PoX anchor blocks that are not yet processed in the sortition history + debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {}, heaviest: {}, canonical: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, &canonical_affirmation_map); + globals.coord().announce_new_burn_block(); + globals.coord().announce_new_stacks_block(); + *last_announce_time = get_epoch_time_secs().into(); + } + } + } else { + debug!( + "Drive burn block processing: no need (sortition tip: {}, heaviest: {}, {} JoinHandle<()> { + let config = self.config.clone(); + let burnchain = self.get_burnchain(); + let sortdb = burnchain + .open_sortition_db(true) + .expect("FATAL: could not open sortition DB"); + + let (chain_state_db, _) = StacksChainState::open( + 
config.is_mainnet(), + config.burnchain.chain_id, + &config.get_chainstate_path_str(), + Some(config.node.get_marf_opts()), + ) + .unwrap(); + + let liveness_thread_handle = thread::Builder::new() + .name(format!("chain-liveness-{}", config.node.rpc_bind)) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .spawn(move || { + Self::drive_chain_liveness(globals, config, burnchain, sortdb, chain_state_db) + }) + .expect("FATAL: failed to spawn chain liveness thread"); + + liveness_thread_handle + } + + /// Starts the node runloop. + /// + /// This function will block by looping infinitely. + /// It will start the burnchain (separate thread), set-up a channel in + /// charge of coordinating the new blocks coming from the burnchain and + /// the nodes, taking turns on tenures. + pub fn start(&mut self, burnchain_opt: Option, mut mine_start: u64) { + let (coordinator_receivers, coordinator_senders) = self + .coordinator_channels + .take() + .expect("Run loop already started, can only start once after initialization."); + + neon::RunLoop::setup_termination_handler(self.should_keep_running.clone()); + let mut burnchain = neon::RunLoop::instantiate_burnchain_state( + &self.config, + self.should_keep_running.clone(), + burnchain_opt, + coordinator_senders.clone(), + ); + + let burnchain_config = burnchain.get_burnchain(); + self.burnchain = Some(burnchain_config.clone()); + + // can we mine? + let is_miner = self.check_is_miner(&mut burnchain); + self.is_miner = Some(is_miner); + + // relayer linkup + let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER); + + // set up globals so other subsystems can instantiate off of the runloop state. 
+ let globals = Globals::new( + coordinator_senders, + self.get_miner_status(), + relay_send, + self.counters.clone(), + self.pox_watchdog_comms.clone(), + self.should_keep_running.clone(), + ); + self.set_globals(globals.clone()); + + // have headers; boot up the chains coordinator and instantiate the chain state + let coordinator_thread_handle = self.spawn_chains_coordinator( + &burnchain_config, + coordinator_receivers, + globals.get_miner_status(), + ); + self.start_prometheus(); + + // We announce a new burn block so that the chains coordinator + // can resume prior work and handle eventual unprocessed sortitions + // stored during a previous session. + globals.coord().announce_new_burn_block(); + + // Make sure at least one sortition has happened, and make sure it's globally available + let sortdb = burnchain.sortdb_mut(); + let (rc_aligned_height, sn) = + RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); + + let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height { + // need at least one sortition to happen. 
+ burnchain + .wait_for_sortitions(globals.coord().clone(), sn.block_height + 1) + .expect("Unable to get burnchain tip") + .block_snapshot + } else { + sn + }; + + globals.set_last_sortition(burnchain_tip_snapshot.clone()); + + // Boot up the p2p network and relayer, and figure out how many sortitions we have so far + // (it could be non-zero if the node is resuming from chainstate) + let mut node = StacksNode::spawn(self, globals.clone(), relay_recv); + let liveness_thread = self.spawn_chain_liveness_thread(globals.clone()); + + // Wait for all pending sortitions to process + let burnchain_db = burnchain_config + .open_burnchain_db(false) + .expect("FATAL: failed to open burnchain DB"); + let burnchain_db_tip = burnchain_db + .get_canonical_chain_tip() + .expect("FATAL: failed to query burnchain DB"); + let mut burnchain_tip = burnchain + .wait_for_sortitions(globals.coord().clone(), burnchain_db_tip.block_height) + .expect("Unable to get burnchain tip"); + + // Start the runloop + debug!("Runloop: Begin run loop"); + self.counters.bump_blocks_processed(); + + let mut sortition_db_height = rc_aligned_height; + let mut burnchain_height = sortition_db_height; + let mut num_sortitions_in_last_cycle; + + // prepare to fetch the first reward cycle! + let mut target_burnchain_block_height = cmp::min( + burnchain_config.reward_cycle_to_block_height( + burnchain_config + .block_height_to_reward_cycle(burnchain_height) + .expect("BUG: block height is not in a reward cycle") + + 1, + ), + burnchain.get_headers_height() - 1, + ); + + debug!( + "Runloop: Begin main runloop starting a burnchain block {}", + sortition_db_height + ); + + let mut last_tenure_sortition_height = 0; + + loop { + if !globals.keep_running() { + // The p2p thread relies on the same atomic_bool, it will + // discontinue its execution after completing its ongoing runloop epoch. 
+ info!("Terminating p2p process"); + info!("Terminating relayer"); + info!("Terminating chains-coordinator"); + + globals.coord().stop_chains_coordinator(); + coordinator_thread_handle.join().unwrap(); + node.join(); + liveness_thread.join().unwrap(); + + info!("Exiting stacks-node"); + break; + } + + let remote_chain_height = burnchain.get_headers_height() - 1; + + // wait for the p2p state-machine to do at least one pass + debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); + + let ibd = false; + + // calculate burnchain sync percentage + let percent: f64 = if remote_chain_height > 0 { + burnchain_tip.block_snapshot.block_height as f64 / remote_chain_height as f64 + } else { + 0.0 + }; + + // Download each burnchain block and process their sortitions. This, in turn, will + // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and + // process them. This loop runs for one reward cycle, so that the next pass of the + // runloop will cause the PoX sync watchdog to wait until it believes that the node has + // obtained all the Stacks blocks it can. 
+ debug!( + "Runloop: Download burnchain blocks up to reward cycle #{} (height {})", + burnchain_config + .block_height_to_reward_cycle(target_burnchain_block_height) + .expect("FATAL: target burnchain block height does not have a reward cycle"), + target_burnchain_block_height; + "total_burn_sync_percent" => %percent, + "local_burn_height" => burnchain_tip.block_snapshot.block_height, + "remote_tip_height" => remote_chain_height + ); + + loop { + if !globals.keep_running() { + break; + } + + let (next_burnchain_tip, tip_burnchain_height) = + match burnchain.sync(Some(target_burnchain_block_height)) { + Ok(x) => x, + Err(e) => { + warn!("Runloop: Burnchain controller stopped: {}", e); + continue; + } + }; + + // *now* we know the burnchain height + burnchain_tip = next_burnchain_tip; + burnchain_height = tip_burnchain_height; + + let sortition_tip = &burnchain_tip.block_snapshot.sortition_id; + let next_sortition_height = burnchain_tip.block_snapshot.block_height; + + if next_sortition_height != last_tenure_sortition_height { + info!( + "Runloop: Downloaded burnchain blocks up to height {}; target height is {}; remote_chain_height = {} next_sortition_height = {}, sortition_db_height = {}", + burnchain_height, target_burnchain_block_height, remote_chain_height, next_sortition_height, sortition_db_height + ); + } + + if next_sortition_height > sortition_db_height { + debug!( + "Runloop: New burnchain block height {} > {}", + next_sortition_height, sortition_db_height + ); + + let mut sort_count = 0; + + debug!("Runloop: block mining until we process all sortitions"); + signal_mining_blocked(globals.get_miner_status()); + + // first, let's process all blocks in (sortition_db_height, next_sortition_height] + for block_to_process in (sortition_db_height + 1)..(next_sortition_height + 1) { + // stop mining so we can advance the sortition DB and so our + // ProcessTenure() directive (sent by relayer_sortition_notify() below) + // will be unblocked. 
+ + let block = { + let ic = burnchain.sortdb_ref().index_conn(); + SortitionDB::get_ancestor_snapshot(&ic, block_to_process, sortition_tip) + .unwrap() + .expect( + "Failed to find block in fork processed by burnchain indexer", + ) + }; + if block.sortition { + sort_count += 1; + } + + let sortition_id = &block.sortition_id; + + // Have the node process the new block, that can include, or not, a sortition. + node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd); + + // Now, tell the relayer to check if it won a sortition during this block, + // and, if so, to process and advertize the block. This is basically a + // no-op during boot-up. + // + // _this will block if the relayer's buffer is full_ + if !node.relayer_burnchain_notify() { + // relayer hung up, exit. + error!("Runloop: Block relayer and miner hung up, exiting."); + return; + } + } + + debug!("Runloop: enable miner after processing sortitions"); + signal_mining_ready(globals.get_miner_status()); + + num_sortitions_in_last_cycle = sort_count; + debug!( + "Runloop: Synchronized sortitions up to block height {} from {} (chain tip height is {}); {} sortitions", + next_sortition_height, sortition_db_height, burnchain_height, num_sortitions_in_last_cycle; + ); + + sortition_db_height = next_sortition_height; + } else if ibd { + // drive block processing after we reach the burnchain tip. + // we may have downloaded all the blocks already, + // so we can't rely on the relayer alone to + // drive it. + globals.coord().announce_new_stacks_block(); + } + + if burnchain_height >= target_burnchain_block_height + || burnchain_height >= remote_chain_height + { + break; + } + } + + // advance one reward cycle at a time. + // If we're still downloading, then this is simply target_burnchain_block_height + reward_cycle_len. 
+ // Otherwise, this is burnchain_tip + reward_cycle_len + let next_target_burnchain_block_height = cmp::min( + burnchain_config.reward_cycle_to_block_height( + burnchain_config + .block_height_to_reward_cycle(target_burnchain_block_height) + .expect("FATAL: burnchain height before system start") + + 1, + ), + remote_chain_height, + ); + + debug!("Runloop: Advance target burnchain block height from {} to {} (sortition height {})", target_burnchain_block_height, next_target_burnchain_block_height, sortition_db_height); + target_burnchain_block_height = next_target_burnchain_block_height; + + if sortition_db_height >= burnchain_height && !ibd { + let canonical_stacks_tip_height = + SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn()) + .map(|snapshot| snapshot.canonical_stacks_tip_height) + .unwrap_or(0); + if canonical_stacks_tip_height < mine_start { + info!( + "Runloop: Synchronized full burnchain, but stacks tip height is {}, and we are trying to boot to {}, not mining until reaching chain tip", + canonical_stacks_tip_height, + mine_start + ); + } else { + // once we've synced to the chain tip once, don't apply this check again. + // this prevents a possible corner case in the event of a PoX fork. + mine_start = 0; + + // at tip, and not downloading. proceed to mine. + if last_tenure_sortition_height != sortition_db_height { + info!( + "Runloop: Synchronized full burnchain up to height {}. 
Proceeding to mine blocks", + sortition_db_height + ); + last_tenure_sortition_height = sortition_db_height; + } + } + } + } + } +} diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index c9368e9e3a..c10c9b88c3 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -31,8 +31,9 @@ use stx_genesis::GenesisData; use super::RunLoopCallbacks; use crate::burnchains::make_bitcoin_indexer; +use crate::globals::Globals; use crate::monitoring::start_serving_monitoring_metrics; -use crate::neon_node::{Globals, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; +use crate::neon_node::{StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; use crate::node::{ get_account_balances, get_account_lockups, get_names, get_namespaces, use_test_genesis_chainstate, @@ -63,6 +64,10 @@ pub struct Counters { pub missed_tenures: RunLoopCounter, pub missed_microblock_tenures: RunLoopCounter, pub cancelled_commits: RunLoopCounter, + + pub naka_submitted_vrfs: RunLoopCounter, + pub naka_submitted_commits: RunLoopCounter, + pub naka_mined_blocks: RunLoopCounter, } impl Counters { @@ -74,6 +79,9 @@ impl Counters { missed_tenures: RunLoopCounter::new(AtomicU64::new(0)), missed_microblock_tenures: RunLoopCounter::new(AtomicU64::new(0)), cancelled_commits: RunLoopCounter::new(AtomicU64::new(0)), + naka_submitted_vrfs: RunLoopCounter::new(AtomicU64::new(0)), + naka_submitted_commits: RunLoopCounter::new(AtomicU64::new(0)), + naka_mined_blocks: RunLoopCounter::new(AtomicU64::new(0)), } } @@ -85,6 +93,9 @@ impl Counters { missed_tenures: (), missed_microblock_tenures: (), cancelled_commits: (), + naka_submitted_vrfs: (), + naka_submitted_commits: (), + naka_mined_blocks: (), } } @@ -124,6 +135,18 @@ impl Counters { Counters::inc(&self.cancelled_commits); } + pub fn bump_naka_submitted_vrfs(&self) { + Counters::inc(&self.naka_submitted_vrfs); + } + + pub fn bump_naka_submitted_commits(&self) { 
+ Counters::inc(&self.naka_submitted_commits); + } + + pub fn bump_naka_mined_blocks(&self) { + Counters::inc(&self.naka_mined_blocks); + } + pub fn set_microblocks_processed(&self, value: u64) { Counters::set(&self.microblocks_processed, value) } @@ -251,7 +274,7 @@ impl RunLoop { } pub fn get_termination_switch(&self) -> Arc { - self.get_globals().should_keep_running.clone() + self.should_keep_running.clone() } pub fn get_burnchain(&self) -> Burnchain { @@ -272,8 +295,7 @@ impl RunLoop { /// Set up termination handler. Have a signal set the `should_keep_running` atomic bool to /// false. Panics of called more than once. - fn setup_termination_handler(&self) { - let keep_running_writer = self.should_keep_running.clone(); + pub fn setup_termination_handler(keep_running_writer: Arc) { let install = termination::set_handler(move |sig_id| match sig_id { SignalId::Bus => { let msg = "Caught SIGBUS; crashing immediately and dumping core\n"; @@ -355,17 +377,18 @@ impl RunLoop { /// Instantiate the burnchain client and databases. /// Fetches headers and instantiates the burnchain. /// Panics on failure. - fn instantiate_burnchain_state( - &mut self, + pub fn instantiate_burnchain_state( + config: &Config, + should_keep_running: Arc, burnchain_opt: Option, coordinator_senders: CoordinatorChannels, ) -> BitcoinRegtestController { // Initialize and start the burnchain. 
let mut burnchain_controller = BitcoinRegtestController::with_burnchain( - self.config.clone(), + config.clone(), Some(coordinator_senders), burnchain_opt, - Some(self.should_keep_running.clone()), + Some(should_keep_running.clone()), ); let burnchain = burnchain_controller.get_burnchain(); @@ -377,9 +400,9 @@ impl RunLoop { // Upgrade chainstate databases if they exist already match migrate_chainstate_dbs( &epochs, - &self.config.get_burn_db_file_path(), - &self.config.get_chainstate_path_str(), - Some(self.config.node.get_marf_opts()), + &config.get_burn_db_file_path(), + &config.get_chainstate_path_str(), + Some(config.node.get_marf_opts()), ) { Ok(_) => {} Err(coord_error::DBError(db_error::TooOldForEpoch)) => { @@ -951,9 +974,13 @@ impl RunLoop { .take() .expect("Run loop already started, can only start once after initialization."); - self.setup_termination_handler(); - let mut burnchain = - self.instantiate_burnchain_state(burnchain_opt, coordinator_senders.clone()); + Self::setup_termination_handler(self.should_keep_running.clone()); + let mut burnchain = Self::instantiate_burnchain_state( + &self.config, + self.should_keep_running.clone(), + burnchain_opt, + coordinator_senders.clone(), + ); let burnchain_config = burnchain.get_burnchain(); self.burnchain = Some(burnchain_config.clone()); diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index fdb09dd22c..454e92b50b 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -16,6 +16,7 @@ use crate::helium::RunLoop; use crate::tests::to_addr; use crate::Config; +#[derive(Debug)] pub enum BitcoinCoreError { SpawnFailed(String), } diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index faea7f99d9..8ac9fcff53 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -43,6 +43,7 @@ mod epoch_23; mod epoch_24; mod 
integrations; mod mempool; +mod nakamoto_integrations; pub mod neon_integrations; mod signer; mod stackerdb; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs new file mode 100644 index 0000000000..efa36ea1e5 --- /dev/null +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -0,0 +1,322 @@ +use clarity::vm::types::PrincipalData; +use stacks::burnchains::MagicBytes; +use stacks::chainstate::nakamoto::NakamotoChainState; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::core::{ + StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, + PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, + PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, +}; +use stacks_common::address::AddressHashMode; +use stacks_common::consts::STACKS_EPOCH_MAX; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::Secp256k1PrivateKey; +use std::sync::atomic::Ordering; +use std::time::{Duration, Instant}; +use std::{env, thread}; + +use super::bitcoin_regtest::BitcoinCoreController; +use crate::mockamoto::signer::SelfSigner; +use crate::run_loop::nakamoto; +use crate::tests::neon_integrations::{ + next_block_and_wait, run_until_burnchain_height, submit_tx, wait_for_runloop, +}; +use crate::{ + neon, tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain, +}; +use lazy_static::lazy_static; + +lazy_static! 
{ + pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: BLOCK_LIMIT_MAINNET_10.clone(), + network_epoch: PEER_VERSION_EPOCH_1_0 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 1, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_0 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 1, + end_height: 2, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_05 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 2, + end_height: 3, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_1 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: 3, + end_height: 4, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: 4, + end_height: 5, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_3 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: 5, + end_height: 6, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_4 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: 6, + end_height: 220, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: 220, + end_height: STACKS_EPOCH_MAX, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0 + }, + ]; +} + +/// Return a working nakamoto-neon config and the miner's bitcoin address to fund +pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress) { + let mut conf = super::new_test_conf(); + conf.burnchain.mode = "nakamoto-neon".into(); + + // tests can override 
this, but these tests run with the NAKAMOTO_INTEGRATION_EPOCHS schedule (epoch 3.0 at height 220) by default
+    conf.burnchain.epochs = Some(NAKAMOTO_INTEGRATION_EPOCHS.to_vec());
+
+    if let Some(seed) = seed {
+        conf.node.seed = seed.to_vec();
+    }
+
+    // instantiate the keychain so we can fund the bitcoin op signer
+    let keychain = Keychain::default(conf.node.seed.clone());
+
+    let mining_key = Secp256k1PrivateKey::from_seed(&[1]);
+    conf.miner.mining_key = Some(mining_key);
+    conf.miner.self_signing_key = Some(SelfSigner::single_signer());
+
+    conf.node.miner = true;
+    conf.node.wait_time_for_microblocks = 500;
+    conf.burnchain.burn_fee_cap = 20000;
+
+    conf.burnchain.username = Some("neon-tester".into());
+    conf.burnchain.password = Some("neon-tester-pass".into());
+    conf.burnchain.peer_host = "127.0.0.1".into();
+    conf.burnchain.local_mining_public_key =
+        Some(keychain.generate_op_signer().get_public_key().to_hex());
+    conf.burnchain.commit_anchor_block_within = 0;
+
+    // test to make sure config file parsing is correct
+    let mut cfile = ConfigFile::xenon();
+    cfile.node.as_mut().map(|node| node.bootstrap_node.take());
+
+    if let Some(burnchain) = cfile.burnchain.as_mut() {
+        burnchain.peer_host = Some("127.0.0.1".to_string());
+    }
+
+    conf.burnchain.magic_bytes = MagicBytes::from(['T' as u8, '3' as u8].as_ref());
+    conf.burnchain.poll_time_secs = 1;
+    conf.node.pox_sync_sample_secs = 0;
+
+    conf.miner.min_tx_fee = 1;
+    conf.miner.first_attempt_time_ms = i64::max_value() as u64;
+    conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64;
+
+    // if there's just one node, then this must be true for tests to pass
+    conf.miner.wait_for_block_download = false;
+
+    conf.node.mine_microblocks = false;
+    conf.miner.microblock_attempt_time_ms = 10;
+    conf.node.microblock_frequency = 0;
+    conf.node.wait_time_for_blocks = 200;
+
+    let miner_account = keychain.origin_address(conf.is_mainnet()).unwrap();
+
+    conf.burnchain.pox_prepare_length = Some(5);
+    conf.burnchain.pox_reward_length = Some(20);
+
+    (conf,
miner_account) +} + +pub fn next_block_and( + btc_controller: &mut BitcoinRegtestController, + timeout_secs: u64, + mut check: F, +) -> Result<(), String> +where + F: FnMut() -> Result, +{ + eprintln!("Issuing bitcoin block"); + btc_controller.build_next_block(1); + let start = Instant::now(); + while !check()? { + if start.elapsed() > Duration::from_secs(timeout_secs) { + error!("Timed out waiting for block to process, trying to continue test"); + return Err("Timed out".into()); + } + thread::sleep(Duration::from_millis(100)); + } + Ok(()) +} + +#[test] +#[ignore] +fn simple_neon_integration() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let stacker_sk = Secp256k1PrivateKey::new(); + let stacker_address = tests::to_addr(&stacker_sk); + naka_conf.add_initial_balance( + PrincipalData::from(stacker_address.clone()).to_string(), + 100_000_000_000_000, + ); + + let epoch_2_conf = naka_conf.clone(); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + btc_regtest_controller.bootstrap_chain(201); + + info!("Chain bootstrapped to bitcoin block 201, starting a epoch-2x miner"); + + let mut run_loop = neon::RunLoop::new(epoch_2_conf.clone()); + + let epoch_2_stopper = run_loop.get_termination_switch(); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let epoch_2_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // first mined stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // 
stack enough to activate pox-4 + let pox_addr_tuple = clarity::vm::tests::execute(&format!( + "{{ hashbytes: 0x{}, version: 0x{:02x} }}", + to_hex(&[0; 20]), + AddressHashMode::SerializeP2PKH as u8, + )); + + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(99_000_000_000_000), + pox_addr_tuple, + clarity::vm::Value::UInt(205), + clarity::vm::Value::UInt(12), + ], + ); + + submit_tx(&http_origin, &stacking_tx); + + run_until_burnchain_height( + &mut btc_regtest_controller, + &blocks_processed, + 219, + &epoch_2_conf, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + epoch_2_stopper.store(false, Ordering::SeqCst); + + epoch_2_thread.join().unwrap(); + + let mut run_loop = nakamoto::RunLoop::new(naka_conf.clone()); + let epoch_3_stopper = run_loop.get_termination_switch(); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let vrfs_submitted = run_loop.submitted_vrfs(); + let commits_submitted = run_loop.submitted_commits(); + let blocks_mined = run_loop.submitted_commits(); + let coord_channel = run_loop.get_coordinator_channel().unwrap(); + + let epoch_3_thread = thread::spawn(move || run_loop.start(None, 0)); + + wait_for_runloop(&blocks_processed); + info!("Nakamoto miner started..."); + // first block wakes up the run loop, wait until a key registration has been submitted. 
+ next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + let blocks_processed_before_mining = coord_channel.get_stacks_blocks_processed(); + + // this block should perform the sortition, wait until a block is mined + next_block_and(&mut btc_regtest_controller, 60, || { + let mined_count = blocks_mined.load(Ordering::SeqCst); + Ok(mined_count >= 1) + }) + .unwrap(); + + // wait until the coordinator has processed the new block(s) + while coord_channel.get_stacks_blocks_processed() <= blocks_processed_before_mining { + thread::sleep(Duration::from_secs(1)); + } + + // load the chain tip, and assert that it is a nakamoto block + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + + coord_channel.stop_chains_coordinator(); + + epoch_3_stopper.store(false, Ordering::SeqCst); + epoch_3_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index b1e68d26d7..455e414208 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -483,7 +483,7 @@ pub mod test_observer { } } -const 
PANIC_TIMEOUT_SECS: u64 = 600; +const PANIC_TIMEOUT_SECS: u64 = 30; /// Returns `false` on a timeout, true otherwise. pub fn next_block_and_wait( @@ -556,7 +556,7 @@ pub fn next_block_and_iterate( /// reaches *exactly* `target_height`. /// /// Returns `false` if `next_block_and_wait` times out. -fn run_until_burnchain_height( +pub fn run_until_burnchain_height( btc_regtest_controller: &mut BitcoinRegtestController, blocks_processed: &Arc, target_height: u64, From 7f0e1d4ad31169691dcf9a17dcf8242e1fcb9263 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 7 Dec 2023 14:13:34 -0600 Subject: [PATCH 21/41] expand first nakamoto-neon test, update block commit logic to issue commits at tenure_id changes, cargo fmt-stacks --- .../burnchains/bitcoin_regtest_controller.rs | 10 +- testnet/stacks-node/src/globals.rs | 13 +- testnet/stacks-node/src/nakamoto_node.rs | 14 +- .../stacks-node/src/nakamoto_node/miner.rs | 26 +- testnet/stacks-node/src/nakamoto_node/peer.rs | 19 +- .../stacks-node/src/nakamoto_node/relayer.rs | 95 +++---- testnet/stacks-node/src/neon_node.rs | 3 +- .../src/tests/nakamoto_integrations.rs | 255 ++++++++++++++---- 8 files changed, 285 insertions(+), 150 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index ad83dd6f57..0ed1bb0e03 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -8,7 +8,8 @@ use async_h1::client; use async_std::io::ReadExt; use async_std::net::TcpStream; use base64::encode; - +#[cfg(test)] +use clarity::vm::types::PrincipalData; use http_types::{Method, Request, Url}; use serde::Serialize; use serde_json::json; @@ -51,15 +52,12 @@ use stacks_common::deps_common::bitcoin::network::serialize::deserialize as btc_ use stacks_common::deps_common::bitcoin::network::serialize::RawEncoder; use 
stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; use stacks_common::types::chainstate::BurnchainHeaderHash; +#[cfg(test)] +use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::{hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; -#[cfg(test)] -use clarity::vm::types::PrincipalData; -#[cfg(test)] -use stacks_common::types::chainstate::StacksAddress; - use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index acace012f8..7e9e47a8fe 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -1,8 +1,6 @@ -use std::sync::atomic::AtomicBool; -use std::sync::atomic::Ordering; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::SyncSender; -use std::sync::Arc; -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; use stacks::burnchains::Txid; use stacks::chainstate::burn::operations::LeaderKeyRegisterOp; @@ -12,16 +10,13 @@ use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::MinerStatus; use stacks::net::NetworkResult; -use stacks_common::types::chainstate::BlockHeaderHash; -use stacks_common::types::chainstate::BurnchainHeaderHash; -use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash}; use crate::neon::Counters; +use crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::RegisteredKey; use crate::syncctl::PoxSyncWatchdogComms; -use crate::neon_node::LeaderKeyRegistrationState; - /// Command types for the relayer thread, issued to it by other threads pub enum RelayerDirective { /// Handle some new data that arrived on the 
network (such as blocks, transactions, and diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 1c71b09045..de0d04cfb5 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -20,13 +20,6 @@ use std::sync::mpsc::Receiver; use std::thread; use std::thread::JoinHandle; -use super::{Config, EventDispatcher, Keychain}; -use crate::burnchains::bitcoin_regtest_controller::addr2str; -use crate::globals::Globals; -use crate::globals::RelayerDirective; -use crate::neon_node::LeaderKeyRegistrationState; -use crate::run_loop::nakamoto::RunLoop; -use crate::run_loop::RegisteredKey; use clarity::vm::ast::ASTRules; use clarity::vm::types::QualifiedContractIdentifier; use stacks::burnchains::{Burnchain, BurnchainSigner, Txid}; @@ -52,6 +45,13 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::secp256k1::Secp256k1PrivateKey; +use super::{Config, EventDispatcher, Keychain}; +use crate::burnchains::bitcoin_regtest_controller::addr2str; +use crate::globals::{Globals, RelayerDirective}; +use crate::neon_node::LeaderKeyRegistrationState; +use crate::run_loop::nakamoto::RunLoop; +use crate::run_loop::RegisteredKey; + pub mod miner; pub mod peer; pub mod relayer; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index cb9942d451..2d2d88293a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -18,14 +18,6 @@ use std::thread; use std::thread::JoinHandle; use std::time::Instant; -use super::relayer::RelayerThread; -use super::Error as NakamotoNodeError; -use super::{Config, EventDispatcher, Keychain}; -use crate::globals::Globals; -use crate::mockamoto::signer::SelfSigner; -use crate::nakamoto_node::VRF_MOCK_MINER_KEY; -use crate::run_loop::RegisteredKey; -use crate::ChainTip; use 
clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, BurnchainParameters}; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -33,12 +25,9 @@ use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureStart}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; -use stacks::chainstate::stacks::Error as ChainstateError; -use stacks::chainstate::stacks::TenureChangeCause; -use stacks::chainstate::stacks::TenureChangePayload; -use stacks::chainstate::stacks::ThresholdSignature; use stacks::chainstate::stacks::{ - CoinbasePayload, StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, + CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, + TenureChangeCause, TenureChangePayload, ThresholdSignature, TransactionAnchorMode, TransactionPayload, TransactionVersion, }; use stacks::core::mempool::MemPoolDB; @@ -46,11 +35,18 @@ use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; -use stacks_common::types::PrivateKey; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFProof; +use super::relayer::RelayerThread; +use super::{Config, Error as NakamotoNodeError, EventDispatcher, Keychain}; +use crate::globals::Globals; +use crate::mockamoto::signer::SelfSigner; +use crate::nakamoto_node::VRF_MOCK_MINER_KEY; +use crate::run_loop::RegisteredKey; +use crate::ChainTip; + pub enum MinerDirective { /// The miner won sortition so they should begin a new tenure BeginTenure { diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs 
b/testnet/stacks-node/src/nakamoto_node/peer.rs index 8fe688972e..9f2a37c50d 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -13,45 +13,32 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::cmp; use std::collections::VecDeque; - use std::default::Default; use std::net::SocketAddr; use std::sync::mpsc::TrySendError; - -use std::thread; use std::time::Duration; +use std::{cmp, thread}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::PoxConstants; use stacks::chainstate::burn::db::sortdb::SortitionDB; - use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::signal_mining_blocked; - use stacks::core::mempool::MemPoolDB; - use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; use stacks::cost_estimates::{CostEstimator, FeeEstimator, UnitEstimator}; - use stacks::net::dns::{DNSClient, DNSResolver}; use stacks::net::p2p::PeerNetwork; - use stacks::net::RPCHandlerArgs; - use stacks_common::util::hash::Sha256Sum; +use super::open_chainstate_with_faults; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::Globals; -use crate::globals::RelayerDirective; - +use crate::globals::{Globals, RelayerDirective}; use crate::run_loop::nakamoto::RunLoop; - use crate::{Config, EventDispatcher}; -use super::open_chainstate_with_faults; - /// Thread that runs the network state machine, handling both p2p and http requests. pub struct PeerThread { /// Node config diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index a90b17866f..6aa4568d0b 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -13,6 +13,11 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::collections::HashMap; +use std::sync::mpsc::{Receiver, RecvTimeoutError}; +use std::thread::JoinHandle; +use std::time::{Duration, Instant}; + use stacks::burnchains::{Burnchain, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::leader_block_commit::{ @@ -30,9 +35,9 @@ use stacks::chainstate::stacks::miner::{ get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, }; use stacks::core::mempool::MemPoolDB; -use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; -use stacks::core::FIRST_STACKS_BLOCK_HASH; -use stacks::core::STACKS_EPOCH_3_0_MARKER; +use stacks::core::{ + FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER, +}; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; use stacks::monitoring::increment_stx_blocks_mined_counter; @@ -46,21 +51,13 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; -use std::collections::HashMap; -use std::sync::mpsc::Receiver; -use std::sync::mpsc::RecvTimeoutError; -use std::thread::JoinHandle; -use std::time::Duration; -use std::time::Instant; -use super::Error as NakamotoNodeError; use super::{ fault_injection_skip_mining, open_chainstate_with_faults, BlockCommits, Config, - EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE, + Error as NakamotoNodeError, EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE, }; use crate::burnchains::BurnchainController; -use crate::globals::Globals; -use crate::globals::RelayerDirective; +use crate::globals::{Globals, RelayerDirective}; use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective}; use crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::nakamoto::RunLoop; @@ -127,8 +124,9 @@ pub struct RelayerThread { /// to check if it should issue a block commit or try to register a VRF key 
next_initiative: Instant, is_miner: bool, - /// This is the last snapshot in which the relayer committed - last_committed_at: Option, + /// This is the last snapshot in which the relayer committed, and the parent_tenure_id + /// which was committed to + last_committed: Option<(BlockSnapshot, StacksBlockId)>, } impl RelayerThread { @@ -193,7 +191,7 @@ impl RelayerThread { miner_thread: None, is_miner, next_initiative: Instant::now() + Duration::from_secs(10), - last_committed_at: None, + last_committed: None, } } @@ -759,7 +757,10 @@ impl RelayerThread { ); self.last_commits.insert(txid, ()); - self.last_committed_at = Some(last_committed_at); + self.last_committed = Some(( + last_committed_at, + StacksBlockId::new(&tenure_start_ch, &tenure_start_bh), + )); self.globals.counters.bump_naka_submitted_commits(); Ok(()) @@ -800,7 +801,10 @@ impl RelayerThread { return None; }; - let should_commit = if let Some(last_committed_at) = self.last_committed_at.as_ref() { + // check if the burnchain changed, if so, we should issue a commit. + // if not, we may still want to update a commit if we've received a new tenure start block + let burnchain_changed = if let Some((last_committed_at, ..)) = self.last_committed.as_ref() + { // if the new sortition tip has a different consesus hash than the last commit, // issue a new commit sort_tip.consensus_hash != last_committed_at.consensus_hash @@ -820,37 +824,38 @@ impl RelayerThread { )); }; - if should_commit { - // TODO: just use `get_block_header_by_consensus_hash`? 
- let first_block_hash = if chain_tip_header - .anchored_header - .as_stacks_nakamoto() - .is_some() - { - // if the parent block is a nakamoto block, find the starting block of its tenure - let Ok(Some(first_block)) = - NakamotoChainState::get_nakamoto_tenure_start_block_header( - self.chainstate_ref().db(), - &chain_tip_header.consensus_hash, - ) - else { - warn!("Failure getting the first block of tenure in order to assemble block commit"; - "tenure_consensus_hash" => %chain_tip_header.consensus_hash, - "tip_block_hash" => %chain_tip_header.anchored_header.block_hash()); - return None; - }; - first_block.anchored_header.block_hash() + // get the starting block of the chain tip's tenure + let Ok(Some(chain_tip_tenure_start)) = + NakamotoChainState::get_block_header_by_consensus_hash( + self.chainstate_ref().db(), + &chain_tip_header.consensus_hash, + ) + else { + warn!("Failure getting the first block of tenure in order to assemble block commit"; + "tenure_consensus_hash" => %chain_tip_header.consensus_hash, + "tip_block_hash" => %chain_tip_header.anchored_header.block_hash()); + return None; + }; + + let chain_tip_tenure_id = chain_tip_tenure_start.index_block_hash(); + let should_commit = burnchain_changed + || if let Some((_, last_committed_tenure_id)) = self.last_committed.as_ref() { + // if the tenure ID of the chain tip has changed, issue a new commit + last_committed_tenure_id != &chain_tip_tenure_id } else { - // otherwise the parent block is a epoch2 block, just return its hash directly - chain_tip_header.anchored_header.block_hash() + // should be unreachable, but either way, if + // `self.last_committed` is None, we should issue a commit + true }; - return Some(RelayerDirective::NakamotoTenureStartProcessed( + + if should_commit { + Some(RelayerDirective::NakamotoTenureStartProcessed( chain_tip_header.consensus_hash, - first_block_hash, - )); + chain_tip_header.anchored_header.block_hash(), + )) + } else { + None } - - return None; } /// Main loop of 
the relayer. diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index c23bf1fc19..a3821fae2b 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -206,8 +206,7 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::Globals; -use crate::globals::RelayerDirective; +use crate::globals::{Globals, RelayerDirective}; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; use crate::ChainTip; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index efa36ea1e5..a7be83272f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1,32 +1,43 @@ +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use std::{env, thread}; + +use clarity::vm::costs::ExecutionCost; use clarity::vm::types::PrincipalData; +use lazy_static::lazy_static; use stacks::burnchains::MagicBytes; +use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::db::StacksChainState; use stacks::core::{ - StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, + MemPoolDB, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, }; +use stacks::cost_estimates::metrics::UnitMetric; +use stacks::cost_estimates::UnitEstimator; use stacks_common::address::AddressHashMode; use stacks_common::consts::STACKS_EPOCH_MAX; use 
stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::Secp256k1PrivateKey; -use std::sync::atomic::Ordering; -use std::time::{Duration, Instant}; -use std::{env, thread}; use super::bitcoin_regtest::BitcoinCoreController; +use crate::config::{EventKeyType, EventObserverConfig}; use crate::mockamoto::signer::SelfSigner; use crate::run_loop::nakamoto; +use crate::tests::make_stacks_transfer; use crate::tests::neon_integrations::{ - next_block_and_wait, run_until_burnchain_height, submit_tx, wait_for_runloop, + next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, }; use crate::{ neon, tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain, }; -use lazy_static::lazy_static; + +static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; +static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; lazy_static! { pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ @@ -179,44 +190,83 @@ where Ok(()) } -#[test] -#[ignore] -fn simple_neon_integration() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } +/// Mine a bitcoin block, and wait until: +/// (1) a new block has been processed by the coordinator +/// (2) 2 block commits have been issued ** or ** more than 10 seconds have +/// passed since (1) occurred +fn next_block_and_mine_commit( + btc_controller: &mut BitcoinRegtestController, + timeout_secs: u64, + coord_channels: &CoordinatorChannels, + commits_submitted: &Arc, +) -> Result<(), String> { + let commits_submitted = commits_submitted.clone(); + let blocks_processed_before = coord_channels.get_stacks_blocks_processed(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + let mut block_processed_time: Option = None; + next_block_and(btc_controller, timeout_secs, || { + if let Some(block_processed_time) = block_processed_time.as_ref() { + let commits_sent = 
commits_submitted.load(Ordering::SeqCst); + if commits_sent >= commits_before + 2 { + return Ok(true); + } + if commits_sent >= commits_before + 1 + && block_processed_time.elapsed() > Duration::from_secs(10) + { + return Ok(true); + } + Ok(false) + } else { + let blocks_processed = coord_channels.get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + block_processed_time.replace(Instant::now()); + } + Ok(false) + } + }) +} - let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); +fn setup_stacker(naka_conf: &mut Config) -> Secp256k1PrivateKey { let stacker_sk = Secp256k1PrivateKey::new(); let stacker_address = tests::to_addr(&stacker_sk); naka_conf.add_initial_balance( PrincipalData::from(stacker_address.clone()).to_string(), - 100_000_000_000_000, + POX_4_DEFAULT_STACKER_BALANCE, ); + stacker_sk +} +/// +/// * `stacker_sk` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +fn boot_to_epoch_3( + naka_conf: &Config, + stacker_sk: Secp256k1PrivateKey, + btc_regtest_controller: &mut BitcoinRegtestController, +) { let epoch_2_conf = naka_conf.clone(); - - let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); btc_regtest_controller.bootstrap_chain(201); - info!("Chain bootstrapped to bitcoin block 201, starting a epoch-2x miner"); + let epochs = epoch_2_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + + info!( + "Chain bootstrapped to bitcoin block 201, starting Epoch 2x miner"; + "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), + ); + let http_origin = format!("http://{}", &epoch_2_conf.node.rpc_bind); let mut run_loop = 
neon::RunLoop::new(epoch_2_conf.clone()); let epoch_2_stopper = run_loop.get_termination_switch(); let blocks_processed = run_loop.get_blocks_processed_arc(); let epoch_2_thread = thread::spawn(move || run_loop.start(None, 0)); wait_for_runloop(&blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, &blocks_processed); // first mined stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + // stack enough to activate pox-4 let pox_addr_tuple = clarity::vm::tests::execute(&format!( "{{ hashbytes: 0x{}, version: 0x{:02x} }}", @@ -232,7 +282,7 @@ fn simple_neon_integration() { "pox-4", "stack-stx", &[ - clarity::vm::Value::UInt(99_000_000_000_000), + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), pox_addr_tuple, clarity::vm::Value::UInt(205), clarity::vm::Value::UInt(12), @@ -242,23 +292,82 @@ fn simple_neon_integration() { submit_tx(&http_origin, &stacking_tx); run_until_burnchain_height( - &mut btc_regtest_controller, + btc_regtest_controller, &blocks_processed, - 219, + epoch_3.start_height - 1, &epoch_2_conf, ); - info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + info!("Bootstrapped to Epoch-3.0 boundary, stopping Epoch2x miner"); epoch_2_stopper.store(false, Ordering::SeqCst); - epoch_2_thread.join().unwrap(); +} + +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). +/// This test makes three assertions: +/// * 30 blocks are mined after 3.0 starts. 
This is enough to mine across 2 reward cycles +/// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 +/// * The final chain tip is a nakamoto block +fn simple_neon_integration() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 100; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt + send_fee, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + + boot_to_epoch_3(&naka_conf, stacker_sk, &mut btc_regtest_controller); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); let mut run_loop = nakamoto::RunLoop::new(naka_conf.clone()); let epoch_3_stopper = run_loop.get_termination_switch(); let blocks_processed = 
run_loop.get_blocks_processed_arc(); let vrfs_submitted = run_loop.submitted_vrfs(); let commits_submitted = run_loop.submitted_commits(); - let blocks_mined = run_loop.submitted_commits(); let coord_channel = run_loop.get_coordinator_channel().unwrap(); let epoch_3_thread = thread::spawn(move || run_loop.start(None, 0)); @@ -279,41 +388,87 @@ fn simple_neon_integration() { }) .unwrap(); - let blocks_processed_before_mining = coord_channel.get_stacks_blocks_processed(); - - // this block should perform the sortition, wait until a block is mined - next_block_and(&mut btc_regtest_controller, 60, || { - let mined_count = blocks_mined.load(Ordering::SeqCst); - Ok(mined_count >= 1) - }) - .unwrap(); - - // wait until the coordinator has processed the new block(s) - while coord_channel.get_stacks_blocks_processed() <= blocks_processed_before_mining { - thread::sleep(Duration::from_secs(1)); + // Mine 15 nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); } - // load the chain tip, and assert that it is a nakamoto block + // Submit a TX + let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); - let burnchain = naka_conf.get_burnchain(); - let sortdb = burnchain.open_sortition_db(true).unwrap(); - let (chainstate, _) = StacksChainState::open( + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let mut mempool = MemPoolDB::open( naka_conf.is_mainnet(), naka_conf.burnchain.chain_id, &naka_conf.get_chainstate_path_str(), - None, + Box::new(UnitEstimator), + Box::new(UnitMetric), ) - .unwrap(); + .expect("Database failure opening mempool"); + + mempool + .submit_raw( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + transfer_tx.clone(), + &ExecutionCost::max_value(), + 
&StacksEpochId::Epoch30, + ) + .unwrap(); + // Mine 15 more nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); info!( "Latest tip"; + "height" => tip.stacks_block_height, "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), ); + // assert that the transfer tx was observed + let transfer_tx_included = test_observer::get_blocks() + .into_iter() + .find(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + .is_some() + }) + .is_some(); + + assert!( + transfer_tx_included, + "Nakamoto node failed to include the transfer tx" + ); + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); coord_channel.stop_chains_coordinator(); From 25d1b52d7396c2617802ae1bd8fd32cfbf967247 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 9 Dec 2023 13:02:21 -0600 Subject: [PATCH 22/41] feat: add boot_nakamoto to wrap the 2.x/3.x node handoff --- stackslib/src/burnchains/bitcoin/indexer.rs | 13 +- stackslib/src/core/mod.rs | 27 +++ testnet/stacks-node/src/main.rs | 4 +- .../stacks-node/src/nakamoto_node/miner.rs | 3 - .../stacks-node/src/run_loop/boot_nakamoto.rs | 205 ++++++++++++++++++ testnet/stacks-node/src/run_loop/mod.rs | 1 + testnet/stacks-node/src/run_loop/nakamoto.rs | 11 +- .../src/tests/nakamoto_integrations.rs | 99 +++++---- 8 files changed, 300 insertions(+), 63 deletions(-) create mode 100644 testnet/stacks-node/src/run_loop/boot_nakamoto.rs diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 
c273a38de4..6f6b82ceec 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -46,7 +46,8 @@ use crate::burnchains::{ Burnchain, BurnchainBlockHeader, Error as burnchain_error, MagicBytes, BLOCKSTACK_MAGIC_MAINNET, }; use crate::core::{ - StacksEpoch, STACKS_EPOCHS_MAINNET, STACKS_EPOCHS_REGTEST, STACKS_EPOCHS_TESTNET, + StacksEpoch, StacksEpochExtension, STACKS_EPOCHS_MAINNET, STACKS_EPOCHS_REGTEST, + STACKS_EPOCHS_TESTNET, }; use crate::util_lib::db::Error as DBError; @@ -91,7 +92,7 @@ impl TryFrom for BitcoinNetworkType { /// Get the default epochs definitions for the given BitcoinNetworkType. /// Should *not* be used except by the BitcoinIndexer when no epochs vector /// was specified. -fn get_bitcoin_stacks_epochs(network_id: BitcoinNetworkType) -> Vec { +pub fn get_bitcoin_stacks_epochs(network_id: BitcoinNetworkType) -> Vec { match network_id { BitcoinNetworkType::Mainnet => STACKS_EPOCHS_MAINNET.to_vec(), BitcoinNetworkType::Testnet => STACKS_EPOCHS_TESTNET.to_vec(), @@ -1030,13 +1031,7 @@ impl BurnchainIndexer for BitcoinIndexer { /// /// It is an error (panic) to set custom epochs if running on `Mainnet`. 
fn get_stacks_epochs(&self) -> Vec { - match self.config.epochs { - Some(ref epochs) => { - assert!(self.runtime.network_id != BitcoinNetworkType::Mainnet); - epochs.clone() - } - None => get_bitcoin_stacks_epochs(self.runtime.network_id), - } + StacksEpoch::get_epochs(self.runtime.network_id, self.config.epochs.as_ref()) } /// Read downloaded headers within a range diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index b03fe0c8e0..38f383194e 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -25,6 +25,8 @@ pub use stacks_common::types::StacksEpochId; use stacks_common::util::log; pub use self::mempool::MemPoolDB; +use crate::burnchains::bitcoin::indexer::get_bitcoin_stacks_epochs; +use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::{Burnchain, Error as burnchain_error}; use crate::chainstate::burn::ConsensusHash; pub mod mempool; @@ -604,9 +606,34 @@ pub trait StacksEpochExtension { epoch_2_1_block_height: u64, ) -> Vec; fn validate_epochs(epochs: &[StacksEpoch]) -> Vec; + /// This method gets the epoch vector. + /// + /// Choose according to: + /// 1) Use the custom epochs defined on the underlying `BitcoinIndexerConfig`, if they exist. + /// 2) Use hard-coded static values, otherwise. + /// + /// It is an error (panic) to set custom epochs if running on `Mainnet`. 
+ /// + fn get_epochs( + bitcoin_network: BitcoinNetworkType, + configured_epochs: Option<&Vec>, + ) -> Vec; } impl StacksEpochExtension for StacksEpoch { + fn get_epochs( + bitcoin_network: BitcoinNetworkType, + configured_epochs: Option<&Vec>, + ) -> Vec { + match configured_epochs { + Some(epochs) => { + assert!(bitcoin_network != BitcoinNetworkType::Mainnet); + epochs.clone() + } + None => get_bitcoin_stacks_epochs(bitcoin_network), + } + } + #[cfg(test)] fn unit_test_pre_2_05(first_burnchain_height: u64) -> Vec { info!( diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 8675b43132..d180aead8b 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -46,7 +46,7 @@ pub use self::node::{ChainTip, Node}; pub use self::run_loop::{helium, neon}; pub use self::tenure::Tenure; use crate::mockamoto::MockamotoNode; -use crate::run_loop::nakamoto; +use crate::run_loop::boot_nakamoto; fn main() { panic::set_hook(Box::new(|panic_info| { @@ -213,7 +213,7 @@ fn main() { let mut mockamoto = MockamotoNode::new(&conf).unwrap(); mockamoto.run(); } else if conf.burnchain.mode == "nakamoto-neon" { - let mut run_loop = nakamoto::RunLoop::new(conf); + let mut run_loop = boot_nakamoto::BootRunLoop::new(conf).unwrap(); run_loop.start(None, 0); } else { println!("Burnchain mode '{}' not supported", conf.burnchain.mode); diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 2d2d88293a..bc684a07bf 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -16,7 +16,6 @@ use std::convert::TryFrom; use std::thread; use std::thread::JoinHandle; -use std::time::Instant; use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, BurnchainParameters}; @@ -398,8 +397,6 @@ impl BlockMinerThread { ) .expect("Database failure opening mempool"); - let assembly_start = Instant::now(); - let target_epoch_id 
= SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1) .ok()? diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs new file mode 100644 index 0000000000..1b54c24f5a --- /dev/null +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -0,0 +1,205 @@ +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex}; +use std::thread::JoinHandle; +use std::time::Duration; +use std::{fs, thread}; + +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::coordinator::comm::CoordinatorChannels; +use stacks::core::StacksEpochExtension; +use stacks_common::types::{StacksEpoch, StacksEpochId}; + +use crate::neon::Counters; +use crate::run_loop::nakamoto::RunLoop as NakaRunLoop; +use crate::run_loop::neon::RunLoop as NeonRunLoop; +use crate::Config; + +/// This runloop handles booting to Nakamoto: +/// During epochs [1.0, 2.5], it runs a neon run_loop. +/// Once epoch 3.0 is reached, it stops the neon run_loop +/// and starts nakamoto. +pub struct BootRunLoop { + config: Config, + active_loop: InnerLoops, + coordinator_channels: Arc>, +} + +enum InnerLoops { + Epoch2(NeonRunLoop), + Epoch3(NakaRunLoop), +} + +impl BootRunLoop { + pub fn new(config: Config) -> Result { + let (coordinator_channels, active_loop) = if !Self::reached_epoch_30_transition(&config)? { + let neon = NeonRunLoop::new(config.clone()); + ( + neon.get_coordinator_channel().unwrap(), + InnerLoops::Epoch2(neon), + ) + } else { + let naka = NakaRunLoop::new(config.clone(), None, None); + ( + naka.get_coordinator_channel().unwrap(), + InnerLoops::Epoch3(naka), + ) + }; + + Ok(BootRunLoop { + config, + active_loop, + coordinator_channels: Arc::new(Mutex::new(coordinator_channels)), + }) + } + + /// Get a mutex-guarded pointer to this run-loops coordinator channels. 
+ /// The reason this must be mutex guarded is that the run loop will switch + /// from a "neon" coordinator to a "nakamoto" coordinator, and update the + /// backing coordinator channel. That way, anyone still holding the Arc<> + /// should be able to query the new coordinator channel. + pub fn coordinator_channels(&self) -> Arc> { + self.coordinator_channels.clone() + } + + /// Get the runtime counters for the inner runloop. The nakamoto + /// runloop inherits the counters object from the neon node, + /// so no need for another layer of indirection/mutex. + pub fn counters(&self) -> Counters { + match &self.active_loop { + InnerLoops::Epoch2(x) => x.get_counters(), + InnerLoops::Epoch3(x) => x.get_counters(), + } + } + + /// Get the termination switch from the active run loop. + pub fn get_termination_switch(&self) -> Arc { + match &self.active_loop { + InnerLoops::Epoch2(x) => x.get_termination_switch(), + InnerLoops::Epoch3(x) => x.get_termination_switch(), + } + } + + /// The main entry point for the run loop. This starts either a 2.x-neon or 3.x-nakamoto + /// node depending on the current burnchain height. 
+ pub fn start(&mut self, burnchain_opt: Option, mine_start: u64) { + match self.active_loop { + InnerLoops::Epoch2(_) => return self.start_from_neon(burnchain_opt, mine_start), + InnerLoops::Epoch3(_) => return self.start_from_naka(burnchain_opt, mine_start), + } + } + + fn start_from_naka(&mut self, burnchain_opt: Option, mine_start: u64) { + let InnerLoops::Epoch3(ref mut naka_loop) = self.active_loop else { + panic!("FATAL: unexpectedly invoked start_from_naka when active loop wasn't nakamoto"); + }; + naka_loop.start(burnchain_opt, mine_start) + } + + fn start_from_neon(&mut self, burnchain_opt: Option, mine_start: u64) { + let InnerLoops::Epoch2(ref mut neon_loop) = self.active_loop else { + panic!("FATAL: unexpectedly invoked start_from_neon when active loop wasn't neon"); + }; + let termination_switch = neon_loop.get_termination_switch(); + let counters = neon_loop.get_counters(); + + let boot_thread = Self::spawn_stopper(&self.config, neon_loop) + .expect("FATAL: failed to spawn epoch-2/3-boot thread"); + neon_loop.start(burnchain_opt.clone(), mine_start); + + // did we exit because of the epoch-3.0 transition, or some other reason? 
+ let exited_for_transition = boot_thread + .join() + .expect("FATAL: failed to join epoch-2/3-boot thread"); + if !exited_for_transition { + info!("Shutting down epoch-2/3 transition thread"); + return; + } + info!("Reached Epoch-3.0 boundary, starting nakamoto node"); + termination_switch.store(true, Ordering::SeqCst); + let naka = NakaRunLoop::new( + self.config.clone(), + Some(termination_switch), + Some(counters), + ); + let new_coord_channels = naka + .get_coordinator_channel() + .expect("FATAL: should have coordinator channel in newly instantiated runloop"); + { + let mut coord_channel = self.coordinator_channels.lock().expect("Mutex poisoned"); + *coord_channel = new_coord_channels; + } + self.active_loop = InnerLoops::Epoch3(naka); + let InnerLoops::Epoch3(ref mut naka_loop) = self.active_loop else { + panic!("FATAL: unexpectedly found epoch2 loop after setting epoch3 active"); + }; + naka_loop.start(burnchain_opt, mine_start) + } + + fn spawn_stopper( + config: &Config, + neon: &NeonRunLoop, + ) -> Result, std::io::Error> { + let neon_term_switch = neon.get_termination_switch(); + let config = config.clone(); + thread::Builder::new() + .name("epoch-2/3-boot".into()) + .spawn(move || { + loop { + let do_transition = Self::reached_epoch_30_transition(&config) + .unwrap_or_else(|err| { + warn!("Error checking for Epoch-3.0 transition: {err:?}. 
Assuming transition did not occur yet."); + false + }); + if do_transition { + break; + } + if !neon_term_switch.load(Ordering::SeqCst) { + info!("Stop requested, exiting epoch-2/3-boot thread"); + return false; + } + thread::sleep(Duration::from_secs(1)); + } + // if loop exited, do the transition + info!("Epoch-3.0 boundary reached, stopping Epoch-2.x run loop"); + neon_term_switch.store(false, Ordering::SeqCst); + return true + }) + } + + fn reached_epoch_30_transition(config: &Config) -> Result { + let burn_height = Self::get_burn_height(config)?; + let epochs = StacksEpoch::get_epochs( + config.burnchain.get_bitcoin_network().1, + config.burnchain.epochs.as_ref(), + ); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30) + .ok_or("No Epoch-3.0 defined")?]; + + Ok(u64::from(burn_height) >= epoch_3.start_height - 1) + } + + fn get_burn_height(config: &Config) -> Result { + let burnchain = config.get_burnchain(); + let sortdb_path = config.get_burn_db_file_path(); + if fs::metadata(&sortdb_path).is_err() { + // if the sortition db doesn't exist yet, don't try to open() it, because that creates the + // db file even if it doesn't instantiate the tables, which breaks connect() logic. 
+ info!("Failed to open Sortition DB while checking current burn height, assuming height = 0"); + return Ok(0); + } + + let Ok(sortdb) = SortitionDB::open(&sortdb_path, false, burnchain.pox_constants.clone()) + else { + info!("Failed to open Sortition DB while checking current burn height, assuming height = 0"); + return Ok(0); + }; + + let Ok(tip_sn) = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) else { + info!("Failed to query Sortition DB for current burn height, assuming height = 0"); + return Ok(0); + }; + + Ok(u32::try_from(tip_sn.block_height).expect("FATAL: burn height exceeded u32")) + } +} diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index abfbe37c37..9ad4fd583e 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -1,3 +1,4 @@ +pub mod boot_nakamoto; pub mod helium; pub mod nakamoto; pub mod neon; diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index f758a65d33..e6a835abb8 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -68,9 +68,14 @@ pub struct RunLoop { impl RunLoop { /// Sets up a runloop and node, given a config. 
- pub fn new(config: Config) -> Self { + pub fn new( + config: Config, + should_keep_running: Option>, + counters: Option, + ) -> Self { let channels = CoordinatorCommunication::instantiate(); - let should_keep_running = Arc::new(AtomicBool::new(true)); + let should_keep_running = + should_keep_running.unwrap_or_else(|| Arc::new(AtomicBool::new(true))); let pox_watchdog_comms = PoxSyncWatchdogComms::new(should_keep_running.clone()); let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready( config.burnchain.burn_fee_cap, @@ -86,7 +91,7 @@ impl RunLoop { globals: None, coordinator_channels: Some(channels), callbacks: RunLoopCallbacks::new(), - counters: Counters::new(), + counters: counters.unwrap_or_else(|| Counters::new()), should_keep_running, event_dispatcher, pox_watchdog: None, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a7be83272f..ad9c473992 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1,5 +1,5 @@ use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use std::{env, thread}; @@ -27,14 +27,13 @@ use stacks_common::util::secp256k1::Secp256k1PrivateKey; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig}; use crate::mockamoto::signer::SelfSigner; -use crate::run_loop::nakamoto; +use crate::neon::{Counters, RunLoopCounter}; +use crate::run_loop::boot_nakamoto; use crate::tests::make_stacks_transfer; use crate::tests::neon_integrations::{ next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, }; -use crate::{ - neon, tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain, -}; +use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; static 
POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; @@ -197,11 +196,14 @@ where fn next_block_and_mine_commit( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &CoordinatorChannels, + coord_channels: &Arc>, commits_submitted: &Arc, ) -> Result<(), String> { let commits_submitted = commits_submitted.clone(); - let blocks_processed_before = coord_channels.get_stacks_blocks_processed(); + let blocks_processed_before = coord_channels + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); let commits_before = commits_submitted.load(Ordering::SeqCst); let mut block_processed_time: Option = None; next_block_and(btc_controller, timeout_secs, || { @@ -217,7 +219,10 @@ fn next_block_and_mine_commit( } Ok(false) } else { - let blocks_processed = coord_channels.get_stacks_blocks_processed(); + let blocks_processed = coord_channels + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); if blocks_processed > blocks_processed_before { block_processed_time.replace(Instant::now()); } @@ -241,27 +246,18 @@ fn setup_stacker(naka_conf: &mut Config) -> Secp256k1PrivateKey { /// for pox-4 to activate fn boot_to_epoch_3( naka_conf: &Config, + blocks_processed: &RunLoopCounter, stacker_sk: Secp256k1PrivateKey, btc_regtest_controller: &mut BitcoinRegtestController, ) { - let epoch_2_conf = naka_conf.clone(); - btc_regtest_controller.bootstrap_chain(201); - - let epochs = epoch_2_conf.burnchain.epochs.clone().unwrap(); - + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; info!( "Chain bootstrapped to bitcoin block 201, starting Epoch 2x miner"; "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); - let http_origin = format!("http://{}", &epoch_2_conf.node.rpc_bind); - let mut run_loop = neon::RunLoop::new(epoch_2_conf.clone()); - - let 
epoch_2_stopper = run_loop.get_termination_switch(); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let epoch_2_thread = thread::spawn(move || run_loop.start(None, 0)); - wait_for_runloop(&blocks_processed); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); next_block_and_wait(btc_regtest_controller, &blocks_processed); next_block_and_wait(btc_regtest_controller, &blocks_processed); // first mined stacks block @@ -295,19 +291,18 @@ fn boot_to_epoch_3( btc_regtest_controller, &blocks_processed, epoch_3.start_height - 1, - &epoch_2_conf, + &naka_conf, ); - info!("Bootstrapped to Epoch-3.0 boundary, stopping Epoch2x miner"); - epoch_2_stopper.store(false, Ordering::SeqCst); - epoch_2_thread.join().unwrap(); + info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); } #[test] #[ignore] /// This test spins up a nakamoto-neon node. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches -/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. /// This test makes three assertions: /// * 30 blocks are mined after 3.0 starts. 
This is enough to mine across 2 reward cycles /// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 @@ -330,13 +325,39 @@ fn simple_neon_integration() { let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + .. 
+ } = run_loop.counters(); - boot_to_epoch_3(&naka_conf, stacker_sk, &mut btc_regtest_controller); + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + stacker_sk, + &mut btc_regtest_controller, + ); info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); @@ -356,23 +377,6 @@ fn simple_neon_integration() { .unwrap() .stacks_block_height; - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - }); - - let mut run_loop = nakamoto::RunLoop::new(naka_conf.clone()); - let epoch_3_stopper = run_loop.get_termination_switch(); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let vrfs_submitted = run_loop.submitted_vrfs(); - let commits_submitted = run_loop.submitted_commits(); - let coord_channel = run_loop.get_coordinator_channel().unwrap(); - - let epoch_3_thread = thread::spawn(move || run_loop.start(None, 0)); - - wait_for_runloop(&blocks_processed); info!("Nakamoto miner started..."); // first block wakes up the run loop, wait until a key registration has been submitted. 
next_block_and(&mut btc_regtest_controller, 60, || { @@ -470,8 +474,11 @@ fn simple_neon_integration() { assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); - coord_channel.stop_chains_coordinator(); + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); - epoch_3_stopper.store(false, Ordering::SeqCst); - epoch_3_thread.join().unwrap(); + run_loop_thread.join().unwrap(); } From b5bb4ac64de189760e735c78ad3e82a9e4d76a97 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 9 Dec 2023 15:05:14 -0600 Subject: [PATCH 23/41] add copyright headers, some code cleanup --- testnet/stacks-node/src/config.rs | 29 +- testnet/stacks-node/src/globals.rs | 32 +- testnet/stacks-node/src/mockamoto.rs | 17 +- testnet/stacks-node/src/nakamoto_node.rs | 401 +----------------- .../stacks-node/src/nakamoto_node/miner.rs | 37 +- testnet/stacks-node/src/nakamoto_node/peer.rs | 149 ++----- .../stacks-node/src/nakamoto_node/relayer.rs | 196 ++++----- testnet/stacks-node/src/neon_node.rs | 88 ++-- .../stacks-node/src/run_loop/boot_nakamoto.rs | 15 + testnet/stacks-node/src/run_loop/nakamoto.rs | 37 +- testnet/stacks-node/src/run_loop/neon.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 30 +- 12 files changed, 295 insertions(+), 738 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index feaa0208ac..526c2a90da 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -17,17 +17,18 @@ use stacks::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; use stacks::chainstate::stacks::MAX_BLOCK_LEN; use stacks::core::mempool::MemPoolWalkSettings; use stacks::core::{ - StacksEpoch, StacksEpochExtension, StacksEpochId, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, - PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, + MemPoolDB, StacksEpoch, StacksEpochExtension, 
StacksEpochId, CHAIN_ID_MAINNET, + CHAIN_ID_TESTNET, PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, }; use stacks::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; use stacks::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; use stacks::cost_estimates::fee_scalar::ScalarFeeRateEstimator; -use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct}; -use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator}; +use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMetric}; +use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator}; use stacks::net::atlas::AtlasConfig; use stacks::net::connection::ConnectionOptions; use stacks::net::{Neighbor, NeighborKey}; +use stacks::util_lib::db::Error as DBError; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::types::net::PeerAddress; @@ -510,6 +511,26 @@ impl Config { Ok(self.burnchain.clone()) } } + + /// Connect to the MempoolDB using the configured cost estimation + pub fn connect_mempool_db(&self) -> Result { + // create estimators, metric instances for RPC handler + let cost_estimator = self + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = self + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + MemPoolDB::open( + self.is_mainnet(), + self.burnchain.chain_id, + &self.get_chainstate_path_str(), + cost_estimator, + metric, + ) + } + /// Apply any test settings to this burnchain config struct fn apply_test_settings(&self, burnchain: &mut Burnchain) { if self.burnchain.get_bitcoin_network().1 == BitcoinNetworkType::Mainnet { diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index 7e9e47a8fe..6c60e9a591 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -17,6 +17,8 @@ use 
crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::RegisteredKey; use crate::syncctl::PoxSyncWatchdogComms; +pub type NeonGlobals = Globals; + /// Command types for the relayer thread, issued to it by other threads pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and @@ -34,8 +36,7 @@ pub enum RelayerDirective { } /// Inter-thread communication structure, shared between threads -#[derive(Clone)] -pub struct Globals { +pub struct Globals { /// Last sortition processed last_sortition: Arc>>, /// Status of the miner @@ -45,7 +46,7 @@ pub struct Globals { /// Unconfirmed transactions (shared between the relayer and p2p threads) unconfirmed_txs: Arc>, /// Writer endpoint to the relayer thread - pub relay_send: SyncSender, + pub relay_send: SyncSender, /// Cointer state in the main thread pub counters: Counters, /// Connection to the PoX sync watchdog @@ -56,15 +57,34 @@ pub struct Globals { leader_key_registration_state: Arc>, } -impl Globals { +// Need to manually implement Clone, because [derive(Clone)] requires +// all trait bounds to implement Clone, even though T doesn't need Clone +// because it's behind SyncSender. 
+impl Clone for Globals { + fn clone(&self) -> Self { + Self { + last_sortition: self.last_sortition.clone(), + miner_status: self.miner_status.clone(), + coord_comms: self.coord_comms.clone(), + unconfirmed_txs: self.unconfirmed_txs.clone(), + relay_send: self.relay_send.clone(), + counters: self.counters.clone(), + sync_comms: self.sync_comms.clone(), + should_keep_running: self.should_keep_running.clone(), + leader_key_registration_state: self.leader_key_registration_state.clone(), + } + } +} + +impl Globals { pub fn new( coord_comms: CoordinatorChannels, miner_status: Arc>, - relay_send: SyncSender, + relay_send: SyncSender, counters: Counters, sync_comms: PoxSyncWatchdogComms, should_keep_running: Arc, - ) -> Globals { + ) -> Globals { Globals { last_sortition: Arc::new(Mutex::new(None)), miner_status, diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 78bc2ae491..7b56c2afb8 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
use std::sync::atomic::AtomicBool; use std::sync::mpsc::{sync_channel, Receiver, RecvTimeoutError}; use std::sync::{Arc, Mutex}; @@ -69,7 +84,7 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; use self::signer::SelfSigner; -use crate::globals::{Globals, RelayerDirective}; +use crate::globals::{NeonGlobals as Globals, RelayerDirective}; use crate::neon::Counters; use crate::neon_node::{PeerThread, StacksNode, BLOCK_PROCESSOR_STACK_SIZE}; use crate::syncctl::PoxSyncWatchdogComms; diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index de0d04cfb5..0482bbfb05 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -14,42 +14,25 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
use std::collections::HashMap; -use std::convert::TryFrom; -use std::net::SocketAddr; use std::sync::mpsc::Receiver; use std::thread; use std::thread::JoinHandle; -use clarity::vm::ast::ASTRules; -use clarity::vm::types::QualifiedContractIdentifier; -use stacks::burnchains::{Burnchain, BurnchainSigner, Txid}; +use stacks::burnchains::{BurnchainSigner, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::BlockSnapshot; -use stacks::chainstate::stacks::db::StacksChainState; -use stacks::chainstate::stacks::Error as ChainstateError; -use stacks::core::mempool::MemPoolDB; -use stacks::cost_estimates::metrics::UnitMetric; -use stacks::cost_estimates::UnitEstimator; use stacks::monitoring; use stacks::monitoring::update_active_miners_count_gauge; -use stacks::net::atlas::{AtlasConfig, AtlasDB}; -use stacks::net::db::PeerDB; -use stacks::net::p2p::PeerNetwork; +use stacks::net::atlas::AtlasConfig; use stacks::net::relay::Relayer; -use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs}; -use stacks::net::{Error as NetError, PeerNetworkComms, ServiceFlags}; -use stacks::util_lib::strings::{UrlString, VecDisplay}; +use stacks::net::stackerdb::StackerDBs; use stacks_common::types::chainstate::SortitionId; -use stacks_common::types::net::PeerAddress; use stacks_common::types::StacksEpochId; -use stacks_common::util::get_epoch_time_secs; -use stacks_common::util::secp256k1::Secp256k1PrivateKey; use super::{Config, EventDispatcher, Keychain}; use crate::burnchains::bitcoin_regtest_controller::addr2str; -use crate::globals::{Globals, RelayerDirective}; -use crate::neon_node::LeaderKeyRegistrationState; -use crate::run_loop::nakamoto::RunLoop; +use crate::neon_node::{LeaderKeyRegistrationState, StacksNode as NeonNode}; +use crate::run_loop::nakamoto::{Globals, RunLoop}; use crate::run_loop::RegisteredKey; pub mod miner; @@ -57,7 +40,7 @@ pub mod peer; pub mod relayer; use self::peer::PeerThread; -use 
self::relayer::RelayerThread; +use self::relayer::{RelayerDirective, RelayerThread}; pub const RELAYER_MAX_BUFFER: usize = 100; const VRF_MOCK_MINER_KEY: u64 = 1; @@ -82,88 +65,6 @@ pub struct StacksNode { pub relayer_thread_handle: JoinHandle<()>, } -/// Fault injection logic to artificially increase the length of a tenure. -/// Only used in testing -#[cfg(test)] -fn fault_injection_long_tenure() { - // simulated slow block - match std::env::var("STX_TEST_SLOW_TENURE") { - Ok(tenure_str) => match tenure_str.parse::() { - Ok(tenure_time) => { - info!( - "Fault injection: sleeping for {} milliseconds to simulate a long tenure", - tenure_time - ); - stacks_common::util::sleep_ms(tenure_time); - } - Err(_) => { - error!("Parse error for STX_TEST_SLOW_TENURE"); - panic!(); - } - }, - _ => {} - } -} - -#[cfg(not(test))] -fn fault_injection_long_tenure() {} - -/// Fault injection to skip mining in this bitcoin block height -/// Only used in testing -#[cfg(test)] -fn fault_injection_skip_mining(rpc_bind: &str, target_burn_height: u64) -> bool { - match std::env::var("STACKS_DISABLE_MINER") { - Ok(disable_heights) => { - let disable_schedule: serde_json::Value = - serde_json::from_str(&disable_heights).unwrap(); - let disable_schedule = disable_schedule.as_array().unwrap(); - for disabled in disable_schedule { - let target_miner_rpc_bind = disabled - .get("rpc_bind") - .unwrap() - .as_str() - .unwrap() - .to_string(); - if target_miner_rpc_bind != rpc_bind { - continue; - } - let target_block_heights = disabled.get("blocks").unwrap().as_array().unwrap(); - for target_block_value in target_block_heights { - let target_block = target_block_value.as_i64().unwrap() as u64; - if target_block == target_burn_height { - return true; - } - } - } - return false; - } - Err(_) => { - return false; - } - } -} - -#[cfg(not(test))] -fn fault_injection_skip_mining(_rpc_bind: &str, _target_burn_height: u64) -> bool { - false -} - -/// Open the chainstate, and inject faults from the config 
file -pub(crate) fn open_chainstate_with_faults( - config: &Config, -) -> Result { - let stacks_chainstate_path = config.get_chainstate_path_str(); - let (mut chainstate, _) = StacksChainState::open( - config.is_mainnet(), - config.burnchain.chain_id, - &stacks_chainstate_path, - Some(config.node.get_marf_opts()), - )?; - - chainstate.fault_injection.hide_blocks = config.node.fault_injection_hide_blocks; - Ok(chainstate) -} - /// Types of errors that can arise during mining #[derive(Debug)] enum Error { @@ -186,284 +87,6 @@ enum Error { } impl StacksNode { - /// Set up the AST size-precheck height, if configured - fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { - if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { - info!( - "Override burnchain height of {:?} to {}", - ASTRules::PrecheckSize, - ast_precheck_size_height - ); - let mut tx = sortdb - .tx_begin() - .expect("FATAL: failed to begin tx on sortition DB"); - SortitionDB::override_ast_rule_height( - &mut tx, - ASTRules::PrecheckSize, - ast_precheck_size_height, - ) - .expect("FATAL: failed to override AST PrecheckSize rule height"); - tx.commit() - .expect("FATAL: failed to commit sortition DB transaction"); - } - } - - /// Set up the mempool DB by making sure it exists. - /// Panics on failure. - fn setup_mempool_db(config: &Config) -> MemPoolDB { - // force early mempool instantiation - let cost_estimator = config - .make_cost_estimator() - .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = config - .make_cost_metric() - .unwrap_or_else(|| Box::new(UnitMetric)); - - let mempool = MemPoolDB::open( - config.is_mainnet(), - config.burnchain.chain_id, - &config.get_chainstate_path_str(), - cost_estimator, - metric, - ) - .expect("BUG: failed to instantiate mempool"); - - mempool - } - - /// Set up the Peer DB and update any soft state from the config file. 
This includes: - /// * blacklisted/whitelisted nodes - /// * node keys - /// * bootstrap nodes - /// Returns the instantiated PeerDB - /// Panics on failure. - fn setup_peer_db( - config: &Config, - burnchain: &Burnchain, - stackerdb_contract_ids: &[QualifiedContractIdentifier], - ) -> PeerDB { - let data_url = UrlString::try_from(format!("{}", &config.node.data_url)).unwrap(); - let initial_neighbors = config.node.bootstrap_node.clone(); - if initial_neighbors.len() > 0 { - info!( - "Will bootstrap from peers {}", - VecDisplay(&initial_neighbors) - ); - } else { - warn!("Without a peer to bootstrap from, the node will start mining a new chain"); - } - - let p2p_sock: SocketAddr = config.node.p2p_bind.parse().expect(&format!( - "Failed to parse socket: {}", - &config.node.p2p_bind - )); - let p2p_addr: SocketAddr = config.node.p2p_address.parse().expect(&format!( - "Failed to parse socket: {}", - &config.node.p2p_address - )); - let node_privkey = Secp256k1PrivateKey::from_seed(&config.node.local_peer_seed); - - let mut peerdb = PeerDB::connect( - &config.get_peer_db_file_path(), - true, - config.burnchain.chain_id, - burnchain.network_id, - Some(node_privkey), - config.connection_options.private_key_lifetime.clone(), - PeerAddress::from_socketaddr(&p2p_addr), - p2p_sock.port(), - data_url, - &[], - Some(&initial_neighbors), - stackerdb_contract_ids, - ) - .map_err(|e| { - eprintln!( - "Failed to open {}: {:?}", - &config.get_peer_db_file_path(), - &e - ); - panic!(); - }) - .unwrap(); - - // allow all bootstrap nodes - { - let mut tx = peerdb.tx_begin().unwrap(); - for initial_neighbor in initial_neighbors.iter() { - // update peer in case public key changed - PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap(); - PeerDB::set_allow_peer( - &mut tx, - initial_neighbor.addr.network_id, - &initial_neighbor.addr.addrbytes, - initial_neighbor.addr.port, - -1, - ) - .unwrap(); - } - tx.commit().unwrap(); - } - - if !config.node.deny_nodes.is_empty() { - warn!("Will 
ignore nodes {:?}", &config.node.deny_nodes); - } - - // deny all config-denied peers - { - let mut tx = peerdb.tx_begin().unwrap(); - for denied in config.node.deny_nodes.iter() { - PeerDB::set_deny_peer( - &mut tx, - denied.addr.network_id, - &denied.addr.addrbytes, - denied.addr.port, - get_epoch_time_secs() + 24 * 365 * 3600, - ) - .unwrap(); - } - tx.commit().unwrap(); - } - - // update services to indicate we can support mempool sync - { - let mut tx = peerdb.tx_begin().unwrap(); - PeerDB::set_local_services( - &mut tx, - (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16), - ) - .unwrap(); - tx.commit().unwrap(); - } - - peerdb - } - - /// Set up the PeerNetwork, but do not bind it. - pub fn setup_peer_network( - config: &Config, - atlas_config: &AtlasConfig, - burnchain: Burnchain, - ) -> PeerNetwork { - let sortdb = SortitionDB::open( - &config.get_burn_db_file_path(), - true, - burnchain.pox_constants.clone(), - ) - .expect("Error while instantiating sor/tition db"); - - let epochs = SortitionDB::get_stacks_epochs(sortdb.conn()) - .expect("Error while loading stacks epochs"); - - let view = { - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) - .expect("Failed to get sortition tip"); - SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) - .unwrap() - }; - - let atlasdb = - AtlasDB::connect(atlas_config.clone(), &config.get_atlas_db_file_path(), true).unwrap(); - - let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true).unwrap(); - - let mut chainstate = - open_chainstate_with_faults(config).expect("FATAL: could not open chainstate DB"); - - let mut stackerdb_machines = HashMap::new(); - for stackerdb_contract_id in config.node.stacker_dbs.iter() { - // attempt to load the config - let (instantiate, stacker_db_config) = match StackerDBConfig::from_smart_contract( - &mut chainstate, - &sortdb, - stackerdb_contract_id, - ) { - Ok(c) => (true, c), - Err(e) => { - warn!( - 
"Failed to load StackerDB config for {}: {:?}", - stackerdb_contract_id, &e - ); - (false, StackerDBConfig::noop()) - } - }; - let mut stackerdbs = - StackerDBs::connect(&config.get_stacker_db_file_path(), true).unwrap(); - - if instantiate { - match stackerdbs.get_stackerdb_id(stackerdb_contract_id) { - Ok(..) => { - // reconfigure - let tx = stackerdbs.tx_begin(stacker_db_config.clone()).unwrap(); - tx.reconfigure_stackerdb(stackerdb_contract_id, &stacker_db_config.signers) - .expect(&format!( - "FATAL: failed to reconfigure StackerDB replica {}", - stackerdb_contract_id - )); - tx.commit().unwrap(); - } - Err(NetError::NoSuchStackerDB(..)) => { - // instantiate replica - let tx = stackerdbs.tx_begin(stacker_db_config.clone()).unwrap(); - tx.create_stackerdb(stackerdb_contract_id, &stacker_db_config.signers) - .expect(&format!( - "FATAL: failed to instantiate StackerDB replica {}", - stackerdb_contract_id - )); - tx.commit().unwrap(); - } - Err(e) => { - panic!("FATAL: failed to query StackerDB state: {:?}", &e); - } - } - } - let stacker_db_sync = match StackerDBSync::new( - stackerdb_contract_id.clone(), - &stacker_db_config, - PeerNetworkComms::new(), - stackerdbs, - ) { - Ok(s) => s, - Err(e) => { - warn!( - "Failed to instantiate StackerDB sync machine for {}: {:?}", - stackerdb_contract_id, &e - ); - continue; - } - }; - - stackerdb_machines.insert( - stackerdb_contract_id.clone(), - (stacker_db_config, stacker_db_sync), - ); - } - - let stackerdb_contract_ids: Vec<_> = - stackerdb_machines.keys().map(|sc| sc.clone()).collect(); - let peerdb = Self::setup_peer_db(config, &burnchain, &stackerdb_contract_ids); - - let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { - Ok(local_peer) => local_peer, - _ => panic!("Unable to retrieve local peer"), - }; - - let p2p_net = PeerNetwork::new( - peerdb, - atlasdb, - stackerdbs, - local_peer, - config.burnchain.peer_version, - burnchain, - view, - config.connection_options.clone(), - stackerdb_machines, - 
epochs, - ); - - p2p_net - } - /// This function sets the global var `GLOBAL_BURNCHAIN_SIGNER`. /// /// This variable is used for prometheus monitoring (which only @@ -507,11 +130,13 @@ impl StacksNode { ) .expect("Error while instantiating sortition db"); - Self::setup_ast_size_precheck(&config, &mut sortdb); + NeonNode::setup_ast_size_precheck(&config, &mut sortdb); - let _ = Self::setup_mempool_db(&config); + let _ = config + .connect_mempool_db() + .expect("FATAL: database failure opening mempool"); - let mut p2p_net = Self::setup_peer_network(&config, &atlas_config, burnchain.clone()); + let mut p2p_net = NeonNode::setup_peer_network(&config, &atlas_config, burnchain.clone()); let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true) .expect("FATAL: failed to connect to stacker DB"); @@ -602,7 +227,7 @@ impl StacksNode { return self .globals .relay_send - .send(RelayerDirective::ProcessTenure( + .send(RelayerDirective::ProcessedBurnBlock( snapshot.consensus_hash.clone(), snapshot.parent_burn_header_hash.clone(), snapshot.winning_stacks_block_hash.clone(), diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index bc684a07bf..ae2781ce7b 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -29,10 +29,7 @@ use stacks::chainstate::stacks::{ TenureChangeCause, TenureChangePayload, ThresholdSignature, TransactionAnchorMode, TransactionPayload, TransactionVersion, }; -use stacks::core::mempool::MemPoolDB; use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; -use 
stacks::cost_estimates::metrics::UnitMetric; -use stacks::cost_estimates::UnitEstimator; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::Hash160; @@ -40,11 +37,11 @@ use stacks_common::util::vrf::VRFProof; use super::relayer::RelayerThread; use super::{Config, Error as NakamotoNodeError, EventDispatcher, Keychain}; -use crate::globals::Globals; use crate::mockamoto::signer::SelfSigner; use crate::nakamoto_node::VRF_MOCK_MINER_KEY; +use crate::run_loop::nakamoto::Globals; use crate::run_loop::RegisteredKey; -use crate::ChainTip; +use crate::{neon_node, ChainTip}; pub enum MinerDirective { /// The miner won sortition so they should begin a new tenure @@ -161,7 +158,7 @@ impl BlockMinerThread { mut block: NakamotoBlock, ) -> Result<(), ChainstateError> { signer.sign_nakamoto_block(&mut block); - let mut chain_state = super::open_chainstate_with_faults(&self.config) + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); let chainstate_config = chain_state.config(); let sort_db = SortitionDB::open( @@ -365,19 +362,9 @@ impl BlockMinerThread { /// Return None if we couldn't build a block for whatever reason. fn mine_block(&mut self) -> Option { debug!("block miner thread ID is {:?}", thread::current().id()); - super::fault_injection_long_tenure(); + neon_node::fault_injection_long_tenure(); let burn_db_path = self.config.get_burn_db_file_path(); - let stacks_chainstate_path = self.config.get_chainstate_path_str(); - - let cost_estimator = self - .config - .make_cost_estimator() - .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = self - .config - .make_cost_metric() - .unwrap_or_else(|| Box::new(UnitMetric)); // NOTE: read-write access is needed in order to be able to query the recipient set. 
// This is an artifact of the way the MARF is built (see #1449) @@ -385,17 +372,13 @@ impl BlockMinerThread { SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - let mut chain_state = super::open_chainstate_with_faults(&self.config) + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); - let mut mem_pool = MemPoolDB::open( - self.config.is_mainnet(), - self.config.burnchain.chain_id, - &stacks_chainstate_path, - cost_estimator, - metric, - ) - .expect("Database failure opening mempool"); + let mut mem_pool = self + .config + .connect_mempool_db() + .expect("Database failure opening mempool"); let target_epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 9f2a37c50d..762aa45eda 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -33,10 +33,10 @@ use stacks::net::p2p::PeerNetwork; use stacks::net::RPCHandlerArgs; use stacks_common::util::hash::Sha256Sum; -use super::open_chainstate_with_faults; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::{Globals, RelayerDirective}; -use crate::run_loop::nakamoto::RunLoop; +use crate::nakamoto_node::relayer::RelayerDirective; +use crate::neon_node::open_chainstate_with_faults; +use crate::run_loop::nakamoto::{Globals, RunLoop}; use crate::{Config, EventDispatcher}; /// Thread that runs the network state machine, 
handling both p2p and http requests. @@ -44,17 +44,17 @@ pub struct PeerThread { /// Node config config: Config, /// instance of the peer network. Made optional in order to trick the borrow checker. - net: Option, + net: PeerNetwork, /// handle to global inter-thread comms globals: Globals, /// how long to wait for network messages on each poll, in millis poll_timeout: u64, - /// handle to the sortition DB (optional so we can take/replace it) - sortdb: Option, - /// handle to the chainstate DB (optional so we can take/replace it) - chainstate: Option, - /// handle to the mempool DB (optional so we can take/replace it) - mempool: Option, + /// handle to the sortition DB + sortdb: SortitionDB, + /// handle to the chainstate DB + chainstate: StacksChainState, + /// handle to the mempool DB + mempool: MemPoolDB, /// buffer of relayer commands with block data that couldn't be sent to the relayer just yet /// (i.e. due to backpressure). We track this separately, instead of just using a bigger /// channel, because we need to know when backpressure occurs in order to throttle the p2p @@ -141,28 +141,6 @@ impl PeerThread { info!("P2P thread exit!"); } - /// set up the mempool DB connection - pub fn connect_mempool_db(config: &Config) -> MemPoolDB { - // create estimators, metric instances for RPC handler - let cost_estimator = config - .make_cost_estimator() - .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = config - .make_cost_metric() - .unwrap_or_else(|| Box::new(UnitMetric)); - - let mempool = MemPoolDB::open( - config.is_mainnet(), - config.burnchain.chain_id, - &config.get_chainstate_path_str(), - cost_estimator, - metric, - ) - .expect("Database failure opening mempool"); - - mempool - } - /// Instantiate the p2p thread. /// Binds the addresses in the config (which may panic if the port is blocked). 
/// This is so the node will crash "early" before any new threads start if there's going to be @@ -183,7 +161,9 @@ impl PeerThread { mut net: PeerNetwork, ) -> Self { let config = config.clone(); - let mempool = Self::connect_mempool_db(&config); + let mempool = config + .connect_mempool_db() + .expect("FATAL: database failure opening mempool"); let burn_db_path = config.get_burn_db_file_path(); let sortdb = SortitionDB::open(&burn_db_path, false, pox_constants) @@ -208,12 +188,12 @@ impl PeerThread { PeerThread { config, - net: Some(net), + net, globals, poll_timeout, - sortdb: Some(sortdb), - chainstate: Some(chainstate), - mempool: Some(mempool), + sortdb, + chainstate, + mempool, results_with_data: VecDeque::new(), num_p2p_state_machine_passes: 0, num_inv_sync_passes: 0, @@ -222,50 +202,6 @@ impl PeerThread { } } - /// Do something with mutable references to the mempool, sortdb, and chainstate - /// Fools the borrow checker. - /// NOT COMPOSIBLE - fn with_chainstate(&mut self, func: F) -> R - where - F: FnOnce(&mut PeerThread, &mut SortitionDB, &mut StacksChainState, &mut MemPoolDB) -> R, - { - let mut sortdb = self.sortdb.take().expect("BUG: sortdb already taken"); - let mut chainstate = self - .chainstate - .take() - .expect("BUG: chainstate already taken"); - let mut mempool = self.mempool.take().expect("BUG: mempool already taken"); - - let res = func(self, &mut sortdb, &mut chainstate, &mut mempool); - - self.sortdb = Some(sortdb); - self.chainstate = Some(chainstate); - self.mempool = Some(mempool); - - res - } - - /// Get an immutable ref to the inner network. - /// DO NOT USE WITHIN with_network() - fn get_network(&self) -> &PeerNetwork { - self.net.as_ref().expect("BUG: did not replace net") - } - - /// Do something with mutable references to the network. - /// Fools the borrow checker. - /// NOT COMPOSIBLE. 
DO NOT CALL THIS OR get_network() IN func - fn with_network(&mut self, func: F) -> R - where - F: FnOnce(&mut PeerThread, &mut PeerNetwork) -> R, - { - let mut net = self.net.take().expect("BUG: net already taken"); - - let res = func(self, &mut net); - - self.net = Some(net); - res - } - /// Run one pass of the p2p/http state machine /// Return true if we should continue running passes; false if not pub fn run_one_pass( @@ -280,12 +216,12 @@ impl PeerThread { // initial block download? let ibd = self.globals.sync_comms.get_ibd(); let download_backpressure = self.results_with_data.len() > 0; - let poll_ms = if !download_backpressure && self.get_network().has_more_downloads() { + let poll_ms = if !download_backpressure && self.net.has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( "P2P: backpressure: {}, more downloads: {}", download_backpressure, - self.get_network().has_more_downloads() + self.net.has_more_downloads() ); 1 } else { @@ -293,15 +229,11 @@ impl PeerThread { }; // do one pass - let p2p_res = self.with_chainstate(|p2p_thread, sortdb, chainstate, mempool| { + let p2p_res = { // NOTE: handler_args must be created such that it outlives the inner net.run() call and // doesn't ref anything within p2p_thread. 
let handler_args = RPCHandlerArgs { - exit_at_block_height: p2p_thread - .config - .burnchain - .process_exit_at_block_height - .clone(), + exit_at_block_height: self.config.burnchain.process_exit_at_block_height.clone(), genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) .unwrap(), event_observer: Some(event_dispatcher), @@ -310,21 +242,18 @@ impl PeerThread { fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), ..RPCHandlerArgs::default() }; - p2p_thread.with_network(|_, net| { - net.run( - indexer, - sortdb, - chainstate, - mempool, - dns_client_opt, - download_backpressure, - ibd, - poll_ms, - &handler_args, - ) - }) - }); - + self.net.run( + indexer, + &self.sortdb, + &mut self.chainstate, + &mut self.mempool, + dns_client_opt, + download_backpressure, + ibd, + poll_ms, + &handler_args, + ) + }; match p2p_res { Ok(network_result) => { let mut have_update = false; @@ -376,17 +305,13 @@ impl PeerThread { if let Err(e) = self.globals.relay_send.try_send(next_result) { debug!( "P2P: {:?}: download backpressure detected (bufferred {})", - &self.get_network().local_peer, + &self.net.local_peer, self.results_with_data.len() ); match e { TrySendError::Full(directive) => { - if let RelayerDirective::RunTenure(..) 
= directive { - // can drop this - } else { - // don't lose this data -- just try it again - self.results_with_data.push_front(directive); - } + // don't lose this data -- just try it again + self.results_with_data.push_front(directive); break; } TrySendError::Disconnected(_) => { diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 6aa4568d0b..04f04241e0 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -53,17 +53,35 @@ use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; use super::{ - fault_injection_skip_mining, open_chainstate_with_faults, BlockCommits, Config, - Error as NakamotoNodeError, EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE, + BlockCommits, Config, Error as NakamotoNodeError, EventDispatcher, Keychain, + BLOCK_PROCESSOR_STACK_SIZE, }; use crate::burnchains::BurnchainController; -use crate::globals::{Globals, RelayerDirective}; use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective}; -use crate::neon_node::LeaderKeyRegistrationState; -use crate::run_loop::nakamoto::RunLoop; +use crate::neon_node::{ + fault_injection_skip_mining, open_chainstate_with_faults, LeaderKeyRegistrationState, +}; +use crate::run_loop::nakamoto::{Globals, RunLoop}; use crate::run_loop::RegisteredKey; use crate::BitcoinRegtestController; +/// Command types for the Nakamoto relayer thread, issued to it by other threads +pub enum RelayerDirective { + /// Handle some new data that arrived on the network (such as blocks, transactions, and + 
HandleNetResult(NetworkResult), + /// A new burn block has been processed by the SortitionDB, check if this miner won sortition, + /// and if so, start the miner thread + ProcessedBurnBlock(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash), + /// Either a new burn block has been processed (without a miner active yet) or a + /// nakamoto tenure's first block has been processed, so the relayer should issue + /// a block commit + IssueBlockCommit(ConsensusHash, BlockHeaderHash), + /// Try to register a VRF public key + RegisterKey(BlockSnapshot), + /// Stop the relayer thread + Exit, +} + /// Relayer thread /// * accepts network results and stores blocks and microblocks /// * forwards new blocks, microblocks, and transactions to the p2p thread @@ -72,12 +90,12 @@ use crate::BitcoinRegtestController; pub struct RelayerThread { /// Node config pub(crate) config: Config, - /// Handle to the sortition DB (optional so we can take/replace it) - sortdb: Option, - /// Handle to the chainstate DB (optional so we can take/replace it) - chainstate: Option, - /// Handle to the mempool DB (optional so we can take/replace it) - mempool: Option, + /// Handle to the sortition DB + sortdb: SortitionDB, + /// Handle to the chainstate DB + chainstate: StacksChainState, + /// Handle to the mempool DB + mempool: MemPoolDB, /// Handle to global state and inter-thread communication channels pub(crate) globals: Globals, /// Authoritative copy of the keychain state @@ -167,9 +185,9 @@ impl RelayerThread { RelayerThread { config: config.clone(), - sortdb: Some(sortdb), - chainstate: Some(chainstate), - mempool: Some(mempool), + sortdb, + chainstate, + mempool, globals, keychain, burnchain: runloop.get_burnchain(), @@ -195,46 +213,6 @@ impl RelayerThread { } } - /// Get an immutible ref to the sortdb - pub fn sortdb_ref(&self) -> &SortitionDB { - self.sortdb - .as_ref() - .expect("FATAL: tried to access sortdb while taken") - } - - /// Get an immutible ref to the chainstate - pub fn 
chainstate_ref(&self) -> &StacksChainState { - self.chainstate - .as_ref() - .expect("FATAL: tried to access chainstate while it was taken") - } - - /// Fool the borrow checker into letting us do something with the chainstate databases. - /// DOES NOT COMPOSE -- do NOT call this, or self.sortdb_ref(), or self.chainstate_ref(), within - /// `func`. You will get a runtime panic. - pub fn with_chainstate(&mut self, func: F) -> R - where - F: FnOnce(&mut RelayerThread, &mut SortitionDB, &mut StacksChainState, &mut MemPoolDB) -> R, - { - let mut sortdb = self - .sortdb - .take() - .expect("FATAL: tried to take sortdb while taken"); - let mut chainstate = self - .chainstate - .take() - .expect("FATAL: tried to take chainstate while taken"); - let mut mempool = self - .mempool - .take() - .expect("FATAL: tried to take mempool while taken"); - let res = func(self, &mut sortdb, &mut chainstate, &mut mempool); - self.sortdb = Some(sortdb); - self.chainstate = Some(chainstate); - self.mempool = Some(mempool); - res - } - /// have we waited for the right conditions under which to start mining a block off of our /// chain tip? 
pub fn has_waited_for_latest_blocks(&self) -> bool { @@ -286,21 +264,19 @@ impl RelayerThread { signal_mining_blocked(self.globals.get_miner_status()); } - let net_receipts = self.with_chainstate(|relayer_thread, sortdb, chainstate, mempool| { - relayer_thread - .relayer - .process_network_result( - &relayer_thread.local_peer, - &mut net_result, - sortdb, - chainstate, - mempool, - relayer_thread.globals.sync_comms.get_ibd(), - Some(&relayer_thread.globals.coord_comms), - Some(&relayer_thread.event_dispatcher), - ) - .expect("BUG: failure processing network results") - }); + let net_receipts = self + .relayer + .process_network_result( + &self.local_peer, + &mut net_result, + &mut self.sortdb, + &mut self.chainstate, + &mut self.mempool, + self.globals.sync_comms.get_ibd(), + Some(&self.globals.coord_comms), + Some(&self.event_dispatcher), + ) + .expect("BUG: failure processing network results"); if net_receipts.num_new_blocks > 0 || net_receipts.num_new_confirmed_microblocks > 0 { // if we received any new block data that could invalidate our view of the chain tip, @@ -318,7 +294,7 @@ impl RelayerThread { let num_unconfirmed_microblock_tx_receipts = net_receipts.processed_unconfirmed_state.receipts.len(); if num_unconfirmed_microblock_tx_receipts > 0 { - if let Some(unconfirmed_state) = self.chainstate_ref().unconfirmed_state.as_ref() { + if let Some(unconfirmed_state) = self.chainstate.unconfirmed_state.as_ref() { let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); self.event_dispatcher.process_new_microblocks( canonical_tip, @@ -336,16 +312,14 @@ impl RelayerThread { } // synchronize unconfirmed tx index to p2p thread - self.with_chainstate(|relayer_thread, _sortdb, chainstate, _mempool| { - relayer_thread.globals.send_unconfirmed_txs(chainstate); - }); + self.globals.send_unconfirmed_txs(&self.chainstate); // resume mining if we blocked it, and if we've done the requisite download // passes self.last_network_download_passes = 
net_result.num_download_passes; self.last_network_inv_passes = net_result.num_inv_sync_passes; if self.has_waited_for_latest_blocks() { - debug!("Relayer: did a download pass, so unblocking mining"); + info!("Relayer: did a download pass, so unblocking mining"); signal_mining_ready(self.globals.get_miner_status()); } } @@ -359,10 +333,9 @@ impl RelayerThread { burn_hash: BurnchainHeaderHash, committed_index_hash: StacksBlockId, ) -> MinerDirective { - let sn = - SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), &consensus_hash) - .expect("FATAL: failed to query sortition DB") - .expect("FATAL: unknown consensus hash"); + let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: unknown consensus hash"); self.globals.set_last_sortition(sn.clone()); @@ -423,11 +396,10 @@ impl RelayerThread { // already in-flight return; } - let cur_epoch = - SortitionDB::get_stacks_epoch(self.sortdb_ref().conn(), burn_block.block_height) - .expect("FATAL: failed to query sortition DB") - .expect("FATAL: no epoch defined") - .epoch_id; + let cur_epoch = SortitionDB::get_stacks_epoch(self.sortdb.conn(), burn_block.block_height) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: no epoch defined") + .epoch_id; let (vrf_pk, _) = self.keychain.make_vrf_keypair(burn_block.block_height); let burnchain_tip_consensus_hash = &burn_block.consensus_hash; let miner_pkh = self.keychain.get_nakamoto_pkh(); @@ -464,24 +436,19 @@ impl RelayerThread { target_ch: &ConsensusHash, target_bh: &BlockHeaderHash, ) -> Result<(BlockSnapshot, StacksEpochId, LeaderBlockCommitOp), NakamotoNodeError> { - let chain_state = self - .chainstate - .as_mut() - .expect("FATAL: Failed to load chain state"); - let sort_db = self.sortdb.as_mut().expect("FATAL: Failed to load sortdb"); - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) + let sort_tip = 
SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?; let parent_vrf_proof = - NakamotoChainState::get_block_vrf_proof(chain_state.db(), &target_ch) + NakamotoChainState::get_block_vrf_proof(self.chainstate.db(), &target_ch) .map_err(|_e| NakamotoNodeError::ParentNotFound)? .unwrap_or_else(|| VRFProof::empty()); // let's figure out the recipient set! let recipients = get_next_recipients( &sort_tip, - chain_state, - sort_db, + &mut self.chainstate, + &mut self.sortdb, &self.burnchain, &OnChainRewardSetProvider(), self.config.node.always_use_affirmation_maps, @@ -492,7 +459,7 @@ impl RelayerThread { })?; let block_header = - NakamotoChainState::get_block_header_by_consensus_hash(chain_state.db(), target_ch) + NakamotoChainState::get_block_header_by_consensus_hash(self.chainstate.db(), target_ch) .map_err(|e| { error!("Relayer: Failed to get block header for parent tenure: {e:?}"); NakamotoNodeError::ParentNotFound @@ -511,14 +478,14 @@ impl RelayerThread { } let Ok(Some(parent_sortition)) = - SortitionDB::get_block_snapshot_consensus(sort_db.conn(), target_ch) + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), target_ch) else { error!("Relayer: Failed to lookup the block snapshot of parent tenure ID"; "tenure_consensus_hash" => %target_ch); return Err(NakamotoNodeError::ParentNotFound); }; let Ok(Some(target_epoch)) = - SortitionDB::get_stacks_epoch(sort_db.conn(), sort_tip.block_height + 1) + SortitionDB::get_stacks_epoch(self.sortdb.conn(), sort_tip.block_height + 1) else { error!("Relayer: Failed to lookup its epoch"; "target_height" => sort_tip.block_height + 1); return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); @@ -526,7 +493,7 @@ impl RelayerThread { let parent_block_burn_height = parent_sortition.block_height; let Ok(Some(parent_winning_tx)) = SortitionDB::get_block_commit( - sort_db.conn(), + self.sortdb.conn(), &parent_sortition.winning_block_txid, 
&parent_sortition.sortition_id, ) else { @@ -621,7 +588,7 @@ impl RelayerThread { } let burn_header_hash = last_burn_block.burn_header_hash.clone(); - let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) + let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); @@ -779,8 +746,7 @@ impl RelayerThread { self.globals.get_leader_key_registration_state(), LeaderKeyRegistrationState::Inactive ) { - let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) - else { + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) else { warn!("Failed to fetch sortition tip while needing to register VRF key"); return None; }; @@ -796,8 +762,7 @@ impl RelayerThread { } // has there been a new sortition - let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) - else { + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) else { return None; }; @@ -813,12 +778,11 @@ impl RelayerThread { true }; - let Ok(Some(chain_tip_header)) = NakamotoChainState::get_canonical_block_header( - self.chainstate_ref().db(), - self.sortdb_ref(), - ) else { + let Ok(Some(chain_tip_header)) = + NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &self.sortdb) + else { info!("No known canonical tip, will issue a genesis block commit"); - return Some(RelayerDirective::NakamotoTenureStartProcessed( + return Some(RelayerDirective::IssueBlockCommit( FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, )); @@ -827,7 +791,7 @@ impl RelayerThread { // get the starting block of the chain tip's tenure let Ok(Some(chain_tip_tenure_start)) = NakamotoChainState::get_block_header_by_consensus_hash( - self.chainstate_ref().db(), + self.chainstate.db(), &chain_tip_header.consensus_hash, ) 
else { @@ -849,7 +813,7 @@ impl RelayerThread { }; if should_commit { - Some(RelayerDirective::NakamotoTenureStartProcessed( + Some(RelayerDirective::IssueBlockCommit( chain_tip_header.consensus_hash, chain_tip_header.anchored_header.block_hash(), )) @@ -924,10 +888,10 @@ impl RelayerThread { debug!("Relayer: directive Registered VRF key"); true } - // ProcessTenure directives correspond to a new sortition occurring. + // ProcessedBurnBlock directives correspond to a new sortition perhaps occurring. // relayer should invoke `handle_sortition` to determine if they won the sortition, // and to start their miner, or stop their miner if an active tenure is now ending - RelayerDirective::ProcessTenure(consensus_hash, burn_hash, block_header_hash) => { + RelayerDirective::ProcessedBurnBlock(consensus_hash, burn_hash, block_header_hash) => { if !self.is_miner { return true; } @@ -940,9 +904,8 @@ impl RelayerThread { info!("Relayer: directive Processed tenures"); res } - // NakamotoTenureStartProcessed directives mean that a new tenure start has been processed - // These are triggered by the relayer waking up, seeing a new consensus hash *and* a new first tenure block - RelayerDirective::NakamotoTenureStartProcessed(consensus_hash, block_hash) => { + // These are triggered by the relayer waking up, seeing a new consensus hash *or* a new first tenure block + RelayerDirective::IssueBlockCommit(consensus_hash, block_hash) => { if !self.is_miner { return true; } @@ -953,11 +916,6 @@ impl RelayerThread { debug!("Relayer: Nakamoto Tenure Start"); true } - RelayerDirective::RunTenure(..) => { - // No Op: the nakamoto node does not use the RunTenure directive to control its - // miner thread. 
- true - } RelayerDirective::Exit => false, }; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index a3821fae2b..284d63a1c3 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -206,7 +206,7 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::{Globals, RelayerDirective}; +use crate::globals::{NeonGlobals as Globals, RelayerDirective}; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; use crate::ChainTip; @@ -304,71 +304,59 @@ pub struct StacksNode { /// Fault injection logic to artificially increase the length of a tenure. /// Only used in testing #[cfg(test)] -fn fault_injection_long_tenure() { +pub(crate) fn fault_injection_long_tenure() { // simulated slow block - match std::env::var("STX_TEST_SLOW_TENURE") { - Ok(tenure_str) => match tenure_str.parse::() { - Ok(tenure_time) => { - info!( - "Fault injection: sleeping for {} milliseconds to simulate a long tenure", - tenure_time - ); - stacks_common::util::sleep_ms(tenure_time); - } - Err(_) => { - error!("Parse error for STX_TEST_SLOW_TENURE"); - panic!(); - } - }, - _ => {} - } + let Ok(tenure_str) = std::env::var("STX_TEST_SLOW_TENURE") else { + return; + }; + let Ok(tenure_time) = tenure_str.parse::() else { + error!("Parse error for STX_TEST_SLOW_TENURE"); + panic!(); + }; + info!( + "Fault injection: sleeping for {} milliseconds to simulate a long tenure", + tenure_time + ); + stacks_common::util::sleep_ms(tenure_time); } #[cfg(not(test))] -fn fault_injection_long_tenure() {} +pub(crate) fn fault_injection_long_tenure() {} /// Fault injection to skip mining in this bitcoin block height /// Only used in testing #[cfg(test)] -fn fault_injection_skip_mining(rpc_bind: &str, target_burn_height: u64) -> bool { - match std::env::var("STACKS_DISABLE_MINER") { - Ok(disable_heights) => 
{ - let disable_schedule: serde_json::Value = - serde_json::from_str(&disable_heights).unwrap(); - let disable_schedule = disable_schedule.as_array().unwrap(); - for disabled in disable_schedule { - let target_miner_rpc_bind = disabled - .get("rpc_bind") - .unwrap() - .as_str() - .unwrap() - .to_string(); - if target_miner_rpc_bind != rpc_bind { - continue; - } - let target_block_heights = disabled.get("blocks").unwrap().as_array().unwrap(); - for target_block_value in target_block_heights { - let target_block = target_block_value.as_i64().unwrap() as u64; - if target_block == target_burn_height { - return true; - } - } - } - return false; +pub(crate) fn fault_injection_skip_mining(rpc_bind: &str, target_burn_height: u64) -> bool { + let Ok(disable_heights) = std::env::var("STACKS_DISABLE_MINER") else { + return false; + }; + let disable_schedule: serde_json::Value = serde_json::from_str(&disable_heights).unwrap(); + let disable_schedule = disable_schedule.as_array().unwrap(); + for disabled in disable_schedule { + let target_miner_rpc_bind = disabled.get("rpc_bind").unwrap().as_str().unwrap(); + if target_miner_rpc_bind != rpc_bind { + continue; } - Err(_) => { - return false; + let target_block_heights = disabled.get("blocks").unwrap().as_array().unwrap(); + for target_block_value in target_block_heights { + let target_block = u64::try_from(target_block_value.as_i64().unwrap()).unwrap(); + if target_block == target_burn_height { + return true; + } } } + false } #[cfg(not(test))] -fn fault_injection_skip_mining(_rpc_bind: &str, _target_burn_height: u64) -> bool { +pub(crate) fn fault_injection_skip_mining(_rpc_bind: &str, _target_burn_height: u64) -> bool { false } /// Open the chainstate, and inject faults from the config file -fn open_chainstate_with_faults(config: &Config) -> Result { +pub(crate) fn open_chainstate_with_faults( + config: &Config, +) -> Result { let stacks_chainstate_path = config.get_chainstate_path_str(); let (mut chainstate, _) = 
StacksChainState::open( config.is_mainnet(), @@ -3635,7 +3623,7 @@ impl StacksNode { } /// Set up the AST size-precheck height, if configured - fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { + pub(crate) fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { info!( "Override burnchain height of {:?} to {}", @@ -3788,7 +3776,7 @@ impl StacksNode { } /// Set up the PeerNetwork, but do not bind it. - pub fn setup_peer_network( + pub(crate) fn setup_peer_network( config: &Config, atlas_config: &AtlasConfig, burnchain: Burnchain, diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 1b54c24f5a..e70784ce42 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index e6a835abb8..b3458a4ce6 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
use std::sync::atomic::AtomicBool; use std::sync::mpsc::sync_channel; use std::sync::{Arc, Mutex}; @@ -25,10 +40,9 @@ use stx_genesis::GenesisData; use super::RunLoopCallbacks; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::Globals; +use crate::globals::Globals as GenericGlobals; use crate::monitoring::start_serving_monitoring_metrics; -use crate::nakamoto_node::{StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; -use crate::neon::RunLoopCounter; +use crate::nakamoto_node::{self, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; use crate::node::{ get_account_balances, get_account_lockups, get_names, get_namespaces, use_test_genesis_chainstate, @@ -41,6 +55,7 @@ use crate::{ }; pub const STDERR: i32 = 2; +pub type Globals = GenericGlobals; #[cfg(test)] const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 30; @@ -116,22 +131,6 @@ impl RunLoop { self.coordinator_channels.as_ref().map(|x| x.1.clone()) } - pub fn get_blocks_processed_arc(&self) -> RunLoopCounter { - self.counters.blocks_processed.clone() - } - - pub fn submitted_commits(&self) -> RunLoopCounter { - self.counters.naka_submitted_commits.clone() - } - - pub fn submitted_vrfs(&self) -> RunLoopCounter { - self.counters.naka_submitted_vrfs.clone() - } - - pub fn mined_blocks(&self) -> RunLoopCounter { - self.counters.naka_mined_blocks.clone() - } - pub fn get_counters(&self) -> Counters { self.counters.clone() } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index c10c9b88c3..cffcd1aa10 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -31,7 +31,7 @@ use stx_genesis::GenesisData; use super::RunLoopCallbacks; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::Globals; +use crate::globals::NeonGlobals as Globals; use crate::monitoring::start_serving_monitoring_metrics; use crate::neon_node::{StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; use 
crate::node::{ diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ad9c473992..2b4fdfa540 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; @@ -11,13 +26,11 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::db::StacksChainState; use stacks::core::{ - MemPoolDB, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, + StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, }; -use stacks::cost_estimates::metrics::UnitMetric; -use stacks::cost_estimates::UnitEstimator; use stacks_common::address::AddressHashMode; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksAddress; @@ -411,14 +424,9 @@ fn simple_neon_integration() { .unwrap() .unwrap(); - let mut mempool = MemPoolDB::open( - naka_conf.is_mainnet(), - naka_conf.burnchain.chain_id, - &naka_conf.get_chainstate_path_str(), - Box::new(UnitEstimator), - Box::new(UnitMetric), - ) - .expect("Database failure opening mempool"); + let mut mempool = naka_conf + .connect_mempool_db() + .expect("Database failure opening mempool"); mempool .submit_raw( From b84b483fa5089c755dea1f630d6c8d5610fecfd0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sun, 10 Dec 2023 09:58:40 -0600 Subject: [PATCH 24/41] chore: comments, cleanup unused functions --- testnet/stacks-node/src/globals.rs | 10 +- testnet/stacks-node/src/nakamoto_node.rs | 59 ++++++------ testnet/stacks-node/src/nakamoto_node/peer.rs | 4 +- .../stacks-node/src/nakamoto_node/relayer.rs | 91 ++++++++----------- testnet/stacks-node/src/run_loop/nakamoto.rs | 62 ++++++------- 5 files changed, 102 insertions(+), 124 deletions(-) diff --git a/testnet/stacks-node/src/globals.rs 
b/testnet/stacks-node/src/globals.rs index 6c60e9a591..bd1560477c 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -35,7 +35,9 @@ pub enum RelayerDirective { Exit, } -/// Inter-thread communication structure, shared between threads +/// Inter-thread communication structure, shared between threads. This +/// is generic over the relayer communication channel: nakamoto and +/// neon nodes use different relayer directives. pub struct Globals { /// Last sortition processed last_sortition: Arc>>, @@ -100,6 +102,12 @@ impl Globals { } } + /// Does the inventory sync watcher think we still need to + /// catch up to the chain tip? + pub fn in_initial_block_download(&self) -> bool { + self.sync_comms.get_ibd() + } + /// Get the last sortition processed by the relayer thread pub fn get_last_sortition(&self) -> Option { self.last_sortition diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 0482bbfb05..3584a5d864 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -65,15 +65,18 @@ pub struct StacksNode { pub relayer_thread_handle: JoinHandle<()>, } -/// Types of errors that can arise during mining +/// Types of errors that can arise during Nakamoto StacksNode operation #[derive(Debug)] -enum Error { +pub enum Error { /// Can't find the block sortition snapshot for the chain tip SnapshotNotFoundForChainTip, /// The burnchain tip changed while this operation was in progress BurnchainTipChanged, + /// Error while spawning a subordinate thread SpawnError(std::io::Error), + /// Injected testing errors FaultInjection, + /// This miner was elected, but another sortition occurred before mining started MissedMiningOpportunity, /// Attempted to mine while there was no active VRF key NoVRFKeyActive, @@ -83,7 +86,10 @@ enum Error { UnexpectedChainState, /// A burnchain operation failed when submitting it to the burnchain 
BurnchainSubmissionFailed, + /// A new parent has been discovered since mining started NewParentDiscovered, + // The thread that we tried to send to has closed + ChannelClosed, } impl StacksNode { @@ -201,19 +207,14 @@ impl StacksNode { /// telling it to process the block and begin mining if this miner won. /// returns _false_ if the relayer hung up the channel. /// Called from the main thread. - pub fn relayer_burnchain_notify(&self) -> bool { + fn relayer_burnchain_notify(&self, snapshot: BlockSnapshot) -> Result<(), Error> { if !self.is_miner { - // node is a follower, don't try to process my own tenure. - return true; + // node is a follower, don't need to notify the relayer of these events. + return Ok(()); } - let Some(snapshot) = self.globals.get_last_sortition() else { - debug!("Tenure: Notify sortition! No last burn block"); - return true; - }; - - debug!( - "Tenure: Notify sortition!"; + info!( + "Tenure: Notify burn block!"; "consensus_hash" => %snapshot.consensus_hash, "burn_block_hash" => %snapshot.burn_header_hash, "winning_stacks_block_hash" => %snapshot.winning_stacks_block_hash, @@ -224,15 +225,14 @@ impl StacksNode { // unlike in neon_node, the nakamoto node should *always* notify the relayer of // a new burnchain block - return self - .globals + self.globals .relay_send .send(RelayerDirective::ProcessedBurnBlock( - snapshot.consensus_hash.clone(), - snapshot.parent_burn_header_hash.clone(), - snapshot.winning_stacks_block_hash.clone(), + snapshot.consensus_hash, + snapshot.parent_burn_header_hash, + snapshot.winning_stacks_block_hash, )) - .is_ok(); + .map_err(|_| Error::ChannelClosed) } /// Process a state coming from the burnchain, by extracting the validated KeyRegisterOp @@ -244,9 +244,7 @@ impl StacksNode { sortdb: &SortitionDB, sort_id: &SortitionId, ibd: bool, - ) -> Option { - let mut last_sortitioned_block = None; - + ) -> Result<(), Error> { let ic = sortdb.index_conn(); let block_snapshot = SortitionDB::get_block_snapshot(&ic, sort_id) 
@@ -268,14 +266,11 @@ impl StacksNode { "Received burnchain block #{} including block_commit_op (winning) - {} ({})", block_height, op.apparent_sender, &op.block_header_hash ); - last_sortitioned_block = Some((block_snapshot.clone(), op.vtxindex)); - } else { - if self.is_miner { - info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash - ); - } + } else if self.is_miner { + info!( + "Received burnchain block #{} including block_commit_op - {} ({})", + block_height, op.apparent_sender, &op.block_header_hash + ); } } @@ -296,8 +291,10 @@ impl StacksNode { "in_initial_block_download?" => ibd, ); - self.globals.set_last_sortition(block_snapshot); - last_sortitioned_block.map(|x| x.0) + self.globals.set_last_sortition(block_snapshot.clone()); + + // notify the relayer thread of the new sortition state + self.relayer_burnchain_notify(block_snapshot) } /// Join all inner threads diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 762aa45eda..376c437723 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -154,7 +154,7 @@ impl PeerThread { ) } - pub fn new_all( + fn new_all( globals: Globals, config: &Config, pox_constants: PoxConstants, @@ -204,7 +204,7 @@ impl PeerThread { /// Run one pass of the p2p/http state machine /// Return true if we should continue running passes; false if not - pub fn run_one_pass( + pub(crate) fn run_one_pass( &mut self, indexer: &B, dns_client_opt: Option<&mut DNSClient>, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 04f04241e0..68ca5d723a 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1,3 +1,4 @@ +use core::fmt; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 
2020-2023 Stacks Open Internet Foundation // @@ -38,8 +39,6 @@ use stacks::core::mempool::MemPoolDB; use stacks::core::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER, }; -use stacks::cost_estimates::metrics::UnitMetric; -use stacks::cost_estimates::UnitEstimator; use stacks::monitoring::increment_stx_blocks_mined_counter; use stacks::net::db::LocalPeer; use stacks::net::relay::Relayer; @@ -82,10 +81,23 @@ pub enum RelayerDirective { Exit, } +impl fmt::Display for RelayerDirective { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + RelayerDirective::HandleNetResult(_) => write!(f, "HandleNetResult"), + RelayerDirective::ProcessedBurnBlock(_, _, _) => write!(f, "ProcessedBurnBlock"), + RelayerDirective::IssueBlockCommit(_, _) => write!(f, "IssueBlockCommit"), + RelayerDirective::RegisterKey(_) => write!(f, "RegisterKey"), + RelayerDirective::Exit => write!(f, "Exit"), + } + } +} + /// Relayer thread /// * accepts network results and stores blocks and microblocks /// * forwards new blocks, microblocks, and transactions to the p2p thread -/// * processes burnchain state +/// * issues (and re-issues) block commits to participate as a miner +/// * processes burnchain state to determine if selected as a miner /// * if mining, runs the miner and broadcasts blocks (via a subordinate MinerThread) pub struct RelayerThread { /// Node config @@ -148,14 +160,12 @@ pub struct RelayerThread { } impl RelayerThread { - /// Instantiate off of a StacksNode, a runloop, and a relayer. + /// Instantiate relayer thread. 
+ /// Uses `runloop` to obtain globals, config, and `is_miner`` status pub fn new(runloop: &RunLoop, local_peer: LocalPeer, relayer: Relayer) -> RelayerThread { let config = runloop.config().clone(); let globals = runloop.get_globals(); let burn_db_path = config.get_burn_db_file_path(); - let stacks_chainstate_path = config.get_chainstate_path_str(); - let is_mainnet = config.is_mainnet(); - let chain_id = config.burnchain.chain_id; let is_miner = runloop.is_miner(); let sortdb = SortitionDB::open(&burn_db_path, true, runloop.get_burnchain().pox_constants) @@ -164,21 +174,9 @@ impl RelayerThread { let chainstate = open_chainstate_with_faults(&config).expect("FATAL: failed to open chainstate DB"); - let cost_estimator = config - .make_cost_estimator() - .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = config - .make_cost_metric() - .unwrap_or_else(|| Box::new(UnitMetric)); - - let mempool = MemPoolDB::open( - is_mainnet, - chain_id, - &stacks_chainstate_path, - cost_estimator, - metric, - ) - .expect("Database failure opening mempool"); + let mempool = config + .connect_mempool_db() + .expect("Database failure opening mempool"); let keychain = Keychain::default(config.node.seed.clone()); let bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone()); @@ -215,7 +213,7 @@ impl RelayerThread { /// have we waited for the right conditions under which to start mining a block off of our /// chain tip? 
- pub fn has_waited_for_latest_blocks(&self) -> bool { + fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place (self.min_network_download_passes <= self.last_network_download_passes // a network inv pass took place @@ -226,21 +224,6 @@ impl RelayerThread { || !self.config.miner.wait_for_block_download } - /// Return debug string for waiting for latest blocks - pub fn debug_waited_for_latest_blocks(&self) -> String { - format!( - "({} <= {} && {} <= {}) || {} + {} < {} || {}", - self.min_network_download_passes, - self.last_network_download_passes, - self.min_network_inv_passes, - self.last_network_inv_passes, - self.last_network_block_height_ts, - self.config.node.wait_time_for_blocks, - get_epoch_time_ms(), - self.config.miner.wait_for_block_download - ) - } - /// Handle a NetworkResult from the p2p/http state machine. Usually this is the act of /// * preprocessing and storing new blocks and microblocks /// * relaying blocks, microblocks, and transacctions @@ -503,7 +486,6 @@ impl RelayerThread { let parent_winning_vtxindex = parent_winning_tx.vtxindex; - // let burn_fee_cap = self.config.burnchain.burn_fee_cap; let burn_fee_cap = get_mining_spend_amount(self.globals.get_miner_status()); let sunset_burn = self.burnchain.expected_sunset_burn( sort_tip.block_height + 1, @@ -738,9 +720,6 @@ impl RelayerThread { return None; } - // TODO (nakamoto): the miner shouldn't issue either of these directives - // if we're still in IBD! - // do we need a VRF key registration? 
if matches!( self.globals.get_leader_key_registration_state(), @@ -869,11 +848,10 @@ impl RelayerThread { /// Top-level dispatcher pub fn handle_directive(&mut self, directive: RelayerDirective) -> bool { + info!("Relayer: handling directive"; "directive" => %directive); let continue_running = match directive { RelayerDirective::HandleNetResult(net_result) => { - debug!("Relayer: directive Handle network result"); self.process_network_result(net_result); - debug!("Relayer: directive Handled network result"); true } // RegisterKey directives mean that the relayer should try to register a new VRF key. @@ -882,10 +860,12 @@ impl RelayerThread { if !self.is_miner { return true; } - debug!("Relayer: directive Register VRF key"); + if self.globals.in_initial_block_download() { + info!("In initial block download, will not submit VRF registration"); + return true; + } self.rotate_vrf_and_register(&last_burn_block); self.globals.counters.bump_blocks_processed(); - debug!("Relayer: directive Registered VRF key"); true } // ProcessedBurnBlock directives correspond to a new sortition perhaps occurring. 
@@ -895,30 +875,33 @@ impl RelayerThread { if !self.is_miner { return true; } - info!("Relayer: directive Process tenures"); - let res = self.handle_sortition( + if self.globals.in_initial_block_download() { + debug!("In initial block download, will not check sortition for miner"); + return true; + } + self.handle_sortition( consensus_hash, burn_hash, StacksBlockId(block_header_hash.0), - ); - info!("Relayer: directive Processed tenures"); - res + ) } // These are triggered by the relayer waking up, seeing a new consensus hash *or* a new first tenure block RelayerDirective::IssueBlockCommit(consensus_hash, block_hash) => { if !self.is_miner { return true; } - debug!("Relayer: Nakamoto Tenure Start"); + if self.globals.in_initial_block_download() { + debug!("In initial block download, will not issue block commit"); + return true; + } if let Err(e) = self.issue_block_commit(consensus_hash, block_hash) { warn!("Relayer failed to issue block commit"; "err" => ?e); } - debug!("Relayer: Nakamoto Tenure Start"); true } RelayerDirective::Exit => false, }; - + debug!("Relayer: handled directive"; "continue_running" => continue_running); continue_running } } diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index b3458a4ce6..e429e79c91 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -38,7 +38,6 @@ use stacks_common::util::hash::Hash160; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stx_genesis::GenesisData; -use super::RunLoopCallbacks; use crate::burnchains::make_bitcoin_indexer; use crate::globals::Globals as GenericGlobals; use crate::monitoring::start_serving_monitoring_metrics; @@ -63,18 +62,18 @@ const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 30; #[cfg(not(test))] const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 300; -/// Coordinating a node running in neon mode. +/// Coordinating a node running in nakamoto mode. 
This runloop operates very similarly to the neon runloop. pub struct RunLoop { config: Config, - pub callbacks: RunLoopCallbacks, globals: Option, counters: Counters, coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>, should_keep_running: Arc, event_dispatcher: EventDispatcher, + #[allow(dead_code)] pox_watchdog: Option, // can't be instantiated until .start() is called - is_miner: Option, // not known until .start() is called - burnchain: Option, // not known until .start() is called + is_miner: Option, // not known until .start() is called + burnchain: Option, // not known until .start() is called pox_watchdog_comms: PoxSyncWatchdogComms, /// NOTE: this is duplicated in self.globals, but it needs to be accessible before globals is /// instantiated (namely, so the test framework can access it). @@ -105,7 +104,6 @@ impl RunLoop { config, globals: None, coordinator_channels: Some(channels), - callbacks: RunLoopCallbacks::new(), counters: counters.unwrap_or_else(|| Counters::new()), should_keep_running, event_dispatcher, @@ -117,7 +115,7 @@ impl RunLoop { } } - pub fn get_globals(&self) -> Globals { + pub(crate) fn get_globals(&self) -> Globals { self.globals .clone() .expect("FATAL: globals not instantiated") @@ -127,47 +125,37 @@ impl RunLoop { self.globals = Some(globals); } - pub fn get_coordinator_channel(&self) -> Option { + pub(crate) fn get_coordinator_channel(&self) -> Option { self.coordinator_channels.as_ref().map(|x| x.1.clone()) } - pub fn get_counters(&self) -> Counters { + pub(crate) fn get_counters(&self) -> Counters { self.counters.clone() } - pub fn config(&self) -> &Config { + pub(crate) fn config(&self) -> &Config { &self.config } - pub fn get_event_dispatcher(&self) -> EventDispatcher { + pub(crate) fn get_event_dispatcher(&self) -> EventDispatcher { self.event_dispatcher.clone() } - pub fn is_miner(&self) -> bool { + pub(crate) fn is_miner(&self) -> bool { self.is_miner.unwrap_or(false) } - pub fn 
get_pox_sync_comms(&self) -> PoxSyncWatchdogComms { - self.pox_watchdog_comms.clone() - } - - pub fn get_termination_switch(&self) -> Arc { + pub(crate) fn get_termination_switch(&self) -> Arc { self.should_keep_running.clone() } - pub fn get_burnchain(&self) -> Burnchain { + pub(crate) fn get_burnchain(&self) -> Burnchain { self.burnchain .clone() .expect("FATAL: tried to get runloop burnchain before calling .start()") } - pub fn get_pox_watchdog(&mut self) -> &mut PoxSyncWatchdog { - self.pox_watchdog - .as_mut() - .expect("FATAL: tried to get PoX watchdog before calling .start()") - } - - pub fn get_miner_status(&self) -> Arc> { + pub(crate) fn get_miner_status(&self) -> Arc> { self.miner_status.clone() } @@ -228,7 +216,7 @@ impl RunLoop { /// Boot up the stacks chainstate. /// Instantiate the chainstate and push out the boot receipts to observers /// This is only public so we can test it. - pub fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState { + fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState { let use_test_genesis_data = use_test_genesis_chainstate(&self.config); // load up genesis balances @@ -862,7 +850,14 @@ impl RunLoop { // wait for the p2p state-machine to do at least one pass debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); + // TODO: for now, we just set initial block download false. + // I think that the sync watchdog probably needs to change a fair bit + // for nakamoto. There may be some opportunity to refactor this runloop + // as well (e.g., the `mine_start` should be integrated with the + // watchdog so that there's just one source of truth about ibd), + // but I think all of this can be saved for post-neon work. 
let ibd = false; + self.pox_watchdog_comms.set_ibd(ibd); // calculate burnchain sync percentage let percent: f64 = if remote_chain_height > 0 { @@ -947,16 +942,11 @@ impl RunLoop { let sortition_id = &block.sortition_id; // Have the node process the new block, that can include, or not, a sortition. - node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd); - - // Now, tell the relayer to check if it won a sortition during this block, - // and, if so, to process and advertize the block. This is basically a - // no-op during boot-up. - // - // _this will block if the relayer's buffer is full_ - if !node.relayer_burnchain_notify() { - // relayer hung up, exit. - error!("Runloop: Block relayer and miner hung up, exiting."); + if let Err(e) = + node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd) + { + // relayer errored, exit. + error!("Runloop: Block relayer and miner errored, exiting."; "err" => ?e); return; } } From aa5ca438829a59bcbba7c392a95193a5e0ae2435 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 12 Dec 2023 09:48:21 -0600 Subject: [PATCH 25/41] chore: handle merge/rebase artifacts, address PR feedback --- Cargo.lock | 4 + stackslib/src/chainstate/nakamoto/miner.rs | 10 +- testnet/stacks-node/src/mockamoto.rs | 2 +- .../stacks-node/src/nakamoto_node/miner.rs | 56 +-- testnet/stacks-node/src/run_loop/nakamoto.rs | 339 +----------------- 5 files changed, 42 insertions(+), 369 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a90cb48536..b9f59752b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2354,6 +2354,8 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "p256k1" version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5afcf536d20c074ef45371ee9a654dcfc46fb2dde18ecc54ec30c936eb850fa2" dependencies = [ "bindgen", "bitvec", @@ -4711,6 +4713,8 @@ dependencies = [ [[package]] name = "wsts" version = "5.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c250118354755b4abb091a83cb8d659b511c0ae211ccdb3b1254e3db199cb86" dependencies = [ "aes-gcm 0.10.2", "bs58 0.5.0", diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 1f75cd55ac..5b511f6aa2 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -570,9 +570,13 @@ impl NakamotoBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); - let initial_txs: Vec<_> = - [new_tenure_info.tenure_change_tx.cloned(), - new_tenure_info.coinbase_tx.cloned()].into_iter().filter_map(|x| x).collect(); + let initial_txs: Vec<_> = [ + tenure_info.tenure_change_tx.clone(), + tenure_info.coinbase_tx.clone(), + ] + .into_iter() + .filter_map(|x| x) + .collect(); let (blocked, tx_events) = match StacksBlockBuilder::select_and_apply_transactions( &mut tenure_tx, &mut builder, diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 7b56c2afb8..373bcab8f2 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -797,7 +797,7 @@ impl MockamotoNode { let tenure_change_tx_payload = TransactionPayload::TenureChange(TenureChangePayload { tenure_consensus_hash: sortition_tip.consensus_hash.clone(), prev_tenure_consensus_hash: chain_tip_ch.clone(), - sortition_consensus_hash: sortition_tip.consensus_hash, + burn_view_consensus_hash: sortition_tip.consensus_hash, previous_tenure_end: parent_block_id, previous_tenure_blocks: 1, cause: TenureChangeCause::BlockFound, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ae2781ce7b..07efbedaca 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -21,7 +21,7 @@ use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, 
BurnchainParameters}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; -use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureStart}; +use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{ @@ -56,9 +56,8 @@ pub enum MinerDirective { } struct ParentTenureInfo { - #[allow(dead_code)] - parent_tenure_start: StacksBlockId, parent_tenure_blocks: u64, + parent_tenure_consensus_hash: ConsensusHash, } /// Metadata required for beginning a new tenure @@ -167,12 +166,12 @@ impl BlockMinerThread { self.burnchain.pox_constants.clone(), ) .expect("FATAL: could not open sortition DB"); - let sortition_handle = sort_db.index_handle_at_tip(); + let mut sortition_handle = sort_db.index_handle_at_tip(); let staging_tx = chain_state.staging_db_tx_begin()?; NakamotoChainState::accept_block( &chainstate_config, block, - &sortition_handle, + &mut sortition_handle, &staging_tx, &signer.aggregate_public_key, )?; @@ -194,6 +193,7 @@ impl BlockMinerThread { &mut self, nonce: u64, parent_block_id: StacksBlockId, + parent_tenure_consensus_hash: ConsensusHash, parent_tenure_blocks: u64, miner_pkh: Hash160, ) -> Option { @@ -203,17 +203,18 @@ impl BlockMinerThread { } let is_mainnet = self.config.is_mainnet(); let chain_id = self.config.burnchain.chain_id; - let tenure_change_tx_payload = TransactionPayload::TenureChange( - TenureChangePayload { - previous_tenure_end: parent_block_id, - previous_tenure_blocks: u32::try_from(parent_tenure_blocks) - .expect("FATAL: more than u32 blocks in a tenure"), - cause: TenureChangeCause::BlockFound, - pubkey_hash: miner_pkh, - signers: vec![], - }, - ThresholdSignature::mock(), - ); + let tenure_change_tx_payload = 
TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: self.burn_block.consensus_hash.clone(), + prev_tenure_consensus_hash: parent_tenure_consensus_hash, + burn_view_consensus_hash: self.burn_block.consensus_hash.clone(), + previous_tenure_end: parent_block_id, + previous_tenure_blocks: u32::try_from(parent_tenure_blocks) + .expect("FATAL: more than u32 blocks in a tenure"), + cause: TenureChangeCause::BlockFound, + pubkey_hash: miner_pkh, + signers: vec![], + signature: ThresholdSignature::mock(), + }); let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); tx_auth.set_origin_nonce(nonce); @@ -297,7 +298,7 @@ impl BlockMinerThread { return Some(ParentStacksBlockInfo { parent_tenure: Some(ParentTenureInfo { - parent_tenure_start: chain_tip.metadata.index_block_hash(), + parent_tenure_consensus_hash: chain_tip.metadata.consensus_hash, parent_tenure_blocks: 0, }), stacks_parent_header: chain_tip.metadata, @@ -404,6 +405,7 @@ impl BlockMinerThread { let tenure_change_tx = self.generate_tenure_change_tx( current_miner_nonce, parent_block_id, + par_tenure_info.parent_tenure_consensus_hash, par_tenure_info.parent_tenure_blocks, self.keychain.get_nakamoto_pkh(), )?; @@ -412,16 +414,15 @@ impl BlockMinerThread { target_epoch_id, vrf_proof.clone(), ); - Some(NakamotoTenureStart { - coinbase_tx, - // TODO (refactor): the nakamoto block builder doesn't use this VRF proof, - // it has to be included in the coinbase tx, which is an arg to the builder. - // we should probably just remove this from the nakamoto block builder. 
- vrf_proof: vrf_proof.clone(), - tenure_change_tx, - }) + NakamotoTenureInfo { + coinbase_tx: Some(coinbase_tx), + tenure_change_tx: Some(tenure_change_tx), + } } else { - None + NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + } }; parent_block_info.stacks_parent_header.microblock_tail = None; @@ -584,9 +585,10 @@ impl ParentStacksBlockInfo { } else { 1 }; + let parent_tenure_consensus_hash = parent_tenure_header.consensus_hash.clone(); Some(ParentTenureInfo { - parent_tenure_start: parent_tenure_id.clone(), parent_tenure_blocks, + parent_tenure_consensus_hash, }) } else { None diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index e429e79c91..83382f869e 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -25,9 +25,7 @@ use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::BlockSnapshot; use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers}; use stacks::chainstate::coordinator::{ - static_get_canonical_affirmation_map, static_get_heaviest_affirmation_map, - static_get_stacks_tip_affirmation_map, ChainsCoordinator, ChainsCoordinatorConfig, - CoordinatorCommunication, + ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication, }; use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; @@ -35,7 +33,6 @@ use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; use stacks_common::types::PublicKey; use stacks_common::util::hash::Hash160; -use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stx_genesis::GenesisData; use crate::burnchains::make_bitcoin_indexer; @@ -56,12 +53,6 @@ use crate::{ pub const STDERR: i32 = 2; pub type Globals = GenericGlobals; -#[cfg(test)] -const 
UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 30; - -#[cfg(not(test))] -const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 300; - /// Coordinating a node running in nakamoto mode. This runloop operates very similarly to the neon runloop. pub struct RunLoop { config: Config, @@ -389,332 +380,6 @@ impl RunLoop { ) } - /// Wake up and drive stacks block processing if there's been a PoX reorg. - /// Be careful not to saturate calls to announce new stacks blocks, because that will disable - /// mining (which would prevent a miner attempting to fix a hidden PoX anchor block from making - /// progress). - fn drive_pox_reorg_stacks_block_processing( - globals: &Globals, - config: &Config, - burnchain: &Burnchain, - sortdb: &SortitionDB, - last_stacks_pox_reorg_recover_time: &mut u128, - ) { - let delay = cmp::max( - config.node.chain_liveness_poll_time_secs, - cmp::max( - config.miner.first_attempt_time_ms, - config.miner.subsequent_attempt_time_ms, - ) / 1000, - ); - - if *last_stacks_pox_reorg_recover_time + (delay as u128) >= get_epoch_time_secs().into() { - // too soon - return; - } - - // compare stacks and heaviest AMs - let burnchain_db = burnchain - .open_burnchain_db(false) - .expect("FATAL: failed to open burnchain DB"); - - let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .expect("FATAL: could not read sortition DB"); - - let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); - - let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, - &indexer, - &burnchain_db, - sortdb, - &sn.sortition_id, - ) { - Ok(am) => am, - Err(e) => { - warn!("Failed to find heaviest affirmation map: {:?}", &e); - return; - } - }; - - let highest_sn = SortitionDB::get_highest_known_burn_chain_tip(sortdb.conn()) - .expect("FATAL: could not read sortition DB"); - - let canonical_burnchain_tip = burnchain_db - .get_canonical_chain_tip() - .expect("FATAL: could not read burnchain DB"); - - let 
sortition_tip_affirmation_map = - match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { - Ok(am) => am, - Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); - return; - } - }; - - let stacks_tip_affirmation_map = static_get_stacks_tip_affirmation_map( - &burnchain_db, - sortdb, - &sn.sortition_id, - &sn.canonical_stacks_tip_consensus_hash, - &sn.canonical_stacks_tip_hash, - ) - .expect("FATAL: could not query stacks DB"); - - if stacks_tip_affirmation_map.len() < heaviest_affirmation_map.len() - || stacks_tip_affirmation_map - .find_divergence(&heaviest_affirmation_map) - .is_some() - { - // the sortition affirmation map might also be inconsistent, so we'll need to fix that - // (i.e. the underlying sortitions) before we can fix the stacks fork - if sortition_tip_affirmation_map.len() < heaviest_affirmation_map.len() - || sortition_tip_affirmation_map - .find_divergence(&heaviest_affirmation_map) - .is_some() - { - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map); - globals.coord().announce_new_burn_block(); - } else if highest_sn.block_height == sn.block_height - && sn.block_height == canonical_burnchain_tip.block_height - { - // need to force an affirmation reorg because there will be no more burn block - // announcements. 
- debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, burn height {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, sn.block_height); - globals.coord().announce_new_burn_block(); - } - - debug!( - "Drive stacks block processing: possible PoX reorg (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map - ); - globals.coord().announce_new_stacks_block(); - } else { - debug!( - "Drive stacks block processing: no need (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map - ); - - // announce a new stacks block to force the chains coordinator - // to wake up anyways. this isn't free, so we have to make sure - // the chain-liveness thread doesn't wake up too often - globals.coord().announce_new_stacks_block(); - } - - *last_stacks_pox_reorg_recover_time = get_epoch_time_secs().into(); - } - - /// Wake up and drive sortition processing if there's been a PoX reorg. - /// Be careful not to saturate calls to announce new burn blocks, because that will disable - /// mining (which would prevent a miner attempting to fix a hidden PoX anchor block from making - /// progress). 
- /// - /// only call if no in ibd - fn drive_pox_reorg_burn_block_processing( - globals: &Globals, - config: &Config, - burnchain: &Burnchain, - sortdb: &SortitionDB, - chain_state_db: &StacksChainState, - last_burn_pox_reorg_recover_time: &mut u128, - last_announce_time: &mut u128, - ) { - let delay = cmp::max( - config.node.chain_liveness_poll_time_secs, - cmp::max( - config.miner.first_attempt_time_ms, - config.miner.subsequent_attempt_time_ms, - ) / 1000, - ); - - if *last_burn_pox_reorg_recover_time + (delay as u128) >= get_epoch_time_secs().into() { - // too soon - return; - } - - // compare sortition and heaviest AMs - let burnchain_db = burnchain - .open_burnchain_db(false) - .expect("FATAL: failed to open burnchain DB"); - - let highest_sn = SortitionDB::get_highest_known_burn_chain_tip(sortdb.conn()) - .expect("FATAL: could not read sortition DB"); - - let canonical_burnchain_tip = burnchain_db - .get_canonical_chain_tip() - .expect("FATAL: could not read burnchain DB"); - - if canonical_burnchain_tip.block_height > highest_sn.block_height { - // still processing sortitions - test_debug!( - "Drive burn block processing: still processing sortitions ({} > {})", - canonical_burnchain_tip.block_height, - highest_sn.block_height - ); - return; - } - - // NOTE: this could be lower than the highest_sn - let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .expect("FATAL: could not read sortition DB"); - - let sortition_tip_affirmation_map = - match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { - Ok(am) => am, - Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); - return; - } - }; - - let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); - - let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, - &indexer, - &burnchain_db, - sortdb, - &sn.sortition_id, - ) { - Ok(am) => am, - Err(e) => { - warn!("Failed to find heaviest 
affirmation map: {:?}", &e); - return; - } - }; - - let canonical_affirmation_map = match static_get_canonical_affirmation_map( - &burnchain, - &indexer, - &burnchain_db, - sortdb, - &chain_state_db, - &sn.sortition_id, - ) { - Ok(am) => am, - Err(e) => { - warn!("Failed to find canonical affirmation map: {:?}", &e); - return; - } - }; - - if sortition_tip_affirmation_map.len() < heaviest_affirmation_map.len() - || sortition_tip_affirmation_map - .find_divergence(&heaviest_affirmation_map) - .is_some() - || sn.block_height < highest_sn.block_height - { - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, {} = heaviest_affirmation_map.len() - && sortition_tip_affirmation_map.len() <= canonical_affirmation_map.len() - { - if let Some(divergence_rc) = - canonical_affirmation_map.find_divergence(&sortition_tip_affirmation_map) - { - if divergence_rc + 1 >= (heaviest_affirmation_map.len() as u64) { - // we have unaffirmed PoX anchor blocks that are not yet processed in the sortition history - debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {}, heaviest: {}, canonical: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, &canonical_affirmation_map); - globals.coord().announce_new_burn_block(); - globals.coord().announce_new_stacks_block(); - *last_announce_time = get_epoch_time_secs().into(); - } - } - } else { - debug!( - "Drive burn block processing: no need (sortition tip: {}, heaviest: {}, {} JoinHandle<()> { - let config = self.config.clone(); - let burnchain = self.get_burnchain(); - let sortdb = burnchain - .open_sortition_db(true) - .expect("FATAL: could not open sortition DB"); - - let (chain_state_db, _) = StacksChainState::open( - config.is_mainnet(), - config.burnchain.chain_id, - &config.get_chainstate_path_str(), - Some(config.node.get_marf_opts()), - ) - .unwrap(); - - let liveness_thread_handle = thread::Builder::new() - 
.name(format!("chain-liveness-{}", config.node.rpc_bind)) - .stack_size(BLOCK_PROCESSOR_STACK_SIZE) - .spawn(move || { - Self::drive_chain_liveness(globals, config, burnchain, sortdb, chain_state_db) - }) - .expect("FATAL: failed to spawn chain liveness thread"); - - liveness_thread_handle - } - /// Starts the node runloop. /// /// This function will block by looping infinitely. @@ -789,7 +454,6 @@ impl RunLoop { // Boot up the p2p network and relayer, and figure out how many sortitions we have so far // (it could be non-zero if the node is resuming from chainstate) let mut node = StacksNode::spawn(self, globals.clone(), relay_recv); - let liveness_thread = self.spawn_chain_liveness_thread(globals.clone()); // Wait for all pending sortitions to process let burnchain_db = burnchain_config @@ -839,7 +503,6 @@ impl RunLoop { globals.coord().stop_chains_coordinator(); coordinator_thread_handle.join().unwrap(); node.join(); - liveness_thread.join().unwrap(); info!("Exiting stacks-node"); break; From fc147df85b2a738d3418f86a9e303fb681d46e56 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 12 Dec 2023 10:21:33 -0600 Subject: [PATCH 26/41] remove unconfirmed tx handling in nakamoto RelayerThread --- .../stacks-node/src/nakamoto_node/relayer.rs | 21 ++----------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 68ca5d723a..8c83bb35b9 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1,4 +1,3 @@ -use core::fmt; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2023 Stacks Open Internet Foundation // @@ -14,6 +13,7 @@ use core::fmt; // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use core::fmt; use std::collections::HashMap; use std::sync::mpsc::{Receiver, RecvTimeoutError}; use std::thread::JoinHandle; @@ -261,7 +261,7 @@ impl RelayerThread { ) .expect("BUG: failure processing network results"); - if net_receipts.num_new_blocks > 0 || net_receipts.num_new_confirmed_microblocks > 0 { + if net_receipts.num_new_blocks > 0 { // if we received any new block data that could invalidate our view of the chain tip, // then stop mining until we process it debug!("Relayer: block mining to process newly-arrived blocks or microblocks"); @@ -274,29 +274,12 @@ impl RelayerThread { .process_new_mempool_txs(net_receipts.mempool_txs_added); } - let num_unconfirmed_microblock_tx_receipts = - net_receipts.processed_unconfirmed_state.receipts.len(); - if num_unconfirmed_microblock_tx_receipts > 0 { - if let Some(unconfirmed_state) = self.chainstate.unconfirmed_state.as_ref() { - let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); - self.event_dispatcher.process_new_microblocks( - canonical_tip, - net_receipts.processed_unconfirmed_state, - ); - } else { - warn!("Relayer: oops, unconfirmed state is uninitialized but there are microblock events"); - } - } - // Dispatch retrieved attachments, if any. 
if net_result.has_attachments() { self.event_dispatcher .process_new_attachments(&net_result.attachments); } - // synchronize unconfirmed tx index to p2p thread - self.globals.send_unconfirmed_txs(&self.chainstate); - // resume mining if we blocked it, and if we've done the requisite download // passes self.last_network_download_passes = net_result.num_download_passes; From 16bb6887f84a299b5bbbe4c469337169849ce6ed Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 12 Dec 2023 11:49:11 -0600 Subject: [PATCH 27/41] add epoch-3.0 burnchain configuration assertions --- testnet/stacks-node/src/config.rs | 25 +++++++++++++++++++ .../src/tests/nakamoto_integrations.rs | 6 ++--- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 526c2a90da..8b1f7a8578 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -624,6 +624,31 @@ impl Config { ); burnchain.pox_constants.sunset_end = sunset_end.into(); } + + // check if the Epoch 3.0 burnchain settings as configured are going to be valid. + let epochs = StacksEpoch::get_epochs( + self.burnchain.get_bitcoin_network().1, + self.burnchain.epochs.as_ref(), + ); + let Some(epoch_30) = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30) + .map(|epoch_ix| epochs[epoch_ix].clone()) + else { + // no Epoch 3.0, so just return + return; + }; + if burnchain.pox_constants.prepare_length < 3 { + panic!( + "FATAL: Nakamoto rules require a prepare length >= 3. Prepare length set to {}", + burnchain.pox_constants.prepare_length + ); + } + if burnchain.is_in_prepare_phase(epoch_30.start_height) { + panic!( + "FATAL: Epoch 3.0 must start *during* a reward phase, not a prepare phase. Epoch 3.0 start set to: {}. PoX Parameters: {:?}", + epoch_30.start_height, + &burnchain.pox_constants + ); + } } /// Load up a Burnchain and apply config settings to it. 
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 2b4fdfa540..0b1d79ffa3 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -105,13 +105,13 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch25, start_height: 6, - end_height: 220, + end_height: 221, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_5 }, StacksEpoch { epoch_id: StacksEpochId::Epoch30, - start_height: 220, + start_height: 221, end_height: STACKS_EPOCH_MAX, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 @@ -226,7 +226,7 @@ fn next_block_and_mine_commit( return Ok(true); } if commits_sent >= commits_before + 1 - && block_processed_time.elapsed() > Duration::from_secs(10) + && block_processed_time.elapsed() > Duration::from_secs(6) { return Ok(true); } From 81c163f954db6ed6ec5ab1f0b56a93ecae3c0469 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 10:17:29 -0500 Subject: [PATCH 28/41] chore: put set-aggregate-public-key call into NakamotoChainState::setup_block() and call it on every block --- stackslib/src/chainstate/nakamoto/miner.rs | 1 + stackslib/src/chainstate/nakamoto/mod.rs | 155 ++++++++++++--------- 2 files changed, 90 insertions(+), 66 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 5b511f6aa2..2a0799ae71 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -436,6 +436,7 @@ impl NakamotoBlockBuilder { &mut info.chainstate_tx, info.clarity_instance, burn_dbconn, + burn_dbconn.context.first_block_height, &burn_dbconn.context.pox_constants, info.parent_consensus_hash, info.parent_header_hash, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 076384d1b7..1301486eac 100644 
--- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1754,24 +1754,28 @@ impl NakamotoChainState { sortdb: &SortitionDB, sort_handle: &SortitionHandleConn, chainstate: &mut StacksChainState, - for_block_height: u64, + for_burn_block_height: u64, at_block_id: &StacksBlockId, ) -> Result { // Get the current reward cycle let Some(reward_cycle) = sort_handle .context .pox_constants - .block_height_to_reward_cycle(sort_handle.context.first_block_height, for_block_height) + .block_height_to_reward_cycle( + sort_handle.context.first_block_height, + for_burn_block_height, + ) else { // This should be unreachable, but we'll return an error just in case. let msg = format!( - "BUG: Failed to determine reward cycle of block height: {}.", - for_block_height + "BUG: Failed to determine reward cycle of burn block height: {}.", + for_burn_block_height ); warn!("{msg}"); return Err(ChainstateError::InvalidStacksBlock(msg)); }; + debug!("get-aggregate-public-key {} {}", at_block_id, reward_cycle); chainstate .get_aggregate_public_key_pox_4(sortdb, at_block_id, reward_cycle)? .ok_or_else(|| { @@ -2342,6 +2346,7 @@ impl NakamotoChainState { chainstate_tx: &'b mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, sortition_dbconn: &'b dyn SortitionDBRef, + first_block_height: u64, pox_constants: &PoxConstants, parent_consensus_hash: ConsensusHash, parent_header_hash: BlockHeaderHash, @@ -2488,6 +2493,16 @@ impl NakamotoChainState { ); } + if !clarity_tx.config.mainnet { + Self::set_aggregate_public_key( + &mut clarity_tx, + first_block_height, + pox_constants, + parent_burn_height.into(), + burn_header_height.into(), + ); + } + debug!( "Setup block: completed setup"; "parent_consensus_hash" => %parent_consensus_hash, @@ -2537,8 +2552,9 @@ impl NakamotoChainState { Ok(lockup_events) } - /// (TESTNET ONLY) Set the aggregate public key for verifying stacker signatures. 
- /// Do not call in mainnet + /// Set the aggregate public key for verifying stacker signatures. + /// TODO: rely on signer voting instead + /// DO NOT USE IN MAINNET pub(crate) fn set_aggregate_public_key( clarity_tx: &mut ClarityTx, first_block_height: u64, @@ -2548,6 +2564,7 @@ impl NakamotoChainState { ) { let mainnet = clarity_tx.config.mainnet; let chain_id = clarity_tx.config.chain_id; + assert!(!mainnet); let parent_reward_cycle = pox_constants .block_height_to_reward_cycle( @@ -2565,62 +2582,71 @@ impl NakamotoChainState { .expect("Burn block height exceeded u32"), ) .expect("FATAL: block height occurs before first block height"); - if parent_reward_cycle != my_reward_cycle { - // execute `set-aggregate-public-key` using `clarity-tx` - let aggregate_public_key = clarity_tx - .connection() - .with_readonly_clarity_env( - false, - chain_id, - ClarityVersion::Clarity2, - StacksAddress::burn_address(mainnet).into(), - None, - LimitedCostTracker::Free, - |vm_env| { - vm_env.execute_contract_allow_private( - &boot_code_id(POX_4_NAME, mainnet), - "get-aggregate-public-key", - &vec![SymbolicExpression::atom_value(Value::UInt(u128::from( - parent_reward_cycle, - )))], - true, - ) - }, - ) - .ok() - .map(|agg_key_value| { - Value::buff_from(agg_key_value.expect_buff(33)) - .expect("failed to reconstruct buffer") - }) - .expect("get-aggregate-public-key returned None"); - - clarity_tx.connection().as_transaction(|tx| { - tx.with_abort_callback( - |vm_env| { - vm_env.execute_in_env( - StacksAddress::burn_address(mainnet).into(), - None, - None, - |vm_env| { - vm_env.execute_contract_allow_private( - &boot_code_id(POX_4_NAME, mainnet), - "set-aggregate-public-key", - &vec![ - SymbolicExpression::atom_value(Value::UInt( - u128::from(my_reward_cycle), - )), - SymbolicExpression::atom_value(aggregate_public_key), - ], - false, - ) - }, - ) - }, - |_, _| false, - ) - .expect("FATAL: failed to set aggregate public key") - }); - } + + // carry forward the aggregate public 
key in the past reward cycle to the current + // reward cycle. + // TODO: replace with signer voting + debug!( + "Setting aggregate public key in reward cycle {}", + my_reward_cycle + ); + // execute `set-aggregate-public-key` using `clarity-tx` + let aggregate_public_key = clarity_tx + .connection() + .with_readonly_clarity_env( + mainnet, + chain_id, + ClarityVersion::Clarity2, + StacksAddress::burn_address(mainnet).into(), + None, + LimitedCostTracker::Free, + |vm_env| { + vm_env.execute_contract_allow_private( + &boot_code_id(POX_4_NAME, mainnet), + "get-aggregate-public-key", + &vec![SymbolicExpression::atom_value(Value::UInt(u128::from( + parent_reward_cycle, + )))], + true, + ) + }, + ) + .ok() + .map(|agg_key_value| { + let agg_key_opt = agg_key_value.expect_optional(); + let agg_key_buff = + agg_key_opt.expect("FATAL: aggregate public key not set in boot code"); + Value::buff_from(agg_key_buff.expect_buff(33)) + .expect("failed to reconstruct buffer") + }) + .expect("get-aggregate-public-key returned None"); + + clarity_tx.connection().as_transaction(|tx| { + tx.with_abort_callback( + |vm_env| { + vm_env.execute_in_env( + StacksAddress::burn_address(mainnet).into(), + None, + None, + |vm_env| { + vm_env.execute_contract_allow_private( + &boot_code_id(POX_4_NAME, mainnet), + "set-aggregate-public-key", + &vec![ + SymbolicExpression::atom_value(Value::UInt(u128::from( + my_reward_cycle, + ))), + SymbolicExpression::atom_value(aggregate_public_key), + ], + false, + ) + }, + ) + }, + |_, _| false, + ) + .expect("FATAL: failed to set aggregate public key") + }); } /// Append a Nakamoto Stacks block to the Stacks chain state. 
@@ -2792,6 +2818,7 @@ impl NakamotoChainState { chainstate_tx, clarity_instance, burn_dbconn, + first_block_height, pox_constants, parent_ch, parent_block_hash, @@ -2806,10 +2833,6 @@ impl NakamotoChainState { tenure_extend, )?; - if !block.is_first_mined() && !clarity_tx.config.mainnet { - Self::set_aggregate_public_key(&mut clarity_tx, first_block_height, pox_constants, parent_chain_tip.burn_header_height.into(), burn_header_height); - } - let starting_cost = clarity_tx.cost_so_far(); debug!( From 4ce8f729153b7fced4814d806458a6e7b905fe1e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 10:18:06 -0500 Subject: [PATCH 29/41] fix: reward cycle prepare phase length of 3 --- testnet/stacks-node/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 8b1f7a8578..5ab9a46e6a 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -316,7 +316,7 @@ impl ConfigFile { password: Some("blockstacksystem".into()), magic_bytes: Some("M3".into()), epochs: Some(epochs), - pox_prepare_length: Some(2), + pox_prepare_length: Some(3), pox_reward_length: Some(36), ..BurnchainConfigFile::default() }; From eb043c673e1fccadaa6b0f9c7f6334a940dd19bd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 10:18:26 -0500 Subject: [PATCH 30/41] fix: get aggregate public key from sortition tip parent height --- testnet/stacks-node/src/mockamoto.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index e414916eca..495120a4c0 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -828,7 +828,7 @@ impl MockamotoNode { let mut coinbase_tx_signer = StacksTransactionSigner::new(&coinbase_tx); coinbase_tx_signer.sign_origin(&self.miner_key).unwrap(); let coinbase_tx = 
coinbase_tx_signer.get_tx().unwrap(); - + let miner_pk = Secp256k1PublicKey::from_private(&self.miner_key); let miner_pk_hash = Hash160::from_node_public_key(&miner_pk); @@ -855,6 +855,12 @@ impl MockamotoNode { TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), tenure_change_tx_payload, ); + tenure_tx.chain_id = chain_id; + tenure_tx.set_origin_nonce(miner_nonce); + let mut tenure_tx_signer = StacksTransactionSigner::new(&tenure_tx); + tenure_tx_signer.sign_origin(&self.miner_key).unwrap(); + let tenure_tx = tenure_tx_signer.get_tx().unwrap(); + let pox_address = PoxAddress::Standard( StacksAddress::burn_address(false), Some(AddressHashMode::SerializeP2PKH), @@ -905,6 +911,7 @@ impl MockamotoNode { &mut chainstate_tx, clarity_instance, &sortdb_handle, + self.sortdb.first_block_height, &self.sortdb.pox_constants, chain_tip_ch.clone(), chain_tip_bh.clone(), @@ -918,7 +925,7 @@ impl MockamotoNode { parent_chain_length + 1, false, )?; - + let txs = vec![tenure_tx, coinbase_tx, stacks_stx_tx]; let _ = match StacksChainState::process_block_transactions( @@ -1021,16 +1028,15 @@ impl MockamotoNode { &block.header.consensus_hash, )? .ok_or(ChainstateError::DBError(DBError::NotFoundError))?; - // TODO: https://github.com/stacks-network/stacks-core/issues/4109 - // Update this to retrieve the last block in the last reward cycle rather than chain tip let aggregate_key_block_header = NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &self.sortdb)? 
.unwrap(); + let aggregate_public_key = NakamotoChainState::get_aggregate_public_key( &self.sortdb, &sortition_handle, &mut self.chainstate, - block_sn.block_height, + block_sn.block_height.saturating_sub(1), &aggregate_key_block_header.index_block_hash(), )?; aggregate_public_key @@ -1048,4 +1054,3 @@ impl MockamotoNode { Ok(chain_length) } } - From bc5f9f0930001f248dcbf334eb93244ba667fd30 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 10:18:58 -0500 Subject: [PATCH 31/41] chore: delete old test -- we can't set the aggregate public key via a tx --- testnet/stacks-node/src/mockamoto/tests.rs | 170 --------------------- 1 file changed, 170 deletions(-) diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs index 7a7f03365f..7282271e19 100644 --- a/testnet/stacks-node/src/mockamoto/tests.rs +++ b/testnet/stacks-node/src/mockamoto/tests.rs @@ -135,176 +135,6 @@ fn observe_100_blocks() { ); } -#[test] -fn observe_set_aggregate_tx() { - let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap(); - conf.node.mockamoto_time_ms = 10; - - let submitter_sk = StacksPrivateKey::from_seed(&[1]); - let submitter_addr = to_addr(&submitter_sk); - conf.add_initial_balance(submitter_addr.to_string(), 1_000); - - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - }); - - let mut mockamoto = MockamotoNode::new(&conf).unwrap(); - // Get the aggregate public key of the original reward cycle to compare against - let orig_key = mockamoto.self_signer.aggregate_public_key; - - let globals = mockamoto.globals.clone(); - - let mut mempool = PeerThread::connect_mempool_db(&conf); - let (mut chainstate, _) = StacksChainState::open( - conf.is_mainnet(), - conf.burnchain.chain_id, - &conf.get_chainstate_path_str(), - None, - ) - .unwrap(); 
- let burnchain = conf.get_burnchain(); - let sortdb = burnchain.open_sortition_db(true).unwrap(); - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(mockamoto.sortdb.conn()).unwrap(); - - let start = Instant::now(); - // Get the reward cycle of the sortition tip - let reward_cycle = mockamoto - .sortdb - .pox_constants - .block_height_to_reward_cycle( - mockamoto.sortdb.first_block_height, - sortition_tip.block_height, - ) - .expect( - format!( - "Failed to determine reward cycle of block height: {}", - sortition_tip.block_height - ) - .as_str(), - ); - - let node_thread = thread::Builder::new() - .name("mockamoto-main".into()) - .spawn(move || { - mockamoto.run(); - let aggregate_key_block_header = NakamotoChainState::get_canonical_block_header( - mockamoto.chainstate.db(), - &mockamoto.sortdb, - ) - .unwrap() - .unwrap(); - // Get the aggregate public key of the original reward cycle - let orig_aggregate_key = mockamoto - .chainstate - .get_aggregate_public_key_pox_4( - &mockamoto.sortdb, - &aggregate_key_block_header.index_block_hash(), - reward_cycle, - ) - .unwrap(); - // Get the aggregate public key of the next reward cycle that we manually overwrote - let new_aggregate_key = mockamoto - .chainstate - .get_aggregate_public_key_pox_4( - &mockamoto.sortdb, - &aggregate_key_block_header.index_block_hash(), - reward_cycle + 1, - ) - .unwrap(); - (orig_aggregate_key, new_aggregate_key) - }) - .expect("FATAL: failed to start mockamoto main thread"); - - // Create a "set-aggregate-public-key" tx to verify it sets correctly - let mut rng = OsRng::default(); - let x = Scalar::random(&mut rng); - let random_key = Point::from(x); - - let tx_fee = 200; - let aggregate_public_key = Value::buff_from(random_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - let aggregate_tx = make_contract_call( - &submitter_sk, - 0, - tx_fee, - &boot_code_addr(false), - POX_4_NAME, - "set-aggregate-public-key", - &[ - 
Value::UInt(u128::from(reward_cycle + 1)), - aggregate_public_key, - ], - ); - let aggregate_tx_hex = format!("0x{}", to_hex(&aggregate_tx)); - - // complete within 5 seconds or abort (we are only observing one block) - let completed = loop { - if Instant::now().duration_since(start) > Duration::from_secs(5) { - break false; - } - let latest_block = test_observer::get_blocks().pop(); - thread::sleep(Duration::from_secs(1)); - let Some(ref latest_block) = latest_block else { - info!("No block observed yet!"); - continue; - }; - let stacks_block_height = latest_block.get("block_height").unwrap().as_u64().unwrap(); - info!("Block height observed: {stacks_block_height}"); - - // Submit the aggregate tx for processing to update the aggregate public key - let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() - .unwrap(); - mempool - .submit_raw( - &mut chainstate, - &sortdb, - &tip.consensus_hash, - &tip.anchored_header.block_hash(), - aggregate_tx.clone(), - &ExecutionCost::max_value(), - &StacksEpochId::Epoch30, - ) - .unwrap(); - break true; - }; - - globals.signal_stop(); - - let (orig_aggregate_key, new_aggregate_key) = node_thread - .join() - .expect("Failed to join node thread to exit"); - - let aggregate_tx_included = test_observer::get_blocks() - .into_iter() - .find(|block_json| { - block_json["transactions"] - .as_array() - .unwrap() - .iter() - .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&aggregate_tx_hex)) - .is_some() - }) - .is_some(); - - assert!( - aggregate_tx_included, - "Mockamoto node failed to include the aggregate tx" - ); - - assert!( - completed, - "Mockamoto node failed to produce and announce its block before timeout" - ); - - // Did we set and retrieve the aggregate key correctly? 
- assert_eq!(orig_aggregate_key.unwrap(), orig_key); - assert_eq!(new_aggregate_key.unwrap(), random_key); -} - #[test] fn mempool_rpc_submit() { let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap(); From 21aa307f7d2f7158591970d1126dd3adf0dabed7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 10:19:17 -0500 Subject: [PATCH 32/41] chore: load aggregate public key from clarity --- .../stacks-node/src/nakamoto_node/miner.rs | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 07efbedaca..592de7817d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -30,6 +30,7 @@ use stacks::chainstate::stacks::{ TransactionPayload, TransactionVersion, }; use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; +use stacks::util_lib::db::Error as DBError; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::Hash160; @@ -167,13 +168,35 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); let mut sortition_handle = sort_db.index_handle_at_tip(); + let aggregate_public_key = if block.header.chain_length <= 1 { + signer.aggregate_public_key.clone() + } else { + let block_sn = SortitionDB::get_block_snapshot_consensus( + sortition_handle.conn(), + &block.header.consensus_hash, + )? + .ok_or(ChainstateError::DBError(DBError::NotFoundError))?; + let aggregate_key_block_header = + NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db)? 
+ .unwrap(); + + let aggregate_public_key = NakamotoChainState::get_aggregate_public_key( + &sort_db, + &sortition_handle, + &mut chain_state, + block_sn.block_height.saturating_sub(1), + &aggregate_key_block_header.index_block_hash(), + )?; + aggregate_public_key + }; + let staging_tx = chain_state.staging_db_tx_begin()?; NakamotoChainState::accept_block( &chainstate_config, block, &mut sortition_handle, &staging_tx, - &signer.aggregate_public_key, + &aggregate_public_key, )?; staging_tx.commit()?; Ok(()) From a4c2a7d128a1719e4f018df145f881841699a04c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 10:19:29 -0500 Subject: [PATCH 33/41] chore: set aggregate public key smart contract on bootup --- testnet/stacks-node/src/run_loop/nakamoto.rs | 53 ++++++++++++++++++-- 1 file changed, 48 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 83382f869e..eb8bbdc501 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -19,6 +19,10 @@ use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; use std::{cmp, thread}; +use clarity::boot_util::boot_code_id; +use clarity::vm::ast::ASTRules; +use clarity::vm::clarity::TransactionConnection; +use clarity::vm::ClarityVersion; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -27,12 +31,15 @@ use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorRece use stacks::chainstate::coordinator::{ ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication, }; -use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; +use stacks::chainstate::stacks::boot::{ + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, +}; +use stacks::chainstate::stacks::db::{ChainStateBootData, ClarityTx, 
StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; use stacks_common::types::PublicKey; -use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::{to_hex, Hash160}; use stx_genesis::GenesisData; use crate::burnchains::make_bitcoin_indexer; @@ -218,13 +225,49 @@ impl RunLoop { .map(|e| (e.address.clone(), e.amount)) .collect(); - // TODO (nakamoto-neon): check if we're trying to setup a self-signing network - // and set the right genesis data + let agg_pubkey_boot_callback = if let Some(self_signer) = self.config.self_signing() { + let agg_pub_key = to_hex(&self_signer.aggregate_public_key.compress().data); + info!("Mockamoto node setting agg public key"; "agg_pub_key" => &agg_pub_key); + let callback = Box::new(move |clarity_tx: &mut ClarityTx| { + let contract_content = format!( + "(define-read-only ({}) 0x{})", + BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key + ); + // NOTE: this defaults to a testnet address to prevent it from ever working on + // mainnet + let contract_id = boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false); + clarity_tx.connection().as_transaction(|clarity| { + let (ast, analysis) = clarity + .analyze_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &contract_content, + ASTRules::PrecheckSize, + ) + .unwrap(); + clarity + .initialize_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &ast, + &contract_content, + None, + |_, _| false, + ) + .unwrap(); + clarity.save_analysis(&contract_id, &analysis).unwrap(); + }) + }) as Box; + Some(callback) + } else { + warn!("Self-signing is not supported yet"); + None + }; // instantiate chainstate let mut boot_data = ChainStateBootData { initial_balances, - post_flight_callback: None, + post_flight_callback: agg_pubkey_boot_callback, first_burnchain_block_hash: burnchain_config.first_block_hash, 
first_burnchain_block_height: burnchain_config.first_block_height as u32, first_burnchain_block_timestamp: burnchain_config.first_block_timestamp, From 3785db73a8f9fc06f6d4fe57231c70e7124b485f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 12:45:04 -0500 Subject: [PATCH 34/41] fix: when searching for the aggregate public key, search _all prior reward cycles_ because integration tests can run in epoch 2.x for many reward cycles before the epoch 3 transition (and, the neon boot code sets the initial aggregate public key) --- .../chainstate/nakamoto/coordinator/tests.rs | 61 ++---- stackslib/src/chainstate/nakamoto/mod.rs | 190 ++++++++++-------- .../src/chainstate/nakamoto/tests/mod.rs | 6 +- stackslib/src/net/mod.rs | 48 +++++ stackslib/src/net/relay.rs | 2 - 5 files changed, 182 insertions(+), 125 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 578fd5d6a9..3e9231e614 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -47,7 +47,7 @@ use crate::net::relay::Relayer; use crate::net::test::{TestPeer, TestPeerConfig}; /// Bring a TestPeer into the Nakamoto Epoch -fn advance_to_nakamoto(peer: &mut TestPeer, aggregate_public_key: &Point) { +fn advance_to_nakamoto(peer: &mut TestPeer) { let mut peer_nonce = 0; let private_key = peer.config.private_key.clone(); let addr = StacksAddress::from_public_keys( @@ -70,13 +70,7 @@ fn advance_to_nakamoto(peer: &mut TestPeer, aggregate_public_key: &Point) { 12, 34, ); - let aggregate_tx: StacksTransaction = make_pox_4_aggregate_key( - &private_key, - 1, - sortition_height + 1, - aggregate_public_key, - ); - vec![stack_tx, aggregate_tx] + vec![stack_tx] } else { vec![] }; @@ -95,6 +89,7 @@ pub fn boot_nakamoto( aggregate_public_key: Point, ) -> TestPeer { let mut peer_config = TestPeerConfig::new(test_name, 0, 0); + 
peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); let private_key = peer_config.private_key.clone(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -117,19 +112,19 @@ pub fn boot_nakamoto( peer_config.burnchain.pox_constants.pox_4_activation_height = 31; let mut peer = TestPeer::new(peer_config); - advance_to_nakamoto(&mut peer, &aggregate_public_key); + advance_to_nakamoto(&mut peer); peer } /// Make a replay peer, used for replaying the blockchain -fn make_replay_peer<'a>(peer: &'a mut TestPeer<'a>, aggregate_public_key: &Point) -> TestPeer<'a> { +fn make_replay_peer<'a>(peer: &'a mut TestPeer<'a>) -> TestPeer<'a> { let mut replay_config = peer.config.clone(); replay_config.test_name = format!("{}.replay", &peer.config.test_name); replay_config.server_port = 0; replay_config.http_port = 0; let mut replay_peer = TestPeer::new(replay_config); - advance_to_nakamoto(&mut replay_peer, aggregate_public_key); + advance_to_nakamoto(&mut replay_peer); // sanity check let replay_tip = { @@ -244,7 +239,11 @@ fn replay_reward_cycle( #[test] fn test_simple_nakamoto_coordinator_bootup() { let mut test_signers = TestSigners::default(); - let mut peer = boot_nakamoto(function_name!(), vec![], test_signers.aggregate_public_key); + let mut peer = boot_nakamoto( + function_name!(), + vec![], + test_signers.aggregate_public_key.clone(), + ); let (burn_ops, mut tenure_change, miner_key) = peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); @@ -303,7 +302,7 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], - test_signers.aggregate_public_key, + test_signers.aggregate_public_key.clone(), ); let (burn_ops, mut tenure_change, miner_key) = @@ -376,7 +375,7 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { // replay the blocks and sortitions in random order, and verify that we still reach the chain // tip - let mut 
replay_peer = make_replay_peer(&mut peer, &test_signers.aggregate_public_key); + let mut replay_peer = make_replay_peer(&mut peer); replay_reward_cycle(&mut replay_peer, &[burn_ops], &blocks); let tip = { @@ -424,7 +423,7 @@ fn test_nakamoto_chainstate_getters() { let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], - test_signers.aggregate_public_key, + test_signers.aggregate_public_key.clone(), ); let sort_tip = { @@ -913,7 +912,7 @@ fn test_simple_nakamoto_coordinator_10_tenures_10_blocks() { let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], - test_signers.aggregate_public_key, + test_signers.aggregate_public_key.clone(), ); let mut all_blocks = vec![]; @@ -953,7 +952,6 @@ fn test_simple_nakamoto_coordinator_10_tenures_10_blocks() { // do a stx transfer in each block to a given recipient let recipient_addr = StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); - let aggregate_public_key = test_signers.aggregate_public_key.clone(); let blocks_and_sizes = peer.make_nakamoto_tenure( tenure_change_tx, coinbase_tx, @@ -973,14 +971,7 @@ fn test_simple_nakamoto_coordinator_10_tenures_10_blocks() { 1, &recipient_addr, ); - - let aggregate_tx = make_pox_4_aggregate_key( - &private_key, - account.nonce + 1, - 7 + i, - &aggregate_public_key, - ); - vec![stx_transfer, aggregate_tx] + vec![stx_transfer] } else { vec![] } @@ -1196,7 +1187,7 @@ fn test_simple_nakamoto_coordinator_10_tenures_10_blocks() { } // replay the blocks and sortitions in random order, and verify that we still reach the chain // tip - let mut replay_peer = make_replay_peer(&mut peer, &test_signers.aggregate_public_key); + let mut replay_peer = make_replay_peer(&mut peer); for (burn_ops, blocks) in rc_burn_ops.iter().zip(rc_blocks.iter()) { replay_reward_cycle(&mut replay_peer, burn_ops, blocks); } @@ -1241,7 +1232,7 @@ fn test_simple_nakamoto_coordinator_2_tenures_3_sortitions() { let mut peer = boot_nakamoto( 
function_name!(), vec![(addr.into(), 100_000_000)], - test_signers.aggregate_public_key, + test_signers.aggregate_public_key.clone(), ); let mut rc_burn_ops = vec![]; @@ -1530,7 +1521,7 @@ fn test_simple_nakamoto_coordinator_2_tenures_3_sortitions() { // replay the blocks and sortitions in random order, and verify that we still reach the chain // tip - let mut replay_peer = make_replay_peer(&mut peer, &test_signers.aggregate_public_key); + let mut replay_peer = make_replay_peer(&mut peer); replay_reward_cycle(&mut replay_peer, &rc_burn_ops, &all_blocks); let tip = { @@ -1569,7 +1560,7 @@ fn test_simple_nakamoto_coordinator_10_tenures_and_extensions_10_blocks() { let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], - test_signers.aggregate_public_key, + test_signers.aggregate_public_key.clone(), ); let mut all_blocks = vec![]; @@ -1598,7 +1589,6 @@ fn test_simple_nakamoto_coordinator_10_tenures_and_extensions_10_blocks() { // do a stx transfer in each block to a given recipient let recipient_addr = StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); - let aggregate_public_key = test_signers.aggregate_public_key.clone(); let blocks_and_sizes = peer.make_nakamoto_tenure( tenure_change_tx, coinbase_tx, @@ -1619,13 +1609,6 @@ fn test_simple_nakamoto_coordinator_10_tenures_and_extensions_10_blocks() { &recipient_addr, ); - let aggregate_tx = make_pox_4_aggregate_key( - &private_key, - account.nonce + 1, - 7 + i, - &aggregate_public_key, - ); - let last_block_opt = blocks_so_far .last() .as_ref() @@ -1642,7 +1625,7 @@ fn test_simple_nakamoto_coordinator_10_tenures_and_extensions_10_blocks() { miner.make_nakamoto_tenure_change(tenure_extension.clone()); txs.push(tenure_extension_tx); } - txs.append(&mut vec![stx_transfer, aggregate_tx]); + txs.append(&mut vec![stx_transfer]); txs } else { vec![] @@ -1792,7 +1775,7 @@ fn test_simple_nakamoto_coordinator_10_tenures_and_extensions_10_blocks() { // replay the blocks 
and sortitions in random order, and verify that we still reach the chain // tip - let mut replay_peer = make_replay_peer(&mut peer, &test_signers.aggregate_public_key); + let mut replay_peer = make_replay_peer(&mut peer); for (burn_ops, blocks) in rc_burn_ops.iter().zip(rc_blocks.iter()) { replay_reward_cycle(&mut replay_peer, burn_ops, blocks); } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 1301486eac..fc3a7b8f01 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1775,17 +1775,32 @@ impl NakamotoChainState { return Err(ChainstateError::InvalidStacksBlock(msg)); }; - debug!("get-aggregate-public-key {} {}", at_block_id, reward_cycle); - chainstate - .get_aggregate_public_key_pox_4(sortdb, at_block_id, reward_cycle)? - .ok_or_else(|| { - warn!( - "Failed to get aggregate public key"; - "block_id" => %at_block_id, - "reward_cycle" => reward_cycle, - ); - ChainstateError::InvalidStacksBlock("Failed to get aggregate public key".into()) - }) + // need to search back because the set-aggregate-public-key call only happens in nakamoto + // TODO: this will be removed once there's aggregate public key voting + for rc in (0..=reward_cycle).rev() { + debug!("get-aggregate-public-key {} {}", at_block_id, rc); + match chainstate.get_aggregate_public_key_pox_4(sortdb, at_block_id, rc)? 
{ + Some(agg_key) => { + return Ok(agg_key); + } + None => { + debug!( + "No aggregate public key set; trying in a lower cycle"; + "block_id" => %at_block_id, + "reward_cycle" => rc, + ); + continue; + } + } + } + warn!( + "Failed to get aggregate public key"; + "block_id" => %at_block_id, + "reward_cycle" => reward_cycle, + ); + Err(ChainstateError::InvalidStacksBlock( + "Failed to get aggregate public key".into(), + )) } /// Return the total ExecutionCost consumed during the tenure up to and including @@ -2498,7 +2513,6 @@ impl NakamotoChainState { &mut clarity_tx, first_block_height, pox_constants, - parent_burn_height.into(), burn_header_height.into(), ); } @@ -2559,21 +2573,12 @@ impl NakamotoChainState { clarity_tx: &mut ClarityTx, first_block_height: u64, pox_constants: &PoxConstants, - parent_burn_header_height: u64, burn_header_height: u64, ) { let mainnet = clarity_tx.config.mainnet; let chain_id = clarity_tx.config.chain_id; assert!(!mainnet); - let parent_reward_cycle = pox_constants - .block_height_to_reward_cycle( - first_block_height, - parent_burn_header_height - .try_into() - .expect("Burn block height exceeded u32"), - ) - .expect("FATAL: block height occurs before first block height"); let my_reward_cycle = pox_constants .block_height_to_reward_cycle( first_block_height, @@ -2583,70 +2588,89 @@ impl NakamotoChainState { ) .expect("FATAL: block height occurs before first block height"); - // carry forward the aggregate public key in the past reward cycle to the current - // reward cycle. - // TODO: replace with signer voting - debug!( - "Setting aggregate public key in reward cycle {}", + for parent_reward_cycle in (0..my_reward_cycle).rev() { + // carry forward the aggregate public key in the past reward cycle to the current + // reward cycle. It may be several cycles back, such as in integration tests where + // nakamoto boots up several reward cycles after the initial aggregate public key was set. 
+ // TODO: replace with signer voting + debug!( + "Try setting aggregate public key in reward cycle {}, parent {}", + my_reward_cycle, parent_reward_cycle + ); + // execute `set-aggregate-public-key` using `clarity-tx` + let Some(aggregate_public_key) = clarity_tx + .connection() + .with_readonly_clarity_env( + mainnet, + chain_id, + ClarityVersion::Clarity2, + StacksAddress::burn_address(mainnet).into(), + None, + LimitedCostTracker::Free, + |vm_env| { + vm_env.execute_contract_allow_private( + &boot_code_id(POX_4_NAME, mainnet), + "get-aggregate-public-key", + &vec![SymbolicExpression::atom_value(Value::UInt(u128::from( + parent_reward_cycle, + )))], + true, + ) + }, + ) + .ok() + .map(|agg_key_value| { + let agg_key_opt = agg_key_value.expect_optional().map(|agg_key_buff| { + Value::buff_from(agg_key_buff.expect_buff(33)) + .expect("failed to reconstruct buffer") + }); + agg_key_opt + }) + .flatten() + else { + debug!( + "No aggregate public key in parent cycle {}", + parent_reward_cycle + ); + continue; + }; + + clarity_tx.connection().as_transaction(|tx| { + tx.with_abort_callback( + |vm_env| { + vm_env.execute_in_env( + StacksAddress::burn_address(mainnet).into(), + None, + None, + |vm_env| { + vm_env.execute_contract_allow_private( + &boot_code_id(POX_4_NAME, mainnet), + "set-aggregate-public-key", + &vec![ + SymbolicExpression::atom_value(Value::UInt(u128::from( + my_reward_cycle, + ))), + SymbolicExpression::atom_value(aggregate_public_key), + ], + false, + ) + }, + ) + }, + |_, _| false, + ) + .expect("FATAL: failed to set aggregate public key") + }); + + // success! 
+ return; + } + + // if we get here, then we didn't ever set the initial aggregate public key + panic!( + "FATAL: no aggregate public key in pox-4 in any reward cycle between 0 and {}", my_reward_cycle ); - // execute `set-aggregate-public-key` using `clarity-tx` - let aggregate_public_key = clarity_tx - .connection() - .with_readonly_clarity_env( - mainnet, - chain_id, - ClarityVersion::Clarity2, - StacksAddress::burn_address(mainnet).into(), - None, - LimitedCostTracker::Free, - |vm_env| { - vm_env.execute_contract_allow_private( - &boot_code_id(POX_4_NAME, mainnet), - "get-aggregate-public-key", - &vec![SymbolicExpression::atom_value(Value::UInt(u128::from( - parent_reward_cycle, - )))], - true, - ) - }, - ) - .ok() - .map(|agg_key_value| { - let agg_key_opt = agg_key_value.expect_optional(); - let agg_key_buff = - agg_key_opt.expect("FATAL: aggregate public key not set in boot code"); - Value::buff_from(agg_key_buff.expect_buff(33)) - .expect("failed to reconstruct buffer") - }) - .expect("get-aggregate-public-key returned None"); - - clarity_tx.connection().as_transaction(|tx| { - tx.with_abort_callback( - |vm_env| { - vm_env.execute_in_env( - StacksAddress::burn_address(mainnet).into(), - None, - None, - |vm_env| { - vm_env.execute_contract_allow_private( - &boot_code_id(POX_4_NAME, mainnet), - "set-aggregate-public-key", - &vec![ - SymbolicExpression::atom_value(Value::UInt(u128::from( - my_reward_cycle, - ))), - SymbolicExpression::atom_value(aggregate_public_key), - ], - false, - ) - }, - ) - }, - |_, _| false, - ) - .expect("FATAL: failed to set aggregate public key") - }); } /// Append a Nakamoto Stacks block to the Stacks chain state. 
diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index e2d702830a..a494e0be9d 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1496,7 +1496,11 @@ fn make_fork_run_with_arrivals( #[test] pub fn test_get_highest_nakamoto_tenure() { let test_signers = TestSigners::default(); - let mut peer = boot_nakamoto(function_name!(), vec![], test_signers.aggregate_public_key); + let mut peer = boot_nakamoto( + function_name!(), + vec![], + test_signers.aggregate_public_key.clone(), + ); // extract chainstate and sortdb -- we don't need the peer anymore let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 17f9837cad..e1fcc76c34 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -67,6 +67,9 @@ use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::{ConsensusHash, Opcodes}; use crate::chainstate::coordinator::Error as coordinator_error; use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::stacks::boot::{ + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, +}; use crate::chainstate::stacks::db::blocks::MemPoolRejection; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::index::Error as marf_error; @@ -1555,6 +1558,7 @@ pub mod test { use std::sync::Mutex; use std::{fs, io, thread}; + use clarity::boot_util::boot_code_id; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::STXBalance; @@ -1571,6 +1575,7 @@ pub mod test { use stacks_common::util::secp256k1::*; use stacks_common::util::uint::*; use stacks_common::util::vrf::*; + use wsts::curve::point::Point; use {mio, rand}; use super::*; @@ -1601,6 +1606,7 @@ pub mod test { use crate::chainstate::stacks::tests::*; use 
crate::chainstate::stacks::{StacksMicroblockHeader, *}; use crate::chainstate::*; + use crate::clarity::vm::clarity::TransactionConnection; use crate::core::{StacksEpoch, StacksEpochExtension, NETWORK_P2P_PORT}; use crate::net::asn::*; use crate::net::atlas::*; @@ -1911,6 +1917,8 @@ pub mod test { pub stacker_db_configs: Vec>, /// What services should this peer support? pub services: u16, + /// aggregate public key to use + pub aggregate_public_key: Option, } impl TestPeerConfig { @@ -1974,6 +1982,7 @@ pub mod test { services: (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16) | (ServiceFlags::STACKERDB as u16), + aggregate_public_key: None, } } @@ -2249,9 +2258,48 @@ pub mod test { let atlasdb_path = format!("{}/atlas.sqlite", &test_path); let atlasdb = AtlasDB::connect(AtlasConfig::new(false), &atlasdb_path, true).unwrap(); + let agg_pub_key_opt = config + .aggregate_public_key + .as_ref() + .map(|apk| to_hex(&apk.compress().data)); + let conf = config.clone(); let post_flight_callback = move |clarity_tx: &mut ClarityTx| { let mut receipts = vec![]; + + if let Some(agg_pub_key) = agg_pub_key_opt { + debug!("Setting aggregate public key to {}", &agg_pub_key); + // instantiate aggregate public key + let contract_content = format!( + "(define-read-only ({}) 0x{})", + BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key + ); + let contract_id = boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false); + clarity_tx.connection().as_transaction(|clarity| { + let (ast, analysis) = clarity + .analyze_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &contract_content, + ASTRules::PrecheckSize, + ) + .unwrap(); + clarity + .initialize_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &ast, + &contract_content, + None, + |_, _| false, + ) + .unwrap(); + clarity.save_analysis(&contract_id, &analysis).unwrap(); + }); + } else { + debug!("Not setting aggregate public key"); + } + // add test-specific boot code if conf.setup_code.len() > 0 { let receipt = 
clarity_tx.connection().as_transaction(|clarity| { let boot_code_addr = boot_code_test_addr(); diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index d1f787b667..82455c78d4 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -718,8 +718,6 @@ impl Relayer { &block.header.block_hash() ); - // TODO: https://github.com/stacks-network/stacks-core/issues/4109 - // Update this to retrieve the last block in the last reward cycle rather than chain tip let Some(canonical_block_header) = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)? else { From 0a6aa26aec657972080a086d769a7758f9bb323a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 12:46:01 -0500 Subject: [PATCH 35/41] fix: install initial aggregate public key to neon, not nakamoto --- testnet/stacks-node/src/mockamoto.rs | 2 +- .../stacks-node/src/nakamoto_node/miner.rs | 2 +- testnet/stacks-node/src/run_loop/nakamoto.rs | 41 +------------- testnet/stacks-node/src/run_loop/neon.rs | 53 +++++++++++++++++-- 4 files changed, 53 insertions(+), 45 deletions(-) diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 495120a4c0..9777995f9f 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -1036,7 +1036,7 @@ impl MockamotoNode { &self.sortdb, &sortition_handle, &mut self.chainstate, - block_sn.block_height.saturating_sub(1), + block_sn.block_height, &aggregate_key_block_header.index_block_hash(), )?; aggregate_public_key diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 592de7817d..83fc419f59 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -184,7 +184,7 @@ impl BlockMinerThread { &sort_db, &sortition_handle, &mut chain_state, - block_sn.block_height.saturating_sub(1), + block_sn.block_height, 
&aggregate_key_block_header.index_block_hash(), )?; aggregate_public_key diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index eb8bbdc501..e729a5160f 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -225,49 +225,10 @@ impl RunLoop { .map(|e| (e.address.clone(), e.amount)) .collect(); - let agg_pubkey_boot_callback = if let Some(self_signer) = self.config.self_signing() { - let agg_pub_key = to_hex(&self_signer.aggregate_public_key.compress().data); - info!("Mockamoto node setting agg public key"; "agg_pub_key" => &agg_pub_key); - let callback = Box::new(move |clarity_tx: &mut ClarityTx| { - let contract_content = format!( - "(define-read-only ({}) 0x{})", - BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key - ); - // NOTE: this defaults to a testnet address to prevent it from ever working on - // mainnet - let contract_id = boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false); - clarity_tx.connection().as_transaction(|clarity| { - let (ast, analysis) = clarity - .analyze_smart_contract( - &contract_id, - ClarityVersion::Clarity2, - &contract_content, - ASTRules::PrecheckSize, - ) - .unwrap(); - clarity - .initialize_smart_contract( - &contract_id, - ClarityVersion::Clarity2, - &ast, - &contract_content, - None, - |_, _| false, - ) - .unwrap(); - clarity.save_analysis(&contract_id, &analysis).unwrap(); - }) - }) as Box; - Some(callback) - } else { - warn!("Self-signing is not supported yet"); - None - }; - // instantiate chainstate let mut boot_data = ChainStateBootData { initial_balances, - post_flight_callback: agg_pubkey_boot_callback, + post_flight_callback: None, first_burnchain_block_hash: burnchain_config.first_block_hash, first_burnchain_block_height: burnchain_config.first_block_height as u32, first_burnchain_block_timestamp: burnchain_config.first_block_timestamp, diff --git a/testnet/stacks-node/src/run_loop/neon.rs 
b/testnet/stacks-node/src/run_loop/neon.rs index cffcd1aa10..8517df4264 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -6,6 +6,10 @@ use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; use std::{cmp, thread}; +use clarity::boot_util::boot_code_id; +use clarity::vm::ast::ASTRules; +use clarity::vm::clarity::TransactionConnection; +use clarity::vm::ClarityVersion; use libc; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::Burnchain; @@ -17,7 +21,10 @@ use stacks::chainstate::coordinator::{ static_get_heaviest_affirmation_map, static_get_stacks_tip_affirmation_map, ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication, Error as coord_error, }; -use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; +use stacks::chainstate::stacks::boot::{ + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, +}; +use stacks::chainstate::stacks::db::{ChainStateBootData, ClarityTx, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; @@ -25,7 +32,7 @@ use stacks::util_lib::db::Error as db_error; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; use stacks_common::types::PublicKey; -use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::{to_hex, Hash160}; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stx_genesis::GenesisData; @@ -470,10 +477,50 @@ impl RunLoop { .map(|e| (e.address.clone(), e.amount)) .collect(); + // TODO: delete this once aggregate public key voting is working + let agg_pubkey_boot_callback = if let Some(self_signer) = self.config.self_signing() { + let agg_pub_key = to_hex(&self_signer.aggregate_public_key.compress().data); + info!("Mockamoto node setting agg 
public key"; "agg_pub_key" => &agg_pub_key); + let callback = Box::new(move |clarity_tx: &mut ClarityTx| { + let contract_content = format!( + "(define-read-only ({}) 0x{})", + BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key + ); + // NOTE: this defaults to a testnet address to prevent it from ever working on + // mainnet + let contract_id = boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false); + clarity_tx.connection().as_transaction(|clarity| { + let (ast, analysis) = clarity + .analyze_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &contract_content, + ASTRules::PrecheckSize, + ) + .unwrap(); + clarity + .initialize_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &ast, + &contract_content, + None, + |_, _| false, + ) + .unwrap(); + clarity.save_analysis(&contract_id, &analysis).unwrap(); + }) + }) as Box; + Some(callback) + } else { + warn!("Self-signing is not supported yet"); + None + }; + // instantiate chainstate let mut boot_data = ChainStateBootData { initial_balances, - post_flight_callback: None, + post_flight_callback: agg_pubkey_boot_callback, first_burnchain_block_hash: burnchain_config.first_block_hash, first_burnchain_block_height: burnchain_config.first_block_height as u32, first_burnchain_block_timestamp: burnchain_config.first_block_timestamp, From e409b0af05c7b5f390d83b32aba3920b8ba63a21 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 13:34:22 -0500 Subject: [PATCH 36/41] fix: search reward cycle 0 as well --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index fc3a7b8f01..ae74db5fd0 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2588,7 +2588,7 @@ impl NakamotoChainState { ) .expect("FATAL: block height occurs before first block height"); - for parent_reward_cycle in (0..my_reward_cycle).rev() { + for 
parent_reward_cycle in (0..=my_reward_cycle).rev() { // carry forward the aggregate public key in the past reward cycle to the current // reward cycle. It may be several cycles back, such as in integration tests where // nakamoto boots up several reward cycles after the initial aggregate public key was set. From 7a910e78f1bd67d487030887af0c8f4f88638b46 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 13:48:11 -0500 Subject: [PATCH 37/41] fix: expect to wait up to 10 minutes for a block to be processed (not 30s) --- testnet/stacks-node/src/tests/neon_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 455e414208..a37ca24ec7 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -483,7 +483,7 @@ pub mod test_observer { } } -const PANIC_TIMEOUT_SECS: u64 = 30; +const PANIC_TIMEOUT_SECS: u64 = 600; /// Returns `false` on a timeout, true otherwise. 
pub fn next_block_and_wait( From ea7b165363a80a1f008e434c6c2e2f7abac658fc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 15:07:10 -0500 Subject: [PATCH 38/41] chore: address PR feedback --- Cargo.lock | 1 + stackslib/src/chainstate/burn/db/sortdb.rs | 22 ++++++ stackslib/src/chainstate/nakamoto/mod.rs | 78 ++++++++++++++++--- stackslib/src/net/mod.rs | 36 ++------- stackslib/src/net/relay.rs | 23 +----- testnet/stacks-node/src/mockamoto.rs | 45 ++--------- .../stacks-node/src/nakamoto_node/miner.rs | 14 +--- testnet/stacks-node/src/run_loop/nakamoto.rs | 3 - testnet/stacks-node/src/run_loop/neon.rs | 37 +-------- 9 files changed, 109 insertions(+), 150 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78c3a9e1e6..e3c09a8bbb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3552,6 +3552,7 @@ dependencies = [ "libsigner", "pico-args", "rand 0.8.5", + "rand_core 0.6.4", "regex", "reqwest", "ring", diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 6554dccc1e..ffef8a7782 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -1004,6 +1004,12 @@ pub trait SortitionHandle { block_height: u64, ) -> Result, db_error>; + /// Get the first burn block height + fn first_burn_block_height(&self) -> u64; + + /// Get a ref to the PoX constants + fn pox_constants(&self) -> &PoxConstants; + /// is the given block a descendant of `potential_ancestor`? 
/// * block_at_burn_height: the burn height of the sortition that chose the stacks block to check /// * potential_ancestor: the stacks block hash of the potential ancestor @@ -1396,6 +1402,14 @@ impl SortitionHandle for SortitionHandleTx<'_> { SortitionDB::get_ancestor_snapshot_tx(self, block_height, &chain_tip) } + fn first_burn_block_height(&self) -> u64 { + self.context.first_block_height + } + + fn pox_constants(&self) -> &PoxConstants { + &self.context.pox_constants + } + fn sqlite(&self) -> &Connection { self.tx() } @@ -1409,6 +1423,14 @@ impl SortitionHandle for SortitionHandleConn<'_> { SortitionHandleConn::get_block_snapshot_by_height(self, block_height) } + fn first_burn_block_height(&self) -> u64 { + self.context.first_block_height + } + + fn pox_constants(&self) -> &PoxConstants { + &self.context.pox_constants + } + fn sqlite(&self) -> &Connection { self.conn() } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index ae74db5fd0..45d1200ced 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -54,6 +54,7 @@ use super::burn::db::sortdb::{ SortitionHandleConn, SortitionHandleTx, }; use super::burn::operations::{DelegateStxOp, StackStxOp, TransferStxOp}; +use super::stacks::boot::{BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME}; use super::stacks::db::accounts::MinerReward; use super::stacks::db::blocks::StagingUserBurnSupport; use super::stacks::db::{ @@ -1749,23 +1750,19 @@ impl NakamotoChainState { Ok(true) } - /// Get the aggregate public key for the given block. 
- pub fn get_aggregate_public_key( + /// Get the aggregate public key for the given block from the pox-4 contract + fn load_aggregate_public_key( sortdb: &SortitionDB, - sort_handle: &SortitionHandleConn, + sort_handle: &SH, chainstate: &mut StacksChainState, for_burn_block_height: u64, at_block_id: &StacksBlockId, ) -> Result { // Get the current reward cycle - let Some(reward_cycle) = sort_handle - .context - .pox_constants - .block_height_to_reward_cycle( - sort_handle.context.first_block_height, - for_burn_block_height, - ) - else { + let Some(reward_cycle) = sort_handle.pox_constants().block_height_to_reward_cycle( + sort_handle.first_burn_block_height(), + for_burn_block_height, + ) else { // This should be unreachable, but we'll return an error just in case. let msg = format!( "BUG: Failed to determine reward cycle of burn block height: {}.", @@ -1803,6 +1800,29 @@ impl NakamotoChainState { )) } + /// Get the aggregate public key for a block + pub fn get_aggregate_public_key( + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + sort_handle: &SH, + block: &NakamotoBlock, + ) -> Result { + let block_sn = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &block.header.consensus_hash)? + .ok_or(ChainstateError::DBError(DBError::NotFoundError))?; + let aggregate_key_block_header = + Self::get_canonical_block_header(chainstate.db(), sortdb)?.unwrap(); + + let aggregate_public_key = Self::load_aggregate_public_key( + sortdb, + sort_handle, + chainstate, + block_sn.block_height, + &aggregate_key_block_header.index_block_hash(), + )?; + Ok(aggregate_public_key) + } + /// Return the total ExecutionCost consumed during the tenure up to and including /// `block` pub fn get_total_tenure_cost_at( @@ -3047,6 +3067,42 @@ impl NakamotoChainState { Ok((epoch_receipt, clarity_commit)) } + + /// Boot code instantiation for the aggregate public key. 
+ /// TODO: This should be removed once it's possible for stackers to vote on the aggregate + /// public key + /// DO NOT USE IN MAINNET + pub fn aggregate_public_key_bootcode(clarity_tx: &mut ClarityTx, apk: &Point) { + let agg_pub_key = to_hex(&apk.compress().data); + let contract_content = format!( + "(define-read-only ({}) 0x{})", + BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key + ); + // NOTE: this defaults to a testnet address to prevent it from ever working on + // mainnet + let contract_id = boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false); + clarity_tx.connection().as_transaction(|clarity| { + let (ast, analysis) = clarity + .analyze_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &contract_content, + ASTRules::PrecheckSize, + ) + .unwrap(); + clarity + .initialize_smart_contract( + &contract_id, + ClarityVersion::Clarity2, + &ast, + &contract_content, + None, + |_, _| false, + ) + .unwrap(); + clarity.save_analysis(&contract_id, &analysis).unwrap(); + }) + } } impl StacksMessageCodec for NakamotoBlock { diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index e1fcc76c34..d237fb1f89 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2258,44 +2258,18 @@ pub mod test { let atlasdb_path = format!("{}/atlas.sqlite", &test_path); let atlasdb = AtlasDB::connect(AtlasConfig::new(false), &atlasdb_path, true).unwrap(); - let agg_pub_key_opt = config - .aggregate_public_key - .as_ref() - .map(|apk| to_hex(&apk.compress().data)); + let agg_pub_key_opt = config.aggregate_public_key.clone(); let conf = config.clone(); let post_flight_callback = move |clarity_tx: &mut ClarityTx| { let mut receipts = vec![]; if let Some(agg_pub_key) = agg_pub_key_opt { - debug!("Setting aggregate public key to {}", &agg_pub_key); - // instantiate aggregate public key - let contract_content = format!( - "(define-read-only ({}) 0x{})", - BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key + debug!( + "Setting aggregate public key to {}", + 
&to_hex(&agg_pub_key.compress().data) ); - let contract_id = boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false); - clarity_tx.connection().as_transaction(|clarity| { - let (ast, analysis) = clarity - .analyze_smart_contract( - &contract_id, - ClarityVersion::Clarity2, - &contract_content, - ASTRules::PrecheckSize, - ) - .unwrap(); - clarity - .initialize_smart_contract( - &contract_id, - ClarityVersion::Clarity2, - &ast, - &contract_content, - None, - |_, _| false, - ) - .unwrap(); - clarity.save_analysis(&contract_id, &analysis).unwrap(); - }); + NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, &agg_pub_key); } else { debug!("Not setting aggregate public key"); } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 82455c78d4..0d47942abf 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -718,27 +718,10 @@ impl Relayer { &block.header.block_hash() ); - let Some(canonical_block_header) = - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)? - else { - warn!( - "Failed to find Nakamoto canonical block header. Will not store or relay"; - "stacks_block_hash" => %block.header.block_hash(), - "consensus_hash" => %block.header.consensus_hash, - "burn_height" => block.header.chain_length, - "sortition_height" => block_sn.block_height, - ); - return Ok(false); - }; - let config = chainstate.config(); - let Ok(aggregate_public_key) = NakamotoChainState::get_aggregate_public_key( - &sortdb, - &sort_handle, - chainstate, - block_sn.block_height, - &canonical_block_header.index_block_hash(), - ) else { + let Ok(aggregate_public_key) = + NakamotoChainState::get_aggregate_public_key(chainstate, &sortdb, sort_handle, &block) + else { warn!("Failed to get aggregate public key. 
Will not store or relay"; "stacks_block_hash" => %block.header.block_hash(), "consensus_hash" => %block.header.consensus_hash, diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 9777995f9f..0654e99fad 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -427,35 +427,10 @@ impl MockamotoNode { // Create a boot contract to initialize the aggregate public key prior to Pox-4 activation let self_signer = SelfSigner::single_signer(); - let agg_pub_key = to_hex(&self_signer.aggregate_public_key.compress().data); - info!("Mockamoto node setting agg public key"; "agg_pub_key" => &agg_pub_key); + let agg_pub_key = self_signer.aggregate_public_key.clone(); + info!("Mockamoto node setting agg public key"; "agg_pub_key" => %to_hex(&self_signer.aggregate_public_key.compress().data)); let callback = move |clarity_tx: &mut ClarityTx| { - let contract_content = format!( - "(define-read-only ({}) 0x{})", - BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key - ); - let contract_id = boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false); - clarity_tx.connection().as_transaction(|clarity| { - let (ast, analysis) = clarity - .analyze_smart_contract( - &contract_id, - ClarityVersion::Clarity2, - &contract_content, - ASTRules::PrecheckSize, - ) - .unwrap(); - clarity - .initialize_smart_contract( - &contract_id, - ClarityVersion::Clarity2, - &ast, - &contract_content, - None, - |_, _| false, - ) - .unwrap(); - clarity.save_analysis(&contract_id, &analysis).unwrap(); - }) + NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, &agg_pub_key); }; let mut boot_data = ChainStateBootData::new(&burnchain, initial_balances, Some(Box::new(callback))); @@ -1023,21 +998,11 @@ impl MockamotoNode { let aggregate_public_key = if chain_length <= 1 { self.self_signer.aggregate_public_key } else { - let block_sn = SortitionDB::get_block_snapshot_consensus( - sortition_handle.conn(), - &block.header.consensus_hash, - )? 
- .ok_or(ChainstateError::DBError(DBError::NotFoundError))?; - let aggregate_key_block_header = - NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &self.sortdb)? - .unwrap(); - let aggregate_public_key = NakamotoChainState::get_aggregate_public_key( + &mut self.chainstate, &self.sortdb, &sortition_handle, - &mut self.chainstate, - block_sn.block_height, - &aggregate_key_block_header.index_block_hash(), + &block, )?; aggregate_public_key }; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 24a8cfbb62..fd4919b44b 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -174,21 +174,11 @@ impl BlockMinerThread { let aggregate_public_key = if block.header.chain_length <= 1 { signer.aggregate_public_key.clone() } else { - let block_sn = SortitionDB::get_block_snapshot_consensus( - sortition_handle.conn(), - &block.header.consensus_hash, - )? - .ok_or(ChainstateError::DBError(DBError::NotFoundError))?; - let aggregate_key_block_header = - NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db)? 
- .unwrap(); - let aggregate_public_key = NakamotoChainState::get_aggregate_public_key( + &mut chain_state, &sort_db, &sortition_handle, - &mut chain_state, - block_sn.block_height, - &aggregate_key_block_header.index_block_hash(), + &block, )?; aggregate_public_key }; diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 5e9e36c648..df93e79ea2 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -31,9 +31,6 @@ use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorRece use stacks::chainstate::coordinator::{ ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication, }; -use stacks::chainstate::stacks::boot::{ - BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, -}; use stacks::chainstate::stacks::db::{ChainStateBootData, ClarityTx, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 3ac8c35edc..d3053415d9 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -21,9 +21,7 @@ use stacks::chainstate::coordinator::{ static_get_heaviest_affirmation_map, static_get_stacks_tip_affirmation_map, ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication, Error as coord_error, }; -use stacks::chainstate::stacks::boot::{ - BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, -}; +use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::db::{ChainStateBootData, ClarityTx, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; @@ -480,37 +478,10 @@ impl RunLoop { // TODO: delete this once aggregate public key voting is working let 
agg_pubkey_boot_callback = if let Some(self_signer) = self.config.self_signing() { - let agg_pub_key = to_hex(&self_signer.aggregate_public_key.compress().data); - info!("Mockamoto node setting agg public key"; "agg_pub_key" => &agg_pub_key); + let agg_pub_key = self_signer.aggregate_public_key.clone(); + info!("Neon node setting agg public key"; "agg_pub_key" => %to_hex(&agg_pub_key.compress().data)); let callback = Box::new(move |clarity_tx: &mut ClarityTx| { - let contract_content = format!( - "(define-read-only ({}) 0x{})", - BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key - ); - // NOTE: this defaults to a testnet address to prevent it from ever working on - // mainnet - let contract_id = boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false); - clarity_tx.connection().as_transaction(|clarity| { - let (ast, analysis) = clarity - .analyze_smart_contract( - &contract_id, - ClarityVersion::Clarity2, - &contract_content, - ASTRules::PrecheckSize, - ) - .unwrap(); - clarity - .initialize_smart_contract( - &contract_id, - ClarityVersion::Clarity2, - &ast, - &contract_content, - None, - |_, _| false, - ) - .unwrap(); - clarity.save_analysis(&contract_id, &analysis).unwrap(); - }) + NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, &agg_pub_key) }) as Box; Some(callback) } else { From b63ca2155876bbbfd200bd4c27c06d0bf18e044d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 13 Dec 2023 18:12:12 -0500 Subject: [PATCH 39/41] chore: address PR feedback and force epoch 2.5 to begin after the integration test framework has mined some blocks --- stackslib/src/chainstate/nakamoto/mod.rs | 197 ++++++++---------- stackslib/src/clarity_vm/clarity.rs | 3 +- testnet/stacks-node/src/config.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 8 +- 4 files changed, 97 insertions(+), 113 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 45d1200ced..28d71caa1a 100644 --- 
a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1759,7 +1759,7 @@ impl NakamotoChainState { at_block_id: &StacksBlockId, ) -> Result { // Get the current reward cycle - let Some(reward_cycle) = sort_handle.pox_constants().block_height_to_reward_cycle( + let Some(rc) = sort_handle.pox_constants().block_height_to_reward_cycle( sort_handle.first_burn_block_height(), for_burn_block_height, ) else { @@ -1772,35 +1772,32 @@ impl NakamotoChainState { return Err(ChainstateError::InvalidStacksBlock(msg)); }; - // need to search back because the set-aggregate-public-key call only happens in nakamoto - // TODO: this will be removed once there's aggregate public key voting - for rc in (0..=reward_cycle).rev() { - debug!("get-aggregate-public-key {} {}", at_block_id, rc); - match chainstate.get_aggregate_public_key_pox_4(sortdb, at_block_id, rc)? { - Some(agg_key) => { - return Ok(agg_key); - } - None => { - debug!( - "No aggregate public key set; trying in a lower cycle"; - "block_id" => %at_block_id, - "reward_cycle" => rc, - ); - continue; - } + debug!("get-aggregate-public-key {} {}", at_block_id, rc); + match chainstate.get_aggregate_public_key_pox_4(sortdb, at_block_id, rc)? { + Some(key) => Ok(key), + None => { + // if this is the first block in its reward cycle, it'll contain the effects of + // setting the aggregate public key for `rc`, but there will currently be no key + // for `rc`. So, check `rc - 1` + chainstate + .get_aggregate_public_key_pox_4(sortdb, at_block_id, rc.saturating_sub(1))? 
+ .ok_or_else(|| { + warn!( + "Failed to get aggregate public key"; + "block_id" => %at_block_id, + "reward_cycle" => rc, + ); + ChainstateError::InvalidStacksBlock( + "Failed to get aggregate public key".into(), + ) + }) } } - warn!( - "Failed to get aggregate public key"; - "block_id" => %at_block_id, - "reward_cycle" => reward_cycle, - ); - Err(ChainstateError::InvalidStacksBlock( - "Failed to get aggregate public key".into(), - )) } - /// Get the aggregate public key for a block + /// Get the aggregate public key for a block. + /// TODO: The block at which the aggregate public key is queried needs to be better defined. + /// See https://github.com/stacks-network/stacks-core/issues/4109 pub fn get_aggregate_public_key( chainstate: &mut StacksChainState, sortdb: &SortitionDB, @@ -2608,89 +2605,75 @@ impl NakamotoChainState { ) .expect("FATAL: block height occurs before first block height"); - for parent_reward_cycle in (0..=my_reward_cycle).rev() { - // carry forward the aggregate public key in the past reward cycle to the current - // reward cycle. It may be several cycles back, such as in integration tests where - // nakamoto boots up several reward cycles after the initial aggregate public key was set. 
- // TODO: replace with signer voting - debug!( - "Try setting aggregate public key in reward cycle {}, parent {}", - my_reward_cycle, parent_reward_cycle - ); - // execute `set-aggregate-public-key` using `clarity-tx` - let Some(aggregate_public_key) = clarity_tx - .connection() - .with_readonly_clarity_env( - mainnet, - chain_id, - ClarityVersion::Clarity2, - StacksAddress::burn_address(mainnet).into(), - None, - LimitedCostTracker::Free, - |vm_env| { - vm_env.execute_contract_allow_private( - &boot_code_id(POX_4_NAME, mainnet), - "get-aggregate-public-key", - &vec![SymbolicExpression::atom_value(Value::UInt(u128::from( - parent_reward_cycle, - )))], - true, - ) - }, - ) - .ok() - .map(|agg_key_value| { - let agg_key_opt = agg_key_value.expect_optional().map(|agg_key_buff| { - Value::buff_from(agg_key_buff.expect_buff(33)) - .expect("failed to reconstruct buffer") - }); - agg_key_opt - }) - .flatten() - else { - debug!( - "No aggregate public key in parent cycle {}", - parent_reward_cycle - ); - continue; - }; - - clarity_tx.connection().as_transaction(|tx| { - tx.with_abort_callback( - |vm_env| { - vm_env.execute_in_env( - StacksAddress::burn_address(mainnet).into(), - None, - None, - |vm_env| { - vm_env.execute_contract_allow_private( - &boot_code_id(POX_4_NAME, mainnet), - "set-aggregate-public-key", - &vec![ - SymbolicExpression::atom_value(Value::UInt(u128::from( - my_reward_cycle, - ))), - SymbolicExpression::atom_value(aggregate_public_key), - ], - false, - ) - }, - ) - }, - |_, _| false, - ) - .expect("FATAL: failed to set aggregate public key") - }); + let parent_reward_cycle = my_reward_cycle.saturating_sub(1); + debug!( + "Try setting aggregate public key in reward cycle {}, parent {}", + my_reward_cycle, parent_reward_cycle + ); - // success! 
- return; - } + // execute `set-aggregate-public-key` using `clarity-tx` + let Some(aggregate_public_key) = clarity_tx + .connection() + .with_readonly_clarity_env( + mainnet, + chain_id, + ClarityVersion::Clarity2, + StacksAddress::burn_address(mainnet).into(), + None, + LimitedCostTracker::Free, + |vm_env| { + vm_env.execute_contract_allow_private( + &boot_code_id(POX_4_NAME, mainnet), + "get-aggregate-public-key", + &vec![SymbolicExpression::atom_value(Value::UInt(u128::from( + parent_reward_cycle, + )))], + true, + ) + }, + ) + .ok() + .map(|agg_key_value| { + let agg_key_opt = agg_key_value.expect_optional().map(|agg_key_buff| { + Value::buff_from(agg_key_buff.expect_buff(33)) + .expect("failed to reconstruct buffer") + }); + agg_key_opt + }) + .flatten() + else { + panic!( + "No aggregate public key in parent cycle {}", + parent_reward_cycle + ); + }; - // if we get here, then we didn't ever set the initial aggregate public key - panic!( - "FATAL: no aggregate public key in pox-4 in any reward cycle between 0 and {}", - my_reward_cycle - ); + clarity_tx.connection().as_transaction(|tx| { + tx.with_abort_callback( + |vm_env| { + vm_env.execute_in_env( + StacksAddress::burn_address(mainnet).into(), + None, + None, + |vm_env| { + vm_env.execute_contract_allow_private( + &boot_code_id(POX_4_NAME, mainnet), + "set-aggregate-public-key", + &vec![ + SymbolicExpression::atom_value(Value::UInt(u128::from( + my_reward_cycle, + ))), + SymbolicExpression::atom_value(aggregate_public_key), + ], + false, + ) + }, + ) + }, + |_, _| false, + ) + .expect("FATAL: failed to set aggregate public key") + }); } /// Append a Nakamoto Stacks block to the Stacks chain state. 
diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index 59b5463d79..ae2dfbddc6 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -1405,11 +1405,12 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // set the aggregate public key for all pre-pox-4 cycles, if in testnet, and can fetch a boot-setting if !mainnet { if let Some(ref agg_pub_key) = initialized_agg_key { - for set_in_reward_cycle in 0..pox_4_first_cycle { + for set_in_reward_cycle in 0..=pox_4_first_cycle { info!( "Setting initial aggregate-public-key in PoX-4"; "agg_pub_key" => %agg_pub_key, "reward_cycle" => set_in_reward_cycle, + "pox_4_first_cycle" => pox_4_first_cycle, ); tx_conn .with_abort_callback( diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 1d80c92bf7..72b208eb6f 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -599,7 +599,7 @@ impl Config { .iter() .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch25) { - // Override pox_3_activation_height to the start_height of epoch2.5 + // Override pox_4_activation_height to the start_height of epoch2.5 debug!( "Override pox_4_activation_height from {} to {}", burnchain.pox_constants.pox_4_activation_height, epoch.start_height diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 0b1d79ffa3..6666b1eac9 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -98,20 +98,20 @@ lazy_static! 
{ StacksEpoch { epoch_id: StacksEpochId::Epoch24, start_height: 5, - end_height: 6, + end_height: 201, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_4 }, StacksEpoch { epoch_id: StacksEpochId::Epoch25, - start_height: 6, - end_height: 221, + start_height: 201, + end_height: 231, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_5 }, StacksEpoch { epoch_id: StacksEpochId::Epoch30, - start_height: 221, + start_height: 231, end_height: STACKS_EPOCH_MAX, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 From ece58d37a24bbc1dfe4b5908d5e1418ef87ee315 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 20 Dec 2023 01:59:45 +0200 Subject: [PATCH 40/41] feat: ready source branch --- clarity/Cargo.toml | 2 +- clarity/src/{libclarity.rs => lib.rs} | 0 libsigner/Cargo.toml | 2 +- libsigner/src/{libsigner.rs => lib.rs} | 0 stacks-common/Cargo.toml | 2 +- stacks-common/src/{libcommon.rs => lib.rs} | 0 6 files changed, 3 insertions(+), 3 deletions(-) rename clarity/src/{libclarity.rs => lib.rs} (100%) rename libsigner/src/{libsigner.rs => lib.rs} (100%) rename stacks-common/src/{libcommon.rs => lib.rs} (100%) diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 86089991dc..e83c77f823 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -15,7 +15,7 @@ resolver = "2" [lib] name = "clarity" -path = "./src/libclarity.rs" +path = "./src/lib.rs" [dependencies] rand = "0.7.3" diff --git a/clarity/src/libclarity.rs b/clarity/src/lib.rs similarity index 100% rename from clarity/src/libclarity.rs rename to clarity/src/lib.rs diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 8500ef55fa..35aaca69f7 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" [lib] name = "libsigner" -path = "./src/libsigner.rs" +path = "./src/lib.rs" [dependencies] clarity = { path = "../clarity" } diff --git a/libsigner/src/libsigner.rs 
b/libsigner/src/lib.rs similarity index 100% rename from libsigner/src/libsigner.rs rename to libsigner/src/lib.rs diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 1916572cf4..8ba0b64197 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -15,7 +15,7 @@ edition = "2021" [lib] name = "stacks_common" -path = "./src/libcommon.rs" +path = "./src/lib.rs" [dependencies] rand = "0.7.3" diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/lib.rs similarity index 100% rename from stacks-common/src/libcommon.rs rename to stacks-common/src/lib.rs From 0b27aedbdf6161ebd0127953873d9d62b56f5266 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 22 Dec 2023 18:44:50 +0200 Subject: [PATCH 41/41] feat: run again with `cargo test --test-threads` arg --- stacks-common/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-common/src/lib.rs b/stacks-common/src/lib.rs index 1448a2f90c..c55f56611c 100644 --- a/stacks-common/src/lib.rs +++ b/stacks-common/src/lib.rs @@ -6,7 +6,7 @@ #![allow(non_upper_case_globals)] #![cfg_attr(test, allow(unused_variables, unused_assignments))] #![allow(clippy::assertions_on_constants)] - +// test to trigger mutants re-run #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog;