diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 4acac1c8a0..069857ed44 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -69,6 +69,7 @@ jobs: - tests::neon_integrations::test_problematic_txs_are_not_stored - tests::neon_integrations::use_latest_tip_integration_test - tests::should_succeed_handling_malformed_and_valid_txs + - tests::nakamoto_integrations::simple_neon_integration steps: ## Setup test environment - name: Setup Test Environment diff --git a/Cargo.lock b/Cargo.lock index a90cb48536..78c3a9e1e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2354,6 +2354,8 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "p256k1" version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5afcf536d20c074ef45371ee9a654dcfc46fb2dde18ecc54ec30c936eb850fa2" dependencies = [ "bindgen", "bitvec", @@ -3549,8 +3551,7 @@ dependencies = [ "libc", "libsigner", "pico-args", - "rand 0.7.3", - "rand_core 0.6.4", + "rand 0.8.5", "regex", "reqwest", "ring", @@ -4711,6 +4712,8 @@ dependencies = [ [[package]] name = "wsts" version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c250118354755b4abb091a83cb8d659b511c0ae211ccdb3b1254e3db199cb86" dependencies = [ "aes-gcm 0.10.2", "bs58 0.5.0", diff --git a/Cargo.toml b/Cargo.toml index a861f143e9..3d2d9d066d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,8 @@ members = [ # Dependencies we want to keep the same between workspace members [workspace.dependencies] wsts = "5.0" +rand_core = "0.6" +rand = "0.8" # Use a bit more than default optimization for # dev builds to speed up test execution diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 86089991dc..e83c77f823 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -15,7 +15,7 @@ resolver = "2" [lib] name = "clarity" -path = "./src/libclarity.rs" +path = 
"./src/lib.rs" [dependencies] rand = "0.7.3" diff --git a/clarity/src/libclarity.rs b/clarity/src/lib.rs similarity index 100% rename from clarity/src/libclarity.rs rename to clarity/src/lib.rs diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 8500ef55fa..35aaca69f7 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" [lib] name = "libsigner" -path = "./src/libsigner.rs" +path = "./src/lib.rs" [dependencies] clarity = { path = "../clarity" } diff --git a/libsigner/src/libsigner.rs b/libsigner/src/lib.rs similarity index 100% rename from libsigner/src/libsigner.rs rename to libsigner/src/lib.rs diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 1916572cf4..8ba0b64197 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -15,7 +15,7 @@ edition = "2021" [lib] name = "stacks_common" -path = "./src/libcommon.rs" +path = "./src/lib.rs" [dependencies] rand = "0.7.3" diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/lib.rs similarity index 100% rename from stacks-common/src/libcommon.rs rename to stacks-common/src/lib.rs diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index c273a38de4..6f6b82ceec 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -46,7 +46,8 @@ use crate::burnchains::{ Burnchain, BurnchainBlockHeader, Error as burnchain_error, MagicBytes, BLOCKSTACK_MAGIC_MAINNET, }; use crate::core::{ - StacksEpoch, STACKS_EPOCHS_MAINNET, STACKS_EPOCHS_REGTEST, STACKS_EPOCHS_TESTNET, + StacksEpoch, StacksEpochExtension, STACKS_EPOCHS_MAINNET, STACKS_EPOCHS_REGTEST, + STACKS_EPOCHS_TESTNET, }; use crate::util_lib::db::Error as DBError; @@ -91,7 +92,7 @@ impl TryFrom for BitcoinNetworkType { /// Get the default epochs definitions for the given BitcoinNetworkType. /// Should *not* be used except by the BitcoinIndexer when no epochs vector /// was specified. 
-fn get_bitcoin_stacks_epochs(network_id: BitcoinNetworkType) -> Vec { +pub fn get_bitcoin_stacks_epochs(network_id: BitcoinNetworkType) -> Vec { match network_id { BitcoinNetworkType::Mainnet => STACKS_EPOCHS_MAINNET.to_vec(), BitcoinNetworkType::Testnet => STACKS_EPOCHS_TESTNET.to_vec(), @@ -1030,13 +1031,7 @@ impl BurnchainIndexer for BitcoinIndexer { /// /// It is an error (panic) to set custom epochs if running on `Mainnet`. fn get_stacks_epochs(&self) -> Vec { - match self.config.epochs { - Some(ref epochs) => { - assert!(self.runtime.network_id != BitcoinNetworkType::Mainnet); - epochs.clone() - } - None => get_bitcoin_stacks_epochs(self.runtime.network_id), - } + StacksEpoch::get_epochs(self.runtime.network_id, self.config.epochs.as_ref()) } /// Read downloaded headers within a range diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 462662d4d9..6dde267bc2 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -169,7 +169,7 @@ pub fn get_nakamoto_reward_cycle_info( .epoch_id; assert!( - epoch_at_height >= StacksEpochId::Epoch30, + epoch_at_height >= StacksEpochId::Epoch25, "FATAL: called a nakamoto function outside of epoch 3" ); @@ -216,22 +216,40 @@ pub fn get_nakamoto_reward_cycle_info( } // find the first Stacks block processed in the prepare phase - let Some(prepare_start_block_header) = + let parent_block_id = if let Some(nakamoto_start_block) = NakamotoChainState::get_nakamoto_tenure_start_block_header( chain_state.db(), &sn.consensus_hash, + )? { + nakamoto_start_block + .anchored_header + .as_stacks_nakamoto() + // TODO: maybe `get_nakamoto_tenure_start_block_header` should + // return a type that doesn't require this unwrapping? 
+ .expect("FATAL: queried non-Nakamoto tenure start header") + .parent_block_id + } else { + let Some(block_header) = + StacksChainState::get_stacks_block_header_info_by_consensus_hash( + chain_state.db(), + &sn.consensus_hash, + )? + else { + // no header for this snapshot (possibly invalid) + debug!("Failed to find block by consensus hash"; "consensus_hash" => %sn.consensus_hash); + continue; + }; + let Some(parent_block_id) = StacksChainState::get_parent_block_id( + chain_state.db(), + &block_header.index_block_hash(), )? - else { - // no header for this snapshot (possibly invalid) - continue; + else { + debug!("Failed to get parent block"; "block_id" => %block_header.index_block_hash()); + continue; + }; + parent_block_id }; - let parent_block_id = &prepare_start_block_header - .anchored_header - .as_stacks_nakamoto() - .expect("FATAL: queried non-Nakamoto tenure start header") - .parent_block_id; - // find the tenure-start block of the tenure of the parent of this Stacks block. // in epoch 2, this is the preceding anchor block // in nakamoto, this is the tenure-start block of the preceding tenure diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 82b6d34b93..5b511f6aa2 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -498,7 +498,7 @@ impl NakamotoBlockBuilder { state_root_hash ); - info!( + debug!( "Miner: mined Nakamoto block"; "consensus_hash" => %block.header.consensus_hash, "block_hash" => %block.header.block_hash(), @@ -570,13 +570,19 @@ impl NakamotoBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); + let initial_txs: Vec<_> = [ + tenure_info.tenure_change_tx.clone(), + tenure_info.coinbase_tx.clone(), + ] + .into_iter() + .filter_map(|x| x) + .collect(); let (blocked, tx_events) = match StacksBlockBuilder::select_and_apply_transactions( &mut tenure_tx, &mut builder, mempool, 
parent_stacks_header.stacks_block_height, - tenure_info.tenure_change_tx(), - tenure_info.coinbase_tx(), + &initial_txs, settings, event_observer, ASTRules::PrecheckSize, diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index a9cfacf929..3eb1ea36cc 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2139,8 +2139,7 @@ impl StacksBlockBuilder { builder: &mut B, mempool: &mut MemPoolDB, tip_height: u64, - tenure_change_tx: Option<&StacksTransaction>, - coinbase_tx: Option<&StacksTransaction>, + initial_txs: &[StacksTransaction], settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, ast_rules: ASTRules, @@ -2155,17 +2154,10 @@ impl StacksBlockBuilder { let mut tx_events = Vec::new(); - if let Some(tenure_tx) = tenure_change_tx { + for initial_tx in initial_txs.iter() { tx_events.push( builder - .try_mine_tx(epoch_tx, tenure_tx, ast_rules.clone())? - .convert_to_event(), - ); - } - if let Some(coinbase_tx) = coinbase_tx { - tx_events.push( - builder - .try_mine_tx(epoch_tx, coinbase_tx, ast_rules.clone())? + .try_mine_tx(epoch_tx, initial_tx, ast_rules.clone())? 
.convert_to_event(), ); } @@ -2442,8 +2434,7 @@ impl StacksBlockBuilder { &mut builder, mempool, parent_stacks_header.stacks_block_height, - None, - Some(coinbase_tx), + &[coinbase_tx.clone()], settings, event_observer, ast_rules, diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index b03fe0c8e0..38f383194e 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -25,6 +25,8 @@ pub use stacks_common::types::StacksEpochId; use stacks_common::util::log; pub use self::mempool::MemPoolDB; +use crate::burnchains::bitcoin::indexer::get_bitcoin_stacks_epochs; +use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::{Burnchain, Error as burnchain_error}; use crate::chainstate::burn::ConsensusHash; pub mod mempool; @@ -604,9 +606,34 @@ pub trait StacksEpochExtension { epoch_2_1_block_height: u64, ) -> Vec; fn validate_epochs(epochs: &[StacksEpoch]) -> Vec; + /// This method gets the epoch vector. + /// + /// Choose according to: + /// 1) Use the custom epochs defined on the underlying `BitcoinIndexerConfig`, if they exist. + /// 2) Use hard-coded static values, otherwise. + /// + /// It is an error (panic) to set custom epochs if running on `Mainnet`. 
+ /// + fn get_epochs( + bitcoin_network: BitcoinNetworkType, + configured_epochs: Option<&Vec>, + ) -> Vec; } impl StacksEpochExtension for StacksEpoch { + fn get_epochs( + bitcoin_network: BitcoinNetworkType, + configured_epochs: Option<&Vec>, + ) -> Vec { + match configured_epochs { + Some(epochs) => { + assert!(bitcoin_network != BitcoinNetworkType::Mainnet); + epochs.clone() + } + None => get_bitcoin_stacks_epochs(bitcoin_network), + } + } + #[cfg(test)] fn unit_test_pre_2_05(first_burnchain_height: u64) -> Vec { info!( diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 780b65116e..9e0c8a74e7 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -9,7 +9,6 @@ rust-version = "1.61" [dependencies] lazy_static = "1.4.0" pico-args = "0.3.1" -rand = "0.7.3" serde = "1" serde_derive = "1" serde_json = { version = "1.0", features = ["arbitrary_precision", "raw_value"] } @@ -29,7 +28,7 @@ chrono = "0.4.19" regex = "1" libsigner = { path = "../../libsigner" } wsts = { workspace = true } -rand_core = "0.6" +rand = { workspace = true } [dev-dependencies] ring = "0.16.19" diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index d70fca1c02..7d1a2aec08 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -8,7 +8,6 @@ use async_h1::client; use async_std::io::ReadExt; use async_std::net::TcpStream; use base64::encode; -use clarity::vm::types::PrincipalData; use http_types::{Method, Request, Url}; use serde::Serialize; use serde_json::json; @@ -50,7 +49,7 @@ use stacks_common::deps_common::bitcoin::network::encodable::ConsensusEncodable; use stacks_common::deps_common::bitcoin::network::serialize::deserialize as btc_deserialize; use stacks_common::deps_common::bitcoin::network::serialize::RawEncoder; use 
stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; -use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksAddress}; +use stacks_common::types::chainstate::BurnchainHeaderHash; use stacks_common::util::hash::{hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f634f526c8..1d80c92bf7 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -17,17 +17,18 @@ use stacks::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; use stacks::chainstate::stacks::MAX_BLOCK_LEN; use stacks::core::mempool::MemPoolWalkSettings; use stacks::core::{ - StacksEpoch, StacksEpochExtension, StacksEpochId, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, - PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, + MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId, CHAIN_ID_MAINNET, + CHAIN_ID_TESTNET, PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, }; use stacks::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; use stacks::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; use stacks::cost_estimates::fee_scalar::ScalarFeeRateEstimator; -use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct}; -use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator}; +use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMetric}; +use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator}; use stacks::net::atlas::AtlasConfig; use stacks::net::connection::ConnectionOptions; use stacks::net::{Neighbor, NeighborKey}; +use stacks::util_lib::db::Error as DBError; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::types::net::PeerAddress; @@ -35,6 +36,8 @@ use stacks_common::util::get_epoch_time_ms; use 
stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use crate::mockamoto::signer::SelfSigner; + pub const DEFAULT_SATS_PER_VB: u64 = 50; const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; @@ -313,7 +316,7 @@ impl ConfigFile { password: Some("blockstacksystem".into()), magic_bytes: Some("M3".into()), epochs: Some(epochs), - pox_prepare_length: Some(2), + pox_prepare_length: Some(3), pox_reward_length: Some(36), ..BurnchainConfigFile::default() }; @@ -491,6 +494,13 @@ lazy_static! { } impl Config { + pub fn self_signing(&self) -> Option { + if !(self.burnchain.mode == "nakamoto-neon" || self.burnchain.mode == "mockamoto") { + return None; + } + self.miner.self_signing_key.clone() + } + /// get the up-to-date burnchain from the config pub fn get_burnchain_config(&self) -> Result { if let Some(path) = &self.config_path { @@ -501,6 +511,26 @@ impl Config { Ok(self.burnchain.clone()) } } + + /// Connect to the MempoolDB using the configured cost estimation + pub fn connect_mempool_db(&self) -> Result { + // create estimators, metric instances for RPC handler + let cost_estimator = self + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = self + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + MemPoolDB::open( + self.is_mainnet(), + self.burnchain.chain_id, + &self.get_chainstate_path_str(), + cost_estimator, + metric, + ) + } + /// Apply any test settings to this burnchain config struct fn apply_test_settings(&self, burnchain: &mut Burnchain) { if self.burnchain.get_bitcoin_network().1 == BitcoinNetworkType::Mainnet { @@ -594,6 +624,37 @@ impl Config { ); burnchain.pox_constants.sunset_end = sunset_end.into(); } + + // check if the Epoch 3.0 burnchain settings as configured are going to be valid. 
+ if self.burnchain.mode == "nakamoto-neon" || self.burnchain.mode == "mockamoto" { + self.check_nakamoto_config(&burnchain); + } + } + + fn check_nakamoto_config(&self, burnchain: &Burnchain) { + let epochs = StacksEpoch::get_epochs( + self.burnchain.get_bitcoin_network().1, + self.burnchain.epochs.as_ref(), + ); + let Some(epoch_30) = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30) + .map(|epoch_ix| epochs[epoch_ix].clone()) + else { + // no Epoch 3.0, so just return + return; + }; + if burnchain.pox_constants.prepare_length < 3 { + panic!( + "FATAL: Nakamoto rules require a prepare length >= 3. Prepare length set to {}", + burnchain.pox_constants.prepare_length + ); + } + if burnchain.is_in_prepare_phase(epoch_30.start_height) { + panic!( + "FATAL: Epoch 3.0 must start *during* a reward phase, not a prepare phase. Epoch 3.0 start set to: {}. PoX Parameters: {:?}", + epoch_30.start_height, + &burnchain.pox_constants + ); + } } /// Load up a Burnchain and apply config settings to it. 
@@ -1095,6 +1156,11 @@ impl Config { .as_ref() .map(|x| Secp256k1PrivateKey::from_hex(x)) .transpose()?, + self_signing_key: miner + .self_signing_seed + .as_ref() + .map(|x| SelfSigner::from_seed(*x)) + .or(miner_default_config.self_signing_key), }, None => miner_default_config, }; @@ -1108,6 +1174,7 @@ impl Config { "xenon", "mainnet", "mockamoto", + "nakamoto-neon", ]; if !supported_modes.contains(&burnchain.mode.as_str()) { @@ -1629,10 +1696,10 @@ impl BurnchainConfig { match self.mode.as_str() { "mainnet" => ("mainnet".to_string(), BitcoinNetworkType::Mainnet), "xenon" => ("testnet".to_string(), BitcoinNetworkType::Testnet), - "helium" | "neon" | "argon" | "krypton" | "mocknet" | "mockamoto" => { + "helium" | "neon" | "argon" | "krypton" | "mocknet" | "mockamoto" | "nakamoto-neon" => { ("regtest".to_string(), BitcoinNetworkType::Regtest) } - _ => panic!("Invalid bitcoin mode -- expected mainnet, testnet, or regtest"), + other => panic!("Invalid stacks-node mode: {other}"), } } } @@ -2116,6 +2183,7 @@ pub struct MinerConfig { pub candidate_retry_cache_size: u64, pub unprocessed_block_deadline_secs: u64, pub mining_key: Option, + pub self_signing_key: Option, } impl MinerConfig { @@ -2133,6 +2201,7 @@ impl MinerConfig { candidate_retry_cache_size: 10_000, unprocessed_block_deadline_secs: 30, mining_key: None, + self_signing_key: None, } } } @@ -2241,6 +2310,7 @@ pub struct MinerConfigFile { pub candidate_retry_cache_size: Option, pub unprocessed_block_deadline_secs: Option, pub mining_key: Option, + pub self_signing_seed: Option, } #[derive(Clone, Deserialize, Default, Debug)] diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs new file mode 100644 index 0000000000..bd1560477c --- /dev/null +++ b/testnet/stacks-node/src/globals.rs @@ -0,0 +1,289 @@ +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::mpsc::SyncSender; +use std::sync::{Arc, Mutex}; + +use stacks::burnchains::Txid; +use 
stacks::chainstate::burn::operations::LeaderKeyRegisterOp; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::coordinator::comm::CoordinatorChannels; +use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::miner::MinerStatus; +use stacks::net::NetworkResult; +use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash}; + +use crate::neon::Counters; +use crate::neon_node::LeaderKeyRegistrationState; +use crate::run_loop::RegisteredKey; +use crate::syncctl::PoxSyncWatchdogComms; + +pub type NeonGlobals = Globals; + +/// Command types for the relayer thread, issued to it by other threads +pub enum RelayerDirective { + /// Handle some new data that arrived on the network (such as blocks, transactions, and + HandleNetResult(NetworkResult), + /// Announce a new sortition. Process and broadcast the block if we won. + ProcessTenure(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash), + /// Try to mine a block + RunTenure(RegisteredKey, BlockSnapshot, u128), // (vrf key, chain tip, time of issuance in ms) + /// A nakamoto tenure's first block has been processed. + NakamotoTenureStartProcessed(ConsensusHash, BlockHeaderHash), + /// Try to register a VRF public key + RegisterKey(BlockSnapshot), + /// Stop the relayer thread + Exit, +} + +/// Inter-thread communication structure, shared between threads. This +/// is generic over the relayer communication channel: nakamoto and +/// neon nodes use different relayer directives. 
+pub struct Globals { + /// Last sortition processed + last_sortition: Arc>>, + /// Status of the miner + miner_status: Arc>, + /// Communication link to the coordinator thread + pub(crate) coord_comms: CoordinatorChannels, + /// Unconfirmed transactions (shared between the relayer and p2p threads) + unconfirmed_txs: Arc>, + /// Writer endpoint to the relayer thread + pub relay_send: SyncSender, + /// Cointer state in the main thread + pub counters: Counters, + /// Connection to the PoX sync watchdog + pub sync_comms: PoxSyncWatchdogComms, + /// Global flag to see if we should keep running + pub should_keep_running: Arc, + /// Status of our VRF key registration state (shared between the main thread and the relayer) + leader_key_registration_state: Arc>, +} + +// Need to manually implement Clone, because [derive(Clone)] requires +// all trait bounds to implement Clone, even though T doesn't need Clone +// because it's behind SyncSender. +impl Clone for Globals { + fn clone(&self) -> Self { + Self { + last_sortition: self.last_sortition.clone(), + miner_status: self.miner_status.clone(), + coord_comms: self.coord_comms.clone(), + unconfirmed_txs: self.unconfirmed_txs.clone(), + relay_send: self.relay_send.clone(), + counters: self.counters.clone(), + sync_comms: self.sync_comms.clone(), + should_keep_running: self.should_keep_running.clone(), + leader_key_registration_state: self.leader_key_registration_state.clone(), + } + } +} + +impl Globals { + pub fn new( + coord_comms: CoordinatorChannels, + miner_status: Arc>, + relay_send: SyncSender, + counters: Counters, + sync_comms: PoxSyncWatchdogComms, + should_keep_running: Arc, + ) -> Globals { + Globals { + last_sortition: Arc::new(Mutex::new(None)), + miner_status, + coord_comms, + unconfirmed_txs: Arc::new(Mutex::new(UnconfirmedTxMap::new())), + relay_send, + counters, + sync_comms, + should_keep_running, + leader_key_registration_state: Arc::new(Mutex::new( + LeaderKeyRegistrationState::Inactive, + )), + } + } + + 
/// Does the inventory sync watcher think we still need to + /// catch up to the chain tip? + pub fn in_initial_block_download(&self) -> bool { + self.sync_comms.get_ibd() + } + + /// Get the last sortition processed by the relayer thread + pub fn get_last_sortition(&self) -> Option { + self.last_sortition + .lock() + .unwrap_or_else(|_| { + error!("Sortition mutex poisoned!"); + panic!(); + }) + .clone() + } + + /// Set the last sortition processed + pub fn set_last_sortition(&self, block_snapshot: BlockSnapshot) { + let mut last_sortition = self.last_sortition.lock().unwrap_or_else(|_| { + error!("Sortition mutex poisoned!"); + panic!(); + }); + last_sortition.replace(block_snapshot); + } + + /// Get the status of the miner (blocked or ready) + pub fn get_miner_status(&self) -> Arc> { + self.miner_status.clone() + } + + pub fn block_miner(&self) { + self.miner_status + .lock() + .expect("FATAL: mutex poisoned") + .add_blocked() + } + + pub fn unblock_miner(&self) { + self.miner_status + .lock() + .expect("FATAL: mutex poisoned") + .remove_blocked() + } + + /// Get the main thread's counters + pub fn get_counters(&self) -> Counters { + self.counters.clone() + } + + /// Called by the relayer to pass unconfirmed txs to the p2p thread, so the p2p thread doesn't + /// need to do the disk I/O needed to instantiate the unconfirmed state trie they represent. + /// Clears the unconfirmed transactions, and replaces them with the chainstate's. + pub fn send_unconfirmed_txs(&self, chainstate: &StacksChainState) { + let Some(ref unconfirmed) = chainstate.unconfirmed_state else { + return; + }; + let mut txs = self.unconfirmed_txs.lock().unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: unconfirmed tx arc mutex is poisoned: {e:?}"); + panic!(); + }); + txs.clear(); + txs.extend(unconfirmed.mined_txs.clone()); + } + + /// Called by the p2p thread to accept the unconfirmed tx state processed by the relayer. 
+ /// Puts the shared unconfirmed transactions to chainstate. + pub fn recv_unconfirmed_txs(&self, chainstate: &mut StacksChainState) { + let Some(ref mut unconfirmed) = chainstate.unconfirmed_state else { + return; + }; + let txs = self.unconfirmed_txs.lock().unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: unconfirmed tx arc mutex is poisoned: {e:?}"); + panic!(); + }); + unconfirmed.mined_txs.clear(); + unconfirmed.mined_txs.extend(txs.clone()); + } + + /// Signal system-wide stop + pub fn signal_stop(&self) { + self.should_keep_running.store(false, Ordering::SeqCst); + } + + /// Should we keep running? + pub fn keep_running(&self) -> bool { + self.should_keep_running.load(Ordering::SeqCst) + } + + /// Get the handle to the coordinator + pub fn coord(&self) -> &CoordinatorChannels { + &self.coord_comms + } + + /// Get the current leader key registration state. + /// Called from the runloop thread and relayer thread. + pub fn get_leader_key_registration_state(&self) -> LeaderKeyRegistrationState { + let key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: leader key registration mutex is poisoned: {e:?}"); + panic!(); + }); + key_state.clone() + } + + /// Set the initial leader key registration state. + /// Called from the runloop thread when booting up. + pub fn set_initial_leader_key_registration_state(&self, new_state: LeaderKeyRegistrationState) { + let mut key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: leader key registration mutex is poisoned: {e:?}"); + panic!(); + }); + *key_state = new_state; + } + + /// Advance the leader key registration state to pending, given a txid we just sent. + /// Only the relayer thread calls this. 
+ pub fn set_pending_leader_key_registration(&self, target_block_height: u64, txid: Txid) { + let mut key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|_e| { + error!("FATAL: failed to lock leader key registration state mutex"); + panic!(); + }); + *key_state = LeaderKeyRegistrationState::Pending(target_block_height, txid); + } + + /// Advance the leader key registration state to active, given the VRF key registration ops + /// we've discovered in a given snapshot. + /// The runloop thread calls this whenever it processes a sortition. + pub fn try_activate_leader_key_registration( + &self, + burn_block_height: u64, + key_registers: Vec, + ) -> bool { + let mut activated = false; + let mut key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: leader key registration mutex is poisoned: {e:?}"); + panic!(); + }); + // if key_state is anything but pending, then we don't activate + let LeaderKeyRegistrationState::Pending(target_block_height, txid) = *key_state else { + return false; + }; + for op in key_registers.into_iter() { + info!( + "Processing burnchain block with key_register_op"; + "burn_block_height" => burn_block_height, + "txid" => %op.txid, + "checking_txid" => %txid, + ); + + if txid == op.txid { + *key_state = LeaderKeyRegistrationState::Active(RegisteredKey { + target_block_height, + vrf_public_key: op.public_key, + block_height: u64::from(op.block_height), + op_vtxindex: u32::from(op.vtxindex), + }); + activated = true; + } else { + debug!( + "key_register_op {} does not match our pending op {}", + txid, &op.txid + ); + } + } + + activated + } +} diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index 7ea3b90556..d2575cb2b9 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -7,7 +7,7 @@ use stacks_common::address::{ }; use 
stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::{Hash160, Sha256Sum}; -use stacks_common::util::secp256k1::Secp256k1PublicKey; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; use super::operations::BurnchainOpSigner; @@ -16,6 +16,7 @@ use super::operations::BurnchainOpSigner; #[derive(Clone)] pub struct Keychain { secret_state: Vec, + nakamoto_mining_key: Secp256k1PrivateKey, } impl Keychain { @@ -44,10 +45,32 @@ impl Keychain { StacksPrivateKey::from_slice(&sk_bytes[..]).expect("FATAL: Keychain::make_secret_key_bytes() returned bytes that could not be parsed into a secp256k1 secret key!") } - /// Create a default keychain from the seed + /// Get the public key hash of the nakamoto mining key (i.e., Hash160(pubkey)) + pub fn get_nakamoto_pkh(&self) -> Hash160 { + let pk = Secp256k1PublicKey::from_private(&self.nakamoto_mining_key); + Hash160::from_node_public_key(&pk) + } + + /// Get the secret key of the nakamoto mining key + pub fn get_nakamoto_sk(&self) -> &Secp256k1PrivateKey { + &self.nakamoto_mining_key + } + + /// Set the secret key of the nakamoto mining key + pub fn set_nakamoto_sk(&mut self, mining_key: Secp256k1PrivateKey) { + self.nakamoto_mining_key = mining_key; + } + + /// Create a default keychain from the seed, with a default nakamoto mining key derived + /// from the same seed ( pub fn default(seed: Vec) -> Keychain { + let secret_state = Self::make_secret_key_bytes(&seed); + // re-hash secret_state to use as a default seed for the nakamoto mining key + let nakamoto_mining_key = + Secp256k1PrivateKey::from_seed(Sha256Sum::from_data(&secret_state).as_bytes()); Keychain { - secret_state: Keychain::make_secret_key_bytes(&seed), + secret_state, + nakamoto_mining_key, } } diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 6addce37a1..d180aead8b 100644 --- 
a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -19,8 +19,10 @@ pub mod burnchains; pub mod config; pub mod event_dispatcher; pub mod genesis_data; +pub mod globals; pub mod keychain; pub mod mockamoto; +pub mod nakamoto_node; pub mod neon_node; pub mod node; pub mod operations; @@ -44,6 +46,7 @@ pub use self::node::{ChainTip, Node}; pub use self::run_loop::{helium, neon}; pub use self::tenure::Tenure; use crate::mockamoto::MockamotoNode; +use crate::run_loop::boot_nakamoto; fn main() { panic::set_hook(Box::new(|panic_info| { @@ -209,6 +212,9 @@ fn main() { } else if conf.burnchain.mode == "mockamoto" { let mut mockamoto = MockamotoNode::new(&conf).unwrap(); mockamoto.run(); + } else if conf.burnchain.mode == "nakamoto-neon" { + let mut run_loop = boot_nakamoto::BootRunLoop::new(conf).unwrap(); + run_loop.start(None, 0); } else { println!("Burnchain mode '{}' not supported", conf.burnchain.mode); } diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 8f17aae677..0929a67743 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
use std::sync::atomic::AtomicBool; use std::sync::mpsc::{sync_channel, Receiver, RecvTimeoutError}; use std::sync::{Arc, Mutex}; @@ -69,10 +84,9 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; use self::signer::SelfSigner; +use crate::globals::{NeonGlobals as Globals, RelayerDirective}; use crate::neon::Counters; -use crate::neon_node::{ - Globals, PeerThread, RelayerDirective, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, -}; +use crate::neon_node::{PeerThread, StacksNode, BLOCK_PROCESSOR_STACK_SIZE}; use crate::syncctl::PoxSyncWatchdogComms; use crate::{Config, EventDispatcher}; @@ -894,8 +908,7 @@ impl MockamotoNode { &mut builder, &mut self.mempool, parent_chain_length, - None, - None, + &[], BlockBuilderSettings { max_miner_time_ms: 15_000, mempool_settings: MemPoolWalkSettings::default(), diff --git a/testnet/stacks-node/src/mockamoto/signer.rs b/testnet/stacks-node/src/mockamoto/signer.rs index c0d4af0b69..7e577b24f2 100644 --- a/testnet/stacks-node/src/mockamoto/signer.rs +++ b/testnet/stacks-node/src/mockamoto/signer.rs @@ -1,3 +1,4 @@ +use rand::{CryptoRng, RngCore, SeedableRng}; use stacks::chainstate::nakamoto::NakamotoBlock; use stacks::chainstate::stacks::ThresholdSignature; use wsts::curve::point::Point; @@ -22,9 +23,17 @@ pub struct SelfSigner { } impl SelfSigner { + pub fn from_seed(seed: u64) -> Self { + let rng = rand::rngs::StdRng::seed_from_u64(seed); + Self::from_rng::(rng) + } + pub fn single_signer() -> Self { - let mut rng = rand_core::OsRng::default(); + let rng = rand::rngs::OsRng::default(); + Self::from_rng::(rng) + } + fn from_rng(mut rng: RNG) -> Self { // Create the parties let mut signer_parties = [wsts::v2::Party::new(0, &[0], 1, 1, 1, &mut rng)]; @@ -54,7 +63,7 @@ impl SelfSigner { } pub fn sign_nakamoto_block(&mut self, block: &mut NakamotoBlock) { - let mut rng = rand_core::OsRng; + let mut rng = rand::rngs::OsRng::default(); let 
msg = block .header .signer_signature_hash() diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs index b7914dcba8..7d7f65f852 100644 --- a/testnet/stacks-node/src/mockamoto/tests.rs +++ b/testnet/stacks-node/src/mockamoto/tests.rs @@ -6,6 +6,7 @@ use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::db::StacksChainState; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey}; use stacks_common::types::StacksEpochId; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::to_hex; use super::MockamotoNode; @@ -18,6 +19,12 @@ use crate::{Config, ConfigFile}; #[test] fn observe_100_blocks() { let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap(); + conf.node.working_dir = format!( + "/tmp/stacks-node-tests/mock_observe_100_blocks-{}", + get_epoch_time_secs() + ); + conf.node.rpc_bind = "127.0.0.1:19343".into(); + conf.node.p2p_bind = "127.0.0.1:19344".into(); conf.node.mockamoto_time_ms = 10; let submitter_sk = StacksPrivateKey::from_seed(&[1]); @@ -25,8 +32,8 @@ fn observe_100_blocks() { conf.add_initial_balance(submitter_addr.to_string(), 1_000_000); let recipient_addr = StacksAddress::burn_address(false).into(); - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; + let observer_port = 19300; + test_observer::spawn_at(observer_port); conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], @@ -129,6 +136,12 @@ fn observe_100_blocks() { #[test] fn mempool_rpc_submit() { let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap(); + conf.node.working_dir = format!( + "/tmp/stacks-node-tests/mempool_rpc_submit-{}", + get_epoch_time_secs() + ); + conf.node.rpc_bind = "127.0.0.1:19743".into(); + conf.node.p2p_bind = "127.0.0.1:19744".into(); conf.node.mockamoto_time_ms = 10; let submitter_sk = 
StacksPrivateKey::from_seed(&[1]); @@ -136,8 +149,8 @@ fn mempool_rpc_submit() { conf.add_initial_balance(submitter_addr.to_string(), 1_000); let recipient_addr = StacksAddress::burn_address(false).into(); - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; + let observer_port = 19800; + test_observer::spawn_at(observer_port); conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs new file mode 100644 index 0000000000..ddcbc197f7 --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -0,0 +1,315 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use std::collections::HashSet; +use std::sync::mpsc::Receiver; +use std::thread; +use std::thread::JoinHandle; + +use stacks::burnchains::{BurnchainSigner, Txid}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::monitoring; +use stacks::monitoring::update_active_miners_count_gauge; +use stacks::net::atlas::AtlasConfig; +use stacks::net::relay::Relayer; +use stacks::net::stackerdb::StackerDBs; +use stacks_common::types::chainstate::SortitionId; +use stacks_common::types::StacksEpochId; + +use super::{Config, EventDispatcher, Keychain}; +use crate::burnchains::bitcoin_regtest_controller::addr2str; +use crate::neon_node::{LeaderKeyRegistrationState, StacksNode as NeonNode}; +use crate::run_loop::nakamoto::{Globals, RunLoop}; +use crate::run_loop::RegisteredKey; + +pub mod miner; +pub mod peer; +pub mod relayer; + +use self::peer::PeerThread; +use self::relayer::{RelayerDirective, RelayerThread}; + +pub const RELAYER_MAX_BUFFER: usize = 100; +const VRF_MOCK_MINER_KEY: u64 = 1; + +pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB + +pub type BlockCommits = HashSet; + +/// Node implementation for both miners and followers. +/// This struct is used to set up the node proper and launch the p2p thread and relayer thread. +/// It is further used by the main thread to communicate with these two threads. 
+pub struct StacksNode { + /// Atlas network configuration + pub atlas_config: AtlasConfig, + /// Global inter-thread communication handle + pub globals: Globals, + /// True if we're a miner + is_miner: bool, + /// handle to the p2p thread + pub p2p_thread_handle: JoinHandle<()>, + /// handle to the relayer thread + pub relayer_thread_handle: JoinHandle<()>, +} + +/// Types of errors that can arise during Nakamoto StacksNode operation +#[derive(Debug)] +pub enum Error { + /// Can't find the block sortition snapshot for the chain tip + SnapshotNotFoundForChainTip, + /// The burnchain tip changed while this operation was in progress + BurnchainTipChanged, + /// Error while spawning a subordinate thread + SpawnError(std::io::Error), + /// Injected testing errors + FaultInjection, + /// This miner was elected, but another sortition occurred before mining started + MissedMiningOpportunity, + /// Attempted to mine while there was no active VRF key + NoVRFKeyActive, + /// The parent block or tenure could not be found + ParentNotFound, + /// Something unexpected happened (e.g., hash mismatches) + UnexpectedChainState, + /// A burnchain operation failed when submitting it to the burnchain + BurnchainSubmissionFailed, + /// A new parent has been discovered since mining started + NewParentDiscovered, + /// A failure occurred while constructing a VRF Proof + BadVrfConstruction, + CannotSelfSign, + MiningFailure(ChainstateError), + SigningError(&'static str), + // The thread that we tried to send to has closed + ChannelClosed, +} + +impl StacksNode { + /// This function sets the global var `GLOBAL_BURNCHAIN_SIGNER`. + /// + /// This variable is used for prometheus monitoring (which only + /// runs when the feature flag `monitoring_prom` is activated). + /// The address is set using the single-signature BTC address + /// associated with `keychain`'s public key. 
This address always + /// assumes Epoch-2.1 rules for the miner address: if the + /// node is configured for segwit, then the miner address generated + /// is a segwit address, otherwise it is a p2pkh. + /// + fn set_monitoring_miner_address(keychain: &Keychain, relayer_thread: &RelayerThread) { + let public_key = keychain.get_pub_key(); + let miner_addr = relayer_thread + .bitcoin_controller + .get_miner_address(StacksEpochId::Epoch21, &public_key); + let miner_addr_str = addr2str(&miner_addr); + let _ = monitoring::set_burnchain_signer(BurnchainSigner(miner_addr_str)).map_err(|e| { + warn!("Failed to set global burnchain signer: {:?}", &e); + e + }); + } + + pub fn spawn( + runloop: &RunLoop, + globals: Globals, + // relay receiver endpoint for the p2p thread, so the relayer can feed it data to push + relay_recv: Receiver, + ) -> StacksNode { + let config = runloop.config().clone(); + let is_miner = runloop.is_miner(); + let burnchain = runloop.get_burnchain(); + let atlas_config = config.atlas.clone(); + let mut keychain = Keychain::default(config.node.seed.clone()); + if let Some(mining_key) = config.miner.mining_key.clone() { + keychain.set_nakamoto_sk(mining_key); + } + + // we can call _open_ here rather than _connect_, since connect is first called in + // make_genesis_block + let mut sortdb = SortitionDB::open( + &config.get_burn_db_file_path(), + true, + burnchain.pox_constants.clone(), + ) + .expect("Error while instantiating sortition db"); + + NeonNode::setup_ast_size_precheck(&config, &mut sortdb); + + let _ = config + .connect_mempool_db() + .expect("FATAL: database failure opening mempool"); + + let mut p2p_net = NeonNode::setup_peer_network(&config, &atlas_config, burnchain.clone()); + + let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true) + .expect("FATAL: failed to connect to stacker DB"); + + let relayer = Relayer::from_p2p(&mut p2p_net, stackerdbs); + + let local_peer = p2p_net.local_peer.clone(); + + // setup initial 
key registration + let leader_key_registration_state = if config.node.mock_mining { + // mock mining, pretend to have a registered key + let (vrf_public_key, _) = keychain.make_vrf_keypair(VRF_MOCK_MINER_KEY); + LeaderKeyRegistrationState::Active(RegisteredKey { + target_block_height: VRF_MOCK_MINER_KEY, + block_height: 1, + op_vtxindex: 1, + vrf_public_key, + }) + } else { + LeaderKeyRegistrationState::Inactive + }; + globals.set_initial_leader_key_registration_state(leader_key_registration_state); + + let relayer_thread = + RelayerThread::new(runloop, local_peer.clone(), relayer, keychain.clone()); + + StacksNode::set_monitoring_miner_address(&keychain, &relayer_thread); + + let relayer_thread_handle = thread::Builder::new() + .name(format!("relayer-{}", &local_peer.data_url)) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .spawn(move || { + relayer_thread.main(relay_recv); + }) + .expect("FATAL: failed to start relayer thread"); + + let p2p_event_dispatcher = runloop.get_event_dispatcher(); + let p2p_thread = PeerThread::new(runloop, p2p_net); + let p2p_thread_handle = thread::Builder::new() + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .name(format!( + "p2p-({},{})", + &config.node.p2p_bind, &config.node.rpc_bind + )) + .spawn(move || { + p2p_thread.main(p2p_event_dispatcher); + }) + .expect("FATAL: failed to start p2p thread"); + + info!("Start HTTP server on: {}", &config.node.rpc_bind); + info!("Start P2P server on: {}", &config.node.p2p_bind); + + StacksNode { + atlas_config, + globals, + is_miner, + p2p_thread_handle, + relayer_thread_handle, + } + } + + /// Notify the relayer that a new burn block has been processed by the sortition db, + /// telling it to process the block and begin mining if this miner won. + /// returns _false_ if the relayer hung up the channel. + /// Called from the main thread. 
+ fn relayer_burnchain_notify(&self, snapshot: BlockSnapshot) -> Result<(), Error> { + if !self.is_miner { + // node is a follower, don't need to notify the relayer of these events. + return Ok(()); + } + + info!( + "Tenure: Notify burn block!"; + "consensus_hash" => %snapshot.consensus_hash, + "burn_block_hash" => %snapshot.burn_header_hash, + "winning_stacks_block_hash" => %snapshot.winning_stacks_block_hash, + "burn_block_height" => &snapshot.block_height, + "sortition_id" => %snapshot.sortition_id + ); + + // unlike in neon_node, the nakamoto node should *always* notify the relayer of + // a new burnchain block + + self.globals + .relay_send + .send(RelayerDirective::ProcessedBurnBlock( + snapshot.consensus_hash, + snapshot.parent_burn_header_hash, + snapshot.winning_stacks_block_hash, + )) + .map_err(|_| Error::ChannelClosed) + } + + /// Process a state coming from the burnchain, by extracting the validated KeyRegisterOp + /// and inspecting if a sortition was won. + /// `ibd`: boolean indicating whether or not we are in the initial block download + /// Called from the main thread. 
+ pub fn process_burnchain_state( + &mut self, + sortdb: &SortitionDB, + sort_id: &SortitionId, + ibd: bool, + ) -> Result<(), Error> { + let ic = sortdb.index_conn(); + + let block_snapshot = SortitionDB::get_block_snapshot(&ic, sort_id) + .expect("Failed to obtain block snapshot for processed burn block.") + .expect("Failed to obtain block snapshot for processed burn block."); + let block_height = block_snapshot.block_height; + + let block_commits = + SortitionDB::get_block_commits_by_block(&ic, &block_snapshot.sortition_id) + .expect("Unexpected SortitionDB error fetching block commits"); + + let num_block_commits = block_commits.len(); + + update_active_miners_count_gauge(block_commits.len() as i64); + + for op in block_commits.into_iter() { + if op.txid == block_snapshot.winning_block_txid { + info!( + "Received burnchain block #{} including block_commit_op (winning) - {} ({})", + block_height, op.apparent_sender, &op.block_header_hash + ); + } else if self.is_miner { + info!( + "Received burnchain block #{} including block_commit_op - {} ({})", + block_height, op.apparent_sender, &op.block_header_hash + ); + } + } + + let key_registers = + SortitionDB::get_leader_keys_by_block(&ic, &block_snapshot.sortition_id) + .expect("Unexpected SortitionDB error fetching key registers"); + + let num_key_registers = key_registers.len(); + + self.globals + .try_activate_leader_key_registration(block_height, key_registers); + + debug!( + "Processed burnchain state"; + "burn_height" => block_height, + "leader_keys_count" => num_key_registers, + "block_commits_count" => num_block_commits, + "in_initial_block_download?" 
=> ibd, + ); + + self.globals.set_last_sortition(block_snapshot.clone()); + + // notify the relayer thread of the new sortition state + self.relayer_burnchain_notify(block_snapshot) + } + + /// Join all inner threads + pub fn join(self) { + self.relayer_thread_handle.join().unwrap(); + self.p2p_thread_handle.join().unwrap(); + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs new file mode 100644 index 0000000000..b38225f31f --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -0,0 +1,635 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use std::convert::TryFrom; +use std::thread; +use std::thread::JoinHandle; + +use clarity::vm::types::PrincipalData; +use stacks::burnchains::{Burnchain, BurnchainParameters}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; +use stacks::chainstate::stacks::{ + CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, + TenureChangeCause, TenureChangePayload, ThresholdSignature, TransactionAnchorMode, + TransactionPayload, TransactionVersion, +}; +use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::{PrivateKey, StacksEpochId}; +use stacks_common::util::hash::Hash160; +use stacks_common::util::vrf::VRFProof; + +use super::relayer::RelayerThread; +use super::{Config, Error as NakamotoNodeError, EventDispatcher, Keychain}; +use crate::mockamoto::signer::SelfSigner; +use crate::nakamoto_node::VRF_MOCK_MINER_KEY; +use crate::run_loop::nakamoto::Globals; +use crate::run_loop::RegisteredKey; +use crate::{neon_node, ChainTip}; + +pub enum MinerDirective { + /// The miner won sortition so they should begin a new tenure + BeginTenure { + parent_tenure_start: StacksBlockId, + burnchain_tip: BlockSnapshot, + }, + /// The miner should try to continue their tenure if they are the active miner + ContinueTenure { new_burn_view: ConsensusHash }, + /// The miner did not win sortition + StopTenure, +} + +struct ParentTenureInfo { + parent_tenure_blocks: u64, + parent_tenure_consensus_hash: ConsensusHash, +} + +/// Metadata required for beginning a new tenure +struct ParentStacksBlockInfo { + /// Header metadata for the Stacks block we're going to build on top of 
+ stacks_parent_header: StacksHeaderInfo, + /// the total amount burned in the sortition that selected the Stacks block parent + parent_block_total_burn: u64, + /// nonce to use for this new block's coinbase transaction + coinbase_nonce: u64, + parent_tenure: Option, +} + +pub struct BlockMinerThread { + /// node config struct + config: Config, + /// handle to global state + globals: Globals, + /// copy of the node's keychain + keychain: Keychain, + /// burnchain configuration + burnchain: Burnchain, + /// Set of blocks that we have mined, but are still potentially-broadcastable + /// (copied from RelayerThread since we need the info to determine the strategy for mining the + /// next block during this tenure). + last_mined_blocks: Vec, + /// Copy of the node's registered VRF key + registered_key: RegisteredKey, + /// Burnchain block snapshot which elected this miner + burn_block: BlockSnapshot, + /// The start of the parent tenure for this tenure + parent_tenure_id: StacksBlockId, + /// Handle to the node's event dispatcher + event_dispatcher: EventDispatcher, +} + +impl BlockMinerThread { + /// Instantiate the miner thread + pub fn new( + rt: &RelayerThread, + registered_key: RegisteredKey, + burn_block: BlockSnapshot, + parent_tenure_id: StacksBlockId, + ) -> BlockMinerThread { + BlockMinerThread { + config: rt.config.clone(), + globals: rt.globals.clone(), + keychain: rt.keychain.clone(), + burnchain: rt.burnchain.clone(), + last_mined_blocks: vec![], + registered_key, + burn_block, + event_dispatcher: rt.event_dispatcher.clone(), + parent_tenure_id, + } + } + + /// Stop a miner tenure by blocking the miner and then joining the tenure thread + pub fn stop_miner(globals: &Globals, prior_miner: JoinHandle<()>) { + globals.block_miner(); + prior_miner + .join() + .expect("FATAL: IO failure joining prior mining thread"); + globals.unblock_miner(); + } + + pub fn run_miner(mut self, prior_miner: Option>) { + // when starting a new tenure, block the mining thread if 
its currently running. + // the new mining thread will join it (so that the new mining thread stalls, not the relayer) + if let Some(prior_miner) = prior_miner { + Self::stop_miner(&self.globals, prior_miner); + } + + // now, actually run this tenure + let new_block = match self.mine_block() { + Ok(x) => x, + Err(e) => { + warn!("Failed to mine block: {e:?}"); + return; + } + }; + + if let Some(self_signer) = self.config.self_signing() { + if let Err(e) = self.self_sign_and_broadcast(self_signer, new_block.clone()) { + warn!("Error self-signing block: {e:?}"); + } else { + self.globals.coord().announce_new_stacks_block(); + } + } else { + warn!("Not self-signing: nakamoto node does not support stacker-signer-protocol yet"); + } + + self.globals.counters.bump_naka_mined_blocks(); + self.last_mined_blocks.push(new_block); + } + + fn self_sign_and_broadcast( + &self, + mut signer: SelfSigner, + mut block: NakamotoBlock, + ) -> Result<(), ChainstateError> { + signer.sign_nakamoto_block(&mut block); + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + let chainstate_config = chain_state.config(); + let sort_db = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + let mut sortition_handle = sort_db.index_handle_at_tip(); + let staging_tx = chain_state.staging_db_tx_begin()?; + NakamotoChainState::accept_block( + &chainstate_config, + block, + &mut sortition_handle, + &staging_tx, + &signer.aggregate_public_key, + )?; + staging_tx.commit()?; + Ok(()) + } + + /// Get the coinbase recipient address, if set in the config and if allowed in this epoch + fn get_coinbase_recipient(&self, epoch_id: StacksEpochId) -> Option { + if epoch_id < StacksEpochId::Epoch21 && self.config.miner.block_reward_recipient.is_some() { + warn!("Coinbase pay-to-contract is not supported in the current epoch"); + 
None + } else { + self.config.miner.block_reward_recipient.clone() + } + } + + fn generate_tenure_change_tx( + &mut self, + nonce: u64, + parent_block_id: StacksBlockId, + parent_tenure_consensus_hash: ConsensusHash, + parent_tenure_blocks: u64, + miner_pkh: Hash160, + ) -> Result { + if self.config.self_signing().is_none() { + // if we're not self-signing, then we can't generate a tenure change tx: it has to come from the signers. + warn!("Tried to generate a tenure change transaction, but we aren't self-signing"); + return Err(NakamotoNodeError::CannotSelfSign); + } + let is_mainnet = self.config.is_mainnet(); + let chain_id = self.config.burnchain.chain_id; + let tenure_change_tx_payload = TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: self.burn_block.consensus_hash.clone(), + prev_tenure_consensus_hash: parent_tenure_consensus_hash, + burn_view_consensus_hash: self.burn_block.consensus_hash.clone(), + previous_tenure_end: parent_block_id, + previous_tenure_blocks: u32::try_from(parent_tenure_blocks) + .expect("FATAL: more than u32 blocks in a tenure"), + cause: TenureChangeCause::BlockFound, + pubkey_hash: miner_pkh, + signers: vec![], + signature: ThresholdSignature::mock(), + }); + + let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); + tx_auth.set_origin_nonce(nonce); + + let version = if is_mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + let mut tx = StacksTransaction::new(version, tx_auth, tenure_change_tx_payload); + + tx.chain_id = chain_id; + tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&tx); + self.keychain.sign_as_origin(&mut tx_signer); + + Ok(tx_signer.get_tx().unwrap()) + } + + /// Create a coinbase transaction. 
+ fn generate_coinbase_tx( + &mut self, + nonce: u64, + epoch_id: StacksEpochId, + vrf_proof: VRFProof, + ) -> StacksTransaction { + let is_mainnet = self.config.is_mainnet(); + let chain_id = self.config.burnchain.chain_id; + let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); + tx_auth.set_origin_nonce(nonce); + + let version = if is_mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + let recipient_opt = self.get_coinbase_recipient(epoch_id); + + let mut tx = StacksTransaction::new( + version, + tx_auth, + TransactionPayload::Coinbase( + CoinbasePayload([0u8; 32]), + recipient_opt, + Some(vrf_proof), + ), + ); + tx.chain_id = chain_id; + tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&tx); + self.keychain.sign_as_origin(&mut tx_signer); + + tx_signer.get_tx().unwrap() + } + + /// Load up the parent block info for mining. + /// If there's no parent because this is the first block, then return the genesis block's info. + /// If we can't find the parent in the DB but we expect one, return None. 
+ fn load_block_parent_info( + &self, + burn_db: &mut SortitionDB, + chain_state: &mut StacksChainState, + ) -> Result { + let Some(stacks_tip) = + NakamotoChainState::get_canonical_block_header(chain_state.db(), burn_db) + .expect("FATAL: could not query chain tip") + else { + debug!("No Stacks chain tip known, will return a genesis block"); + let (network, _) = self.config.burnchain.get_bitcoin_network(); + let burnchain_params = + BurnchainParameters::from_params(&self.config.burnchain.chain, &network) + .expect("Bitcoin network unsupported"); + + let chain_tip = ChainTip::genesis( + &burnchain_params.first_block_hash, + burnchain_params.first_block_height.into(), + burnchain_params.first_block_timestamp.into(), + ); + + return Ok(ParentStacksBlockInfo { + parent_tenure: Some(ParentTenureInfo { + parent_tenure_consensus_hash: chain_tip.metadata.consensus_hash, + parent_tenure_blocks: 0, + }), + stacks_parent_header: chain_tip.metadata, + parent_block_total_burn: 0, + coinbase_nonce: 0, + }); + }; + + let miner_address = self + .keychain + .origin_address(self.config.is_mainnet()) + .unwrap(); + match ParentStacksBlockInfo::lookup( + chain_state, + burn_db, + &self.burn_block, + miner_address, + &self.parent_tenure_id, + stacks_tip, + ) { + Ok(parent_info) => Ok(parent_info), + Err(NakamotoNodeError::BurnchainTipChanged) => { + self.globals.counters.bump_missed_tenures(); + Err(NakamotoNodeError::BurnchainTipChanged) + } + Err(e) => Err(e), + } + } + + /// Generate the VRF proof for the block we're going to build. 
+ /// Returns Some(proof) if we could make the proof + /// Return None if we could not make the proof + fn make_vrf_proof(&mut self) -> Option { + // if we're a mock miner, then make sure that the keychain has a keypair for the mocked VRF + // key + let vrf_proof = if self.config.node.mock_mining { + self.keychain.generate_proof( + VRF_MOCK_MINER_KEY, + self.burn_block.sortition_hash.as_bytes(), + ) + } else { + self.keychain.generate_proof( + self.registered_key.target_block_height, + self.burn_block.sortition_hash.as_bytes(), + ) + }; + + debug!( + "Generated VRF Proof: {} over {} ({},{}) with key {}", + vrf_proof.to_hex(), + &self.burn_block.sortition_hash, + &self.burn_block.block_height, + &self.burn_block.burn_header_hash, + &self.registered_key.vrf_public_key.to_hex() + ); + Some(vrf_proof) + } + + /// Try to mine a Stacks block by assembling one from mempool transactions and sending a + /// burnchain block-commit transaction. If we succeed, then return the assembled block data as + /// well as the microblock private key to use to produce microblocks. + /// Return None if we couldn't build a block for whatever reason. + fn mine_block(&mut self) -> Result { + debug!("block miner thread ID is {:?}", thread::current().id()); + neon_node::fault_injection_long_tenure(); + + let burn_db_path = self.config.get_burn_db_file_path(); + + // NOTE: read-write access is needed in order to be able to query the recipient set. 
+ // This is an artifact of the way the MARF is built (see #1449) + let mut burn_db = + SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + + let mut mem_pool = self + .config + .connect_mempool_db() + .expect("Database failure opening mempool"); + + let target_epoch_id = + SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1) + .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)? + .expect("FATAL: no epoch defined") + .epoch_id; + let mut parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; + let vrf_proof = self + .make_vrf_proof() + .ok_or_else(|| NakamotoNodeError::BadVrfConstruction)?; + + if self.last_mined_blocks.is_empty() { + if parent_block_info.parent_tenure.is_none() { + warn!( + "Miner should be starting a new tenure, but failed to load parent tenure info" + ); + return Err(NakamotoNodeError::ParentNotFound); + } + } + + // create our coinbase if this is the first block we've mined this tenure + let tenure_start_info = if let Some(ref par_tenure_info) = parent_block_info.parent_tenure { + let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); + let current_miner_nonce = parent_block_info.coinbase_nonce; + let tenure_change_tx = self.generate_tenure_change_tx( + current_miner_nonce, + parent_block_id, + par_tenure_info.parent_tenure_consensus_hash, + par_tenure_info.parent_tenure_blocks, + self.keychain.get_nakamoto_pkh(), + )?; + let coinbase_tx = self.generate_coinbase_tx( + current_miner_nonce + 1, + target_epoch_id, + vrf_proof.clone(), + ); + NakamotoTenureInfo { + coinbase_tx: Some(coinbase_tx), + tenure_change_tx: Some(tenure_change_tx), + } + } else { + NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + } + }; + + 
parent_block_info.stacks_parent_header.microblock_tail = None; + + // build the block itself + let (mut block, _, _) = match NakamotoBlockBuilder::build_nakamoto_block( + &chain_state, + &burn_db.index_conn(), + &mut mem_pool, + // TODO (refactor): the nakamoto block builder doesn't use the parent tenure ID, + // it has to be included in the tenure change tx, which is an arg to the builder. + // we should probably just remove this from the nakamoto block builder, so that + // there isn't duplicated or unused logic here + &self.parent_tenure_id, + &parent_block_info.stacks_parent_header, + &self.burn_block.consensus_hash, + self.burn_block.total_burn, + tenure_start_info, + self.config.make_block_builder_settings( + // TODO: the attempt counter needs a different configuration approach in nakamoto + 1, + false, + self.globals.get_miner_status(), + ), + Some(&self.event_dispatcher), + ) { + Ok(block) => block, + Err(e) => { + error!("Relayer: Failure mining anchored block: {}", e); + return Err(NakamotoNodeError::MiningFailure(e)); + } + }; + + let mining_key = self.keychain.get_nakamoto_sk(); + let miner_signature = mining_key + .sign( + block + .header + .signature_hash() + .map_err(|_| NakamotoNodeError::SigningError("Could not create sighash"))? + .as_bytes(), + ) + .map_err(NakamotoNodeError::SigningError)?; + block.header.miner_signature = miner_signature; + + info!( + "Miner: Succeeded assembling {} block #{}: {}, with {} txs", + if parent_block_info.parent_block_total_burn == 0 { + "Genesis" + } else { + "Stacks" + }, + block.header.chain_length, + block.header.block_hash(), + block.txs.len(), + ); + + // last chance -- confirm that the stacks tip is unchanged (since it could have taken long + // enough to build this block that another block could have arrived), and confirm that all + // Stacks blocks with heights higher than the canonical tip are processed.
+ let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if cur_burn_chain_tip.consensus_hash != block.header.consensus_hash { + info!("Miner: Cancel block assembly; burnchain tip has changed"); + self.globals.counters.bump_missed_tenures(); + return Err(NakamotoNodeError::BurnchainTipChanged); + } + + Ok(block) + } +} + +impl ParentStacksBlockInfo { + /// Determine where in the set of forks to attempt to mine the next anchored block. + /// `mine_tip_ch` and `mine_tip_bhh` identify the parent block on top of which to mine. + /// `check_burn_block` identifies what we believe to be the burn chain's sortition history tip. + /// This is used to mitigate (but not eliminate) a TOCTTOU issue with mining: the caller's + /// conception of the sortition history tip may have become stale by the time they call this + /// method, in which case, mining should *not* happen (since the block will be invalid). 
+ pub fn lookup( + chain_state: &mut StacksChainState, + burn_db: &mut SortitionDB, + check_burn_block: &BlockSnapshot, + miner_address: StacksAddress, + parent_tenure_id: &StacksBlockId, + stacks_tip_header: StacksHeaderInfo, + ) -> Result { + // the stacks block I'm mining off of's burn header hash and vtxindex: + let parent_snapshot = SortitionDB::get_block_snapshot_consensus( + burn_db.conn(), + &stacks_tip_header.consensus_hash, + ) + .expect("Failed to look up block's parent snapshot") + .expect("Failed to look up block's parent snapshot"); + + let parent_sortition_id = &parent_snapshot.sortition_id; + + let parent_block_total_burn = + if &stacks_tip_header.consensus_hash == &FIRST_BURNCHAIN_CONSENSUS_HASH { + 0 + } else { + let parent_burn_block = + SortitionDB::get_block_snapshot(burn_db.conn(), parent_sortition_id) + .expect("SortitionDB failure.") + .ok_or_else(|| { + error!( + "Failed to find block snapshot for the parent sortition"; + "parent_sortition_id" => %parent_sortition_id + ); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + parent_burn_block.total_burn + }; + + // don't mine off of an old burnchain block + let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { + info!( + "New canonical burn chain tip detected. 
Will not try to mine."; + "new_consensus_hash" => %burn_chain_tip.consensus_hash, + "old_consensus_hash" => %check_burn_block.consensus_hash, + "new_burn_height" => burn_chain_tip.block_height, + "old_burn_height" => check_burn_block.block_height + ); + return Err(NakamotoNodeError::BurnchainTipChanged); + } + + let Ok(Some(parent_tenure_header)) = + NakamotoChainState::get_block_header(chain_state.db(), &parent_tenure_id) + else { + warn!("Failed loading parent tenure ID"; "parent_tenure_id" => %parent_tenure_id); + return Err(NakamotoNodeError::ParentNotFound); + }; + + // check if we're mining a first tenure block (by checking if our parent block is in the tenure of parent_tenure_id) + // and if so, figure out how many blocks there were in the parent tenure + let parent_tenure_info = if stacks_tip_header.consensus_hash + == parent_tenure_header.consensus_hash + { + let parent_tenure_blocks = if parent_tenure_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + let Ok(Some(last_parent_tenure_header)) = + NakamotoChainState::get_nakamoto_tenure_finish_block_header( + chain_state.db(), + &parent_tenure_header.consensus_hash, + ) + else { + warn!("Failed loading last block of parent tenure"; "parent_tenure_id" => %parent_tenure_id); + return Err(NakamotoNodeError::ParentNotFound); + }; + // the last known tenure block of our parent should be the stacks_tip. if not, error. 
+ if stacks_tip_header.index_block_hash() + != last_parent_tenure_header.index_block_hash() + { + return Err(NakamotoNodeError::NewParentDiscovered); + } + 1 + last_parent_tenure_header.stacks_block_height + - parent_tenure_header.stacks_block_height + } else { + 1 + }; + let parent_tenure_consensus_hash = parent_tenure_header.consensus_hash.clone(); + Some(ParentTenureInfo { + parent_tenure_blocks, + parent_tenure_consensus_hash, + }) + } else { + None + }; + + debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {} (height {} hash {})", + &check_burn_block.consensus_hash, check_burn_block.block_height, &check_burn_block.burn_header_hash, + &parent_snapshot.consensus_hash, parent_snapshot.block_height, &parent_snapshot.burn_header_hash); + + let coinbase_nonce = { + let principal = miner_address.into(); + let account = chain_state + .with_read_only_clarity_tx( + &burn_db.index_conn(), + &stacks_tip_header.index_block_hash(), + |conn| StacksChainState::get_account(conn, &principal), + ) + .expect(&format!( + "BUG: stacks tip block {} no longer exists after we queried it", + &stacks_tip_header.index_block_hash(), + )); + account.nonce + }; + + Ok(ParentStacksBlockInfo { + stacks_parent_header: stacks_tip_header, + parent_block_total_burn, + coinbase_nonce, + parent_tenure: parent_tenure_info, + }) + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs new file mode 100644 index 0000000000..376c437723 --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -0,0 +1,330 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later 
version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use std::collections::VecDeque; +use std::default::Default; +use std::net::SocketAddr; +use std::sync::mpsc::TrySendError; +use std::time::Duration; +use std::{cmp, thread}; + +use stacks::burnchains::db::BurnchainHeaderReader; +use stacks::burnchains::PoxConstants; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::miner::signal_mining_blocked; +use stacks::core::mempool::MemPoolDB; +use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; +use stacks::cost_estimates::{CostEstimator, FeeEstimator, UnitEstimator}; +use stacks::net::dns::{DNSClient, DNSResolver}; +use stacks::net::p2p::PeerNetwork; +use stacks::net::RPCHandlerArgs; +use stacks_common::util::hash::Sha256Sum; + +use crate::burnchains::make_bitcoin_indexer; +use crate::nakamoto_node::relayer::RelayerDirective; +use crate::neon_node::open_chainstate_with_faults; +use crate::run_loop::nakamoto::{Globals, RunLoop}; +use crate::{Config, EventDispatcher}; + +/// Thread that runs the network state machine, handling both p2p and http requests. +pub struct PeerThread { + /// Node config + config: Config, + /// instance of the peer network. Made optional in order to trick the borrow checker. 
+ net: PeerNetwork, + /// handle to global inter-thread comms + globals: Globals, + /// how long to wait for network messages on each poll, in millis + poll_timeout: u64, + /// handle to the sortition DB + sortdb: SortitionDB, + /// handle to the chainstate DB + chainstate: StacksChainState, + /// handle to the mempool DB + mempool: MemPoolDB, + /// buffer of relayer commands with block data that couldn't be sent to the relayer just yet + /// (i.e. due to backpressure). We track this separately, instead of just using a bigger + /// channel, because we need to know when backpressure occurs in order to throttle the p2p + /// thread's downloader. + results_with_data: VecDeque, + /// total number of p2p state-machine passes so far. Used to signal when to download the next + /// reward cycle of blocks + num_p2p_state_machine_passes: u64, + /// total number of inventory state-machine passes so far. Used to signal when to download the + /// next reward cycle of blocks. + num_inv_sync_passes: u64, + /// total number of download state-machine passes so far. Used to signal when to download the + /// next reward cycle of blocks. + num_download_passes: u64, + /// last burnchain block seen in the PeerNetwork's chain view since the last run + last_burn_block_height: u64, +} + +impl PeerThread { + /// Main loop of the p2p thread. + /// Runs in a separate thread. + /// Continuously receives, until told otherwise. + pub fn main(mut self, event_dispatcher: EventDispatcher) { + debug!("p2p thread ID is {:?}", thread::current().id()); + let should_keep_running = self.globals.should_keep_running.clone(); + let (mut dns_resolver, mut dns_client) = DNSResolver::new(10); + + // spawn a daemon thread that runs the DNS resolver. + // It will die when the rest of the system dies. 
+ { + let _jh = thread::Builder::new() + .name("dns-resolver".to_string()) + .spawn(move || { + debug!("DNS resolver thread ID is {:?}", thread::current().id()); + dns_resolver.thread_main(); + }) + .unwrap(); + } + + // NOTE: these must be instantiated in the thread context, since it can't be safely sent + // between threads + let fee_estimator_opt = self.config.make_fee_estimator(); + let cost_estimator = self + .config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let cost_metric = self + .config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let indexer = make_bitcoin_indexer(&self.config, Some(should_keep_running)); + + // receive until we can't reach the receiver thread + loop { + if !self.globals.keep_running() { + break; + } + if !self.run_one_pass( + &indexer, + Some(&mut dns_client), + &event_dispatcher, + &cost_estimator, + &cost_metric, + fee_estimator_opt.as_ref(), + ) { + break; + } + } + + // kill miner + signal_mining_blocked(self.globals.get_miner_status()); + + // set termination flag so other threads die + self.globals.signal_stop(); + + // thread exited, so signal to the relayer thread to die. + while let Err(TrySendError::Full(_)) = + self.globals.relay_send.try_send(RelayerDirective::Exit) + { + warn!("Failed to direct relayer thread to exit, sleeping and trying again"); + thread::sleep(Duration::from_secs(5)); + } + info!("P2P thread exit!"); + } + + /// Instantiate the p2p thread. + /// Binds the addresses in the config (which may panic if the port is blocked). + /// This is so the node will crash "early" before any new threads start if there's going to be + /// a bind error anyway. 
+ pub fn new(runloop: &RunLoop, net: PeerNetwork) -> PeerThread { + Self::new_all( + runloop.get_globals(), + runloop.config(), + runloop.get_burnchain().pox_constants, + net, + ) + } + + fn new_all( + globals: Globals, + config: &Config, + pox_constants: PoxConstants, + mut net: PeerNetwork, + ) -> Self { + let config = config.clone(); + let mempool = config + .connect_mempool_db() + .expect("FATAL: database failure opening mempool"); + let burn_db_path = config.get_burn_db_file_path(); + + let sortdb = SortitionDB::open(&burn_db_path, false, pox_constants) + .expect("FATAL: could not open sortition DB"); + + let chainstate = + open_chainstate_with_faults(&config).expect("FATAL: could not open chainstate DB"); + + let p2p_sock: SocketAddr = config.node.p2p_bind.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.p2p_bind + )); + let rpc_sock = config.node.rpc_bind.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.rpc_bind + )); + + net.bind(&p2p_sock, &rpc_sock) + .expect("BUG: PeerNetwork could not bind or is already bound"); + + let poll_timeout = cmp::min(5000, config.miner.first_attempt_time_ms / 2); + + PeerThread { + config, + net, + globals, + poll_timeout, + sortdb, + chainstate, + mempool, + results_with_data: VecDeque::new(), + num_p2p_state_machine_passes: 0, + num_inv_sync_passes: 0, + num_download_passes: 0, + last_burn_block_height: 0, + } + } + + /// Run one pass of the p2p/http state machine + /// Return true if we should continue running passes; false if not + pub(crate) fn run_one_pass( + &mut self, + indexer: &B, + dns_client_opt: Option<&mut DNSClient>, + event_dispatcher: &EventDispatcher, + cost_estimator: &Box, + cost_metric: &Box, + fee_estimator: Option<&Box>, + ) -> bool { + // initial block download? 
+ let ibd = self.globals.sync_comms.get_ibd(); + let download_backpressure = self.results_with_data.len() > 0; + let poll_ms = if !download_backpressure && self.net.has_more_downloads() { + // keep getting those blocks -- drive the downloader state-machine + debug!( + "P2P: backpressure: {}, more downloads: {}", + download_backpressure, + self.net.has_more_downloads() + ); + 1 + } else { + self.poll_timeout + }; + + // do one pass + let p2p_res = { + // NOTE: handler_args must be created such that it outlives the inner net.run() call and + // doesn't ref anything within p2p_thread. + let handler_args = RPCHandlerArgs { + exit_at_block_height: self.config.burnchain.process_exit_at_block_height.clone(), + genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) + .unwrap(), + event_observer: Some(event_dispatcher), + cost_estimator: Some(cost_estimator.as_ref()), + cost_metric: Some(cost_metric.as_ref()), + fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), + ..RPCHandlerArgs::default() + }; + self.net.run( + indexer, + &self.sortdb, + &mut self.chainstate, + &mut self.mempool, + dns_client_opt, + download_backpressure, + ibd, + poll_ms, + &handler_args, + ) + }; + match p2p_res { + Ok(network_result) => { + let mut have_update = false; + if self.num_p2p_state_machine_passes < network_result.num_state_machine_passes { + // p2p state-machine did a full pass. Notify anyone listening. + self.globals.sync_comms.notify_p2p_state_pass(); + self.num_p2p_state_machine_passes = network_result.num_state_machine_passes; + } + + if self.num_inv_sync_passes < network_result.num_inv_sync_passes { + // inv-sync state-machine did a full pass. Notify anyone listening. 
+ self.globals.sync_comms.notify_inv_sync_pass(); + self.num_inv_sync_passes = network_result.num_inv_sync_passes; + + // the relayer cares about the number of inventory passes, so pass this along + have_update = true; + } + + if self.num_download_passes < network_result.num_download_passes { + // download state-machine did a full pass. Notify anyone listening. + self.globals.sync_comms.notify_download_pass(); + self.num_download_passes = network_result.num_download_passes; + + // the relayer cares about the number of download passes, so pass this along + have_update = true; + } + + if network_result.has_data_to_store() + || self.last_burn_block_height != network_result.burn_height + || have_update + { + // pass along if we have blocks, microblocks, or transactions, or a status + // update on the network's view of the burnchain + self.last_burn_block_height = network_result.burn_height; + self.results_with_data + .push_back(RelayerDirective::HandleNetResult(network_result)); + } + } + Err(e) => { + // this is only reachable if the network is not instantiated correctly -- + // i.e. 
you didn't connect it + panic!("P2P: Failed to process network dispatch: {:?}", &e); + } + }; + + while let Some(next_result) = self.results_with_data.pop_front() { + // have blocks, microblocks, and/or transactions (don't care about anything else), + // or a directive to mine microblocks + if let Err(e) = self.globals.relay_send.try_send(next_result) { + debug!( + "P2P: {:?}: download backpressure detected (bufferred {})", + &self.net.local_peer, + self.results_with_data.len() + ); + match e { + TrySendError::Full(directive) => { + // don't lose this data -- just try it again + self.results_with_data.push_front(directive); + break; + } + TrySendError::Disconnected(_) => { + info!("P2P: Relayer hang up with p2p channel"); + self.globals.signal_stop(); + return false; + } + } + } else { + debug!("P2P: Dispatched result to Relayer!"); + } + } + + true + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs new file mode 100644 index 0000000000..f10a327b60 --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -0,0 +1,893 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use core::fmt; +use std::collections::HashSet; +use std::sync::mpsc::{Receiver, RecvTimeoutError}; +use std::thread::JoinHandle; +use std::time::{Duration, Instant}; + +use stacks::burnchains::{Burnchain, Txid}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::operations::leader_block_commit::{ + RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS, +}; +use stacks::chainstate::burn::operations::{ + BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, +}; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; +use stacks::chainstate::nakamoto::NakamotoChainState; +use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::miner::{ + get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, +}; +use stacks::core::mempool::MemPoolDB; +use stacks::core::{ + FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER, +}; +use stacks::monitoring::increment_stx_blocks_mined_counter; +use stacks::net::db::LocalPeer; +use stacks::net::relay::Relayer; +use stacks::net::NetworkResult; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, VRFSeed, +}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::get_epoch_time_ms; +use stacks_common::util::hash::Hash160; +use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; + +use super::{ + BlockCommits, Config, Error as NakamotoNodeError, EventDispatcher, Keychain, + BLOCK_PROCESSOR_STACK_SIZE, +}; +use crate::burnchains::BurnchainController; +use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective}; +use crate::neon_node::{ + fault_injection_skip_mining, open_chainstate_with_faults, LeaderKeyRegistrationState, +}; +use crate::run_loop::nakamoto::{Globals, RunLoop}; +use crate::run_loop::RegisteredKey; +use 
crate::BitcoinRegtestController;
+
+/// Command types for the Nakamoto relayer thread, issued to it by other threads
+pub enum RelayerDirective {
+    /// Handle some new data that arrived on the network (such as blocks, transactions, and microblocks)
+    HandleNetResult(NetworkResult),
+    /// A new burn block has been processed by the SortitionDB, check if this miner won sortition,
+    /// and if so, start the miner thread
+    ProcessedBurnBlock(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash),
+    /// Either a new burn block has been processed (without a miner active yet) or a
+    /// nakamoto tenure's first block has been processed, so the relayer should issue
+    /// a block commit
+    IssueBlockCommit(ConsensusHash, BlockHeaderHash),
+    /// Try to register a VRF public key
+    RegisterKey(BlockSnapshot),
+    /// Stop the relayer thread
+    Exit,
+}
+
+impl fmt::Display for RelayerDirective {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            RelayerDirective::HandleNetResult(_) => write!(f, "HandleNetResult"),
+            RelayerDirective::ProcessedBurnBlock(_, _, _) => write!(f, "ProcessedBurnBlock"),
+            RelayerDirective::IssueBlockCommit(_, _) => write!(f, "IssueBlockCommit"),
+            RelayerDirective::RegisterKey(_) => write!(f, "RegisterKey"),
+            RelayerDirective::Exit => write!(f, "Exit"),
+        }
+    }
+}
+
+/// Relayer thread
+/// * accepts network results and stores blocks and microblocks
+/// * forwards new blocks, microblocks, and transactions to the p2p thread
+/// * issues (and re-issues) block commits to participate as a miner
+/// * processes burnchain state to determine if selected as a miner
+/// * if mining, runs the miner and broadcasts blocks (via a subordinate MinerThread)
+pub struct RelayerThread {
+    /// Node config
+    pub(crate) config: Config,
+    /// Handle to the sortition DB
+    sortdb: SortitionDB,
+    /// Handle to the chainstate DB
+    chainstate: StacksChainState,
+    /// Handle to the mempool DB
+    mempool: MemPoolDB,
+    /// Handle to global state and inter-thread
communication channels
+    pub(crate) globals: Globals,
+    /// Authoritative copy of the keychain state
+    pub(crate) keychain: Keychain,
+    /// Burnchain configuration
+    pub(crate) burnchain: Burnchain,
+    /// height of last VRF key registration request
+    last_vrf_key_burn_height: Option,
+    /// Set of blocks that we have mined, but are still potentially-broadcastable
+    // TODO: this field is a slow leak!
+    pub(crate) last_commits: BlockCommits,
+    /// client to the burnchain (used only for sending block-commits)
+    pub(crate) bitcoin_controller: BitcoinRegtestController,
+    /// client to the event dispatcher
+    pub(crate) event_dispatcher: EventDispatcher,
+    /// copy of the local peer state
+    local_peer: LocalPeer,
+    /// last observed burnchain block height from the p2p thread (obtained from network results)
+    last_network_block_height: u64,
+    /// time at which we observed a change in the network block height (epoch time in millis)
+    last_network_block_height_ts: u128,
+    /// last observed number of downloader state-machine passes from the p2p thread (obtained from
+    /// network results)
+    last_network_download_passes: u64,
+    /// last observed number of inventory state-machine passes from the p2p thread (obtained from
+    /// network results)
+    last_network_inv_passes: u64,
+    /// minimum number of downloader state-machine passes that must take place before mining (this
+    /// is used to ensure that the p2p thread attempts to download new Stacks block data before
+    /// this thread tries to mine a block)
+    min_network_download_passes: u64,
+    /// minimum number of inventory state-machine passes that must take place before mining (this
+    /// is used to ensure that the p2p thread attempts to download new Stacks block data before
+    /// this thread tries to mine a block)
+    min_network_inv_passes: u64,
+
+    /// Inner relayer instance for forwarding broadcasted data back to the p2p thread for dispatch
+    /// to neighbors
+    relayer: Relayer,
+
+    /// handle to the subordinate miner thread
+
miner_thread: Option>, + /// The relayer thread reads directives from the relay_rcv, but it also periodically wakes up + /// to check if it should issue a block commit or try to register a VRF key + next_initiative: Instant, + is_miner: bool, + /// This is the last snapshot in which the relayer committed, and the parent_tenure_id + /// which was committed to + last_committed: Option<(BlockSnapshot, StacksBlockId)>, +} + +impl RelayerThread { + /// Instantiate relayer thread. + /// Uses `runloop` to obtain globals, config, and `is_miner`` status + pub fn new( + runloop: &RunLoop, + local_peer: LocalPeer, + relayer: Relayer, + keychain: Keychain, + ) -> RelayerThread { + let config = runloop.config().clone(); + let globals = runloop.get_globals(); + let burn_db_path = config.get_burn_db_file_path(); + let is_miner = runloop.is_miner(); + + let sortdb = SortitionDB::open(&burn_db_path, true, runloop.get_burnchain().pox_constants) + .expect("FATAL: failed to open burnchain DB"); + + let chainstate = + open_chainstate_with_faults(&config).expect("FATAL: failed to open chainstate DB"); + + let mempool = config + .connect_mempool_db() + .expect("Database failure opening mempool"); + + let bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone()); + + RelayerThread { + config: config.clone(), + sortdb, + chainstate, + mempool, + globals, + keychain, + burnchain: runloop.get_burnchain(), + last_vrf_key_burn_height: None, + last_commits: HashSet::new(), + bitcoin_controller, + event_dispatcher: runloop.get_event_dispatcher(), + local_peer, + + last_network_block_height: 0, + last_network_block_height_ts: 0, + last_network_download_passes: 0, + min_network_download_passes: 0, + last_network_inv_passes: 0, + min_network_inv_passes: 0, + + relayer, + + miner_thread: None, + is_miner, + next_initiative: Instant::now() + Duration::from_secs(10), + last_committed: None, + } + } + + /// have we waited for the right conditions under which to start mining a block off 
of our
+    /// chain tip?
+    fn has_waited_for_latest_blocks(&self) -> bool {
+        // a network download pass took place
+        (self.min_network_download_passes <= self.last_network_download_passes
+            // a network inv pass took place -- NOTE(review): the next line re-checks *download* passes; it likely should compare min_network_inv_passes against last_network_inv_passes -- confirm
+            && self.min_network_download_passes <= self.last_network_download_passes)
+            // we waited long enough for a download pass, but timed out waiting
+            || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms()
+            // we're not supposed to wait at all
+            || !self.config.miner.wait_for_block_download
+    }
+
+    /// Handle a NetworkResult from the p2p/http state machine. Usually this is the act of
+    /// * preprocessing and storing new blocks and microblocks
+    /// * relaying blocks, microblocks, and transactions
+    /// * updating unconfirmed state views
+    pub fn process_network_result(&mut self, mut net_result: NetworkResult) {
+        debug!(
+            "Relayer: Handle network result (from {})",
+            net_result.burn_height
+        );
+
+        if self.last_network_block_height != net_result.burn_height {
+            // burnchain advanced; disable mining until we also do a download pass.
+ self.last_network_block_height = net_result.burn_height; + self.min_network_download_passes = net_result.num_download_passes + 1; + self.min_network_inv_passes = net_result.num_inv_sync_passes + 1; + self.last_network_block_height_ts = get_epoch_time_ms(); + debug!( + "Relayer: block mining until the next download pass {}", + self.min_network_download_passes + ); + signal_mining_blocked(self.globals.get_miner_status()); + } + + let net_receipts = self + .relayer + .process_network_result( + &self.local_peer, + &mut net_result, + &mut self.sortdb, + &mut self.chainstate, + &mut self.mempool, + self.globals.sync_comms.get_ibd(), + Some(&self.globals.coord_comms), + Some(&self.event_dispatcher), + ) + .expect("BUG: failure processing network results"); + + if net_receipts.num_new_blocks > 0 { + // if we received any new block data that could invalidate our view of the chain tip, + // then stop mining until we process it + debug!("Relayer: block mining to process newly-arrived blocks or microblocks"); + signal_mining_blocked(self.globals.get_miner_status()); + } + + let mempool_txs_added = net_receipts.mempool_txs_added.len(); + if mempool_txs_added > 0 { + self.event_dispatcher + .process_new_mempool_txs(net_receipts.mempool_txs_added); + } + + // Dispatch retrieved attachments, if any. + if net_result.has_attachments() { + self.event_dispatcher + .process_new_attachments(&net_result.attachments); + } + + // resume mining if we blocked it, and if we've done the requisite download + // passes + self.last_network_download_passes = net_result.num_download_passes; + self.last_network_inv_passes = net_result.num_inv_sync_passes; + if self.has_waited_for_latest_blocks() { + info!("Relayer: did a download pass, so unblocking mining"); + signal_mining_ready(self.globals.get_miner_status()); + } + } + + /// Given the pointer to a recently processed sortition, see if we won the sortition. + /// + /// Returns `true` if we won this last sortition. 
+ pub fn process_sortition( + &mut self, + consensus_hash: ConsensusHash, + burn_hash: BurnchainHeaderHash, + committed_index_hash: StacksBlockId, + ) -> MinerDirective { + let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: unknown consensus hash"); + + self.globals.set_last_sortition(sn.clone()); + + let won_sortition = sn.sortition && self.last_commits.remove(&sn.winning_block_txid); + + info!( + "Relayer: Process sortition"; + "sortition_ch" => %consensus_hash, + "burn_hash" => %burn_hash, + "burn_height" => sn.block_height, + "winning_txid" => %sn.winning_block_txid, + "committed_parent" => %committed_index_hash, + "won_sortition?" => won_sortition, + ); + + if won_sortition { + increment_stx_blocks_mined_counter(); + } + + if sn.sortition { + if won_sortition { + MinerDirective::BeginTenure { + parent_tenure_start: committed_index_hash, + burnchain_tip: sn, + } + } else { + MinerDirective::StopTenure + } + } else { + MinerDirective::ContinueTenure { + new_burn_view: consensus_hash, + } + } + } + + /// Constructs and returns a LeaderKeyRegisterOp out of the provided params + fn make_key_register_op( + vrf_public_key: VRFPublicKey, + consensus_hash: &ConsensusHash, + miner_pkh: &Hash160, + ) -> BlockstackOperationType { + BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { + public_key: vrf_public_key, + memo: miner_pkh.as_bytes().to_vec(), + consensus_hash: consensus_hash.clone(), + vtxindex: 0, + txid: Txid([0u8; 32]), + block_height: 0, + burn_header_hash: BurnchainHeaderHash::zero(), + }) + } + + /// Create and broadcast a VRF public key registration transaction. + /// Returns true if we succeed in doing so; false if not. 
+ pub fn rotate_vrf_and_register(&mut self, burn_block: &BlockSnapshot) { + if self.last_vrf_key_burn_height.is_some() { + // already in-flight + return; + } + let cur_epoch = SortitionDB::get_stacks_epoch(self.sortdb.conn(), burn_block.block_height) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: no epoch defined") + .epoch_id; + let (vrf_pk, _) = self.keychain.make_vrf_keypair(burn_block.block_height); + let burnchain_tip_consensus_hash = &burn_block.consensus_hash; + let miner_pkh = self.keychain.get_nakamoto_pkh(); + + debug!( + "Submitting LeaderKeyRegister"; + "vrf_pk" => vrf_pk.to_hex(), + "burn_block_height" => burn_block.block_height, + "miner_pkh" => miner_pkh.to_hex(), + ); + + let op = Self::make_key_register_op(vrf_pk, burnchain_tip_consensus_hash, &miner_pkh); + + let mut op_signer = self.keychain.generate_op_signer(); + if let Some(txid) = + self.bitcoin_controller + .submit_operation(cur_epoch, op, &mut op_signer, 1) + { + // advance key registration state + self.last_vrf_key_burn_height = Some(burn_block.block_height); + self.globals + .set_pending_leader_key_registration(burn_block.block_height, txid); + self.globals.counters.bump_naka_submitted_vrfs(); + } + } + + /// Produce the block-commit for this anchored block, if we can. + /// `target_ch` is the consensus-hash of the Tenure we will build off + /// `target_bh` is the block hash of the Tenure we will build off + /// Returns the (the most recent burn snapshot, the expected epoch, the commit-op) on success + /// Returns None if we fail somehow. 
+ fn make_block_commit( + &mut self, + target_ch: &ConsensusHash, + target_bh: &BlockHeaderHash, + ) -> Result<(BlockSnapshot, StacksEpochId, LeaderBlockCommitOp), NakamotoNodeError> { + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) + .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?; + + let parent_vrf_proof = + NakamotoChainState::get_block_vrf_proof(self.chainstate.db(), &target_ch) + .map_err(|_e| NakamotoNodeError::ParentNotFound)? + .unwrap_or_else(|| VRFProof::empty()); + + // let's figure out the recipient set! + let recipients = get_next_recipients( + &sort_tip, + &mut self.chainstate, + &mut self.sortdb, + &self.burnchain, + &OnChainRewardSetProvider(), + self.config.node.always_use_affirmation_maps, + ) + .map_err(|e| { + error!("Relayer: Failure fetching recipient set: {:?}", e); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + let block_header = + NakamotoChainState::get_block_header_by_consensus_hash(self.chainstate.db(), target_ch) + .map_err(|e| { + error!("Relayer: Failed to get block header for parent tenure: {e:?}"); + NakamotoNodeError::ParentNotFound + })? 
+ .ok_or_else(|| { + error!("Relayer: Failed to find block header for parent tenure"); + NakamotoNodeError::ParentNotFound + })?; + + let parent_block_id = block_header.index_block_hash(); + if parent_block_id != StacksBlockId::new(target_ch, target_bh) { + error!("Relayer: Found block header for parent tenure, but mismatched block id"; + "expected_block_id" => %StacksBlockId::new(target_ch, target_bh), + "found_block_id" => %parent_block_id); + return Err(NakamotoNodeError::UnexpectedChainState); + } + + let Ok(Some(parent_sortition)) = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), target_ch) + else { + error!("Relayer: Failed to lookup the block snapshot of parent tenure ID"; "tenure_consensus_hash" => %target_ch); + return Err(NakamotoNodeError::ParentNotFound); + }; + + let Ok(Some(target_epoch)) = + SortitionDB::get_stacks_epoch(self.sortdb.conn(), sort_tip.block_height + 1) + else { + error!("Relayer: Failed to lookup its epoch"; "target_height" => sort_tip.block_height + 1); + return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); + }; + + let parent_block_burn_height = parent_sortition.block_height; + let Ok(Some(parent_winning_tx)) = SortitionDB::get_block_commit( + self.sortdb.conn(), + &parent_sortition.winning_block_txid, + &parent_sortition.sortition_id, + ) else { + error!("Relayer: Failed to lookup the block commit of parent tenure ID"; "tenure_consensus_hash" => %target_ch); + return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); + }; + + let parent_winning_vtxindex = parent_winning_tx.vtxindex; + + let burn_fee_cap = get_mining_spend_amount(self.globals.get_miner_status()); + let sunset_burn = self.burnchain.expected_sunset_burn( + sort_tip.block_height + 1, + burn_fee_cap, + target_epoch.epoch_id, + ); + let rest_commit = burn_fee_cap - sunset_burn; + + let commit_outs = if !self + .burnchain + .pox_constants + .is_after_pox_sunset_end(sort_tip.block_height, target_epoch.epoch_id) + && !self + .burnchain + 
.is_in_prepare_phase(sort_tip.block_height + 1) + { + RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet()) + } else { + vec![PoxAddress::standard_burn_address(self.config.is_mainnet())] + }; + + // let's commit, but target the current burnchain tip with our modulus + let burn_parent_modulus = u8::try_from(sort_tip.block_height % BURN_BLOCK_MINED_AT_MODULUS) + .map_err(|_| { + error!("Relayer: Block mining modulus is not u8"); + NakamotoNodeError::UnexpectedChainState + })?; + let sender = self.keychain.get_burnchain_signer(); + let key = self + .globals + .get_leader_key_registration_state() + .get_active() + .ok_or_else(|| NakamotoNodeError::NoVRFKeyActive)?; + let op = LeaderBlockCommitOp { + sunset_burn, + block_header_hash: BlockHeaderHash(parent_block_id.0), + burn_fee: rest_commit, + input: (Txid([0; 32]), 0), + apparent_sender: sender, + key_block_ptr: u32::try_from(key.block_height) + .expect("FATAL: burn block height exceeded u32"), + key_vtxindex: u16::try_from(key.op_vtxindex).expect("FATAL: vtxindex exceeded u16"), + memo: vec![STACKS_EPOCH_3_0_MARKER], + new_seed: VRFSeed::from_proof(&parent_vrf_proof), + parent_block_ptr: u32::try_from(parent_block_burn_height) + .expect("FATAL: burn block height exceeded u32"), + parent_vtxindex: u16::try_from(parent_winning_vtxindex) + .expect("FATAL: vtxindex exceeded u16"), + vtxindex: 0, + txid: Txid([0u8; 32]), + block_height: 0, + burn_header_hash: BurnchainHeaderHash::zero(), + burn_parent_modulus, + commit_outs, + }; + + Ok((sort_tip, target_epoch.epoch_id, op)) + } + + /// Create the block miner thread state. 
+ /// Only proceeds if all of the following are true: + /// * the miner is not blocked + /// * last_burn_block corresponds to the canonical sortition DB's chain tip + /// * the time of issuance is sufficiently recent + /// * there are no unprocessed stacks blocks in the staging DB + /// * the relayer has already tried a download scan that included this sortition (which, if a + /// block was found, would have placed it into the staging DB and marked it as + /// unprocessed) + /// * a miner thread is not running already + fn create_block_miner( + &mut self, + registered_key: RegisteredKey, + last_burn_block: BlockSnapshot, + parent_tenure_id: StacksBlockId, + ) -> Result { + if fault_injection_skip_mining(&self.config.node.rpc_bind, last_burn_block.block_height) { + debug!( + "Relayer: fault injection skip mining at block height {}", + last_burn_block.block_height + ); + return Err(NakamotoNodeError::FaultInjection); + } + + let burn_header_hash = last_burn_block.burn_header_hash.clone(); + let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); + + if burn_chain_tip != burn_header_hash { + debug!( + "Relayer: Drop stale RunTenure for {}: current sortition is for {}", + &burn_header_hash, &burn_chain_tip + ); + self.globals.counters.bump_missed_tenures(); + return Err(NakamotoNodeError::MissedMiningOpportunity); + } + + debug!( + "Relayer: Spawn tenure thread"; + "height" => last_burn_block.block_height, + "burn_header_hash" => %burn_header_hash, + ); + + let miner_thread_state = + BlockMinerThread::new(self, registered_key, last_burn_block, parent_tenure_id); + Ok(miner_thread_state) + } + + fn start_new_tenure( + &mut self, + parent_tenure_start: StacksBlockId, + burn_tip: BlockSnapshot, + ) -> Result<(), NakamotoNodeError> { + // when starting a new tenure, block the mining thread if its currently 
running. + // the new mining thread will join it (so that the new mining thread stalls, not the relayer) + let prior_tenure_thread = self.miner_thread.take(); + let vrf_key = self + .globals + .get_leader_key_registration_state() + .get_active() + .ok_or_else(|| { + warn!("Trying to start new tenure, but no VRF key active"); + NakamotoNodeError::NoVRFKeyActive + })?; + let new_miner_state = self.create_block_miner(vrf_key, burn_tip, parent_tenure_start)?; + + let new_miner_handle = std::thread::Builder::new() + .name(format!("miner-{}", self.local_peer.data_url)) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .spawn(move || new_miner_state.run_miner(prior_tenure_thread)) + .map_err(|e| { + error!("Relayer: Failed to start tenure thread: {:?}", &e); + NakamotoNodeError::SpawnError(e) + })?; + + self.miner_thread.replace(new_miner_handle); + + Ok(()) + } + + fn stop_tenure(&mut self) -> Result<(), NakamotoNodeError> { + // when stopping a tenure, block the mining thread if its currently running, then join it. 
+ // do this in a new thread (so that the new thread stalls, not the relayer) + let Some(prior_tenure_thread) = self.miner_thread.take() else { + return Ok(()); + }; + let globals = self.globals.clone(); + + let stop_handle = std::thread::Builder::new() + .name(format!("tenure-stop-{}", self.local_peer.data_url)) + .spawn(move || BlockMinerThread::stop_miner(&globals, prior_tenure_thread)) + .map_err(|e| { + error!("Relayer: Failed to spawn a stop-tenure thread: {:?}", &e); + NakamotoNodeError::SpawnError(e) + })?; + + self.miner_thread.replace(stop_handle); + + Ok(()) + } + + fn handle_sortition( + &mut self, + consensus_hash: ConsensusHash, + burn_hash: BurnchainHeaderHash, + committed_index_hash: StacksBlockId, + ) -> bool { + let miner_instruction = + self.process_sortition(consensus_hash, burn_hash, committed_index_hash); + + match miner_instruction { + MinerDirective::BeginTenure { + parent_tenure_start, + burnchain_tip, + } => { + let _ = self.start_new_tenure(parent_tenure_start, burnchain_tip); + } + MinerDirective::ContinueTenure { new_burn_view: _ } => { + // TODO: in this case, we eventually want to undergo a tenure + // change to switch to the new burn view, but right now, we will + // simply end our current tenure if it exists + let _ = self.stop_tenure(); + } + MinerDirective::StopTenure => { + let _ = self.stop_tenure(); + } + } + + true + } + + fn issue_block_commit( + &mut self, + tenure_start_ch: ConsensusHash, + tenure_start_bh: BlockHeaderHash, + ) -> Result<(), NakamotoNodeError> { + let (last_committed_at, target_epoch_id, commit) = + self.make_block_commit(&tenure_start_ch, &tenure_start_bh)?; + let mut op_signer = self.keychain.generate_op_signer(); + let txid = self + .bitcoin_controller + .submit_operation( + target_epoch_id, + BlockstackOperationType::LeaderBlockCommit(commit), + &mut op_signer, + 1, + ) + .ok_or_else(|| { + warn!("Failed to submit block-commit bitcoin transaction"); + NakamotoNodeError::BurnchainSubmissionFailed + 
})?; + info!( + "Relayer: Submitted block-commit"; + "parent_consensus_hash" => %tenure_start_ch, + "parent_block_hash" => %tenure_start_bh, + "txid" => %txid, + ); + + self.last_commits.insert(txid); + self.last_committed = Some(( + last_committed_at, + StacksBlockId::new(&tenure_start_ch, &tenure_start_bh), + )); + self.globals.counters.bump_naka_submitted_commits(); + + Ok(()) + } + + fn initiative(&mut self) -> Option { + if !self.is_miner { + return None; + } + + // do we need a VRF key registration? + if matches!( + self.globals.get_leader_key_registration_state(), + LeaderKeyRegistrationState::Inactive + ) { + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) else { + warn!("Failed to fetch sortition tip while needing to register VRF key"); + return None; + }; + return Some(RelayerDirective::RegisterKey(sort_tip)); + } + + // are we still waiting on a pending registration? + if !matches!( + self.globals.get_leader_key_registration_state(), + LeaderKeyRegistrationState::Active(_) + ) { + return None; + } + + // has there been a new sortition + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) else { + return None; + }; + + // check if the burnchain changed, if so, we should issue a commit. 
+ // if not, we may still want to update a commit if we've received a new tenure start block + let burnchain_changed = if let Some((last_committed_at, ..)) = self.last_committed.as_ref() + { + // if the new sortition tip has a different consensus hash than the last commit, + // issue a new commit + sort_tip.consensus_hash != last_committed_at.consensus_hash + } else { + // if there was no last commit, issue a new commit + true + }; + + let Ok(Some(chain_tip_header)) = + NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &self.sortdb) + else { + info!("No known canonical tip, will issue a genesis block commit"); + return Some(RelayerDirective::IssueBlockCommit( + FIRST_BURNCHAIN_CONSENSUS_HASH, + FIRST_STACKS_BLOCK_HASH, + )); + }; + + // get the starting block of the chain tip's tenure + let Ok(Some(chain_tip_tenure_start)) = + NakamotoChainState::get_block_header_by_consensus_hash( + self.chainstate.db(), + &chain_tip_header.consensus_hash, + ) + else { + warn!("Failure getting the first block of tenure in order to assemble block commit"; + "tenure_consensus_hash" => %chain_tip_header.consensus_hash, + "tip_block_hash" => %chain_tip_header.anchored_header.block_hash()); + return None; + }; + + let chain_tip_tenure_id = chain_tip_tenure_start.index_block_hash(); + let should_commit = burnchain_changed + || if let Some((_, last_committed_tenure_id)) = self.last_committed.as_ref() { + // if the tenure ID of the chain tip has changed, issue a new commit + last_committed_tenure_id != &chain_tip_tenure_id + } else { + // should be unreachable, but either way, if + // `self.last_committed` is None, we should issue a commit + true + }; + + if should_commit { + Some(RelayerDirective::IssueBlockCommit( + chain_tip_header.consensus_hash, + chain_tip_header.anchored_header.block_hash(), + )) + } else { + None + } + } + + /// Main loop of the relayer. + /// Runs in a separate thread. 
+ /// Continuously receives and handles `RelayerDirective`s. + pub fn main(mut self, relay_rcv: Receiver) { + debug!("relayer thread ID is {:?}", std::thread::current().id()); + + self.next_initiative = Instant::now() + Duration::from_secs(10); + while self.globals.keep_running() { + let directive = if Instant::now() >= self.next_initiative { + self.next_initiative = Instant::now() + Duration::from_secs(10); + self.initiative() + } else { + None + }; + + let Some(timeout) = self.next_initiative.checked_duration_since(Instant::now()) else { + // next_initiative timeout occurred, so go to next loop iteration. + continue; + }; + + let directive = if let Some(directive) = directive { + directive + } else { + match relay_rcv.recv_timeout(timeout) { + Ok(directive) => directive, + // timed out, so go to next loop iteration + Err(RecvTimeoutError::Timeout) => continue, + Err(RecvTimeoutError::Disconnected) => break, + } + }; + + if !self.handle_directive(directive) { + break; + } + } + + // kill miner if it's running + signal_mining_blocked(self.globals.get_miner_status()); + + // set termination flag so other threads die + self.globals.signal_stop(); + + debug!("Relayer exit!"); + } + + /// Top-level dispatcher + pub fn handle_directive(&mut self, directive: RelayerDirective) -> bool { + info!("Relayer: handling directive"; "directive" => %directive); + let continue_running = match directive { + RelayerDirective::HandleNetResult(net_result) => { + self.process_network_result(net_result); + true + } + // RegisterKey directives mean that the relayer should try to register a new VRF key. + // These are triggered by the relayer waking up without an active VRF key. 
+ RelayerDirective::RegisterKey(last_burn_block) => { + if !self.is_miner { + return true; + } + if self.globals.in_initial_block_download() { + info!("In initial block download, will not submit VRF registration"); + return true; + } + self.rotate_vrf_and_register(&last_burn_block); + self.globals.counters.bump_blocks_processed(); + true + } + // ProcessedBurnBlock directives correspond to a new sortition perhaps occurring. + // relayer should invoke `handle_sortition` to determine if they won the sortition, + // and to start their miner, or stop their miner if an active tenure is now ending + RelayerDirective::ProcessedBurnBlock(consensus_hash, burn_hash, block_header_hash) => { + if !self.is_miner { + return true; + } + if self.globals.in_initial_block_download() { + debug!("In initial block download, will not check sortition for miner"); + return true; + } + self.handle_sortition( + consensus_hash, + burn_hash, + StacksBlockId(block_header_hash.0), + ) + } + // These are triggered by the relayer waking up, seeing a new consensus hash *or* a new first tenure block + RelayerDirective::IssueBlockCommit(consensus_hash, block_hash) => { + if !self.is_miner { + return true; + } + if self.globals.in_initial_block_download() { + debug!("In initial block download, will not issue block commit"); + return true; + } + if let Err(e) = self.issue_block_commit(consensus_hash, block_hash) { + warn!("Relayer failed to issue block commit"; "err" => ?e); + } + true + } + RelayerDirective::Exit => false, + }; + debug!("Relayer: handled directive"; "continue_running" => continue_running); + continue_running + } +} diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 5ef68a4c28..284d63a1c3 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -142,9 +142,7 @@ use std::collections::{HashMap, VecDeque}; use std::convert::{TryFrom, TryInto}; use std::default::Default; use std::net::SocketAddr; -use 
std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::mpsc::{Receiver, SyncSender, TrySendError}; -use std::sync::{Arc, Mutex}; +use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; use std::time::Duration; use std::{mem, thread}; @@ -162,15 +160,13 @@ use stacks::chainstate::burn::operations::{ BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; -use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::address::PoxAddress; -use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::miner::{ get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, - MinerStatus, StacksMicroblockBuilder, + StacksMicroblockBuilder, }; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockBuilder, StacksBlockHeader, @@ -210,9 +206,9 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::make_bitcoin_indexer; -use crate::run_loop::neon::{Counters, RunLoop}; +use crate::globals::{NeonGlobals as Globals, RelayerDirective}; +use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; -use crate::syncctl::PoxSyncWatchdogComms; use crate::ChainTip; pub const RELAYER_MAX_BUFFER: usize = 100; @@ -256,44 +252,6 @@ struct AssembledAnchorBlock { tenure_begin: u128, } -/// Command types for the relayer thread, issued to it by other threads -pub enum RelayerDirective { - /// Handle some new data that arrived on the network (such as blocks, transactions, and - /// microblocks) - HandleNetResult(NetworkResult), 
- /// Announce a new sortition. Process and broadcast the block if we won. - ProcessTenure(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash), - /// Try to mine a block - RunTenure(RegisteredKey, BlockSnapshot, u128), // (vrf key, chain tip, time of issuance in ms) - /// Try to register a VRF public key - RegisterKey(BlockSnapshot), - /// Stop the relayer thread - Exit, -} - -/// Inter-thread communication structure, shared between threads -#[derive(Clone)] -pub struct Globals { - /// Last sortition processed - last_sortition: Arc>>, - /// Status of the miner - miner_status: Arc>, - /// Communication link to the coordinator thread - coord_comms: CoordinatorChannels, - /// Unconfirmed transactions (shared between the relayer and p2p threads) - unconfirmed_txs: Arc>, - /// Writer endpoint to the relayer thread - relay_send: SyncSender, - /// Cointer state in the main thread - counters: Counters, - /// Connection to the PoX sync watchdog - sync_comms: PoxSyncWatchdogComms, - /// Global flag to see if we should keep running - pub should_keep_running: Arc, - /// Status of our VRF key registration state (shared between the main thread and the relayer) - leader_key_registration_state: Arc>, -} - /// Miner chain tip, on top of which to build microblocks #[derive(Debug, Clone, PartialEq)] pub struct MinerTip { @@ -327,205 +285,6 @@ impl MinerTip { } } -impl Globals { - pub fn new( - coord_comms: CoordinatorChannels, - miner_status: Arc>, - relay_send: SyncSender, - counters: Counters, - sync_comms: PoxSyncWatchdogComms, - should_keep_running: Arc, - ) -> Globals { - Globals { - last_sortition: Arc::new(Mutex::new(None)), - miner_status, - coord_comms, - unconfirmed_txs: Arc::new(Mutex::new(UnconfirmedTxMap::new())), - relay_send, - counters, - sync_comms, - should_keep_running, - leader_key_registration_state: Arc::new(Mutex::new( - LeaderKeyRegistrationState::Inactive, - )), - } - } - - /// Get the last sortition processed by the relayer thread - pub fn 
get_last_sortition(&self) -> Option { - match self.last_sortition.lock() { - Ok(sort_opt) => sort_opt.clone(), - Err(_) => { - error!("Sortition mutex poisoned!"); - panic!(); - } - } - } - - /// Set the last sortition processed - pub fn set_last_sortition(&self, block_snapshot: BlockSnapshot) { - match self.last_sortition.lock() { - Ok(mut sortition_opt) => { - sortition_opt.replace(block_snapshot); - } - Err(_) => { - error!("Sortition mutex poisoned!"); - panic!(); - } - }; - } - - /// Get the status of the miner (blocked or ready) - pub fn get_miner_status(&self) -> Arc> { - self.miner_status.clone() - } - - /// Get the main thread's counters - pub fn get_counters(&self) -> Counters { - self.counters.clone() - } - - /// Called by the relayer to pass unconfirmed txs to the p2p thread, so the p2p thread doesn't - /// need to do the disk I/O needed to instantiate the unconfirmed state trie they represent. - /// Clears the unconfirmed transactions, and replaces them with the chainstate's. - pub fn send_unconfirmed_txs(&self, chainstate: &StacksChainState) { - if let Some(ref unconfirmed) = chainstate.unconfirmed_state { - match self.unconfirmed_txs.lock() { - Ok(mut txs) => { - txs.clear(); - txs.extend(unconfirmed.mined_txs.clone()); - } - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: unconfirmed tx arc mutex is poisoned: {:?}", &e); - panic!(); - } - }; - } - } - - /// Called by the p2p thread to accept the unconfirmed tx state processed by the relayer. - /// Puts the shared unconfirmed transactions to chainstate. 
- pub fn recv_unconfirmed_txs(&self, chainstate: &mut StacksChainState) { - if let Some(ref mut unconfirmed) = chainstate.unconfirmed_state { - match self.unconfirmed_txs.lock() { - Ok(txs) => { - unconfirmed.mined_txs.clear(); - unconfirmed.mined_txs.extend(txs.clone()); - } - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: unconfirmed arc mutex is poisoned: {:?}", &e); - panic!(); - } - }; - } - } - - /// Signal system-wide stop - pub fn signal_stop(&self) { - self.should_keep_running.store(false, Ordering::SeqCst); - } - - /// Should we keep running? - pub fn keep_running(&self) -> bool { - self.should_keep_running.load(Ordering::SeqCst) - } - - /// Get the handle to the coordinator - pub fn coord(&self) -> &CoordinatorChannels { - &self.coord_comms - } - - /// Get the current leader key registration state. - /// Called from the runloop thread and relayer thread. - fn get_leader_key_registration_state(&self) -> LeaderKeyRegistrationState { - match self.leader_key_registration_state.lock() { - Ok(state) => (*state).clone(), - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: leader key registration mutex is poisoned: {:?}", &e); - panic!(); - } - } - } - - /// Set the initial leader key registration state. - /// Called from the runloop thread when booting up. - fn set_initial_leader_key_registration_state(&self, new_state: LeaderKeyRegistrationState) { - match self.leader_key_registration_state.lock() { - Ok(mut state) => { - *state = new_state; - } - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: leader key registration mutex is poisoned: {:?}", &e); - panic!(); - } - } - } - - /// Advance the leader key registration state to pending, given a txid we just sent. - /// Only the relayer thread calls this. 
- fn set_pending_leader_key_registration(&self, target_block_height: u64, txid: Txid) { - match self.leader_key_registration_state.lock() { - Ok(ref mut leader_key_registration_state) => { - **leader_key_registration_state = - LeaderKeyRegistrationState::Pending(target_block_height, txid); - } - Err(_e) => { - error!("FATAL: failed to lock leader key registration state mutex"); - panic!(); - } - } - } - - /// Advance the leader key registration state to active, given the VRF key registration ops - /// we've discovered in a given snapshot. - /// The runloop thread calls this whenever it processes a sortition. - pub fn try_activate_leader_key_registration( - &self, - burn_block_height: u64, - key_registers: Vec, - ) -> bool { - let mut activated = false; - match self.leader_key_registration_state.lock() { - Ok(ref mut leader_key_registration_state) => { - for op in key_registers.into_iter() { - if let LeaderKeyRegistrationState::Pending(target_block_height, txid) = - **leader_key_registration_state - { - info!( - "Received burnchain block #{} including key_register_op - {}", - burn_block_height, txid - ); - if txid == op.txid { - **leader_key_registration_state = - LeaderKeyRegistrationState::Active(RegisteredKey { - target_block_height, - vrf_public_key: op.public_key, - block_height: u64::from(op.block_height), - op_vtxindex: u32::from(op.vtxindex), - }); - activated = true; - } else { - debug!( - "key_register_op {} does not match our pending op {}", - txid, &op.txid - ); - } - } - } - } - Err(_e) => { - error!("FATAL: failed to lock leader key registration state mutex"); - panic!(); - } - } - activated - } -} - /// Node implementation for both miners and followers. /// This struct is used to set up the node proper and launch the p2p thread and relayer thread. /// It is further used by the main thread to communicate with these two threads. @@ -545,71 +304,59 @@ pub struct StacksNode { /// Fault injection logic to artificially increase the length of a tenure. 
/// Only used in testing #[cfg(test)] -fn fault_injection_long_tenure() { +pub(crate) fn fault_injection_long_tenure() { // simulated slow block - match std::env::var("STX_TEST_SLOW_TENURE") { - Ok(tenure_str) => match tenure_str.parse::() { - Ok(tenure_time) => { - info!( - "Fault injection: sleeping for {} milliseconds to simulate a long tenure", - tenure_time - ); - stacks_common::util::sleep_ms(tenure_time); - } - Err(_) => { - error!("Parse error for STX_TEST_SLOW_TENURE"); - panic!(); - } - }, - _ => {} - } + let Ok(tenure_str) = std::env::var("STX_TEST_SLOW_TENURE") else { + return; + }; + let Ok(tenure_time) = tenure_str.parse::() else { + error!("Parse error for STX_TEST_SLOW_TENURE"); + panic!(); + }; + info!( + "Fault injection: sleeping for {} milliseconds to simulate a long tenure", + tenure_time + ); + stacks_common::util::sleep_ms(tenure_time); } #[cfg(not(test))] -fn fault_injection_long_tenure() {} +pub(crate) fn fault_injection_long_tenure() {} /// Fault injection to skip mining in this bitcoin block height /// Only used in testing #[cfg(test)] -fn fault_injection_skip_mining(rpc_bind: &str, target_burn_height: u64) -> bool { - match std::env::var("STACKS_DISABLE_MINER") { - Ok(disable_heights) => { - let disable_schedule: serde_json::Value = - serde_json::from_str(&disable_heights).unwrap(); - let disable_schedule = disable_schedule.as_array().unwrap(); - for disabled in disable_schedule { - let target_miner_rpc_bind = disabled - .get("rpc_bind") - .unwrap() - .as_str() - .unwrap() - .to_string(); - if target_miner_rpc_bind != rpc_bind { - continue; - } - let target_block_heights = disabled.get("blocks").unwrap().as_array().unwrap(); - for target_block_value in target_block_heights { - let target_block = target_block_value.as_i64().unwrap() as u64; - if target_block == target_burn_height { - return true; - } - } - } - return false; +pub(crate) fn fault_injection_skip_mining(rpc_bind: &str, target_burn_height: u64) -> bool { + let 
Ok(disable_heights) = std::env::var("STACKS_DISABLE_MINER") else { + return false; + }; + let disable_schedule: serde_json::Value = serde_json::from_str(&disable_heights).unwrap(); + let disable_schedule = disable_schedule.as_array().unwrap(); + for disabled in disable_schedule { + let target_miner_rpc_bind = disabled.get("rpc_bind").unwrap().as_str().unwrap(); + if target_miner_rpc_bind != rpc_bind { + continue; } - Err(_) => { - return false; + let target_block_heights = disabled.get("blocks").unwrap().as_array().unwrap(); + for target_block_value in target_block_heights { + let target_block = u64::try_from(target_block_value.as_i64().unwrap()).unwrap(); + if target_block == target_burn_height { + return true; + } } } + false } #[cfg(not(test))] -fn fault_injection_skip_mining(_rpc_bind: &str, _target_burn_height: u64) -> bool { +pub(crate) fn fault_injection_skip_mining(_rpc_bind: &str, _target_burn_height: u64) -> bool { false } /// Open the chainstate, and inject faults from the config file -fn open_chainstate_with_faults(config: &Config) -> Result { +pub(crate) fn open_chainstate_with_faults( + config: &Config, +) -> Result { let stacks_chainstate_path = config.get_chainstate_path_str(); let (mut chainstate, _) = StacksChainState::open( config.is_mainnet(), @@ -653,7 +400,7 @@ struct ParentStacksBlockInfo { } #[derive(Clone)] -enum LeaderKeyRegistrationState { +pub enum LeaderKeyRegistrationState { /// Not started yet Inactive, /// Waiting for burnchain confirmation @@ -664,6 +411,16 @@ enum LeaderKeyRegistrationState { Active(RegisteredKey), } +impl LeaderKeyRegistrationState { + pub fn get_active(&self) -> Option { + if let Self::Active(registered_key) = self { + Some(registered_key.clone()) + } else { + None + } + } +} + /// Relayer thread /// * accepts network results and stores blocks and microblocks /// * forwards new blocks, microblocks, and transactions to the p2p thread @@ -3407,6 +3164,10 @@ impl RelayerThread { debug!("Relayer: directive Ran 
tenure"); true } + RelayerDirective::NakamotoTenureStartProcessed(_, _) => { + warn!("Relayer: Nakamoto tenure start notification received while still operating 2.x neon node"); + true + } RelayerDirective::Exit => false, }; if !continue_running { @@ -3862,7 +3623,7 @@ impl StacksNode { } /// Set up the AST size-precheck height, if configured - fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { + pub(crate) fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { info!( "Override burnchain height of {:?} to {}", @@ -4015,7 +3776,7 @@ impl StacksNode { } /// Set up the PeerNetwork, but do not bind it. - pub fn setup_peer_network( + pub(crate) fn setup_peer_network( config: &Config, atlas_config: &AtlasConfig, burnchain: Burnchain, diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs new file mode 100644 index 0000000000..e70784ce42 --- /dev/null +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -0,0 +1,220 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex}; +use std::thread::JoinHandle; +use std::time::Duration; +use std::{fs, thread}; + +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::coordinator::comm::CoordinatorChannels; +use stacks::core::StacksEpochExtension; +use stacks_common::types::{StacksEpoch, StacksEpochId}; + +use crate::neon::Counters; +use crate::run_loop::nakamoto::RunLoop as NakaRunLoop; +use crate::run_loop::neon::RunLoop as NeonRunLoop; +use crate::Config; + +/// This runloop handles booting to Nakamoto: +/// During epochs [1.0, 2.5], it runs a neon run_loop. +/// Once epoch 3.0 is reached, it stops the neon run_loop +/// and starts nakamoto. +pub struct BootRunLoop { + config: Config, + active_loop: InnerLoops, + coordinator_channels: Arc>, +} + +enum InnerLoops { + Epoch2(NeonRunLoop), + Epoch3(NakaRunLoop), +} + +impl BootRunLoop { + pub fn new(config: Config) -> Result { + let (coordinator_channels, active_loop) = if !Self::reached_epoch_30_transition(&config)? { + let neon = NeonRunLoop::new(config.clone()); + ( + neon.get_coordinator_channel().unwrap(), + InnerLoops::Epoch2(neon), + ) + } else { + let naka = NakaRunLoop::new(config.clone(), None, None); + ( + naka.get_coordinator_channel().unwrap(), + InnerLoops::Epoch3(naka), + ) + }; + + Ok(BootRunLoop { + config, + active_loop, + coordinator_channels: Arc::new(Mutex::new(coordinator_channels)), + }) + } + + /// Get a mutex-guarded pointer to this run-loops coordinator channels. + /// The reason this must be mutex guarded is that the run loop will switch + /// from a "neon" coordinator to a "nakamoto" coordinator, and update the + /// backing coordinator channel. That way, anyone still holding the Arc<> + /// should be able to query the new coordinator channel. 
+ pub fn coordinator_channels(&self) -> Arc> { + self.coordinator_channels.clone() + } + + /// Get the runtime counters for the inner runloop. The nakamoto + /// runloop inherits the counters object from the neon node, + /// so no need for another layer of indirection/mutex. + pub fn counters(&self) -> Counters { + match &self.active_loop { + InnerLoops::Epoch2(x) => x.get_counters(), + InnerLoops::Epoch3(x) => x.get_counters(), + } + } + + /// Get the termination switch from the active run loop. + pub fn get_termination_switch(&self) -> Arc { + match &self.active_loop { + InnerLoops::Epoch2(x) => x.get_termination_switch(), + InnerLoops::Epoch3(x) => x.get_termination_switch(), + } + } + + /// The main entry point for the run loop. This starts either a 2.x-neon or 3.x-nakamoto + /// node depending on the current burnchain height. + pub fn start(&mut self, burnchain_opt: Option, mine_start: u64) { + match self.active_loop { + InnerLoops::Epoch2(_) => return self.start_from_neon(burnchain_opt, mine_start), + InnerLoops::Epoch3(_) => return self.start_from_naka(burnchain_opt, mine_start), + } + } + + fn start_from_naka(&mut self, burnchain_opt: Option, mine_start: u64) { + let InnerLoops::Epoch3(ref mut naka_loop) = self.active_loop else { + panic!("FATAL: unexpectedly invoked start_from_naka when active loop wasn't nakamoto"); + }; + naka_loop.start(burnchain_opt, mine_start) + } + + fn start_from_neon(&mut self, burnchain_opt: Option, mine_start: u64) { + let InnerLoops::Epoch2(ref mut neon_loop) = self.active_loop else { + panic!("FATAL: unexpectedly invoked start_from_neon when active loop wasn't neon"); + }; + let termination_switch = neon_loop.get_termination_switch(); + let counters = neon_loop.get_counters(); + + let boot_thread = Self::spawn_stopper(&self.config, neon_loop) + .expect("FATAL: failed to spawn epoch-2/3-boot thread"); + neon_loop.start(burnchain_opt.clone(), mine_start); + + // did we exit because of the epoch-3.0 transition, or some other 
reason? + let exited_for_transition = boot_thread + .join() + .expect("FATAL: failed to join epoch-2/3-boot thread"); + if !exited_for_transition { + info!("Shutting down epoch-2/3 transition thread"); + return; + } + info!("Reached Epoch-3.0 boundary, starting nakamoto node"); + termination_switch.store(true, Ordering::SeqCst); + let naka = NakaRunLoop::new( + self.config.clone(), + Some(termination_switch), + Some(counters), + ); + let new_coord_channels = naka + .get_coordinator_channel() + .expect("FATAL: should have coordinator channel in newly instantiated runloop"); + { + let mut coord_channel = self.coordinator_channels.lock().expect("Mutex poisoned"); + *coord_channel = new_coord_channels; + } + self.active_loop = InnerLoops::Epoch3(naka); + let InnerLoops::Epoch3(ref mut naka_loop) = self.active_loop else { + panic!("FATAL: unexpectedly found epoch2 loop after setting epoch3 active"); + }; + naka_loop.start(burnchain_opt, mine_start) + } + + fn spawn_stopper( + config: &Config, + neon: &NeonRunLoop, + ) -> Result, std::io::Error> { + let neon_term_switch = neon.get_termination_switch(); + let config = config.clone(); + thread::Builder::new() + .name("epoch-2/3-boot".into()) + .spawn(move || { + loop { + let do_transition = Self::reached_epoch_30_transition(&config) + .unwrap_or_else(|err| { + warn!("Error checking for Epoch-3.0 transition: {err:?}. 
Assuming transition did not occur yet.");
+                            false
+                        });
+                    if do_transition {
+                        break;
+                    }
+                    if !neon_term_switch.load(Ordering::SeqCst) {
+                        info!("Stop requested, exiting epoch-2/3-boot thread");
+                        return false;
+                    }
+                    thread::sleep(Duration::from_secs(1));
+                }
+                // if loop exited, do the transition
+                info!("Epoch-3.0 boundary reached, stopping Epoch-2.x run loop");
+                neon_term_switch.store(false, Ordering::SeqCst);
+                return true
+            })
+    }
+
+    fn reached_epoch_30_transition(config: &Config) -> Result<bool, String> {
+        let burn_height = Self::get_burn_height(config)?;
+        let epochs = StacksEpoch::get_epochs(
+            config.burnchain.get_bitcoin_network().1,
+            config.burnchain.epochs.as_ref(),
+        );
+        let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30)
+            .ok_or("No Epoch-3.0 defined")?];
+
+        Ok(u64::from(burn_height) >= epoch_3.start_height - 1)
+    }
+
+    fn get_burn_height(config: &Config) -> Result<u32, String> {
+        let burnchain = config.get_burnchain();
+        let sortdb_path = config.get_burn_db_file_path();
+        if fs::metadata(&sortdb_path).is_err() {
+            // if the sortition db doesn't exist yet, don't try to open() it, because that creates the
+            // db file even if it doesn't instantiate the tables, which breaks connect() logic.
+ info!("Failed to open Sortition DB while checking current burn height, assuming height = 0"); + return Ok(0); + } + + let Ok(sortdb) = SortitionDB::open(&sortdb_path, false, burnchain.pox_constants.clone()) + else { + info!("Failed to open Sortition DB while checking current burn height, assuming height = 0"); + return Ok(0); + }; + + let Ok(tip_sn) = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) else { + info!("Failed to query Sortition DB for current burn height, assuming height = 0"); + return Ok(0); + }; + + Ok(u32::try_from(tip_sn.block_height).expect("FATAL: burn height exceeded u32")) + } +} diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index c7aaf87b56..9ad4fd583e 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -1,4 +1,6 @@ +pub mod boot_nakamoto; pub mod helium; +pub mod nakamoto; pub mod neon; use clarity::vm::costs::ExecutionCost; diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs new file mode 100644 index 0000000000..f18f236da6 --- /dev/null +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -0,0 +1,687 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use std::sync::atomic::AtomicBool; +use std::sync::mpsc::sync_channel; +use std::sync::{Arc, Mutex}; +use std::thread::JoinHandle; +use std::{cmp, thread}; + +use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers}; +use stacks::chainstate::coordinator::{ + ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication, +}; +use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; +use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; +use stacks::core::StacksEpochId; +use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; +use stacks_common::types::PublicKey; +use stacks_common::util::hash::Hash160; +use stx_genesis::GenesisData; + +use crate::burnchains::make_bitcoin_indexer; +use crate::globals::Globals as GenericGlobals; +use crate::monitoring::start_serving_monitoring_metrics; +use crate::nakamoto_node::{self, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; +use crate::node::{ + get_account_balances, get_account_lockups, get_names, get_namespaces, + use_test_genesis_chainstate, +}; +use crate::run_loop::neon; +use crate::run_loop::neon::Counters; +use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms}; +use crate::{ + run_loop, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain, +}; + +pub const STDERR: i32 = 2; +pub type Globals = GenericGlobals; + +/// Coordinating a node running in nakamoto mode. This runloop operates very similarly to the neon runloop. 
+pub struct RunLoop { + config: Config, + globals: Option, + counters: Counters, + coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>, + should_keep_running: Arc, + event_dispatcher: EventDispatcher, + #[allow(dead_code)] + pox_watchdog: Option, // can't be instantiated until .start() is called + is_miner: Option, // not known until .start() is called + burnchain: Option, // not known until .start() is called + pox_watchdog_comms: PoxSyncWatchdogComms, + /// NOTE: this is duplicated in self.globals, but it needs to be accessible before globals is + /// instantiated (namely, so the test framework can access it). + miner_status: Arc>, +} + +impl RunLoop { + /// Sets up a runloop and node, given a config. + pub fn new( + config: Config, + should_keep_running: Option>, + counters: Option, + ) -> Self { + let channels = CoordinatorCommunication::instantiate(); + let should_keep_running = + should_keep_running.unwrap_or_else(|| Arc::new(AtomicBool::new(true))); + let pox_watchdog_comms = PoxSyncWatchdogComms::new(should_keep_running.clone()); + let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready( + config.burnchain.burn_fee_cap, + ))); + + let mut event_dispatcher = EventDispatcher::new(); + for observer in config.events_observers.iter() { + event_dispatcher.register_observer(observer); + } + + Self { + config, + globals: None, + coordinator_channels: Some(channels), + counters: counters.unwrap_or_else(|| Counters::new()), + should_keep_running, + event_dispatcher, + pox_watchdog: None, + is_miner: None, + burnchain: None, + pox_watchdog_comms, + miner_status, + } + } + + pub(crate) fn get_globals(&self) -> Globals { + self.globals + .clone() + .expect("FATAL: globals not instantiated") + } + + fn set_globals(&mut self, globals: Globals) { + self.globals = Some(globals); + } + + pub(crate) fn get_coordinator_channel(&self) -> Option { + self.coordinator_channels.as_ref().map(|x| x.1.clone()) + } + + pub(crate) fn get_counters(&self) -> 
Counters { + self.counters.clone() + } + + pub(crate) fn config(&self) -> &Config { + &self.config + } + + pub(crate) fn get_event_dispatcher(&self) -> EventDispatcher { + self.event_dispatcher.clone() + } + + pub(crate) fn is_miner(&self) -> bool { + self.is_miner.unwrap_or(false) + } + + pub(crate) fn get_termination_switch(&self) -> Arc { + self.should_keep_running.clone() + } + + pub(crate) fn get_burnchain(&self) -> Burnchain { + self.burnchain + .clone() + .expect("FATAL: tried to get runloop burnchain before calling .start()") + } + + pub(crate) fn get_miner_status(&self) -> Arc> { + self.miner_status.clone() + } + + /// Determine if we're the miner. + /// If there's a network error, then assume that we're not a miner. + fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool { + if self.config.node.miner { + let keychain = Keychain::default(self.config.node.seed.clone()); + let mut op_signer = keychain.generate_op_signer(); + match burnchain.create_wallet_if_dne() { + Err(e) => warn!("Error when creating wallet: {:?}", e), + _ => {} + } + let mut btc_addrs = vec![( + StacksEpochId::Epoch2_05, + // legacy + BitcoinAddress::from_bytes_legacy( + self.config.burnchain.get_bitcoin_network().1, + LegacyBitcoinAddressType::PublicKeyHash, + &Hash160::from_data(&op_signer.get_public_key().to_bytes()).0, + ) + .expect("FATAL: failed to construct legacy bitcoin address"), + )]; + if self.config.miner.segwit { + btc_addrs.push(( + StacksEpochId::Epoch21, + // segwit p2wpkh + BitcoinAddress::from_bytes_segwit_p2wpkh( + self.config.burnchain.get_bitcoin_network().1, + &Hash160::from_data(&op_signer.get_public_key().to_bytes_compressed()).0, + ) + .expect("FATAL: failed to construct segwit p2wpkh address"), + )); + } + + for (epoch_id, btc_addr) in btc_addrs.into_iter() { + info!("Miner node: checking UTXOs at address: {}", &btc_addr); + let utxos = burnchain.get_utxos(epoch_id, &op_signer.get_public_key(), 1, None, 0); + if utxos.is_none() { + 
warn!("UTXOs not found for {}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {} (importaddress)", btc_addr, btc_addr); + } else { + info!("UTXOs found - will run as a Miner node"); + return true; + } + } + if self.config.node.mock_mining { + info!("No UTXOs found, but configured to mock mine"); + return true; + } else { + return false; + } + } else { + info!("Will run as a Follower node"); + false + } + } + + /// Boot up the stacks chainstate. + /// Instantiate the chainstate and push out the boot receipts to observers + /// This is only public so we can test it. + fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState { + let use_test_genesis_data = use_test_genesis_chainstate(&self.config); + + // load up genesis balances + let initial_balances = self + .config + .initial_balances + .iter() + .map(|e| (e.address.clone(), e.amount)) + .collect(); + + // TODO (nakamoto-neon): check if we're trying to setup a self-signing network + // and set the right genesis data + + // instantiate chainstate + let mut boot_data = ChainStateBootData { + initial_balances, + post_flight_callback: None, + first_burnchain_block_hash: burnchain_config.first_block_hash, + first_burnchain_block_height: burnchain_config.first_block_height as u32, + first_burnchain_block_timestamp: burnchain_config.first_block_timestamp, + pox_constants: burnchain_config.pox_constants.clone(), + get_bulk_initial_lockups: Some(Box::new(move || { + get_account_lockups(use_test_genesis_data) + })), + get_bulk_initial_balances: Some(Box::new(move || { + get_account_balances(use_test_genesis_data) + })), + get_bulk_initial_namespaces: Some(Box::new(move || { + get_namespaces(use_test_genesis_data) + })), + get_bulk_initial_names: Some(Box::new(move || get_names(use_test_genesis_data))), + }; + + let (chain_state_db, receipts) = StacksChainState::open_and_exec( + self.config.is_mainnet(), + self.config.burnchain.chain_id, + 
&self.config.get_chainstate_path_str(), + Some(&mut boot_data), + Some(self.config.node.get_marf_opts()), + ) + .unwrap(); + run_loop::announce_boot_receipts( + &mut self.event_dispatcher, + &chain_state_db, + &burnchain_config.pox_constants, + &receipts, + ); + chain_state_db + } + + /// Instantiate the Stacks chain state and start the chains coordinator thread. + /// Returns the coordinator thread handle, and the receiving end of the coordinator's atlas + /// attachment channel. + fn spawn_chains_coordinator( + &mut self, + burnchain_config: &Burnchain, + coordinator_receivers: CoordinatorReceivers, + miner_status: Arc>, + ) -> JoinHandle<()> { + let use_test_genesis_data = use_test_genesis_chainstate(&self.config); + + // load up genesis Atlas attachments + let mut atlas_config = AtlasConfig::new(self.config.is_mainnet()); + let genesis_attachments = GenesisData::new(use_test_genesis_data) + .read_name_zonefiles() + .into_iter() + .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec())) + .collect(); + atlas_config.genesis_attachments = Some(genesis_attachments); + + let chain_state_db = self.boot_chainstate(burnchain_config); + + // NOTE: re-instantiate AtlasConfig so we don't have to keep the genesis attachments around + let moved_atlas_config = self.config.atlas.clone(); + let moved_config = self.config.clone(); + let moved_burnchain_config = burnchain_config.clone(); + let mut coordinator_dispatcher = self.event_dispatcher.clone(); + let atlas_db = AtlasDB::connect( + moved_atlas_config.clone(), + &self.config.get_atlas_db_file_path(), + true, + ) + .expect("Failed to connect Atlas DB during startup"); + let coordinator_indexer = + make_bitcoin_indexer(&self.config, Some(self.should_keep_running.clone())); + + let coordinator_thread_handle = thread::Builder::new() + .name(format!( + "chains-coordinator-{}", + &moved_config.node.rpc_bind + )) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .spawn(move || { + debug!( + "chains-coordinator thread ID is 
{:?}", + thread::current().id() + ); + let mut cost_estimator = moved_config.make_cost_estimator(); + let mut fee_estimator = moved_config.make_fee_estimator(); + + let coord_config = ChainsCoordinatorConfig { + always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, + require_affirmed_anchor_blocks: moved_config + .node + .require_affirmed_anchor_blocks, + ..ChainsCoordinatorConfig::new() + }; + ChainsCoordinator::run( + coord_config, + chain_state_db, + moved_burnchain_config, + &mut coordinator_dispatcher, + coordinator_receivers, + moved_atlas_config, + cost_estimator.as_deref_mut(), + fee_estimator.as_deref_mut(), + miner_status, + coordinator_indexer, + atlas_db, + ); + }) + .expect("FATAL: failed to start chains coordinator thread"); + + coordinator_thread_handle + } + + /// Start Prometheus logging + fn start_prometheus(&mut self) { + let prometheus_bind = self.config.node.prometheus_bind.clone(); + if let Some(prometheus_bind) = prometheus_bind { + thread::Builder::new() + .name("prometheus".to_string()) + .spawn(move || { + debug!("prometheus thread ID is {:?}", thread::current().id()); + start_serving_monitoring_metrics(prometheus_bind); + }) + .unwrap(); + } + } + + /// Get the sortition DB's highest block height, aligned to a reward cycle boundary, and the + /// highest sortition. 
+    /// Returns (height at rc start, sortition)
+    fn get_reward_cycle_sortition_db_height(
+        sortdb: &SortitionDB,
+        burnchain_config: &Burnchain,
+    ) -> (u64, BlockSnapshot) {
+        let (stacks_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())
+            .expect("BUG: failed to load canonical stacks chain tip hash");
+
+        let sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_ch)
+            .expect("BUG: failed to query sortition DB")
+        {
+            Some(sn) => sn,
+            None => {
+                debug!("No canonical stacks chain tip hash present");
+                let sn = SortitionDB::get_first_block_snapshot(&sortdb.conn())
+                    .expect("BUG: failed to get first-ever block snapshot");
+                sn
+            }
+        };
+
+        (
+            burnchain_config.reward_cycle_to_block_height(
+                burnchain_config
+                    .block_height_to_reward_cycle(sn.block_height)
+                    .expect("BUG: snapshot precedes first reward cycle"),
+            ),
+            sn,
+        )
+    }
+
+    /// Starts the node runloop.
+    ///
+    /// This function will block by looping infinitely.
+    /// It will start the burnchain (separate thread), set-up a channel in
+    /// charge of coordinating the new blocks coming from the burnchain and
+    /// the nodes, taking turns on tenures.
+    pub fn start(&mut self, burnchain_opt: Option<Burnchain>, mut mine_start: u64) {
+        let (coordinator_receivers, coordinator_senders) = self
+            .coordinator_channels
+            .take()
+            .expect("Run loop already started, can only start once after initialization.");
+
+        // setup the termination handler, allow it to error if a prior runloop already set it
+        neon::RunLoop::setup_termination_handler(self.should_keep_running.clone(), true);
+        let mut burnchain = neon::RunLoop::instantiate_burnchain_state(
+            &self.config,
+            self.should_keep_running.clone(),
+            burnchain_opt,
+            coordinator_senders.clone(),
+        );
+
+        let burnchain_config = burnchain.get_burnchain();
+        self.burnchain = Some(burnchain_config.clone());
+
+        // can we mine?
+ let is_miner = self.check_is_miner(&mut burnchain); + self.is_miner = Some(is_miner); + + // relayer linkup + let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER); + + // set up globals so other subsystems can instantiate off of the runloop state. + let globals = Globals::new( + coordinator_senders, + self.get_miner_status(), + relay_send, + self.counters.clone(), + self.pox_watchdog_comms.clone(), + self.should_keep_running.clone(), + ); + self.set_globals(globals.clone()); + + // have headers; boot up the chains coordinator and instantiate the chain state + let coordinator_thread_handle = self.spawn_chains_coordinator( + &burnchain_config, + coordinator_receivers, + globals.get_miner_status(), + ); + self.start_prometheus(); + + // We announce a new burn block so that the chains coordinator + // can resume prior work and handle eventual unprocessed sortitions + // stored during a previous session. + globals.coord().announce_new_burn_block(); + + // Make sure at least one sortition has happened, and make sure it's globally available + let sortdb = burnchain.sortdb_mut(); + let (rc_aligned_height, sn) = + RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); + + let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height { + // need at least one sortition to happen. 
+ burnchain + .wait_for_sortitions(globals.coord().clone(), sn.block_height + 1) + .expect("Unable to get burnchain tip") + .block_snapshot + } else { + sn + }; + + globals.set_last_sortition(burnchain_tip_snapshot.clone()); + + // Boot up the p2p network and relayer, and figure out how many sortitions we have so far + // (it could be non-zero if the node is resuming from chainstate) + let mut node = StacksNode::spawn(self, globals.clone(), relay_recv); + + // Wait for all pending sortitions to process + let burnchain_db = burnchain_config + .open_burnchain_db(false) + .expect("FATAL: failed to open burnchain DB"); + let burnchain_db_tip = burnchain_db + .get_canonical_chain_tip() + .expect("FATAL: failed to query burnchain DB"); + let mut burnchain_tip = burnchain + .wait_for_sortitions(globals.coord().clone(), burnchain_db_tip.block_height) + .expect("Unable to get burnchain tip"); + + // Start the runloop + debug!("Runloop: Begin run loop"); + self.counters.bump_blocks_processed(); + + let mut sortition_db_height = rc_aligned_height; + let mut burnchain_height = sortition_db_height; + let mut num_sortitions_in_last_cycle; + + // prepare to fetch the first reward cycle! + let mut target_burnchain_block_height = cmp::min( + burnchain_config.reward_cycle_to_block_height( + burnchain_config + .block_height_to_reward_cycle(burnchain_height) + .expect("BUG: block height is not in a reward cycle") + + 1, + ), + burnchain.get_headers_height() - 1, + ); + + debug!( + "Runloop: Begin main runloop starting a burnchain block {}", + sortition_db_height + ); + + let mut last_tenure_sortition_height = 0; + + loop { + if !globals.keep_running() { + // The p2p thread relies on the same atomic_bool, it will + // discontinue its execution after completing its ongoing runloop epoch. 
+ info!("Terminating p2p process"); + info!("Terminating relayer"); + info!("Terminating chains-coordinator"); + + globals.coord().stop_chains_coordinator(); + coordinator_thread_handle.join().unwrap(); + node.join(); + + info!("Exiting stacks-node"); + break; + } + + let remote_chain_height = burnchain.get_headers_height() - 1; + + // wait for the p2p state-machine to do at least one pass + debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); + + // TODO: for now, we just set initial block download false. + // I think that the sync watchdog probably needs to change a fair bit + // for nakamoto. There may be some opportunity to refactor this runloop + // as well (e.g., the `mine_start` should be integrated with the + // watchdog so that there's just one source of truth about ibd), + // but I think all of this can be saved for post-neon work. + let ibd = false; + self.pox_watchdog_comms.set_ibd(ibd); + + // calculate burnchain sync percentage + let percent: f64 = if remote_chain_height > 0 { + burnchain_tip.block_snapshot.block_height as f64 / remote_chain_height as f64 + } else { + 0.0 + }; + + // Download each burnchain block and process their sortitions. This, in turn, will + // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and + // process them. This loop runs for one reward cycle, so that the next pass of the + // runloop will cause the PoX sync watchdog to wait until it believes that the node has + // obtained all the Stacks blocks it can. 
+ debug!( + "Runloop: Download burnchain blocks up to reward cycle #{} (height {})", + burnchain_config + .block_height_to_reward_cycle(target_burnchain_block_height) + .expect("FATAL: target burnchain block height does not have a reward cycle"), + target_burnchain_block_height; + "total_burn_sync_percent" => %percent, + "local_burn_height" => burnchain_tip.block_snapshot.block_height, + "remote_tip_height" => remote_chain_height + ); + + loop { + if !globals.keep_running() { + break; + } + + let (next_burnchain_tip, tip_burnchain_height) = + match burnchain.sync(Some(target_burnchain_block_height)) { + Ok(x) => x, + Err(e) => { + warn!("Runloop: Burnchain controller stopped: {}", e); + continue; + } + }; + + // *now* we know the burnchain height + burnchain_tip = next_burnchain_tip; + burnchain_height = tip_burnchain_height; + + let sortition_tip = &burnchain_tip.block_snapshot.sortition_id; + let next_sortition_height = burnchain_tip.block_snapshot.block_height; + + if next_sortition_height != last_tenure_sortition_height { + info!( + "Runloop: Downloaded burnchain blocks up to height {}; target height is {}; remote_chain_height = {} next_sortition_height = {}, sortition_db_height = {}", + burnchain_height, target_burnchain_block_height, remote_chain_height, next_sortition_height, sortition_db_height + ); + } + + if next_sortition_height > sortition_db_height { + debug!( + "Runloop: New burnchain block height {} > {}", + next_sortition_height, sortition_db_height + ); + + let mut sort_count = 0; + + debug!("Runloop: block mining until we process all sortitions"); + signal_mining_blocked(globals.get_miner_status()); + + // first, let's process all blocks in (sortition_db_height, next_sortition_height] + for block_to_process in (sortition_db_height + 1)..(next_sortition_height + 1) { + // stop mining so we can advance the sortition DB and so our + // ProcessTenure() directive (sent by relayer_sortition_notify() below) + // will be unblocked. 
+ + let block = { + let ic = burnchain.sortdb_ref().index_conn(); + SortitionDB::get_ancestor_snapshot(&ic, block_to_process, sortition_tip) + .unwrap() + .expect( + "Failed to find block in fork processed by burnchain indexer", + ) + }; + if block.sortition { + sort_count += 1; + } + + let sortition_id = &block.sortition_id; + + // Have the node process the new block, that can include, or not, a sortition. + if let Err(e) = + node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd) + { + // relayer errored, exit. + error!("Runloop: Block relayer and miner errored, exiting."; "err" => ?e); + return; + } + } + + debug!("Runloop: enable miner after processing sortitions"); + signal_mining_ready(globals.get_miner_status()); + + num_sortitions_in_last_cycle = sort_count; + debug!( + "Runloop: Synchronized sortitions up to block height {} from {} (chain tip height is {}); {} sortitions", + next_sortition_height, sortition_db_height, burnchain_height, num_sortitions_in_last_cycle; + ); + + sortition_db_height = next_sortition_height; + } else if ibd { + // drive block processing after we reach the burnchain tip. + // we may have downloaded all the blocks already, + // so we can't rely on the relayer alone to + // drive it. + globals.coord().announce_new_stacks_block(); + } + + if burnchain_height >= target_burnchain_block_height + || burnchain_height >= remote_chain_height + { + break; + } + } + + // advance one reward cycle at a time. + // If we're still downloading, then this is simply target_burnchain_block_height + reward_cycle_len. 
+ // Otherwise, this is burnchain_tip + reward_cycle_len + let next_target_burnchain_block_height = cmp::min( + burnchain_config.reward_cycle_to_block_height( + burnchain_config + .block_height_to_reward_cycle(target_burnchain_block_height) + .expect("FATAL: burnchain height before system start") + + 1, + ), + remote_chain_height, + ); + + debug!("Runloop: Advance target burnchain block height from {} to {} (sortition height {})", target_burnchain_block_height, next_target_burnchain_block_height, sortition_db_height); + target_burnchain_block_height = next_target_burnchain_block_height; + + if sortition_db_height >= burnchain_height && !ibd { + let canonical_stacks_tip_height = + SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn()) + .map(|snapshot| snapshot.canonical_stacks_tip_height) + .unwrap_or(0); + if canonical_stacks_tip_height < mine_start { + info!( + "Runloop: Synchronized full burnchain, but stacks tip height is {}, and we are trying to boot to {}, not mining until reaching chain tip", + canonical_stacks_tip_height, + mine_start + ); + } else { + // once we've synced to the chain tip once, don't apply this check again. + // this prevents a possible corner case in the event of a PoX fork. + mine_start = 0; + + // at tip, and not downloading. proceed to mine. + if last_tenure_sortition_height != sortition_db_height { + info!( + "Runloop: Synchronized full burnchain up to height {}. 
Proceeding to mine blocks", + sortition_db_height + ); + last_tenure_sortition_height = sortition_db_height; + } + } + } + } + } +} diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index c9368e9e3a..68e13dc511 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -31,8 +31,9 @@ use stx_genesis::GenesisData; use super::RunLoopCallbacks; use crate::burnchains::make_bitcoin_indexer; +use crate::globals::NeonGlobals as Globals; use crate::monitoring::start_serving_monitoring_metrics; -use crate::neon_node::{Globals, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; +use crate::neon_node::{StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; use crate::node::{ get_account_balances, get_account_lockups, get_names, get_namespaces, use_test_genesis_chainstate, @@ -63,6 +64,10 @@ pub struct Counters { pub missed_tenures: RunLoopCounter, pub missed_microblock_tenures: RunLoopCounter, pub cancelled_commits: RunLoopCounter, + + pub naka_submitted_vrfs: RunLoopCounter, + pub naka_submitted_commits: RunLoopCounter, + pub naka_mined_blocks: RunLoopCounter, } impl Counters { @@ -74,6 +79,9 @@ impl Counters { missed_tenures: RunLoopCounter::new(AtomicU64::new(0)), missed_microblock_tenures: RunLoopCounter::new(AtomicU64::new(0)), cancelled_commits: RunLoopCounter::new(AtomicU64::new(0)), + naka_submitted_vrfs: RunLoopCounter::new(AtomicU64::new(0)), + naka_submitted_commits: RunLoopCounter::new(AtomicU64::new(0)), + naka_mined_blocks: RunLoopCounter::new(AtomicU64::new(0)), } } @@ -85,6 +93,9 @@ impl Counters { missed_tenures: (), missed_microblock_tenures: (), cancelled_commits: (), + naka_submitted_vrfs: (), + naka_submitted_commits: (), + naka_mined_blocks: (), } } @@ -124,6 +135,18 @@ impl Counters { Counters::inc(&self.cancelled_commits); } + pub fn bump_naka_submitted_vrfs(&self) { + Counters::inc(&self.naka_submitted_vrfs); + } + + pub fn 
bump_naka_submitted_commits(&self) { + Counters::inc(&self.naka_submitted_commits); + } + + pub fn bump_naka_mined_blocks(&self) { + Counters::inc(&self.naka_mined_blocks); + } + pub fn set_microblocks_processed(&self, value: u64) { Counters::set(&self.microblocks_processed, value) } @@ -251,7 +274,7 @@ impl RunLoop { } pub fn get_termination_switch(&self) -> Arc { - self.get_globals().should_keep_running.clone() + self.should_keep_running.clone() } pub fn get_burnchain(&self) -> Burnchain { @@ -272,8 +295,7 @@ impl RunLoop { /// Set up termination handler. Have a signal set the `should_keep_running` atomic bool to /// false. Panics of called more than once. - fn setup_termination_handler(&self) { - let keep_running_writer = self.should_keep_running.clone(); + pub fn setup_termination_handler(keep_running_writer: Arc, allow_err: bool) { let install = termination::set_handler(move |sig_id| match sig_id { SignalId::Bus => { let msg = "Caught SIGBUS; crashing immediately and dumping core\n"; @@ -291,7 +313,8 @@ impl RunLoop { if let Err(e) = install { // integration tests can do this - if cfg!(test) { + if cfg!(test) || allow_err { + info!("Error setting up signal handler, may have already been set"); } else { panic!("FATAL: error setting termination handler - {}", e); } @@ -355,17 +378,18 @@ impl RunLoop { /// Instantiate the burnchain client and databases. /// Fetches headers and instantiates the burnchain. /// Panics on failure. - fn instantiate_burnchain_state( - &mut self, + pub fn instantiate_burnchain_state( + config: &Config, + should_keep_running: Arc, burnchain_opt: Option, coordinator_senders: CoordinatorChannels, ) -> BitcoinRegtestController { // Initialize and start the burnchain. 
let mut burnchain_controller = BitcoinRegtestController::with_burnchain( - self.config.clone(), + config.clone(), Some(coordinator_senders), burnchain_opt, - Some(self.should_keep_running.clone()), + Some(should_keep_running.clone()), ); let burnchain = burnchain_controller.get_burnchain(); @@ -377,9 +401,9 @@ impl RunLoop { // Upgrade chainstate databases if they exist already match migrate_chainstate_dbs( &epochs, - &self.config.get_burn_db_file_path(), - &self.config.get_chainstate_path_str(), - Some(self.config.node.get_marf_opts()), + &config.get_burn_db_file_path(), + &config.get_chainstate_path_str(), + Some(config.node.get_marf_opts()), ) { Ok(_) => {} Err(coord_error::DBError(db_error::TooOldForEpoch)) => { @@ -951,9 +975,13 @@ impl RunLoop { .take() .expect("Run loop already started, can only start once after initialization."); - self.setup_termination_handler(); - let mut burnchain = - self.instantiate_burnchain_state(burnchain_opt, coordinator_senders.clone()); + Self::setup_termination_handler(self.should_keep_running.clone(), false); + let mut burnchain = Self::instantiate_burnchain_state( + &self.config, + self.should_keep_running.clone(), + burnchain_opt, + coordinator_senders.clone(), + ); let burnchain_config = burnchain.get_burnchain(); self.burnchain = Some(burnchain_config.clone()); diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index fdb09dd22c..454e92b50b 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -16,6 +16,7 @@ use crate::helium::RunLoop; use crate::tests::to_addr; use crate::Config; +#[derive(Debug)] pub enum BitcoinCoreError { SpawnFailed(String), } diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index faea7f99d9..8ac9fcff53 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -43,6 +43,7 @@ mod epoch_23; mod 
epoch_24; mod integrations; mod mempool; +mod nakamoto_integrations; pub mod neon_integrations; mod signer; mod stackerdb; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs new file mode 100644 index 0000000000..0b1d79ffa3 --- /dev/null +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -0,0 +1,492 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; +use std::time::{Duration, Instant}; +use std::{env, thread}; + +use clarity::vm::costs::ExecutionCost; +use clarity::vm::types::PrincipalData; +use lazy_static::lazy_static; +use stacks::burnchains::MagicBytes; +use stacks::chainstate::coordinator::comm::CoordinatorChannels; +use stacks::chainstate::nakamoto::NakamotoChainState; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::core::{ + StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, + PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, + PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, +}; +use stacks_common::address::AddressHashMode; +use stacks_common::consts::STACKS_EPOCH_MAX; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::Secp256k1PrivateKey; + +use super::bitcoin_regtest::BitcoinCoreController; +use crate::config::{EventKeyType, EventObserverConfig}; +use crate::mockamoto::signer::SelfSigner; +use crate::neon::{Counters, RunLoopCounter}; +use crate::run_loop::boot_nakamoto; +use crate::tests::make_stacks_transfer; +use crate::tests::neon_integrations::{ + next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, +}; +use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; + +static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; +static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; + +lazy_static! 
{ + pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: BLOCK_LIMIT_MAINNET_10.clone(), + network_epoch: PEER_VERSION_EPOCH_1_0 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 1, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_0 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 1, + end_height: 2, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_05 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 2, + end_height: 3, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_1 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: 3, + end_height: 4, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: 4, + end_height: 5, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_3 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: 5, + end_height: 6, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_4 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: 6, + end_height: 221, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: 221, + end_height: STACKS_EPOCH_MAX, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0 + }, + ]; +} + +/// Return a working nakamoto-neon config and the miner's bitcoin address to fund +pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress) { + let mut conf = super::new_test_conf(); + conf.burnchain.mode = "nakamoto-neon".into(); + + // tests can override 
this, but these tests run with epoch 2.05 by default + conf.burnchain.epochs = Some(NAKAMOTO_INTEGRATION_EPOCHS.to_vec()); + + if let Some(seed) = seed { + conf.node.seed = seed.to_vec(); + } + + // instantiate the keychain so we can fund the bitcoin op signer + let keychain = Keychain::default(conf.node.seed.clone()); + + let mining_key = Secp256k1PrivateKey::from_seed(&[1]); + conf.miner.mining_key = Some(mining_key); + conf.miner.self_signing_key = Some(SelfSigner::single_signer()); + + conf.node.miner = true; + conf.node.wait_time_for_microblocks = 500; + conf.burnchain.burn_fee_cap = 20000; + + conf.burnchain.username = Some("neon-tester".into()); + conf.burnchain.password = Some("neon-tester-pass".into()); + conf.burnchain.peer_host = "127.0.0.1".into(); + conf.burnchain.local_mining_public_key = + Some(keychain.generate_op_signer().get_public_key().to_hex()); + conf.burnchain.commit_anchor_block_within = 0; + + // test to make sure config file parsing is correct + let mut cfile = ConfigFile::xenon(); + cfile.node.as_mut().map(|node| node.bootstrap_node.take()); + + if let Some(burnchain) = cfile.burnchain.as_mut() { + burnchain.peer_host = Some("127.0.0.1".to_string()); + } + + conf.burnchain.magic_bytes = MagicBytes::from(['T' as u8, '3' as u8].as_ref()); + conf.burnchain.poll_time_secs = 1; + conf.node.pox_sync_sample_secs = 0; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + // if there's just one node, then this must be true for tests to pass + conf.miner.wait_for_block_download = false; + + conf.node.mine_microblocks = false; + conf.miner.microblock_attempt_time_ms = 10; + conf.node.microblock_frequency = 0; + conf.node.wait_time_for_blocks = 200; + + let miner_account = keychain.origin_address(conf.is_mainnet()).unwrap(); + + conf.burnchain.pox_prepare_length = Some(5); + conf.burnchain.pox_reward_length = Some(20); + + (conf, 
miner_account) +} + +pub fn next_block_and( + btc_controller: &mut BitcoinRegtestController, + timeout_secs: u64, + mut check: F, +) -> Result<(), String> +where + F: FnMut() -> Result, +{ + eprintln!("Issuing bitcoin block"); + btc_controller.build_next_block(1); + let start = Instant::now(); + while !check()? { + if start.elapsed() > Duration::from_secs(timeout_secs) { + error!("Timed out waiting for block to process, trying to continue test"); + return Err("Timed out".into()); + } + thread::sleep(Duration::from_millis(100)); + } + Ok(()) +} + +/// Mine a bitcoin block, and wait until: +/// (1) a new block has been processed by the coordinator +/// (2) 2 block commits have been issued ** or ** more than 10 seconds have +/// passed since (1) occurred +fn next_block_and_mine_commit( + btc_controller: &mut BitcoinRegtestController, + timeout_secs: u64, + coord_channels: &Arc>, + commits_submitted: &Arc, +) -> Result<(), String> { + let commits_submitted = commits_submitted.clone(); + let blocks_processed_before = coord_channels + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + let mut block_processed_time: Option = None; + next_block_and(btc_controller, timeout_secs, || { + if let Some(block_processed_time) = block_processed_time.as_ref() { + let commits_sent = commits_submitted.load(Ordering::SeqCst); + if commits_sent >= commits_before + 2 { + return Ok(true); + } + if commits_sent >= commits_before + 1 + && block_processed_time.elapsed() > Duration::from_secs(6) + { + return Ok(true); + } + Ok(false) + } else { + let blocks_processed = coord_channels + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + block_processed_time.replace(Instant::now()); + } + Ok(false) + } + }) +} + +fn setup_stacker(naka_conf: &mut Config) -> Secp256k1PrivateKey { + let stacker_sk = Secp256k1PrivateKey::new(); + let 
stacker_address = tests::to_addr(&stacker_sk); + naka_conf.add_initial_balance( + PrincipalData::from(stacker_address.clone()).to_string(), + POX_4_DEFAULT_STACKER_BALANCE, + ); + stacker_sk +} + +/// +/// * `stacker_sk` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +fn boot_to_epoch_3( + naka_conf: &Config, + blocks_processed: &RunLoopCounter, + stacker_sk: Secp256k1PrivateKey, + btc_regtest_controller: &mut BitcoinRegtestController, +) { + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + + info!( + "Chain bootstrapped to bitcoin block 201, starting Epoch 2x miner"; + "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), + ); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + // first mined stacks block + next_block_and_wait(btc_regtest_controller, &blocks_processed); + + // stack enough to activate pox-4 + let pox_addr_tuple = clarity::vm::tests::execute(&format!( + "{{ hashbytes: 0x{}, version: 0x{:02x} }}", + to_hex(&[0; 20]), + AddressHashMode::SerializeP2PKH as u8, + )); + + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple, + clarity::vm::Value::UInt(205), + clarity::vm::Value::UInt(12), + ], + ); + + submit_tx(&http_origin, &stacking_tx); + + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + epoch_3.start_height - 1, + &naka_conf, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); +} + +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. 
+/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// This test makes three assertions: +/// * 30 blocks are mined after 3.0 starts. This is enough to mine across 2 reward cycles +/// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 +/// * The final chain tip is a nakamoto block +fn simple_neon_integration() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 100; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt + send_fee, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + stacker_sk, + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + // Mine 15 nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + } + + // Submit a TX + let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let mut mempool = naka_conf + .connect_mempool_db() + .expect("Database failure opening mempool"); + + mempool + .submit_raw( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + 
&tip.anchored_header.block_hash(), + transfer_tx.clone(), + &ExecutionCost::max_value(), + &StacksEpochId::Epoch30, + ) + .unwrap(); + + // Mine 15 more nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + // assert that the transfer tx was observed + let transfer_tx_included = test_observer::get_blocks() + .into_iter() + .find(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + .is_some() + }) + .is_some(); + + assert!( + transfer_tx_included, + "Nakamoto node failed to include the transfer tx" + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index b1e68d26d7..5676d1bb12 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -9,7 +9,7 @@ use std::{cmp, env, fs, thread}; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; -use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::types::PrincipalData; use 
clarity::vm::{ClarityName, ClarityVersion, ContractName, Value, MAX_CALL_STACK_DEPTH}; use rand::Rng; use rusqlite::types::ToSql; @@ -24,7 +24,6 @@ use stacks::chainstate::burn::operations::{ }; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::comm::CoordinatorChannels; -use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{ signal_mining_blocked, signal_mining_ready, TransactionErrorEvent, TransactionEvent, @@ -55,7 +54,6 @@ use stacks::net::atlas::{ }; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::db::{query_row_columns, query_rows, u64_to_sql}; -use stacks_common::address::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, @@ -403,7 +401,7 @@ pub mod test_observer { } /// each path here should correspond to one of the paths listed in `event_dispatcher.rs` - async fn serve() { + async fn serve(port: u16) { let new_blocks = warp::path!("new_block") .and(warp::post()) .and(warp::body::json()) @@ -458,7 +456,7 @@ pub mod test_observer { .or(mined_nakamoto_blocks) .or(new_stackerdb_chunks), ) - .run(([127, 0, 0, 1], EVENT_OBSERVER_PORT)) + .run(([127, 0, 0, 1], port)) .await } @@ -466,7 +464,15 @@ pub mod test_observer { clear(); thread::spawn(|| { let rt = tokio::runtime::Runtime::new().expect("Failed to initialize tokio"); - rt.block_on(serve()); + rt.block_on(serve(EVENT_OBSERVER_PORT)); + }); + } + + pub fn spawn_at(port: u16) { + clear(); + thread::spawn(move || { + let rt = tokio::runtime::Runtime::new().expect("Failed to initialize tokio"); + rt.block_on(serve(port)); }); } @@ -556,7 +562,7 @@ pub fn next_block_and_iterate( /// reaches *exactly* `target_height`. /// /// Returns `false` if `next_block_and_wait` times out. 
-fn run_until_burnchain_height( +pub fn run_until_burnchain_height( btc_regtest_controller: &mut BitcoinRegtestController, blocks_processed: &Arc, target_height: u64,