diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml
index fc4a725687..3aa1968f3c 100644
--- a/.github/workflows/pr-differences-mutants.yml
+++ b/.github/workflows/pr-differences-mutants.yml
@@ -9,6 +9,16 @@ on:
- ready_for_review
paths:
- '**.rs'
+ workflow_dispatch:
+ inputs:
+ ignore_timeout:
+ description: "Ignore mutants timeout limit"
+ required: false
+ type: choice
+ options:
+ - true
+ - false
+ default: 'true'
concurrency:
group: pr-differences-${{ github.head_ref || github.ref || github.run_id }}
@@ -16,11 +26,29 @@ concurrency:
cancel-in-progress: true
jobs:
+ check-right-permissions:
+ name: Check Right Permissions
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check Right Permissions To Trigger This
+ id: check_right_permissions
+ uses: stacks-network/actions/team-membership@feat/mutation-testing
+ with:
+ username: ${{ github.actor }}
+ team: 'Blockchain Team'
+ GITHUB_TOKEN: ${{ secrets.GH_TOKEN }}
+
+ - name: Fail if the user does not have the right permissions
+ if: ${{ inputs.ignore_timeout == true && steps.check_right_permissions.outputs.is_team_member != 'true' }}
+ run: exit 1
+
+
# Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards
check-big-packages-and-shards:
name: Check Packages and Shards
-
runs-on: ubuntu-latest
+ needs: check-right-permissions
outputs:
run_stackslib: ${{ steps.check_packages_and_shards.outputs.run_stackslib }}
@@ -30,10 +58,13 @@ jobs:
run_small_packages: ${{ steps.check_packages_and_shards.outputs.run_small_packages }}
small_packages_with_shards: ${{ steps.check_packages_and_shards.outputs.small_packages_with_shards }}
run_stacks_signer: ${{ steps.check_packages_and_shards.outputs.run_stacks_signer }}
+ too_many_mutants: ${{ steps.check_packages_and_shards.outputs.too_many_mutants }}
steps:
- id: check_packages_and_shards
- uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main
+ uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@feat/mutation-testing
+ with:
+ ignore_timeout: ${{ inputs.ignore_timeout }}
# Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards)
pr-differences-mutants-small-normal:
@@ -49,7 +80,7 @@ jobs:
steps:
- name: Run mutants on diffs
- uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main
+ uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing
with:
package: 'small'
@@ -72,7 +103,7 @@ jobs:
steps:
- name: Run mutants on diffs
- uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main
+ uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing
with:
shard: ${{ matrix.shard }}
package: 'small'
@@ -94,7 +125,7 @@ jobs:
env:
BITCOIND_TEST: 1
RUST_BACKTRACE: full
- uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main
+ uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing
with:
package: 'stackslib'
@@ -120,7 +151,7 @@ jobs:
env:
BITCOIND_TEST: 1
RUST_BACKTRACE: full
- uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main
+ uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing
with:
shard: ${{ matrix.shard }}
package: 'stackslib'
@@ -142,7 +173,7 @@ jobs:
env:
BITCOIND_TEST: 1
RUST_BACKTRACE: full
- uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main
+ uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing
with:
package: 'stacks-node'
@@ -168,7 +199,7 @@ jobs:
env:
BITCOIND_TEST: 1
RUST_BACKTRACE: full
- uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main
+ uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing
with:
shard: ${{ matrix.shard }}
package: 'stacks-node'
@@ -186,7 +217,7 @@ jobs:
steps:
- name: Run mutants on diffs
- uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main
+ uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing
with:
package: 'stacks-signer'
@@ -211,7 +242,7 @@ jobs:
steps:
- name: Output Mutants
- uses: stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@main
+ uses: stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@feat/mutation-testing
with:
stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.run_stackslib }}
shards_for_stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.stackslib_with_shards }}
@@ -220,3 +251,4 @@ jobs:
small_packages: ${{ needs.check-big-packages-and-shards.outputs.run_small_packages }}
shards_for_small_packages: ${{ needs.check-big-packages-and-shards.outputs.small_packages_with_shards }}
stacks_signer: ${{ needs.check-big-packages-and-shards.outputs.run_stacks_signer }}
+ too_many_mutants: ${{ needs.check-big-packages-and-shards.outputs.too_many_mutants }}
diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs
index dd440ac110..3ae47383c2 100644
--- a/stackslib/src/net/download/nakamoto/mod.rs
+++ b/stackslib/src/net/download/nakamoto/mod.rs
@@ -163,6 +163,12 @@ mod tenure;
mod tenure_downloader;
mod tenure_downloader_set;
mod tenure_downloader_unconfirmed;
+mod tenure_downloader_copy;
+mod tenure_downloader_set_copy;
+mod tenure_downloader_unconfirmed_copy;
+mod tenure_downloader_opy;
+mod tenure_downloader_set_opy;
+mod tenure_downloader_unconfirmed_opy;
pub use crate::net::download::nakamoto::download_state_machine::{
NakamotoDownloadState, NakamotoDownloadStateMachine,
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs
new file mode 100644
index 0000000000..c6e5ee0703
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs
@@ -0,0 +1,699 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+ BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::to_hex;
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::nakamoto::{
+ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+ Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+ EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// Download states for an historic tenure. This is a tenure for which we know the hashes of the
+/// start and end block. This includes all tenures except for the two most recent ones.
+#[derive(Debug, Clone, PartialEq)]
+pub enum NakamotoTenureDownloadState {
+    /// Getting the tenure-start block (the given StacksBlockId is its block ID).
+ GetTenureStartBlock(StacksBlockId),
+ /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not
+ /// always) handled by the execution of another NakamotoTenureDownloader. The only
+ /// exceptions are as follows:
+ ///
+ /// * if this tenure contains the anchor block, and it's the last tenure in the
+ /// reward cycle. In this case, the end-block must be directly fetched, since there will be no
+    /// follow-on NakamotoTenureDownloader in the same reward cycle who can provide this.
+ ///
+ /// * if this tenure is the highest complete tenure, and we just learned the start-block of the
+ /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block
+ /// already known. This step will be skipped because the end-block is already present in the
+ /// state machine.
+ ///
+ /// * if the deadline (second parameter) is exceeded, the state machine transitions to
+ /// GetTenureEndBlock.
+ ///
+ /// The two fields here are:
+ /// * the block ID of the last block in the tenure (which happens to be the block ID of the
+ /// start block of the next tenure)
+ /// * the deadline by which this state machine needs to have obtained the tenure end-block
+ /// before transitioning to `GetTenureEndBlock`.
+ WaitForTenureEndBlock(StacksBlockId, Instant),
+ /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks
+ /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in
+ /// which we cannot quickly get the tenure-end block.
+ ///
+ /// The field here is the block ID of the tenure end block.
+ GetTenureEndBlock(StacksBlockId),
+ /// Receiving tenure blocks.
+ /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This
+ /// is because a tenure is fetched in order from highest block to lowest block.
+ GetTenureBlocks(StacksBlockId),
+ /// We have gotten all the blocks for this tenure
+ Done,
+}
+
+pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1;
+
+impl fmt::Display for NakamotoTenureDownloadState {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self)
+ }
+}
+
+/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs
+/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent
+/// tenures).
+///
+/// This state machine works as follows:
+///
+/// 1. Fetch the first block in the given tenure
+/// 2. Obtain the last block in the given tenure, via one of the following means:
+/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this
+/// machine's tenure, and can be copied into this machine.
+/// b. This machine is configured to directly fetch the end-block. This only happens if this
+/// tenure both contains the anchor block for the next reward cycle and happens to be the last
+/// tenure in the current reward cycle.
+/// c. This machine is given the end-block on instantiation. This only happens when the machine
+/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure);
+/// in this case, the end-block is the start-block of the ongoing tenure.
+/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse
+/// order. As blocks are found, their signer signatures will be validated against the signer
+/// public keys for this tenure; their hash-chain continuity will be validated against the start
+/// and end block hashes; their quantity will be validated against the tenure-change transaction
+/// in the end-block.
+///
+/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto
+/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of
+/// whether or not it straddles a reward cycle boundary).
+#[derive(Debug, Clone, PartialEq)]
+pub struct NakamotoTenureDownloader {
+ /// Consensus hash that identifies this tenure
+ pub tenure_id_consensus_hash: ConsensusHash,
+ /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and
+ /// sortition DB.
+ pub tenure_start_block_id: StacksBlockId,
+ /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID
+ /// for some other tenure). Learned from the inventory state machine and sortition DB.
+ pub tenure_end_block_id: StacksBlockId,
+ /// Address of who we're asking for blocks
+ pub naddr: NeighborAddress,
+ /// Signer public keys that signed the start-block of this tenure, in reward cycle order
+ pub start_signer_keys: RewardSet,
+ /// Signer public keys that signed the end-block of this tenure
+ pub end_signer_keys: RewardSet,
+ /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with
+ /// this state machine.
+ pub idle: bool,
+
+ /// What state we're in for downloading this tenure
+ pub state: NakamotoTenureDownloadState,
+ /// Tenure-start block
+ pub tenure_start_block: Option,
+ /// Pre-stored tenure end block (used by the unconfirmed block downloader).
+ /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once
+ /// the start-block for the current tenure is downloaded. This is that start-block, which is
+ /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step.
+ pub tenure_end_block: Option,
+ /// Tenure-end block header and TenureChange
+ pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>,
+ /// Tenure blocks
+ pub tenure_blocks: Option>,
+}
+
+impl NakamotoTenureDownloader {
+ pub fn new(
+ tenure_id_consensus_hash: ConsensusHash,
+ tenure_start_block_id: StacksBlockId,
+ tenure_end_block_id: StacksBlockId,
+ naddr: NeighborAddress,
+ start_signer_keys: RewardSet,
+ end_signer_keys: RewardSet,
+ ) -> Self {
+ test_debug!(
+ "Instantiate downloader to {} for tenure {}: {}-{}",
+ &naddr,
+ &tenure_id_consensus_hash,
+ &tenure_start_block_id,
+ &tenure_end_block_id,
+ );
+ Self {
+ tenure_id_consensus_hash,
+ tenure_start_block_id,
+ tenure_end_block_id,
+ naddr,
+ start_signer_keys,
+ end_signer_keys,
+ idle: false,
+ state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()),
+ tenure_start_block: None,
+ tenure_end_header: None,
+ tenure_end_block: None,
+ tenure_blocks: None,
+ }
+ }
+
+ /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed
+ /// tenure. This supplies the tenure end-block if known in advance.
+ pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self {
+ self.tenure_end_block = Some(tenure_end_block);
+ self
+ }
+
+ /// Is this downloader waiting for the tenure-end block data from some other downloader? Per
+ /// the struct documentation, this is case 2(a).
+ pub fn is_waiting(&self) -> bool {
+ if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /// Validate and accept a given tenure-start block. If accepted, then advance the state.
+ /// Returns Ok(()) if the start-block is valid.
+ /// Returns Err(..) if it is not valid.
+ pub fn try_accept_tenure_start_block(
+ &mut self,
+ tenure_start_block: NakamotoBlock,
+ ) -> Result<(), NetError> {
+ let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else {
+ // not the right state for this
+ warn!("Invalid state for this method";
+ "state" => %self.state);
+ return Err(NetError::InvalidState);
+ };
+
+ if self.tenure_start_block_id != tenure_start_block.header.block_id() {
+ // not the block we were expecting
+ warn!("Invalid tenure-start block: unexpected";
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "tenure_id_start_block" => %self.tenure_start_block_id,
+ "tenure_start_block ID" => %tenure_start_block.header.block_id(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ }
+
+ if let Err(e) = tenure_start_block
+ .header
+ .verify_signer_signatures(&self.start_signer_keys)
+ {
+ // signature verification failed
+ warn!("Invalid tenure-start block: bad signer signature";
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "block.header.block_id" => %tenure_start_block.header.block_id(),
+ "state" => %self.state,
+ "error" => %e);
+ return Err(NetError::InvalidMessage);
+ }
+
+ debug!(
+ "Accepted tenure-start block for tenure {} block={}",
+ &self.tenure_id_consensus_hash,
+ &tenure_start_block.block_id()
+ );
+ self.tenure_start_block = Some(tenure_start_block);
+
+ if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() {
+ // tenure_end_header supplied externally
+ self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone());
+ } else if let Some(tenure_end_block) = self.tenure_end_block.take() {
+ // we already have the tenure-end block, so immediately proceed to accept it.
+ test_debug!(
+ "Preemptively process tenure-end block {} for tenure {}",
+ tenure_end_block.block_id(),
+ &self.tenure_id_consensus_hash
+ );
+ self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock(
+ tenure_end_block.block_id(),
+ Instant::now()
+ .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0))
+ .ok_or(NetError::OverflowError("Deadline is too big".into()))?,
+ );
+ self.try_accept_tenure_end_block(&tenure_end_block)?;
+ } else {
+ // need to get tenure_end_header. By default, assume that another
+ // NakamotoTenureDownloader will provide this block, and allow the
+ // NakamotoTenureDownloaderSet instance that manages a collection of these
+ // state-machines make the call to require this one to fetch the block directly.
+ self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock(
+ self.tenure_end_block_id.clone(),
+ Instant::now()
+ .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0))
+ .ok_or(NetError::OverflowError("Deadline is too big".into()))?,
+ );
+ }
+ Ok(())
+ }
+
+ /// Transition this state-machine from waiting for its tenure-end block from another
+ /// state-machine to directly fetching it. This only needs to happen if the tenure this state
+ /// machine is downloading contains the PoX anchor block, and it's also the last confirmed
+    /// tenure in this reward cycle.
+ ///
+    /// This function is called by `NakamotoTenureDownloaderSet`, which instantiates, schedules, and
+ /// runs a set of these machines based on the peers' inventory vectors. But because we don't
+ /// know if this is the PoX anchor block tenure (or even the last tenure) until we have
+ /// inventory vectors for this tenure's reward cycle, this state-transition must be driven
+ /// after this machine's instantiation.
+ pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> {
+ let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state
+ else {
+ return Err(NetError::InvalidState);
+ };
+ test_debug!(
+ "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)",
+ &self.naddr,
+ &end_block_id
+ );
+ self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id);
+ Ok(())
+ }
+
+ /// Transition to fetching the tenure-end block directly if waiting has taken too long.
+ pub fn transition_to_fetch_end_block_on_timeout(&mut self) {
+ if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) =
+ self.state
+ {
+ if wait_deadline < Instant::now() {
+ test_debug!(
+ "Transition downloader to {} to directly fetch tenure-end block {} (timed out)",
+ &self.naddr,
+ &end_block_id
+ );
+ self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id);
+ }
+ }
+ }
+
+ /// Validate and accept a tenure-end block. If accepted, then advance the state.
+ /// Once accepted, this function extracts the tenure-change transaction and block header from
+ /// this block (it does not need the entire block).
+ ///
+ /// Returns Ok(()) if the block was valid
+ /// Returns Err(..) if the block was invalid
+ pub fn try_accept_tenure_end_block(
+ &mut self,
+ tenure_end_block: &NakamotoBlock,
+ ) -> Result<(), NetError> {
+ if !matches!(
+ &self.state,
+ NakamotoTenureDownloadState::WaitForTenureEndBlock(..)
+ | NakamotoTenureDownloadState::GetTenureEndBlock(_)
+ ) {
+ warn!("Invalid state for this method";
+ "state" => %self.state);
+ return Err(NetError::InvalidState);
+ };
+ let Some(tenure_start_block) = self.tenure_start_block.as_ref() else {
+ warn!("Invalid state -- tenure_start_block is not set");
+ return Err(NetError::InvalidState);
+ };
+
+ if self.tenure_end_block_id != tenure_end_block.header.block_id() {
+ // not the block we asked for
+ warn!("Invalid tenure-end block: unexpected";
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "tenure_id_end_block" => %self.tenure_end_block_id,
+ "block.header.block_id" => %tenure_end_block.header.block_id(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ }
+
+ if let Err(e) = tenure_end_block
+ .header
+ .verify_signer_signatures(&self.end_signer_keys)
+ {
+ // bad signature
+ warn!("Invalid tenure-end block: bad signer signature";
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "block.header.block_id" => %tenure_end_block.header.block_id(),
+ "state" => %self.state,
+ "error" => %e);
+ return Err(NetError::InvalidMessage);
+ }
+
+ // extract the needful -- need the tenure-change payload (which proves that the tenure-end
+ // block is the tenure-start block for the next tenure) and the parent block ID (which is
+ // the next block to download).
+ let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else {
+ warn!("Invalid tenure-end block: failed to validate tenure-start";
+ "block_id" => %tenure_end_block.block_id());
+ return Err(NetError::InvalidMessage);
+ };
+
+ if !valid {
+ warn!("Invalid tenure-end block: not a well-formed tenure-start block";
+ "block_id" => %tenure_end_block.block_id());
+ return Err(NetError::InvalidMessage);
+ }
+
+ let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else {
+ warn!("Invalid tenure-end block: no tenure-change transaction";
+ "block_id" => %tenure_end_block.block_id());
+ return Err(NetError::InvalidMessage);
+ };
+
+ // tc_payload must point to the tenure-start block's header
+ if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash {
+ warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block";
+ "start_block_id" => %tenure_start_block.block_id(),
+ "end_block_id" => %tenure_end_block.block_id(),
+ "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash,
+ "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash);
+ return Err(NetError::InvalidMessage);
+ }
+
+ debug!(
+ "Accepted tenure-end header for tenure {} block={}; expect {} blocks",
+ &self.tenure_id_consensus_hash,
+ &tenure_end_block.block_id(),
+ tc_payload.previous_tenure_blocks
+ );
+ self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone()));
+ self.state = NakamotoTenureDownloadState::GetTenureBlocks(
+ tenure_end_block.header.parent_block_id.clone(),
+ );
+ Ok(())
+ }
+
+ /// Determine how many blocks must be in this tenure.
+ /// Returns None if we don't have the start and end blocks yet.
+ pub fn tenure_length(&self) -> Option {
+ self.tenure_end_header
+ .as_ref()
+ .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks))
+ }
+
+ /// Add downloaded tenure blocks to this machine.
+ /// If we have collected all tenure blocks, then return them and transition to the Done state.
+ ///
+ /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in
+ /// ascending order by height, and will include the tenure-start block but exclude the
+ /// tenure-end block.
+ /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to
+ /// the next block to fetch (stored in self.state) will be updated.
+ /// Returns Err(..) if the blocks were invalid.
+ pub fn try_accept_tenure_blocks(
+ &mut self,
+ mut tenure_blocks: Vec,
+ ) -> Result>, NetError> {
+ let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else {
+ warn!("Invalid state for this method";
+ "state" => %self.state);
+ return Err(NetError::InvalidState);
+ };
+
+ if tenure_blocks.is_empty() {
+ // nothing to do
+ return Ok(None);
+ }
+
+ // blocks must be contiguous and in order from highest to lowest
+ let mut expected_block_id = block_cursor;
+ let mut count = 0;
+ for block in tenure_blocks.iter() {
+ if &block.header.block_id() != expected_block_id {
+ warn!("Unexpected Nakamoto block -- not part of tenure";
+ "expected_block_id" => %expected_block_id,
+ "block_id" => %block.header.block_id(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ }
+
+ if let Err(e) = block
+ .header
+ .verify_signer_signatures(&self.start_signer_keys)
+ {
+ warn!("Invalid block: bad signer signature";
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "block.header.block_id" => %block.header.block_id(),
+ "state" => %self.state,
+ "error" => %e);
+ return Err(NetError::InvalidMessage);
+ }
+
+ expected_block_id = &block.header.parent_block_id;
+ count += 1;
+ if self
+ .tenure_blocks
+ .as_ref()
+ .map(|blocks| blocks.len())
+ .unwrap_or(0)
+ .saturating_add(count)
+ > self.tenure_length().unwrap_or(0) as usize
+ {
+ // there are more blocks downloaded than indicated by the end-blocks tenure-change
+ // transaction.
+ warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0);
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "count" => %count,
+ "tenure_length" => self.tenure_length().unwrap_or(0),
+ "num_blocks" => tenure_blocks.len());
+ return Err(NetError::InvalidMessage);
+ }
+ }
+
+ if let Some(blocks) = self.tenure_blocks.as_mut() {
+ blocks.append(&mut tenure_blocks);
+ } else {
+ self.tenure_blocks = Some(tenure_blocks);
+ }
+
+ // did we reach the tenure start block?
+ let Some(blocks) = self.tenure_blocks.as_ref() else {
+ // unreachable but be defensive
+ warn!("Invalid state: no blocks (infallible -- got None)");
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(earliest_block) = blocks.last() else {
+ // unreachable but be defensive
+ warn!("Invalid state: no blocks (infallible -- got empty vec)");
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(tenure_start_block) = self.tenure_start_block.as_ref() else {
+ // unreachable but be defensive
+ warn!("Invalid state: no tenure-start block (infallible)");
+ return Err(NetError::InvalidState);
+ };
+
+ test_debug!(
+ "Accepted tenure blocks for tenure {} cursor={} ({})",
+ &self.tenure_id_consensus_hash,
+ &block_cursor,
+ count
+ );
+ if earliest_block.block_id() != tenure_start_block.block_id() {
+ // still have more blocks to download
+ let next_block_id = earliest_block.header.parent_block_id.clone();
+ debug!(
+ "Need more blocks for tenure {} (went from {} to {}, next is {})",
+ &self.tenure_id_consensus_hash,
+ &block_cursor,
+ &earliest_block.block_id(),
+ &next_block_id
+ );
+ self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id);
+ return Ok(None);
+ }
+
+ // finished!
+ self.state = NakamotoTenureDownloadState::Done;
+ Ok(self
+ .tenure_blocks
+ .take()
+ .map(|blocks| blocks.into_iter().rev().collect()))
+ }
+
+ /// Produce the next HTTP request that, when successfully executed, will fetch the data needed
+ /// to advance this state machine.
+    /// Not all states require an HTTP request for advancement.
+ ///
+ /// Returns Ok(Some(request)) if a request is needed
+ /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's
+ /// state)
+ /// Returns Err(()) if we're done.
+ pub fn make_next_download_request(
+ &self,
+ peerhost: PeerHost,
+ ) -> Result , ()> {
+ let request = match self.state {
+ NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => {
+ test_debug!("Request tenure-start block {}", &start_block_id);
+ StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone())
+ }
+ NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => {
+ // we're waiting for some other downloader's block-fetch to complete
+ test_debug!(
+ "Waiting for tenure-end block {} until {:?}",
+ &_block_id,
+ _deadline
+ );
+ return Ok(None);
+ }
+ NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => {
+ test_debug!("Request tenure-end block {}", &end_block_id);
+ StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone())
+ }
+ NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => {
+ test_debug!("Downloading tenure ending at {}", &end_block_id);
+ StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None)
+ }
+ NakamotoTenureDownloadState::Done => {
+ // nothing more to do
+ return Err(());
+ }
+ };
+ Ok(Some(request))
+ }
+
+ /// Begin the next download request for this state machine. The request will be sent to the
+ /// data URL corresponding to self.naddr.
+ /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The
+ /// caller should try this again until it gets one of the other possible return values.
+ /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken)
+ /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to
+ /// resolve its data URL to a socket address.
+ pub fn send_next_download_request(
+ &mut self,
+ network: &mut PeerNetwork,
+ neighbor_rpc: &mut NeighborRPC,
+ ) -> Result {
+ if neighbor_rpc.has_inflight(&self.naddr) {
+ test_debug!("Peer {} has an inflight request", &self.naddr);
+ return Ok(true);
+ }
+ if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
+ return Err(NetError::PeerNotConnected);
+ }
+
+ let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
+ // no conversation open to this neighbor
+ neighbor_rpc.add_dead(network, &self.naddr);
+ return Err(NetError::PeerNotConnected);
+ };
+
+ let request = match self.make_next_download_request(peerhost) {
+ Ok(Some(request)) => request,
+ Ok(None) => {
+ return Ok(true);
+ }
+ Err(_) => {
+ return Ok(false);
+ }
+ };
+
+ neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
+ self.idle = false;
+ Ok(true)
+ }
+
+ /// Handle a received StacksHttpResponse and advance the state machine.
+ /// If we get the full tenure's blocks, then return them.
+ /// Returns Ok(Some([blocks])) if we successfully complete the state machine.
+ /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done
+ /// yet. The caller should now call `send_next_download_request()`
+ /// Returns Err(..) on failure to process the response.
+ pub fn handle_next_download_response(
+ &mut self,
+ response: StacksHttpResponse,
+ ) -> Result>, NetError> {
+ self.idle = true;
+ match self.state {
+ NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => {
+ test_debug!(
+ "Got download response for tenure-start block {}",
+ &_block_id
+ );
+ let block = response.decode_nakamoto_block().map_err(|e| {
+ warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
+ e
+ })?;
+ self.try_accept_tenure_start_block(block)?;
+ Ok(None)
+ }
+ NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => {
+ test_debug!("Invalid state -- Got download response for WaitForTenureBlock");
+ Err(NetError::InvalidState)
+ }
+ NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => {
+ test_debug!("Got download response to tenure-end block {}", &_block_id);
+ let block = response.decode_nakamoto_block().map_err(|e| {
+ warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
+ e
+ })?;
+ self.try_accept_tenure_end_block(&block)?;
+ Ok(None)
+ }
+ NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => {
+ test_debug!(
+ "Got download response for tenure blocks ending at {}",
+ &_end_block_id
+ );
+ let blocks = response.decode_nakamoto_tenure().map_err(|e| {
+ warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e);
+ e
+ })?;
+ self.try_accept_tenure_blocks(blocks)
+ }
+ NakamotoTenureDownloadState::Done => Err(NetError::InvalidState),
+ }
+ }
+
+ pub fn is_done(&self) -> bool {
+ self.state == NakamotoTenureDownloadState::Done
+ }
+}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs
new file mode 100644
index 0000000000..c6e5ee0703
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs
@@ -0,0 +1,699 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+ BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::to_hex;
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::nakamoto::{
+ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+ Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+ EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// Download states for an historic tenure. This is a tenure for which we know the hashes of the
+/// start and end block. This includes all tenures except for the two most recent ones.
+#[derive(Debug, Clone, PartialEq)]
+pub enum NakamotoTenureDownloadState {
+ /// Getting the tenure-start block (the given StacksBlockId is its block ID).
+ GetTenureStartBlock(StacksBlockId),
+ /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not
+ /// always) handled by the execution of another NakamotoTenureDownloader. The only
+ /// exceptions are as follows:
+ ///
+ /// * if this tenure contains the anchor block, and it's the last tenure in the
+ /// reward cycle. In this case, the end-block must be directly fetched, since there will be no
+ /// follow-on NakamotoTenureDownloader in the same reward cycle who can provide this.
+ ///
+ /// * if this tenure is the highest complete tenure, and we just learned the start-block of the
+ /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block
+ /// already known. This step will be skipped because the end-block is already present in the
+ /// state machine.
+ ///
+ /// * if the deadline (second parameter) is exceeded, the state machine transitions to
+ /// GetTenureEndBlock.
+ ///
+ /// The two fields here are:
+ /// * the block ID of the last block in the tenure (which happens to be the block ID of the
+ /// start block of the next tenure)
+ /// * the deadline by which this state machine needs to have obtained the tenure end-block
+ /// before transitioning to `GetTenureEndBlock`.
+ WaitForTenureEndBlock(StacksBlockId, Instant),
+ /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks
+ /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in
+ /// which we cannot quickly get the tenure-end block.
+ ///
+ /// The field here is the block ID of the tenure end block.
+ GetTenureEndBlock(StacksBlockId),
+ /// Receiving tenure blocks.
+ /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This
+ /// is because a tenure is fetched in order from highest block to lowest block.
+ GetTenureBlocks(StacksBlockId),
+ /// We have gotten all the blocks for this tenure
+ Done,
+}
+
+pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1;
+
+impl fmt::Display for NakamotoTenureDownloadState {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self)
+ }
+}
+
+/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs
+/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent
+/// tenures).
+///
+/// This state machine works as follows:
+///
+/// 1. Fetch the first block in the given tenure
+/// 2. Obtain the last block in the given tenure, via one of the following means:
+/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this
+/// machine's tenure, and can be copied into this machine.
+/// b. This machine is configured to directly fetch the end-block. This only happens if this
+/// tenure both contains the anchor block for the next reward cycle and happens to be the last
+/// tenure in the current reward cycle.
+/// c. This machine is given the end-block on instantiation. This only happens when the machine
+/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure);
+/// in this case, the end-block is the start-block of the ongoing tenure.
+/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse
+/// order. As blocks are found, their signer signatures will be validated against the signer
+/// public keys for this tenure; their hash-chain continuity will be validated against the start
+/// and end block hashes; their quantity will be validated against the tenure-change transaction
+/// in the end-block.
+///
+/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto
+/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of
+/// whether or not it straddles a reward cycle boundary).
+#[derive(Debug, Clone, PartialEq)]
+pub struct NakamotoTenureDownloader {
+ /// Consensus hash that identifies this tenure
+ pub tenure_id_consensus_hash: ConsensusHash,
+ /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and
+ /// sortition DB.
+ pub tenure_start_block_id: StacksBlockId,
+ /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID
+ /// for some other tenure). Learned from the inventory state machine and sortition DB.
+ pub tenure_end_block_id: StacksBlockId,
+ /// Address of who we're asking for blocks
+ pub naddr: NeighborAddress,
+ /// Signer public keys that signed the start-block of this tenure, in reward cycle order
+ pub start_signer_keys: RewardSet,
+ /// Signer public keys that signed the end-block of this tenure
+ pub end_signer_keys: RewardSet,
+ /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with
+ /// this state machine.
+ pub idle: bool,
+
+ /// What state we're in for downloading this tenure
+ pub state: NakamotoTenureDownloadState,
+ /// Tenure-start block
+ pub tenure_start_block: Option<NakamotoBlock>,
+ /// Pre-stored tenure end block (used by the unconfirmed block downloader).
+ /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once
+ /// the start-block for the current tenure is downloaded. This is that start-block, which is
+ /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step.
+ pub tenure_end_block: Option<NakamotoBlock>,
+ /// Tenure-end block header and TenureChange
+ pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>,
+ /// Tenure blocks
+ pub tenure_blocks: Option<Vec<NakamotoBlock>>,
+}
+
+impl NakamotoTenureDownloader {
+ pub fn new(
+ tenure_id_consensus_hash: ConsensusHash,
+ tenure_start_block_id: StacksBlockId,
+ tenure_end_block_id: StacksBlockId,
+ naddr: NeighborAddress,
+ start_signer_keys: RewardSet,
+ end_signer_keys: RewardSet,
+ ) -> Self {
+ test_debug!(
+ "Instantiate downloader to {} for tenure {}: {}-{}",
+ &naddr,
+ &tenure_id_consensus_hash,
+ &tenure_start_block_id,
+ &tenure_end_block_id,
+ );
+ Self {
+ tenure_id_consensus_hash,
+ tenure_start_block_id,
+ tenure_end_block_id,
+ naddr,
+ start_signer_keys,
+ end_signer_keys,
+ idle: false,
+ state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()),
+ tenure_start_block: None,
+ tenure_end_header: None,
+ tenure_end_block: None,
+ tenure_blocks: None,
+ }
+ }
+
+ /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed
+ /// tenure. This supplies the tenure end-block if known in advance.
+ pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self {
+ self.tenure_end_block = Some(tenure_end_block);
+ self
+ }
+
+ /// Is this downloader waiting for the tenure-end block data from some other downloader? Per
+ /// the struct documentation, this is case 2(a).
+ pub fn is_waiting(&self) -> bool {
+ if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /// Validate and accept a given tenure-start block. If accepted, then advance the state.
+ /// Returns Ok(()) if the start-block is valid.
+ /// Returns Err(..) if it is not valid.
+ pub fn try_accept_tenure_start_block(
+ &mut self,
+ tenure_start_block: NakamotoBlock,
+ ) -> Result<(), NetError> {
+ let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else {
+ // not the right state for this
+ warn!("Invalid state for this method";
+ "state" => %self.state);
+ return Err(NetError::InvalidState);
+ };
+
+ if self.tenure_start_block_id != tenure_start_block.header.block_id() {
+ // not the block we were expecting
+ warn!("Invalid tenure-start block: unexpected";
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "tenure_id_start_block" => %self.tenure_start_block_id,
+ "tenure_start_block ID" => %tenure_start_block.header.block_id(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ }
+
+ if let Err(e) = tenure_start_block
+ .header
+ .verify_signer_signatures(&self.start_signer_keys)
+ {
+ // signature verification failed
+ warn!("Invalid tenure-start block: bad signer signature";
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "block.header.block_id" => %tenure_start_block.header.block_id(),
+ "state" => %self.state,
+ "error" => %e);
+ return Err(NetError::InvalidMessage);
+ }
+
+ debug!(
+ "Accepted tenure-start block for tenure {} block={}",
+ &self.tenure_id_consensus_hash,
+ &tenure_start_block.block_id()
+ );
+ self.tenure_start_block = Some(tenure_start_block);
+
+ if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() {
+ // tenure_end_header supplied externally
+ self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone());
+ } else if let Some(tenure_end_block) = self.tenure_end_block.take() {
+ // we already have the tenure-end block, so immediately proceed to accept it.
+ test_debug!(
+ "Preemptively process tenure-end block {} for tenure {}",
+ tenure_end_block.block_id(),
+ &self.tenure_id_consensus_hash
+ );
+ self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock(
+ tenure_end_block.block_id(),
+ Instant::now()
+ .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0))
+ .ok_or(NetError::OverflowError("Deadline is too big".into()))?,
+ );
+ self.try_accept_tenure_end_block(&tenure_end_block)?;
+ } else {
+ // need to get tenure_end_header. By default, assume that another
+ // NakamotoTenureDownloader will provide this block, and allow the
+ // NakamotoTenureDownloaderSet instance that manages a collection of these
+ // state-machines make the call to require this one to fetch the block directly.
+ self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock(
+ self.tenure_end_block_id.clone(),
+ Instant::now()
+ .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0))
+ .ok_or(NetError::OverflowError("Deadline is too big".into()))?,
+ );
+ }
+ Ok(())
+ }
+
+ /// Transition this state-machine from waiting for its tenure-end block from another
+ /// state-machine to directly fetching it. This only needs to happen if the tenure this state
+ /// machine is downloading contains the PoX anchor block, and it's also the last confirmed
+ /// tenure in this reward cycle.
+ ///
+ /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and
+ /// runs a set of these machines based on the peers' inventory vectors. But because we don't
+ /// know if this is the PoX anchor block tenure (or even the last tenure) until we have
+ /// inventory vectors for this tenure's reward cycle, this state-transition must be driven
+ /// after this machine's instantiation.
+ pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> {
+ let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state
+ else {
+ return Err(NetError::InvalidState);
+ };
+ test_debug!(
+ "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)",
+ &self.naddr,
+ &end_block_id
+ );
+ self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id);
+ Ok(())
+ }
+
+ /// Transition to fetching the tenure-end block directly if waiting has taken too long.
+ pub fn transition_to_fetch_end_block_on_timeout(&mut self) {
+ if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) =
+ self.state
+ {
+ if wait_deadline < Instant::now() {
+ test_debug!(
+ "Transition downloader to {} to directly fetch tenure-end block {} (timed out)",
+ &self.naddr,
+ &end_block_id
+ );
+ self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id);
+ }
+ }
+ }
+
+ /// Validate and accept a tenure-end block. If accepted, then advance the state.
+ /// Once accepted, this function extracts the tenure-change transaction and block header from
+ /// this block (it does not need the entire block).
+ ///
+ /// Returns Ok(()) if the block was valid
+ /// Returns Err(..) if the block was invalid
+ pub fn try_accept_tenure_end_block(
+ &mut self,
+ tenure_end_block: &NakamotoBlock,
+ ) -> Result<(), NetError> {
+ if !matches!(
+ &self.state,
+ NakamotoTenureDownloadState::WaitForTenureEndBlock(..)
+ | NakamotoTenureDownloadState::GetTenureEndBlock(_)
+ ) {
+ warn!("Invalid state for this method";
+ "state" => %self.state);
+ return Err(NetError::InvalidState);
+ };
+ let Some(tenure_start_block) = self.tenure_start_block.as_ref() else {
+ warn!("Invalid state -- tenure_start_block is not set");
+ return Err(NetError::InvalidState);
+ };
+
+ if self.tenure_end_block_id != tenure_end_block.header.block_id() {
+ // not the block we asked for
+ warn!("Invalid tenure-end block: unexpected";
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "tenure_id_end_block" => %self.tenure_end_block_id,
+ "block.header.block_id" => %tenure_end_block.header.block_id(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ }
+
+ if let Err(e) = tenure_end_block
+ .header
+ .verify_signer_signatures(&self.end_signer_keys)
+ {
+ // bad signature
+ warn!("Invalid tenure-end block: bad signer signature";
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "block.header.block_id" => %tenure_end_block.header.block_id(),
+ "state" => %self.state,
+ "error" => %e);
+ return Err(NetError::InvalidMessage);
+ }
+
+ // extract the needful -- need the tenure-change payload (which proves that the tenure-end
+ // block is the tenure-start block for the next tenure) and the parent block ID (which is
+ // the next block to download).
+ let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else {
+ warn!("Invalid tenure-end block: failed to validate tenure-start";
+ "block_id" => %tenure_end_block.block_id());
+ return Err(NetError::InvalidMessage);
+ };
+
+ if !valid {
+ warn!("Invalid tenure-end block: not a well-formed tenure-start block";
+ "block_id" => %tenure_end_block.block_id());
+ return Err(NetError::InvalidMessage);
+ }
+
+ let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else {
+ warn!("Invalid tenure-end block: no tenure-change transaction";
+ "block_id" => %tenure_end_block.block_id());
+ return Err(NetError::InvalidMessage);
+ };
+
+ // tc_payload must point to the tenure-start block's header
+ if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash {
+ warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block";
+ "start_block_id" => %tenure_start_block.block_id(),
+ "end_block_id" => %tenure_end_block.block_id(),
+ "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash,
+ "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash);
+ return Err(NetError::InvalidMessage);
+ }
+
+ debug!(
+ "Accepted tenure-end header for tenure {} block={}; expect {} blocks",
+ &self.tenure_id_consensus_hash,
+ &tenure_end_block.block_id(),
+ tc_payload.previous_tenure_blocks
+ );
+ self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone()));
+ self.state = NakamotoTenureDownloadState::GetTenureBlocks(
+ tenure_end_block.header.parent_block_id.clone(),
+ );
+ Ok(())
+ }
+
+ /// Determine how many blocks must be in this tenure.
+ /// Returns None if we don't have the start and end blocks yet.
+ pub fn tenure_length(&self) -> Option<u64> {
+ self.tenure_end_header
+ .as_ref()
+ .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks))
+ }
+
+ /// Add downloaded tenure blocks to this machine.
+ /// If we have collected all tenure blocks, then return them and transition to the Done state.
+ ///
+ /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in
+ /// ascending order by height, and will include the tenure-start block but exclude the
+ /// tenure-end block.
+ /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to
+ /// the next block to fetch (stored in self.state) will be updated.
+ /// Returns Err(..) if the blocks were invalid.
+ pub fn try_accept_tenure_blocks(
+ &mut self,
+ mut tenure_blocks: Vec<NakamotoBlock>,
+ ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+ let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else {
+ warn!("Invalid state for this method";
+ "state" => %self.state);
+ return Err(NetError::InvalidState);
+ };
+
+ if tenure_blocks.is_empty() {
+ // nothing to do
+ return Ok(None);
+ }
+
+ // blocks must be contiguous and in order from highest to lowest
+ let mut expected_block_id = block_cursor;
+ let mut count = 0;
+ for block in tenure_blocks.iter() {
+ if &block.header.block_id() != expected_block_id {
+ warn!("Unexpected Nakamoto block -- not part of tenure";
+ "expected_block_id" => %expected_block_id,
+ "block_id" => %block.header.block_id(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ }
+
+ if let Err(e) = block
+ .header
+ .verify_signer_signatures(&self.start_signer_keys)
+ {
+ warn!("Invalid block: bad signer signature";
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "block.header.block_id" => %block.header.block_id(),
+ "state" => %self.state,
+ "error" => %e);
+ return Err(NetError::InvalidMessage);
+ }
+
+ expected_block_id = &block.header.parent_block_id;
+ count += 1;
+ if self
+ .tenure_blocks
+ .as_ref()
+ .map(|blocks| blocks.len())
+ .unwrap_or(0)
+ .saturating_add(count)
+ > self.tenure_length().unwrap_or(0) as usize
+ {
+ // there are more blocks downloaded than indicated by the end-blocks tenure-change
+ // transaction.
+ warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0);
+ "tenure_id" => %self.tenure_id_consensus_hash,
+ "count" => %count,
+ "tenure_length" => self.tenure_length().unwrap_or(0),
+ "num_blocks" => tenure_blocks.len());
+ return Err(NetError::InvalidMessage);
+ }
+ }
+
+ if let Some(blocks) = self.tenure_blocks.as_mut() {
+ blocks.append(&mut tenure_blocks);
+ } else {
+ self.tenure_blocks = Some(tenure_blocks);
+ }
+
+ // did we reach the tenure start block?
+ let Some(blocks) = self.tenure_blocks.as_ref() else {
+ // unreachable but be defensive
+ warn!("Invalid state: no blocks (infallible -- got None)");
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(earliest_block) = blocks.last() else {
+ // unreachable but be defensive
+ warn!("Invalid state: no blocks (infallible -- got empty vec)");
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(tenure_start_block) = self.tenure_start_block.as_ref() else {
+ // unreachable but be defensive
+ warn!("Invalid state: no tenure-start block (infallible)");
+ return Err(NetError::InvalidState);
+ };
+
+ test_debug!(
+ "Accepted tenure blocks for tenure {} cursor={} ({})",
+ &self.tenure_id_consensus_hash,
+ &block_cursor,
+ count
+ );
+ if earliest_block.block_id() != tenure_start_block.block_id() {
+ // still have more blocks to download
+ let next_block_id = earliest_block.header.parent_block_id.clone();
+ debug!(
+ "Need more blocks for tenure {} (went from {} to {}, next is {})",
+ &self.tenure_id_consensus_hash,
+ &block_cursor,
+ &earliest_block.block_id(),
+ &next_block_id
+ );
+ self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id);
+ return Ok(None);
+ }
+
+ // finished!
+ self.state = NakamotoTenureDownloadState::Done;
+ Ok(self
+ .tenure_blocks
+ .take()
+ .map(|blocks| blocks.into_iter().rev().collect()))
+ }
+
+ /// Produce the next HTTP request that, when successfully executed, will fetch the data needed
+ /// to advance this state machine.
+ /// Not all states require an HTTP request for advancement.
+ ///
+ /// Returns Ok(Some(request)) if a request is needed
+ /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's
+ /// state)
+ /// Returns Err(()) if we're done.
+ pub fn make_next_download_request(
+ &self,
+ peerhost: PeerHost,
+ ) -> Result<Option<StacksHttpRequest>, ()> {
+ let request = match self.state {
+ NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => {
+ test_debug!("Request tenure-start block {}", &start_block_id);
+ StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone())
+ }
+ NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => {
+ // we're waiting for some other downloader's block-fetch to complete
+ test_debug!(
+ "Waiting for tenure-end block {} until {:?}",
+ &_block_id,
+ _deadline
+ );
+ return Ok(None);
+ }
+ NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => {
+ test_debug!("Request tenure-end block {}", &end_block_id);
+ StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone())
+ }
+ NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => {
+ test_debug!("Downloading tenure ending at {}", &end_block_id);
+ StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None)
+ }
+ NakamotoTenureDownloadState::Done => {
+ // nothing more to do
+ return Err(());
+ }
+ };
+ Ok(Some(request))
+ }
+
+ /// Begin the next download request for this state machine. The request will be sent to the
+ /// data URL corresponding to self.naddr.
+ /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The
+ /// caller should try this again until it gets one of the other possible return values.
+ /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken)
+ /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to
+ /// resolve its data URL to a socket address.
+ pub fn send_next_download_request(
+ &mut self,
+ network: &mut PeerNetwork,
+ neighbor_rpc: &mut NeighborRPC,
+ ) -> Result<bool, NetError> {
+ if neighbor_rpc.has_inflight(&self.naddr) {
+ test_debug!("Peer {} has an inflight request", &self.naddr);
+ return Ok(true);
+ }
+ if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
+ return Err(NetError::PeerNotConnected);
+ }
+
+ let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
+ // no conversation open to this neighbor
+ neighbor_rpc.add_dead(network, &self.naddr);
+ return Err(NetError::PeerNotConnected);
+ };
+
+ let request = match self.make_next_download_request(peerhost) {
+ Ok(Some(request)) => request,
+ Ok(None) => {
+ return Ok(true);
+ }
+ Err(_) => {
+ return Ok(false);
+ }
+ };
+
+ neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
+ self.idle = false;
+ Ok(true)
+ }
+
+ /// Handle a received StacksHttpResponse and advance the state machine.
+ /// If we get the full tenure's blocks, then return them.
+ /// Returns Ok(Some([blocks])) if we successfully complete the state machine.
+ /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done
+ /// yet. The caller should now call `send_next_download_request()`
+ /// Returns Err(..) on failure to process the response.
+ pub fn handle_next_download_response(
+ &mut self,
+ response: StacksHttpResponse,
+ ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+ self.idle = true;
+ match self.state {
+ NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => {
+ test_debug!(
+ "Got download response for tenure-start block {}",
+ &_block_id
+ );
+ let block = response.decode_nakamoto_block().map_err(|e| {
+ warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
+ e
+ })?;
+ self.try_accept_tenure_start_block(block)?;
+ Ok(None)
+ }
+ NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => {
+ test_debug!("Invalid state -- Got download response for WaitForTenureBlock");
+ Err(NetError::InvalidState)
+ }
+ NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => {
+ test_debug!("Got download response to tenure-end block {}", &_block_id);
+ let block = response.decode_nakamoto_block().map_err(|e| {
+ warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
+ e
+ })?;
+ self.try_accept_tenure_end_block(&block)?;
+ Ok(None)
+ }
+ NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => {
+ test_debug!(
+ "Got download response for tenure blocks ending at {}",
+ &_end_block_id
+ );
+ let blocks = response.decode_nakamoto_tenure().map_err(|e| {
+ warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e);
+ e
+ })?;
+ self.try_accept_tenure_blocks(blocks)
+ }
+ NakamotoTenureDownloadState::Done => Err(NetError::InvalidState),
+ }
+ }
+
+ pub fn is_done(&self) -> bool {
+ self.state == NakamotoTenureDownloadState::Done
+ }
+}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs
new file mode 100644
index 0000000000..8a154637cf
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs
@@ -0,0 +1,668 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+ BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::to_hex;
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo};
+use crate::chainstate::nakamoto::{
+ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+ Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+ EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::download::nakamoto::{
+ AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader,
+ NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure,
+};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// A set of confirmed downloader state machines assigned to one or more neighbors. The block
+/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure
+/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer
+/// connections to downloader state machines, such that each peer is assigned to at most one
+/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at
+/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine
+/// can make progress even if there is only one available peer (in which case, that peer will get
+/// scheduled across multiple machines to drive their progress in the right sequence such that
+/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer).
+pub struct NakamotoTenureDownloaderSet {
+ /// A list of instantiated downloaders that are in progress
+ pub(crate) downloaders: Vec >,
+ /// An assignment of peers to downloader machines in the `downloaders` list.
+ pub(crate) peers: HashMap,
+ /// The set of tenures that have been successfully downloaded (but possibly not yet stored or
+ /// processed)
+ pub(crate) completed_tenures: HashSet,
+}
+
+impl NakamotoTenureDownloaderSet {
+ pub fn new() -> Self {
+ Self {
+ downloaders: vec![],
+ peers: HashMap::new(),
+ completed_tenures: HashSet::new(),
+ }
+ }
+
+ /// Assign the given peer to the given downloader state machine. Allocate a slot for it if
+ /// needed.
+ fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) {
+ test_debug!(
+ "Add downloader for tenure {} driven by {}",
+ &downloader.tenure_id_consensus_hash,
+ &naddr
+ );
+ if let Some(idx) = self.peers.get(&naddr) {
+ self.downloaders[*idx] = Some(downloader);
+ } else {
+ self.downloaders.push(Some(downloader));
+ self.peers.insert(naddr, self.downloaders.len() - 1);
+ }
+ }
+
+ /// Does the given neighbor have an assigned downloader state machine?
+ pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool {
+ let Some(idx) = self.peers.get(naddr) else {
+ return false;
+ };
+ let Some(downloader_opt) = self.downloaders.get(*idx) else {
+ return false;
+ };
+ downloader_opt.is_some()
+ }
+
+ /// Drop the downloader associated with the given neighbor, if any.
+ pub fn clear_downloader(&mut self, naddr: &NeighborAddress) {
+ let Some(index) = self.peers.remove(naddr) else {
+ return;
+ };
+ self.downloaders[index] = None;
+ }
+
+ /// How many downloaders are there?
+ pub fn num_downloaders(&self) -> usize {
+ self.downloaders
+ .iter()
+ .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc })
+ }
+
+ /// How many downloaders are there, which are scheduled?
+ pub fn num_scheduled_downloaders(&self) -> usize {
+ let mut cnt = 0;
+ for (_, idx) in self.peers.iter() {
+ if let Some(Some(_)) = self.downloaders.get(*idx) {
+ cnt += 1;
+ }
+ }
+ cnt
+ }
+
+ /// Add a sequence of (address, downloader) pairs to this downloader set.
+ pub(crate) fn add_downloaders(
+ &mut self,
+ iter: impl IntoIterator- ,
+ ) {
+ for (naddr, downloader) in iter {
+ if self.has_downloader(&naddr) {
+ test_debug!("Already have downloader for {}", &naddr);
+ continue;
+ }
+ self.add_downloader(naddr, downloader);
+ }
+ }
+
+ /// Count up the number of in-flight messages, based on the states of each instantiated
+ /// downloader.
+ pub fn inflight(&self) -> usize {
+ let mut cnt = 0;
+ for downloader_opt in self.downloaders.iter() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ if downloader.idle {
+ continue;
+ }
+ if downloader.is_waiting() {
+ continue;
+ }
+ if downloader.is_done() {
+ continue;
+ }
+ cnt += 1;
+ }
+ cnt
+ }
+
+ /// Determine whether or not there exists a downloader for the given tenure, identified by its
+ /// consensus hash.
+ pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool {
+ self.downloaders
+ .iter()
+ .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch))
+ .is_some()
+ }
+
+ /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders.
+ pub fn is_empty(&self) -> bool {
+ for downloader_opt in self.downloaders.iter() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ if downloader.is_done() {
+ continue;
+ }
+ test_debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state);
+ return false;
+ }
+ true
+ }
+
+ /// Try to resume processing a download state machine with a given peer. Since a peer is
+ /// detached from the machine after a single RPC call, this call is needed to re-attach it to a
+ /// (potentially different, unblocked) machine for the next RPC call to this peer.
+ ///
+ /// Returns true if the peer gets scheduled.
+ /// Returns false if not.
+ pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool {
+ if let Some(idx) = self.peers.get(&naddr) {
+ let Some(Some(_downloader)) = self.downloaders.get(*idx) else {
+ return false;
+ };
+
+ test_debug!(
+ "Peer {} already bound to downloader for {}",
+ &naddr,
+ &_downloader.tenure_id_consensus_hash
+ );
+ return true;
+ }
+ for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ if !downloader.idle {
+ continue;
+ }
+ if downloader.is_waiting() {
+ continue;
+ }
+ if downloader.naddr != naddr {
+ continue;
+ }
+ test_debug!(
+ "Assign peer {} to work on downloader for {} in state {}",
+ &naddr,
+ &downloader.tenure_id_consensus_hash,
+ &downloader.state
+ );
+ self.peers.insert(naddr, i);
+ return true;
+ }
+ return false;
+ }
+
+ /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to
+ /// blocked downloaders.
+ pub fn clear_available_peers(&mut self) {
+ let mut idled: Vec
= vec![];
+ for (naddr, i) in self.peers.iter() {
+ let Some(downloader_opt) = self.downloaders.get(*i) else {
+ // should be unreachable
+ idled.push(naddr.clone());
+ continue;
+ };
+ let Some(downloader) = downloader_opt else {
+ test_debug!("Remove peer {} for null download {}", &naddr, i);
+ idled.push(naddr.clone());
+ continue;
+ };
+ if downloader.idle || downloader.is_waiting() {
+ test_debug!(
+ "Remove idled peer {} for tenure download {}",
+ &naddr,
+ &downloader.tenure_id_consensus_hash
+ );
+ idled.push(naddr.clone());
+ }
+ }
+ for naddr in idled.into_iter() {
+ self.peers.remove(&naddr);
+ }
+ }
+
+ /// Clear out downloaders (but not their peers) that have finished. The caller should follow
+ /// this up with a call to `clear_available_peers()`.
+ pub fn clear_finished_downloaders(&mut self) {
+ for downloader_opt in self.downloaders.iter_mut() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ if downloader.is_done() {
+ *downloader_opt = None;
+ }
+ }
+ }
+
+ /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These
+ /// will be fed into other downloaders which are blocked on needing their tenure-end blocks.
+ pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap {
+ let mut ret = HashMap::new();
+ for downloader_opt in self.downloaders.iter() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ let Some(block) = downloader.tenure_start_block.as_ref() else {
+ continue;
+ };
+ ret.insert(block.block_id(), block.clone());
+ }
+ ret
+ }
+
+ /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their
+ /// tenure-end blocks.
+ /// Return a list of peers driving downloaders with failing `tenure_start_blocks`
+ pub(crate) fn handle_tenure_end_blocks(
+ &mut self,
+ tenure_start_blocks: &HashMap,
+ ) -> Vec {
+ test_debug!(
+ "handle tenure-end blocks: {:?}",
+ &tenure_start_blocks.keys().collect::>()
+ );
+ let mut dead = vec![];
+ for downloader_opt in self.downloaders.iter_mut() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) =
+ &downloader.state
+ else {
+ continue;
+ };
+ let Some(end_block) = tenure_start_blocks.get(end_block_id) else {
+ continue;
+ };
+ if let Err(e) = downloader.try_accept_tenure_end_block(end_block) {
+ warn!(
+ "Failed to accept tenure end-block {} for tenure {}: {:?}",
+ &end_block.block_id(),
+ &downloader.tenure_id_consensus_hash,
+ &e
+ );
+ dead.push(downloader.naddr.clone());
+ }
+ }
+ dead
+ }
+
+ /// Does there exist a downloader (possibly unscheduled) for the given tenure?
+ pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool {
+ for downloader_opt in self.downloaders.iter() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ if &downloader.tenure_id_consensus_hash == tenure_id {
+ test_debug!(
+ "Have downloader for tenure {} already (idle={}, waiting={}, state={})",
+ tenure_id,
+ downloader.idle,
+ downloader.is_waiting(),
+ &downloader.state
+ );
+ return true;
+ }
+ }
+ false
+ }
+
+ /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor
+ /// block, we need to go and directly fetch its end block instead of waiting for another
+ /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method
+ /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block.
+ pub(crate) fn try_transition_fetch_tenure_end_blocks(
+ &mut self,
+ tenure_block_ids: &HashMap,
+ ) {
+ for downloader_opt in self.downloaders.iter_mut() {
+ let Some(downloader) = downloader_opt.as_mut() else {
+ continue;
+ };
+ downloader.transition_to_fetch_end_block_on_timeout();
+ }
+
+ // find tenures in which we need to fetch the tenure-end block directly.
+ let mut last_available_tenures: HashSet = HashSet::new();
+ for (_, all_available) in tenure_block_ids.iter() {
+ for (_, available) in all_available.iter() {
+ if available.fetch_end_block {
+ last_available_tenures.insert(available.end_block_id.clone());
+ }
+ }
+ }
+
+ // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to
+ // fetching
+ for downloader_opt in self.downloaders.iter_mut() {
+ let Some(downloader) = downloader_opt.as_mut() else {
+ continue;
+ };
+ if !downloader.idle {
+ continue;
+ }
+ if !downloader.is_waiting() {
+ continue;
+ }
+ if !last_available_tenures.contains(&downloader.tenure_end_block_id) {
+ continue;
+ }
+ test_debug!(
+ "Transition downloader for {} from waiting to fetching",
+ &downloader.tenure_id_consensus_hash
+ );
+ if let Err(e) = downloader.transition_to_fetch_end_block() {
+ warn!(
+ "Downloader for {} failed to transition to fetch end block: {:?}",
+ &downloader.tenure_id_consensus_hash, &e
+ );
+ }
+ }
+ }
+
+ /// Create a given number of downloads from a schedule and availability set.
+ /// Removes items from the schedule, and neighbors from the availability set.
+ /// A neighbor will be issued at most one request.
+ pub(crate) fn make_tenure_downloaders(
+ &mut self,
+ schedule: &mut VecDeque,
+ available: &mut HashMap>,
+ tenure_block_ids: &HashMap,
+ count: usize,
+ current_reward_cycles: &BTreeMap,
+ ) {
+ test_debug!("schedule: {:?}", schedule);
+ test_debug!("available: {:?}", &available);
+ test_debug!("tenure_block_ids: {:?}", &tenure_block_ids);
+ test_debug!("inflight: {}", self.inflight());
+ test_debug!(
+ "count: {}, running: {}, scheduled: {}",
+ count,
+ self.num_downloaders(),
+ self.num_scheduled_downloaders()
+ );
+
+ self.clear_finished_downloaders();
+ self.clear_available_peers();
+ self.try_transition_fetch_tenure_end_blocks(tenure_block_ids);
+ while self.inflight() < count {
+ let Some(ch) = schedule.front() else {
+ break;
+ };
+ if self.completed_tenures.contains(&ch) {
+ test_debug!("Already successfully downloaded tenure {}", &ch);
+ schedule.pop_front();
+ continue;
+ }
+ let Some(neighbors) = available.get_mut(ch) else {
+ // not found on any neighbors, so stop trying this tenure
+ test_debug!("No neighbors have tenure {}", ch);
+ schedule.pop_front();
+ continue;
+ };
+ if neighbors.is_empty() {
+ // no more neighbors to try
+ test_debug!("No more neighbors can serve tenure {}", ch);
+ schedule.pop_front();
+ continue;
+ }
+ let Some(naddr) = neighbors.pop() else {
+ test_debug!("No more neighbors can serve tenure {}", ch);
+ schedule.pop_front();
+ continue;
+ };
+ if self.try_resume_peer(naddr.clone()) {
+ continue;
+ };
+ if self.has_downloader_for_tenure(&ch) {
+ schedule.pop_front();
+ continue;
+ }
+
+ let Some(available_tenures) = tenure_block_ids.get(&naddr) else {
+ // this peer doesn't have any known tenures, so try the others
+ test_debug!("No tenures available from {}", &naddr);
+ continue;
+ };
+ let Some(tenure_info) = available_tenures.get(ch) else {
+ // this peer does not have a tenure start/end block for this tenure, so try the
+ // others.
+ test_debug!("Neighbor {} does not serve tenure {}", &naddr, ch);
+ continue;
+ };
+ let Some(Some(start_reward_set)) = current_reward_cycles
+ .get(&tenure_info.start_reward_cycle)
+ .map(|cycle_info| cycle_info.reward_set())
+ else {
+ test_debug!(
+ "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}",
+ tenure_info.start_reward_cycle,
+ &tenure_info
+ );
+ schedule.pop_front();
+ continue;
+ };
+ let Some(Some(end_reward_set)) = current_reward_cycles
+ .get(&tenure_info.end_reward_cycle)
+ .map(|cycle_info| cycle_info.reward_set())
+ else {
+ test_debug!(
+ "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}",
+ tenure_info.end_reward_cycle,
+ &tenure_info
+ );
+ schedule.pop_front();
+ continue;
+ };
+
+ test_debug!(
+ "Download tenure {} (start={}, end={}) (rc {},{})",
+ &ch,
+ &tenure_info.start_block_id,
+ &tenure_info.end_block_id,
+ tenure_info.start_reward_cycle,
+ tenure_info.end_reward_cycle
+ );
+ let tenure_download = NakamotoTenureDownloader::new(
+ ch.clone(),
+ tenure_info.start_block_id.clone(),
+ tenure_info.end_block_id.clone(),
+ naddr.clone(),
+ start_reward_set.clone(),
+ end_reward_set.clone(),
+ );
+
+ test_debug!("Request tenure {} from neighbor {}", ch, &naddr);
+ self.add_downloader(naddr, tenure_download);
+ schedule.pop_front();
+ }
+ }
+
+ /// Run all confirmed downloaders.
+ /// * Identify neighbors for which we do not have an inflight request
+ /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that
+ /// request to the neighbor and begin driving the underlying socket I/O.
+ /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance
+ /// its state.
+ /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken.
+ ///
+ /// Returns the set of downloaded blocks obtained for completed downloaders. These will be
+ /// full confirmed tenures.
+ pub fn run(
+ &mut self,
+ network: &mut PeerNetwork,
+ neighbor_rpc: &mut NeighborRPC,
+ ) -> HashMap> {
+ let addrs: Vec<_> = self.peers.keys().cloned().collect();
+ let mut finished = vec![];
+ let mut finished_tenures = vec![];
+ let mut new_blocks = HashMap::new();
+
+ // send requests
+ for (naddr, index) in self.peers.iter() {
+ if neighbor_rpc.has_inflight(&naddr) {
+ test_debug!("Peer {} has an inflight request", &naddr);
+ continue;
+ }
+ let Some(Some(downloader)) = self.downloaders.get_mut(*index) else {
+ test_debug!("No downloader for {}", &naddr);
+ continue;
+ };
+ if downloader.is_done() {
+ test_debug!("Downloader for {} is done", &naddr);
+ finished.push(naddr.clone());
+ finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
+ continue;
+ }
+ test_debug!(
+ "Send request to {} for tenure {} (state {})",
+ &naddr,
+ &downloader.tenure_id_consensus_hash,
+ &downloader.state
+ );
+ let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else {
+ test_debug!("Downloader for {} failed; this peer is dead", &naddr);
+ neighbor_rpc.add_dead(network, naddr);
+ continue;
+ };
+ if !sent {
+ // this downloader is dead or broken
+ finished.push(naddr.clone());
+ continue;
+ }
+ }
+
+ // clear dead, broken, and done
+ for naddr in addrs.iter() {
+ if neighbor_rpc.is_dead_or_broken(network, naddr) {
+ test_debug!("Remove dead/broken downloader for {}", &naddr);
+ self.clear_downloader(&naddr);
+ }
+ }
+ for done_naddr in finished.drain(..) {
+ test_debug!("Remove finished downloader for {}", &done_naddr);
+ self.clear_downloader(&done_naddr);
+ }
+ for done_tenure in finished_tenures.drain(..) {
+ self.completed_tenures.insert(done_tenure);
+ }
+
+ // handle responses
+ for (naddr, response) in neighbor_rpc.collect_replies(network) {
+ let Some(index) = self.peers.get(&naddr) else {
+ test_debug!("No downloader for {}", &naddr);
+ continue;
+ };
+ let Some(Some(downloader)) = self.downloaders.get_mut(*index) else {
+ test_debug!("No downloader for {}", &naddr);
+ continue;
+ };
+ test_debug!("Got response from {}", &naddr);
+
+ let Ok(blocks_opt) = downloader
+ .handle_next_download_response(response)
+ .map_err(|e| {
+ debug!("Failed to handle response from {}: {:?}", &naddr, &e);
+ e
+ })
+ else {
+ test_debug!("Failed to handle download response from {}", &naddr);
+ neighbor_rpc.add_dead(network, &naddr);
+ continue;
+ };
+
+ let Some(blocks) = blocks_opt else {
+ continue;
+ };
+
+ test_debug!(
+ "Got {} blocks for tenure {}",
+ blocks.len(),
+ &downloader.tenure_id_consensus_hash
+ );
+ new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks);
+ if downloader.is_done() {
+ finished.push(naddr.clone());
+ finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
+ continue;
+ }
+ }
+
+ // clear dead, broken, and done
+ for naddr in addrs.iter() {
+ if neighbor_rpc.is_dead_or_broken(network, naddr) {
+ test_debug!("Remove dead/broken downloader for {}", &naddr);
+ self.clear_downloader(naddr);
+ }
+ }
+ for done_naddr in finished.drain(..) {
+ test_debug!("Remove finished downloader for {}", &done_naddr);
+ self.clear_downloader(&done_naddr);
+ }
+ for done_tenure in finished_tenures.drain(..) {
+ self.completed_tenures.insert(done_tenure);
+ }
+
+ new_blocks
+ }
+}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs
new file mode 100644
index 0000000000..8a154637cf
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs
@@ -0,0 +1,668 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+ BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::to_hex;
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo};
+use crate::chainstate::nakamoto::{
+ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+ Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+ EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::download::nakamoto::{
+ AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader,
+ NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure,
+};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// A set of confirmed downloader state machines assigned to one or more neighbors. The block
+/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure
+/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer
+/// connections to downloader state machines, such that each peer is assigned to at most one
+/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at
+/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine
+/// can make progress even if there is only one available peer (in which case, that peer will get
+/// scheduled across multiple machines to drive their progress in the right sequence such that
+/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer).
+pub struct NakamotoTenureDownloaderSet {
+ /// A list of instantiated downloaders that are in progress
+ pub(crate) downloaders: Vec>,
+ /// An assignment of peers to downloader machines in the `downloaders` list.
+ pub(crate) peers: HashMap,
+ /// The set of tenures that have been successfully downloaded (but possibly not yet stored or
+ /// processed)
+ pub(crate) completed_tenures: HashSet,
+}
+
+impl NakamotoTenureDownloaderSet {
+ pub fn new() -> Self {
+ Self {
+ downloaders: vec![],
+ peers: HashMap::new(),
+ completed_tenures: HashSet::new(),
+ }
+ }
+
+ /// Assign the given peer to the given downloader state machine. Allocate a slot for it if
+ /// needed.
+ fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) {
+ test_debug!(
+ "Add downloader for tenure {} driven by {}",
+ &downloader.tenure_id_consensus_hash,
+ &naddr
+ );
+ if let Some(idx) = self.peers.get(&naddr) {
+ self.downloaders[*idx] = Some(downloader);
+ } else {
+ self.downloaders.push(Some(downloader));
+ self.peers.insert(naddr, self.downloaders.len() - 1);
+ }
+ }
+
+ /// Does the given neighbor have an assigned downloader state machine?
+ pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool {
+ let Some(idx) = self.peers.get(naddr) else {
+ return false;
+ };
+ let Some(downloader_opt) = self.downloaders.get(*idx) else {
+ return false;
+ };
+ downloader_opt.is_some()
+ }
+
+ /// Drop the downloader associated with the given neighbor, if any.
+ pub fn clear_downloader(&mut self, naddr: &NeighborAddress) {
+ let Some(index) = self.peers.remove(naddr) else {
+ return;
+ };
+ self.downloaders[index] = None;
+ }
+
+ /// How many downloaders are there?
+ pub fn num_downloaders(&self) -> usize {
+ self.downloaders
+ .iter()
+ .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc })
+ }
+
+ /// How many downloaders are there, which are scheduled?
+ pub fn num_scheduled_downloaders(&self) -> usize {
+ let mut cnt = 0;
+ for (_, idx) in self.peers.iter() {
+ if let Some(Some(_)) = self.downloaders.get(*idx) {
+ cnt += 1;
+ }
+ }
+ cnt
+ }
+
+ /// Add a sequence of (address, downloader) pairs to this downloader set.
+ pub(crate) fn add_downloaders(
+ &mut self,
+ iter: impl IntoIterator- ,
+ ) {
+ for (naddr, downloader) in iter {
+ if self.has_downloader(&naddr) {
+ test_debug!("Already have downloader for {}", &naddr);
+ continue;
+ }
+ self.add_downloader(naddr, downloader);
+ }
+ }
+
+ /// Count up the number of in-flight messages, based on the states of each instantiated
+ /// downloader.
+ pub fn inflight(&self) -> usize {
+ let mut cnt = 0;
+ for downloader_opt in self.downloaders.iter() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ if downloader.idle {
+ continue;
+ }
+ if downloader.is_waiting() {
+ continue;
+ }
+ if downloader.is_done() {
+ continue;
+ }
+ cnt += 1;
+ }
+ cnt
+ }
+
+ /// Determine whether or not there exists a downloader for the given tenure, identified by its
+ /// consensus hash.
+ pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool {
+ self.downloaders
+ .iter()
+ .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch))
+ .is_some()
+ }
+
+ /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders.
+ pub fn is_empty(&self) -> bool {
+ for downloader_opt in self.downloaders.iter() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ if downloader.is_done() {
+ continue;
+ }
+ test_debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state);
+ return false;
+ }
+ true
+ }
+
+ /// Try to resume processing a download state machine with a given peer. Since a peer is
+ /// detached from the machine after a single RPC call, this call is needed to re-attach it to a
+ /// (potentially different, unblocked) machine for the next RPC call to this peer.
+ ///
+ /// Returns true if the peer gets scheduled.
+ /// Returns false if not.
+ pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool {
+ if let Some(idx) = self.peers.get(&naddr) {
+ let Some(Some(_downloader)) = self.downloaders.get(*idx) else {
+ return false;
+ };
+
+ test_debug!(
+ "Peer {} already bound to downloader for {}",
+ &naddr,
+ &_downloader.tenure_id_consensus_hash
+ );
+ return true;
+ }
+ for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ if !downloader.idle {
+ continue;
+ }
+ if downloader.is_waiting() {
+ continue;
+ }
+ if downloader.naddr != naddr {
+ continue;
+ }
+ test_debug!(
+ "Assign peer {} to work on downloader for {} in state {}",
+ &naddr,
+ &downloader.tenure_id_consensus_hash,
+ &downloader.state
+ );
+ self.peers.insert(naddr, i);
+ return true;
+ }
+ return false;
+ }
+
+ /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to
+ /// blocked downloaders.
+ pub fn clear_available_peers(&mut self) {
+ let mut idled: Vec
= vec![];
+ for (naddr, i) in self.peers.iter() {
+ let Some(downloader_opt) = self.downloaders.get(*i) else {
+ // should be unreachable
+ idled.push(naddr.clone());
+ continue;
+ };
+ let Some(downloader) = downloader_opt else {
+ test_debug!("Remove peer {} for null download {}", &naddr, i);
+ idled.push(naddr.clone());
+ continue;
+ };
+ if downloader.idle || downloader.is_waiting() {
+ test_debug!(
+ "Remove idled peer {} for tenure download {}",
+ &naddr,
+ &downloader.tenure_id_consensus_hash
+ );
+ idled.push(naddr.clone());
+ }
+ }
+ for naddr in idled.into_iter() {
+ self.peers.remove(&naddr);
+ }
+ }
+
+ /// Clear out downloaders (but not their peers) that have finished. The caller should follow
+ /// this up with a call to `clear_available_peers()`.
+ pub fn clear_finished_downloaders(&mut self) {
+ for downloader_opt in self.downloaders.iter_mut() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ if downloader.is_done() {
+ *downloader_opt = None;
+ }
+ }
+ }
+
+ /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These
+ /// will be fed into other downloaders which are blocked on needing their tenure-end blocks.
+ pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap {
+ let mut ret = HashMap::new();
+ for downloader_opt in self.downloaders.iter() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ let Some(block) = downloader.tenure_start_block.as_ref() else {
+ continue;
+ };
+ ret.insert(block.block_id(), block.clone());
+ }
+ ret
+ }
+
+ /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their
+ /// tenure-end blocks.
+ /// Return a list of peers driving downloaders with failing `tenure_start_blocks`
+ pub(crate) fn handle_tenure_end_blocks(
+ &mut self,
+        tenure_start_blocks: &HashMap<StacksBlockId, NakamotoBlock>,
+    ) -> Vec<NeighborAddress> {
+ test_debug!(
+ "handle tenure-end blocks: {:?}",
+            &tenure_start_blocks.keys().collect::<Vec<_>>()
+ );
+ let mut dead = vec![];
+ for downloader_opt in self.downloaders.iter_mut() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) =
+ &downloader.state
+ else {
+ continue;
+ };
+ let Some(end_block) = tenure_start_blocks.get(end_block_id) else {
+ continue;
+ };
+ if let Err(e) = downloader.try_accept_tenure_end_block(end_block) {
+ warn!(
+ "Failed to accept tenure end-block {} for tenure {}: {:?}",
+ &end_block.block_id(),
+ &downloader.tenure_id_consensus_hash,
+ &e
+ );
+ dead.push(downloader.naddr.clone());
+ }
+ }
+ dead
+ }
+
+ /// Does there exist a downloader (possibly unscheduled) for the given tenure?
+ pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool {
+ for downloader_opt in self.downloaders.iter() {
+ let Some(downloader) = downloader_opt else {
+ continue;
+ };
+ if &downloader.tenure_id_consensus_hash == tenure_id {
+ test_debug!(
+ "Have downloader for tenure {} already (idle={}, waiting={}, state={})",
+ tenure_id,
+ downloader.idle,
+ downloader.is_waiting(),
+ &downloader.state
+ );
+ return true;
+ }
+ }
+ false
+ }
+
+ /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor
+ /// block, we need to go and directly fetch its end block instead of waiting for another
+ /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method
+ /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block.
+ pub(crate) fn try_transition_fetch_tenure_end_blocks(
+ &mut self,
+        tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
+ ) {
+ for downloader_opt in self.downloaders.iter_mut() {
+ let Some(downloader) = downloader_opt.as_mut() else {
+ continue;
+ };
+ downloader.transition_to_fetch_end_block_on_timeout();
+ }
+
+ // find tenures in which we need to fetch the tenure-end block directly.
+        let mut last_available_tenures: HashSet<StacksBlockId> = HashSet::new();
+ for (_, all_available) in tenure_block_ids.iter() {
+ for (_, available) in all_available.iter() {
+ if available.fetch_end_block {
+ last_available_tenures.insert(available.end_block_id.clone());
+ }
+ }
+ }
+
+ // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to
+ // fetching
+ for downloader_opt in self.downloaders.iter_mut() {
+ let Some(downloader) = downloader_opt.as_mut() else {
+ continue;
+ };
+ if !downloader.idle {
+ continue;
+ }
+ if !downloader.is_waiting() {
+ continue;
+ }
+ if !last_available_tenures.contains(&downloader.tenure_end_block_id) {
+ continue;
+ }
+ test_debug!(
+ "Transition downloader for {} from waiting to fetching",
+ &downloader.tenure_id_consensus_hash
+ );
+ if let Err(e) = downloader.transition_to_fetch_end_block() {
+ warn!(
+ "Downloader for {} failed to transition to fetch end block: {:?}",
+ &downloader.tenure_id_consensus_hash, &e
+ );
+ }
+ }
+ }
+
+ /// Create a given number of downloads from a schedule and availability set.
+ /// Removes items from the schedule, and neighbors from the availability set.
+ /// A neighbor will be issued at most one request.
+ pub(crate) fn make_tenure_downloaders(
+ &mut self,
+        schedule: &mut VecDeque<ConsensusHash>,
+        available: &mut HashMap<ConsensusHash, Vec<NeighborAddress>>,
+        tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
+        count: usize,
+        current_reward_cycles: &BTreeMap<u64, CurrentRewardSet>,
+ ) {
+ test_debug!("schedule: {:?}", schedule);
+ test_debug!("available: {:?}", &available);
+ test_debug!("tenure_block_ids: {:?}", &tenure_block_ids);
+ test_debug!("inflight: {}", self.inflight());
+ test_debug!(
+ "count: {}, running: {}, scheduled: {}",
+ count,
+ self.num_downloaders(),
+ self.num_scheduled_downloaders()
+ );
+
+ self.clear_finished_downloaders();
+ self.clear_available_peers();
+ self.try_transition_fetch_tenure_end_blocks(tenure_block_ids);
+ while self.inflight() < count {
+ let Some(ch) = schedule.front() else {
+ break;
+ };
+ if self.completed_tenures.contains(&ch) {
+ test_debug!("Already successfully downloaded tenure {}", &ch);
+ schedule.pop_front();
+ continue;
+ }
+ let Some(neighbors) = available.get_mut(ch) else {
+ // not found on any neighbors, so stop trying this tenure
+ test_debug!("No neighbors have tenure {}", ch);
+ schedule.pop_front();
+ continue;
+ };
+ if neighbors.is_empty() {
+ // no more neighbors to try
+ test_debug!("No more neighbors can serve tenure {}", ch);
+ schedule.pop_front();
+ continue;
+ }
+ let Some(naddr) = neighbors.pop() else {
+ test_debug!("No more neighbors can serve tenure {}", ch);
+ schedule.pop_front();
+ continue;
+ };
+ if self.try_resume_peer(naddr.clone()) {
+ continue;
+ };
+ if self.has_downloader_for_tenure(&ch) {
+ schedule.pop_front();
+ continue;
+ }
+
+ let Some(available_tenures) = tenure_block_ids.get(&naddr) else {
+ // this peer doesn't have any known tenures, so try the others
+ test_debug!("No tenures available from {}", &naddr);
+ continue;
+ };
+ let Some(tenure_info) = available_tenures.get(ch) else {
+ // this peer does not have a tenure start/end block for this tenure, so try the
+ // others.
+ test_debug!("Neighbor {} does not serve tenure {}", &naddr, ch);
+ continue;
+ };
+ let Some(Some(start_reward_set)) = current_reward_cycles
+ .get(&tenure_info.start_reward_cycle)
+ .map(|cycle_info| cycle_info.reward_set())
+ else {
+ test_debug!(
+ "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}",
+ tenure_info.start_reward_cycle,
+ &tenure_info
+ );
+ schedule.pop_front();
+ continue;
+ };
+ let Some(Some(end_reward_set)) = current_reward_cycles
+ .get(&tenure_info.end_reward_cycle)
+ .map(|cycle_info| cycle_info.reward_set())
+ else {
+ test_debug!(
+ "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}",
+ tenure_info.end_reward_cycle,
+ &tenure_info
+ );
+ schedule.pop_front();
+ continue;
+ };
+
+ test_debug!(
+ "Download tenure {} (start={}, end={}) (rc {},{})",
+ &ch,
+ &tenure_info.start_block_id,
+ &tenure_info.end_block_id,
+ tenure_info.start_reward_cycle,
+ tenure_info.end_reward_cycle
+ );
+ let tenure_download = NakamotoTenureDownloader::new(
+ ch.clone(),
+ tenure_info.start_block_id.clone(),
+ tenure_info.end_block_id.clone(),
+ naddr.clone(),
+ start_reward_set.clone(),
+ end_reward_set.clone(),
+ );
+
+ test_debug!("Request tenure {} from neighbor {}", ch, &naddr);
+ self.add_downloader(naddr, tenure_download);
+ schedule.pop_front();
+ }
+ }
+
+ /// Run all confirmed downloaders.
+ /// * Identify neighbors for which we do not have an inflight request
+    /// * Get each such neighbor's downloader, and generate its next HTTP request. Send that
+ /// request to the neighbor and begin driving the underlying socket I/O.
+ /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance
+ /// its state.
+ /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken.
+ ///
+ /// Returns the set of downloaded blocks obtained for completed downloaders. These will be
+ /// full confirmed tenures.
+ pub fn run(
+ &mut self,
+ network: &mut PeerNetwork,
+ neighbor_rpc: &mut NeighborRPC,
+    ) -> HashMap<ConsensusHash, Vec<NakamotoBlock>> {
+ let addrs: Vec<_> = self.peers.keys().cloned().collect();
+ let mut finished = vec![];
+ let mut finished_tenures = vec![];
+ let mut new_blocks = HashMap::new();
+
+ // send requests
+ for (naddr, index) in self.peers.iter() {
+ if neighbor_rpc.has_inflight(&naddr) {
+ test_debug!("Peer {} has an inflight request", &naddr);
+ continue;
+ }
+ let Some(Some(downloader)) = self.downloaders.get_mut(*index) else {
+ test_debug!("No downloader for {}", &naddr);
+ continue;
+ };
+ if downloader.is_done() {
+ test_debug!("Downloader for {} is done", &naddr);
+ finished.push(naddr.clone());
+ finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
+ continue;
+ }
+ test_debug!(
+ "Send request to {} for tenure {} (state {})",
+ &naddr,
+ &downloader.tenure_id_consensus_hash,
+ &downloader.state
+ );
+ let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else {
+ test_debug!("Downloader for {} failed; this peer is dead", &naddr);
+ neighbor_rpc.add_dead(network, naddr);
+ continue;
+ };
+ if !sent {
+ // this downloader is dead or broken
+ finished.push(naddr.clone());
+ continue;
+ }
+ }
+
+ // clear dead, broken, and done
+ for naddr in addrs.iter() {
+ if neighbor_rpc.is_dead_or_broken(network, naddr) {
+ test_debug!("Remove dead/broken downloader for {}", &naddr);
+ self.clear_downloader(&naddr);
+ }
+ }
+ for done_naddr in finished.drain(..) {
+ test_debug!("Remove finished downloader for {}", &done_naddr);
+ self.clear_downloader(&done_naddr);
+ }
+ for done_tenure in finished_tenures.drain(..) {
+ self.completed_tenures.insert(done_tenure);
+ }
+
+ // handle responses
+ for (naddr, response) in neighbor_rpc.collect_replies(network) {
+ let Some(index) = self.peers.get(&naddr) else {
+ test_debug!("No downloader for {}", &naddr);
+ continue;
+ };
+ let Some(Some(downloader)) = self.downloaders.get_mut(*index) else {
+ test_debug!("No downloader for {}", &naddr);
+ continue;
+ };
+ test_debug!("Got response from {}", &naddr);
+
+ let Ok(blocks_opt) = downloader
+ .handle_next_download_response(response)
+ .map_err(|e| {
+ debug!("Failed to handle response from {}: {:?}", &naddr, &e);
+ e
+ })
+ else {
+ test_debug!("Failed to handle download response from {}", &naddr);
+ neighbor_rpc.add_dead(network, &naddr);
+ continue;
+ };
+
+ let Some(blocks) = blocks_opt else {
+ continue;
+ };
+
+ test_debug!(
+ "Got {} blocks for tenure {}",
+ blocks.len(),
+ &downloader.tenure_id_consensus_hash
+ );
+ new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks);
+ if downloader.is_done() {
+ finished.push(naddr.clone());
+ finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
+ continue;
+ }
+ }
+
+ // clear dead, broken, and done
+ for naddr in addrs.iter() {
+ if neighbor_rpc.is_dead_or_broken(network, naddr) {
+ test_debug!("Remove dead/broken downloader for {}", &naddr);
+ self.clear_downloader(naddr);
+ }
+ }
+ for done_naddr in finished.drain(..) {
+ test_debug!("Remove finished downloader for {}", &done_naddr);
+ self.clear_downloader(&done_naddr);
+ }
+ for done_tenure in finished_tenures.drain(..) {
+ self.completed_tenures.insert(done_tenure);
+ }
+
+ new_blocks
+ }
+}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs
new file mode 100644
index 0000000000..d51e99d5a1
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs
@@ -0,0 +1,869 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+ BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum};
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::coordinator::RewardCycleInfo;
+use crate::chainstate::nakamoto::{
+ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+ Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+ EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::download::nakamoto::{
+ AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd,
+ WantedTenure,
+};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// Download states for unconfirmed tenures. These include the ongoing tenure, as well as the
+/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but
+/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure).
+#[derive(Debug, Clone, PartialEq)]
+pub enum NakamotoUnconfirmedDownloadState {
+ /// Getting the tenure tip information
+ GetTenureInfo,
+ /// Get the tenure start block for the ongoing tenure.
+ /// The inner value is tenure-start block ID of the ongoing tenure.
+ GetTenureStartBlock(StacksBlockId),
+ /// Receiving unconfirmed tenure blocks.
+ /// The inner value is the block ID of the next block to fetch.
+ GetUnconfirmedTenureBlocks(StacksBlockId),
+ /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block
+ /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`).
+ Done,
+}
+
+impl fmt::Display for NakamotoUnconfirmedDownloadState {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self)
+ }
+}
+
+/// Download state machine for the unconfirmed tenures. It operates in the following steps:
+///
+/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip
+/// 2. Get the tenure-start block for the unconfirmed chain tip
+/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the
+/// immediate child of the one obtained in (2)
+///
+/// Once this state-machine finishes execution, the tenure-start block is used to construct a
+/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure.
+///
+#[derive(Debug, Clone, PartialEq)]
+pub struct NakamotoUnconfirmedTenureDownloader {
+ /// state of this machine
+ pub state: NakamotoUnconfirmedDownloadState,
+ /// Address of who we're asking
+ pub naddr: NeighborAddress,
+    /// reward set of the highest confirmed tenure
+    pub confirmed_signer_keys: Option<RewardSet>,
+    /// reward set of the unconfirmed (ongoing) tenure
+    pub unconfirmed_signer_keys: Option<RewardSet>,
+    /// Block ID of this node's highest-processed block.
+    /// We will not download any blocks lower than this, if it's set.
+    pub highest_processed_block_id: Option<StacksBlockId>,
+    /// Highest processed block height (which may not need to be loaded)
+    pub highest_processed_block_height: Option<u64>,
+
+    /// Tenure tip info we obtained for this peer
+    pub tenure_tip: Option<RPCGetTenureInfo>,
+    /// Tenure start block for the ongoing tip.
+    /// This is also the tenure-end block for the highest-complete tip.
+    pub unconfirmed_tenure_start_block: Option<NakamotoBlock>,
+    /// Unconfirmed tenure blocks obtained
+    pub unconfirmed_tenure_blocks: Option<Vec<NakamotoBlock>>,
+}
+
+impl NakamotoUnconfirmedTenureDownloader {
+ /// Make a new downloader which will download blocks from the tip back down to the optional
+ /// `highest_processed_block_id` (so we don't re-download the same blocks over and over).
+    pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option<StacksBlockId>) -> Self {
+ Self {
+ state: NakamotoUnconfirmedDownloadState::GetTenureInfo,
+ naddr,
+ confirmed_signer_keys: None,
+ unconfirmed_signer_keys: None,
+ highest_processed_block_id,
+ highest_processed_block_height: None,
+ tenure_tip: None,
+ unconfirmed_tenure_start_block: None,
+ unconfirmed_tenure_blocks: None,
+ }
+ }
+
+ /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is
+ /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote
+ /// node).
+ pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> {
+ self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash)
+ }
+
+ /// Set the highest-processed block.
+ /// This can be performed by the downloader itself in order to inform ongoing requests for
+ /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node
+ /// has already handled.
+ pub fn set_highest_processed_block(
+ &mut self,
+ highest_processed_block_id: StacksBlockId,
+ highest_processed_block_height: u64,
+ ) {
+ self.highest_processed_block_id = Some(highest_processed_block_id);
+ self.highest_processed_block_height = Some(highest_processed_block_height);
+ }
+
+ /// Try and accept the tenure info. It will be validated against the sortition DB and its tip.
+ ///
+ /// * tenure_tip.consensus_hash
+ /// This is the consensus hash of the remote node's ongoing tenure. It may not be the
+ /// sortition tip, e.g. if the tenure spans multiple sortitions.
+ /// * tenure_tip.tenure_start_block_id
+ /// This is the first block ID of the ongoing unconfirmed tenure.
+ /// * tenure_tip.parent_consensus_hash
+ /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest
+ /// complete tenure, for which we know the start and end block IDs.
+ /// * tenure_tip.parent_tenure_start_block_id
+ /// This is the tenure start block for the highest complete tenure. It should be equal to
+ /// the winning Stacks block hash of the snapshot for the ongoing tenure.
+ ///
+ /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go
+ /// fetch it again; just get the new unconfirmed blocks.
+ pub fn try_accept_tenure_info(
+ &mut self,
+ sortdb: &SortitionDB,
+ local_sort_tip: &BlockSnapshot,
+ chainstate: &StacksChainState,
+ remote_tenure_tip: RPCGetTenureInfo,
+        current_reward_sets: &BTreeMap<u64, CurrentRewardSet>,
+ ) -> Result<(), NetError> {
+ if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo {
+ return Err(NetError::InvalidState);
+ }
+ if self.tenure_tip.is_some() {
+ return Err(NetError::InvalidState);
+ }
+
+ test_debug!("Got tenure info {:?}", remote_tenure_tip);
+ test_debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash);
+
+ // authenticate consensus hashes against canonical chain history
+ let local_tenure_sn = SortitionDB::get_block_snapshot_consensus(
+ sortdb.conn(),
+ &remote_tenure_tip.consensus_hash,
+ )?
+ .ok_or_else(|| {
+ debug!(
+ "No snapshot for tenure {}",
+ &remote_tenure_tip.consensus_hash
+ );
+ NetError::DBError(DBError::NotFoundError)
+ })?;
+ let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus(
+ sortdb.conn(),
+ &remote_tenure_tip.parent_consensus_hash,
+ )?
+ .ok_or_else(|| {
+ debug!(
+ "No snapshot for parent tenure {}",
+ &remote_tenure_tip.parent_consensus_hash
+ );
+ NetError::DBError(DBError::NotFoundError)
+ })?;
+
+ let ih = sortdb.index_handle(&local_sort_tip.sortition_id);
+ let ancestor_local_tenure_sn = ih
+ .get_block_snapshot_by_height(local_tenure_sn.block_height)?
+ .ok_or_else(|| {
+ debug!(
+ "No tenure snapshot at burn block height {} off of sortition {} ({})",
+ local_tenure_sn.block_height,
+ &local_tenure_sn.sortition_id,
+ &local_tenure_sn.consensus_hash
+ );
+ NetError::DBError(DBError::NotFoundError)
+ })?;
+
+ if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id {
+ // .consensus_hash is not on the canonical fork
+ warn!("Unconfirmed tenure consensus hash is not canonical";
+ "peer" => %self.naddr,
+ "consensus_hash" => %remote_tenure_tip.consensus_hash);
+ return Err(DBError::NotFoundError.into());
+ }
+ let ancestor_parent_local_tenure_sn = ih
+ .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)?
+ .ok_or_else(|| {
+ debug!(
+ "No parent tenure snapshot at burn block height {} off of sortition {} ({})",
+ local_tenure_sn.block_height,
+ &local_tenure_sn.sortition_id,
+ &local_tenure_sn.consensus_hash
+ );
+ NetError::DBError(DBError::NotFoundError.into())
+ })?;
+
+ if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id {
+ // .parent_consensus_hash is not on the canonical fork
+ warn!("Parent unconfirmed tenure consensus hash is not canonical";
+ "peer" => %self.naddr,
+ "consensus_hash" => %remote_tenure_tip.parent_consensus_hash);
+ return Err(DBError::NotFoundError.into());
+ }
+
+ // parent tenure sortition must precede the ongoing tenure sortition
+ if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height {
+ warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot";
+ "peer" => %self.naddr,
+ "consensus_hash" => %remote_tenure_tip.consensus_hash,
+ "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash);
+ return Err(NetError::InvalidMessage);
+ }
+
+ // parent tenure start block ID must be the winning block hash for the ongoing tenure's
+ // snapshot
+ if local_tenure_sn.winning_stacks_block_hash.0
+ != remote_tenure_tip.parent_tenure_start_block_id.0
+ {
+ debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr;
+ "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id,
+ "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash);
+ return Err(NetError::StaleView);
+ }
+
+ if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() {
+ // we've synchronized this tenure before, so don't get anymore blocks before it.
+ let highest_processed_block = chainstate
+ .nakamoto_blocks_db()
+ .get_nakamoto_block(highest_processed_block_id)?
+ .ok_or_else(|| {
+ debug!("No such Nakamoto block {}", &highest_processed_block_id);
+ NetError::DBError(DBError::NotFoundError)
+ })?
+ .0;
+
+ let highest_processed_block_height = highest_processed_block.header.chain_length;
+ self.highest_processed_block_height = Some(highest_processed_block_height);
+
+ if &remote_tenure_tip.tip_block_id == highest_processed_block_id
+ || highest_processed_block_height > remote_tenure_tip.tip_height
+ {
+ // nothing to do -- we're at or ahead of the remote peer, so finish up.
+ // If we don't have the tenure-start block for the confirmed tenure that the remote
+ // peer claims to have, then the remote peer has sent us invalid data and we should
+ // treat it as such.
+ let unconfirmed_tenure_start_block = chainstate
+ .nakamoto_blocks_db()
+ .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)?
+ .ok_or(NetError::InvalidMessage)?
+ .0;
+ self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+ self.state = NakamotoUnconfirmedDownloadState::Done;
+ }
+ }
+
+ if self.state == NakamotoUnconfirmedDownloadState::Done {
+ // only need to remember the tenure tip
+ self.tenure_tip = Some(remote_tenure_tip);
+ return Ok(());
+ }
+
+ // we're not finished
+ let tenure_rc = sortdb
+ .pox_constants
+ .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height)
+ .expect("FATAL: sortition from before system start");
+ let parent_tenure_rc = sortdb
+ .pox_constants
+ .block_height_to_reward_cycle(
+ sortdb.first_block_height,
+ parent_local_tenure_sn.block_height,
+ )
+ .expect("FATAL: sortition from before system start");
+
+ // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions
+ let Some(Some(confirmed_reward_set)) = current_reward_sets
+ .get(&parent_tenure_rc)
+ .map(|cycle_info| cycle_info.reward_set())
+ else {
+ warn!(
+ "No signer public keys for confirmed tenure {} (rc {})",
+ &parent_local_tenure_sn.consensus_hash, parent_tenure_rc
+ );
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(Some(unconfirmed_reward_set)) = current_reward_sets
+ .get(&tenure_rc)
+ .map(|cycle_info| cycle_info.reward_set())
+ else {
+ warn!(
+ "No signer public keys for unconfirmed tenure {} (rc {})",
+ &local_tenure_sn.consensus_hash, tenure_rc
+ );
+ return Err(NetError::InvalidState);
+ };
+
+ if chainstate
+ .nakamoto_blocks_db()
+ .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())?
+ {
+ // proceed to get unconfirmed blocks. We already have the tenure-start block.
+ let unconfirmed_tenure_start_block = chainstate
+ .nakamoto_blocks_db()
+ .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)?
+ .ok_or_else(|| {
+ debug!(
+ "No such tenure-start Nakamoto block {}",
+ &remote_tenure_tip.tenure_start_block_id
+ );
+ NetError::DBError(DBError::NotFoundError)
+ })?
+ .0;
+ self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+ self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(
+ remote_tenure_tip.tip_block_id.clone(),
+ );
+ } else {
+ // get the tenure-start block first
+ self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock(
+ remote_tenure_tip.tenure_start_block_id.clone(),
+ );
+ }
+
+ test_debug!(
+ "Will validate unconfirmed blocks with reward sets in ({},{})",
+ parent_tenure_rc,
+ tenure_rc
+ );
+ self.confirmed_signer_keys = Some(confirmed_reward_set.clone());
+ self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone());
+ self.tenure_tip = Some(remote_tenure_tip);
+
+ Ok(())
+ }
+
+ /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state.
+ /// Returns Ok(()) if the unconfirmed tenure start block was valid
+ /// Returns Err(..) if it was not valid, or if this function was called out of sequence.
+ pub fn try_accept_unconfirmed_tenure_start_block(
+ &mut self,
+ unconfirmed_tenure_start_block: NakamotoBlock,
+ ) -> Result<(), NetError> {
+ let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) =
+ &self.state
+ else {
+ warn!("Invalid state for this method";
+ "state" => %self.state);
+ return Err(NetError::InvalidState);
+ };
+ let Some(tenure_tip) = self.tenure_tip.as_ref() else {
+ warn!("tenure_tip is not set");
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+ warn!("unconfirmed_signer_keys is not set");
+ return Err(NetError::InvalidState);
+ };
+
+ // stacker signature has to match the current reward set
+ if let Err(e) = unconfirmed_tenure_start_block
+ .header
+ .verify_signer_signatures(unconfirmed_signer_keys)
+ {
+ warn!("Invalid tenure-start block: bad signer signature";
+ "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+ "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(),
+ "state" => %self.state,
+ "error" => %e);
+ return Err(NetError::InvalidMessage);
+ }
+
+ // block has to match the expected hash
+ if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() {
+ warn!("Invalid tenure-start block";
+ "tenure_id_start_block" => %tenure_start_block_id,
+ "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+ "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ }
+
+ // furthermore, the block has to match the expected tenure ID
+ if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash {
+ warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch";
+ "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+ "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash);
+ return Err(NetError::InvalidMessage);
+ }
+
+ self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+ self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(
+ tenure_tip.tip_block_id.clone(),
+ );
+ Ok(())
+ }
+
+ /// Add downloaded unconfirmed tenure blocks.
+ /// If we have collected all tenure blocks, then return them.
+ /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the
+ /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come
+ /// after the highest-processed block (if set).
+ /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call
+ /// `send_next_download_request()`
+ /// Returns Err(..) on invalid state or invalid block.
+ pub fn try_accept_unconfirmed_tenure_blocks(
+ &mut self,
+        mut tenure_blocks: Vec<NakamotoBlock>,
+    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+ let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) =
+ &self.state
+ else {
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(tenure_tip) = self.tenure_tip.as_ref() else {
+ warn!("tenure_tip is not set");
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+ warn!("unconfirmed_signer_keys is not set");
+ return Err(NetError::InvalidState);
+ };
+
+ if tenure_blocks.is_empty() {
+ // nothing to do
+ debug!("No tenure blocks obtained");
+ return Ok(None);
+ }
+
+ // blocks must be contiguous and in order from highest to lowest.
+ // If there's a tenure-start block, it must be last.
+ let mut expected_block_id = last_block_id;
+ let mut finished_download = false;
+ let mut last_block_index = None;
+ for (cnt, block) in tenure_blocks.iter().enumerate() {
+ if &block.header.block_id() != expected_block_id {
+ warn!("Unexpected Nakamoto block -- not part of tenure";
+ "expected_block_id" => %expected_block_id,
+ "block_id" => %block.header.block_id());
+ return Err(NetError::InvalidMessage);
+ }
+ if let Err(e) = block
+ .header
+ .verify_signer_signatures(unconfirmed_signer_keys)
+ {
+ warn!("Invalid block: bad signer signature";
+ "tenure_id" => %tenure_tip.consensus_hash,
+ "block.header.block_id" => %block.header.block_id(),
+ "state" => %self.state,
+ "error" => %e);
+ return Err(NetError::InvalidMessage);
+ }
+
+ // we may or may not need the tenure-start block for the unconfirmed tenure. But if we
+ // do, make sure it's valid, and it's the last block we receive.
+ let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else {
+ warn!("Invalid tenure-start block";
+ "tenure_id" => %tenure_tip.consensus_hash,
+ "block.header.block_id" => %block.header.block_id(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ };
+ if is_tenure_start {
+ // this is the tenure-start block, so make sure it matches our /v3/tenure/info
+ if block.header.block_id() != tenure_tip.tenure_start_block_id {
+ warn!("Unexpected tenure-start block";
+ "tenure_id" => %tenure_tip.consensus_hash,
+ "block.header.block_id" => %block.header.block_id(),
+ "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id);
+ return Err(NetError::InvalidMessage);
+ }
+
+ if cnt.saturating_add(1) != tenure_blocks.len() {
+ warn!("Invalid tenure stream -- got tenure-start before end of tenure";
+ "tenure_id" => %tenure_tip.consensus_hash,
+ "block.header.block_id" => %block.header.block_id(),
+ "cnt" => cnt,
+ "len" => tenure_blocks.len(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ }
+
+ finished_download = true;
+ last_block_index = Some(cnt);
+ break;
+ }
+
+ test_debug!("Got unconfirmed tenure block {}", &block.header.block_id());
+
+ // NOTE: this field can get updated by the downloader while this state-machine is in
+ // this state.
+ if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() {
+ if expected_block_id == highest_processed_block_id {
+ // got all the blocks we asked for
+ debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id);
+ finished_download = true;
+ last_block_index = Some(cnt);
+ break;
+ }
+ }
+
+ // NOTE: this field can get updated by the downloader while this state-machine is in
+ // this state.
+ if let Some(highest_processed_block_height) =
+ self.highest_processed_block_height.as_ref()
+ {
+ if &block.header.chain_length <= highest_processed_block_height {
+ // no need to continue this download
+ debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height);
+ finished_download = true;
+ last_block_index = Some(cnt);
+ break;
+ }
+ }
+
+ expected_block_id = &block.header.parent_block_id;
+ last_block_index = Some(cnt);
+ }
+
+ // blocks after the last_block_index were not processed, so should be dropped
+ if let Some(last_block_index) = last_block_index {
+ tenure_blocks.truncate(last_block_index + 1);
+ }
+
+ if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() {
+ blocks.append(&mut tenure_blocks);
+ } else {
+ self.unconfirmed_tenure_blocks = Some(tenure_blocks);
+ }
+
+ if finished_download {
+ // we have all of the unconfirmed tenure blocks that were requested.
+ // only return those newer than the highest block.
+ self.state = NakamotoUnconfirmedDownloadState::Done;
+ let highest_processed_block_height =
+ *self.highest_processed_block_height.as_ref().unwrap_or(&0);
+
+ test_debug!("Finished receiving unconfirmed tenure");
+ return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| {
+ blocks
+ .into_iter()
+ .filter(|block| block.header.chain_length > highest_processed_block_height)
+ .rev()
+ .collect()
+ }));
+ }
+
+ let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else {
+ // unreachable but be defensive
+ warn!("Invalid state: no blocks (infallible -- got empty vec)");
+ return Err(NetError::InvalidState);
+ };
+
+ // still have more to get
+ let Some(earliest_block) = blocks.last() else {
+ // unreachable but be defensive
+ warn!("Invalid state: no blocks (infallible -- got empty vec)");
+ return Err(NetError::InvalidState);
+ };
+ let next_block_id = earliest_block.header.parent_block_id.clone();
+
+ test_debug!(
+ "Will resume fetching unconfirmed tenure blocks starting at {}",
+ &next_block_id
+ );
+ self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id);
+ Ok(None)
+ }
+
+ /// Once this machine runs to completion, examine its state to see if we still need to fetch
+ /// the highest complete tenure. We may not need to, especially if we're just polling for new
+ /// unconfirmed blocks.
+ ///
+ /// Return Ok(true) if we need it still
+ /// Return Ok(false) if we already have it
+ /// Return Err(..) if we encounter a DB error or if this function was called out of sequence.
+ pub fn need_highest_complete_tenure(
+ &self,
+ chainstate: &StacksChainState,
+ ) -> Result {
+ if self.state != NakamotoUnconfirmedDownloadState::Done {
+ return Err(NetError::InvalidState);
+ }
+ let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref()
+ else {
+ return Err(NetError::InvalidState);
+ };
+
+ // if we've processed the unconfirmed tenure-start block already, then we've necessarily
+ // downloaded and processed the highest-complete tenure already.
+ Ok(!NakamotoChainState::has_block_header(
+ chainstate.db(),
+ &unconfirmed_tenure_start_block.header.block_id(),
+ false,
+ )?)
+ }
+
+ /// Determine if we can produce a highest-complete tenure request.
+ /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure
+ pub fn can_make_highest_complete_tenure_downloader(
+ &self,
+ sortdb: &SortitionDB,
+ ) -> Result {
+ let Some(tenure_tip) = &self.tenure_tip else {
+ return Ok(false);
+ };
+
+ let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus(
+ sortdb.conn(),
+ &tenure_tip.parent_consensus_hash,
+ )?
+ else {
+ return Ok(false);
+ };
+
+ let Some(tip_sn) =
+ SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)?
+ else {
+ return Ok(false);
+ };
+
+ let Some(parent_tenure) =
+ SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)?
+ else {
+ return Ok(false);
+ };
+
+ let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)?
+ else {
+ return Ok(false);
+ };
+
+ if parent_tenure.epoch_id < StacksEpochId::Epoch30
+ || tip_tenure.epoch_id < StacksEpochId::Epoch30
+ {
+ debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block";
+ "start_tenure" => %tenure_tip.parent_consensus_hash,
+ "end_tenure" => %tenure_tip.consensus_hash,
+ "start_tenure_epoch" => %parent_tenure.epoch_id,
+ "end_tenure_epoch" => %tip_tenure.epoch_id
+ );
+ return Ok(false);
+ }
+
+ Ok(true)
+ }
+
+ /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the
+ /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get
+ /// its tenure-start block.
+ ///
+ /// Returns Ok(downloader) on success
+ /// Returns Err(..) if we call this function out of sequence.
+ pub fn make_highest_complete_tenure_downloader(
+ &self,
+ ) -> Result {
+ if self.state != NakamotoUnconfirmedDownloadState::Done {
+ return Err(NetError::InvalidState);
+ }
+ let Some(tenure_tip) = &self.tenure_tip else {
+ return Err(NetError::InvalidState);
+ };
+ let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else {
+ return Err(NetError::InvalidState);
+ };
+ let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+ return Err(NetError::InvalidState);
+ };
+
+ test_debug!(
+ "Create downloader for highest complete tenure {} known by {}",
+ &tenure_tip.parent_consensus_hash,
+ &self.naddr,
+ );
+ let ntd = NakamotoTenureDownloader::new(
+ tenure_tip.parent_consensus_hash.clone(),
+ tenure_tip.parent_tenure_start_block_id.clone(),
+ tenure_tip.tenure_start_block_id.clone(),
+ self.naddr.clone(),
+ confirmed_signer_keys.clone(),
+ unconfirmed_signer_keys.clone(),
+ );
+
+ Ok(ntd)
+ }
+
+ /// Produce the next HTTP request that, when successfully executed, will advance this state
+ /// machine.
+ ///
+ /// Returns Some(request) if a request must be sent.
+ /// Returns None if we're done
+ pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option {
+ match &self.state {
+ NakamotoUnconfirmedDownloadState::GetTenureInfo => {
+ // need to get the tenure tip
+ return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost));
+ }
+ NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => {
+ return Some(StacksHttpRequest::new_get_nakamoto_block(
+ peerhost,
+ block_id.clone(),
+ ));
+ }
+ NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => {
+ return Some(StacksHttpRequest::new_get_nakamoto_tenure(
+ peerhost,
+ tip_block_id.clone(),
+ self.highest_processed_block_id.clone(),
+ ));
+ }
+ NakamotoUnconfirmedDownloadState::Done => {
+ // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed
+ // tenure downloader using the earliest unconfirmed tenure block.
+ return None;
+ }
+ }
+ }
+
+ /// Begin the next download request for this state machine.
+ /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The
+ /// caller should try this again until it gets one of the other possible return values. It's
+ /// up to the caller to determine when it's appropriate to convert this state machine into a
+ /// `NakamotoTenureDownloader`.
+ /// Returns Err(..) if the neighbor is dead or broken.
+ pub fn send_next_download_request(
+ &self,
+ network: &mut PeerNetwork,
+ neighbor_rpc: &mut NeighborRPC,
+ ) -> Result<(), NetError> {
+ if neighbor_rpc.has_inflight(&self.naddr) {
+ test_debug!("Peer {} has an inflight request", &self.naddr);
+ return Ok(());
+ }
+ if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
+ return Err(NetError::PeerNotConnected);
+ }
+
+ let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
+ // no conversation open to this neighbor
+ neighbor_rpc.add_dead(network, &self.naddr);
+ return Err(NetError::PeerNotConnected);
+ };
+
+ let Some(request) = self.make_next_download_request(peerhost) else {
+ // treat this downloader as still in-flight since the overall state machine will need
+ // to keep it around long enough to convert it into a tenure downloader for the highest
+ // complete tenure.
+ return Ok(());
+ };
+
+ neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
+ Ok(())
+ }
+
+ /// Handle a received StacksHttpResponse and advance this machine's state
+ /// If we get the full tenure, return it.
+ ///
+ /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure
+ /// Returns Ok(None) if we're still working, in which case the caller should call
+ /// `send_next_download_request()`
+ /// Returns Err(..) on unrecoverable failure to advance state
+ pub fn handle_next_download_response(
+ &mut self,
+ response: StacksHttpResponse,
+ sortdb: &SortitionDB,
+ local_sort_tip: &BlockSnapshot,
+ chainstate: &StacksChainState,
+ current_reward_sets: &BTreeMap,
+ ) -> Result>, NetError> {
+ match &self.state {
+ NakamotoUnconfirmedDownloadState::GetTenureInfo => {
+ test_debug!("Got tenure-info response");
+ let remote_tenure_info = response.decode_nakamoto_tenure_info()?;
+ test_debug!("Got tenure-info response: {:?}", &remote_tenure_info);
+ self.try_accept_tenure_info(
+ sortdb,
+ local_sort_tip,
+ chainstate,
+ remote_tenure_info,
+ current_reward_sets,
+ )?;
+ Ok(None)
+ }
+ NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => {
+ test_debug!("Got tenure start-block response");
+ let block = response.decode_nakamoto_block()?;
+ self.try_accept_unconfirmed_tenure_start_block(block)?;
+ Ok(None)
+ }
+ NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => {
+ test_debug!("Got unconfirmed tenure blocks response");
+ let blocks = response.decode_nakamoto_tenure()?;
+ let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?;
+ test_debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some());
+ Ok(accepted_opt)
+ }
+ NakamotoUnconfirmedDownloadState::Done => {
+ return Err(NetError::InvalidState);
+ }
+ }
+ }
+
+ /// Is this machine finished?
+ pub fn is_done(&self) -> bool {
+ self.state == NakamotoUnconfirmedDownloadState::Done
+ }
+}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs
new file mode 100644
index 0000000000..d51e99d5a1
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs
@@ -0,0 +1,869 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+ BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum};
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::coordinator::RewardCycleInfo;
+use crate::chainstate::nakamoto::{
+ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+ Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+ EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::download::nakamoto::{
+ AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd,
+ WantedTenure,
+};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// Download states for a unconfirmed tenures. These include the ongoing tenure, as well as the
+/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but
+/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure).
+#[derive(Debug, Clone, PartialEq)]
+pub enum NakamotoUnconfirmedDownloadState {
+ /// Getting the tenure tip information
+ GetTenureInfo,
+ /// Get the tenure start block for the ongoing tenure.
+ /// The inner value is tenure-start block ID of the ongoing tenure.
+ GetTenureStartBlock(StacksBlockId),
+ /// Receiving unconfirmed tenure blocks.
+ /// The inner value is the block ID of the next block to fetch.
+ GetUnconfirmedTenureBlocks(StacksBlockId),
+ /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block
+ /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`).
+ Done,
+}
+
+impl fmt::Display for NakamotoUnconfirmedDownloadState {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self)
+ }
+}
+
+/// Download state machine for the unconfirmed tenures. It operates in the following steps:
+///
+/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip
+/// 2. Get the tenure-start block for the unconfirmed chain tip
+/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the
+/// immediate child of the one obtained in (2)
+///
+/// Once this state-machine finishes execution, the tenure-start block is used to construct a
+/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure.
+///
+#[derive(Debug, Clone, PartialEq)]
+pub struct NakamotoUnconfirmedTenureDownloader {
+ /// state of this machine
+ pub state: NakamotoUnconfirmedDownloadState,
+ /// Address of who we're asking
+ pub naddr: NeighborAddress,
+ /// reward set of the highest confirmed tenure
+ pub confirmed_signer_keys: Option,
+ /// reward set of the unconfirmed (ongoing) tenure
+ pub unconfirmed_signer_keys: Option,
+ /// Block ID of this node's highest-processed block.
+ /// We will not download any blocks lower than this, if it's set.
+ pub highest_processed_block_id: Option,
+ /// Highest processed block height (which may not need to be loaded)
+ pub highest_processed_block_height: Option,
+
+ /// Tenure tip info we obtained for this peer
+ pub tenure_tip: Option,
+ /// Tenure start block for the ongoing tip.
+ /// This is also the tenure-end block for the highest-complete tip.
+ pub unconfirmed_tenure_start_block: Option,
+ /// Unconfirmed tenure blocks obtained
+ pub unconfirmed_tenure_blocks: Option>,
+}
+
+impl NakamotoUnconfirmedTenureDownloader {
+ /// Make a new downloader which will download blocks from the tip back down to the optional
+ /// `highest_processed_block_id` (so we don't re-download the same blocks over and over).
+ pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self {
+ Self {
+ state: NakamotoUnconfirmedDownloadState::GetTenureInfo,
+ naddr,
+ confirmed_signer_keys: None,
+ unconfirmed_signer_keys: None,
+ highest_processed_block_id,
+ highest_processed_block_height: None,
+ tenure_tip: None,
+ unconfirmed_tenure_start_block: None,
+ unconfirmed_tenure_blocks: None,
+ }
+ }
+
+ /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is
+ /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote
+ /// node).
+ pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> {
+ self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash)
+ }
+
+ /// Set the highest-processed block.
+ /// This can be performed by the downloader itself in order to inform ongoing requests for
+ /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node
+ /// has already handled.
+ pub fn set_highest_processed_block(
+ &mut self,
+ highest_processed_block_id: StacksBlockId,
+ highest_processed_block_height: u64,
+ ) {
+ self.highest_processed_block_id = Some(highest_processed_block_id);
+ self.highest_processed_block_height = Some(highest_processed_block_height);
+ }
+
+ /// Try and accept the tenure info. It will be validated against the sortition DB and its tip.
+ ///
+ /// * tenure_tip.consensus_hash
+ /// This is the consensus hash of the remote node's ongoing tenure. It may not be the
+ /// sortition tip, e.g. if the tenure spans multiple sortitions.
+ /// * tenure_tip.tenure_start_block_id
+ /// This is the first block ID of the ongoing unconfirmed tenure.
+ /// * tenure_tip.parent_consensus_hash
+ /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest
+ /// complete tenure, for which we know the start and end block IDs.
+ /// * tenure_tip.parent_tenure_start_block_id
+ /// This is the tenure start block for the highest complete tenure. It should be equal to
+ /// the winning Stacks block hash of the snapshot for the ongoing tenure.
+ ///
+ /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go
+ /// fetch it again; just get the new unconfirmed blocks.
+ pub fn try_accept_tenure_info(
+ &mut self,
+ sortdb: &SortitionDB,
+ local_sort_tip: &BlockSnapshot,
+ chainstate: &StacksChainState,
+ remote_tenure_tip: RPCGetTenureInfo,
+ current_reward_sets: &BTreeMap,
+ ) -> Result<(), NetError> {
+ if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo {
+ return Err(NetError::InvalidState);
+ }
+ if self.tenure_tip.is_some() {
+ return Err(NetError::InvalidState);
+ }
+
+ test_debug!("Got tenure info {:?}", remote_tenure_tip);
+ test_debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash);
+
+ // authenticate consensus hashes against canonical chain history
+ let local_tenure_sn = SortitionDB::get_block_snapshot_consensus(
+ sortdb.conn(),
+ &remote_tenure_tip.consensus_hash,
+ )?
+ .ok_or_else(|| {
+ debug!(
+ "No snapshot for tenure {}",
+ &remote_tenure_tip.consensus_hash
+ );
+ NetError::DBError(DBError::NotFoundError)
+ })?;
+ let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus(
+ sortdb.conn(),
+ &remote_tenure_tip.parent_consensus_hash,
+ )?
+ .ok_or_else(|| {
+ debug!(
+ "No snapshot for parent tenure {}",
+ &remote_tenure_tip.parent_consensus_hash
+ );
+ NetError::DBError(DBError::NotFoundError)
+ })?;
+
+ let ih = sortdb.index_handle(&local_sort_tip.sortition_id);
+ let ancestor_local_tenure_sn = ih
+ .get_block_snapshot_by_height(local_tenure_sn.block_height)?
+ .ok_or_else(|| {
+ debug!(
+ "No tenure snapshot at burn block height {} off of sortition {} ({})",
+ local_tenure_sn.block_height,
+ &local_tenure_sn.sortition_id,
+ &local_tenure_sn.consensus_hash
+ );
+ NetError::DBError(DBError::NotFoundError)
+ })?;
+
+ if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id {
+ // .consensus_hash is not on the canonical fork
+ warn!("Unconfirmed tenure consensus hash is not canonical";
+ "peer" => %self.naddr,
+ "consensus_hash" => %remote_tenure_tip.consensus_hash);
+ return Err(DBError::NotFoundError.into());
+ }
+ let ancestor_parent_local_tenure_sn = ih
+ .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)?
+ .ok_or_else(|| {
+ debug!(
+ "No parent tenure snapshot at burn block height {} off of sortition {} ({})",
+ local_tenure_sn.block_height,
+ &local_tenure_sn.sortition_id,
+ &local_tenure_sn.consensus_hash
+ );
+ NetError::DBError(DBError::NotFoundError.into())
+ })?;
+
+ if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id {
+ // .parent_consensus_hash is not on the canonical fork
+ warn!("Parent unconfirmed tenure consensus hash is not canonical";
+ "peer" => %self.naddr,
+ "consensus_hash" => %remote_tenure_tip.parent_consensus_hash);
+ return Err(DBError::NotFoundError.into());
+ }
+
+ // parent tenure sortition must precede the ongoing tenure sortition
+ if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height {
+ warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot";
+ "peer" => %self.naddr,
+ "consensus_hash" => %remote_tenure_tip.consensus_hash,
+ "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash);
+ return Err(NetError::InvalidMessage);
+ }
+
+ // parent tenure start block ID must be the winning block hash for the ongoing tenure's
+ // snapshot
+ if local_tenure_sn.winning_stacks_block_hash.0
+ != remote_tenure_tip.parent_tenure_start_block_id.0
+ {
+ debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr;
+ "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id,
+ "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash);
+ return Err(NetError::StaleView);
+ }
+
+ if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() {
+ // we've synchronized this tenure before, so don't get anymore blocks before it.
+ let highest_processed_block = chainstate
+ .nakamoto_blocks_db()
+ .get_nakamoto_block(highest_processed_block_id)?
+ .ok_or_else(|| {
+ debug!("No such Nakamoto block {}", &highest_processed_block_id);
+ NetError::DBError(DBError::NotFoundError)
+ })?
+ .0;
+
+ let highest_processed_block_height = highest_processed_block.header.chain_length;
+ self.highest_processed_block_height = Some(highest_processed_block_height);
+
+ if &remote_tenure_tip.tip_block_id == highest_processed_block_id
+ || highest_processed_block_height > remote_tenure_tip.tip_height
+ {
+ // nothing to do -- we're at or ahead of the remote peer, so finish up.
+ // If we don't have the tenure-start block for the confirmed tenure that the remote
+ // peer claims to have, then the remote peer has sent us invalid data and we should
+ // treat it as such.
+ let unconfirmed_tenure_start_block = chainstate
+ .nakamoto_blocks_db()
+ .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)?
+ .ok_or(NetError::InvalidMessage)?
+ .0;
+ self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+ self.state = NakamotoUnconfirmedDownloadState::Done;
+ }
+ }
+
+ if self.state == NakamotoUnconfirmedDownloadState::Done {
+ // only need to remember the tenure tip
+ self.tenure_tip = Some(remote_tenure_tip);
+ return Ok(());
+ }
+
+ // we're not finished
+ let tenure_rc = sortdb
+ .pox_constants
+ .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height)
+ .expect("FATAL: sortition from before system start");
+ let parent_tenure_rc = sortdb
+ .pox_constants
+ .block_height_to_reward_cycle(
+ sortdb.first_block_height,
+ parent_local_tenure_sn.block_height,
+ )
+ .expect("FATAL: sortition from before system start");
+
+ // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions
+ let Some(Some(confirmed_reward_set)) = current_reward_sets
+ .get(&parent_tenure_rc)
+ .map(|cycle_info| cycle_info.reward_set())
+ else {
+ warn!(
+ "No signer public keys for confirmed tenure {} (rc {})",
+ &parent_local_tenure_sn.consensus_hash, parent_tenure_rc
+ );
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(Some(unconfirmed_reward_set)) = current_reward_sets
+ .get(&tenure_rc)
+ .map(|cycle_info| cycle_info.reward_set())
+ else {
+ warn!(
+ "No signer public keys for unconfirmed tenure {} (rc {})",
+ &local_tenure_sn.consensus_hash, tenure_rc
+ );
+ return Err(NetError::InvalidState);
+ };
+
+ if chainstate
+ .nakamoto_blocks_db()
+ .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())?
+ {
+ // proceed to get unconfirmed blocks. We already have the tenure-start block.
+ let unconfirmed_tenure_start_block = chainstate
+ .nakamoto_blocks_db()
+ .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)?
+ .ok_or_else(|| {
+ debug!(
+ "No such tenure-start Nakamoto block {}",
+ &remote_tenure_tip.tenure_start_block_id
+ );
+ NetError::DBError(DBError::NotFoundError)
+ })?
+ .0;
+ self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+ self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(
+ remote_tenure_tip.tip_block_id.clone(),
+ );
+ } else {
+ // get the tenure-start block first
+ self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock(
+ remote_tenure_tip.tenure_start_block_id.clone(),
+ );
+ }
+
+ test_debug!(
+ "Will validate unconfirmed blocks with reward sets in ({},{})",
+ parent_tenure_rc,
+ tenure_rc
+ );
+ self.confirmed_signer_keys = Some(confirmed_reward_set.clone());
+ self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone());
+ self.tenure_tip = Some(remote_tenure_tip);
+
+ Ok(())
+ }
+
+ /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state.
+ /// Returns Ok(()) if the unconfirmed tenure start block was valid
+ /// Returns Err(..) if it was not valid, or if this function was called out of sequence.
+ pub fn try_accept_unconfirmed_tenure_start_block(
+ &mut self,
+ unconfirmed_tenure_start_block: NakamotoBlock,
+ ) -> Result<(), NetError> {
+ let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) =
+ &self.state
+ else {
+ warn!("Invalid state for this method";
+ "state" => %self.state);
+ return Err(NetError::InvalidState);
+ };
+ let Some(tenure_tip) = self.tenure_tip.as_ref() else {
+ warn!("tenure_tip is not set");
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+ warn!("unconfirmed_signer_keys is not set");
+ return Err(NetError::InvalidState);
+ };
+
+ // stacker signature has to match the current reward set
+ if let Err(e) = unconfirmed_tenure_start_block
+ .header
+ .verify_signer_signatures(unconfirmed_signer_keys)
+ {
+ warn!("Invalid tenure-start block: bad signer signature";
+ "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+ "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(),
+ "state" => %self.state,
+ "error" => %e);
+ return Err(NetError::InvalidMessage);
+ }
+
+ // block has to match the expected hash
+ if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() {
+ warn!("Invalid tenure-start block";
+ "tenure_id_start_block" => %tenure_start_block_id,
+ "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+ "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ }
+
+ // furthermore, the block has to match the expected tenure ID
+ if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash {
+ warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch";
+ "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+ "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash);
+ return Err(NetError::InvalidMessage);
+ }
+
+ self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+ self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(
+ tenure_tip.tip_block_id.clone(),
+ );
+ Ok(())
+ }
+
+ /// Add downloaded unconfirmed tenure blocks.
+ /// If we have collected all tenure blocks, then return them.
+ /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the
+ /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come
+ /// after the highest-processed block (if set).
+ /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call
+ /// `send_next_download_request()`
+ /// Returns Err(..) on invalid state or invalid block.
+ pub fn try_accept_unconfirmed_tenure_blocks(
+ &mut self,
+ mut tenure_blocks: Vec,
+ ) -> Result>, NetError> {
+ // Only legal while fetching unconfirmed tenure blocks; `last_block_id` is the
+ // block-id the first (highest) block in this response must have.
+ let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) =
+ &self.state
+ else {
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(tenure_tip) = self.tenure_tip.as_ref() else {
+ warn!("tenure_tip is not set");
+ return Err(NetError::InvalidState);
+ };
+
+ let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+ warn!("unconfirmed_signer_keys is not set");
+ return Err(NetError::InvalidState);
+ };
+
+ if tenure_blocks.is_empty() {
+ // nothing to do
+ debug!("No tenure blocks obtained");
+ return Ok(None);
+ }
+
+ // blocks must be contiguous and in order from highest to lowest.
+ // If there's a tenure-start block, it must be last.
+ let mut expected_block_id = last_block_id;
+ let mut finished_download = false;
+ let mut last_block_index = None;
+ for (cnt, block) in tenure_blocks.iter().enumerate() {
+ // each block must link to its successor via block-id (we walk the parent chain
+ // from the tenure tip downwards)
+ if &block.header.block_id() != expected_block_id {
+ warn!("Unexpected Nakamoto block -- not part of tenure";
+ "expected_block_id" => %expected_block_id,
+ "block_id" => %block.header.block_id());
+ return Err(NetError::InvalidMessage);
+ }
+ // every block's signer signatures must validate against the unconfirmed
+ // tenure's signer public keys
+ if let Err(e) = block
+ .header
+ .verify_signer_signatures(unconfirmed_signer_keys)
+ {
+ warn!("Invalid block: bad signer signature";
+ "tenure_id" => %tenure_tip.consensus_hash,
+ "block.header.block_id" => %block.header.block_id(),
+ "state" => %self.state,
+ "error" => %e);
+ return Err(NetError::InvalidMessage);
+ }
+
+ // we may or may not need the tenure-start block for the unconfirmed tenure. But if we
+ // do, make sure it's valid, and it's the last block we receive.
+ let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else {
+ warn!("Invalid tenure-start block";
+ "tenure_id" => %tenure_tip.consensus_hash,
+ "block.header.block_id" => %block.header.block_id(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ };
+ if is_tenure_start {
+ // this is the tenure-start block, so make sure it matches our /v3/tenure/info
+ if block.header.block_id() != tenure_tip.tenure_start_block_id {
+ warn!("Unexpected tenure-start block";
+ "tenure_id" => %tenure_tip.consensus_hash,
+ "block.header.block_id" => %block.header.block_id(),
+ "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id);
+ return Err(NetError::InvalidMessage);
+ }
+
+ // a tenure-start block anywhere but the final position means the stream is
+ // malformed (nothing in this tenure can precede its start block)
+ if cnt.saturating_add(1) != tenure_blocks.len() {
+ warn!("Invalid tenure stream -- got tenure-start before end of tenure";
+ "tenure_id" => %tenure_tip.consensus_hash,
+ "block.header.block_id" => %block.header.block_id(),
+ "cnt" => cnt,
+ "len" => tenure_blocks.len(),
+ "state" => %self.state);
+ return Err(NetError::InvalidMessage);
+ }
+
+ finished_download = true;
+ last_block_index = Some(cnt);
+ break;
+ }
+
+ test_debug!("Got unconfirmed tenure block {}", &block.header.block_id());
+
+ // NOTE: this field can get updated by the downloader while this state-machine is in
+ // this state.
+ if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() {
+ if expected_block_id == highest_processed_block_id {
+ // got all the blocks we asked for
+ debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id);
+ finished_download = true;
+ last_block_index = Some(cnt);
+ break;
+ }
+ }
+
+ // NOTE: this field can get updated by the downloader while this state-machine is in
+ // this state.
+ if let Some(highest_processed_block_height) =
+ self.highest_processed_block_height.as_ref()
+ {
+ if &block.header.chain_length <= highest_processed_block_height {
+ // no need to continue this download
+ debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height);
+ finished_download = true;
+ last_block_index = Some(cnt);
+ break;
+ }
+ }
+
+ // walk backwards: the next block we accept must be this block's parent
+ expected_block_id = &block.header.parent_block_id;
+ last_block_index = Some(cnt);
+ }
+
+ // blocks after the last_block_index were not processed, so should be dropped
+ if let Some(last_block_index) = last_block_index {
+ tenure_blocks.truncate(last_block_index + 1);
+ }
+
+ // accumulate with blocks received from earlier responses (if any)
+ if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() {
+ blocks.append(&mut tenure_blocks);
+ } else {
+ self.unconfirmed_tenure_blocks = Some(tenure_blocks);
+ }
+
+ if finished_download {
+ // we have all of the unconfirmed tenure blocks that were requested.
+ // only return those newer than the highest block.
+ self.state = NakamotoUnconfirmedDownloadState::Done;
+ // an unset highest-processed height is treated as 0, so every block is kept
+ let highest_processed_block_height =
+ *self.highest_processed_block_height.as_ref().unwrap_or(&0);
+
+ test_debug!("Finished receiving unconfirmed tenure");
+ // blocks were collected tip-first; drop already-processed heights and reverse
+ // into the ascending (height-ordered) sequence promised by the docstring
+ return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| {
+ blocks
+ .into_iter()
+ .filter(|block| block.header.chain_length > highest_processed_block_height)
+ .rev()
+ .collect()
+ }));
+ }
+
+ let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else {
+ // unreachable but be defensive
+ warn!("Invalid state: no blocks (infallible -- got empty vec)");
+ return Err(NetError::InvalidState);
+ };
+
+ // still have more to get
+ let Some(earliest_block) = blocks.last() else {
+ // unreachable but be defensive
+ warn!("Invalid state: no blocks (infallible -- got empty vec)");
+ return Err(NetError::InvalidState);
+ };
+ // resume the parent-chain walk from the lowest block we have so far
+ let next_block_id = earliest_block.header.parent_block_id.clone();
+
+ test_debug!(
+ "Will resume fetching unconfirmed tenure blocks starting at {}",
+ &next_block_id
+ );
+ self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id);
+ Ok(None)
+ }
+
+ /// Once this machine runs to completion, examine its state to see if we still need to fetch
+ /// the highest complete tenure. We may not need to, especially if we're just polling for new
+ /// unconfirmed blocks.
+ ///
+ /// Return Ok(true) if we need it still
+ /// Return Ok(false) if we already have it
+ /// Return Err(..) if we encounter a DB error or if this function was called out of sequence.
+ pub fn need_highest_complete_tenure(
+ &self,
+ chainstate: &StacksChainState,
+ ) -> Result {
+ // only meaningful once the unconfirmed download has run to completion
+ if self.state != NakamotoUnconfirmedDownloadState::Done {
+ return Err(NetError::InvalidState);
+ }
+ let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref()
+ else {
+ return Err(NetError::InvalidState);
+ };
+
+ // if we've processed the unconfirmed tenure-start block already, then we've necessarily
+ // downloaded and processed the highest-complete tenure already.
+ Ok(!NakamotoChainState::has_block_header(
+ chainstate.db(),
+ &unconfirmed_tenure_start_block.header.block_id(),
+ false,
+ )?)
+ }
+
+ /// Determine if we can produce a highest-complete tenure request.
+ /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure
+ pub fn can_make_highest_complete_tenure_downloader(
+ &self,
+ sortdb: &SortitionDB,
+ ) -> Result {
+ let Some(tenure_tip) = &self.tenure_tip else {
+ return Ok(false);
+ };
+
+ // sortition snapshot for the parent (highest-complete) tenure's consensus hash
+ let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus(
+ sortdb.conn(),
+ &tenure_tip.parent_consensus_hash,
+ )?
+ else {
+ return Ok(false);
+ };
+
+ // sortition snapshot for the unconfirmed tenure's consensus hash
+ let Some(tip_sn) =
+ SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)?
+ else {
+ return Ok(false);
+ };
+
+ // NOTE(review): these are epoch lookups keyed by the snapshots' burnchain heights;
+ // despite the names, `parent_tenure` and `tip_tenure` hold epoch records, not tenures.
+ let Some(parent_tenure) =
+ SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)?
+ else {
+ return Ok(false);
+ };
+
+ let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)?
+ else {
+ return Ok(false);
+ };
+
+ // both tenures must be in epoch 3.0 or later (i.e. Nakamoto rules)
+ if parent_tenure.epoch_id < StacksEpochId::Epoch30
+ || tip_tenure.epoch_id < StacksEpochId::Epoch30
+ {
+ debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block";
+ "start_tenure" => %tenure_tip.parent_consensus_hash,
+ "end_tenure" => %tenure_tip.consensus_hash,
+ "start_tenure_epoch" => %parent_tenure.epoch_id,
+ "end_tenure_epoch" => %tip_tenure.epoch_id
+ );
+ return Ok(false);
+ }
+
+ Ok(true)
+ }
+
+ /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the
+ /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get
+ /// its tenure-start block.
+ ///
+ /// Returns Ok(downloader) on success
+ /// Returns Err(..) if we call this function out of sequence.
+ pub fn make_highest_complete_tenure_downloader(
+ &self,
+ ) -> Result {
+ // only legal once the unconfirmed download has finished
+ if self.state != NakamotoUnconfirmedDownloadState::Done {
+ return Err(NetError::InvalidState);
+ }
+ let Some(tenure_tip) = &self.tenure_tip else {
+ return Err(NetError::InvalidState);
+ };
+ let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else {
+ return Err(NetError::InvalidState);
+ };
+ let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+ return Err(NetError::InvalidState);
+ };
+
+ test_debug!(
+ "Create downloader for highest complete tenure {} known by {}",
+ &tenure_tip.parent_consensus_hash,
+ &self.naddr,
+ );
+ // NOTE(review): `tenure_start_block_id` (the unconfirmed tenure's start) appears to
+ // serve as the end boundary of the parent tenure here -- confirm against
+ // NakamotoTenureDownloader::new's parameter order.
+ let ntd = NakamotoTenureDownloader::new(
+ tenure_tip.parent_consensus_hash.clone(),
+ tenure_tip.parent_tenure_start_block_id.clone(),
+ tenure_tip.tenure_start_block_id.clone(),
+ self.naddr.clone(),
+ confirmed_signer_keys.clone(),
+ unconfirmed_signer_keys.clone(),
+ );
+
+ Ok(ntd)
+ }
+
+ /// Produce the next HTTP request that, when successfully executed, will advance this state
+ /// machine.
+ ///
+ /// Returns Some(request) if a request must be sent.
+ /// Returns None if we're done
+ pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option {
+ match &self.state {
+ NakamotoUnconfirmedDownloadState::GetTenureInfo => {
+ // need to get the tenure tip
+ return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost));
+ }
+ NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => {
+ // fetch the single tenure-start block recorded in the state
+ return Some(StacksHttpRequest::new_get_nakamoto_block(
+ peerhost,
+ block_id.clone(),
+ ));
+ }
+ NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => {
+ // fetch the tenure stream starting from the tip; the highest-processed
+ // block-id (if known) bounds how far back the remote peer must go
+ return Some(StacksHttpRequest::new_get_nakamoto_tenure(
+ peerhost,
+ tip_block_id.clone(),
+ self.highest_processed_block_id.clone(),
+ ));
+ }
+ NakamotoUnconfirmedDownloadState::Done => {
+ // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed
+ // tenure downloader using the earliest unconfirmed tenure block.
+ return None;
+ }
+ }
+ }
+
+ /// Begin the next download request for this state machine.
+ /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The
+ /// caller should try this again until it gets one of the other possible return values. It's
+ /// up to the caller to determine when it's appropriate to convert this state machine into a
+ /// `NakamotoTenureDownloader`.
+ /// Returns Err(..) if the neighbor is dead or broken.
+ pub fn send_next_download_request(
+ &self,
+ network: &mut PeerNetwork,
+ neighbor_rpc: &mut NeighborRPC,
+ ) -> Result<(), NetError> {
+ // at most one outstanding request per neighbor
+ if neighbor_rpc.has_inflight(&self.naddr) {
+ test_debug!("Peer {} has an inflight request", &self.naddr);
+ return Ok(());
+ }
+ if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
+ return Err(NetError::PeerNotConnected);
+ }
+
+ let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
+ // no conversation open to this neighbor
+ neighbor_rpc.add_dead(network, &self.naddr);
+ return Err(NetError::PeerNotConnected);
+ };
+
+ let Some(request) = self.make_next_download_request(peerhost) else {
+ // treat this downloader as still in-flight since the overall state machine will need
+ // to keep it around long enough to convert it into a tenure downloader for the highest
+ // complete tenure.
+ return Ok(());
+ };
+
+ neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
+ Ok(())
+ }
+
+ /// Handle a received StacksHttpResponse and advance this machine's state
+ /// If we get the full tenure, return it.
+ ///
+ /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure
+ /// Returns Ok(None) if we're still working, in which case the caller should call
+ /// `send_next_download_request()`
+ /// Returns Err(..) on unrecoverable failure to advance state
+ pub fn handle_next_download_response(
+ &mut self,
+ response: StacksHttpResponse,
+ sortdb: &SortitionDB,
+ local_sort_tip: &BlockSnapshot,
+ chainstate: &StacksChainState,
+ current_reward_sets: &BTreeMap,
+ ) -> Result>, NetError> {
+ // each arm decodes the response kind matching the current state and advances the
+ // machine via the corresponding try_accept_* method; only the unconfirmed-tenure-blocks
+ // arm can yield the finished block list
+ match &self.state {
+ NakamotoUnconfirmedDownloadState::GetTenureInfo => {
+ test_debug!("Got tenure-info response");
+ let remote_tenure_info = response.decode_nakamoto_tenure_info()?;
+ test_debug!("Got tenure-info response: {:?}", &remote_tenure_info);
+ self.try_accept_tenure_info(
+ sortdb,
+ local_sort_tip,
+ chainstate,
+ remote_tenure_info,
+ current_reward_sets,
+ )?;
+ Ok(None)
+ }
+ NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => {
+ test_debug!("Got tenure start-block response");
+ let block = response.decode_nakamoto_block()?;
+ self.try_accept_unconfirmed_tenure_start_block(block)?;
+ Ok(None)
+ }
+ NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => {
+ test_debug!("Got unconfirmed tenure blocks response");
+ let blocks = response.decode_nakamoto_tenure()?;
+ let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?;
+ test_debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some());
+ Ok(accepted_opt)
+ }
+ NakamotoUnconfirmedDownloadState::Done => {
+ // no further responses are expected once the machine is done
+ return Err(NetError::InvalidState);
+ }
+ }
+ }
+
+ /// Report whether this state machine has run to completion (reached `Done`).
+ pub fn is_done(&self) -> bool {
+ matches!(self.state, NakamotoUnconfirmedDownloadState::Done)
+ }
+}