diff --git a/chain/arweave/src/chain.rs b/chain/arweave/src/chain.rs index 0a79a0279f3..8d40408a463 100644 --- a/chain/arweave/src/chain.rs +++ b/chain/arweave/src/chain.rs @@ -6,6 +6,7 @@ use graph::blockchain::{ EmptyNodeCapabilities, NoopDecoderHook, NoopRuntimeAdapter, }; use graph::cheap_clone::CheapClone; +use graph::components::adapter::ChainId; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::env::EnvVars; @@ -41,7 +42,7 @@ use graph::blockchain::block_stream::{ pub struct Chain { logger_factory: LoggerFactory, - name: String, + name: ChainId, client: Arc>, chain_store: Arc, metrics_registry: Arc, @@ -53,8 +54,9 @@ impl std::fmt::Debug for Chain { } } +#[async_trait] impl BlockchainBuilder for BasicBlockchainBuilder { - fn build(self, _config: &Arc) -> Chain { + async fn build(self, _config: &Arc) -> Chain { Chain { logger_factory: self.logger_factory, name: self.name, @@ -157,21 +159,22 @@ impl Blockchain for Chain { number: BlockNumber, ) -> Result { self.client - .firehose_endpoint()? + .firehose_endpoint() + .await? .block_ptr_for_number::(logger, number) .await .map_err(Into::into) } - fn runtime(&self) -> (Arc>, Self::DecoderHook) { - (Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook) + fn runtime(&self) -> anyhow::Result<(Arc>, Self::DecoderHook)> { + Ok((Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook)) } fn chain_client(&self) -> Arc> { self.client.clone() } - fn block_ingestor(&self) -> anyhow::Result> { + async fn block_ingestor(&self) -> anyhow::Result> { let ingestor = FirehoseBlockIngestor::::new( self.chain_store.cheap_clone(), self.chain_client(), diff --git a/chain/arweave/src/trigger.rs b/chain/arweave/src/trigger.rs index 73013715af6..186bb857009 100644 --- a/chain/arweave/src/trigger.rs +++ b/chain/arweave/src/trigger.rs @@ -17,6 +17,7 @@ use crate::codec; // Logging the block is too verbose, so this strips the block from the trigger for Debug. 
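// The same pattern recurs for the cosmos and near triggers below: mirror the
// trigger in a block-less enum, derive Debug on the mirror, and format that
// instead of the full trigger. A minimal sketch of the idea (illustrative
// names, not the actual variants):
//
//     #[allow(unused)]
//     #[derive(Debug)]
//     enum MappingTriggerWithoutBlock {
//         Block,
//         Transaction { tx_index: u64 },
//     }
//
// The #[allow(unused)] added in this diff presumably silences dead-code
// warnings for variants that only exist for their Debug output.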
impl std::fmt::Debug for ArweaveTrigger { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + #[allow(unused)] #[derive(Debug)] pub enum MappingTriggerWithoutBlock { Block, diff --git a/chain/cosmos/src/chain.rs b/chain/cosmos/src/chain.rs index 383c40d4478..955aa7efc3c 100644 --- a/chain/cosmos/src/chain.rs +++ b/chain/cosmos/src/chain.rs @@ -1,5 +1,6 @@ use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; use graph::blockchain::{BlockIngestor, NoopDecoderHook}; +use graph::components::adapter::ChainId; use graph::env::EnvVars; use graph::prelude::MetricsRegistry; use graph::substreams::Clock; @@ -36,7 +37,7 @@ use crate::{codec, TriggerFilter}; pub struct Chain { logger_factory: LoggerFactory, - name: String, + name: ChainId, client: Arc>, chain_store: Arc, metrics_registry: Arc, @@ -48,8 +49,9 @@ impl std::fmt::Debug for Chain { } } +#[async_trait] impl BlockchainBuilder for BasicBlockchainBuilder { - fn build(self, _config: &Arc) -> Chain { + async fn build(self, _config: &Arc) -> Chain { Chain { logger_factory: self.logger_factory, name: self.name, @@ -150,7 +152,7 @@ impl Blockchain for Chain { logger: &Logger, number: BlockNumber, ) -> Result { - let firehose_endpoint = self.client.firehose_endpoint()?; + let firehose_endpoint = self.client.firehose_endpoint().await?; firehose_endpoint .block_ptr_for_number::(logger, number) @@ -158,15 +160,15 @@ impl Blockchain for Chain { .map_err(Into::into) } - fn runtime(&self) -> (Arc>, Self::DecoderHook) { - (Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook) + fn runtime(&self) -> anyhow::Result<(Arc>, Self::DecoderHook)> { + Ok((Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook)) } fn chain_client(&self) -> Arc> { self.client.clone() } - fn block_ingestor(&self) -> anyhow::Result> { + async fn block_ingestor(&self) -> anyhow::Result> { let ingestor = FirehoseBlockIngestor::::new( self.chain_store.cheap_clone(), self.chain_client(), diff --git a/chain/cosmos/src/trigger.rs b/chain/cosmos/src/trigger.rs index 448af7bb238..9700a75bf76 100644 --- a/chain/cosmos/src/trigger.rs +++ b/chain/cosmos/src/trigger.rs @@ -13,6 +13,7 @@ use crate::data_source::EventOrigin; // Logging the block is too verbose, so this strips the block from the trigger for Debug. 
impl std::fmt::Debug for CosmosTrigger { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + #[allow(unused)] #[derive(Debug)] pub enum MappingTriggerWithoutBlock<'e> { Block, diff --git a/chain/ethereum/examples/firehose.rs b/chain/ethereum/examples/firehose.rs index 306f7b26bc3..b49cb71ac31 100644 --- a/chain/ethereum/examples/firehose.rs +++ b/chain/ethereum/examples/firehose.rs @@ -2,10 +2,9 @@ use anyhow::Error; use graph::{ endpoint::EndpointMetrics, env::env_var, - firehose::SubgraphLimit, + firehose::{self, FirehoseEndpoint, NoopGenesisDecoder, SubgraphLimit}, log::logger, prelude::{prost, tokio, tonic, MetricsRegistry}, - {firehose, firehose::FirehoseEndpoint}, }; use graph_chain_ethereum::codec; use hex::ToHex; @@ -39,6 +38,7 @@ async fn main() -> Result<(), Error> { false, SubgraphLimit::Unlimited, metrics, + NoopGenesisDecoder::boxed(), )); loop { diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index be7fb2b431e..9456267fa56 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -5,6 +5,7 @@ use graph::blockchain::firehose_block_ingestor::{FirehoseBlockIngestor, Transforms}; use graph::blockchain::{ BlockIngestor, BlockTime, BlockchainKind, ChainIdentifier, TriggersAdapterSelector, }; +use graph::components::adapter::ChainId; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::firehose::{FirehoseEndpoint, ForkStep}; @@ -146,7 +147,7 @@ impl BlockStreamBuilder for EthereumStreamBuilder { let chain_store = chain.chain_store(); let chain_head_update_stream = chain .chain_head_update_listener - .subscribe(chain.name.clone(), logger.clone()); + .subscribe(chain.name.to_string(), logger.clone()); // Special case: Detect Celo and set the threshold to 0, so that eth_getLogs is always used. // This is ok because Celo blocks are always final. And we _need_ to do this because @@ -156,6 +157,7 @@ impl BlockStreamBuilder for EthereumStreamBuilder { ChainClient::Rpc(adapter) => { adapter .cheapest() + .await .ok_or(anyhow!("unable to get eth adapter for chain_id call"))? .chain_id() .await? @@ -199,7 +201,7 @@ impl BlockRefetcher for EthereumBlockRefetcher { logger: &Logger, cursor: FirehoseCursor, ) -> Result { - let endpoint = chain.chain_client().firehose_endpoint()?; + let endpoint = chain.chain_client().firehose_endpoint().await?; let block = endpoint.get_block::(cursor, logger).await?; let ethereum_block: EthereumBlockWithCalls = (&block).try_into()?; Ok(BlockFinality::NonFinal(ethereum_block)) @@ -286,9 +288,8 @@ impl RuntimeAdapterBuilder for EthereumRuntimeAdapterBuilder { pub struct Chain { logger_factory: LoggerFactory, - name: String, + pub name: ChainId, node_id: NodeId, - chain_identifier: Arc, registry: Arc, client: Arc>, chain_store: Arc, @@ -314,7 +315,7 @@ impl Chain { /// Creates a new Ethereum [`Chain`]. pub fn new( logger_factory: LoggerFactory, - name: String, + name: ChainId, node_id: NodeId, registry: Arc, chain_store: Arc, @@ -330,12 +331,10 @@ impl Chain { polling_ingestor_interval: Duration, is_ingestible: bool, ) -> Self { - let chain_identifier = Arc::new(chain_store.chain_identifier().clone()); Chain { logger_factory, name, node_id, - chain_identifier, registry, client, chain_store, @@ -360,12 +359,12 @@ impl Chain { // TODO: This is only used to build the block stream which could probably // be moved to the chain itself and return a block stream future that the // caller can spawn.
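// Adapter lookup is now async because it goes through the ProviderManager,
// which may have to validate a provider's chain identifier before handing it
// out. A sketch of the call-site shape this diff converges on (error message
// illustrative):
//
//     let adapter = adapters
//         .cheapest()
//         .await
//         .ok_or_else(|| anyhow!("no eth adapter available"))?;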
- pub fn cheapest_adapter(&self) -> Arc { + pub async fn cheapest_adapter(&self) -> Arc { let adapters = match self.client.as_ref() { ChainClient::Firehose(_) => panic!("no adapter with firehose"), ChainClient::Rpc(adapter) => adapter, }; - adapters.cheapest().unwrap() + adapters.cheapest().await.unwrap() } } @@ -454,13 +453,15 @@ impl Blockchain for Chain { ) -> Result { match self.client.as_ref() { ChainClient::Firehose(endpoints) => endpoints - .endpoint()? + .endpoint() + .await? .block_ptr_for_number::(logger, number) .await .map_err(IngestorError::Unknown), ChainClient::Rpc(adapters) => { let adapter = adapters .cheapest() + .await .with_context(|| format!("no adapter for chain {}", self.name))? .clone(); @@ -484,15 +485,16 @@ impl Blockchain for Chain { self.block_refetcher.get_block(self, logger, cursor).await } - fn runtime(&self) -> (Arc>, Self::DecoderHook) { + fn runtime(&self) -> anyhow::Result<(Arc>, Self::DecoderHook)> { let call_cache = Arc::new(BufferedCallCache::new(self.call_cache.cheap_clone())); + let chain_ident = self.chain_store.chain_identifier()?; let builder = self.runtime_adapter_builder.build( self.eth_adapters.cheap_clone(), call_cache.cheap_clone(), - self.chain_identifier.cheap_clone(), + Arc::new(chain_ident.clone()), ); - let eth_call_gas = eth_call_gas(&self.chain_identifier); + let eth_call_gas = eth_call_gas(&chain_ident); let decoder_hook = crate::data_source::DecoderHook::new( self.eth_adapters.cheap_clone(), @@ -500,14 +502,14 @@ impl Blockchain for Chain { eth_call_gas, ); - (builder, decoder_hook) + Ok((builder, decoder_hook)) } fn chain_client(&self) -> Arc> { self.client.clone() } - fn block_ingestor(&self) -> anyhow::Result> { + async fn block_ingestor(&self) -> anyhow::Result> { let ingestor: Box = match self.chain_client().as_ref() { ChainClient::Firehose(_) => { let ingestor = FirehoseBlockIngestor::::new( @@ -521,10 +523,7 @@ impl Blockchain for Chain { Box::new(ingestor) } - ChainClient::Rpc(rpc) => { - let eth_adapter = rpc - .cheapest() - .ok_or_else(|| anyhow!("unable to get adapter for ethereum block ingestor"))?; + ChainClient::Rpc(_) => { let logger = self .logger_factory .component_logger( @@ -535,7 +534,7 @@ impl Blockchain for Chain { }), }), ) - .new(o!("provider" => eth_adapter.provider().to_string())); + .new(o!()); if !self.is_ingestible { bail!( @@ -550,7 +549,7 @@ impl Blockchain for Chain { Box::new(PollingBlockIngestor::new( logger, graph::env::ENV_VARS.reorg_threshold, - eth_adapter, + self.chain_client(), self.chain_store().cheap_clone(), self.polling_ingestor_interval, self.name.clone(), @@ -675,7 +674,10 @@ impl TriggersAdapterTrait for TriggersAdapter { filter: &TriggerFilter, ) -> Result<(Vec>, BlockNumber), Error> { blocks_with_triggers( - self.chain_client.rpc()?.cheapest_with(&self.capabilities)?, + self.chain_client + .rpc()? + .cheapest_with(&self.capabilities) + .await?, self.logger.clone(), self.chain_store.clone(), self.ethrpc_metrics.clone(), @@ -705,7 +707,11 @@ impl TriggersAdapterTrait for TriggersAdapter { match &block { BlockFinality::Final(_) => { - let adapter = self.chain_client.rpc()?.cheapest_with(&self.capabilities)?; + let adapter = self + .chain_client + .rpc()? + .cheapest_with(&self.capabilities) + .await?; let block_number = block.number() as BlockNumber; let (blocks, _) = blocks_with_triggers( adapter, @@ -738,6 +744,7 @@ impl TriggersAdapterTrait for TriggersAdapter { self.chain_client .rpc()? .cheapest() + .await .ok_or(anyhow!("unable to get adapter for is_on_main_chain"))? 
.is_on_main_chain(&self.logger, ptr.clone()) .await @@ -775,7 +782,8 @@ impl TriggersAdapterTrait for TriggersAdapter { }), ChainClient::Rpc(adapters) => { let blocks = adapters - .cheapest_with(&self.capabilities)? + .cheapest_with(&self.capabilities) + .await? .load_blocks( self.logger.cheap_clone(), self.chain_store.cheap_clone(), diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index a9c84e8c802..dcd1b2ac82a 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -1905,7 +1905,8 @@ pub(crate) async fn get_calls( } else { client .rpc()? - .cheapest_with(capabilities)? + .cheapest_with(capabilities) + .await? .calls_in_block( &logger, subgraph_metrics.clone(), diff --git a/chain/ethereum/src/ingestor.rs b/chain/ethereum/src/ingestor.rs index 1c1603b59fa..d22e08c4294 100644 --- a/chain/ethereum/src/ingestor.rs +++ b/chain/ethereum/src/ingestor.rs @@ -1,5 +1,11 @@ -use crate::{chain::BlockFinality, EthereumAdapter, EthereumAdapterTrait, ENV_VARS}; -use graph::futures03::compat::Future01CompatExt; +use crate::{chain::BlockFinality, ENV_VARS}; +use crate::{EthereumAdapter, EthereumAdapterTrait as _}; +use graph::blockchain::client::ChainClient; +use graph::blockchain::BlockchainKind; +use graph::components::adapter::ChainId; +use graph::futures03::compat::Future01CompatExt as _; +use graph::slog::o; +use graph::util::backoff::ExponentialBackoff; use graph::{ blockchain::{BlockHash, BlockIngestor, BlockPtr, IngestorError}, cheap_clone::CheapClone, @@ -13,25 +19,25 @@ use std::{sync::Arc, time::Duration}; pub struct PollingBlockIngestor { logger: Logger, ancestor_count: i32, - eth_adapter: Arc, + chain_client: Arc>, chain_store: Arc, polling_interval: Duration, - network_name: String, + network_name: ChainId, } impl PollingBlockIngestor { pub fn new( logger: Logger, ancestor_count: i32, - eth_adapter: Arc, + chain_client: Arc>, chain_store: Arc, polling_interval: Duration, - network_name: String, + network_name: ChainId, ) -> Result { Ok(PollingBlockIngestor { logger, ancestor_count, - eth_adapter, + chain_client, chain_store, polling_interval, network_name, @@ -59,8 +65,12 @@ impl PollingBlockIngestor { } } - async fn do_poll(&self) -> Result<(), IngestorError> { - trace!(self.logger, "BlockIngestor::do_poll"); + async fn do_poll( + &self, + logger: &Logger, + eth_adapter: Arc, + ) -> Result<(), IngestorError> { + trace!(&logger, "BlockIngestor::do_poll"); // Get chain head ptr from store let head_block_ptr_opt = self.chain_store.cheap_clone().chain_head_ptr().await?; @@ -68,7 +78,7 @@ impl PollingBlockIngestor { // To check if there is a new block or not, fetch only the block header since that's cheaper // than the full block. This is worthwhile because most of the time there won't be a new // block, as we expect the poll interval to be much shorter than the block time. - let latest_block = self.latest_block().await?; + let latest_block = self.latest_block(logger, ð_adapter).await?; if let Some(head_block) = head_block_ptr_opt.as_ref() { // If latest block matches head block in store, nothing needs to be done @@ -80,7 +90,7 @@ impl PollingBlockIngestor { // An ingestor might wait or move forward, but it never // wavers and goes back. 
More seriously, this keeps us from // later trying to ingest a block with the same number again - warn!(self.logger, + warn!(&logger, "Provider went backwards - ignoring this latest block"; "current_block_head" => head_block.number, "latest_block_head" => latest_block.number); @@ -92,7 +102,7 @@ impl PollingBlockIngestor { match head_block_ptr_opt { None => { info!( - self.logger, + &logger, "Downloading latest blocks from Ethereum, this may take a few minutes..." ); } @@ -108,7 +118,7 @@ impl PollingBlockIngestor { }; if distance > 0 { info!( - self.logger, + &logger, "Syncing {} blocks from Ethereum", blocks_needed; "current_block_head" => head_number, @@ -125,7 +135,9 @@ impl PollingBlockIngestor { // Might be a no-op if latest block is one that we have seen. // ingest_blocks will return a (potentially incomplete) list of blocks that are // missing. - let mut missing_block_hash = self.ingest_block(&latest_block.hash).await?; + let mut missing_block_hash = self + .ingest_block(&logger, ð_adapter, &latest_block.hash) + .await?; // Repeatedly fetch missing parent blocks, and ingest them. // ingest_blocks will continue to tell us about more missing parent @@ -146,29 +158,27 @@ impl PollingBlockIngestor { // iteration will have at most block number N-1. // - Therefore, the loop will iterate at most ancestor_count times. while let Some(hash) = missing_block_hash { - missing_block_hash = self.ingest_block(&hash).await?; + missing_block_hash = self.ingest_block(&logger, ð_adapter, &hash).await?; } Ok(()) } async fn ingest_block( &self, + logger: &Logger, + eth_adapter: &Arc, block_hash: &BlockHash, ) -> Result, IngestorError> { // TODO: H256::from_slice can panic let block_hash = H256::from_slice(block_hash.as_slice()); // Get the fully populated block - let block = self - .eth_adapter - .block_by_hash(&self.logger, block_hash) + let block = eth_adapter + .block_by_hash(logger, block_hash) .compat() .await? .ok_or(IngestorError::BlockUnavailable(block_hash))?; - let ethereum_block = self - .eth_adapter - .load_full_block(&self.logger, block) - .await?; + let ethereum_block = eth_adapter.load_full_block(&logger, block).await?; // We need something that implements `Block` to store the block; the // store does not care whether the block is final or not @@ -188,31 +198,62 @@ impl PollingBlockIngestor { .await .map(|missing| missing.map(|h256| h256.into())) .map_err(|e| { - error!(self.logger, "failed to update chain head"); + error!(logger, "failed to update chain head"); IngestorError::Unknown(e) }) } - async fn latest_block(&self) -> Result { - self.eth_adapter - .latest_block_header(&self.logger) + async fn latest_block( + &self, + logger: &Logger, + eth_adapter: &Arc, + ) -> Result { + eth_adapter + .latest_block_header(&logger) .compat() .await .map(|block| block.into()) } + + async fn eth_adapter(&self) -> anyhow::Result> { + self.chain_client + .rpc()? + .cheapest() + .await + .ok_or_else(|| graph::anyhow::anyhow!("unable to get eth adapter")) + } } #[async_trait] impl BlockIngestor for PollingBlockIngestor { async fn run(self: Box) { + let mut backoff = + ExponentialBackoff::new(Duration::from_millis(250), Duration::from_secs(30)); + loop { - match self.do_poll().await { - // Some polls will fail due to transient issues + let eth_adapter = match self.eth_adapter().await { + Ok(adapter) => { + backoff.reset(); + adapter + } Err(err) => { error!( - self.logger, - "Trying again after block polling failed: {}", err + &self.logger, + "unable to get ethereum adapter, backing off... 
error: {}", + err.to_string() ); + backoff.sleep_async().await; + continue; + } + }; + let logger = self + .logger + .new(o!("provider" => eth_adapter.provider().to_string())); + + match self.do_poll(&logger, eth_adapter).await { + // Some polls will fail due to transient issues + Err(err) => { + error!(logger, "Trying again after block polling failed: {}", err); } Ok(()) => (), } @@ -225,7 +266,11 @@ impl BlockIngestor for PollingBlockIngestor { } } - fn network_name(&self) -> String { + fn network_name(&self) -> ChainId { self.network_name.clone() } + + fn kind(&self) -> BlockchainKind { + BlockchainKind::Ethereum + } } diff --git a/chain/ethereum/src/lib.rs b/chain/ethereum/src/lib.rs index 934830ecde5..b83415146ac 100644 --- a/chain/ethereum/src/lib.rs +++ b/chain/ethereum/src/lib.rs @@ -32,7 +32,6 @@ pub use crate::adapter::{ ProviderEthRpcMetrics, SubgraphEthRpcMetrics, TriggerFilter, }; pub use crate::chain::Chain; -pub use crate::network::EthereumNetworks; pub use graph::blockchain::BlockIngestor; #[cfg(test)] diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index 11b06eddb5a..9d417e6ccfe 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -1,15 +1,14 @@ use anyhow::{anyhow, bail}; -use graph::cheap_clone::CheapClone; +use graph::blockchain::ChainIdentifier; +use graph::components::adapter::{ChainId, NetIdentifiable, ProviderManager, ProviderName}; use graph::endpoint::EndpointMetrics; use graph::firehose::{AvailableCapacity, SubgraphLimit}; use graph::prelude::rand::seq::IteratorRandom; use graph::prelude::rand::{self, Rng}; -use std::cmp::Ordering; -use std::collections::HashMap; use std::sync::Arc; pub use graph::impl_slog_value; -use graph::prelude::Error; +use graph::prelude::{async_trait, Error}; use crate::adapter::EthereumAdapter as _; use crate::capabilities::NodeCapabilities; @@ -29,7 +28,32 @@ pub struct EthereumNetworkAdapter { limit: SubgraphLimit, } +#[async_trait] +impl NetIdentifiable for EthereumNetworkAdapter { + async fn net_identifiers(&self) -> Result { + self.adapter.net_identifiers().await + } + fn provider_name(&self) -> ProviderName { + self.adapter.provider().into() + } +} + impl EthereumNetworkAdapter { + pub fn new( + endpoint_metrics: Arc, + capabilities: NodeCapabilities, + adapter: Arc, + limit: SubgraphLimit, + ) -> Self { + Self { + endpoint_metrics, + capabilities, + adapter, + limit, + } + } + + #[cfg(debug_assertions)] fn is_call_only(&self) -> bool { self.adapter.is_call_only() } @@ -48,64 +72,132 @@ impl EthereumNetworkAdapter { #[derive(Debug, Clone)] pub struct EthereumNetworkAdapters { - pub adapters: Vec, + chain_id: ChainId, + manager: ProviderManager, call_only_adapters: Vec, // Percentage of request that should be used to retest errored adapters. 
retest_percent: f64, } -impl Default for EthereumNetworkAdapters { - fn default() -> Self { - Self::new(None) - } -} - impl EthereumNetworkAdapters { - pub fn new(retest_percent: Option) -> Self { + pub fn empty_for_testing() -> Self { Self { - adapters: vec![], + chain_id: "".into(), + manager: ProviderManager::default(), call_only_adapters: vec![], - retest_percent: retest_percent.unwrap_or(DEFAULT_ADAPTER_ERROR_RETEST_PERCENT), + retest_percent: DEFAULT_ADAPTER_ERROR_RETEST_PERCENT, } } - pub fn push_adapter(&mut self, adapter: EthereumNetworkAdapter) { - if adapter.is_call_only() { - self.call_only_adapters.push(adapter); - } else { - self.adapters.push(adapter); + #[cfg(debug_assertions)] + pub async fn for_testing( + mut adapters: Vec, + call_only: Vec, + ) -> Self { + use std::cmp::Ordering; + + use graph::slog::{o, Discard, Logger}; + + use graph::components::adapter::MockIdentValidator; + let chain_id: ChainId = "testing".into(); + adapters.sort_by(|a, b| { + a.capabilities + .partial_cmp(&b.capabilities) + .unwrap_or(Ordering::Equal) + }); + + let provider = ProviderManager::new( + Logger::root(Discard, o!()), + vec![(chain_id.clone(), adapters)].into_iter(), + Arc::new(MockIdentValidator), + ); + provider.mark_all_valid().await; + + Self::new(chain_id, provider, call_only, None) + } + + pub fn new( + chain_id: ChainId, + manager: ProviderManager, + call_only_adapters: Vec, + retest_percent: Option, + ) -> Self { + #[cfg(debug_assertions)] + call_only_adapters.iter().for_each(|a| { + a.is_call_only(); + }); + + Self { + chain_id, + manager, + call_only_adapters, + retest_percent: retest_percent.unwrap_or(DEFAULT_ADAPTER_ERROR_RETEST_PERCENT), } } - pub fn all_cheapest_with( - &self, + + fn available_with_capabilities<'a>( + input: Vec<&'a EthereumNetworkAdapter>, required_capabilities: &NodeCapabilities, - ) -> impl Iterator + '_ { - let cheapest_sufficient_capability = self - .adapters + ) -> impl Iterator + 'a { + let cheapest_sufficient_capability = input .iter() .find(|adapter| &adapter.capabilities >= required_capabilities) .map(|adapter| &adapter.capabilities); - self.adapters - .iter() + input + .into_iter() .filter(move |adapter| Some(&adapter.capabilities) == cheapest_sufficient_capability) .filter(|adapter| adapter.get_capacity() > AvailableCapacity::Unavailable) } - pub fn cheapest_with( + /// returns all the available adapters that meet the required capabilities + /// if no adapters are available at the time or none that meet the capabilities then + /// an empty iterator is returned. + pub async fn all_cheapest_with( &self, required_capabilities: &NodeCapabilities, + ) -> impl Iterator + '_ { + let all = self + .manager + .get_all(&self.chain_id) + .await + .unwrap_or_default(); + + Self::available_with_capabilities(all, required_capabilities) + } + + // get all the adapters, don't trigger the ProviderManager's validations because we want + // this function to remain sync. If no adapters are available an empty iterator is returned. + pub(crate) fn all_unverified_cheapest_with( + &self, + required_capabilities: &NodeCapabilities, + ) -> impl Iterator + '_ { + let all = self + .manager + .get_all_unverified(&self.chain_id) + .unwrap_or_default(); + + Self::available_with_capabilities(all, required_capabilities) + } + + // handle adapter selection from a list, implements the availability checking with an abstracted + // source of the adapter list. 
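// Selection is thus split into two stages: `available_with_capabilities`
// narrows the list to the cheapest capability tier that satisfies the request
// and still has spare capacity, and `cheapest_from` picks one adapter out of
// at most three randomly chosen candidates from that tier.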
+ fn cheapest_from( + input: Vec<&EthereumNetworkAdapter>, + required_capabilities: &NodeCapabilities, + retest_percent: f64, ) -> Result, Error> { let retest_rng: f64 = (&mut rand::thread_rng()).gen(); - let cheapest = self - .all_cheapest_with(required_capabilities) + + let cheapest = input + .into_iter() .choose_multiple(&mut rand::thread_rng(), 3); let cheapest = cheapest.iter(); // If request falls below the retest threshold, use this request to try and // reset the failed adapter. If a request succeeds the adapter will be more // likely to be selected afterwards. - if retest_rng < self.retest_percent { + if retest_rng < retest_percent { cheapest.max_by_key(|adapter| adapter.current_error_count()) } else { // The assumption here is that most RPC endpoints will not have limits @@ -123,19 +215,45 @@ impl EthereumNetworkAdapters { )) } - pub fn cheapest(&self) -> Option> { + pub(crate) fn unverified_cheapest_with( + &self, + required_capabilities: &NodeCapabilities, + ) -> Result, Error> { + let cheapest = self.all_unverified_cheapest_with(required_capabilities); + + Self::cheapest_from( + cheapest.choose_multiple(&mut rand::thread_rng(), 3), + required_capabilities, + self.retest_percent, + ) + } + + /// This is the public entry point and should always use verified adapters + pub async fn cheapest_with( + &self, + required_capabilities: &NodeCapabilities, + ) -> Result, Error> { + let cheapest = self + .all_cheapest_with(required_capabilities) + .await + .choose_multiple(&mut rand::thread_rng(), 3); + + Self::cheapest_from(cheapest, required_capabilities, self.retest_percent) + } + + pub async fn cheapest(&self) -> Option> { // EthereumAdapters are sorted by their NodeCapabilities when the EthereumNetworks // struct is instantiated so they do not need to be sorted here - self.adapters + self.manager + .get_all(&self.chain_id) + .await + .unwrap_or_default() .first() .map(|ethereum_network_adapter| ethereum_network_adapter.adapter.clone()) } - pub fn remove(&mut self, provider: &str) { - self.adapters - .retain(|adapter| adapter.adapter.provider() != provider); - } - + /// call_or_cheapest will bypass ProviderManagers' validation in order to remain non async. + /// ideally this should only be called for already validated providers. 
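// Host functions such as `ethereum.getBalance` (see the runtime_adapter.rs
// hunk below) run inside synchronous WASM callbacks and cannot await, which
// is why this path and `unverified_cheapest_with` skip the async validation
// step.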
pub fn call_or_cheapest( &self, capabilities: Option<&NodeCapabilities>, @@ -145,11 +263,13 @@ impl EthereumNetworkAdapters { // so we will ignore this error and return whatever comes out of `cheapest_with` match self.call_only_adapter() { Ok(Some(adapter)) => Ok(adapter), - _ => self.cheapest_with(capabilities.unwrap_or(&NodeCapabilities { - // Archive is required for call_only - archive: true, - traces: false, - })), + _ => { + self.unverified_cheapest_with(capabilities.unwrap_or(&NodeCapabilities { + // Archive is required for call_only + archive: true, + traces: false, + })) + } } } @@ -179,99 +299,14 @@ impl EthereumNetworkAdapters { } } -#[derive(Clone)] -pub struct EthereumNetworks { - pub metrics: Arc, - pub networks: HashMap, -} - -impl EthereumNetworks { - pub fn new(metrics: Arc) -> EthereumNetworks { - EthereumNetworks { - networks: HashMap::new(), - metrics, - } - } - - pub fn insert_empty(&mut self, name: String) { - self.networks.entry(name).or_default(); - } - - pub fn insert( - &mut self, - name: String, - capabilities: NodeCapabilities, - adapter: Arc, - limit: SubgraphLimit, - ) { - let network_adapters = self.networks.entry(name).or_default(); - - network_adapters.push_adapter(EthereumNetworkAdapter { - capabilities, - adapter, - limit, - endpoint_metrics: self.metrics.cheap_clone(), - }); - } - - pub fn remove(&mut self, name: &str, provider: &str) { - if let Some(adapters) = self.networks.get_mut(name) { - adapters.remove(provider); - } - } - - pub fn extend(&mut self, other_networks: EthereumNetworks) { - self.networks.extend(other_networks.networks); - } - - pub fn flatten(&self) -> Vec<(String, NodeCapabilities, Arc)> { - self.networks - .iter() - .flat_map(|(network_name, network_adapters)| { - network_adapters - .adapters - .iter() - .map(move |network_adapter| { - ( - network_name.clone(), - network_adapter.capabilities, - network_adapter.adapter.clone(), - ) - }) - }) - .collect() - } - - pub fn sort(&mut self) { - for adapters in self.networks.values_mut() { - adapters.adapters.sort_by(|a, b| { - a.capabilities - .partial_cmp(&b.capabilities) - // We can't define a total ordering over node capabilities, - // so incomparable items are considered equal and end up - // near each other. 
- .unwrap_or(Ordering::Equal) - }) - } - } - - pub fn adapter_with_capabilities( - &self, - network_name: String, - requirements: &NodeCapabilities, - ) -> Result, Error> { - self.networks - .get(&network_name) - .ok_or(anyhow!("network not supported: {}", &network_name)) - .and_then(|adapters| adapters.cheapest_with(requirements)) - } -} - #[cfg(test)] mod tests { + use graph::cheap_clone::CheapClone; + use graph::components::adapter::{MockIdentValidator, ProviderManager, ProviderName}; + use graph::data::value::Word; use graph::http::HeaderMap; use graph::{ - endpoint::{EndpointMetrics, Provider}, + endpoint::EndpointMetrics, firehose::SubgraphLimit, prelude::MetricsRegistry, slog::{o, Discard, Logger}, @@ -281,9 +316,7 @@ mod tests { use std::sync::Arc; use uuid::Uuid; - use crate::{ - EthereumAdapter, EthereumAdapterTrait, EthereumNetworks, ProviderEthRpcMetrics, Transport, - }; + use crate::{EthereumAdapter, EthereumAdapterTrait, ProviderEthRpcMetrics, Transport}; use super::{EthereumNetworkAdapter, EthereumNetworkAdapters, NodeCapabilities}; @@ -345,7 +378,6 @@ mod tests { #[tokio::test] async fn adapter_selector_selects_eth_call() { let metrics = Arc::new(EndpointMetrics::mock()); - let chain = "mainnet".to_string(); let logger = graph::log::logger(true); let mock_registry = Arc::new(MetricsRegistry::mock()); let transport = Transport::new_rpc( @@ -380,28 +412,27 @@ mod tests { .await, ); - let mut adapters = { - let mut ethereum_networks = EthereumNetworks::new(metrics); - ethereum_networks.insert( - chain.clone(), + let mut adapters: EthereumNetworkAdapters = EthereumNetworkAdapters::for_testing( + vec![EthereumNetworkAdapter::new( + metrics.cheap_clone(), NodeCapabilities { archive: true, traces: false, }, - eth_call_adapter.clone(), + eth_adapter.clone(), SubgraphLimit::Limit(3), - ); - ethereum_networks.insert( - chain.clone(), + )], + vec![EthereumNetworkAdapter::new( + metrics.cheap_clone(), NodeCapabilities { archive: true, traces: false, }, - eth_adapter.clone(), + eth_call_adapter.clone(), SubgraphLimit::Limit(3), - ); - ethereum_networks.networks.get(&chain).unwrap().clone() - }; + )], + ) + .await; // one reference above and one inside adapters struct assert_eq!(Arc::strong_count(ð_call_adapter), 2); assert_eq!(Arc::strong_count(ð_adapter), 2); @@ -413,6 +444,7 @@ mod tests { archive: false, traces: true, }) + .await .is_err()); // Check cheapest is not call only @@ -421,6 +453,7 @@ mod tests { archive: true, traces: false, }) + .await .unwrap(); assert_eq!(adapter.is_call_only(), false); } @@ -451,7 +484,6 @@ mod tests { #[tokio::test] async fn adapter_selector_unlimited() { let metrics = Arc::new(EndpointMetrics::mock()); - let chain = "mainnet".to_string(); let logger = graph::log::logger(true); let mock_registry = Arc::new(MetricsRegistry::mock()); let transport = Transport::new_rpc( @@ -486,32 +518,33 @@ mod tests { .await, ); - let adapters = { - let mut ethereum_networks = EthereumNetworks::new(metrics); - ethereum_networks.insert( - chain.clone(), + let adapters: EthereumNetworkAdapters = EthereumNetworkAdapters::for_testing( + vec![EthereumNetworkAdapter::new( + metrics.cheap_clone(), NodeCapabilities { archive: true, traces: false, }, eth_call_adapter.clone(), SubgraphLimit::Unlimited, - ); - ethereum_networks.insert( - chain.clone(), + )], + vec![EthereumNetworkAdapter::new( + metrics.cheap_clone(), NodeCapabilities { archive: true, traces: false, }, eth_adapter.clone(), - SubgraphLimit::Limit(3), - ); - ethereum_networks.networks.get(&chain).unwrap().clone() 
- }; + SubgraphLimit::Limit(2), + )], + ) + .await; // one reference above and one inside adapters struct assert_eq!(Arc::strong_count(ð_call_adapter), 2); assert_eq!(Arc::strong_count(ð_adapter), 2); + // verify that after all call_only were exhausted, we can still + // get normal adapters let keep: Vec> = vec![0; 10] .iter() .map(|_| adapters.call_or_cheapest(None).unwrap()) @@ -522,7 +555,6 @@ mod tests { #[tokio::test] async fn adapter_selector_disable_call_only_fallback() { let metrics = Arc::new(EndpointMetrics::mock()); - let chain = "mainnet".to_string(); let logger = graph::log::logger(true); let mock_registry = Arc::new(MetricsRegistry::mock()); let transport = Transport::new_rpc( @@ -557,28 +589,27 @@ mod tests { .await, ); - let adapters = { - let mut ethereum_networks = EthereumNetworks::new(metrics); - ethereum_networks.insert( - chain.clone(), + let adapters: EthereumNetworkAdapters = EthereumNetworkAdapters::for_testing( + vec![EthereumNetworkAdapter::new( + metrics.cheap_clone(), NodeCapabilities { archive: true, traces: false, }, eth_call_adapter.clone(), SubgraphLimit::Disabled, - ); - ethereum_networks.insert( - chain.clone(), + )], + vec![EthereumNetworkAdapter::new( + metrics.cheap_clone(), NodeCapabilities { archive: true, traces: false, }, eth_adapter.clone(), SubgraphLimit::Limit(3), - ); - ethereum_networks.networks.get(&chain).unwrap().clone() - }; + )], + ) + .await; // one reference above and one inside adapters struct assert_eq!(Arc::strong_count(ð_call_adapter), 2); assert_eq!(Arc::strong_count(ð_adapter), 2); @@ -591,7 +622,6 @@ mod tests { #[tokio::test] async fn adapter_selector_no_call_only_fallback() { let metrics = Arc::new(EndpointMetrics::mock()); - let chain = "mainnet".to_string(); let logger = graph::log::logger(true); let mock_registry = Arc::new(MetricsRegistry::mock()); let transport = Transport::new_rpc( @@ -614,19 +644,19 @@ mod tests { .await, ); - let adapters = { - let mut ethereum_networks = EthereumNetworks::new(metrics); - ethereum_networks.insert( - chain.clone(), + let adapters: EthereumNetworkAdapters = EthereumNetworkAdapters::for_testing( + vec![EthereumNetworkAdapter::new( + metrics.cheap_clone(), NodeCapabilities { archive: true, traces: false, }, eth_adapter.clone(), SubgraphLimit::Limit(3), - ); - ethereum_networks.networks.get(&chain).unwrap().clone() - }; + )], + vec![], + ) + .await; // one reference above and one inside adapters struct assert_eq!(Arc::strong_count(ð_adapter), 2); assert_eq!( @@ -654,6 +684,7 @@ mod tests { )); let logger = graph::log::logger(true); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); + let chain_id: Word = "chain_id".into(); let adapters = vec![ fake_adapter( @@ -676,10 +707,11 @@ mod tests { ]; // Set errors - metrics.report_for_test(&Provider::from(error_provider.clone()), false); + metrics.report_for_test(&ProviderName::from(error_provider.clone()), false); + + let mut no_retest_adapters = vec![]; + let mut always_retest_adapters = vec![]; - let mut no_retest_adapters = EthereumNetworkAdapters::new(Some(0f64)); - let mut always_retest_adapters = EthereumNetworkAdapters::new(Some(1f64)); adapters.iter().cloned().for_each(|adapter| { let limit = if adapter.provider() == unavailable_provider { SubgraphLimit::Disabled @@ -687,7 +719,7 @@ mod tests { SubgraphLimit::Unlimited }; - no_retest_adapters.adapters.push(EthereumNetworkAdapter { + no_retest_adapters.push(EthereumNetworkAdapter { endpoint_metrics: metrics.clone(), capabilities: NodeCapabilities { archive: 
true, @@ -696,18 +728,39 @@ mod tests { adapter: adapter.clone(), limit: limit.clone(), }); - always_retest_adapters - .adapters - .push(EthereumNetworkAdapter { - endpoint_metrics: metrics.clone(), - capabilities: NodeCapabilities { - archive: true, - traces: false, - }, - adapter, - limit, - }); + always_retest_adapters.push(EthereumNetworkAdapter { + endpoint_metrics: metrics.clone(), + capabilities: NodeCapabilities { + archive: true, + traces: false, + }, + adapter, + limit, + }); }); + let manager = ProviderManager::::new( + logger, + vec![( + chain_id.clone(), + no_retest_adapters + .iter() + .cloned() + .chain(always_retest_adapters.iter().cloned()) + .collect(), + )] + .into_iter(), + Arc::new(MockIdentValidator), + ); + manager.mark_all_valid().await; + + let no_retest_adapters = EthereumNetworkAdapters::new( + chain_id.clone(), + manager.cheap_clone(), + vec![], + Some(0f64), + ); + let always_retest_adapters = + EthereumNetworkAdapters::new(chain_id, manager.cheap_clone(), vec![], Some(1f64)); assert_eq!( no_retest_adapters @@ -715,6 +768,7 @@ mod tests { archive: true, traces: false, }) + .await .unwrap() .provider(), no_error_provider @@ -725,6 +779,7 @@ mod tests { archive: true, traces: false, }) + .await .unwrap() .provider(), error_provider @@ -748,14 +803,15 @@ mod tests { ], mock_registry.clone(), )); + let chain_id: Word = "chain_id".into(); let logger = graph::log::logger(true); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); // Set errors - metrics.report_for_test(&Provider::from(error_provider.clone()), false); + metrics.report_for_test(&ProviderName::from(error_provider.clone()), false); - let mut no_retest_adapters = EthereumNetworkAdapters::new(Some(0f64)); - no_retest_adapters.adapters.push(EthereumNetworkAdapter { + let mut no_retest_adapters = vec![]; + no_retest_adapters.push(EthereumNetworkAdapter { endpoint_metrics: metrics.clone(), capabilities: NodeCapabilities { archive: true, @@ -765,49 +821,78 @@ mod tests { .await, limit: SubgraphLimit::Unlimited, }); + + let mut always_retest_adapters = vec![]; + always_retest_adapters.push(EthereumNetworkAdapter { + endpoint_metrics: metrics.clone(), + capabilities: NodeCapabilities { + archive: true, + traces: false, + }, + adapter: fake_adapter( + &logger, + &no_error_provider, + &provider_metrics, + &metrics, + false, + ) + .await, + limit: SubgraphLimit::Unlimited, + }); + let manager = ProviderManager::::new( + logger.clone(), + always_retest_adapters + .iter() + .cloned() + .map(|a| (chain_id.clone(), vec![a])), + Arc::new(MockIdentValidator), + ); + manager.mark_all_valid().await; + + let always_retest_adapters = EthereumNetworkAdapters::new( + chain_id.clone(), + manager.cheap_clone(), + vec![], + Some(1f64), + ); assert_eq!( - no_retest_adapters + always_retest_adapters .cheapest_with(&NodeCapabilities { archive: true, traces: false, }) + .await .unwrap() .provider(), - error_provider + no_error_provider ); - let mut always_retest_adapters = EthereumNetworkAdapters::new(Some(1f64)); - always_retest_adapters - .adapters - .push(EthereumNetworkAdapter { - endpoint_metrics: metrics.clone(), - capabilities: NodeCapabilities { - archive: true, - traces: false, - }, - adapter: fake_adapter( - &logger, - &no_error_provider, - &provider_metrics, - &metrics, - false, - ) - .await, - limit: SubgraphLimit::Unlimited, - }); + let manager = ProviderManager::::new( + logger.clone(), + no_retest_adapters + .iter() + .cloned() + .map(|a| (chain_id.clone(), vec![a])), + 
Arc::new(MockIdentValidator), + ); + manager.mark_all_valid().await; + + let no_retest_adapters = + EthereumNetworkAdapters::new(chain_id.clone(), manager, vec![], Some(0f64)); assert_eq!( - always_retest_adapters + no_retest_adapters .cheapest_with(&NodeCapabilities { archive: true, traces: false, }) + .await .unwrap() .provider(), - no_error_provider + error_provider ); - let mut no_available_adapter = EthereumNetworkAdapters::default(); - no_available_adapter.adapters.push(EthereumNetworkAdapter { + let mut no_available_adapter = vec![]; + no_available_adapter.push(EthereumNetworkAdapter { endpoint_metrics: metrics.clone(), capabilities: NodeCapabilities { archive: true, @@ -823,10 +908,24 @@ mod tests { .await, limit: SubgraphLimit::Disabled, }); - let res = no_available_adapter.cheapest_with(&NodeCapabilities { - archive: true, - traces: false, - }); + let manager = ProviderManager::new( + logger, + vec![( + chain_id.clone(), + no_available_adapter.iter().cloned().collect(), + )] + .into_iter(), + Arc::new(MockIdentValidator), + ); + manager.mark_all_valid().await; + + let no_available_adapter = EthereumNetworkAdapters::new(chain_id, manager, vec![], None); + let res = no_available_adapter + .cheapest_with(&NodeCapabilities { + archive: true, + traces: false, + }) + .await; assert!(res.is_err(), "{:?}", res); } diff --git a/chain/ethereum/src/runtime/runtime_adapter.rs b/chain/ethereum/src/runtime/runtime_adapter.rs index 87cfd6b11b1..4147d61f5b0 100644 --- a/chain/ethereum/src/runtime/runtime_adapter.rs +++ b/chain/ethereum/src/runtime/runtime_adapter.rs @@ -111,7 +111,7 @@ impl blockchain::RuntimeAdapter for RuntimeAdapter { let ethereum_get_balance = HostFn { name: "ethereum.getBalance", func: Arc::new(move |ctx, wasm_ptr| { - let eth_adapter = eth_adapters.cheapest_with(&NodeCapabilities { + let eth_adapter = eth_adapters.unverified_cheapest_with(&NodeCapabilities { archive, traces: false, })?; @@ -123,7 +123,7 @@ impl blockchain::RuntimeAdapter for RuntimeAdapter { let ethereum_get_code = HostFn { name: "ethereum.hasCode", func: Arc::new(move |ctx, wasm_ptr| { - let eth_adapter = eth_adapters.cheapest_with(&NodeCapabilities { + let eth_adapter = eth_adapters.unverified_cheapest_with(&NodeCapabilities { archive, traces: false, })?; diff --git a/chain/ethereum/src/transport.rs b/chain/ethereum/src/transport.rs index f77ffe90299..1698b18e4ed 100644 --- a/chain/ethereum/src/transport.rs +++ b/chain/ethereum/src/transport.rs @@ -1,4 +1,5 @@ -use graph::endpoint::{EndpointMetrics, Provider, RequestLabels}; +use graph::components::adapter::ProviderName; +use graph::endpoint::{EndpointMetrics, RequestLabels}; use jsonrpc_core::types::Call; use jsonrpc_core::Value; @@ -15,7 +16,7 @@ pub enum Transport { RPC { client: http::Http, metrics: Arc, - provider: Provider, + provider: ProviderName, }, IPC(ipc::Ipc), WS(ws::WebSocket), diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index a5b98cfaf01..283552e7f33 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -7,6 +7,7 @@ use graph::blockchain::{ NoopRuntimeAdapter, }; use graph::cheap_clone::CheapClone; +use graph::components::adapter::ChainId; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::env::EnvVars; @@ -160,7 +161,7 @@ impl BlockStreamBuilder for NearStreamBuilder { pub struct Chain { logger_factory: LoggerFactory, - name: String, + name: ChainId, client: Arc>, chain_store: Arc, metrics_registry: Arc, @@ -174,8 +175,9 @@ impl 
std::fmt::Debug for Chain { } } +#[async_trait] impl BlockchainBuilder for BasicBlockchainBuilder { - fn build(self, config: &Arc) -> Chain { + async fn build(self, config: &Arc) -> Chain { Chain { logger_factory: self.logger_factory, name: self.name, @@ -279,7 +281,7 @@ impl Blockchain for Chain { logger: &Logger, number: BlockNumber, ) -> Result { - let firehose_endpoint = self.client.firehose_endpoint()?; + let firehose_endpoint = self.client.firehose_endpoint().await?; firehose_endpoint .block_ptr_for_number::(logger, number) @@ -287,15 +289,15 @@ impl Blockchain for Chain { .await } - fn runtime(&self) -> (Arc>, Self::DecoderHook) { - (Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook) + fn runtime(&self) -> anyhow::Result<(Arc>, Self::DecoderHook)> { + Ok((Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook)) } fn chain_client(&self) -> Arc> { self.client.clone() } - fn block_ingestor(&self) -> anyhow::Result> { + async fn block_ingestor(&self) -> anyhow::Result> { let ingestor = FirehoseBlockIngestor::::new( self.chain_store.cheap_clone(), self.chain_client(), diff --git a/chain/near/src/trigger.rs b/chain/near/src/trigger.rs index dc39ba236fd..364b9061038 100644 --- a/chain/near/src/trigger.rs +++ b/chain/near/src/trigger.rs @@ -15,6 +15,7 @@ use crate::codec; // Logging the block is too verbose, so this strips the block from the trigger for Debug. impl std::fmt::Debug for NearTrigger { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + #[allow(unused)] #[derive(Debug)] pub enum MappingTriggerWithoutBlock<'a> { Block, diff --git a/chain/starknet/src/chain.rs b/chain/starknet/src/chain.rs index b83425218e3..cd10af5f965 100644 --- a/chain/starknet/src/chain.rs +++ b/chain/starknet/src/chain.rs @@ -14,7 +14,10 @@ use graph::{ RuntimeAdapter as RuntimeAdapterTrait, }, cheap_clone::CheapClone, - components::store::{DeploymentCursorTracker, DeploymentLocator}, + components::{ + adapter::ChainId, + store::{DeploymentCursorTracker, DeploymentLocator}, + }, data::subgraph::UnifiedMappingApiVersion, env::EnvVars, firehose::{self, FirehoseEndpoint, ForkStep}, @@ -40,7 +43,7 @@ use crate::{ pub struct Chain { logger_factory: LoggerFactory, - name: String, + name: ChainId, client: Arc>, chain_store: Arc, metrics_registry: Arc, @@ -56,8 +59,9 @@ pub struct FirehoseMapper { pub struct TriggersAdapter; +#[async_trait] impl BlockchainBuilder for BasicBlockchainBuilder { - fn build(self, _config: &Arc) -> Chain { + async fn build(self, _config: &Arc) -> Chain { Chain { logger_factory: self.logger_factory, name: self.name, @@ -148,7 +152,7 @@ impl Blockchain for Chain { logger: &Logger, number: BlockNumber, ) -> Result { - let firehose_endpoint = self.client.firehose_endpoint()?; + let firehose_endpoint = self.client.firehose_endpoint().await?; firehose_endpoint .block_ptr_for_number::(logger, number) @@ -156,15 +160,17 @@ impl Blockchain for Chain { .await } - fn runtime(&self) -> (Arc>, Self::DecoderHook) { - (Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook) + fn runtime( + &self, + ) -> graph::anyhow::Result<(Arc>, Self::DecoderHook)> { + Ok((Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook)) } fn chain_client(&self) -> Arc> { self.client.clone() } - fn block_ingestor(&self) -> Result> { + async fn block_ingestor(&self) -> Result> { let ingestor = FirehoseBlockIngestor::::new( self.chain_store.cheap_clone(), self.chain_client(), diff --git a/chain/substreams/examples/substreams.rs b/chain/substreams/examples/substreams.rs index 
a0abfba6082..7377ed8585d 100644 --- a/chain/substreams/examples/substreams.rs +++ b/chain/substreams/examples/substreams.rs @@ -3,7 +3,7 @@ use graph::blockchain::block_stream::{BlockStreamEvent, FirehoseCursor}; use graph::blockchain::client::ChainClient; use graph::blockchain::substreams_block_stream::SubstreamsBlockStream; use graph::endpoint::EndpointMetrics; -use graph::firehose::{FirehoseEndpoints, SubgraphLimit}; +use graph::firehose::{FirehoseEndpoints, NoopGenesisDecoder, SubgraphLimit}; use graph::prelude::{info, tokio, DeploymentHash, MetricsRegistry, Registry}; use graph::tokio_stream::StreamExt; use graph::{env::env_var, firehose::FirehoseEndpoint, log::logger, substreams}; @@ -57,11 +57,12 @@ async fn main() -> Result<(), Error> { false, SubgraphLimit::Unlimited, Arc::new(endpoint_metrics), + NoopGenesisDecoder::boxed(), )); - let client = Arc::new(ChainClient::new_firehose(FirehoseEndpoints::from(vec![ - firehose, - ]))); + let client = Arc::new(ChainClient::new_firehose(FirehoseEndpoints::for_testing( + vec![firehose], + ))); let mut stream: SubstreamsBlockStream = SubstreamsBlockStream::new( diff --git a/chain/substreams/src/block_ingestor.rs b/chain/substreams/src/block_ingestor.rs index eba52516fc8..eee86b21299 100644 --- a/chain/substreams/src/block_ingestor.rs +++ b/chain/substreams/src/block_ingestor.rs @@ -3,9 +3,11 @@ use std::{sync::Arc, time::Duration}; use crate::mapper::Mapper; use anyhow::{Context, Error}; use graph::blockchain::block_stream::{BlockStreamError, FirehoseCursor}; +use graph::blockchain::BlockchainKind; use graph::blockchain::{ client::ChainClient, substreams_block_stream::SubstreamsBlockStream, BlockIngestor, }; +use graph::components::adapter::ChainId; use graph::prelude::MetricsRegistry; use graph::slog::trace; use graph::substreams::Package; @@ -27,7 +29,7 @@ pub struct SubstreamsBlockIngestor { chain_store: Arc, client: Arc>, logger: Logger, - chain_name: String, + chain_name: ChainId, metrics: Arc, } @@ -36,7 +38,7 @@ impl SubstreamsBlockIngestor { chain_store: Arc, client: Arc>, logger: Logger, - chain_name: String, + chain_name: ChainId, metrics: Arc, ) -> SubstreamsBlockIngestor { SubstreamsBlockIngestor { @@ -192,7 +194,10 @@ impl BlockIngestor for SubstreamsBlockIngestor { } } - fn network_name(&self) -> String { + fn network_name(&self) -> ChainId { self.chain_name.clone() } + fn kind(&self) -> BlockchainKind { + BlockchainKind::Substreams + } } diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs index a871d813e08..fc9f6e3f7fd 100644 --- a/chain/substreams/src/chain.rs +++ b/chain/substreams/src/chain.rs @@ -8,7 +8,6 @@ use graph::blockchain::{ }; use graph::components::store::DeploymentCursorTracker; use graph::env::EnvVars; -use graph::firehose::FirehoseEndpoints; use graph::prelude::{BlockHash, CheapClone, Entity, LoggerFactory, MetricsRegistry}; use graph::schema::EntityKey; use graph::{ @@ -76,14 +75,14 @@ pub struct Chain { impl Chain { pub fn new( logger_factory: LoggerFactory, - firehose_endpoints: FirehoseEndpoints, + chain_client: Arc>, metrics_registry: Arc, chain_store: Arc, block_stream_builder: Arc>, ) -> Self { Self { logger_factory, - client: Arc::new(ChainClient::new_firehose(firehose_endpoints)), + client: chain_client, metrics_registry, chain_store, block_stream_builder, @@ -181,27 +180,28 @@ impl Blockchain for Chain { number, }) } - fn runtime(&self) -> (Arc>, Self::DecoderHook) { - (Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook) + fn runtime(&self) -> anyhow::Result<(Arc>, 
Self::DecoderHook)> { + Ok((Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook)) } fn chain_client(&self) -> Arc> { self.client.clone() } - fn block_ingestor(&self) -> anyhow::Result> { + async fn block_ingestor(&self) -> anyhow::Result> { Ok(Box::new(SubstreamsBlockIngestor::new( self.chain_store.cheap_clone(), self.client.cheap_clone(), self.logger_factory.component_logger("", None), - "substreams".to_string(), + "substreams".into(), self.metrics_registry.cheap_clone(), ))) } } +#[async_trait] impl blockchain::BlockchainBuilder for BasicBlockchainBuilder { - fn build(self, _config: &Arc) -> Chain { + async fn build(self, _config: &Arc) -> Chain { let BasicBlockchainBuilder { logger_factory, name: _, diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index 223b855d132..c98641539d9 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -11,6 +11,7 @@ use graph::blockchain::{Blockchain, BlockchainKind, DataSource, NodeCapabilities use graph::components::metrics::gas::GasMetrics; use graph::components::subgraph::ProofOfIndexingVersion; use graph::data::subgraph::{UnresolvedSubgraphManifest, SPEC_VERSION_0_0_6}; +use graph::data::value::Word; use graph::data_source::causality_region::CausalityRegionSeq; use graph::env::EnvVars; use graph::prelude::{SubgraphInstanceManager as SubgraphInstanceManagerTrait, *}; @@ -307,7 +308,7 @@ impl SubgraphInstanceManager { .collect::>(); let required_capabilities = C::NodeCapabilities::from_data_sources(&onchain_data_sources); - let network = manifest.network_name(); + let network: Word = manifest.network_name().into(); let chain = self .chains @@ -390,7 +391,7 @@ impl SubgraphInstanceManager { let deployment_head = store.block_ptr().map(|ptr| ptr.number).unwrap_or(0) as f64; block_stream_metrics.deployment_head.set(deployment_head); - let (runtime_adapter, decoder_hook) = chain.runtime(); + let (runtime_adapter, decoder_hook) = chain.runtime()?; let host_builder = graph_runtime_wasm::RuntimeHostBuilder::new( runtime_adapter, self.link_resolver.cheap_clone(), @@ -426,7 +427,7 @@ impl SubgraphInstanceManager { unified_api_version, static_filters: self.static_filters, poi_version, - network, + network: network.to_string(), instrument, }; diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index 715f06d71b4..fe80d118457 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -9,6 +9,7 @@ use graph::components::store::{DeploymentId, DeploymentLocator, SubscriptionMana use graph::components::subgraph::Settings; use graph::data::subgraph::schema::DeploymentCreate; use graph::data::subgraph::Graft; +use graph::data::value::Word; use graph::futures01; use graph::futures01::future; use graph::futures01::stream; @@ -643,7 +644,7 @@ async fn create_subgraph_version( .await .map_err(SubgraphRegistrarError::ManifestValidationError)?; - let network_name = manifest.network_name(); + let network_name: Word = manifest.network_name().into(); let chain = chains .get::(network_name.clone()) @@ -726,7 +727,7 @@ async fn create_subgraph_version( &manifest.schema, deployment, node_id, - network_name, + network_name.into(), version_switching_mode, ) .map_err(SubgraphRegistrarError::SubgraphDeploymentError) diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 1fcc22c7952..c78c2eb2194 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -28,7 +28,7 @@ services: volumes: - ./data/ipfs:/data/ipfs:Z 
postgres: - image: postgres:14 + image: postgres ports: - '5432:5432' command: diff --git a/graph/src/blockchain/builder.rs b/graph/src/blockchain/builder.rs index 3ea1464a2a3..07046d62e71 100644 --- a/graph/src/blockchain/builder.rs +++ b/graph/src/blockchain/builder.rs @@ -1,6 +1,8 @@ +use tonic::async_trait; + use super::Blockchain; use crate::{ - components::store::ChainStore, env::EnvVars, firehose::FirehoseEndpoints, + components::store::ChainStore, data::value::Word, env::EnvVars, firehose::FirehoseEndpoints, prelude::LoggerFactory, prelude::MetricsRegistry, }; use std::sync::Arc; @@ -9,16 +11,17 @@ use std::sync::Arc; /// particularly fancy builder logic. pub struct BasicBlockchainBuilder { pub logger_factory: LoggerFactory, - pub name: String, + pub name: Word, pub chain_store: Arc, pub firehose_endpoints: FirehoseEndpoints, pub metrics_registry: Arc, } /// Something that can build a [`Blockchain`]. +#[async_trait] pub trait BlockchainBuilder where C: Blockchain, { - fn build(self, config: &Arc) -> C; + async fn build(self, config: &Arc) -> C; } diff --git a/graph/src/blockchain/client.rs b/graph/src/blockchain/client.rs index 4f853569e87..8d83536b577 100644 --- a/graph/src/blockchain/client.rs +++ b/graph/src/blockchain/client.rs @@ -17,20 +17,11 @@ pub enum ChainClient { impl ChainClient { pub fn new_firehose(firehose_endpoints: FirehoseEndpoints) -> Self { - Self::new(firehose_endpoints, C::Client::default()) + Self::Firehose(firehose_endpoints) } - pub fn new(firehose_endpoints: FirehoseEndpoints, adapters: C::Client) -> Self { - // If we can get a firehose endpoint then we should prioritise it. - // the reason we want to test this by getting an adapter is because - // adapter limits in the configuration can effectively disable firehose - // by setting a limit to 0. - // In this case we should fallback to an rpc client. 
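// That implicit firehose-vs-rpc detection is dropped; callers now pick the
// variant explicitly. Sketch (type parameters elided):
//
//     let client = Arc::new(ChainClient::new_firehose(endpoints));
//     // or, for an RPC-backed chain:
//     let client = Arc::new(ChainClient::new_rpc(adapters));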
- let firehose_available = firehose_endpoints.endpoint().is_ok(); - match firehose_available { - true => Self::Firehose(firehose_endpoints), - false => Self::Rpc(adapters), - } + pub fn new_rpc(rpc: C::Client) -> Self { + Self::Rpc(rpc) } pub fn is_firehose(&self) -> bool { @@ -40,9 +31,9 @@ impl ChainClient { } } - pub fn firehose_endpoint(&self) -> anyhow::Result> { + pub async fn firehose_endpoint(&self) -> anyhow::Result> { match self { - ChainClient::Firehose(endpoints) => endpoints.endpoint(), + ChainClient::Firehose(endpoints) => endpoints.endpoint().await, _ => Err(anyhow!("firehose endpoint requested on rpc chain client")), } } diff --git a/graph/src/blockchain/firehose_block_ingestor.rs b/graph/src/blockchain/firehose_block_ingestor.rs index 23f59b3cd22..b691179116d 100644 --- a/graph/src/blockchain/firehose_block_ingestor.rs +++ b/graph/src/blockchain/firehose_block_ingestor.rs @@ -2,7 +2,7 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; use crate::{ blockchain::Block as BlockchainBlock, - components::store::ChainStore, + components::{adapter::ChainId, store::ChainStore}, firehose::{self, decode_firehose_block, HeaderOnly}, prelude::{error, info, Logger}, util::backoff::ExponentialBackoff, @@ -15,7 +15,7 @@ use prost_types::Any; use slog::{o, trace}; use tonic::Streaming; -use super::{client::ChainClient, BlockIngestor, Blockchain}; +use super::{client::ChainClient, BlockIngestor, Blockchain, BlockchainKind}; const TRANSFORM_ETHEREUM_HEADER_ONLY: &str = "type.googleapis.com/sf.ethereum.transform.v1.HeaderOnly"; @@ -43,7 +43,7 @@ where client: Arc>, logger: Logger, default_transforms: Vec, - chain_name: String, + chain_name: ChainId, phantom: PhantomData, } @@ -56,7 +56,7 @@ where chain_store: Arc, client: Arc>, logger: Logger, - chain_name: String, + chain_name: ChainId, ) -> FirehoseBlockIngestor { FirehoseBlockIngestor { chain_store, @@ -169,7 +169,7 @@ where ExponentialBackoff::new(Duration::from_millis(250), Duration::from_secs(30)); loop { - let endpoint = match self.client.firehose_endpoint() { + let endpoint = match self.client.firehose_endpoint().await { Ok(endpoint) => endpoint, Err(err) => { error!( @@ -182,7 +182,7 @@ where }; let logger = self.logger.new( - o!("provider" => endpoint.provider.to_string(), "network_name"=> self.network_name()), + o!("provider" => endpoint.provider.to_string(), "network_name"=> self.network_name().to_string()), ); info!( @@ -226,7 +226,11 @@ where } } - fn network_name(&self) -> String { + fn network_name(&self) -> ChainId { self.chain_name.clone() } + + fn kind(&self) -> BlockchainKind { + C::KIND + } } diff --git a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index 159eca7666b..254ccd42f82 100644 --- a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -214,7 +214,7 @@ fn stream_blocks>( try_stream! 
{ loop { - let endpoint = client.firehose_endpoint()?; + let endpoint = client.firehose_endpoint().await?; let logger = logger.new(o!("deployment" => deployment.clone(), "provider" => endpoint.provider.to_string())); info!( diff --git a/graph/src/blockchain/mock.rs b/graph/src/blockchain/mock.rs index 87d20a236d0..99ca7eb3db6 100644 --- a/graph/src/blockchain/mock.rs +++ b/graph/src/blockchain/mock.rs @@ -1,4 +1,5 @@ use crate::{ + bail, components::{ link_resolver::LinkResolver, store::{BlockNumber, DeploymentCursorTracker, DeploymentLocator}, @@ -372,15 +373,17 @@ impl Blockchain for MockBlockchain { todo!() } - fn runtime(&self) -> (std::sync::Arc>, Self::DecoderHook) { - todo!() + fn runtime( + &self, + ) -> anyhow::Result<(std::sync::Arc>, Self::DecoderHook)> { + bail!("mock has no runtime adapter") } fn chain_client(&self) -> Arc> { todo!() } - fn block_ingestor(&self) -> anyhow::Result> { + async fn block_ingestor(&self) -> anyhow::Result> { todo!() } } diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index 68776c70ce5..1e09c73dca3 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -18,6 +18,7 @@ mod types; use crate::{ cheap_clone::CheapClone, components::{ + adapter::ChainId, metrics::subgraph::SubgraphInstanceMetrics, store::{DeploymentCursorTracker, DeploymentLocator, StoredDynamicDataSource}, subgraph::{HostMetrics, InstanceDSTemplateInfo, MappingError}, @@ -37,7 +38,7 @@ use async_trait::async_trait; use graph_derive::CheapClone; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; -use slog::Logger; +use slog::{error, Logger}; use std::{ any::Any, collections::{HashMap, HashSet}, @@ -61,7 +62,8 @@ use self::{ #[async_trait] pub trait BlockIngestor: 'static + Send + Sync { async fn run(self: Box); - fn network_name(&self) -> String; + fn network_name(&self) -> ChainId; + fn kind(&self) -> BlockchainKind; } pub trait TriggersAdapterSelector: Sync + Send { @@ -147,7 +149,7 @@ pub trait Blockchain: Debug + Sized + Send + Sync + Unpin + 'static { const KIND: BlockchainKind; const ALIASES: &'static [&'static str] = &[]; - type Client: Debug + Default + Sync + Send; + type Client: Debug + Sync + Send; // The `Clone` bound is used when reprocessing a block, because `triggers_in_block` requires an // owned `Block`. It would be good to come up with a way to remove this bound. type Block: Block + Clone + Debug + Default; @@ -207,11 +209,11 @@ pub trait Blockchain: Debug + Sized + Send + Sync + Unpin + 'static { fn is_refetch_block_required(&self) -> bool; - fn runtime(&self) -> (Arc>, Self::DecoderHook); + fn runtime(&self) -> anyhow::Result<(Arc>, Self::DecoderHook)>; fn chain_client(&self) -> Arc>; - fn block_ingestor(&self) -> anyhow::Result>; + async fn block_ingestor(&self) -> anyhow::Result>; } #[derive(Error, Debug)] @@ -510,18 +512,42 @@ impl BlockchainKind { /// A collection of blockchains, keyed by `BlockchainKind` and network. 
#[derive(Default, Debug, Clone)] -pub struct BlockchainMap(HashMap<(BlockchainKind, String), Arc>); +pub struct BlockchainMap(HashMap<(BlockchainKind, ChainId), Arc>); impl BlockchainMap { pub fn new() -> Self { Self::default() } - pub fn insert(&mut self, network: String, chain: Arc) { + pub fn iter( + &self, + ) -> impl Iterator)> { + self.0.iter() + } + + pub fn insert(&mut self, network: ChainId, chain: Arc) { self.0.insert((C::KIND, network), chain); } - pub fn get(&self, network: String) -> Result, Error> { + pub fn get_all_by_kind( + &self, + kind: BlockchainKind, + ) -> Result>, Error> { + self.0 + .iter() + .flat_map(|((k, _), chain)| { + if k.eq(&kind) { + Some(chain.cheap_clone().downcast().map_err(|_| { + anyhow!("unable to downcast, wrong type for blockchain {}", C::KIND) + })) + } else { + None + } + }) + .collect::>, Error>>() + } + + pub fn get(&self, network: ChainId) -> Result, Error> { self.0 .get(&(C::KIND, network.clone())) .with_context(|| format!("no network {} found on chain {}", network, C::KIND))? diff --git a/graph/src/blockchain/substreams_block_stream.rs b/graph/src/blockchain/substreams_block_stream.rs index 284cdb348c8..7121692fddf 100644 --- a/graph/src/blockchain/substreams_block_stream.rs +++ b/graph/src/blockchain/substreams_block_stream.rs @@ -194,7 +194,7 @@ fn stream_blocks>( )))?; } - let endpoint = client.firehose_endpoint()?; + let endpoint = client.firehose_endpoint().await?; let mut logger = logger.new(o!("deployment" => deployment.clone(), "provider" => endpoint.provider.to_string())); loop { diff --git a/graph/src/blockchain/types.rs b/graph/src/blockchain/types.rs index ae5505dd30b..931e52e2dd5 100644 --- a/graph/src/blockchain/types.rs +++ b/graph/src/blockchain/types.rs @@ -333,6 +333,21 @@ pub struct ChainIdentifier { pub genesis_block_hash: BlockHash, } +impl ChainIdentifier { + pub fn is_default(&self) -> bool { + ChainIdentifier::default().eq(self) + } +} + +impl Default for ChainIdentifier { + fn default() -> Self { + Self { + net_version: String::default(), + genesis_block_hash: BlockHash::from(H256::zero()), + } + } +} + impl fmt::Display for ChainIdentifier { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( diff --git a/graph/src/components/adapter.rs b/graph/src/components/adapter.rs new file mode 100644 index 00000000000..2622ff8100b --- /dev/null +++ b/graph/src/components/adapter.rs @@ -0,0 +1,886 @@ +use std::{ + collections::HashMap, + ops::{Add, Deref}, + sync::Arc, +}; + +use async_trait::async_trait; +use chrono::{DateTime, Duration, Utc}; + +use itertools::Itertools; +use slog::{o, warn, Discard, Logger}; +use thiserror::Error; + +use crate::{ + blockchain::{BlockHash, ChainIdentifier}, + cheap_clone::CheapClone, + data::value::Word, + prelude::error, + tokio::sync::RwLock, +}; + +use crate::components::store::{BlockStore as BlockStoreTrait, ChainStore as ChainStoreTrait}; + +const VALIDATION_ATTEMPT_TTL: Duration = Duration::minutes(5); + +#[derive(Debug, Error)] +pub enum ProviderManagerError { + #[error("unknown error {0}")] + Unknown(#[from] anyhow::Error), + #[error("provider {provider} on chain {chain_id} failed verification, expected ident {expected}, got {actual}")] + ProviderFailedValidation { + chain_id: ChainId, + provider: ProviderName, + expected: ChainIdentifier, + actual: ChainIdentifier, + }, + #[error("no providers available for chain {0}")] + NoProvidersAvailable(ChainId), + #[error("all providers for chain_id {0} have failed")] + AllProvidersFailed(ChainId), +} + +#[async_trait] +pub trait 
NetIdentifiable: Sync + Send { + async fn net_identifiers(&self) -> Result; + fn provider_name(&self) -> ProviderName; +} + +#[async_trait] +impl NetIdentifiable for Arc { + async fn net_identifiers(&self) -> Result { + self.as_ref().net_identifiers().await + } + fn provider_name(&self) -> ProviderName { + self.as_ref().provider_name() + } +} + +pub type ProviderName = Word; +pub type ChainId = Word; + +#[derive(Default, Debug, Clone, PartialEq, Eq, Hash)] +struct Ident { + provider: ProviderName, + chain_id: ChainId, +} + +#[derive(Error, Debug, Clone)] +pub enum IdentValidatorError { + #[error("database error: {0}")] + UnknownError(String), + #[error("Store ident wasn't set")] + UnsetIdent, + #[error("the net version for chain {chain_id} has changed from {store_net_version} to {chain_net_version} since the last time we ran")] + ChangedNetVersion { + chain_id: ChainId, + store_net_version: String, + chain_net_version: String, + }, + #[error("the genesis block hash for chain {chain_id} has changed from {store_hash} to {chain_hash} since the last time we ran")] + ChangedHash { + chain_id: ChainId, + store_hash: BlockHash, + chain_hash: BlockHash, + }, + #[error("unable to get store for chain {0}")] + UnavailableStore(ChainId), +} + +impl From for IdentValidatorError { + fn from(value: anyhow::Error) -> Self { + IdentValidatorError::UnknownError(value.to_string()) + } +} + +#[async_trait] +/// IdentValidator validates that the provided chain ident matches the expected value for a certain +/// chain_id. This is probably only going to matter for the ChainStore but this allows us to decouple +/// all the trait bounds and database integration from the ProviderManager and tests. +pub trait IdentValidator: Sync + Send { + fn check_ident( + &self, + chain_id: &ChainId, + ident: &ChainIdentifier, + ) -> Result<(), IdentValidatorError>; + + fn update_ident( + &self, + chain_id: &ChainId, + ident: &ChainIdentifier, + ) -> Result<(), anyhow::Error>; +} + +impl> IdentValidator for B { + fn check_ident( + &self, + chain_id: &ChainId, + ident: &ChainIdentifier, + ) -> Result<(), IdentValidatorError> { + let network_chain = self + .chain_store(&chain_id) + .ok_or_else(|| IdentValidatorError::UnavailableStore(chain_id.clone()))?; + let store_ident = network_chain + .chain_identifier() + .map_err(IdentValidatorError::from)?; + + if store_ident == ChainIdentifier::default() { + return Err(IdentValidatorError::UnsetIdent); + } + + if store_ident.net_version != ident.net_version { + // This behavior is preserved from the previous implementation: firehose does not provide + // a net_version, so switching to and from firehose will cause this value to differ. + // We prioritise rpc when creating the chain, but it's possible that it was created by firehose. + // Firehose always returns 0 for net_version, so we need to allow switching between the two.
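+            // Editorial sketch, not part of this change, with hypothetical values, showing
+            // the transitions this rule tolerates:
+            //   store "0",   chain "123" -> ok (stored ident was written via firehose)
+            //   store "123", chain "0"   -> ok (current provider is firehose)
+            //   store "123", chain "456" -> Err(ChangedNetVersion)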
+ if store_ident.net_version != "0" && ident.net_version != "0" { + return Err(IdentValidatorError::ChangedNetVersion { + chain_id: chain_id.clone(), + store_net_version: store_ident.net_version.clone(), + chain_net_version: ident.net_version.clone(), + }); + } + } + + let store_hash = &store_ident.genesis_block_hash; + let chain_hash = &ident.genesis_block_hash; + if store_hash != chain_hash { + return Err(IdentValidatorError::ChangedHash { + chain_id: chain_id.clone(), + store_hash: store_hash.clone(), + chain_hash: chain_hash.clone(), + }); + } + + return Ok(()); + } + + fn update_ident( + &self, + chain_id: &ChainId, + ident: &ChainIdentifier, + ) -> Result<(), anyhow::Error> { + let network_chain = self + .chain_store(&chain_id) + .ok_or_else(|| IdentValidatorError::UnavailableStore(chain_id.clone()))?; + + network_chain.set_chain_identifier(ident)?; + + Ok(()) + } +} + +pub struct MockIdentValidator; + +impl IdentValidator for MockIdentValidator { + fn check_ident( + &self, + _chain_id: &ChainId, + _ident: &ChainIdentifier, + ) -> Result<(), IdentValidatorError> { + Ok(()) + } + + fn update_ident( + &self, + _chain_id: &ChainId, + _ident: &ChainIdentifier, + ) -> Result<(), anyhow::Error> { + Ok(()) + } +} + +/// ProviderManager maintains a list of providers which have had their +/// ChainIdentifiers checked. The first identifier is considered correct; if a later +/// provider for the same chain offers a different ChainIdentifier, this will be considered a +/// failed validation and the provider will be disabled. +#[derive(Clone, Debug)] +pub struct ProviderManager { + inner: Arc>, +} + +impl CheapClone for ProviderManager { + fn cheap_clone(&self) -> Self { + Self { + inner: self.inner.cheap_clone(), + } + } +} + +impl Default for ProviderManager { + fn default() -> Self { + Self { + inner: Arc::new(Inner { + logger: Logger::root(Discard, o!()), + adapters: HashMap::default(), + status: vec![], + validator: Arc::new(MockIdentValidator {}), + }), + } + } +} + +impl ProviderManager { + pub fn new( + logger: Logger, + adapters: impl Iterator)>, + validator: Arc, + ) -> Self { + let mut status: Vec<(Ident, RwLock)> = Vec::new(); + + let adapters = HashMap::from_iter(adapters.map(|(chain_id, adapters)| { + let adapters = adapters + .into_iter() + .map(|adapter| { + let name = adapter.provider_name(); + + // Get status index or add new status.
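+                    // Note that the lookup below matches on provider name only, so a
+                    // provider that serves multiple chains shares a single status slot,
+                    // keyed by the first chain it was registered with.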
+ let index = match status + .iter() + .find_position(|(ident, _)| ident.provider.eq(&name)) + { + Some((index, _)) => index, + None => { + status.push(( + Ident { + provider: name, + chain_id: chain_id.clone(), + }, + RwLock::new(GenesisCheckStatus::NotChecked), + )); + status.len() - 1 + } + }; + (index, adapter) + }) + .collect_vec(); + + (chain_id, adapters) + })); + + Self { + inner: Arc::new(Inner { + logger, + adapters, + status, + validator, + }), + } + } + + pub fn len(&self, chain_id: &ChainId) -> usize { + self.inner + .adapters + .get(chain_id) + .map(|a| a.len()) + .unwrap_or_default() + } + + #[cfg(debug_assertions)] + pub async fn mark_all_valid(&self) { + for (_, status) in self.inner.status.iter() { + let mut s = status.write().await; + *s = GenesisCheckStatus::Valid; + } + } + + async fn verify(&self, adapters: &Vec<(usize, T)>) -> Result<(), ProviderManagerError> { + let mut tasks = vec![]; + + for (index, adapter) in adapters.into_iter() { + let inner = self.inner.cheap_clone(); + let adapter = adapter.clone(); + let index = *index; + tasks.push(inner.verify_provider(index, adapter)); + } + + crate::futures03::future::join_all(tasks) + .await + .into_iter() + .collect::, ProviderManagerError>>()?; + + Ok(()) + } + + /// get_all_unverified is an escape hatch for places where checking the adapter status is + /// undesirable or just can't be done because async can't be used. This function just returns + /// the stored adapters and doesn't try to perform any verification. It will also return + /// adapters that failed verification. For the most part this should be fine since ideally + /// get_all would have been used before. Nevertheless, it is possible that a misconfigured + /// adapter is returned from this list even after validation. + pub fn get_all_unverified(&self, chain_id: &ChainId) -> Result, ProviderManagerError> { + Ok(self + .inner + .adapters + .get(chain_id) + .map(|v| v.iter().map(|v| &v.1).collect()) + .unwrap_or_default()) + } + + /// get_all will trigger the verification of the endpoints for the provided chain_id, hence the + /// async. If this is undesirable, check `get_all_unverified` as an alternative that does not + /// trigger the validation but also does not guarantee that any adapters have been validated. + pub async fn get_all(&self, chain_id: &ChainId) -> Result, ProviderManagerError> { + tokio::time::timeout(std::time::Duration::from_secs(5), async move { + let adapters = match self.inner.adapters.get(chain_id) { + Some(adapters) if !adapters.is_empty() => adapters, + _ => return Ok(vec![]), + }; + + // Optimistic check + if self.inner.is_all_verified(&adapters).await { + return Ok(adapters.iter().map(|v| &v.1).collect()); + } + + match self.verify(adapters).await { + Ok(_) => {} + Err(error) => error!( + self.inner.logger, + "unable to verify genesis for adapter: {}", + error.to_string() + ), + } + + self.inner.get_verified_for_chain(&chain_id).await + }) + .await + .map_err(|_| crate::anyhow::anyhow!("timed out, validation took too long"))? + } +} + +struct Inner { + logger: Logger, + // Most operations start by getting the value so we keep track of the index to minimize the + // locked surface. + adapters: HashMap>, + // Status per (ChainId, ProviderName) pair. The RwLock here helps prevent multiple concurrent + // checks for the same provider: when one provider is being checked, all other uses will wait. + // This is correct because no provider should be used until it has been validated.
+ // There shouldn't be many values here, so Vec is fine even if less ergonomic. Because we track + // the index alongside the adapter, lookups should be O(1) after initialization. + status: Vec<(Ident, RwLock)>, + // Validator used to compare the existing identifier to the one returned by an adapter. + validator: Arc, +} + +impl std::fmt::Debug for Inner { + fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Ok(()) + } +} + +impl Inner { + async fn is_all_verified(&self, adapters: &Vec<(usize, T)>) -> bool { + for (index, _) in adapters.iter() { + let status = self.status.get(*index).unwrap().1.read().await; + if *status != GenesisCheckStatus::Valid { + return false; + } + } + + true + } + + /// Returns any adapters that have been validated, or empty if none are defined, or an error if + /// all adapters have failed or are unavailable. Different errors are returned for these use cases + /// so that the caller can handle the different situations, as one is permanent and the other + /// is retryable. + async fn get_verified_for_chain( + &self, + chain_id: &ChainId, + ) -> Result, ProviderManagerError> { + let mut out = vec![]; + let adapters = match self.adapters.get(chain_id) { + Some(adapters) if !adapters.is_empty() => adapters, + _ => return Ok(vec![]), + }; + + let mut failed = 0; + for (index, adapter) in adapters.iter() { + let status = self.status.get(*index).unwrap().1.read().await; + match status.deref() { + GenesisCheckStatus::Valid => {} + GenesisCheckStatus::Failed => { + failed += 1; + continue; + } + GenesisCheckStatus::NotChecked | GenesisCheckStatus::TemporaryFailure { .. } => { + continue + } + } + out.push(adapter); + } + + if out.is_empty() { + if failed == adapters.len() { + return Err(ProviderManagerError::AllProvidersFailed(chain_id.clone())); + } + + return Err(ProviderManagerError::NoProvidersAvailable(chain_id.clone())); + } + + Ok(out) + } + + async fn get_ident_status(&self, index: usize) -> (Ident, GenesisCheckStatus) { + match self.status.get(index) { + Some(status) => (status.0.clone(), status.1.read().await.clone()), + None => (Ident::default(), GenesisCheckStatus::Failed), + } + } + + fn ttl_has_elapsed(checked_at: &DateTime) -> bool { + checked_at.add(VALIDATION_ATTEMPT_TTL) < Utc::now() + } + + fn should_verify(status: &GenesisCheckStatus) -> bool { + match status { + GenesisCheckStatus::TemporaryFailure { checked_at } + if Self::ttl_has_elapsed(checked_at) => + { + true + } + // Let's check the provider + GenesisCheckStatus::NotChecked => true, + _ => false, + } + } + + async fn verify_provider( + self: Arc>, + index: usize, + adapter: T, + ) -> Result<(), ProviderManagerError> { + let (ident, status) = self.get_ident_status(index).await; + if !Self::should_verify(&status) { + return Ok(()); + } + + let mut status = self.status.get(index).unwrap().1.write().await; + // double check nothing has changed.
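+        // Editorial note: this is the classic double-checked locking pattern. Between
+        // the optimistic read in `get_ident_status` and acquiring the write lock, a
+        // concurrent task may already have verified this provider, so the status is
+        // re-checked under the write lock before any network calls are made.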
+ if !Self::should_verify(&status) { + return Ok(()); + } + + let chain_ident = match adapter.net_identifiers().await { + Ok(ident) => ident, + Err(err) => { + error!( + &self.logger, + "failed to get net identifiers: {}", + err.to_string() + ); + *status = GenesisCheckStatus::TemporaryFailure { + checked_at: Utc::now(), + }; + + return Err(err.into()); + } + }; + + match self.validator.check_ident(&ident.chain_id, &chain_ident) { + Ok(_) => { + *status = GenesisCheckStatus::Valid; + } + Err(err) => match err { + IdentValidatorError::UnsetIdent => { + self.validator + .update_ident(&ident.chain_id, &chain_ident) + .map_err(ProviderManagerError::from)?; + *status = GenesisCheckStatus::Valid; + } + IdentValidatorError::ChangedNetVersion { + chain_id, + store_net_version, + chain_net_version, + } if store_net_version == "0" => { + warn!(self.logger, + "the net version for chain {} has changed from 0 to {} since the last time we ran, ignoring difference because 0 means UNSET and firehose does not provide it", + chain_id, + chain_net_version, + ); + *status = GenesisCheckStatus::Valid; + } + IdentValidatorError::ChangedNetVersion { + store_net_version, + chain_net_version, + .. + } => { + *status = GenesisCheckStatus::Failed; + return Err(ProviderManagerError::ProviderFailedValidation { + provider: ident.provider, + expected: ChainIdentifier { + net_version: store_net_version, + genesis_block_hash: chain_ident.genesis_block_hash.clone(), + }, + actual: ChainIdentifier { + net_version: chain_net_version, + genesis_block_hash: chain_ident.genesis_block_hash, + }, + chain_id: ident.chain_id.clone(), + }); + } + IdentValidatorError::ChangedHash { + store_hash, + chain_hash, + .. + } => { + *status = GenesisCheckStatus::Failed; + return Err(ProviderManagerError::ProviderFailedValidation { + provider: ident.provider, + expected: ChainIdentifier { + net_version: chain_ident.net_version.clone(), + genesis_block_hash: store_hash, + }, + actual: ChainIdentifier { + net_version: chain_ident.net_version, + genesis_block_hash: chain_hash, + }, + chain_id: ident.chain_id.clone(), + }); + } + e @ IdentValidatorError::UnavailableStore(_) + | e @ IdentValidatorError::UnknownError(_) => { + *status = GenesisCheckStatus::TemporaryFailure { + checked_at: Utc::now(), + }; + + return Err(ProviderManagerError::Unknown(crate::anyhow::anyhow!( + e.to_string() + ))); + } + }, + } + + Ok(()) + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +enum GenesisCheckStatus { + NotChecked, + TemporaryFailure { checked_at: DateTime }, + Valid, + Failed, +} + +#[cfg(test)] +mod test { + use std::{ + ops::Sub, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + }; + + use crate::{ + bail, + blockchain::BlockHash, + components::adapter::{ChainId, GenesisCheckStatus, MockIdentValidator}, + data::value::Word, + prelude::lazy_static, + }; + use async_trait::async_trait; + use chrono::{Duration, Utc}; + use ethabi::ethereum_types::H256; + use slog::{o, Discard, Logger}; + + use crate::{blockchain::ChainIdentifier, components::adapter::ProviderManagerError}; + + use super::{ + IdentValidator, IdentValidatorError, NetIdentifiable, ProviderManager, ProviderName, + VALIDATION_ATTEMPT_TTL, + }; + + const TEST_CHAIN_ID: &str = "valid"; + + lazy_static! 
{ + static ref UNTESTABLE_ADAPTER: MockAdapter = + MockAdapter{ + provider: "untestable".into(), + status: GenesisCheckStatus::TemporaryFailure { checked_at: Utc::now()}, + }; + + // way past TTL, ready to check again + static ref TESTABLE_ADAPTER: MockAdapter = + MockAdapter{ + provider: "testable".into(), + status: GenesisCheckStatus::TemporaryFailure { checked_at: Utc::now().sub(Duration::seconds(10000000)) }, + }; + static ref VALID_ADAPTER: MockAdapter = MockAdapter {provider: "valid".into(), status: GenesisCheckStatus::Valid,}; + static ref FAILED_ADAPTER: MockAdapter = MockAdapter {provider: "FAILED".into(), status: GenesisCheckStatus::Failed,}; + static ref NEW_CHAIN_IDENT: ChainIdentifier = ChainIdentifier { net_version: "123".to_string(), genesis_block_hash: BlockHash::from( H256::repeat_byte(1))}; + } + + struct TestValidator { + check_result: Result<(), IdentValidatorError>, + expected_new_ident: Option, + } + + impl IdentValidator for TestValidator { + fn check_ident( + &self, + _chain_id: &ChainId, + _ident: &ChainIdentifier, + ) -> Result<(), IdentValidatorError> { + self.check_result.clone() + } + + fn update_ident( + &self, + _chain_id: &ChainId, + ident: &ChainIdentifier, + ) -> Result<(), anyhow::Error> { + match self.expected_new_ident.as_ref() { + None => unreachable!("unexpected call to update_ident"), + Some(ident_expected) if ident_expected.eq(ident) => Ok(()), + Some(_) => bail!("update_ident called with unexpected value"), + } + } + } + + #[derive(Clone, PartialEq, Eq, Debug)] + struct MockAdapter { + provider: Word, + status: GenesisCheckStatus, + } + + #[async_trait] + impl NetIdentifiable for MockAdapter { + async fn net_identifiers(&self) -> Result { + match self.status { + GenesisCheckStatus::TemporaryFailure { checked_at } + if checked_at > Utc::now().sub(VALIDATION_ATTEMPT_TTL) => + { + unreachable!("should never check if ttl has not elapsed"); + } + _ => Ok(NEW_CHAIN_IDENT.clone()), + } + } + + fn provider_name(&self) -> ProviderName { + self.provider.clone() + } + } + + #[tokio::test] + async fn test_provider_manager() { + struct Case<'a> { + name: &'a str, + chain_id: &'a str, + adapters: Vec<(ChainId, Vec)>, + validator: Option, + expected: Result, ProviderManagerError>, + } + + let cases = vec![ + Case { + name: "no adapters", + chain_id: TEST_CHAIN_ID, + adapters: vec![], + validator: None, + expected: Ok(vec![]), + }, + Case { + name: "ident unset, adapter updates and validates", + chain_id: TEST_CHAIN_ID, + adapters: vec![(TEST_CHAIN_ID.into(), vec![TESTABLE_ADAPTER.clone()])], + validator: Some(TestValidator { + check_result: Err(IdentValidatorError::UnsetIdent), + expected_new_ident: Some(NEW_CHAIN_IDENT.clone()), + }), + expected: Ok(vec![&TESTABLE_ADAPTER]), + }, + Case { + name: "adapter temporary failure with Ident unset", + chain_id: TEST_CHAIN_ID, + // UNTESTABLE_ADAPTER is still within the failure TTL, so it will not be re-checked + adapters: vec![(TEST_CHAIN_ID.into(), vec![UNTESTABLE_ADAPTER.clone()])], + validator: None, + expected: Err(ProviderManagerError::NoProvidersAvailable( + TEST_CHAIN_ID.into(), + )), + }, + Case { + name: "adapter temporary failure", + chain_id: TEST_CHAIN_ID, + adapters: vec![(TEST_CHAIN_ID.into(), vec![UNTESTABLE_ADAPTER.clone()])], + validator: None, + expected: Err(ProviderManagerError::NoProvidersAvailable( + TEST_CHAIN_ID.into(), + )), + }, + Case { + name: "wrong chain ident", + chain_id: TEST_CHAIN_ID, + adapters: vec![(TEST_CHAIN_ID.into(), vec![FAILED_ADAPTER.clone()])], + validator: Some(TestValidator { + check_result:
Err(IdentValidatorError::ChangedNetVersion { + chain_id: TEST_CHAIN_ID.into(), + store_net_version: "".to_string(), + chain_net_version: "".to_string(), + }), + expected_new_ident: None, + }), + expected: Err(ProviderManagerError::AllProvidersFailed( + TEST_CHAIN_ID.into(), + )), + }, + Case { + name: "all adapters ok or not checkable yet", + chain_id: TEST_CHAIN_ID, + adapters: vec![( + TEST_CHAIN_ID.into(), + vec![VALID_ADAPTER.clone(), FAILED_ADAPTER.clone()], + )], + // if a check is performed (which it shouldn't) the test will fail + validator: Some(TestValidator { + check_result: Err(IdentValidatorError::ChangedNetVersion { + chain_id: TEST_CHAIN_ID.into(), + store_net_version: "".to_string(), + chain_net_version: "".to_string(), + }), + expected_new_ident: None, + }), + expected: Ok(vec![&VALID_ADAPTER]), + }, + Case { + name: "all adapters ok or checkable", + chain_id: TEST_CHAIN_ID, + adapters: vec![( + TEST_CHAIN_ID.into(), + vec![VALID_ADAPTER.clone(), TESTABLE_ADAPTER.clone()], + )], + validator: None, + expected: Ok(vec![&VALID_ADAPTER, &TESTABLE_ADAPTER]), + }, + ]; + + for case in cases.into_iter() { + let Case { + name, + chain_id, + adapters, + validator, + expected, + } = case; + + let logger = Logger::root(Discard, o!()); + let chain_id = chain_id.into(); + + let validator: Arc = match validator { + None => Arc::new(MockIdentValidator {}), + Some(validator) => Arc::new(validator), + }; + + let manager = ProviderManager::new(logger, adapters.clone().into_iter(), validator); + + for (_, adapters) in adapters.iter() { + for adapter in adapters.iter() { + let provider = adapter.provider.clone(); + let slot = manager + .inner + .status + .iter() + .find(|(ident, _)| ident.provider.eq(&provider)) + .expect(&format!( + "case: {} - there should be a status for provider \"{}\"", + name, provider + )); + let mut s = slot.1.write().await; + *s = adapter.status.clone(); + } + } + + let result = manager.get_all(&chain_id).await; + match (expected, result) { + (Ok(expected), Ok(result)) => assert_eq!( + expected, result, + "case {} failed. Result: {:?}", + name, result + ), + (Err(expected), Err(result)) => assert_eq!( + expected.to_string(), + result.to_string(), + "case {} failed. Result: {:?}", + name, + result + ), + (Ok(expected), Err(result)) => panic!( + "case {} failed. Result: {}, Expected: {:?}", + name, result, expected + ), + (Err(expected), Ok(result)) => panic!( + "case {} failed. 
Result: {:?}, Expected: {}", + name, result, expected + ), + } + } + } + + #[tokio::test] + async fn test_provider_manager_updates_on_unset() { + #[derive(Clone, Debug, Eq, PartialEq)] + struct MockAdapter {} + + #[async_trait] + impl NetIdentifiable for MockAdapter { + async fn net_identifiers(&self) -> Result { + Ok(NEW_CHAIN_IDENT.clone()) + } + fn provider_name(&self) -> ProviderName { + TEST_CHAIN_ID.into() + } + } + + struct TestValidator { + called: AtomicBool, + err: IdentValidatorError, + } + + impl IdentValidator for TestValidator { + fn check_ident( + &self, + _chain_id: &ChainId, + _ident: &ChainIdentifier, + ) -> Result<(), IdentValidatorError> { + Err(self.err.clone()) + } + + fn update_ident( + &self, + _chain_id: &ChainId, + ident: &ChainIdentifier, + ) -> Result<(), anyhow::Error> { + if NEW_CHAIN_IDENT.eq(ident) { + self.called.store(true, Ordering::SeqCst); + return Ok(()); + } + + unreachable!("unexpected call to update_ident or unexpected ident passed"); + } + } + + let logger = Logger::root(Discard, o!()); + let chain_id = TEST_CHAIN_ID.into(); + + // Ensure the provider updates the chain ident when it wasn't set yet. + let validator = Arc::new(TestValidator { + called: AtomicBool::default(), + err: IdentValidatorError::UnsetIdent, + }); + let adapter = MockAdapter {}; + + let manager = ProviderManager::new( + logger, + vec![(TEST_CHAIN_ID.into(), vec![adapter.clone()])].into_iter(), + validator.clone(), + ); + + let mut result = manager.get_all(&chain_id).await.unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(&adapter, result.pop().unwrap()); + assert_eq!(validator.called.load(Ordering::SeqCst), true); + } +} diff --git a/graph/src/components/mod.rs b/graph/src/components/mod.rs index 71b2f143ceb..ad6480d1d0e 100644 --- a/graph/src/components/mod.rs +++ b/graph/src/components/mod.rs @@ -60,6 +60,8 @@ pub mod metrics; /// Components dealing with versioning pub mod versions; +pub mod adapter; + /// A component that receives events of type `T`. pub trait EventConsumer { /// Get the event sink. diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 7c29b891fdf..ed80fca49f7 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -410,6 +410,11 @@ pub trait QueryStoreManager: Send + Sync + 'static { pub trait BlockStore: Send + Sync + 'static { type ChainStore: ChainStore; + fn create_chain_store( + &self, + network: &str, + ident: ChainIdentifier, + ) -> anyhow::Result>; fn chain_store(&self, network: &str) -> Option>; } @@ -536,7 +541,10 @@ pub trait ChainStore: Send + Sync + 'static { async fn clear_call_cache(&self, from: BlockNumber, to: BlockNumber) -> Result<(), Error>; /// Return the chain identifier for this store. - fn chain_identifier(&self) -> &ChainIdentifier; + fn chain_identifier(&self) -> Result; + + /// Update the chain identifier for this store.
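+    /// A sketch of the intended flow (editorial note): `IdentValidator::update_ident`
+    /// calls this when the stored identifier was never set, e.g. for a freshly created
+    /// chain store, so that the first validated provider establishes the identity.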
+ fn set_chain_identifier(&self, ident: &ChainIdentifier) -> Result<(), Error>; } pub trait EthereumCallCache: Send + Sync + 'static { diff --git a/graph/src/endpoint.rs b/graph/src/endpoint.rs index bff6b0c53f9..82a69398446 100644 --- a/graph/src/endpoint.rs +++ b/graph/src/endpoint.rs @@ -9,22 +9,22 @@ use std::{ use prometheus::IntCounterVec; use slog::{warn, Logger}; -use crate::{components::metrics::MetricsRegistry, data::value::Word}; +use crate::{ + components::{adapter::ProviderName, metrics::MetricsRegistry}, + data::value::Word, +}; /// ProviderCount is the underlying structure to keep the count, /// we require that all the hosts are known ahead of time, this way we can /// avoid locking since we don't need to modify the entire struture. -type ProviderCount = Arc>; - -/// Provider represents label of the underlying endpoint. -pub type Provider = Word; +type ProviderCount = Arc>; /// This struct represents all the current labels except for the result /// which is added separately. If any new labels are necessary they should /// remain in the same order as added in [`EndpointMetrics::new`] #[derive(Clone)] pub struct RequestLabels { - pub provider: Provider, + pub provider: ProviderName, pub req_type: Word, pub conn_type: ConnectionType, } @@ -84,7 +84,7 @@ impl EndpointMetrics { let providers = Arc::new(HashMap::from_iter( providers .iter() - .map(|h| (Provider::from(h.as_ref()), AtomicU64::new(0))), + .map(|h| (ProviderName::from(h.as_ref()), AtomicU64::new(0))), )); let counter = registry @@ -114,7 +114,7 @@ impl EndpointMetrics { } #[cfg(debug_assertions)] - pub fn report_for_test(&self, provider: &Provider, success: bool) { + pub fn report_for_test(&self, provider: &ProviderName, success: bool) { match success { true => self.success(&RequestLabels { provider: provider.clone(), @@ -161,7 +161,7 @@ impl EndpointMetrics { /// Returns the current error count of a host or 0 if the host /// doesn't have a value on the map. 
- pub fn get_count(&self, provider: &Provider) -> u64 { + pub fn get_count(&self, provider: &ProviderName) -> u64 { self.providers .get(provider) .map(|c| c.load(Ordering::Relaxed)) @@ -177,12 +177,13 @@ mod test { use crate::{ components::metrics::MetricsRegistry, - endpoint::{EndpointMetrics, Provider}, + endpoint::{EndpointMetrics, ProviderName}, }; #[tokio::test] async fn should_increment_and_reset() { - let (a, b, c): (Provider, Provider, Provider) = ("a".into(), "b".into(), "c".into()); + let (a, b, c): (ProviderName, ProviderName, ProviderName) = + ("a".into(), "b".into(), "c".into()); let hosts: &[&str] = &[&a, &b, &c]; let logger = Logger::root(Discard, o!()); diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index 24c47d3990c..d4f0e13e448 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -1,830 +1,963 @@ -use crate::{ - blockchain::block_stream::FirehoseCursor, - blockchain::Block as BlockchainBlock, - blockchain::BlockPtr, - cheap_clone::CheapClone, - components::store::BlockNumber, - data::value::Word, - endpoint::{ConnectionType, EndpointMetrics, Provider, RequestLabels}, - env::ENV_VARS, - firehose::decode_firehose_block, - prelude::{anyhow, debug, info, DeploymentHash}, - substreams_rpc, -}; - -use crate::firehose::fetch_client::FetchClient; -use crate::firehose::interceptors::AuthInterceptor; -use futures03::StreamExt; -use http::uri::{Scheme, Uri}; -use itertools::Itertools; -use slog::Logger; -use std::{ - collections::{BTreeMap, HashMap}, - fmt::Display, - ops::ControlFlow, - sync::Arc, - time::Duration, -}; -use tonic::codegen::InterceptedService; -use tonic::{ - codegen::CompressionEncoding, - metadata::{Ascii, MetadataKey, MetadataValue}, - transport::{Channel, ClientTlsConfig}, - Request, -}; - -use super::{codec as firehose, interceptors::MetricsInterceptor, stream_client::StreamClient}; - -/// This is constant because we found this magic number of connections after -/// which the grpc connections start to hang. -/// For more details see: https://github.com/graphprotocol/graph-node/issues/3879 -pub const SUBGRAPHS_PER_CONN: usize = 100; - -const LOW_VALUE_THRESHOLD: usize = 10; -const LOW_VALUE_USED_PERCENTAGE: usize = 50; -const HIGH_VALUE_USED_PERCENTAGE: usize = 80; - -#[derive(Debug)] -pub struct FirehoseEndpoint { - pub provider: Provider, - pub auth: AuthInterceptor, - pub filters_enabled: bool, - pub compression_enabled: bool, - pub subgraph_limit: SubgraphLimit, - endpoint_metrics: Arc, - channel: Channel, -} - -#[derive(Debug)] -pub struct ConnectionHeaders(HashMap, MetadataValue>); - -impl ConnectionHeaders { - pub fn new() -> Self { - Self(HashMap::new()) - } - pub fn with_deployment(mut self, deployment: DeploymentHash) -> Self { - if let Ok(deployment) = deployment.parse() { - self.0 - .insert("x-deployment-id".parse().unwrap(), deployment); - } - self - } - pub fn add_to_request(&self, request: T) -> Request { - let mut request = Request::new(request); - self.0.iter().for_each(|(k, v)| { - request.metadata_mut().insert(k, v.clone()); - }); - request - } -} - -#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)] -pub enum AvailableCapacity { - Unavailable, - Low, - High, -} - -// TODO: Find a new home for this type. 
-#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)] -pub enum SubgraphLimit { - Disabled, - Limit(usize), - Unlimited, -} - -impl SubgraphLimit { - pub fn get_capacity(&self, current: usize) -> AvailableCapacity { - match self { - // Limit(0) should probably be Disabled but just in case - SubgraphLimit::Disabled | SubgraphLimit::Limit(0) => AvailableCapacity::Unavailable, - SubgraphLimit::Limit(total) => { - let total = *total; - if current >= total { - return AvailableCapacity::Unavailable; - } - - let used_percent = current * 100 / total; - - // If total is low it can vary very quickly so we can consider 50% as the low threshold - // to make selection more reliable - let threshold_percent = if total <= LOW_VALUE_THRESHOLD { - LOW_VALUE_USED_PERCENTAGE - } else { - HIGH_VALUE_USED_PERCENTAGE - }; - - if used_percent < threshold_percent { - return AvailableCapacity::High; - } - - AvailableCapacity::Low - } - _ => AvailableCapacity::High, - } - } - - pub fn has_capacity(&self, current: usize) -> bool { - match self { - SubgraphLimit::Unlimited => true, - SubgraphLimit::Limit(limit) => limit > ¤t, - SubgraphLimit::Disabled => false, - } - } -} - -impl Display for FirehoseEndpoint { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - Display::fmt(self.provider.as_str(), f) - } -} - -impl FirehoseEndpoint { - pub fn new>( - provider: S, - url: S, - token: Option, - key: Option, - filters_enabled: bool, - compression_enabled: bool, - subgraph_limit: SubgraphLimit, - endpoint_metrics: Arc, - ) -> Self { - let uri = url - .as_ref() - .parse::() - .expect("the url should have been validated by now, so it is a valid Uri"); - - let endpoint_builder = match uri.scheme().unwrap_or(&Scheme::HTTP).as_str() { - "http" => Channel::builder(uri), - "https" => Channel::builder(uri) - .tls_config(ClientTlsConfig::new()) - .expect("TLS config on this host is invalid"), - _ => panic!("invalid uri scheme for firehose endpoint"), - }; - - // These tokens come from the config so they have to be ascii. - let token: Option> = token - .map_or(Ok(None), |token| { - let bearer_token = format!("bearer {}", token); - bearer_token.parse::>().map(Some) - }) - .expect("Firehose token is invalid"); - - let key: Option> = key - .map_or(Ok(None), |key| { - key.parse::>().map(Some) - }) - .expect("Firehose key is invalid"); - - // Note on the connection window size: We run multiple block streams on a same connection, - // and a problematic subgraph with a stalled block stream might consume the entire window - // capacity for its http2 stream and never release it. If there are enough stalled block - // streams to consume all the capacity on the http2 connection, then _all_ subgraphs using - // this same http2 connection will stall. At a default stream window size of 2^16, setting - // the connection window size to the maximum of 2^31 allows for 2^15 streams without any - // contention, which is effectively unlimited for normal graph node operation. - // - // Note: Do not set `http2_keep_alive_interval` or `http2_adaptive_window`, as these will - // send ping frames, and many cloud load balancers will drop connections that frequently - // send pings. - let endpoint = endpoint_builder - .initial_connection_window_size(Some((1 << 31) - 1)) - .connect_timeout(Duration::from_secs(10)) - .tcp_keepalive(Some(Duration::from_secs(15))) - // Timeout on each request, so the timeout to estabilish each 'Blocks' stream. 
- .timeout(Duration::from_secs(120)); - - let subgraph_limit = match subgraph_limit { - // See the comment on the constant - SubgraphLimit::Unlimited => SubgraphLimit::Limit(SUBGRAPHS_PER_CONN), - // This is checked when parsing from config but doesn't hurt to be defensive. - SubgraphLimit::Limit(limit) => SubgraphLimit::Limit(limit.min(SUBGRAPHS_PER_CONN)), - l => l, - }; - - FirehoseEndpoint { - provider: provider.as_ref().into(), - channel: endpoint.connect_lazy(), - auth: AuthInterceptor { token, key }, - filters_enabled, - compression_enabled, - subgraph_limit, - endpoint_metrics, - } - } - - pub fn current_error_count(&self) -> u64 { - self.endpoint_metrics.get_count(&self.provider) - } - - // we need to -1 because there will always be a reference - // inside FirehoseEndpoints that is not used (is always cloned). - pub fn get_capacity(self: &Arc) -> AvailableCapacity { - self.subgraph_limit - .get_capacity(Arc::strong_count(self).saturating_sub(1)) - } - - fn new_client( - &self, - ) -> FetchClient< - InterceptedService, impl tonic::service::Interceptor>, - > { - let metrics = MetricsInterceptor { - metrics: self.endpoint_metrics.cheap_clone(), - service: self.channel.cheap_clone(), - labels: RequestLabels { - provider: self.provider.clone().into(), - req_type: "unknown".into(), - conn_type: ConnectionType::Firehose, - }, - }; - - let mut client: FetchClient< - InterceptedService, AuthInterceptor>, - > = FetchClient::with_interceptor(metrics, self.auth.clone()) - .accept_compressed(CompressionEncoding::Gzip); - - if self.compression_enabled { - client = client.send_compressed(CompressionEncoding::Gzip); - } - - client - } - - fn new_stream_client( - &self, - ) -> StreamClient< - InterceptedService, impl tonic::service::Interceptor>, - > { - let metrics = MetricsInterceptor { - metrics: self.endpoint_metrics.cheap_clone(), - service: self.channel.cheap_clone(), - labels: RequestLabels { - provider: self.provider.clone().into(), - req_type: "unknown".into(), - conn_type: ConnectionType::Firehose, - }, - }; - - let mut client = StreamClient::with_interceptor(metrics, self.auth.clone()) - .accept_compressed(CompressionEncoding::Gzip); - - if self.compression_enabled { - client = client.send_compressed(CompressionEncoding::Gzip); - } - - client = client - .max_decoding_message_size(1024 * 1024 * ENV_VARS.firehose_grpc_max_decode_size_mb); - - client - } - - fn new_substreams_client( - &self, - ) -> substreams_rpc::stream_client::StreamClient< - InterceptedService, impl tonic::service::Interceptor>, - > { - let metrics = MetricsInterceptor { - metrics: self.endpoint_metrics.cheap_clone(), - service: self.channel.cheap_clone(), - labels: RequestLabels { - provider: self.provider.clone().into(), - req_type: "unknown".into(), - conn_type: ConnectionType::Substreams, - }, - }; - - let mut client = substreams_rpc::stream_client::StreamClient::with_interceptor( - metrics, - self.auth.clone(), - ) - .accept_compressed(CompressionEncoding::Gzip); - - if self.compression_enabled { - client = client.send_compressed(CompressionEncoding::Gzip); - } - - client - } - - pub async fn get_block( - &self, - cursor: FirehoseCursor, - logger: &Logger, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - debug!( - logger, - "Connecting to firehose to retrieve block for cursor {}", cursor; - "provider" => self.provider.as_str(), - ); - - let req = firehose::SingleBlockRequest { - transforms: [].to_vec(), - reference: Some(firehose::single_block_request::Reference::Cursor( - 
firehose::single_block_request::Cursor { - cursor: cursor.to_string(), - }, - )), - }; - - let mut client = self.new_client(); - match client.block(req).await { - Ok(v) => Ok(M::decode( - v.get_ref().block.as_ref().unwrap().value.as_ref(), - )?), - Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), - } - } - - pub async fn genesis_block_ptr(&self, logger: &Logger) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - info!(logger, "Requesting genesis block from firehose"; - "provider" => self.provider.as_str()); - - // We use 0 here to mean the genesis block of the chain. Firehose - // when seeing start block number 0 will always return the genesis - // block of the chain, even if the chain's start block number is - // not starting at block #0. - self.block_ptr_for_number::(logger, 0).await - } - - pub async fn block_ptr_for_number( - &self, - logger: &Logger, - number: BlockNumber, - ) -> Result - where - M: prost::Message + BlockchainBlock + Default + 'static, - { - debug!( - logger, - "Connecting to firehose to retrieve block for number {}", number; - "provider" => self.provider.as_str(), - ); - - let mut client = self.new_stream_client(); - - // The trick is the following. - // - // Firehose `start_block_num` and `stop_block_num` are both inclusive, so we specify - // the block we are looking for in both. - // - // Now, the remaining question is how the block from the canonical chain is picked. We - // leverage the fact that Firehose will always send the block in the longuest chain as the - // last message of this request. - // - // That way, we either get the final block if the block is now in a final segment of the - // chain (or probabilisticly if not finality concept exists for the chain). Or we get the - // block that is in the longuest chain according to Firehose. - let response_stream = client - .blocks(firehose::Request { - start_block_num: number as i64, - stop_block_num: number as u64, - final_blocks_only: false, - ..Default::default() - }) - .await?; - - let mut block_stream = response_stream.into_inner(); - - debug!(logger, "Retrieving block(s) from firehose"; - "provider" => self.provider.as_str()); - - let mut latest_received_block: Option = None; - while let Some(message) = block_stream.next().await { - match message { - Ok(v) => { - let block = decode_firehose_block::(&v)?.ptr(); - - match latest_received_block { - None => { - latest_received_block = Some(block); - } - Some(ref actual_ptr) => { - // We want to receive all events related to a specific block number, - // however, in some circumstances, it seems Firehose would not stop sending - // blocks (`start_block_num: 0 and stop_block_num: 0` on NEAR seems to trigger - // this). - // - // To prevent looping infinitely, we stop as soon as a new received block's - // number is higher than the latest received block's number, in which case it - // means it's an event for a block we are not interested in. 
- if block.number > actual_ptr.number { - break; - } - - latest_received_block = Some(block); - } - } - } - Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), - }; - } - - match latest_received_block { - Some(block_ptr) => Ok(block_ptr), - None => Err(anyhow::format_err!( - "Firehose should have returned at least one block for request" - )), - } - } - - pub async fn stream_blocks( - self: Arc, - request: firehose::Request, - headers: &ConnectionHeaders, - ) -> Result, anyhow::Error> { - let mut client = self.new_stream_client(); - let request = headers.add_to_request(request); - let response_stream = client.blocks(request).await?; - let block_stream = response_stream.into_inner(); - - Ok(block_stream) - } - - pub async fn substreams( - self: Arc, - request: substreams_rpc::Request, - headers: &ConnectionHeaders, - ) -> Result, anyhow::Error> { - let mut client = self.new_substreams_client(); - let request = headers.add_to_request(request); - let response_stream = client.blocks(request).await?; - let block_stream = response_stream.into_inner(); - - Ok(block_stream) - } -} - -#[derive(Clone, Debug)] -pub struct FirehoseEndpoints(Vec>); - -impl FirehoseEndpoints { - pub fn new() -> Self { - Self(vec![]) - } - - pub fn len(&self) -> usize { - self.0.len() - } - - /// This function will attempt to grab an endpoint based on the Lowest error count - // with high capacity available. If an adapter cannot be found `endpoint` will - // return an error. - pub fn endpoint(&self) -> anyhow::Result> { - let endpoint = self - .0 - .iter() - .sorted_by_key(|x| x.current_error_count()) - .try_fold(None, |acc, adapter| { - match adapter.get_capacity() { - AvailableCapacity::Unavailable => ControlFlow::Continue(acc), - AvailableCapacity::Low => match acc { - Some(_) => ControlFlow::Continue(acc), - None => ControlFlow::Continue(Some(adapter)), - }, - // This means that if all adapters with low/no errors are low capacity - // we will retry the high capacity that has errors, at this point - // any other available with no errors are almost at their limit. - AvailableCapacity::High => ControlFlow::Break(Some(adapter)), - } - }); - - match endpoint { - ControlFlow::Continue(adapter) - | ControlFlow::Break(adapter) => - adapter.cloned().ok_or(anyhow!("unable to get a connection, increase the firehose conn_pool_size or limit for the node")) - } - } - - pub fn remove(&mut self, provider: &str) { - self.0 - .retain(|network_endpoint| network_endpoint.provider.as_str() != provider); - } -} - -impl From>> for FirehoseEndpoints { - fn from(val: Vec>) -> Self { - FirehoseEndpoints(val) - } -} - -#[derive(Clone, Debug)] -pub struct FirehoseNetworks { - /// networks contains a map from chain id (`near-mainnet`, `near-testnet`, `solana-mainnet`, etc.) - /// to a list of FirehoseEndpoint (type wrapper around `Arc>`). - pub networks: BTreeMap, -} - -impl FirehoseNetworks { - pub fn new() -> FirehoseNetworks { - FirehoseNetworks { - networks: BTreeMap::new(), - } - } - - pub fn insert(&mut self, chain_id: String, endpoint: Arc) { - let endpoints = self - .networks - .entry(chain_id) - .or_insert_with(FirehoseEndpoints::new); - - endpoints.0.push(endpoint); - } - - pub fn remove(&mut self, chain_id: &str, provider: &str) { - if let Some(endpoints) = self.networks.get_mut(chain_id) { - endpoints.remove(provider); - } - } - - /// Returns a `HashMap` where the key is the chain's id and the key is an endpoint for this chain. 
- /// There can be multiple keys with the same chain id but with different - /// endpoint where multiple providers exist for a single chain id. Providers with the same - /// label do not need to be tested individually, if one is working, every other endpoint in the - /// pool should also work. - pub fn flatten(&self) -> HashMap<(String, Word), Arc> { - self.networks - .iter() - .flat_map(|(chain_id, firehose_endpoints)| { - firehose_endpoints.0.iter().map(move |endpoint| { - ( - (chain_id.clone(), endpoint.provider.clone()), - endpoint.clone(), - ) - }) - }) - .collect() - } -} - -#[cfg(test)] -mod test { - use std::{mem, sync::Arc}; - - use slog::{o, Discard, Logger}; - - use crate::{ - components::metrics::MetricsRegistry, endpoint::EndpointMetrics, firehose::SubgraphLimit, - }; - - use super::{AvailableCapacity, FirehoseEndpoint, FirehoseEndpoints, SUBGRAPHS_PER_CONN}; - - #[tokio::test] - async fn firehose_endpoint_errors() { - let endpoint = vec![Arc::new(FirehoseEndpoint::new( - String::new(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Unlimited, - Arc::new(EndpointMetrics::mock()), - ))]; - - let mut endpoints = FirehoseEndpoints::from(endpoint); - - let mut keep = vec![]; - for _i in 0..SUBGRAPHS_PER_CONN { - keep.push(endpoints.endpoint().unwrap()); - } - - let err = endpoints.endpoint().unwrap_err(); - assert!(err.to_string().contains("conn_pool_size")); - - mem::drop(keep); - endpoints.endpoint().unwrap(); - - // Fails when empty too - endpoints.remove(""); - - let err = endpoints.endpoint().unwrap_err(); - assert!(err.to_string().contains("unable to get a connection")); - } - - #[tokio::test] - async fn firehose_endpoint_with_limit() { - let endpoint = vec![Arc::new(FirehoseEndpoint::new( - String::new(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Limit(2), - Arc::new(EndpointMetrics::mock()), - ))]; - - let mut endpoints = FirehoseEndpoints::from(endpoint); - - let mut keep = vec![]; - for _ in 0..2 { - keep.push(endpoints.endpoint().unwrap()); - } - - let err = endpoints.endpoint().unwrap_err(); - assert!(err.to_string().contains("conn_pool_size")); - - mem::drop(keep); - endpoints.endpoint().unwrap(); - - // Fails when empty too - endpoints.remove(""); - - let err = endpoints.endpoint().unwrap_err(); - assert!(err.to_string().contains("unable to get a connection")); - } - - #[tokio::test] - async fn firehose_endpoint_no_traffic() { - let endpoint = vec![Arc::new(FirehoseEndpoint::new( - String::new(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Disabled, - Arc::new(EndpointMetrics::mock()), - ))]; - - let mut endpoints = FirehoseEndpoints::from(endpoint); - - let err = endpoints.endpoint().unwrap_err(); - assert!(err.to_string().contains("conn_pool_size")); - - // Fails when empty too - endpoints.remove(""); - - let err = endpoints.endpoint().unwrap_err(); - assert!(err.to_string().contains("unable to get a connection")); - } - - #[tokio::test] - async fn firehose_endpoint_selection() { - let logger = Logger::root(Discard, o!()); - let endpoint_metrics = Arc::new(EndpointMetrics::new( - logger, - &["high_error", "low availability", "high availability"], - Arc::new(MetricsRegistry::mock()), - )); - - let high_error_adapter1 = Arc::new(FirehoseEndpoint::new( - "high_error".to_string(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Unlimited, - endpoint_metrics.clone(), - )); - let high_error_adapter2 = 
Arc::new(FirehoseEndpoint::new( - "high_error".to_string(), - "http://127.0.0.1".to_string(), - None, - None, - false, - false, - SubgraphLimit::Unlimited, - endpoint_metrics.clone(), - )); - let low_availability = Arc::new(FirehoseEndpoint::new( - "low availability".to_string(), - "http://127.0.0.2".to_string(), - None, - None, - false, - false, - SubgraphLimit::Limit(2), - endpoint_metrics.clone(), - )); - let high_availability = Arc::new(FirehoseEndpoint::new( - "high availability".to_string(), - "http://127.0.0.3".to_string(), - None, - None, - false, - false, - SubgraphLimit::Unlimited, - endpoint_metrics.clone(), - )); - - endpoint_metrics.report_for_test(&high_error_adapter1.provider, false); - - let mut endpoints = FirehoseEndpoints::from(vec![ - high_error_adapter1.clone(), - high_error_adapter2, - low_availability.clone(), - high_availability.clone(), - ]); - - let res = endpoints.endpoint().unwrap(); - assert_eq!(res.provider, high_availability.provider); - - // Removing high availability without errors should fallback to low availability - endpoints.remove(&high_availability.provider); - - // Ensure we're in a low capacity situation - assert_eq!(low_availability.get_capacity(), AvailableCapacity::Low); - - // In the scenario where the only high level adapter has errors we keep trying that - // because the others will be low or unavailable - let res = endpoints.endpoint().unwrap(); - // This will match both high error adapters - assert_eq!(res.provider, high_error_adapter1.provider); - } - - #[test] - fn subgraph_limit_calculates_availability() { - #[derive(Debug)] - struct Case { - limit: SubgraphLimit, - current: usize, - capacity: AvailableCapacity, - } - - let cases = vec![ - Case { - limit: SubgraphLimit::Disabled, - current: 20, - capacity: AvailableCapacity::Unavailable, - }, - Case { - limit: SubgraphLimit::Limit(0), - current: 20, - capacity: AvailableCapacity::Unavailable, - }, - Case { - limit: SubgraphLimit::Limit(0), - current: 0, - capacity: AvailableCapacity::Unavailable, - }, - Case { - limit: SubgraphLimit::Limit(100), - current: 80, - capacity: AvailableCapacity::Low, - }, - Case { - limit: SubgraphLimit::Limit(2), - current: 1, - capacity: AvailableCapacity::Low, - }, - Case { - limit: SubgraphLimit::Limit(100), - current: 19, - capacity: AvailableCapacity::High, - }, - Case { - limit: SubgraphLimit::Limit(100), - current: 100, - capacity: AvailableCapacity::Unavailable, - }, - Case { - limit: SubgraphLimit::Limit(100), - current: 99, - capacity: AvailableCapacity::Low, - }, - Case { - limit: SubgraphLimit::Limit(100), - current: 101, - capacity: AvailableCapacity::Unavailable, - }, - Case { - limit: SubgraphLimit::Unlimited, - current: 1000, - capacity: AvailableCapacity::High, - }, - Case { - limit: SubgraphLimit::Unlimited, - current: 0, - capacity: AvailableCapacity::High, - }, - ]; - - for c in cases { - let res = c.limit.get_capacity(c.current); - assert_eq!(res, c.capacity, "{:#?}", c); - } - } - - #[test] - fn available_capacity_ordering() { - assert_eq!( - AvailableCapacity::Unavailable < AvailableCapacity::Low, - true - ); - assert_eq!( - AvailableCapacity::Unavailable < AvailableCapacity::High, - true - ); - assert_eq!(AvailableCapacity::Low < AvailableCapacity::High, true); - } -} +use crate::{ + bail, + blockchain::{ + block_stream::FirehoseCursor, Block as BlockchainBlock, BlockHash, BlockPtr, + ChainIdentifier, + }, + cheap_clone::CheapClone, + components::{ + adapter::{ChainId, NetIdentifiable, ProviderManager, ProviderName}, + 
store::BlockNumber, + }, + data::value::Word, + endpoint::{ConnectionType, EndpointMetrics, RequestLabels}, + env::ENV_VARS, + firehose::decode_firehose_block, + prelude::{anyhow, debug, info, DeploymentHash}, + substreams::Package, + substreams_rpc::{self, response, BlockScopedData, Response}, +}; + +use crate::firehose::fetch_client::FetchClient; +use crate::firehose::interceptors::AuthInterceptor; +use async_trait::async_trait; +use futures03::StreamExt; +use http::uri::{Scheme, Uri}; +use itertools::Itertools; +use prost::Message; +use slog::Logger; +use std::{ + collections::HashMap, fmt::Display, marker::PhantomData, ops::ControlFlow, str::FromStr, + sync::Arc, time::Duration, +}; +use tonic::codegen::InterceptedService; +use tonic::{ + codegen::CompressionEncoding, + metadata::{Ascii, MetadataKey, MetadataValue}, + transport::{Channel, ClientTlsConfig}, + Request, +}; + +use super::{codec as firehose, interceptors::MetricsInterceptor, stream_client::StreamClient}; + +/// This is constant because we found this magic number of connections after +/// which the grpc connections start to hang. +/// For more details see: https://github.com/graphprotocol/graph-node/issues/3879 +pub const SUBGRAPHS_PER_CONN: usize = 100; + +/// Substreams does not provide a simple way to get the chain identity, so we use this package +/// to obtain the genesis hash. +const SUBSTREAMS_HEAD_TRACKER_BYTES: &[u8; 89935] = include_bytes!( + "../../../substreams/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg" +); + +const LOW_VALUE_THRESHOLD: usize = 10; +const LOW_VALUE_USED_PERCENTAGE: usize = 50; +const HIGH_VALUE_USED_PERCENTAGE: usize = 80; + +/// Firehose endpoints do not currently provide a chain-agnostic way of getting the genesis block. +/// In order to get the genesis hash, the block needs to be decoded, and the graph crate has no +/// knowledge of specific chains, so this trait abstracts the chain details away from the FirehoseEndpoint.
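+/// A minimal usage sketch (editorial note; bindings are assumed): given a
+/// `decoder: Box<dyn GenesisDecoder>` and an `endpoint: Arc<FirehoseEndpoint>`,
+/// `decoder.get_genesis_block_ptr(&endpoint).await?` yields the genesis `BlockPtr`,
+/// which `net_identifiers` then wraps into a `ChainIdentifier` with net_version "0".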
+#[async_trait] +pub trait GenesisDecoder: std::fmt::Debug + Sync + Send { + async fn get_genesis_block_ptr( + &self, + endpoint: &Arc, + ) -> Result; + fn box_clone(&self) -> Box; +} + +#[derive(Debug, Clone)] +pub struct FirehoseGenesisDecoder { + pub logger: Logger, + phantom: PhantomData, +} + +impl FirehoseGenesisDecoder { + pub fn new(logger: Logger) -> Box { + Box::new(Self { + logger, + phantom: PhantomData, + }) + } +} + +#[async_trait] +impl GenesisDecoder + for FirehoseGenesisDecoder +{ + async fn get_genesis_block_ptr( + &self, + endpoint: &Arc, + ) -> Result { + endpoint.genesis_block_ptr::(&self.logger).await + } + + fn box_clone(&self) -> Box { + Box::new(Self { + logger: self.logger.cheap_clone(), + phantom: PhantomData, + }) + } +} + +#[derive(Debug, Clone)] +pub struct SubstreamsGenesisDecoder {} + +#[async_trait] +impl GenesisDecoder for SubstreamsGenesisDecoder { + async fn get_genesis_block_ptr( + &self, + endpoint: &Arc, + ) -> Result { + let package = Package::decode(SUBSTREAMS_HEAD_TRACKER_BYTES.to_vec().as_ref()).unwrap(); + let headers = ConnectionHeaders::new(); + let endpoint = endpoint.cheap_clone(); + + let mut stream = endpoint + .substreams( + substreams_rpc::Request { + start_block_num: 0, + start_cursor: "".to_string(), + stop_block_num: 1, + final_blocks_only: true, + production_mode: false, + output_module: "map_blocks".to_string(), + modules: package.modules, + debug_initial_store_snapshot_for_modules: vec![], + }, + &headers, + ) + .await?; + + tokio::time::timeout(Duration::from_secs(30), async move { + loop { + let rsp = stream.next().await; + + match rsp { + Some(Ok(Response { message })) => match message { + Some(response::Message::BlockScopedData(BlockScopedData { + clock, .. + })) if clock.is_some() => { + // unwrap: the match guard ensures this is safe. + let clock = clock.unwrap(); + return Ok(BlockPtr { + number: clock.number.try_into()?, + hash: BlockHash::from_str(&clock.id)?, + }); + } + // Most other messages are related to the protocol itself or to debugging, which are + // not relevant for this use case. + Some(_) => continue, + // It is unclear when this would happen; skip it. + None => continue, + }, + Some(Err(status)) => bail!("unable to get genesis block, status: {}", status), + None => bail!("unable to get genesis block, stream ended"), + } + } + }) + .await + .map_err(|_| anyhow!("unable to get genesis block, timed out."))?
+ } + + fn box_clone(&self) -> Box { + Box::new(Self {}) + } +} + +#[derive(Debug, Clone)] +pub struct NoopGenesisDecoder; + +impl NoopGenesisDecoder { + pub fn boxed() -> Box { + Box::new(Self {}) + } +} + +#[async_trait] +impl GenesisDecoder for NoopGenesisDecoder { + async fn get_genesis_block_ptr( + &self, + _endpoint: &Arc, + ) -> Result { + Ok(BlockPtr { + hash: BlockHash::zero(), + number: 0, + }) + } + + fn box_clone(&self) -> Box { + Box::new(Self {}) + } +} + +#[derive(Debug)] +pub struct FirehoseEndpoint { + pub provider: ProviderName, + pub auth: AuthInterceptor, + pub filters_enabled: bool, + pub compression_enabled: bool, + pub subgraph_limit: SubgraphLimit, + genesis_decoder: Box, + endpoint_metrics: Arc, + channel: Channel, +} + +#[derive(Debug)] +pub struct ConnectionHeaders(HashMap, MetadataValue>); + +#[async_trait] +impl NetIdentifiable for Arc { + async fn net_identifiers(&self) -> Result { + let ptr: BlockPtr = self.genesis_decoder.get_genesis_block_ptr(self).await?; + + Ok(ChainIdentifier { + net_version: "0".to_string(), + genesis_block_hash: ptr.hash, + }) + } + fn provider_name(&self) -> ProviderName { + self.provider.clone() + } +} + +impl ConnectionHeaders { + pub fn new() -> Self { + Self(HashMap::new()) + } + pub fn with_deployment(mut self, deployment: DeploymentHash) -> Self { + if let Ok(deployment) = deployment.parse() { + self.0 + .insert("x-deployment-id".parse().unwrap(), deployment); + } + self + } + pub fn add_to_request(&self, request: T) -> Request { + let mut request = Request::new(request); + self.0.iter().for_each(|(k, v)| { + request.metadata_mut().insert(k, v.clone()); + }); + request + } +} + +#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)] +pub enum AvailableCapacity { + Unavailable, + Low, + High, +} + +// TODO: Find a new home for this type. 
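+// A rough sketch of the capacity semantics implemented by `get_capacity` below, assuming
+// the thresholds defined above (LOW_VALUE_THRESHOLD = 10, LOW_VALUE_USED_PERCENTAGE = 50,
+// HIGH_VALUE_USED_PERCENTAGE = 80):
+//
+//     SubgraphLimit::Disabled.get_capacity(0)      => Unavailable
+//     SubgraphLimit::Limit(100).get_capacity(79)   => High (79% used < 80%)
+//     SubgraphLimit::Limit(100).get_capacity(80)   => Low (80% used >= 80%)
+//     SubgraphLimit::Limit(100).get_capacity(100)  => Unavailable (at or over the limit)
+//     SubgraphLimit::Limit(2).get_capacity(1)      => Low (small totals use the 50% threshold)
+//     SubgraphLimit::Unlimited.get_capacity(1_000) => High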
+#[derive(Clone, Debug, PartialEq, Ord, Eq, PartialOrd)] +pub enum SubgraphLimit { + Disabled, + Limit(usize), + Unlimited, +} + +impl SubgraphLimit { + pub fn get_capacity(&self, current: usize) -> AvailableCapacity { + match self { + // Limit(0) should probably be Disabled, but handle it here just in case + SubgraphLimit::Disabled | SubgraphLimit::Limit(0) => AvailableCapacity::Unavailable, + SubgraphLimit::Limit(total) => { + let total = *total; + if current >= total { + return AvailableCapacity::Unavailable; + } + + let used_percent = current * 100 / total; + + // If the total is low it can vary very quickly, so we consider 50% the low threshold + // to make selection more reliable + let threshold_percent = if total <= LOW_VALUE_THRESHOLD { + LOW_VALUE_USED_PERCENTAGE + } else { + HIGH_VALUE_USED_PERCENTAGE + }; + + if used_percent < threshold_percent { + return AvailableCapacity::High; + } + + AvailableCapacity::Low + } + _ => AvailableCapacity::High, + } + } + + pub fn has_capacity(&self, current: usize) -> bool { + match self { + SubgraphLimit::Unlimited => true, + SubgraphLimit::Limit(limit) => limit > &current, + SubgraphLimit::Disabled => false, + } + } +} + +impl Display for FirehoseEndpoint { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Display::fmt(self.provider.as_str(), f) + } +} + +impl FirehoseEndpoint { + pub fn new>( + provider: S, + url: S, + token: Option, + key: Option, + filters_enabled: bool, + compression_enabled: bool, + subgraph_limit: SubgraphLimit, + endpoint_metrics: Arc, + genesis_decoder: Box, + ) -> Self { + let uri = url + .as_ref() + .parse::() + .expect("the url should have been validated by now, so it is a valid Uri"); + + let endpoint_builder = match uri.scheme().unwrap_or(&Scheme::HTTP).as_str() { + "http" => Channel::builder(uri), + "https" => Channel::builder(uri) + .tls_config(ClientTlsConfig::new()) + .expect("TLS config on this host is invalid"), + _ => panic!("invalid uri scheme for firehose endpoint"), + }; + + // These tokens come from the config so they have to be ASCII. + let token: Option> = token + .map_or(Ok(None), |token| { + let bearer_token = format!("bearer {}", token); + bearer_token.parse::>().map(Some) + }) + .expect("Firehose token is invalid"); + + let key: Option> = key + .map_or(Ok(None), |key| { + key.parse::>().map(Some) + }) + .expect("Firehose key is invalid"); + + // Note on the connection window size: We run multiple block streams on the same connection, + // and a problematic subgraph with a stalled block stream might consume the entire window + // capacity for its http2 stream and never release it. If there are enough stalled block + // streams to consume all the capacity on the http2 connection, then _all_ subgraphs using + // this same http2 connection will stall. At a default stream window size of 2^16, setting + // the connection window size to the maximum of 2^31 allows for 2^15 streams without any + // contention, which is effectively unlimited for normal graph node operation. + // + // Note: Do not set `http2_keep_alive_interval` or `http2_adaptive_window`, as these will + // send ping frames, and many cloud load balancers will drop connections that frequently + // send pings. + let endpoint = endpoint_builder + .initial_connection_window_size(Some((1 << 31) - 1)) + .connect_timeout(Duration::from_secs(10)) + .tcp_keepalive(Some(Duration::from_secs(15))) + // Timeout on each request, i.e. the timeout to establish each 'Blocks' stream.
+ .timeout(Duration::from_secs(120)); + + let subgraph_limit = match subgraph_limit { + // See the comment on the constant + SubgraphLimit::Unlimited => SubgraphLimit::Limit(SUBGRAPHS_PER_CONN), + // This is checked when parsing from config but doesn't hurt to be defensive. + SubgraphLimit::Limit(limit) => SubgraphLimit::Limit(limit.min(SUBGRAPHS_PER_CONN)), + l => l, + }; + + FirehoseEndpoint { + provider: provider.as_ref().into(), + channel: endpoint.connect_lazy(), + auth: AuthInterceptor { token, key }, + filters_enabled, + compression_enabled, + subgraph_limit, + endpoint_metrics, + genesis_decoder, + } + } + + pub fn current_error_count(&self) -> u64 { + self.endpoint_metrics.get_count(&self.provider) + } + + // We need to subtract 1 because there will always be a reference + // inside FirehoseEndpoints that is not handed out (it is only ever cloned). + pub fn get_capacity(self: &Arc) -> AvailableCapacity { + self.subgraph_limit + .get_capacity(Arc::strong_count(self).saturating_sub(1)) + } + + fn new_client( + &self, + ) -> FetchClient< + InterceptedService, impl tonic::service::Interceptor>, + > { + let metrics = MetricsInterceptor { + metrics: self.endpoint_metrics.cheap_clone(), + service: self.channel.cheap_clone(), + labels: RequestLabels { + provider: self.provider.clone().into(), + req_type: "unknown".into(), + conn_type: ConnectionType::Firehose, + }, + }; + + let mut client: FetchClient< + InterceptedService, AuthInterceptor>, + > = FetchClient::with_interceptor(metrics, self.auth.clone()) + .accept_compressed(CompressionEncoding::Gzip); + + if self.compression_enabled { + client = client.send_compressed(CompressionEncoding::Gzip); + } + + client + } + + fn new_stream_client( + &self, + ) -> StreamClient< + InterceptedService, impl tonic::service::Interceptor>, + > { + let metrics = MetricsInterceptor { + metrics: self.endpoint_metrics.cheap_clone(), + service: self.channel.cheap_clone(), + labels: RequestLabels { + provider: self.provider.clone().into(), + req_type: "unknown".into(), + conn_type: ConnectionType::Firehose, + }, + }; + + let mut client = StreamClient::with_interceptor(metrics, self.auth.clone()) + .accept_compressed(CompressionEncoding::Gzip); + + if self.compression_enabled { + client = client.send_compressed(CompressionEncoding::Gzip); + } + client = client + .max_decoding_message_size(1024 * 1024 * ENV_VARS.firehose_grpc_max_decode_size_mb); + + client + } + + fn new_substreams_client( + &self, + ) -> substreams_rpc::stream_client::StreamClient< + InterceptedService, impl tonic::service::Interceptor>, + > { + let metrics = MetricsInterceptor { + metrics: self.endpoint_metrics.cheap_clone(), + service: self.channel.cheap_clone(), + labels: RequestLabels { + provider: self.provider.clone().into(), + req_type: "unknown".into(), + conn_type: ConnectionType::Substreams, + }, + }; + + let mut client = substreams_rpc::stream_client::StreamClient::with_interceptor( + metrics, + self.auth.clone(), + ) + .accept_compressed(CompressionEncoding::Gzip); + + if self.compression_enabled { + client = client.send_compressed(CompressionEncoding::Gzip); + } + + client + } + + pub async fn get_block( + &self, + cursor: FirehoseCursor, + logger: &Logger, + ) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + debug!( + logger, + "Connecting to firehose to retrieve block for cursor {}", cursor; + "provider" => self.provider.as_str(), + ); + + let req = firehose::SingleBlockRequest { + transforms: [].to_vec(), + reference:
Some(firehose::single_block_request::Reference::Cursor( + firehose::single_block_request::Cursor { + cursor: cursor.to_string(), + }, + )), + }; + + let mut client = self.new_client(); + match client.block(req).await { + Ok(v) => Ok(M::decode( + v.get_ref().block.as_ref().unwrap().value.as_ref(), + )?), + Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), + } + } + + pub async fn genesis_block_ptr(&self, logger: &Logger) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + info!(logger, "Requesting genesis block from firehose"; + "provider" => self.provider.as_str()); + + // We use 0 here to mean the genesis block of the chain. Firehose + // when seeing start block number 0 will always return the genesis + // block of the chain, even if the chain's start block number is + // not starting at block #0. + self.block_ptr_for_number::(logger, 0).await + } + + pub async fn block_ptr_for_number( + &self, + logger: &Logger, + number: BlockNumber, + ) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + debug!( + logger, + "Connecting to firehose to retrieve block for number {}", number; + "provider" => self.provider.as_str(), + ); + + let mut client = self.new_stream_client(); + + // The trick is the following. + // + // Firehose `start_block_num` and `stop_block_num` are both inclusive, so we specify + // the block we are looking for in both. + // + // Now, the remaining question is how the block from the canonical chain is picked. We + // leverage the fact that Firehose will always send the block in the longest chain as the + // last message of this request. + // + // That way, we either get the final block if the block is now in a final segment of the + // chain (or probabilistically, if no finality concept exists for the chain), or we get the + // block that is in the longest chain according to Firehose. + let response_stream = client + .blocks(firehose::Request { + start_block_num: number as i64, + stop_block_num: number as u64, + final_blocks_only: false, + ..Default::default() + }) + .await?; + + let mut block_stream = response_stream.into_inner(); + + debug!(logger, "Retrieving block(s) from firehose"; + "provider" => self.provider.as_str()); + + let mut latest_received_block: Option = None; + while let Some(message) = block_stream.next().await { + match message { + Ok(v) => { + let block = decode_firehose_block::(&v)?.ptr(); + + match latest_received_block { + None => { + latest_received_block = Some(block); + } + Some(ref actual_ptr) => { + // We want to receive all events related to a specific block number; + // however, in some circumstances, it seems Firehose would not stop sending + // blocks (`start_block_num: 0 and stop_block_num: 0` on NEAR seems to trigger + // this). + // + // To prevent looping infinitely, we stop as soon as a new received block's + // number is higher than the latest received block's number, in which case it + // means it's an event for a block we are not interested in.
+ if block.number > actual_ptr.number { + break; + } + + latest_received_block = Some(block); + } + } + } + Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), + }; + } + + match latest_received_block { + Some(block_ptr) => Ok(block_ptr), + None => Err(anyhow::format_err!( + "Firehose should have returned at least one block for request" + )), + } + } + + pub async fn stream_blocks( + self: Arc, + request: firehose::Request, + headers: &ConnectionHeaders, + ) -> Result, anyhow::Error> { + let mut client = self.new_stream_client(); + let request = headers.add_to_request(request); + let response_stream = client.blocks(request).await?; + let block_stream = response_stream.into_inner(); + + Ok(block_stream) + } + + pub async fn substreams( + self: Arc, + request: substreams_rpc::Request, + headers: &ConnectionHeaders, + ) -> Result, anyhow::Error> { + let mut client = self.new_substreams_client(); + let request = headers.add_to_request(request); + let response_stream = client.blocks(request).await?; + let block_stream = response_stream.into_inner(); + + Ok(block_stream) + } +} + +#[derive(Clone, Debug, Default)] +pub struct FirehoseEndpoints(ChainId, ProviderManager>); + +impl FirehoseEndpoints { + pub fn for_testing(adapters: Vec>) -> Self { + use slog::{o, Discard}; + + use crate::components::adapter::MockIdentValidator; + let chain_id: Word = "testing".into(); + + Self( + chain_id.clone(), + ProviderManager::new( + Logger::root(Discard, o!()), + vec![(chain_id, adapters)].into_iter(), + Arc::new(MockIdentValidator), + ), + ) + } + + pub fn new( + chain_id: ChainId, + provider_manager: ProviderManager>, + ) -> Self { + Self(chain_id, provider_manager) + } + + pub fn len(&self) -> usize { + self.1.len(&self.0) + } + + /// This function will attempt to grab an endpoint based on the lowest error count + /// with high capacity available. If an adapter cannot be found, `endpoint` will + /// return an error. + pub async fn endpoint(&self) -> anyhow::Result> { + let endpoint = self + .1 + .get_all(&self.0) + .await? + .into_iter() + .sorted_by_key(|x| x.current_error_count()) + .try_fold(None, |acc, adapter| { + match adapter.get_capacity() { + AvailableCapacity::Unavailable => ControlFlow::Continue(acc), + AvailableCapacity::Low => match acc { + Some(_) => ControlFlow::Continue(acc), + None => ControlFlow::Continue(Some(adapter)), + }, + // This means that if all adapters with low/no errors are at low capacity, + // we will retry the high-capacity one that has errors; at this point, + // any others available with no errors are almost at their limit.
+ AvailableCapacity::High => ControlFlow::Break(Some(adapter)), + } + }); + + match endpoint { + ControlFlow::Continue(adapter) + | ControlFlow::Break(adapter) => + adapter.cloned().ok_or(anyhow!("unable to get a connection, increase the firehose conn_pool_size or limit for the node")) + } + } +} + +#[cfg(test)] +mod test { + use std::{mem, sync::Arc}; + + use slog::{o, Discard, Logger}; + + use crate::{ + components::{adapter::NetIdentifiable, metrics::MetricsRegistry}, + endpoint::EndpointMetrics, + firehose::{NoopGenesisDecoder, SubgraphLimit}, + }; + + use super::{AvailableCapacity, FirehoseEndpoint, FirehoseEndpoints, SUBGRAPHS_PER_CONN}; + + #[tokio::test] + async fn firehose_endpoint_errors() { + let endpoint = vec![Arc::new(FirehoseEndpoint::new( + String::new(), + "http://127.0.0.1".to_string(), + None, + None, + false, + false, + SubgraphLimit::Unlimited, + Arc::new(EndpointMetrics::mock()), + NoopGenesisDecoder::boxed(), + ))]; + + let endpoints = FirehoseEndpoints::for_testing(endpoint); + + let mut keep = vec![]; + for _i in 0..SUBGRAPHS_PER_CONN { + keep.push(endpoints.endpoint().await.unwrap()); + } + + let err = endpoints.endpoint().await.unwrap_err(); + assert!(err.to_string().contains("conn_pool_size")); + + mem::drop(keep); + endpoints.endpoint().await.unwrap(); + + let endpoints = FirehoseEndpoints::for_testing(vec![]); + + let err = endpoints.endpoint().await.unwrap_err(); + assert!(err.to_string().contains("unable to get a connection")); + } + + #[tokio::test] + async fn firehose_endpoint_with_limit() { + let endpoint = vec![Arc::new(FirehoseEndpoint::new( + String::new(), + "http://127.0.0.1".to_string(), + None, + None, + false, + false, + SubgraphLimit::Limit(2), + Arc::new(EndpointMetrics::mock()), + NoopGenesisDecoder::boxed(), + ))]; + + let endpoints = FirehoseEndpoints::for_testing(endpoint); + + let mut keep = vec![]; + for _ in 0..2 { + keep.push(endpoints.endpoint().await.unwrap()); + } + + let err = endpoints.endpoint().await.unwrap_err(); + assert!(err.to_string().contains("conn_pool_size")); + + mem::drop(keep); + endpoints.endpoint().await.unwrap(); + } + + #[tokio::test] + async fn firehose_endpoint_no_traffic() { + let endpoint = vec![Arc::new(FirehoseEndpoint::new( + String::new(), + "http://127.0.0.1".to_string(), + None, + None, + false, + false, + SubgraphLimit::Disabled, + Arc::new(EndpointMetrics::mock()), + NoopGenesisDecoder::boxed(), + ))]; + + let endpoints = FirehoseEndpoints::for_testing(endpoint); + + let err = endpoints.endpoint().await.unwrap_err(); + assert!(err.to_string().contains("conn_pool_size")); + } + + #[tokio::test] + async fn firehose_endpoint_selection() { + let logger = Logger::root(Discard, o!()); + let endpoint_metrics = Arc::new(EndpointMetrics::new( + logger, + &["high_error", "low availability", "high availability"], + Arc::new(MetricsRegistry::mock()), + )); + + let high_error_adapter1 = Arc::new(FirehoseEndpoint::new( + "high_error".to_string(), + "http://127.0.0.1".to_string(), + None, + None, + false, + false, + SubgraphLimit::Unlimited, + endpoint_metrics.clone(), + NoopGenesisDecoder::boxed(), + )); + let high_error_adapter2 = Arc::new(FirehoseEndpoint::new( + "high_error".to_string(), + "http://127.0.0.1".to_string(), + None, + None, + false, + false, + SubgraphLimit::Unlimited, + endpoint_metrics.clone(), + NoopGenesisDecoder::boxed(), + )); + let low_availability = Arc::new(FirehoseEndpoint::new( + "low availability".to_string(), + "http://127.0.0.2".to_string(), + None, + None, + false, + false, + 
SubgraphLimit::Limit(2), + endpoint_metrics.clone(), + NoopGenesisDecoder::boxed(), + )); + let high_availability = Arc::new(FirehoseEndpoint::new( + "high availability".to_string(), + "http://127.0.0.3".to_string(), + None, + None, + false, + false, + SubgraphLimit::Unlimited, + endpoint_metrics.clone(), + NoopGenesisDecoder::boxed(), + )); + + endpoint_metrics.report_for_test(&high_error_adapter1.provider, false); + + let endpoints = FirehoseEndpoints::for_testing(vec![ + high_error_adapter1.clone(), + high_error_adapter2.clone(), + low_availability.clone(), + high_availability.clone(), + ]); + + let res = endpoints.endpoint().await.unwrap(); + assert_eq!(res.provider, high_availability.provider); + mem::drop(endpoints); + + // Removing high availability without errors should fall back to low availability + let endpoints = FirehoseEndpoints::for_testing( + vec![ + high_error_adapter1.clone(), + high_error_adapter2, + low_availability.clone(), + high_availability.clone(), + ] + .into_iter() + .filter(|a| a.provider_name() != high_availability.provider) + .collect(), + ); + + // Ensure we're in a low capacity situation + assert_eq!(low_availability.get_capacity(), AvailableCapacity::Low); + + // In the scenario where the only high-capacity adapter has errors, we keep trying it + // because the others will be low or unavailable + let res = endpoints.endpoint().await.unwrap(); + // This will match both high-error adapters + assert_eq!(res.provider, high_error_adapter1.provider); + } + + #[test] + fn subgraph_limit_calculates_availability() { + #[derive(Debug)] + struct Case { + limit: SubgraphLimit, + current: usize, + capacity: AvailableCapacity, + } + + let cases = vec![ + Case { + limit: SubgraphLimit::Disabled, + current: 20, + capacity: AvailableCapacity::Unavailable, + }, + Case { + limit: SubgraphLimit::Limit(0), + current: 20, + capacity: AvailableCapacity::Unavailable, + }, + Case { + limit: SubgraphLimit::Limit(0), + current: 0, + capacity: AvailableCapacity::Unavailable, + }, + Case { + limit: SubgraphLimit::Limit(100), + current: 80, + capacity: AvailableCapacity::Low, + }, + Case { + limit: SubgraphLimit::Limit(2), + current: 1, + capacity: AvailableCapacity::Low, + }, + Case { + limit: SubgraphLimit::Limit(100), + current: 19, + capacity: AvailableCapacity::High, + }, + Case { + limit: SubgraphLimit::Limit(100), + current: 100, + capacity: AvailableCapacity::Unavailable, + }, + Case { + limit: SubgraphLimit::Limit(100), + current: 99, + capacity: AvailableCapacity::Low, + }, + Case { + limit: SubgraphLimit::Limit(100), + current: 101, + capacity: AvailableCapacity::Unavailable, + }, + Case { + limit: SubgraphLimit::Unlimited, + current: 1000, + capacity: AvailableCapacity::High, + }, + Case { + limit: SubgraphLimit::Unlimited, + current: 0, + capacity: AvailableCapacity::High, + }, + ]; + + for c in cases { + let res = c.limit.get_capacity(c.current); + assert_eq!(res, c.capacity, "{:#?}", c); + } + } + + #[test] + fn available_capacity_ordering() { + assert_eq!( + AvailableCapacity::Unavailable < AvailableCapacity::Low, + true + ); + assert_eq!( + AvailableCapacity::Unavailable < AvailableCapacity::High, + true + ); + assert_eq!(AvailableCapacity::Low < AvailableCapacity::High, true); + } +} diff --git a/graph/src/lib.rs index 1ee333fa64b..c6ebac6bd37 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -112,6 +112,7 @@ pub mod prelude { pub use crate::blockchain::{BlockHash, BlockPtr}; + pub use crate::components::adapter; pub use
crate::components::ethereum::{ EthereumBlock, EthereumBlockWithCalls, EthereumCall, LightEthereumBlock, LightEthereumBlockExt, diff --git a/graph/src/task_spawn.rs b/graph/src/task_spawn.rs index c323d6d85a4..09055ad5381 100644 --- a/graph/src/task_spawn.rs +++ b/graph/src/task_spawn.rs @@ -51,6 +51,7 @@ pub fn spawn_blocking_allow_panic( } /// Runs the future on the current thread. Panics if not within a tokio runtime. +#[track_caller] pub fn block_on(f: impl Future03) -> T { tokio::runtime::Handle::current().block_on(f) } diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 02922a9ea12..4a3696e79c4 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -2,6 +2,7 @@ use clap::{Parser, Subcommand}; use config::PoolSize; use git_testament::{git_testament, render_testament}; use graph::bail; +use graph::cheap_clone::CheapClone; use graph::endpoint::EndpointMetrics; use graph::env::ENV_VARS; use graph::log::logger_with_levels; @@ -14,13 +15,13 @@ use graph::{ }, url::Url, }; -use graph_chain_ethereum::{EthereumAdapter, EthereumNetworks}; +use graph_chain_ethereum::EthereumAdapter; use graph_graphql::prelude::GraphQlRunner; use graph_node::config::{self, Config as Cfg}; use graph_node::manager::color::Terminal; use graph_node::manager::commands; +use graph_node::network_setup::Networks; use graph_node::{ - chain::create_all_ethereum_networks, manager::{deployment::DeploymentSearch, PanicSubscriptionManager}, store_builder::StoreBuilder, MetricsContext, @@ -32,7 +33,6 @@ use graph_store_postgres::{ SubscriptionManager, PRIMARY_SHARD, }; use lazy_static::lazy_static; -use std::collections::BTreeMap; use std::{collections::HashMap, num::ParseIntError, sync::Arc, time::Duration}; const VERSION_LABEL_KEY: &str = "version"; @@ -910,7 +910,7 @@ impl Context { (primary_pool, mgr) } - fn store(self) -> Arc { + fn store(&self) -> Arc { let (store, _) = self.store_and_pools(); store } @@ -931,12 +931,12 @@ impl Context { .await } - fn store_and_pools(self) -> (Arc, HashMap) { + fn store_and_pools(&self) -> (Arc, HashMap) { let (subgraph_store, pools, _) = StoreBuilder::make_subgraph_store_and_pools( &self.logger, &self.node_id, &self.config, - self.fork_base, + self.fork_base.clone(), self.registry.clone(), ); @@ -949,8 +949,8 @@ impl Context { pools.clone(), subgraph_store, HashMap::default(), - BTreeMap::new(), - self.registry, + Vec::new(), + self.registry.cheap_clone(), ); (store, pools) @@ -987,11 +987,11 @@ impl Context { )) } - async fn ethereum_networks(&self) -> anyhow::Result { + async fn networks(&self, block_store: Arc) -> anyhow::Result { let logger = self.logger.clone(); let registry = self.metrics_registry(); let metrics = Arc::new(EndpointMetrics::mock()); - create_all_ethereum_networks(logger, registry, &self.config, metrics).await + Networks::from_config(logger, &self.config, registry, metrics, block_store).await } fn chain_store(self, chain_name: &str) -> anyhow::Result> { @@ -1006,12 +1006,13 @@ impl Context { self, chain_name: &str, ) -> anyhow::Result<(Arc, Arc)> { - let ethereum_networks = self.ethereum_networks().await?; + let block_store = self.store().block_store(); + let networks = self.networks(block_store).await?; let chain_store = self.chain_store(chain_name)?; - let ethereum_adapter = ethereum_networks - .networks - .get(chain_name) - .and_then(|adapters| adapters.cheapest()) + let ethereum_adapter = networks + .ethereum_rpcs(chain_name.into()) + .cheapest() + .await .ok_or(anyhow::anyhow!( "Failed to obtain an Ethereum adapter for chain 
'{}'", chain_name diff --git a/node/src/chain.rs b/node/src/chain.rs index 6b95e564797..b6247d9a78a 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -1,23 +1,44 @@ use crate::config::{Config, ProviderDetails}; -use ethereum::{EthereumNetworks, ProviderEthRpcMetrics}; -use graph::anyhow::{bail, Error}; -use graph::blockchain::{Block as BlockchainBlock, BlockchainKind, ChainIdentifier}; +use crate::network_setup::{ + AdapterConfiguration, EthAdapterConfig, FirehoseAdapterConfig, Networks, +}; +use ethereum::chain::{ + EthereumAdapterSelector, EthereumBlockRefetcher, EthereumRuntimeAdapterBuilder, + EthereumStreamBuilder, +}; +use ethereum::network::EthereumNetworkAdapter; +use ethereum::ProviderEthRpcMetrics; +use graph::anyhow::bail; +use graph::blockchain::client::ChainClient; +use graph::blockchain::{ + BasicBlockchainBuilder, Blockchain as _, BlockchainBuilder as _, BlockchainKind, BlockchainMap, + ChainIdentifier, +}; use graph::cheap_clone::CheapClone; +use graph::components::adapter::ChainId; +use graph::components::store::{BlockStore as _, ChainStore}; +use graph::data::store::NodeId; use graph::endpoint::EndpointMetrics; -use graph::firehose::{FirehoseEndpoint, FirehoseNetworks, SubgraphLimit}; -use graph::futures03::future::{join_all, try_join_all}; +use graph::env::{EnvVars, ENV_VARS}; +use graph::firehose::{ + FirehoseEndpoint, FirehoseGenesisDecoder, GenesisDecoder, SubgraphLimit, + SubstreamsGenesisDecoder, +}; +use graph::futures03::future::try_join_all; use graph::futures03::TryFutureExt; use graph::ipfs_client::IpfsClient; -use graph::prelude::{anyhow, tokio}; -use graph::prelude::{prost, MetricsRegistry}; +use graph::itertools::Itertools; +use graph::log::factory::LoggerFactory; +use graph::prelude::anyhow; +use graph::prelude::MetricsRegistry; use graph::slog::{debug, error, info, o, Logger}; use graph::url::Url; -use graph::util::futures::retry; use graph::util::security::SafeDisplay; -use graph_chain_ethereum::{self as ethereum, EthereumAdapterTrait, Transport}; -use std::collections::{btree_map, BTreeMap}; +use graph_chain_ethereum::{self as ethereum, Transport}; +use graph_store_postgres::{BlockStore, ChainHeadUpdateListener}; +use std::cmp::Ordering; +use std::collections::BTreeMap; use std::sync::Arc; -use std::time::Duration; // The status of a provider that we learned from connecting to it #[derive(PartialEq)] @@ -32,11 +53,6 @@ pub enum ProviderNetworkStatus { }, } -/// How long we will hold up node startup to get the net version and genesis -/// hash from the client. If we can't get it within that time, we'll try and -/// continue regardless. 
-const NET_VERSION_WAIT_TIME: Duration = Duration::from_secs(30); - pub fn create_ipfs_clients(logger: &Logger, ipfs_addresses: &Vec) -> Vec { // Parse the IPFS URL from the `--ipfs` command line argument let ipfs_addresses: Vec<_> = ipfs_addresses @@ -108,7 +124,7 @@ pub fn create_substreams_networks( logger: Logger, config: &Config, endpoint_metrics: Arc, -) -> BTreeMap { +) -> Vec { debug!( logger, "Creating firehose networks [{} chains, ingestor {}]", @@ -116,50 +132,60 @@ pub fn create_substreams_networks( config.chains.ingestor, ); - let mut networks_by_kind = BTreeMap::new(); + let mut networks_by_kind: BTreeMap<(BlockchainKind, ChainId), Vec>> = + BTreeMap::new(); for (name, chain) in &config.chains.chains { + let name: ChainId = name.as_str().into(); for provider in &chain.providers { if let ProviderDetails::Substreams(ref firehose) = provider.details { info!( logger, - "Configuring firehose endpoint"; + "Configuring substreams endpoint"; "provider" => &provider.label, + "network" => &name.to_string(), ); let parsed_networks = networks_by_kind - .entry(chain.protocol) - .or_insert_with(FirehoseNetworks::new); + .entry((chain.protocol, name.clone())) + .or_insert_with(Vec::new); for _ in 0..firehose.conn_pool_size { - parsed_networks.insert( - name.to_string(), - Arc::new(FirehoseEndpoint::new( - // This label needs to be the original label so that the metrics - // can be deduped. - &provider.label, - &firehose.url, - firehose.token.clone(), - firehose.key.clone(), - firehose.filters_enabled(), - firehose.compression_enabled(), - SubgraphLimit::Unlimited, - endpoint_metrics.clone(), - )), - ); + parsed_networks.push(Arc::new(FirehoseEndpoint::new( + // This label needs to be the original label so that the metrics + // can be deduped. + &provider.label, + &firehose.url, + firehose.token.clone(), + firehose.key.clone(), + firehose.filters_enabled(), + firehose.compression_enabled(), + SubgraphLimit::Unlimited, + endpoint_metrics.clone(), + Box::new(SubstreamsGenesisDecoder {}), + ))); } } } } networks_by_kind + .into_iter() + .map(|((kind, chain_id), endpoints)| { + AdapterConfiguration::Substreams(FirehoseAdapterConfig { + chain_id, + kind, + adapters: endpoints.into(), + }) + }) + .collect() } pub fn create_firehose_networks( logger: Logger, config: &Config, endpoint_metrics: Arc, -) -> BTreeMap { +) -> Vec { debug!( logger, "Creating firehose networks [{} chains, ingestor {}]", @@ -167,20 +193,45 @@ pub fn create_firehose_networks( config.chains.ingestor, ); - let mut networks_by_kind = BTreeMap::new(); + let mut networks_by_kind: BTreeMap<(BlockchainKind, ChainId), Vec>> = + BTreeMap::new(); for (name, chain) in &config.chains.chains { + let name: ChainId = name.as_str().into(); for provider in &chain.providers { + let logger = logger.cheap_clone(); if let ProviderDetails::Firehose(ref firehose) = provider.details { info!( - logger, + &logger, "Configuring firehose endpoint"; "provider" => &provider.label, + "network" => &name.to_string(), ); let parsed_networks = networks_by_kind - .entry(chain.protocol) - .or_insert_with(FirehoseNetworks::new); + .entry((chain.protocol, name.clone())) + .or_insert_with(Vec::new); + + let decoder: Box = match chain.protocol { + BlockchainKind::Arweave => { + FirehoseGenesisDecoder::::new(logger) + } + BlockchainKind::Ethereum => { + FirehoseGenesisDecoder::::new(logger) + } + BlockchainKind::Near => { + FirehoseGenesisDecoder::::new(logger) + } + BlockchainKind::Cosmos => { + FirehoseGenesisDecoder::::new(logger) + } + BlockchainKind::Substreams 
=> { + unreachable!("Substreams configuration should not be handled here"); + } + BlockchainKind::Starknet => { + FirehoseGenesisDecoder::::new(logger) + } + }; // Create n FirehoseEndpoints where n is the size of the pool. If a // subgraph limit is defined for this endpoint then each endpoint @@ -189,240 +240,34 @@ pub fn create_firehose_networks( // of FirehoseEndpoint and each of those instance can be used in 2 different // SubgraphInstances. for _ in 0..firehose.conn_pool_size { - parsed_networks.insert( - name.to_string(), - Arc::new(FirehoseEndpoint::new( - // This label needs to be the original label so that the metrics - // can be deduped. - &provider.label, - &firehose.url, - firehose.token.clone(), - firehose.key.clone(), - firehose.filters_enabled(), - firehose.compression_enabled(), - firehose.limit_for(&config.node), - endpoint_metrics.cheap_clone(), - )), - ); + parsed_networks.push(Arc::new(FirehoseEndpoint::new( + // This label needs to be the original label so that the metrics + // can be deduped. + &provider.label, + &firehose.url, + firehose.token.clone(), + firehose.key.clone(), + firehose.filters_enabled(), + firehose.compression_enabled(), + firehose.limit_for(&config.node), + endpoint_metrics.cheap_clone(), + decoder.box_clone(), + ))); } } } } networks_by_kind -} - -/// Try to connect to all the providers in `eth_networks` and get their net -/// version and genesis block. Return the same `eth_networks` and the -/// retrieved net identifiers grouped by network name. Remove all providers -/// for which trying to connect resulted in an error from the returned -/// `EthereumNetworks`, since it's likely pointless to try and connect to -/// them. If the connection attempt to a provider times out after -/// `NET_VERSION_WAIT_TIME`, keep the provider, but don't report a -/// version for it. -pub async fn connect_ethereum_networks( - logger: &Logger, - mut eth_networks: EthereumNetworks, -) -> Result<(EthereumNetworks, BTreeMap), anyhow::Error> { - // This has one entry for each provider, and therefore multiple entries - // for each network - let statuses = join_all( - eth_networks - .flatten() - .into_iter() - .map(|(network_name, capabilities, eth_adapter)| { - (network_name, capabilities, eth_adapter, logger.clone()) + .into_iter() + .map(|((kind, chain_id), endpoints)| { + AdapterConfiguration::Firehose(FirehoseAdapterConfig { + chain_id, + kind, + adapters: endpoints.into(), }) - .map(|(network, capabilities, eth_adapter, logger)| async move { - let logger = logger.new(o!("provider" => eth_adapter.provider().to_string())); - info!( - logger, "Connecting to Ethereum to get network identifier"; - "capabilities" => &capabilities - ); - match tokio::time::timeout(NET_VERSION_WAIT_TIME, eth_adapter.net_identifiers()) - .await - .map_err(Error::from) - { - // An `Err` means a timeout, an `Ok(Err)` means some other error (maybe a typo - // on the URL) - Ok(Err(e)) | Err(e) => { - error!(logger, "Connection to provider failed. 
Not using this provider"; - "error" => e.to_string()); - ProviderNetworkStatus::Broken { - chain_id: network, - provider: eth_adapter.provider().to_string(), - } - } - Ok(Ok(ident)) => { - info!( - logger, - "Connected to Ethereum"; - "network_version" => &ident.net_version, - "capabilities" => &capabilities - ); - ProviderNetworkStatus::Version { - chain_id: network, - ident, - } - } - } - }), - ) - .await; - - // Group identifiers by network name - let idents: BTreeMap = - statuses - .into_iter() - .try_fold(BTreeMap::new(), |mut networks, status| { - match status { - ProviderNetworkStatus::Broken { - chain_id: network, - provider, - } => eth_networks.remove(&network, &provider), - ProviderNetworkStatus::Version { - chain_id: network, - ident, - } => match networks.entry(network.clone()) { - btree_map::Entry::Vacant(entry) => { - entry.insert(ident); - } - btree_map::Entry::Occupied(entry) => { - if &ident != entry.get() { - return Err(anyhow!( - "conflicting network identifiers for chain {}: `{}` != `{}`", - network, - ident, - entry.get() - )); - } - } - }, - } - Ok(networks) - })?; - Ok((eth_networks, idents)) -} - -/// Try to connect to all the providers in `firehose_networks` and get their net -/// version and genesis block. Return the same `eth_networks` and the -/// retrieved net identifiers grouped by network name. Remove all providers -/// for which trying to connect resulted in an error from the returned -/// `EthereumNetworks`, since it's likely pointless to try and connect to -/// them. If the connection attempt to a provider times out after -/// `NET_VERSION_WAIT_TIME`, keep the provider, but don't report a -/// version for it. -pub async fn connect_firehose_networks( - logger: &Logger, - mut firehose_networks: FirehoseNetworks, -) -> Result<(FirehoseNetworks, BTreeMap), Error> -where - M: prost::Message + BlockchainBlock + Default + 'static, -{ - // This has one entry for each provider, and therefore multiple entries - // for each network - let statuses = join_all( - firehose_networks - .flatten() - .into_iter() - .map(|(chain_id, endpoint)| (chain_id, endpoint, logger.clone())) - .map(|((chain_id, _), endpoint, logger)| async move { - let logger = logger.new(o!("provider" => endpoint.provider.to_string())); - info!( - logger, "Connecting to Firehose to get chain identifier"; - "provider" => &endpoint.provider.to_string(), - ); - - let retry_endpoint = endpoint.clone(); - let retry_logger = logger.clone(); - let req = retry("firehose startup connection test", &logger) - .no_limit() - .no_timeout() - .run(move || { - let retry_endpoint = retry_endpoint.clone(); - let retry_logger = retry_logger.clone(); - async move { retry_endpoint.genesis_block_ptr::(&retry_logger).await } - }); - - match tokio::time::timeout(NET_VERSION_WAIT_TIME, req) - .await - .map_err(Error::from) - { - // An `Err` means a timeout, an `Ok(Err)` means some other error (maybe a typo - // on the URL) - Ok(Err(e)) | Err(e) => { - error!(logger, "Connection to provider failed. Not using this provider"; - "error" => format!("{:#}", e)); - ProviderNetworkStatus::Broken { - chain_id, - provider: endpoint.provider.to_string(), - } - } - Ok(Ok(ptr)) => { - info!( - logger, - "Connected to Firehose"; - "provider" => &endpoint.provider.to_string(), - "genesis_block" => format_args!("{}", &ptr), - ); - - // BUG: Firehose doesn't provide the net_version. 
- // See also: firehose-no-net-version - let ident = ChainIdentifier { - net_version: "0".to_string(), - genesis_block_hash: ptr.hash, - }; - - ProviderNetworkStatus::Version { chain_id, ident } - } - } - }), - ) - .await; - - // Group identifiers by chain id - let idents: BTreeMap = - statuses - .into_iter() - .try_fold(BTreeMap::new(), |mut networks, status| { - match status { - ProviderNetworkStatus::Broken { chain_id, provider } => { - firehose_networks.remove(&chain_id, &provider) - } - ProviderNetworkStatus::Version { chain_id, ident } => { - match networks.entry(chain_id.clone()) { - btree_map::Entry::Vacant(entry) => { - entry.insert(ident); - } - btree_map::Entry::Occupied(entry) => { - if &ident != entry.get() { - return Err(anyhow!( - "conflicting network identifiers for chain {}: `{}` != `{}`", - chain_id, - ident, - entry.get() - )); - } - } - } - } - } - Ok(networks) - })?; - - // Clean-up chains with 0 provider - firehose_networks.networks.retain(|chain_id, endpoints| { - if endpoints.len() == 0 { - error!( - logger, - "No non-broken providers available for chain {}; ignoring this chain", chain_id - ); - } - - endpoints.len() > 0 - }); - - Ok((firehose_networks, idents)) + }) + .collect() } /// Parses all Ethereum connection strings and returns their network names and @@ -432,7 +277,7 @@ pub async fn create_all_ethereum_networks( registry: Arc, config: &Config, endpoint_metrics: Arc, -) -> anyhow::Result { +) -> anyhow::Result> { let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(registry)); let eth_networks_futures = config .chains @@ -449,14 +294,7 @@ pub async fn create_all_ethereum_networks( ) }); - Ok(try_join_all(eth_networks_futures) - .await? - .into_iter() - .reduce(|mut a, b| { - a.extend(b); - a - }) - .unwrap_or_else(|| EthereumNetworks::new(endpoint_metrics))) + Ok(try_join_all(eth_networks_futures).await?) } /// Parses a single Ethereum connection string and returns its network name and `EthereumAdapter`. @@ -466,20 +304,21 @@ pub async fn create_ethereum_networks_for_chain( config: &Config, network_name: &str, endpoint_metrics: Arc, -) -> anyhow::Result { - let mut parsed_networks = EthereumNetworks::new(endpoint_metrics.cheap_clone()); +) -> anyhow::Result { let chain = config .chains .chains .get(network_name) .ok_or_else(|| anyhow!("unknown network {}", network_name))?; + let mut adapters = vec![]; + let mut call_only_adapters = vec![]; for provider in &chain.providers { let (web3, call_only) = match &provider.details { ProviderDetails::Web3Call(web3) => (web3, true), ProviderDetails::Web3(web3) => (web3, false), _ => { - parsed_networks.insert_empty(network_name.to_string()); + // parsed_networks.insert_empty(network_name.to_string()); continue; } }; @@ -511,9 +350,8 @@ pub async fn create_ethereum_networks_for_chain( }; let supports_eip_1898 = !web3.features.contains("no_eip1898"); - - parsed_networks.insert( - network_name.to_string(), + let adapter = EthereumNetworkAdapter::new( + endpoint_metrics.cheap_clone(), capabilities, Arc::new( graph_chain_ethereum::EthereumAdapter::new( @@ -528,20 +366,272 @@ pub async fn create_ethereum_networks_for_chain( ), web3.limit_for(&config.node), ); + + if call_only { + call_only_adapters.push(adapter); + } else { + adapters.push(adapter); + } } - parsed_networks.sort(); - Ok(parsed_networks) + adapters.sort_by(|a, b| { + a.capabilities + .partial_cmp(&b.capabilities) + // We can't define a total ordering over node capabilities, + // so incomparable items are considered equal and end up + // near each other. 
+ .unwrap_or(Ordering::Equal) + }); + + Ok(AdapterConfiguration::Rpc(EthAdapterConfig { + chain_id: network_name.into(), + adapters, + call_only: call_only_adapters, + polling_interval: Some(chain.polling_interval), + })) +} + +pub async fn networks_as_chains( + config: &Arc, + blockchain_map: &mut BlockchainMap, + node_id: &NodeId, + logger: &Logger, + networks: &Networks, + store: Arc, + logger_factory: &LoggerFactory, + metrics_registry: Arc, + chain_head_update_listener: Arc, +) { + let adapters = networks + .adapters + .iter() + .chunk_by(|a| a.chain_id()) + .into_iter() + .map(|(chain_id, adapters)| (chain_id, adapters.into_iter().collect_vec())) + .collect_vec(); + + let substreams: Vec<&FirehoseAdapterConfig> = networks + .adapters + .iter() + .flat_map(|a| a.as_substreams()) + .collect(); + + let chains = adapters.into_iter().map(|(chain_id, adapters)| { + let adapters: Vec<&AdapterConfiguration> = adapters.into_iter().collect(); + let kind = adapters + .iter() + .map(|a| a.kind()) + .reduce(|a1, a2| match (a1, a2) { + (BlockchainKind::Substreams, k) => k, + (k, BlockchainKind::Substreams) => k, + (k, _) => k, + }) + .expect("validation should have checked we have at least one provider"); + (chain_id, adapters, kind) + }); + for (chain_id, adapters, kind) in chains.into_iter() { + let chain_store = match store.chain_store(chain_id) { + Some(c) => c, + None => { + let ident = networks + .chain_identifier(&logger, chain_id) + .await + .expect("must be able to get chain identity to create a store"); + store + .create_chain_store(chain_id, ident) + .expect("must be able to create store if one is not yet setup for the chain") + } + }; + + match kind { + BlockchainKind::Arweave => { + let firehose_endpoints = networks.firehose_endpoints(chain_id.clone()); + + blockchain_map.insert::( + chain_id.clone(), + Arc::new( + BasicBlockchainBuilder { + logger_factory: logger_factory.clone(), + name: chain_id.clone(), + chain_store, + firehose_endpoints, + metrics_registry: metrics_registry.clone(), + } + .build(config) + .await, + ), + ); + } + BlockchainKind::Ethereum => { + // The polling interval is set per chain, so if it is set, all adapter + // configurations will have the same value.
+ let polling_interval = adapters + .first() + .and_then(|a| a.as_rpc().and_then(|a| a.polling_interval)) + .unwrap_or(config.ingestor_polling_interval); + + let firehose_endpoints = networks.firehose_endpoints(chain_id.clone()); + let eth_adapters = networks.ethereum_rpcs(chain_id.clone()); + + let cc = if firehose_endpoints.len() > 0 { + ChainClient::::new_firehose(firehose_endpoints) + } else { + ChainClient::::new_rpc(eth_adapters.clone()) + }; + + let client = Arc::new(cc); + let adapter_selector = EthereumAdapterSelector::new( + logger_factory.clone(), + client.clone(), + metrics_registry.clone(), + chain_store.clone(), + ); + + let call_cache = chain_store.cheap_clone(); + + let chain = ethereum::Chain::new( + logger_factory.clone(), + chain_id.clone(), + node_id.clone(), + metrics_registry.clone(), + chain_store.cheap_clone(), + call_cache, + client, + chain_head_update_listener.clone(), + Arc::new(EthereumStreamBuilder {}), + Arc::new(EthereumBlockRefetcher {}), + Arc::new(adapter_selector), + Arc::new(EthereumRuntimeAdapterBuilder {}), + Arc::new(eth_adapters.clone()), + ENV_VARS.reorg_threshold, + polling_interval, + true, + ); + + blockchain_map + .insert::(chain_id.clone(), Arc::new(chain)); + } + BlockchainKind::Near => { + let firehose_endpoints = networks.firehose_endpoints(chain_id.clone()); + blockchain_map.insert::( + chain_id.clone(), + Arc::new( + BasicBlockchainBuilder { + logger_factory: logger_factory.clone(), + name: chain_id.clone(), + chain_store, + firehose_endpoints, + metrics_registry: metrics_registry.clone(), + } + .build(config) + .await, + ), + ); + } + BlockchainKind::Cosmos => { + let firehose_endpoints = networks.firehose_endpoints(chain_id.clone()); + blockchain_map.insert::( + chain_id.clone(), + Arc::new( + BasicBlockchainBuilder { + logger_factory: logger_factory.clone(), + name: chain_id.clone(), + chain_store, + firehose_endpoints, + metrics_registry: metrics_registry.clone(), + } + .build(config) + .await, + ), + ); + } + BlockchainKind::Starknet => { + let firehose_endpoints = networks.firehose_endpoints(chain_id.clone()); + blockchain_map.insert::( + chain_id.clone(), + Arc::new( + BasicBlockchainBuilder { + logger_factory: logger_factory.clone(), + name: chain_id.clone(), + chain_store, + firehose_endpoints, + metrics_registry: metrics_registry.clone(), + } + .build(config) + .await, + ), + ); + } + BlockchainKind::Substreams => {} + } + } + + fn chain_store( + blockchain_map: &BlockchainMap, + kind: &BlockchainKind, + network: ChainId, + ) -> anyhow::Result> { + let chain_store: Arc = match kind { + BlockchainKind::Arweave => blockchain_map + .get::(network) + .map(|c| c.chain_store())?, + BlockchainKind::Ethereum => blockchain_map + .get::(network) + .map(|c| c.chain_store())?, + BlockchainKind::Near => blockchain_map + .get::(network) + .map(|c| c.chain_store())?, + BlockchainKind::Cosmos => blockchain_map + .get::(network) + .map(|c| c.chain_store())?, + BlockchainKind::Substreams => blockchain_map + .get::(network) + .map(|c| c.chain_store())?, + BlockchainKind::Starknet => blockchain_map + .get::(network) + .map(|c| c.chain_store())?, + }; + + Ok(chain_store) + } + + for FirehoseAdapterConfig { + chain_id, + kind, + adapters: _, + } in substreams.iter() + { + let chain_store = chain_store(&blockchain_map, kind, chain_id.clone()).expect(&format!( + "{} requires an rpc or firehose endpoint defined", + chain_id + )); + let substreams_endpoints = networks.substreams_endpoints(chain_id.clone()); + + blockchain_map.insert::( + 
chain_id.clone(), + Arc::new( + BasicBlockchainBuilder { + logger_factory: logger_factory.clone(), + name: chain_id.clone(), + chain_store, + firehose_endpoints: substreams_endpoints, + metrics_registry: metrics_registry.clone(), + } + .build(config) + .await, + ), + ); + } } #[cfg(test)] mod test { - use crate::chain::create_all_ethereum_networks; use crate::config::{Config, Opt}; + use crate::network_setup::{AdapterConfiguration, Networks}; + use graph::components::adapter::{ChainId, MockIdentValidator}; use graph::endpoint::EndpointMetrics; use graph::log::logger; use graph::prelude::{tokio, MetricsRegistry}; - use graph::prometheus::Registry; use graph_chain_ethereum::NodeCapabilities; use std::sync::Arc; @@ -570,17 +660,18 @@ mod test { let metrics = Arc::new(EndpointMetrics::mock()); let config = Config::load(&logger, &opt).expect("can create config"); - let prometheus_registry = Arc::new(Registry::new()); - let metrics_registry = Arc::new(MetricsRegistry::new( - logger.clone(), - prometheus_registry.clone(), - )); + let metrics_registry = Arc::new(MetricsRegistry::mock()); + let ident_validator = Arc::new(MockIdentValidator); - let ethereum_networks = - create_all_ethereum_networks(logger, metrics_registry, &config, metrics) + let networks = + Networks::from_config(logger, &config, metrics_registry, metrics, ident_validator) .await - .expect("Correctly parse Ethereum network args"); - let mut network_names = ethereum_networks.networks.keys().collect::>(); + .expect("can parse config"); + let mut network_names = networks + .adapters + .iter() + .map(|a| a.chain_id()) + .collect::>(); network_names.sort(); let traces = NodeCapabilities { @@ -592,45 +683,26 @@ mod test { traces: false, }; - let has_mainnet_with_traces = ethereum_networks - .adapter_with_capabilities("mainnet".to_string(), &traces) - .is_ok(); - let has_goerli_with_archive = ethereum_networks - .adapter_with_capabilities("goerli".to_string(), &archive) - .is_ok(); - let has_mainnet_with_archive = ethereum_networks - .adapter_with_capabilities("mainnet".to_string(), &archive) - .is_ok(); - let has_goerli_with_traces = ethereum_networks - .adapter_with_capabilities("goerli".to_string(), &traces) - .is_ok(); - - assert_eq!(has_mainnet_with_traces, true); - assert_eq!(has_goerli_with_archive, true); - assert_eq!(has_mainnet_with_archive, false); - assert_eq!(has_goerli_with_traces, false); - - let goerli_capability = ethereum_networks - .networks - .get("goerli") - .unwrap() + let mainnet: Vec<&AdapterConfiguration> = networks .adapters - .first() - .unwrap() - .capabilities; - let mainnet_capability = ethereum_networks - .networks - .get("mainnet") - .unwrap() + .iter() + .filter(|a| a.chain_id().as_str().eq("mainnet")) + .collect(); + assert_eq!(mainnet.len(), 1); + let mainnet = mainnet.first().unwrap().as_rpc().unwrap(); + assert_eq!(mainnet.adapters.len(), 1); + let mainnet = mainnet.adapters.first().unwrap(); + assert_eq!(mainnet.capabilities, traces); + + let goerli: Vec<&AdapterConfiguration> = networks .adapters - .first() - .unwrap() - .capabilities; - assert_eq!( - network_names, - vec![&"goerli".to_string(), &"mainnet".to_string()] - ); - assert_eq!(goerli_capability, archive); - assert_eq!(mainnet_capability, traces); + .iter() + .filter(|a| a.chain_id().as_str().eq("goerli")) + .collect(); + assert_eq!(goerli.len(), 1); + let goerli = goerli.first().unwrap().as_rpc().unwrap(); + assert_eq!(goerli.adapters.len(), 1); + let goerli = goerli.adapters.first().unwrap(); + assert_eq!(goerli.capabilities, archive); } 
} diff --git a/node/src/config.rs b/node/src/config.rs index 6fb0135d99e..93aab34ee8c 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -1,6 +1,7 @@ use graph::{ anyhow::Error, blockchain::BlockchainKind, + components::adapter::ChainId, env::ENV_VARS, firehose::{SubgraphLimit, SUBGRAPHS_PER_CONN}, itertools::Itertools, @@ -10,15 +11,17 @@ use graph::{ regex::Regex, serde::{ de::{self, value, SeqAccess, Visitor}, - Deserialize, Deserializer, Serialize, + Deserialize, Deserializer, }, serde_json, serde_regex, toml, Logger, NodeId, StoreError, }, }; -use graph_chain_ethereum::{self as ethereum, NodeCapabilities}; +use graph_chain_ethereum as ethereum; +use graph_chain_ethereum::NodeCapabilities; use graph_store_postgres::{DeploymentPlacer, Shard as ShardName, PRIMARY_SHARD}; use graph::http::{HeaderMap, Uri}; +use serde::Serialize; use std::{ collections::{BTreeMap, BTreeSet}, fmt, @@ -101,6 +104,14 @@ fn validate_name(s: &str) -> Result<()> { } impl Config { + pub fn chain_ids(&self) -> Vec { + self.chains + .chains + .keys() + .map(|k| k.as_str().into()) + .collect() + } + /// Check that the config is valid. fn validate(&mut self) -> Result<()> { if !self.stores.contains_key(PRIMARY_SHARD.as_str()) { diff --git a/node/src/lib.rs b/node/src/lib.rs index f26f14fef5b..f65ffc1be8f 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -7,6 +7,7 @@ extern crate diesel; pub mod chain; pub mod config; +pub mod network_setup; pub mod opt; pub mod store_builder; diff --git a/node/src/main.rs b/node/src/main.rs index 28a637ea4c1..0572f1997b1 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -1,48 +1,29 @@ use clap::Parser as _; -use ethereum::chain::{ - EthereumAdapterSelector, EthereumBlockRefetcher, EthereumRuntimeAdapterBuilder, - EthereumStreamBuilder, -}; -use ethereum::{BlockIngestor, EthereumNetworks}; use git_testament::{git_testament, render_testament}; -use graph::blockchain::client::ChainClient; +use graph::components::adapter::IdentValidator; use graph::futures01::Future as _; use graph::futures03::compat::Future01CompatExt; use graph::futures03::future::TryFutureExt; -use graph_chain_ethereum::codec::HeaderOnlyBlock; -use graph::blockchain::{ - BasicBlockchainBuilder, Blockchain, BlockchainBuilder, BlockchainKind, BlockchainMap, - ChainIdentifier, -}; +use graph::blockchain::{Blockchain, BlockchainKind}; use graph::components::link_resolver::{ArweaveClient, FileSizeLimit}; -use graph::components::store::BlockStore; use graph::components::subgraph::Settings; use graph::data::graphql::load_manager::LoadManager; use graph::endpoint::EndpointMetrics; use graph::env::EnvVars; -use graph::firehose::{FirehoseEndpoints, FirehoseNetworks}; use graph::log::logger; use graph::prelude::*; use graph::prometheus::Registry; use graph::url::Url; -use graph_chain_arweave::{self as arweave, Block as ArweaveBlock}; -use graph_chain_cosmos::{self as cosmos, Block as CosmosFirehoseBlock}; -use graph_chain_ethereum as ethereum; -use graph_chain_near::{self as near, HeaderOnlyBlock as NearFirehoseHeaderOnlyBlock}; -use graph_chain_starknet::{self as starknet, Block as StarknetBlock}; -use graph_chain_substreams as substreams; use graph_core::polling_monitor::{arweave_service, ipfs_service}; use graph_core::{ SubgraphAssignmentProvider as IpfsSubgraphAssignmentProvider, SubgraphInstanceManager, SubgraphRegistrar as IpfsSubgraphRegistrar, }; use graph_graphql::prelude::GraphQlRunner; -use graph_node::chain::{ - connect_ethereum_networks, connect_firehose_networks, create_all_ethereum_networks, - 
create_firehose_networks, create_ipfs_clients, create_substreams_networks, -}; +use graph_node::chain::create_ipfs_clients; use graph_node::config::Config; +use graph_node::network_setup::Networks; use graph_node::opt; use graph_node::store_builder::StoreBuilder; use graph_server_http::GraphQLServer as GraphQLQueryServer; @@ -50,9 +31,7 @@ use graph_server_index_node::IndexNodeServer; use graph_server_json_rpc::JsonRpcServer; use graph_server_metrics::PrometheusMetricsServer; use graph_server_websocket::SubscriptionServer as GraphQLSubscriptionServer; -use graph_store_postgres::{register_jobs as register_store_jobs, ChainHeadUpdateListener, Store}; -use std::collections::BTreeMap; -use std::collections::HashMap; +use graph_store_postgres::register_jobs as register_store_jobs; use std::io::{BufRead, BufReader}; use std::path::Path; use std::time::Duration; @@ -98,24 +77,6 @@ fn read_expensive_queries( Ok(queries) } -macro_rules! collect_ingestors { - ($acc:ident, $logger:ident, $($chain:ident),+) => { - $( - $chain.iter().for_each(|(network_name, chain)| { - let logger = $logger.new(o!("network_name" => network_name.clone())); - match chain.block_ingestor() { - Ok(ingestor) =>{ - info!(logger, "Started block ingestor"); - $acc.push(ingestor); - } - Err(err) => error!(&logger, - "Failed to create block ingestor {}",err), - } - }); - )+ - }; -} - #[tokio::main] async fn main() { env_logger::init(); @@ -173,7 +134,6 @@ async fn main() { let node_id = NodeId::new(opt.node_id.clone()) .expect("Node ID must be between 1 and 63 characters in length"); - let query_only = config.query_only(&node_id); // Obtain subgraph related command-line arguments let subgraph = opt.subgraph.clone(); @@ -274,33 +234,6 @@ async fn main() { metrics_registry.cheap_clone(), )); - // Ethereum clients; query nodes ignore all ethereum clients and never - // connect to them directly - let eth_networks = if query_only { - EthereumNetworks::new(endpoint_metrics.cheap_clone()) - } else { - create_all_ethereum_networks( - logger.clone(), - metrics_registry.clone(), - &config, - endpoint_metrics.cheap_clone(), - ) - .await - .expect("Failed to parse Ethereum networks") - }; - - let mut firehose_networks_by_kind = if query_only { - BTreeMap::new() - } else { - create_firehose_networks(logger.clone(), &config, endpoint_metrics.cheap_clone()) - }; - - let mut substreams_networks_by_kind = if query_only { - BTreeMap::new() - } else { - create_substreams_networks(logger.clone(), &config, endpoint_metrics.clone()) - }; - let graphql_metrics_registry = metrics_registry.clone(); let contention_logger = logger.clone(); @@ -323,186 +256,63 @@ async fn main() { let chain_head_update_listener = store_builder.chain_head_update_listener(); let primary_pool = store_builder.primary_pool(); - // To support the ethereum block ingestor, ethereum networks are referenced both by the - // `blockchain_map` and `ethereum_chains`. Future chains should be referred to only in - // `blockchain_map`. - let mut blockchain_map = BlockchainMap::new(); - - // Unwraps: `connect_ethereum_networks` and `connect_firehose_networks` only fail if - // mismatching chain identifiers are returned for a same network, which indicates a serious - // inconsistency between providers. 
- let (arweave_networks, arweave_idents) = connect_firehose_networks::( - &logger, - firehose_networks_by_kind - .remove(&BlockchainKind::Arweave) - .unwrap_or_else(FirehoseNetworks::new), + let network_store = store_builder.network_store(config.chain_ids()); + let block_store = network_store.block_store(); + let validator: Arc = network_store.block_store(); + let network_adapters = Networks::from_config( + logger.cheap_clone(), + &config, + metrics_registry.cheap_clone(), + endpoint_metrics, + validator, ) .await - .unwrap(); - - // This only has idents for chains with rpc adapters. - let (eth_networks, ethereum_idents) = connect_ethereum_networks(&logger, eth_networks) - .await - .unwrap(); + .expect("unable to parse network configuration"); - let (eth_firehose_only_networks, eth_firehose_only_idents) = - connect_firehose_networks::( + let blockchain_map = network_adapters + .blockchain_map( + &env_vars, + &node_id, &logger, - firehose_networks_by_kind - .remove(&BlockchainKind::Ethereum) - .unwrap_or_else(FirehoseNetworks::new), + block_store, + &logger_factory, + metrics_registry.cheap_clone(), + chain_head_update_listener, ) - .await - .unwrap(); + .await; - let (near_networks, near_idents) = - connect_firehose_networks::( - &logger, - firehose_networks_by_kind - .remove(&BlockchainKind::Near) - .unwrap_or_else(FirehoseNetworks::new), - ) - .await - .unwrap(); - - let (cosmos_networks, cosmos_idents) = connect_firehose_networks::( - &logger, - firehose_networks_by_kind - .remove(&BlockchainKind::Cosmos) - .unwrap_or_else(FirehoseNetworks::new), - ) - .await - .unwrap(); - - let substreams_networks = substreams_networks_by_kind - .remove(&BlockchainKind::Substreams) - .unwrap_or_else(FirehoseNetworks::new); - - let (starknet_networks, starknet_idents) = connect_firehose_networks::( - &logger, - firehose_networks_by_kind - .remove(&BlockchainKind::Starknet) - .unwrap_or_else(FirehoseNetworks::new), - ) - .await - .unwrap(); - - let substream_idents = substreams_networks - .networks - .keys() - .map(|name| { - ( - name.clone(), - ChainIdentifier { - net_version: name.to_string(), - genesis_block_hash: BlockHash::default(), - }, - ) - }) - .collect::>(); - - // Note that both `eth_firehose_only_idents` and `ethereum_idents` contain Ethereum - // networks. If the same network is configured in both RPC and Firehose, the RPC ident takes - // precedence. This is necessary because Firehose endpoints currently have no `net_version`. - // See also: firehose-no-net-version. 
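The precedence noted in the comment above falls out of `BTreeMap::extend`: the map is seeded with the Firehose-only idents and then extended with the RPC idents, and `extend` overwrites entries that share a key. A minimal sketch of that semantics (toy string idents rather than the real `ChainIdentifier` values):

use std::collections::BTreeMap;

fn main() {
    // Seed with the Firehose-only ident, then extend with the RPC ident;
    // `extend` replaces entries that share a key, so the RPC value wins.
    let mut idents = BTreeMap::from([("mainnet", "firehose-ident")]);
    idents.extend([("mainnet", "rpc-ident")]);
    assert_eq!(idents["mainnet"], "rpc-ident");
}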
- let mut network_identifiers = eth_firehose_only_idents; - network_identifiers.extend(ethereum_idents); - network_identifiers.extend(arweave_idents); - network_identifiers.extend(near_idents); - network_identifiers.extend(cosmos_idents); - network_identifiers.extend(substream_idents); - network_identifiers.extend(starknet_idents); - - let network_store = store_builder.network_store(network_identifiers); - - let arweave_chains = networks_as_chains::<arweave::Chain>( - &env_vars, - &mut blockchain_map, - &logger, - &arweave_networks, - substreams_networks_by_kind.get(&BlockchainKind::Arweave), - network_store.as_ref(), - &logger_factory, - metrics_registry.clone(), - ); - - let eth_firehose_only_networks = if eth_firehose_only_networks.networks.len() == 0 { - None - } else { - Some(&eth_firehose_only_networks) - }; - - if !opt.disable_block_ingestor && eth_networks.networks.len() != 0 { - let eth_network_names = Vec::from_iter(eth_networks.networks.keys()); - let fh_only = match eth_firehose_only_networks { - Some(firehose_only) => Some(Vec::from_iter(firehose_only.networks.keys())), - None => None, - }; - network_store - .block_store() - .cleanup_ethereum_shallow_blocks(eth_network_names, fh_only) - .unwrap(); + // see comment on cleanup_ethereum_shallow_blocks + if !opt.disable_block_ingestor { + match blockchain_map + .get_all_by_kind::<graph_chain_ethereum::Chain>(BlockchainKind::Ethereum) + .ok() + .map(|chains| { + chains + .iter() + .flat_map(|c| { + if !c.chain_client().is_firehose() { + Some(c.name.to_string()) + } else { + None + } + }) + .collect() + }) { + Some(eth_network_names) => { + network_store + .block_store() + .cleanup_ethereum_shallow_blocks(eth_network_names) + .unwrap(); + } + // This code path is only taken when the downcast on the blockchain map fails. That + // probably means the chain loading logic is broken, so it is safest to refuse to + // start.
+ None => unreachable!( + "If you are seeing this message just use a different version of graph-node" + ), + } } - let ethereum_chains = ethereum_networks_as_chains( - &mut blockchain_map, - &logger, - &config, - node_id.clone(), - metrics_registry.clone(), - eth_firehose_only_networks, - substreams_networks_by_kind.get(&BlockchainKind::Ethereum), - &eth_networks, - network_store.as_ref(), - chain_head_update_listener, - &logger_factory, - metrics_registry.clone(), - ); - - let near_chains = networks_as_chains::<near::Chain>( - &env_vars, - &mut blockchain_map, - &logger, - &near_networks, - substreams_networks_by_kind.get(&BlockchainKind::Near), - network_store.as_ref(), - &logger_factory, - metrics_registry.clone(), - ); - - let cosmos_chains = networks_as_chains::<cosmos::Chain>( - &env_vars, - &mut blockchain_map, - &logger, - &cosmos_networks, - substreams_networks_by_kind.get(&BlockchainKind::Cosmos), - network_store.as_ref(), - &logger_factory, - metrics_registry.clone(), - ); - - let substreams_chains = networks_as_chains::<substreams::Chain>( - &env_vars, - &mut blockchain_map, - &logger, - &substreams_networks, - None, - network_store.as_ref(), - &logger_factory, - metrics_registry.clone(), - ); - - let starknet_chains = networks_as_chains::<starknet::Chain>( - &env_vars, - &mut blockchain_map, - &logger, - &starknet_networks, - substreams_networks_by_kind.get(&BlockchainKind::Starknet), - network_store.as_ref(), - &logger_factory, - metrics_registry.clone(), - ); - let blockchain_map = Arc::new(blockchain_map); let shards: Vec<_> = config.stores.keys().cloned().collect(); @@ -532,21 +342,13 @@ async fn main() { if !opt.disable_block_ingestor { let logger = logger.clone(); - let mut ingestors: Vec<Box<dyn BlockIngestor>> = vec![]; - collect_ingestors!( - ingestors, - logger, - ethereum_chains, - arweave_chains, - near_chains, - cosmos_chains, - substreams_chains, - starknet_chains - ); + let ingestors = Networks::block_ingestors(&logger, &blockchain_map) + .await + .expect("unable to start block ingestors"); ingestors.into_iter().for_each(|ingestor| { let logger = logger.clone(); - info!(logger,"Starting block ingestor for network";"network_name" => &ingestor.network_name()); + info!(logger,"Starting block ingestor for network";"network_name" => &ingestor.network_name().as_str(), "kind" => ingestor.kind().to_string()); graph::spawn(ingestor.run()); }); @@ -727,186 +529,3 @@ async fn main() { graph::futures03::future::pending::<()>().await; } -
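Taken together, the hunks above collapse the per-chain wiring in `main` into a single pipeline. A condensed sketch of the new startup path, assembled from this diff (arguments abbreviated, error handling elided; not a compilable excerpt of the patch):

let network_store = store_builder.network_store(config.chain_ids());
let validator: Arc<dyn IdentValidator> = network_store.block_store();
let networks = Networks::from_config(logger, &config, registry, endpoint_metrics, validator).await?;
let blockchain_map = Arc::new(
    networks
        .blockchain_map(&env_vars, &node_id, &logger, network_store.block_store(), &logger_factory, registry, chain_head_update_listener)
        .await,
);
for ingestor in Networks::block_ingestors(&logger, &blockchain_map).await? {
    graph::spawn(ingestor.run());
}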
-/// Return the hashmap of chains and also add them to `blockchain_map`. -fn networks_as_chains<C>( - config: &Arc<EnvVars>, - blockchain_map: &mut BlockchainMap, - logger: &Logger, - firehose_networks: &FirehoseNetworks, - substreams_networks: Option<&FirehoseNetworks>, - store: &Store, - logger_factory: &LoggerFactory, - metrics_registry: Arc<MetricsRegistry>, -) -> HashMap<String, Arc<C>> -where - C: Blockchain, - BasicBlockchainBuilder: BlockchainBuilder<C>, -{ - let chains: Vec<_> = firehose_networks - .networks - .iter() - .filter_map(|(chain_id, endpoints)| { - store - .block_store() - .chain_store(chain_id) - .map(|chain_store| (chain_id, chain_store, endpoints)) - .or_else(|| { - error!( - logger, - "No store configured for {} chain {}; ignoring this chain", - C::KIND, - chain_id - ); - None - }) - }) - .map(|(chain_id, chain_store, endpoints)| { - ( - chain_id.clone(), - Arc::new( - BasicBlockchainBuilder { - logger_factory: logger_factory.clone(), - name: chain_id.clone(), - chain_store, - firehose_endpoints: endpoints.clone(), - metrics_registry: metrics_registry.clone(), - } - .build(config), - ), - ) - }) - .collect(); - - for (chain_id, chain) in chains.iter() { - blockchain_map.insert::<C>(chain_id.clone(), chain.clone()) - } - - if let Some(substreams_networks) = substreams_networks { - for (network_name, firehose_endpoints) in substreams_networks.networks.iter() { - let chain_store = blockchain_map - .get::<C>(network_name.clone()) - .expect(&format!( - "{} requires an rpc or firehose endpoint defined", - network_name - )) - .chain_store(); - - blockchain_map.insert::<substreams::Chain>( - network_name.clone(), - Arc::new(substreams::Chain::new( - logger_factory.clone(), - firehose_endpoints.clone(), - metrics_registry.clone(), - chain_store, - Arc::new(substreams::BlockStreamBuilder::new()), - )), - ); - } - } - - HashMap::from_iter(chains) -} - -/// Return the hashmap of ethereum chains and also add them to `blockchain_map`.
-fn ethereum_networks_as_chains( - blockchain_map: &mut BlockchainMap, - logger: &Logger, - config: &Config, - node_id: NodeId, - registry: Arc, - firehose_networks: Option<&FirehoseNetworks>, - substreams_networks: Option<&FirehoseNetworks>, - eth_networks: &EthereumNetworks, - store: &Store, - chain_head_update_listener: Arc, - logger_factory: &LoggerFactory, - metrics_registry: Arc, -) -> HashMap> { - let chains: Vec<_> = eth_networks - .networks - .iter() - .filter_map(|(network_name, eth_adapters)| { - store - .block_store() - .chain_store(network_name) - .map(|chain_store| { - let is_ingestible = chain_store.is_ingestible(); - (network_name, eth_adapters, chain_store, is_ingestible) - }) - .or_else(|| { - error!( - logger, - "No store configured for Ethereum chain {}; ignoring this chain", - network_name - ); - None - }) - }) - .map(|(network_name, eth_adapters, chain_store, is_ingestible)| { - let firehose_endpoints = firehose_networks - .and_then(|v| v.networks.get(network_name)) - .map_or_else(FirehoseEndpoints::new, |v| v.clone()); - - let client = Arc::new(ChainClient::::new( - firehose_endpoints, - eth_adapters.clone(), - )); - let adapter_selector = EthereumAdapterSelector::new( - logger_factory.clone(), - client.clone(), - registry.clone(), - chain_store.clone(), - ); - - let call_cache = chain_store.cheap_clone(); - - let chain_config = config.chains.chains.get(network_name).unwrap(); - let chain = ethereum::Chain::new( - logger_factory.clone(), - network_name.clone(), - node_id.clone(), - registry.clone(), - chain_store.cheap_clone(), - call_cache, - client, - chain_head_update_listener.clone(), - Arc::new(EthereumStreamBuilder {}), - Arc::new(EthereumBlockRefetcher {}), - Arc::new(adapter_selector), - Arc::new(EthereumRuntimeAdapterBuilder {}), - Arc::new(eth_adapters.clone()), - ENV_VARS.reorg_threshold, - chain_config.polling_interval, - is_ingestible, - ); - (network_name.clone(), Arc::new(chain)) - }) - .collect(); - - for (network_name, chain) in chains.iter().cloned() { - blockchain_map.insert::(network_name, chain) - } - - if let Some(substreams_networks) = substreams_networks { - for (network_name, firehose_endpoints) in substreams_networks.networks.iter() { - let chain_store = blockchain_map - .get::(network_name.clone()) - .expect("any substreams endpoint needs an rpc or firehose chain defined") - .chain_store(); - - blockchain_map.insert::( - network_name.clone(), - Arc::new(substreams::Chain::new( - logger_factory.clone(), - firehose_endpoints.clone(), - metrics_registry.clone(), - chain_store, - Arc::new(substreams::BlockStreamBuilder::new()), - )), - ); - } - } - - HashMap::from_iter(chains) -} diff --git a/node/src/manager/commands/chain.rs b/node/src/manager/commands/chain.rs index 52d44f67f6b..5c53f4d9b23 100644 --- a/node/src/manager/commands/chain.rs +++ b/node/src/manager/commands/chain.rs @@ -174,9 +174,9 @@ pub fn change_block_cache_shard( .chain_store(&chain_name) .ok_or_else(|| anyhow!("unknown chain: {}", &chain_name))?; let new_name = format!("{}-old", &chain_name); + let ident = chain_store.chain_identifier()?; conn.transaction(|conn| -> Result<(), StoreError> { - let ident = chain_store.chain_identifier.clone(); let shard = Shard::new(shard.to_string())?; let chain = BlockStore::allocate_chain(conn, &chain_name, &shard, &ident)?; @@ -194,7 +194,7 @@ pub fn change_block_cache_shard( // Create a new chain with the name in the destination shard - let _= add_chain(conn, &chain_name, &ident, &shard)?; + let _ = add_chain(conn, &chain_name, &shard, 
ident)?; // Re-add the foreign key constraint sql_query( diff --git a/node/src/manager/commands/config.rs b/node/src/manager/commands/config.rs index 7f595e97e5d..f3b2abf239b 100644 --- a/node/src/manager/commands/config.rs +++ b/node/src/manager/commands/config.rs @@ -2,7 +2,10 @@ use std::{collections::BTreeMap, sync::Arc}; use graph::{ anyhow::{bail, Context}, - components::subgraph::{Setting, Settings}, + components::{ + adapter::{ChainId, MockIdentValidator}, + subgraph::{Setting, Settings}, + }, endpoint::EndpointMetrics, env::EnvVars, itertools::Itertools, @@ -12,10 +15,10 @@ use graph::{ }, slog::Logger, }; -use graph_chain_ethereum::{NodeCapabilities, ProviderEthRpcMetrics}; +use graph_chain_ethereum::NodeCapabilities; use graph_store_postgres::DeploymentPlacer; -use crate::{chain::create_ethereum_networks_for_chain, config::Config}; +use crate::{config::Config, network_setup::Networks}; pub fn place(placer: &dyn DeploymentPlacer, name: &str, network: &str) -> Result<(), Error> { match placer.place(name, network).map_err(|s| anyhow!(s))? { @@ -138,15 +141,18 @@ pub async fn provider( let metrics = Arc::new(EndpointMetrics::mock()); let caps = caps_from_features(features)?; - let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(registry)); - let networks = - create_ethereum_networks_for_chain(&logger, eth_rpc_metrics, config, &network, metrics) - .await?; - let adapters = networks - .networks - .get(&network) - .ok_or_else(|| anyhow!("unknown network {}", network))?; - let adapters = adapters.all_cheapest_with(&caps); + let networks = Networks::from_config( + logger, + &config, + registry, + metrics, + Arc::new(MockIdentValidator), + ) + .await?; + let network: ChainId = network.into(); + let adapters = networks.ethereum_rpcs(network.clone()); + + let adapters = adapters.all_cheapest_with(&caps).await; println!( "deploy on network {} with features [{}] on node {}\neligible providers: {}", network, diff --git a/node/src/manager/commands/run.rs b/node/src/manager/commands/run.rs index 639b5c0e3d9..00a5be6285a 100644 --- a/node/src/manager/commands/run.rs +++ b/node/src/manager/commands/run.rs @@ -2,36 +2,26 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; -use crate::chain::{ - connect_ethereum_networks, create_ethereum_networks_for_chain, create_firehose_networks, - create_ipfs_clients, -}; +use crate::chain::create_ipfs_clients; use crate::config::Config; use crate::manager::PanicSubscriptionManager; +use crate::network_setup::Networks; use crate::store_builder::StoreBuilder; use crate::MetricsContext; -use ethereum::chain::{ - EthereumAdapterSelector, EthereumBlockRefetcher, EthereumRuntimeAdapterBuilder, - EthereumStreamBuilder, -}; -use ethereum::ProviderEthRpcMetrics; -use graph::anyhow::{bail, format_err}; -use graph::blockchain::client::ChainClient; -use graph::blockchain::{BlockchainKind, BlockchainMap}; +use graph::anyhow::bail; use graph::cheap_clone::CheapClone; +use graph::components::adapter::IdentValidator; use graph::components::link_resolver::{ArweaveClient, FileSizeLimit}; -use graph::components::store::{BlockStore as _, DeploymentLocator}; +use graph::components::store::DeploymentLocator; use graph::components::subgraph::Settings; use graph::endpoint::EndpointMetrics; use graph::env::EnvVars; -use graph::firehose::FirehoseEndpoints; use graph::prelude::{ anyhow, tokio, BlockNumber, DeploymentHash, IpfsResolver, LoggerFactory, NodeId, SubgraphAssignmentProvider, SubgraphCountMetric, SubgraphName, SubgraphRegistrar, SubgraphStore, 
SubgraphVersionSwitchingMode, ENV_VARS, }; use graph::slog::{debug, info, Logger}; -use graph_chain_ethereum as ethereum; use graph_core::polling_monitor::{arweave_service, ipfs_service}; use graph_core::{ SubgraphAssignmentProvider as IpfsSubgraphAssignmentProvider, SubgraphInstanceManager, @@ -50,7 +40,7 @@ fn locate(store: &dyn SubgraphStore, hash: &str) -> Result, arweave_url: String, config: Config, @@ -100,90 +90,40 @@ pub async fn run( // possible temporary DNS failures, make the resolver retry let link_resolver = Arc::new(IpfsResolver::new(ipfs_clients, env_vars.cheap_clone())); - let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(metrics_registry.clone())); - let eth_networks = create_ethereum_networks_for_chain( - &logger, - eth_rpc_metrics, + let chain_head_update_listener = store_builder.chain_head_update_listener(); + let network_store = store_builder.network_store(config.chain_ids()); + let block_store = network_store.block_store(); + let ident_validator: Arc = network_store.block_store(); + let networks = Networks::from_config( + logger.cheap_clone(), &config, - &network_name, - endpoint_metrics.cheap_clone(), + metrics_registry.cheap_clone(), + endpoint_metrics, + ident_validator, ) .await - .expect("Failed to parse Ethereum networks"); - let firehose_networks_by_kind = - create_firehose_networks(logger.clone(), &config, endpoint_metrics); - let firehose_networks = firehose_networks_by_kind.get(&BlockchainKind::Ethereum); - let firehose_endpoints = firehose_networks - .and_then(|v| v.networks.get(&network_name)) - .map_or_else(FirehoseEndpoints::new, |v| v.clone()); - - let eth_adapters = match eth_networks.networks.get(&network_name) { - Some(adapters) => adapters.clone(), - None => { - return Err(format_err!( - "No ethereum adapters found, but required in this state of graphman run command" - )) - } - }; - - let eth_adapters2 = eth_adapters.clone(); - let (_, ethereum_idents) = connect_ethereum_networks(&logger, eth_networks).await?; - // let (near_networks, near_idents) = connect_firehose_networks::( - // &logger, - // firehose_networks_by_kind - // .remove(&BlockchainKind::Near) - // .unwrap_or_else(|| FirehoseNetworks::new()), - // ) - // .await; - - let chain_head_update_listener = store_builder.chain_head_update_listener(); - let network_identifiers = ethereum_idents.into_iter().collect(); - let network_store = store_builder.network_store(network_identifiers); + .expect("unable to parse network configuration"); let subgraph_store = network_store.subgraph_store(); - let chain_store = network_store - .block_store() - .chain_store(network_name.as_ref()) - .unwrap_or_else(|| panic!("No chain store for {}", &network_name)); - - let client = Arc::new(ChainClient::new(firehose_endpoints, eth_adapters)); - - let call_cache = Arc::new(ethereum::BufferedCallCache::new(chain_store.cheap_clone())); - let chain_config = config.chains.chains.get(&network_name).unwrap(); - let chain = ethereum::Chain::new( - logger_factory.clone(), - network_name.clone(), - node_id.clone(), - metrics_registry.clone(), - chain_store.cheap_clone(), - call_cache.cheap_clone(), - client.clone(), - chain_head_update_listener, - Arc::new(EthereumStreamBuilder {}), - Arc::new(EthereumBlockRefetcher {}), - Arc::new(EthereumAdapterSelector::new( - logger_factory.clone(), - client, - metrics_registry.clone(), - chain_store.cheap_clone(), - )), - Arc::new(EthereumRuntimeAdapterBuilder {}), - Arc::new(eth_adapters2), - graph::env::ENV_VARS.reorg_threshold, - chain_config.polling_interval, - // We assume 
the tested chain is always ingestible for now - true, + let blockchain_map = Arc::new( + networks + .blockchain_map( + &env_vars, + &node_id, + &logger, + block_store, + &logger_factory, + metrics_registry.cheap_clone(), + chain_head_update_listener, + ) + .await, ); - let mut blockchain_map = BlockchainMap::new(); - blockchain_map.insert(network_name.clone(), Arc::new(chain)); - let static_filters = ENV_VARS.experimental_static_filters; let sg_metrics = Arc::new(SubgraphCountMetric::new(metrics_registry.clone())); - let blockchain_map = Arc::new(blockchain_map); let subgraph_instance_manager = SubgraphInstanceManager::new( &logger_factory, env_vars.cheap_clone(), diff --git a/node/src/network_setup.rs b/node/src/network_setup.rs new file mode 100644 index 00000000000..4e88b9dce07 --- /dev/null +++ b/node/src/network_setup.rs @@ -0,0 +1,412 @@ +use ethereum::{ + network::{EthereumNetworkAdapter, EthereumNetworkAdapters}, + BlockIngestor, +}; +use graph::{ + anyhow::{self, bail}, + blockchain::{Blockchain, BlockchainKind, BlockchainMap, ChainIdentifier}, + cheap_clone::CheapClone, + components::{ + adapter::{ChainId, IdentValidator, MockIdentValidator, NetIdentifiable, ProviderManager}, + metrics::MetricsRegistry, + }, + endpoint::EndpointMetrics, + env::EnvVars, + firehose::{FirehoseEndpoint, FirehoseEndpoints}, + futures03::future::TryFutureExt, + itertools::Itertools, + log::factory::LoggerFactory, + prelude::{ + anyhow::{anyhow, Result}, + info, Logger, NodeId, + }, + slog::{o, warn, Discard}, +}; +use graph_chain_ethereum as ethereum; +use graph_store_postgres::{BlockStore, ChainHeadUpdateListener}; + +use std::{any::Any, cmp::Ordering, sync::Arc, time::Duration}; + +use crate::chain::{ + create_all_ethereum_networks, create_firehose_networks, create_substreams_networks, + networks_as_chains, +}; + +#[derive(Debug, Clone)] +pub struct EthAdapterConfig { + pub chain_id: ChainId, + pub adapters: Vec<EthereumNetworkAdapter>, + pub call_only: Vec<EthereumNetworkAdapter>, + // The polling interval is set per chain, so if it is set, every adapter + // configuration for that chain will have the same value. + pub polling_interval: Option<Duration>, +} + +#[derive(Debug, Clone)] +pub struct FirehoseAdapterConfig { + pub chain_id: ChainId, + pub kind: BlockchainKind, + pub adapters: Vec<Arc<FirehoseEndpoint>>, +} + +#[derive(Debug, Clone)] +pub enum AdapterConfiguration { + Rpc(EthAdapterConfig), + Firehose(FirehoseAdapterConfig), + Substreams(FirehoseAdapterConfig), +} + +impl AdapterConfiguration { + pub fn kind(&self) -> &BlockchainKind { + match self { + AdapterConfiguration::Rpc(_) => &BlockchainKind::Ethereum, + AdapterConfiguration::Firehose(fh) | AdapterConfiguration::Substreams(fh) => &fh.kind, + } + } + pub fn chain_id(&self) -> &ChainId { + match self { + AdapterConfiguration::Rpc(EthAdapterConfig { chain_id, .. }) + | AdapterConfiguration::Firehose(FirehoseAdapterConfig { chain_id, .. }) | AdapterConfiguration::Substreams(FirehoseAdapterConfig { chain_id, ..
}) => chain_id, } + } + + pub fn as_rpc(&self) -> Option<&EthAdapterConfig> { + match self { + AdapterConfiguration::Rpc(rpc) => Some(rpc), + _ => None, + } + } + + pub fn as_firehose(&self) -> Option<&FirehoseAdapterConfig> { + match self { + AdapterConfiguration::Firehose(fh) => Some(fh), + _ => None, + } + } + + pub fn as_substreams(&self) -> Option<&FirehoseAdapterConfig> { + match self { + AdapterConfiguration::Substreams(fh) => Some(fh), + _ => None, + } + } +} + +pub struct Networks { + pub adapters: Vec<AdapterConfiguration>, + rpc_provider_manager: ProviderManager<EthereumNetworkAdapter>, + firehose_provider_manager: ProviderManager<Arc<FirehoseEndpoint>>, + substreams_provider_manager: ProviderManager<Arc<FirehoseEndpoint>>, +} + +impl Networks { + // The noop variant is important for query nodes, as it short-circuits a lot of the setup. + fn noop() -> Self { + Self { + adapters: vec![], + rpc_provider_manager: ProviderManager::new( + Logger::root(Discard, o!()), + vec![].into_iter(), + Arc::new(MockIdentValidator), + ), + firehose_provider_manager: ProviderManager::new( + Logger::root(Discard, o!()), + vec![].into_iter(), + Arc::new(MockIdentValidator), + ), + substreams_provider_manager: ProviderManager::new( + Logger::root(Discard, o!()), + vec![].into_iter(), + Arc::new(MockIdentValidator), + ), + } + } + + pub async fn chain_identifier( + &self, + logger: &Logger, + chain_id: &ChainId, + ) -> Result<ChainIdentifier> { + async fn get_identifier<T: NetIdentifiable + Clone>( + pm: ProviderManager<T>, + logger: &Logger, + chain_id: &ChainId, + provider_type: &str, + ) -> Result<ChainIdentifier> { + for adapter in pm.get_all_unverified(chain_id).unwrap_or_default() { + match adapter.net_identifiers().await { + Ok(ident) => return Ok(ident), + Err(err) => { + warn!( + logger, + "unable to get chain identification from {} provider {} for chain {}, err: {}", + provider_type, + adapter.provider_name(), + chain_id, + err.to_string(), + ); + } + } + } + + bail!("no working adapters for chain {}", chain_id); + } + + get_identifier( + self.rpc_provider_manager.cheap_clone(), + logger, + chain_id, + "rpc", + ) + .or_else(|_| { + get_identifier( + self.firehose_provider_manager.cheap_clone(), + logger, + chain_id, + "firehose", + ) + }) + .or_else(|_| { + get_identifier( + self.substreams_provider_manager.cheap_clone(), + logger, + chain_id, + "substreams", + ) + }) + .await + } + + pub async fn from_config( + logger: Logger, + config: &crate::config::Config, + registry: Arc<MetricsRegistry>, + endpoint_metrics: Arc<EndpointMetrics>, + store: Arc<dyn IdentValidator>, + ) -> Result<Networks> { + if config.query_only(&config.node) { + return Ok(Networks::noop()); + } + + let eth = create_all_ethereum_networks( + logger.cheap_clone(), + registry, + &config, + endpoint_metrics.cheap_clone(), + ) + .await?; + let firehose = create_firehose_networks( + logger.cheap_clone(), + &config, + endpoint_metrics.cheap_clone(), + ); + let substreams = + create_substreams_networks(logger.cheap_clone(), &config, endpoint_metrics); + let adapters: Vec<_> = eth + .into_iter() + .chain(firehose.into_iter()) + .chain(substreams.into_iter()) + .collect(); + + Ok(Networks::new(&logger, adapters, store)) + } + + fn new( + logger: &Logger, + adapters: Vec<AdapterConfiguration>, + validator: Arc<dyn IdentValidator>, + ) -> Self { + let adapters2 = adapters.clone(); + let eth_adapters = adapters.iter().flat_map(|a| a.as_rpc()).cloned().map( + |EthAdapterConfig { + chain_id, + mut adapters, + call_only: _, + polling_interval: _, + }| { + adapters.sort_by(|a, b| { + a.capabilities + .partial_cmp(&b.capabilities) + .unwrap_or(Ordering::Equal) + }); + + (chain_id, adapters) + }, + ); + + let firehose_adapters = adapters + .iter() + .flat_map(|a| a.as_firehose()) + .cloned() + .map( |FirehoseAdapterConfig { + chain_id, + kind: _, + adapters, + }| { (chain_id, adapters) }, + ) + .collect_vec(); + + let substreams_adapters = adapters + .iter() + .flat_map(|a| a.as_substreams()) + .cloned() + .map( + |FirehoseAdapterConfig { + chain_id, + kind: _, + adapters, + }| { (chain_id, adapters) }, + ) + .collect_vec(); + + Self { + adapters: adapters2, + rpc_provider_manager: ProviderManager::new( + logger.clone(), + eth_adapters, + validator.cheap_clone(), + ), + firehose_provider_manager: ProviderManager::new( + logger.clone(), + firehose_adapters + .into_iter() + .map(|(chain_id, endpoints)| (chain_id, endpoints)), + validator.cheap_clone(), + ), + substreams_provider_manager: ProviderManager::new( + logger.clone(), + substreams_adapters + .into_iter() + .map(|(chain_id, endpoints)| (chain_id, endpoints)), + validator.cheap_clone(), + ), + } + } + + pub async fn block_ingestors( + logger: &Logger, + blockchain_map: &Arc<BlockchainMap>, + ) -> anyhow::Result<Vec<Box<dyn BlockIngestor>>> { + async fn block_ingestor<C: Blockchain>( + logger: &Logger, + chain_id: &ChainId, + chain: &Arc<dyn Any + Send + Sync>, + ingestors: &mut Vec<Box<dyn BlockIngestor>>, + ) -> anyhow::Result<()> { + let chain: Arc<C> = chain.cheap_clone().downcast().map_err(|_| { + anyhow!("unable to downcast, wrong type for blockchain {}", C::KIND) + })?; + + let logger = logger.new(o!("network_name" => chain_id.to_string())); + + match chain.block_ingestor().await { + Ok(ingestor) => { + info!(&logger, "Creating block ingestor"); + ingestors.push(ingestor) + } + Err(err) => graph::slog::error!( + &logger, + "unable to create block_ingestor for {}: {}", + chain_id, + err.to_string() + ), + } + + Ok(()) + } + + let mut res = vec![]; + for ((kind, id), chain) in blockchain_map.iter() { + match kind { + BlockchainKind::Arweave => { + block_ingestor::<graph_chain_arweave::Chain>(logger, id, chain, &mut res) + .await? + } + BlockchainKind::Ethereum => { + block_ingestor::<graph_chain_ethereum::Chain>(logger, id, chain, &mut res) + .await? + } + BlockchainKind::Near => { + block_ingestor::<graph_chain_near::Chain>(logger, id, chain, &mut res).await? + } + BlockchainKind::Cosmos => { + block_ingestor::<graph_chain_cosmos::Chain>(logger, id, chain, &mut res).await? + } + BlockchainKind::Substreams => { + // handle substreams later + } + BlockchainKind::Starknet => { + block_ingestor::<graph_chain_starknet::Chain>(logger, id, chain, &mut res) + .await? + } + } + } + + // Substreams networks that also have another chain type (RPC or Firehose) will + // already have block ingestors running. + let visited: Vec<_> = res.iter().map(|b| b.network_name()).collect(); + for ((_, id), chain) in blockchain_map + .iter() + .filter(|((kind, id), _)| BlockchainKind::Substreams.eq(&kind) && !visited.contains(id)) + { + block_ingestor::<graph_chain_substreams::Chain>(logger, id, chain, &mut res).await?
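+ // A sketch of the mechanism behind the per-kind dispatch above: `BlockchainMap`
+ // hands out type-erased chains, so `downcast` here works like
+ // `Arc<dyn Any + Send + Sync>::downcast` (toy example, not part of this patch):
+ //
+ //     use std::{any::Any, sync::Arc};
+ //     let any: Arc<dyn Any + Send + Sync> = Arc::new(42u32);
+ //     let n: Arc<u32> = any.downcast().unwrap();
+ //     assert_eq!(*n, 42);
+ //
+ // A failed downcast returns the original `Arc` in the `Err` variant, which is
+ // why `block_ingestor` maps it to an `anyhow` error instead of panicking.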
+ } + + Ok(res) + } + + pub async fn blockchain_map( + &self, + config: &Arc, + node_id: &NodeId, + logger: &Logger, + store: Arc, + logger_factory: &LoggerFactory, + metrics_registry: Arc, + chain_head_update_listener: Arc, + ) -> BlockchainMap { + let mut bm = BlockchainMap::new(); + + networks_as_chains( + config, + &mut bm, + node_id, + logger, + self, + store, + logger_factory, + metrics_registry, + chain_head_update_listener, + ) + .await; + + bm + } + + pub fn firehose_endpoints(&self, chain_id: ChainId) -> FirehoseEndpoints { + FirehoseEndpoints::new(chain_id, self.firehose_provider_manager.cheap_clone()) + } + + pub fn substreams_endpoints(&self, chain_id: ChainId) -> FirehoseEndpoints { + FirehoseEndpoints::new(chain_id, self.substreams_provider_manager.cheap_clone()) + } + + pub fn ethereum_rpcs(&self, chain_id: ChainId) -> EthereumNetworkAdapters { + let eth_adapters = self + .adapters + .iter() + .filter(|a| a.chain_id().eq(&chain_id)) + .flat_map(|a| a.as_rpc()) + .flat_map(|eth_c| eth_c.call_only.clone()) + .collect_vec(); + + EthereumNetworkAdapters::new( + chain_id, + self.rpc_provider_manager.cheap_clone(), + eth_adapters, + None, + ) + } +} diff --git a/node/src/store_builder.rs b/node/src/store_builder.rs index 6423e64b620..2a39d0ea6ed 100644 --- a/node/src/store_builder.rs +++ b/node/src/store_builder.rs @@ -1,8 +1,6 @@ -use std::collections::BTreeMap; use std::iter::FromIterator; use std::{collections::HashMap, sync::Arc}; -use graph::blockchain::ChainIdentifier; use graph::futures03::future::join_all; use graph::prelude::{o, MetricsRegistry, NodeId}; use graph::url::Url; @@ -167,14 +165,14 @@ impl StoreBuilder { pools: HashMap, subgraph_store: Arc, chains: HashMap, - networks: BTreeMap, + networks: Vec, registry: Arc, ) -> Arc { let networks = networks .into_iter() - .map(|(name, idents)| { + .map(|name| { let shard = chains.get(&name).unwrap_or(&*PRIMARY_SHARD).clone(); - (name, idents, shard) + (name, shard) }) .collect(); @@ -281,13 +279,13 @@ impl StoreBuilder { /// Return a store that combines both a `Store` for subgraph data /// and a `BlockStore` for all chain related data - pub fn network_store(self, networks: BTreeMap) -> Arc { + pub fn network_store(self, networks: Vec>) -> Arc { Self::make_store( &self.logger, self.pools, self.subgraph_store, self.chains, - networks, + networks.into_iter().map(Into::into).collect(), self.registry, ) } diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index 3dd363db493..6ba26a5457e 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -267,7 +267,7 @@ impl IndexNodeResolver { let chain = if let Ok(c) = self .blockchain_map - .get::(network.clone()) + .get::(network.as_str().into()) { c } else { @@ -593,7 +593,7 @@ impl IndexNodeResolver { } BlockchainKind::Starknet => { let unvalidated_subgraph_manifest = - UnvalidatedSubgraphManifest::::resolve( + UnvalidatedSubgraphManifest::::resolve( deployment_hash.clone(), raw_yaml, &self.link_resolver, @@ -659,7 +659,7 @@ impl IndexNodeResolver { ) -> Result, QueryExecutionError> { macro_rules! 
try_resolve_for_chain { ( $typ:path ) => { - let blockchain = self.blockchain_map.get::<$typ>(network.to_string()).ok(); + let blockchain = self.blockchain_map.get::<$typ>(network.as_str().into()).ok(); if let Some(blockchain) = blockchain { debug!( diff --git a/store/postgres/src/block_range.rs b/store/postgres/src/block_range.rs index 7679dd49db8..1d81eac5e81 100644 --- a/store/postgres/src/block_range.rs +++ b/store/postgres/src/block_range.rs @@ -134,6 +134,7 @@ impl<'a> QueryFragment for BlockRangeUpperBoundClause<'a> { /// Helper for generating various SQL fragments for handling the block range /// of entity versions +#[allow(unused)] #[derive(Debug, Clone, Copy)] pub enum BlockRangeColumn<'a> { Mutable { diff --git a/store/postgres/src/block_store.rs b/store/postgres/src/block_store.rs index 9b98153efb0..13b0cec2575 100644 --- a/store/postgres/src/block_store.rs +++ b/store/postgres/src/block_store.rs @@ -12,7 +12,7 @@ use diesel::{ use graph::{ blockchain::ChainIdentifier, components::store::{BlockStore as BlockStoreTrait, QueryPermit}, - prelude::{error, info, warn, BlockNumber, BlockPtr, Logger, ENV_VARS}, + prelude::{error, info, BlockNumber, BlockPtr, Logger, ENV_VARS}, slog::o, }; use graph::{constraint_violation, prelude::CheapClone}; @@ -112,8 +112,8 @@ pub mod primary { pub fn add_chain( conn: &mut PooledConnection>, name: &str, - ident: &ChainIdentifier, shard: &Shard, + ident: ChainIdentifier, ) -> Result { // For tests, we want to have a chain that still uses the // shared `ethereum_blocks` table @@ -194,6 +194,8 @@ pub struct BlockStore { /// known to the system at startup, either from configuration or from /// previous state in the database. stores: RwLock>>, + // We keep this information so we can create chain stores during startup + shards: Vec<(String, Shard)>, pools: HashMap, sender: Arc, mirror: PrimaryMirror, @@ -215,8 +217,8 @@ impl BlockStore { /// a chain uses the pool from `pools` for the given shard. 
pub fn new( logger: Logger, - // (network, ident, shard) - chains: Vec<(String, ChainIdentifier, Shard)>, + // (network, shard) + shards: Vec<(String, Shard)>, // shard -> pool pools: HashMap<Shard, ConnectionPool>, sender: Arc<NotificationSender>, @@ -229,10 +231,12 @@ impl BlockStore { let mirror = PrimaryMirror::new(&pools); let existing_chains = mirror.read(|conn| primary::load_chains(conn))?; let chain_head_cache = TimedCache::new(CHAIN_HEAD_CACHE_TTL); + let chains = shards.clone(); let block_store = Self { logger, stores: RwLock::new(HashMap::new()), + shards, pools, sender, mirror, @@ -246,7 +250,6 @@ impl BlockStore { logger: &Logger, chain: &primary::Chain, shard: &Shard, - ident: &ChainIdentifier, ) -> bool { if &chain.shard != shard { error!( @@ -258,54 +262,24 @@ impl BlockStore { ); return false; } - if chain.net_version != ident.net_version { - if chain.net_version == "0" { - warn!(logger, - "the net version for chain {} has changed from 0 to {} since the last time we ran, ignoring difference because 0 means UNSET and firehose does not provide it", - chain.name, - ident.net_version, - ) - } else { - error!(logger, - "the net version for chain {} has changed from {} to {} since the last time we ran", - chain.name, - chain.net_version, - ident.net_version - ); - return false; - } - } - if chain.genesis_block != ident.genesis_block_hash.hash_hex() { - error!(logger, - "the genesis block hash for chain {} has changed from {} to {} since the last time we ran", - chain.name, - chain.genesis_block, - ident.genesis_block_hash - ); - return false; - } true } // For each configured chain, add a chain store - for (chain_name, ident, shard) in chains { + for (chain_name, shard) in chains { match existing_chains .iter() .find(|chain| chain.name == chain_name) { Some(chain) => { - let status = if chain_ingestible(&block_store.logger, chain, &shard, &ident) { + let status = if chain_ingestible(&block_store.logger, chain, &shard) { ChainStatus::Ingestible } else { ChainStatus::ReadOnly }; block_store.add_chain_store(chain, status, false)?; } - None => { - let mut conn = block_store.mirror.primary().get()?; - let chain = primary::add_chain(&mut conn, &chain_name, &ident, &shard)?; - block_store.add_chain_store(&chain, ChainStatus::Ingestible, true)?; - } + None => {} }; } @@ -392,7 +366,6 @@ impl BlockStore { logger, chain.name.clone(), chain.storage.clone(), - &ident, status, sender, pool, @@ -509,18 +482,12 @@ impl BlockStore { // Discussed here: https://github.com/graphprotocol/graph-node/pull/4790 pub fn cleanup_ethereum_shallow_blocks( &self, - ethereum_networks: Vec<&String>, - firehose_only_networks: Option<Vec<&String>>, + eth_rpc_only_nets: Vec<String>, ) -> Result<(), StoreError> { for store in self.stores.read().unwrap().values() { - if !ethereum_networks.contains(&&store.chain) { + if !eth_rpc_only_nets.contains(&store.chain) { continue; }; - if let Some(fh_nets) = firehose_only_networks.clone() { - if fh_nets.contains(&&store.chain) { - continue; - }; - } if let Some(head_block) = store.remove_cursor(&&store.chain)?
{ let lower_bound = head_block.saturating_sub(ENV_VARS.reorg_threshold * 2); @@ -561,4 +528,33 @@ impl BlockStoreTrait for BlockStore { fn chain_store(&self, network: &str) -> Option> { self.store(network) } + + fn create_chain_store( + &self, + network: &str, + ident: ChainIdentifier, + ) -> anyhow::Result> { + match self.store(network) { + Some(chain_store) => { + return Ok(chain_store); + } + None => {} + } + + let mut conn = self.mirror.primary().get()?; + let shard = self + .shards + .iter() + .find_map(|(chain_id, shard)| { + if chain_id.as_str().eq(network) { + Some(shard) + } else { + None + } + }) + .ok_or_else(|| anyhow!("unable to find shard for network {}", network))?; + let chain = primary::add_chain(&mut conn, &network, &shard, ident)?; + self.add_chain_store(&chain, ChainStatus::Ingestible, true) + .map_err(anyhow::Error::from) + } } diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index 733ff29be14..070505e58cb 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -1658,8 +1658,6 @@ pub struct ChainStore { pool: ConnectionPool, pub chain: String, pub(crate) storage: data::Storage, - pub chain_identifier: ChainIdentifier, - genesis_block_ptr: BlockPtr, status: ChainStatus, chain_head_update_sender: ChainHeadUpdateSender, // TODO: We currently only use this cache for @@ -1677,7 +1675,6 @@ impl ChainStore { logger: Logger, chain: String, storage: data::Storage, - net_identifier: &ChainIdentifier, status: ChainStatus, chain_head_update_sender: ChainHeadUpdateSender, pool: ConnectionPool, @@ -1692,10 +1689,8 @@ impl ChainStore { pool, chain, storage, - genesis_block_ptr: BlockPtr::new(net_identifier.genesis_block_hash.clone(), 0), status, chain_head_update_sender, - chain_identifier: net_identifier.clone(), recent_blocks_cache, lookup_herd, } @@ -1816,6 +1811,12 @@ impl ChainStore { self.upsert_block(block).await.expect("can upsert block"); } + self.set_chain_identifier(&ChainIdentifier { + net_version: "0".to_string(), + genesis_block_hash: BlockHash::try_from(genesis_hash).expect("valid block hash"), + }) + .expect("unable to set chain identifier"); + use public::ethereum_networks as n; diesel::update(n::table.filter(n::name.eq(&self.chain))) .set(( @@ -1874,7 +1875,12 @@ impl ChainStore { #[async_trait] impl ChainStoreTrait for ChainStore { fn genesis_block_ptr(&self) -> Result { - Ok(self.genesis_block_ptr.clone()) + let ident = self.chain_identifier()?; + + Ok(BlockPtr { + hash: ident.genesis_block_hash, + number: 0, + }) } async fn upsert_block(&self, block: Arc) -> Result<(), Error> { @@ -1915,6 +1921,7 @@ impl ChainStoreTrait for ChainStore { let (missing, ptr) = { let chain_store = self.clone(); + let genesis_block_ptr = self.genesis_block_ptr()?.hash_as_h256(); self.pool .with_conn(move |conn, _| { let candidate = chain_store @@ -1933,7 +1940,7 @@ impl ChainStoreTrait for ChainStore { &chain_store.chain, first_block as i64, ptr.hash_as_h256(), - chain_store.genesis_block_ptr.hash_as_h256(), + genesis_block_ptr, ) .map_err(CancelableError::from)? 
{ @@ -2296,8 +2303,32 @@ impl ChainStoreTrait for ChainStore { .await } - fn chain_identifier(&self) -> &ChainIdentifier { - &self.chain_identifier + fn set_chain_identifier(&self, ident: &ChainIdentifier) -> Result<(), Error> { + use public::ethereum_networks as n; + + let mut conn = self.pool.get()?; + diesel::update(n::table.filter(n::name.eq(&self.chain))) + .set(( + n::genesis_block_hash.eq(ident.genesis_block_hash.hash_hex()), + n::net_version.eq(&ident.net_version), + )) + .execute(&mut conn)?; + + Ok(()) + } + + fn chain_identifier(&self) -> Result { + let mut conn = self.pool.get()?; + use public::ethereum_networks as n; + let (genesis_block_hash, net_version) = n::table + .select((n::genesis_block_hash, n::net_version)) + .filter(n::name.eq(&self.chain)) + .get_result::<(BlockHash, String)>(&mut conn)?; + + Ok(ChainIdentifier { + net_version, + genesis_block_hash, + }) } } diff --git a/store/postgres/src/notification_listener.rs b/store/postgres/src/notification_listener.rs index 556bc58b6c1..1d56d73459d 100644 --- a/store/postgres/src/notification_listener.rs +++ b/store/postgres/src/notification_listener.rs @@ -323,8 +323,6 @@ mod public { // the `large_notifications` table. #[derive(Debug)] pub struct JsonNotification { - pub process_id: i32, - pub channel: String, pub payload: serde_json::Value, } @@ -373,16 +371,10 @@ impl JsonNotification { let payload: String = payload_rows.get(0).unwrap().get(0); Ok(JsonNotification { - process_id: notification.process_id(), - channel: notification.channel().to_string(), payload: serde_json::from_str(&payload)?, }) } - serde_json::Value::Object(_) => Ok(JsonNotification { - process_id: notification.process_id(), - channel: notification.channel().to_string(), - payload: value, - }), + serde_json::Value::Object(_) => Ok(JsonNotification { payload: value }), _ => Err(anyhow!("JSON notifications must be numbers or objects"))?, } } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 8f09df120c2..4626ce0479e 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -3197,8 +3197,6 @@ impl<'a> FilterCollection<'a> { #[derive(Debug, Clone)] pub struct ChildKeyDetails<'a> { - /// Table representing the parent entity - pub parent_table: &'a Table, /// Column in the parent table that stores the connection between the parent and the child pub parent_join_column: &'a Column, /// Table representing the child entity @@ -3231,6 +3229,7 @@ pub struct ChildKeyAndIdSharedDetails<'a> { pub direction: &'static str, } +#[allow(unused)] #[derive(Debug, Clone)] pub struct ChildIdDetails<'a> { /// Table representing the parent entity @@ -3525,7 +3524,6 @@ impl<'a> SortKey<'a> { } Ok(SortKey::ChildKey(ChildKey::Single(ChildKeyDetails { - parent_table, child_table, parent_join_column: parent_column, child_join_column: child_column, @@ -3659,7 +3657,6 @@ impl<'a> SortKey<'a> { build_children_vec(layout, parent_table, entity_types, child, direction)? 
.iter() .map(|details| ChildKeyDetails { - parent_table: details.parent_table, parent_join_column: details.parent_join_column, child_table: details.child_table, child_join_column: details.child_join_column, diff --git a/store/test-store/src/block_store.rs b/store/test-store/src/block_store.rs index 6f161258a0e..092be0274a8 100644 --- a/store/test-store/src/block_store.rs +++ b/store/test-store/src/block_store.rs @@ -1,6 +1,6 @@ use std::{convert::TryFrom, str::FromStr, sync::Arc}; -use graph::blockchain::BlockTime; +use graph::blockchain::{BlockTime, ChainIdentifier}; use lazy_static::lazy_static; use graph::components::store::BlockStore; @@ -14,6 +14,8 @@ use graph::{ use graph_chain_ethereum::codec::{Block, BlockHeader}; use prost_types::Timestamp; +use crate::{GENESIS_PTR, NETWORK_VERSION}; + lazy_static! { // Genesis block pub static ref GENESIS_BLOCK: FakeBlock = FakeBlock { @@ -186,10 +188,19 @@ pub type FakeBlockList = Vec<&'static FakeBlock>; /// network's genesis block to `genesis_hash`, and head block to /// `null` pub async fn set_chain(chain: FakeBlockList, network: &str) -> Vec<(BlockPtr, BlockHash)> { - let store = crate::store::STORE - .block_store() - .chain_store(network) - .unwrap(); + let block_store = crate::store::STORE.block_store(); + let store = match block_store.chain_store(network) { + Some(cs) => cs, + None => block_store + .create_chain_store( + network, + ChainIdentifier { + net_version: NETWORK_VERSION.to_string(), + genesis_block_hash: GENESIS_PTR.hash.clone(), + }, + ) + .unwrap(), + }; let chain: Vec> = chain .iter() .cloned() diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 0c499b81fda..2921d375286 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -1,6 +1,8 @@ use diesel::{self, PgConnection}; use graph::blockchain::mock::MockDataSource; use graph::blockchain::BlockTime; +use graph::blockchain::ChainIdentifier; +use graph::components::store::BlockStore; use graph::data::graphql::load_manager::LoadManager; use graph::data::query::QueryResults; use graph::data::query::QueryTarget; @@ -13,9 +15,9 @@ use graph::schema::EntityType; use graph::schema::InputSchema; use graph::semver::Version; use graph::{ - blockchain::block_stream::FirehoseCursor, blockchain::ChainIdentifier, - components::store::DeploymentLocator, components::store::StatusStore, - components::store::StoredDynamicDataSource, data::subgraph::status, prelude::NodeId, + blockchain::block_stream::FirehoseCursor, components::store::DeploymentLocator, + components::store::StatusStore, components::store::StoredDynamicDataSource, + data::subgraph::status, prelude::NodeId, }; use graph_graphql::prelude::{ execute_query, Query as PreparedQuery, QueryExecutionOptions, StoreResolver, @@ -626,19 +628,30 @@ fn build_store() -> (Arc, ConnectionPool, Config, Arc { + cs.set_chain_identifier(&ChainIdentifier { + net_version: NETWORK_VERSION.to_string(), + genesis_block_hash: GENESIS_PTR.hash.clone(), + }) + .expect("unable to set identifier"); + } + None => { + store + .block_store() + .create_chain_store(NETWORK_NAME, ident) + .expect("unable to create test network store"); + } + } + (store, primary_pool, config, subscription_manager) }) }) .join() diff --git a/tests/src/fixture/ethereum.rs b/tests/src/fixture/ethereum.rs index faa100be7b2..57a5cc85c95 100644 --- a/tests/src/fixture/ethereum.rs +++ b/tests/src/fixture/ethereum.rs @@ -45,7 +45,7 @@ pub async fn chain( let static_block_stream = Arc::new(StaticStreamBuilder { chain: blocks }); let 
block_stream_builder = Arc::new(MutexBlockStreamBuilder(Mutex::new(static_block_stream))); - let eth_adapters = Arc::new(EthereumNetworkAdapters::default()); + let eth_adapters = Arc::new(EthereumNetworkAdapters::empty_for_testing()); let chain = Chain::new( logger_factory, diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index 537efa46fac..ebed1d3a115 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -17,6 +17,7 @@ use graph::blockchain::{ TriggersAdapter, TriggersAdapterSelector, }; use graph::cheap_clone::CheapClone; +use graph::components::adapter::ChainId; use graph::components::link_resolver::{ArweaveClient, ArweaveResolver, FileSizeLimit}; use graph::components::metrics::MetricsRegistry; use graph::components::store::{BlockStore, DeploymentLocator, EthereumCallCache}; @@ -26,7 +27,7 @@ use graph::data::query::{Query, QueryTarget}; use graph::data::subgraph::schema::{SubgraphError, SubgraphHealth}; use graph::endpoint::EndpointMetrics; use graph::env::EnvVars; -use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints, SubgraphLimit}; +use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints, NoopGenesisDecoder, SubgraphLimit}; use graph::futures03::{Stream, StreamExt}; use graph::http_body_util::Full; use graph::hyper::body::Bytes; @@ -96,17 +97,18 @@ impl CommonChainConfig { let chain_store = stores.chain_store.cheap_clone(); let node_id = NodeId::new(NODE_ID).unwrap(); - let firehose_endpoints: FirehoseEndpoints = vec![Arc::new(FirehoseEndpoint::new( - "", - "https://example.com", - None, - None, - true, - false, - SubgraphLimit::Unlimited, - Arc::new(EndpointMetrics::mock()), - ))] - .into(); + let firehose_endpoints = + FirehoseEndpoints::for_testing(vec![Arc::new(FirehoseEndpoint::new( + "", + "https://example.com", + None, + None, + true, + false, + SubgraphLimit::Unlimited, + Arc::new(EndpointMetrics::mock()), + NoopGenesisDecoder::boxed(), + ))]); Self { logger_factory, @@ -359,7 +361,7 @@ impl Drop for TestContext { } pub struct Stores { - network_name: String, + network_name: ChainId, chain_head_listener: Arc, pub network_store: Arc, chain_store: Arc, @@ -398,22 +400,26 @@ pub async fn stores(test_name: &str, store_config_path: &str) -> Stores { let store_builder = StoreBuilder::new(&logger, &node_id, &config, None, mock_registry.clone()).await; - let network_name: String = config.chains.chains.iter().next().unwrap().0.to_string(); + let network_name: ChainId = config + .chains + .chains + .iter() + .next() + .unwrap() + .0 + .as_str() + .into(); let chain_head_listener = store_builder.chain_head_update_listener(); - let network_identifiers = vec![( - network_name.clone(), - ChainIdentifier { - net_version: "".into(), - genesis_block_hash: test_ptr(0).hash, - }, - )] - .into_iter() - .collect(); + let network_identifiers: Vec = vec![network_name.clone()].into_iter().collect(); let network_store = store_builder.network_store(network_identifiers); + let ident = ChainIdentifier { + net_version: "".into(), + genesis_block_hash: test_ptr(0).hash, + }; let chain_store = network_store .block_store() - .chain_store(network_name.as_ref()) - .unwrap_or_else(|| panic!("No chain store for {}", &network_name)); + .create_chain_store(&network_name, ident) + .unwrap_or_else(|_| panic!("No chain store for {}", &network_name)); Stores { network_name, diff --git a/tests/src/fixture/substreams.rs b/tests/src/fixture/substreams.rs index f40943b8914..ebaba8d854d 100644 --- a/tests/src/fixture/substreams.rs +++ b/tests/src/fixture/substreams.rs @@ -1,5 
+1,7 @@ use std::sync::Arc; +use graph::blockchain::client::ChainClient; + use super::{CommonChainConfig, Stores, TestChainSubstreams}; pub async fn chain(test_name: &str, stores: &Stores) -> TestChainSubstreams { @@ -12,10 +14,13 @@ pub async fn chain(test_name: &str, stores: &Stores) -> TestChainSubstreams { } = CommonChainConfig::new(test_name, stores).await; let block_stream_builder = Arc::new(graph_chain_substreams::BlockStreamBuilder::new()); + let client = Arc::new(ChainClient::<graph_chain_substreams::Chain>::new_firehose( + firehose_endpoints, + )); let chain = Arc::new(graph_chain_substreams::Chain::new( logger_factory, - firehose_endpoints, + client, mock_registry, chain_store, block_stream_builder.clone(),