diff --git a/Cargo.lock b/Cargo.lock index 921ffa9eb28..1edd98dc496 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3729,6 +3729,7 @@ dependencies = [ "tiny-keccak 1.5.0", "tokio", "tokio-stream", + "tokio-util", "tonic-prost-build", "tower 0.5.2", ] diff --git a/chain/ethereum/Cargo.toml b/chain/ethereum/Cargo.toml index 4caaef9b668..26dd5cebd94 100644 --- a/chain/ethereum/Cargo.toml +++ b/chain/ethereum/Cargo.toml @@ -18,6 +18,7 @@ semver = "1.0.27" thiserror = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } +tokio-util = { workspace = true } tower = { workspace = true } itertools = "0.14.0" diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index 64affbeec0b..1fb36bc7efa 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -56,7 +56,7 @@ use std::convert::TryFrom; use std::iter::FromIterator; use std::pin::Pin; use std::sync::Arc; -use std::time::Instant; +use std::time::{Duration, Instant}; use tokio::sync::RwLock; use tokio::time::timeout; @@ -1108,6 +1108,16 @@ impl EthereumAdapter { Box::new(self.load_block_ptrs_rpc(logger, blocks).collect()) } + /// Lightweight health check that calls `eth_blockNumber` with a fixed 5s timeout. + pub async fn health_check(&self) -> Result<u64, Error> { + let alloy = self.alloy.clone(); + tokio::time::timeout(Duration::from_secs(5), async move { + alloy.get_block_number().await.map_err(Error::from) + }) + .await + .map_err(|_| anyhow!("health check timed out"))? + } + pub async fn chain_id(&self) -> Result<u64, Error> { + let logger = self.logger.clone(); + let alloy = self.alloy.clone();
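// Editor's note: a minimal standalone sketch (not graph-node code) of the timeout pattern used by
// `health_check` above. `tokio::time::timeout` wraps the inner future's `Result` in an outer
// `Result<_, Elapsed>`, so the trailing `?` peels off the timeout layer and leaves the provider's
// own `Result`. `fake_block_number` is a hypothetical stand-in for the real RPC call.
use anyhow::{anyhow, Error};
use std::time::Duration;

async fn fake_block_number() -> Result<u64, Error> {
    Ok(12_345) // pretend the provider answered
}

async fn health_check_sketch() -> Result<u64, Error> {
    tokio::time::timeout(Duration::from_secs(5), fake_block_number())
        .await
        // Outer error: the 5s budget elapsed before the call returned.
        .map_err(|_| anyhow!("health check timed out"))?
    // After `?`, what remains is the inner Result<u64, Error> from the call itself.
}

#[tokio::main]
async fn main() {
    println!("{:?}", health_check_sketch().await);
}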
diff --git a/chain/ethereum/src/health.rs b/chain/ethereum/src/health.rs new file mode 100644 index 00000000000..30cfdeb0daf --- /dev/null +++ b/chain/ethereum/src/health.rs @@ -0,0 +1,77 @@ +use crate::adapter::EthereumAdapter as _; +use crate::EthereumAdapter; +use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio_util::sync::CancellationToken; + +#[derive(Debug)] +pub struct Health { + provider: Arc<EthereumAdapter>, + latency_nanos: AtomicU64, + error_rate_bits: AtomicU64, + consecutive_failures: AtomicU32, +} + +impl Health { + pub fn new(provider: Arc<EthereumAdapter>) -> Self { + Self { + provider, + latency_nanos: AtomicU64::new(0), + error_rate_bits: AtomicU64::new(0f64.to_bits()), + consecutive_failures: AtomicU32::new(0), + } + } + + pub fn provider(&self) -> &str { + self.provider.provider() + } + + pub async fn check(&self) { + let start = Instant::now(); + let success = self.provider.health_check().await.is_ok(); + self.update_metrics(success, start.elapsed()); + } + + fn update_metrics(&self, success: bool, latency: Duration) { + self.latency_nanos + .store(latency.as_nanos() as u64, Ordering::Relaxed); + + let prev_error_rate = f64::from_bits(self.error_rate_bits.load(Ordering::Relaxed)); + + if success { + let new_error_rate = prev_error_rate * 0.9; + self.error_rate_bits + .store(new_error_rate.to_bits(), Ordering::Relaxed); + self.consecutive_failures.store(0, Ordering::Relaxed); + } else { + let new_error_rate = prev_error_rate * 0.9 + 0.1; + self.error_rate_bits + .store(new_error_rate.to_bits(), Ordering::Relaxed); + self.consecutive_failures.fetch_add(1, Ordering::Relaxed); + } + } + + pub fn score(&self) -> f64 { + let latency_secs = + Duration::from_nanos(self.latency_nanos.load(Ordering::Relaxed)).as_secs_f64(); + let error_rate = f64::from_bits(self.error_rate_bits.load(Ordering::Relaxed)); + let consecutive_failures = self.consecutive_failures.load(Ordering::Relaxed); + + 1.0 / (1.0 + latency_secs + error_rate + (consecutive_failures as f64)) + } +} + +pub async fn health_check_task(health_checkers: Vec<Arc<Health>>, cancel_token: CancellationToken) { + loop { + tokio::select! { + _ = cancel_token.cancelled() => break, + _ = async { + for hc in &health_checkers { + hc.check().await; + } + tokio::time::sleep(Duration::from_secs(10)).await; + } => {} + } + } +}
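// Editor's note: a standalone illustration (plain Rust, not graph-node code) of the scoring math in
// `Health` above. The error rate is an exponentially weighted moving average: every check decays it
// by 0.9 and a failure adds 0.1, so it drifts toward 1.0 under constant failure and back toward 0.0
// under constant success. The score is 1 / (1 + latency_secs + error_rate + consecutive_failures),
// so a fresh checker with zero latency and no errors scores exactly 1.0.
fn score(latency_secs: f64, error_rate: f64, consecutive_failures: u32) -> f64 {
    1.0 / (1.0 + latency_secs + error_rate + consecutive_failures as f64)
}

fn main() {
    let latency_secs = 0.05; // assume a steady 50ms eth_blockNumber round trip
    let mut error_rate = 0.0_f64;
    let mut consecutive_failures = 0u32;

    // Three failed checks in a row: the error rate climbs 0.1 -> 0.19 -> 0.271 and the
    // failure counter grows, so the score drops quickly.
    for _ in 0..3 {
        error_rate = error_rate * 0.9 + 0.1;
        consecutive_failures += 1;
        println!("fail    -> score {:.3}", score(latency_secs, error_rate, consecutive_failures));
    }

    // Successful checks reset the failure counter and decay the error rate, so the
    // provider gradually becomes attractive to the weighted selector again.
    for _ in 0..3 {
        error_rate *= 0.9;
        consecutive_failures = 0;
        println!("success -> score {:.3}", score(latency_secs, error_rate, consecutive_failures));
    }
}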
diff --git a/chain/ethereum/src/lib.rs b/chain/ethereum/src/lib.rs index 8850764d63b..af51c78ace8 100644 --- a/chain/ethereum/src/lib.rs +++ b/chain/ethereum/src/lib.rs @@ -6,6 +6,7 @@ pub mod codec; mod data_source; mod env; mod ethereum_adapter; +pub mod health; mod ingestor; mod polling_block_stream; pub mod runtime; diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index 536f7a8a54d..91a8bdc548c 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -7,9 +7,14 @@ use graph::components::network_provider::ProviderManager; use graph::components::network_provider::ProviderName; use graph::endpoint::EndpointMetrics; use graph::firehose::{AvailableCapacity, SubgraphLimit}; -use graph::prelude::rand::seq::IteratorRandom; -use graph::prelude::rand::{self, Rng}; +use graph::prelude::rand::{ + self, + distr::{weighted::WeightedIndex, Distribution}, + seq::IteratorRandom, + Rng, +}; use itertools::Itertools; +use std::collections::HashMap; use std::sync::Arc; pub use graph::impl_slog_value; @@ -31,6 +36,7 @@ pub struct EthereumNetworkAdapter { /// that limit. That's a somewhat imprecise but convenient way to /// determine the number of connections limit: SubgraphLimit, + weight: f64, } #[async_trait] @@ -54,15 +60,21 @@ impl EthereumNetworkAdapter { capabilities: NodeCapabilities, adapter: Arc<EthereumAdapter>, limit: SubgraphLimit, + weight: f64, ) -> Self { Self { endpoint_metrics, capabilities, adapter, limit, + weight, } } + pub fn adapter(&self) -> &Arc<EthereumAdapter> { + &self.adapter + } + #[cfg(debug_assertions)] fn is_call_only(&self) -> bool { self.adapter.is_call_only() @@ -80,6 +92,8 @@ impl EthereumNetworkAdapter { } } +use crate::health::Health; + #[derive(Debug, Clone)] pub struct EthereumNetworkAdapters { chain_id: ChainName, @@ -87,6 +101,8 @@ pub struct EthereumNetworkAdapters { call_only_adapters: Vec<EthereumNetworkAdapter>, // Percentage of request that should be used to retest errored adapters. retest_percent: f64, + weighted: bool, + health_checkers: HashMap<String, Arc<Health>>, } impl EthereumNetworkAdapters { @@ -96,6 +112,8 @@ manager: ProviderManager::default(), call_only_adapters: vec![], retest_percent: DEFAULT_ADAPTER_ERROR_RETEST_PERCENT, + weighted: false, + health_checkers: HashMap::new(), } } @@ -122,7 +140,7 @@ ProviderCheckStrategy::MarkAsValid, ); - Self::new(chain_id, provider, call_only, None) + Self::new(chain_id, provider, call_only, None, false, HashMap::new()) } pub fn new( @@ -130,6 +148,8 @@ manager: ProviderManager<EthereumNetworkAdapter>, call_only_adapters: Vec<EthereumNetworkAdapter>, retest_percent: Option<f64>, + weighted: bool, + health_checkers: HashMap<String, Arc<Health>>, ) -> Self { #[cfg(debug_assertions)] call_only_adapters.iter().for_each(|a| { @@ -141,6 +161,8 @@ manager, call_only_adapters, retest_percent: retest_percent.unwrap_or(DEFAULT_ADAPTER_ERROR_RETEST_PERCENT), + weighted, + health_checkers, } } @@ -190,50 +212,114 @@ Self::available_with_capabilities(all, required_capabilities) } - // handle adapter selection from a list, implements the availability checking with an abstracted - // source of the adapter list. + /// Main adapter selection entry point that handles both weight-based distribution + /// and error retesting logic. + /// + /// The selection process: + /// 1. First selects an adapter based on weights (if enabled) or random selection + /// 2. Occasionally overrides the selection to retest adapters with errors + /// + /// The error retesting happens AFTER weight-based selection to minimize + /// distribution skew while still allowing periodic health checks of errored endpoints. fn cheapest_from( + &self, input: Vec<&EthereumNetworkAdapter>, required_capabilities: &NodeCapabilities, - retest_percent: f64, ) -> Result<Arc<EthereumAdapter>, Error> { + // Select adapter based on weights or random strategy + let selected_adapter = self.select_best_adapter(&input, required_capabilities)?; + + // Occasionally override selection to retest errored adapters + // This happens AFTER weight-based selection to minimize distribution skew let retest_rng: f64 = rand::rng().random(); + if retest_rng < self.retest_percent { + if let Some(most_errored) = input + .iter() + .max_by_key(|a| a.current_error_count()) + .filter(|a| a.current_error_count() > 0) + { + return Ok(most_errored.adapter().clone()); + } + } + + Ok(selected_adapter) + } + + /// Selects the best adapter based on the configured strategy (weighted or random). + /// If weighted mode is enabled, uses weight-based probabilistic selection. + /// Otherwise, falls back to random selection with error count consideration. + fn select_best_adapter( + &self, + input: &[&EthereumNetworkAdapter], + required_capabilities: &NodeCapabilities, + ) -> Result<Arc<EthereumAdapter>, Error> { + if self.weighted { + self.select_weighted_adapter(input, required_capabilities) + } else { + Self::select_random_adapter(input, required_capabilities) + } + } - let cheapest = input.into_iter().choose_multiple(&mut rand::rng(), 3); - let cheapest = cheapest.iter(); + /// Performs weighted random selection of adapters based on their configured weights. + /// + /// Weights are relative values between 0.0 and 1.0 that determine the probability + /// of selecting each adapter. They don't need to sum to 1.0 as they're normalized + /// internally by the WeightedIndex distribution. + /// + /// Falls back to random selection if weights are invalid (e.g., all zeros).
+ fn select_weighted_adapter( + &self, + input: &[&EthereumNetworkAdapter], + required_capabilities: &NodeCapabilities, + ) -> Result<Arc<EthereumAdapter>, Error> { + let weights: Vec<_> = input + .iter() + .map(|a| { + let score = self + .health_checkers + .get(a.provider()) + .map_or(1.0, |h| h.score()); + a.weight * score + }) + .collect(); + if let Ok(dist) = WeightedIndex::new(&weights) { + let idx = dist.sample(&mut rand::rng()); + Ok(input[idx].adapter().clone()) + } else { + // Fallback to random selection if weights are invalid (e.g., all zeros or empty) + Self::select_random_adapter(input, required_capabilities) + } + } - // If request falls below the retest threshold, use this request to try and - // reset the failed adapter. If a request succeeds the adapter will be more - // likely to be selected afterwards. - if retest_rng < retest_percent { - cheapest.max_by_key(|adapter| adapter.current_error_count()) + /// Performs random selection of adapters with preference for those with fewer errors. + /// + /// Randomly selects up to 3 adapters from the available pool, then chooses the one + /// with the lowest error count. This provides a balance between load distribution + /// and avoiding problematic endpoints. + fn select_random_adapter( + input: &[&EthereumNetworkAdapter], + required_capabilities: &NodeCapabilities, + ) -> Result<Arc<EthereumAdapter>, Error> { + let choices = input.iter().copied().choose_multiple(&mut rand::rng(), 3); + if let Some(adapter) = choices.iter().min_by_key(|a| a.current_error_count()) { + Ok(adapter.adapter().clone()) } else { - // The assumption here is that most RPC endpoints will not have limits - // which makes the check for low/high available capacity less relevant. - // So we essentially assume if it had available capacity when calling - // `all_cheapest_with` then it prolly maintains that state and so we - // just select whichever adapter is working better according to - // the number of errors. - cheapest.min_by_key(|adapter| adapter.current_error_count()) + Err(anyhow!( + "A matching Ethereum network with {:?} was not found.", + required_capabilities + )) } - .map(|adapter| adapter.adapter.clone()) - .ok_or(anyhow!( - "A matching Ethereum network with {:?} was not found.", - required_capabilities - )) } pub(crate) fn unverified_cheapest_with( &self, required_capabilities: &NodeCapabilities, ) -> Result<Arc<EthereumAdapter>, Error> { - let cheapest = self.all_unverified_cheapest_with(required_capabilities); + let cheapest = self + .all_unverified_cheapest_with(required_capabilities) + .collect_vec(); - Self::cheapest_from( - cheapest.choose_multiple(&mut rand::rng(), 3), - required_capabilities, - self.retest_percent, - ) + self.cheapest_from(cheapest, required_capabilities) } /// This is the public entry point and should always use verified adapters @@ -244,9 +330,9 @@ let cheapest = self .all_cheapest_with(required_capabilities) .await - .choose_multiple(&mut rand::rng(), 3); + .collect_vec(); - Self::cheapest_from(cheapest, required_capabilities, self.retest_percent) + self.cheapest_from(cheapest, required_capabilities) } pub async fn cheapest(&self) -> Option<Arc<EthereumAdapter>> { @@ -257,7 +343,7 @@ .await .map(|mut adapters| adapters.next()) .unwrap_or_default() - .map(|ethereum_network_adapter| ethereum_network_adapter.adapter.clone()) + .map(|ethereum_network_adapter| ethereum_network_adapter.adapter().clone()) } /// call_or_cheapest will bypass ProviderManagers' validation in order to remain non async.
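// Editor's note: a minimal standalone sketch of the weighted draw used by `select_weighted_adapter`,
// written directly against rand 0.9 (the API the diff imports via `graph::prelude::rand`). Each
// candidate's effective weight is its configured weight multiplied by its current health score;
// `WeightedIndex` normalizes the weights internally, and construction fails when every weight is
// zero, which is why the code above falls back to random selection in that case. The providers and
// numbers here are hypothetical.
use rand::distr::{weighted::WeightedIndex, Distribution};

fn main() {
    // (configured weight, health score) pairs for three hypothetical providers.
    let candidates = [(0.1, 1.0), (0.2, 1.0), (1.0, 0.5)];
    let weights: Vec<f64> = candidates.iter().map(|(w, s)| w * s).collect();

    // Effective weights 0.1 : 0.2 : 0.5 translate into roughly 12.5% / 25% / 62.5% of picks.
    let dist = WeightedIndex::new(&weights).expect("at least one weight must be > 0");
    let mut rng = rand::rng();
    let mut hits = [0u32; 3];
    for _ in 0..10_000 {
        hits[dist.sample(&mut rng)] += 1;
    }
    println!("{hits:?}");

    // An all-zero weight vector is rejected, mirroring the fallback branch above.
    assert!(WeightedIndex::new(&[0.0f64, 0.0]).is_err());
}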
@@ -289,39 +375,38 @@ impl EthereumNetworkAdapters { let adapters = self .call_only_adapters .iter() - .min_by_key(|x| Arc::strong_count(&x.adapter)) + .min_by_key(|x| Arc::strong_count(x.adapter())) .ok_or(anyhow!("no available call only endpoints"))?; // TODO: This will probably blow up a lot sooner than [limit] amount of // subgraphs, since we probably use a few instances. if !adapters .limit - .has_capacity(Arc::strong_count(&adapters.adapter)) + .has_capacity(Arc::strong_count(adapters.adapter())) { bail!("call only adapter has reached the concurrency limit"); } // Cloning here ensure we have the correct count at any given time, if we return a reference it can be cloned later // which could cause a high number of endpoints to be given away before accounting for them. - Ok(Some(adapters.adapter.clone())) + Ok(Some(adapters.adapter().clone())) } } #[cfg(test)] mod tests { + use super::Health; use graph::cheap_clone::CheapClone; use graph::components::network_provider::ProviderCheckStrategy; use graph::components::network_provider::ProviderManager; use graph::components::network_provider::ProviderName; use graph::data::value::Word; use graph::http::HeaderMap; + use graph::slog::{o, Discard, Logger}; use graph::{ - endpoint::EndpointMetrics, - firehose::SubgraphLimit, - prelude::MetricsRegistry, - slog::{o, Discard, Logger}, - url::Url, + endpoint::EndpointMetrics, firehose::SubgraphLimit, prelude::MetricsRegistry, url::Url, }; + use std::collections::HashMap; use std::sync::Arc; use crate::{EthereumAdapter, EthereumAdapterTrait, ProviderEthRpcMetrics, Transport}; @@ -430,6 +515,7 @@ mod tests { }, eth_adapter.clone(), SubgraphLimit::Limit(3), + 1.0, )], vec![EthereumNetworkAdapter::new( metrics.cheap_clone(), @@ -439,6 +525,7 @@ mod tests { }, eth_call_adapter.clone(), SubgraphLimit::Limit(3), + 1.0, )], ) .await; @@ -533,6 +620,7 @@ mod tests { }, eth_call_adapter.clone(), SubgraphLimit::Unlimited, + 1.0, )], vec![EthereumNetworkAdapter::new( metrics.cheap_clone(), @@ -542,6 +630,7 @@ mod tests { }, eth_adapter.clone(), SubgraphLimit::Limit(2), + 1.0, )], ) .await; @@ -604,6 +693,7 @@ mod tests { }, eth_call_adapter.clone(), SubgraphLimit::Disabled, + 1.0, )], vec![EthereumNetworkAdapter::new( metrics.cheap_clone(), @@ -613,6 +703,7 @@ mod tests { }, eth_adapter.clone(), SubgraphLimit::Limit(3), + 1.0, )], ) .await; @@ -656,6 +747,7 @@ mod tests { }, eth_adapter.clone(), SubgraphLimit::Limit(3), + 1.0, )], vec![], ) @@ -715,24 +807,26 @@ mod tests { SubgraphLimit::Unlimited }; - no_retest_adapters.push(EthereumNetworkAdapter { - endpoint_metrics: metrics.clone(), - capabilities: NodeCapabilities { + no_retest_adapters.push(EthereumNetworkAdapter::new( + metrics.clone(), + NodeCapabilities { archive: true, traces: false, }, - adapter: adapter.clone(), - limit: limit.clone(), - }); - always_retest_adapters.push(EthereumNetworkAdapter { - endpoint_metrics: metrics.clone(), - capabilities: NodeCapabilities { + adapter.clone(), + limit.clone(), + 1.0, + )); + always_retest_adapters.push(EthereumNetworkAdapter::new( + metrics.clone(), + NodeCapabilities { archive: true, traces: false, }, adapter, limit, - }); + 1.0, + )); }); let manager = ProviderManager::::new( logger, @@ -748,11 +842,23 @@ mod tests { ProviderCheckStrategy::MarkAsValid, ); - let no_retest_adapters = - EthereumNetworkAdapters::new(chain_id.clone(), manager.clone(), vec![], Some(0f64)); + let no_retest_adapters = EthereumNetworkAdapters::new( + chain_id.clone(), + manager.clone(), + vec![], + Some(0f64), + false, + 
HashMap::new(), + ); - let always_retest_adapters = - EthereumNetworkAdapters::new(chain_id, manager.clone(), vec![], Some(1f64)); + let always_retest_adapters = EthereumNetworkAdapters::new( + chain_id, + manager.clone(), + vec![], + Some(1f64), + false, + HashMap::new(), + ); assert_eq!( no_retest_adapters @@ -799,25 +905,25 @@ mod tests { metrics.report_for_test(&ProviderName::from(error_provider), false); let mut no_retest_adapters = vec![]; - no_retest_adapters.push(EthereumNetworkAdapter { - endpoint_metrics: metrics.clone(), - capabilities: NodeCapabilities { + no_retest_adapters.push(EthereumNetworkAdapter::new( + metrics.clone(), + NodeCapabilities { archive: true, traces: false, }, - adapter: fake_adapter(&logger, error_provider, &provider_metrics, &metrics, false) - .await, - limit: SubgraphLimit::Unlimited, - }); + fake_adapter(&logger, error_provider, &provider_metrics, &metrics, false).await, + SubgraphLimit::Unlimited, + 1.0, + )); let mut always_retest_adapters = vec![]; - always_retest_adapters.push(EthereumNetworkAdapter { - endpoint_metrics: metrics.clone(), - capabilities: NodeCapabilities { + always_retest_adapters.push(EthereumNetworkAdapter::new( + metrics.clone(), + NodeCapabilities { archive: true, traces: false, }, - adapter: fake_adapter( + fake_adapter( &logger, no_error_provider, &provider_metrics, @@ -825,8 +931,9 @@ mod tests { false, ) .await, - limit: SubgraphLimit::Unlimited, - }); + SubgraphLimit::Unlimited, + 1.0, + )); let manager = ProviderManager::::new( logger.clone(), always_retest_adapters @@ -836,8 +943,14 @@ mod tests { ProviderCheckStrategy::MarkAsValid, ); - let always_retest_adapters = - EthereumNetworkAdapters::new(chain_id.clone(), manager.clone(), vec![], Some(1f64)); + let always_retest_adapters = EthereumNetworkAdapters::new( + chain_id.clone(), + manager.clone(), + vec![], + Some(1f64), + false, + HashMap::new(), + ); assert_eq!( always_retest_adapters @@ -860,8 +973,14 @@ mod tests { ProviderCheckStrategy::MarkAsValid, ); - let no_retest_adapters = - EthereumNetworkAdapters::new(chain_id.clone(), manager, vec![], Some(0f64)); + let no_retest_adapters = EthereumNetworkAdapters::new( + chain_id.clone(), + manager, + vec![], + Some(0f64), + false, + HashMap::new(), + ); assert_eq!( no_retest_adapters .cheapest_with(&NodeCapabilities { @@ -875,13 +994,13 @@ mod tests { ); let mut no_available_adapter = vec![]; - no_available_adapter.push(EthereumNetworkAdapter { - endpoint_metrics: metrics.clone(), - capabilities: NodeCapabilities { + no_available_adapter.push(EthereumNetworkAdapter::new( + metrics.clone(), + NodeCapabilities { archive: true, traces: false, }, - adapter: fake_adapter( + fake_adapter( &logger, no_error_provider, &provider_metrics, @@ -889,15 +1008,17 @@ mod tests { false, ) .await, - limit: SubgraphLimit::Disabled, - }); + SubgraphLimit::Disabled, + 1.0, + )); let manager = ProviderManager::new( logger, vec![(chain_id.clone(), no_available_adapter.to_vec())].into_iter(), ProviderCheckStrategy::MarkAsValid, ); - let no_available_adapter = EthereumNetworkAdapters::new(chain_id, manager, vec![], None); + let no_available_adapter = + EthereumNetworkAdapters::new(chain_id, manager, vec![], None, false, HashMap::new()); let res = no_available_adapter .cheapest_with(&NodeCapabilities { archive: true, @@ -933,4 +1054,112 @@ mod tests { .await, ) } + + #[graph::test] + async fn test_weighted_adapter_selection() { + let metrics = Arc::new(EndpointMetrics::mock()); + let logger = graph::log::logger(true); + let mock_registry = 
Arc::new(MetricsRegistry::mock()); + let transport = Transport::new_rpc( + Url::parse("http://127.0.0.1").unwrap(), + HeaderMap::new(), + metrics.clone(), + "", + ); + let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); + + let adapter1 = Arc::new( + EthereumAdapter::new( + logger.clone(), + "adapter1".to_string(), + transport.clone(), + provider_metrics.clone(), + true, + false, + ) + .await, + ); + + let adapter2 = Arc::new( + EthereumAdapter::new( + logger.clone(), + "adapter2".to_string(), + transport.clone(), + provider_metrics.clone(), + true, + false, + ) + .await, + ); + + let mut adapters = EthereumNetworkAdapters::for_testing( + vec![ + EthereumNetworkAdapter::new( + metrics.cheap_clone(), + NodeCapabilities { + archive: true, + traces: false, + }, + adapter1.clone(), + SubgraphLimit::Unlimited, + 0.2, + ), + EthereumNetworkAdapter::new( + metrics.cheap_clone(), + NodeCapabilities { + archive: true, + traces: false, + }, + adapter2.clone(), + SubgraphLimit::Unlimited, + 0.8, + ), + ], + vec![], + ) + .await; + + let health_checker1 = Arc::new(Health::new(adapter1.clone())); + let health_checker2 = Arc::new(Health::new(adapter2.clone())); + + // Verify health checkers start with a perfect score of 1.0 + assert_eq!(health_checker1.score(), 1.0); + assert_eq!(health_checker2.score(), 1.0); + + let mut health_map = HashMap::new(); + health_map.insert( + health_checker1.provider().to_string(), + health_checker1.clone(), + ); + health_map.insert( + health_checker2.provider().to_string(), + health_checker2.clone(), + ); + adapters.health_checkers = health_map; + adapters.weighted = true; + + let mut adapter1_count = 0; + let mut adapter2_count = 0; + + for _ in 0..1000 { + let selected_adapter = adapters + .cheapest_with(&NodeCapabilities { + archive: true, + traces: false, + }) + .await + .unwrap(); + + if selected_adapter.provider() == "adapter1" { + adapter1_count += 1; + } else { + adapter2_count += 1; + } + } + + // Check that the selection is roughly proportional to the weights. + // Allow for a 10% tolerance. 
+ assert!(adapter1_count > 100 && adapter1_count < 300); + assert!(adapter2_count > 700 && adapter2_count < 900); + } } diff --git a/node/resources/tests/full_config.toml b/node/resources/tests/full_config.toml index 057e774d93e..8751401b590 100644 --- a/node/resources/tests/full_config.toml +++ b/node/resources/tests/full_config.toml @@ -1,3 +1,5 @@ +weighted_rpc_steering = true + [general] query = "query_node_.*" @@ -43,28 +45,35 @@ indexers = [ "index_node_1_a", [chains] ingestor = "index_0" +# Provider weights configuration: +# - Weights must be between 0.0 and 1.0 (inclusive) +# - Weights are relative - they don't need to sum to 1.0 +# - Traffic is distributed proportionally based on weights +# - Example: weights [0.3, 0.5, 0.2] = 30%, 50%, 20% traffic distribution +# - At least one provider must have weight > 0.0 +# - Weight is only used for RPC providers; it is ignored for firehose providers [chains.mainnet] shard = "primary" provider = [ - { label = "mainnet-0", url = "http://rpc.mainnet.io", features = ["archive", "traces"] }, - { label = "mainnet-1", details = { type = "web3call", url = "http://rpc.mainnet.io", features = ["archive", "traces"] }}, - { label = "firehose", details = { type = "firehose", url = "http://localhost:9000", features = [] }}, + { label = "mainnet-0", url = "http://rpc.mainnet.io", features = ["archive", "traces"], weight = 0.1 }, + { label = "mainnet-1", details = { type = "web3call", url = "http://rpc.mainnet.io", features = ["archive", "traces"] }, weight = 0.2 }, + { label = "firehose", details = { type = "firehose", url = "http://localhost:9000", features = [] } }, ] [chains.ropsten] shard = "primary" provider = [ - { label = "ropsten-0", url = "http://rpc.ropsten.io", transport = "rpc", features = ["archive", "traces"] } + { label = "ropsten-0", url = "http://rpc.ropsten.io", transport = "rpc", features = ["archive", "traces"], weight = 1.0 } ] [chains.goerli] shard = "primary" provider = [ - { label = "goerli-0", url = "http://rpc.goerli.io", transport = "ipc", features = ["archive"] } + { label = "goerli-0", url = "http://rpc.goerli.io", transport = "ipc", features = ["archive"], weight = 1.0 } ] [chains.kovan] shard = "primary" provider = [ - { label = "kovan-0", url = "http://rpc.kovan.io", transport = "ws", features = [] } + { label = "kovan-0", url = "http://rpc.kovan.io", transport = "ws", features = [], weight = 1.0 } ] diff --git a/node/src/chain.rs b/node/src/chain.rs index b1f2b0709cb..8f55385ffa2 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -242,6 +242,7 @@ pub async fn create_ethereum_networks_for_chain( .await, ), web3.limit_for(&config.node), + provider.weight, ); if call_only { @@ -426,6 +427,7 @@ mod test { ethereum_ws: vec![], ethereum_ipc: vec![], unsafe_config: false, + weighted_rpc_steering: false, }; let metrics = Arc::new(EndpointMetrics::mock()); diff --git a/node/src/config.rs b/node/src/config.rs index b118f34da57..2531529d1dc 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -48,6 +48,7 @@ pub struct Opt { pub ethereum_ws: Vec, pub ethereum_ipc: Vec, pub unsafe_config: bool, + pub weighted_rpc_steering: bool, } impl Default for Opt { @@ -64,6 +65,7 @@ impl Default for Opt { ethereum_ws: vec![], ethereum_ipc: vec![], unsafe_config: false, + weighted_rpc_steering: false, } } } @@ -73,6 +75,8 @@ pub struct Config { #[serde(skip, default = "default_node_id")] pub node: NodeId, pub general: Option, + #[serde(default)] + pub weighted_rpc_steering: bool, #[serde(rename = "store")] pub stores: BTreeMap, pub 
chains: ChainSection, @@ -196,6 +200,7 @@ impl Config { Ok(Config { node, general: None, + weighted_rpc_steering: opt.weighted_rpc_steering, stores, chains, deployment, @@ -517,6 +522,7 @@ impl ChainSection { headers: Default::default(), rules: vec![], }), + weight: 1.0, }; let entry = chains.entry(name.to_string()).or_insert_with(|| Chain { shard: PRIMARY_SHARD.to_string(), @@ -558,6 +564,16 @@ impl Chain { return Err(anyhow!("Provider labels must be unique")); } + // Check that not all provider weights are zero + if !self.providers.is_empty() { + let all_zero_weights = self.providers.iter().all(|p| p.weight == 0.0); + if all_zero_weights { + return Err(anyhow!( + "All provider weights are 0.0; at least one provider must have a weight > 0.0" + )); + } + } + // `Config` validates that `self.shard` references a configured shard for provider in self.providers.iter_mut() { provider.validate()? @@ -592,6 +608,7 @@ fn btree_map_to_http_headers(kvs: BTreeMap) -> HeaderMap { pub struct Provider { pub label: String, pub details: ProviderDetails, + pub weight: f64, } #[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] @@ -715,6 +732,11 @@ const DEFAULT_PROVIDER_FEATURES: [&str; 2] = ["traces", "archive"]; impl Provider { fn validate(&mut self) -> Result<()> { validate_name(&self.label).context("illegal provider name")?; + // Weight of 0.0 is intentional: it disables the provider from weighted selection + // while keeping it available for error-retesting and non-weighted fallback paths. + if self.weight < 0.0 || self.weight > 1.0 { + bail!("provider {} must have a weight between 0 and 1", self.label); + } match self.details { ProviderDetails::Firehose(ref mut firehose) => { @@ -808,6 +830,7 @@ impl<'de> Deserialize<'de> for Provider { { let mut label = None; let mut details = None; + let mut weight = None; let mut url = None; let mut transport = None; @@ -829,6 +852,12 @@ impl<'de> Deserialize<'de> for Provider { } details = Some(map.next_value()?); } + ProviderField::Weight => { + if weight.is_some() { + return Err(serde::de::Error::duplicate_field("weight")); + } + weight = Some(map.next_value()?); + } ProviderField::Url => { if url.is_some() { return Err(serde::de::Error::duplicate_field("url")); @@ -888,13 +917,18 @@ impl<'de> Deserialize<'de> for Provider { }), }; - Ok(Provider { label, details }) + Ok(Provider { + label, + details, + weight: weight.unwrap_or(1.0), + }) } } const FIELDS: &[&str] = &[ "label", "details", + "weight", "transport", "url", "features", @@ -909,6 +943,7 @@ impl<'de> Deserialize<'de> for Provider { enum ProviderField { Label, Details, + Weight, Match, // Deprecated fields @@ -1286,6 +1321,7 @@ mod tests { headers: HeaderMap::new(), rules: Vec::new(), }), + weight: 1.0, }, actual ); @@ -1312,6 +1348,7 @@ mod tests { headers: HeaderMap::new(), rules: Vec::new(), }), + weight: 1.0, }, actual ); @@ -1373,6 +1410,7 @@ mod tests { headers, rules: Vec::new(), }), + weight: 1.0, }, actual ); @@ -1398,6 +1436,7 @@ mod tests { headers: HeaderMap::new(), rules: Vec::new(), }), + weight: 1.0, }, actual ); @@ -1439,6 +1478,7 @@ mod tests { conn_pool_size: 20, rules: vec![], }), + weight: 1.0, }, actual ); @@ -1478,6 +1518,7 @@ mod tests { conn_pool_size: 20, rules: vec![], }), + weight: 1.0, }, actual ); @@ -1517,6 +1558,7 @@ mod tests { } ], }), + weight: 1.0, }, actual ); @@ -1608,6 +1650,7 @@ mod tests { headers: HeaderMap::new(), rules: Vec::new(), }), + weight: 1.0, }, actual ); diff --git a/node/src/network_setup.rs b/node/src/network_setup.rs index 
8de5532e8ff..074ce0ff952 100644 --- a/node/src/network_setup.rs +++ b/node/src/network_setup.rs @@ -27,7 +27,7 @@ use graph::{ use graph_chain_ethereum as ethereum; use graph_store_postgres::{BlockStore, ChainHeadUpdateListener}; -use std::{any::Any, cmp::Ordering, sync::Arc, time::Duration}; +use std::{any::Any, cmp::Ordering, collections::HashMap, sync::Arc, time::Duration}; use crate::chain::{ create_ethereum_networks, create_firehose_networks, networks_as_chains, AnyChainFilter, @@ -90,10 +90,15 @@ impl AdapterConfiguration { } } +use graph_chain_ethereum::health::{health_check_task, Health}; +use tokio_util::sync::CancellationToken; + pub struct Networks { pub adapters: Vec<AdapterConfiguration>, pub rpc_provider_manager: ProviderManager<EthereumNetworkAdapter>, pub firehose_provider_manager: ProviderManager<Arc<FirehoseEndpoint>>, + pub weighted_rpc_steering: bool, + pub health_checkers: HashMap<ChainName, Vec<Arc<Health>>>, } impl Networks { @@ -111,6 +116,8 @@ impl Networks { vec![], ProviderCheckStrategy::MarkAsValid, ), + weighted_rpc_steering: false, + health_checkers: HashMap::new(), } } @@ -184,7 +191,12 @@ impl Networks { ); let adapters: Vec<_> = eth.into_iter().chain(firehose.into_iter()).collect(); - Ok(Networks::new(&logger, adapters, provider_checks)) + Ok(Networks::new( + &logger, + adapters, + provider_checks, + config.weighted_rpc_steering, + )) } pub async fn from_config_for_chain( @@ -229,6 +241,7 @@ impl Networks { logger: &Logger, adapters: Vec<AdapterConfiguration>, provider_checks: &[Arc<dyn ProviderCheck>], + weighted_rpc_steering: bool, ) -> Self { let adapters2 = adapters.clone(); let eth_adapters = adapters.iter().flat_map(|a| a.as_rpc()).cloned().map( @@ -248,6 +261,22 @@ impl Networks { }, ); + let health_checkers: HashMap<ChainName, Vec<Arc<Health>>> = eth_adapters + .clone() + .map(|(chain_id, adapters)| { + let checkers = adapters + .iter() + .map(|a| Arc::new(Health::new(a.adapter().clone()))) + .collect(); + (chain_id, checkers) + }) + .collect(); + if weighted_rpc_steering { + let cancel_token = CancellationToken::new(); + let all: Vec<_> = health_checkers.values().flatten().cloned().collect(); + tokio::spawn(health_check_task(all, cancel_token)); + } + let firehose_adapters = adapters .iter() .flat_map(|a| a.as_firehose()) @@ -273,6 +302,8 @@ impl Networks { firehose_adapters, ProviderCheckStrategy::RequireAll(provider_checks), ), + weighted_rpc_steering, + health_checkers, }; s @@ -365,11 +396,22 @@ impl Networks { .flat_map(|eth_c| eth_c.call_only.clone()) .collect_vec(); + let chain_checkers: std::collections::HashMap<String, Arc<Health>> = self + .health_checkers + .get(&chain_id) + .cloned() + .unwrap_or_default() + .into_iter() + .map(|h| (h.provider().to_string(), h)) + .collect(); + EthereumNetworkAdapters::new( chain_id, self.rpc_provider_manager.clone(), eth_adapters, None, + self.weighted_rpc_steering, + chain_checkers, ) } } diff --git a/node/src/opt.rs b/node/src/opt.rs index 3708a7da493..d027c299e4a 100644 --- a/node/src/opt.rs +++ b/node/src/opt.rs @@ -102,6 +102,12 @@ pub struct Opt { help= "Ethereum network name (e.g. 'mainnet'), optional comma-seperated capabilities (eg 'full,archive'), and an Ethereum IPC pipe, separated by a ':'", )] pub ethereum_ipc: Vec<String>, + #[clap( + long, + env = "GRAPH_WEIGHTED_RPC_STEERING", + help = "Enable weighted random steering for Ethereum RPCs" + )] + pub weighted_rpc_steering: bool, #[clap( long, value_name = "HOST:PORT", @@ -253,6 +259,7 @@ impl From<Opt> for config::Opt { ethereum_rpc, ethereum_ws, ethereum_ipc, + weighted_rpc_steering, unsafe_config, ..
} = opt; @@ -268,6 +275,7 @@ impl From<Opt> for config::Opt { ethereum_rpc, ethereum_ws, ethereum_ipc, + weighted_rpc_steering, unsafe_config, } }
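// Editor's note: a minimal standalone sketch of how a CancellationToken can stop a periodic loop
// like `health_check_task`: keep one handle, hand a clone to the spawned task, and call `cancel()`
// on shutdown so the `cancelled()` branch of the select! wins and the loop exits. `check_all` is a
// hypothetical stand-in for probing every provider; this illustrates the tokio-util API and is not
// graph-node code.
use std::time::Duration;
use tokio_util::sync::CancellationToken;

async fn check_all() {
    // pretend to run one health check per provider here
}

async fn periodic_checks(cancel_token: CancellationToken) {
    loop {
        tokio::select! {
            _ = cancel_token.cancelled() => break,
            _ = async {
                check_all().await;
                tokio::time::sleep(Duration::from_secs(10)).await;
            } => {}
        }
    }
}

#[tokio::main]
async fn main() {
    let cancel_token = CancellationToken::new();
    let handle = tokio::spawn(periodic_checks(cancel_token.clone()));

    // ... later, e.g. during shutdown:
    cancel_token.cancel();
    handle.await.unwrap();
}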