use crate::{
error::{Error, Result},
interval::ExpIncInterval,
ServicetoWorkerMsg, WorkerConfig,
};
use std::{
collections::{HashMap, HashSet},
marker::PhantomData,
sync::Arc,
time::{Duration, Instant, SystemTime, UNIX_EPOCH},
};
use futures::{channel::mpsc, future, stream::Fuse, FutureExt, Stream, StreamExt};
use addr_cache::AddrCache;
use codec::{Decode, Encode};
use ip_network::IpNetwork;
use linked_hash_set::LinkedHashSet;
use sc_network_types::kad::{Key, PeerRecord, Record};
use log::{debug, error, trace};
use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64};
use prost::Message;
use rand::{seq::SliceRandom, thread_rng};
use sc_network::{
config::DEFAULT_KADEMLIA_REPLICATION_FACTOR, event::DhtEvent, multiaddr, KademliaKey,
Multiaddr, NetworkDHTProvider, NetworkSigner, NetworkStateInfo,
};
use sc_network_types::{multihash::Code, PeerId};
use schema::PeerSignature;
use sp_api::{ApiError, ProvideRuntimeApi};
use sp_authority_discovery::{
AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature,
};
use sp_blockchain::HeaderBackend;
use sp_core::crypto::{key_types, ByteArray, Pair};
use sp_keystore::{Keystore, KeystorePtr};
use sp_runtime::traits::Block as BlockT;
mod addr_cache;
mod schema {
#[cfg(test)]
mod tests;
include!(concat!(env!("OUT_DIR"), "/authority_discovery_v3.rs"));
}
#[cfg(test)]
pub mod tests;
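/// Logging target for this module.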
const LOG_TARGET: &str = "sub-authority-discovery";
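/// Maximum number of addresses cached per authority. Additional addresses are discarded.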
const MAX_ADDRESSES_PER_AUTHORITY: usize = 16;
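/// Maximum number of global listen addresses published by the node.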
const MAX_GLOBAL_LISTEN_ADDRESSES: usize = 4;
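/// Maximum number of addresses to publish in a single record.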
const MAX_ADDRESSES_TO_PUBLISH: usize = 32;
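/// Maximum number of in-flight DHT lookups at any given point in time.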
const MAX_IN_FLIGHT_LOOKUPS: usize = 8;
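/// Role an authority discovery [`Worker`] can run as.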
pub enum Role {
/// Publish own addresses and discover addresses of others.
PublishAndDiscover(KeystorePtr),
/// Discover addresses of others.
Discover,
}
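/// An authority discovery [`Worker`] can publish the local node's addresses as well as discover
/// those of other nodes via a Kademlia DHT.
///
/// When constructed with [`Role::PublishAndDiscover`] a [`Worker`] will
///
/// 1. Retrieve its external addresses (including peer id).
///
/// 2. Get the list of keys owned by the local node participating in the current authority set.
///
/// 3. Sign the addresses with the keys.
///
/// 4. Put addresses and signature as a record with the authority id as a key on a Kademlia DHT.
///
/// When constructed with either [`Role::PublishAndDiscover`] or [`Role::Discover`] a [`Worker`]
/// will
///
/// 1. Retrieve the current set of authorities.
///
/// 2. Start DHT queries for the ids of the authorities.
///
/// 3. Validate the signatures of the retrieved key-value pairs.
///
/// 4. Add the retrieved external addresses to the local address cache.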
pub struct Worker<Client, Block: BlockT, DhtEventStream> {
/// Channel receiver for messages sent by a [`crate::Service`].
from_service: Fuse<mpsc::Receiver<ServicetoWorkerMsg>>,
client: Arc<Client>,
network: Arc<dyn NetworkProvider>,
/// Channel we receive Dht events on.
dht_event_rx: DhtEventStream,
/// Interval to be proactive, publishing own addresses.
publish_interval: ExpIncInterval,
/// Interval at which to republish if the keys in the keystore have changed.
publish_if_changed_interval: ExpIncInterval,
/// List of keys onto which addresses have been published at the latest publication.
/// Used to check whether they have changed.
latest_published_keys: HashSet<AuthorityId>,
/// List of the kademlia keys that have been published at the latest publication.
/// Used to associate DHT events with our published records.
latest_published_kad_keys: HashSet<KademliaKey>,
/// Same value as in the configuration.
publish_non_global_ips: bool,
/// Public addresses set by the node operator, always published first in the DHT record.
public_addresses: LinkedHashSet<Multiaddr>,
/// Same value as in the configuration.
strict_record_validation: bool,
/// Interval at which to request addresses of authorities, refilling the pending lookups queue.
query_interval: ExpIncInterval,
/// Queue of throttled lookups pending to be passed to the network.
pending_lookups: Vec<AuthorityId>,
/// The list of all known authorities.
known_authorities: HashMap<KademliaKey, AuthorityId>,
/// The block hash at which the list of authorities was last queried.
authorities_queried_at: Option<Block::Hash>,
/// Set of in-flight lookups.
in_flight_lookups: HashMap<KademliaKey, AuthorityId>,
/// Lookups for which we can still receive records: the `in_flight_lookups`
/// that have already produced at least one successful result.
known_lookups: HashMap<KademliaKey, AuthorityId>,
/// Last known record per key; we keep the record with the highest creation
/// time and do not accept records older than that.
last_known_records: HashMap<KademliaKey, RecordInfo>,
addr_cache: addr_cache::AddrCache,
metrics: Option<Metrics>,
/// Flag to ensure the warning about missing public addresses is only printed once.
warn_public_addresses: bool,
role: Role,
phantom: PhantomData<Block>,
}
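/// Information about a DHT record: when it was created and which peers are known to hold it.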
#[derive(Debug, Clone)]
struct RecordInfo {
/// Time since UNIX_EPOCH in nanoseconds.
creation_time: u128,
/// Peers that we know have this record, bounded by the Kademlia replication factor.
peers_with_record: HashSet<PeerId>,
/// The record itself.
record: Record,
}
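/// Wrapper for [`AuthorityDiscoveryApi`](sp_authority_discovery::AuthorityDiscoveryApi). Can be
/// implemented by any struct without dependency on the runtime.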
#[async_trait::async_trait]
pub trait AuthorityDiscovery<Block: BlockT> {
async fn authorities(&self, at: Block::Hash)
-> std::result::Result<Vec<AuthorityId>, ApiError>;
async fn best_hash(&self) -> std::result::Result<Block::Hash, Error>;
}
#[async_trait::async_trait]
impl<Block, T> AuthorityDiscovery<Block> for T
where
T: ProvideRuntimeApi<Block> + HeaderBackend<Block> + Send + Sync,
T::Api: AuthorityDiscoveryApi<Block>,
Block: BlockT,
{
async fn authorities(
&self,
at: Block::Hash,
) -> std::result::Result<Vec<AuthorityId>, ApiError> {
self.runtime_api().authorities(at)
}
async fn best_hash(&self) -> std::result::Result<Block::Hash, Error> {
Ok(self.info().best_hash)
}
}
impl<Client, Block, DhtEventStream> Worker<Client, Block, DhtEventStream>
where
Block: BlockT + Unpin + 'static,
Client: AuthorityDiscovery<Block> + 'static,
DhtEventStream: Stream<Item = DhtEvent> + Unpin,
{
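/// Construct a [`Worker`].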
pub(crate) fn new(
from_service: mpsc::Receiver<ServicetoWorkerMsg>,
client: Arc<Client>,
network: Arc<dyn NetworkProvider>,
dht_event_rx: DhtEventStream,
role: Role,
prometheus_registry: Option<prometheus_endpoint::Registry>,
config: WorkerConfig,
) -> Self {
let publish_interval =
ExpIncInterval::new(Duration::from_secs(2), config.max_publish_interval);
let query_interval = ExpIncInterval::new(Duration::from_secs(2), config.max_query_interval);
let publish_if_changed_interval =
ExpIncInterval::new(config.keystore_refresh_interval, config.keystore_refresh_interval);
let addr_cache = AddrCache::new();
let metrics = match prometheus_registry {
Some(registry) => match Metrics::register(&registry) {
Ok(metrics) => Some(metrics),
Err(e) => {
error!(target: LOG_TARGET, "Failed to register metrics: {}", e);
None
},
},
None => None,
};
let public_addresses = {
let local_peer_id = network.local_peer_id();
config
.public_addresses
.into_iter()
.map(|address| AddressType::PublicAddress(address).without_p2p(local_peer_id))
.collect()
};
Worker {
from_service: from_service.fuse(),
client,
network,
dht_event_rx,
publish_interval,
known_authorities: Default::default(),
authorities_queried_at: None,
publish_if_changed_interval,
latest_published_keys: HashSet::new(),
latest_published_kad_keys: HashSet::new(),
publish_non_global_ips: config.publish_non_global_ips,
public_addresses,
strict_record_validation: config.strict_record_validation,
query_interval,
pending_lookups: Vec::new(),
in_flight_lookups: HashMap::new(),
known_lookups: HashMap::new(),
addr_cache,
role,
metrics,
warn_public_addresses: false,
phantom: PhantomData,
last_known_records: HashMap::new(),
}
}
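/// Start the worker.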
pub async fn run(mut self) {
loop {
self.start_new_lookups();
futures::select! {
event = self.dht_event_rx.next().fuse() => {
if let Some(event) = event {
self.handle_dht_event(event).await;
} else {
return;
}
},
msg = self.from_service.select_next_some() => {
self.process_message_from_service(msg);
},
only_if_changed = future::select(
self.publish_interval.next().map(|_| false),
self.publish_if_changed_interval.next().map(|_| true)
).map(|e| e.factor_first().0).fuse() => {
if let Err(e) = self.publish_ext_addresses(only_if_changed).await {
error!(
target: LOG_TARGET,
"Failed to publish external addresses: {}", e,
);
}
},
_ = self.query_interval.next().fuse() => {
if let Err(e) = self.refill_pending_lookups_queue().await {
error!(
target: LOG_TARGET,
"Failed to request addresses of authorities: {}", e,
);
}
},
}
}
}
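/// Answer requests for cached addresses and authority ids coming from the [`crate::Service`].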
fn process_message_from_service(&self, msg: ServicetoWorkerMsg) {
match msg {
ServicetoWorkerMsg::GetAddressesByAuthorityId(authority, sender) => {
let _ = sender.send(
self.addr_cache.get_addresses_by_authority_id(&authority).map(Clone::clone),
);
},
ServicetoWorkerMsg::GetAuthorityIdsByPeerId(peer_id, sender) => {
let _ = sender
.send(self.addr_cache.get_authority_ids_by_peer_id(&peer_id).map(Clone::clone));
},
}
}
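/// Collect the addresses to publish, in priority order: public addresses set by the
/// node operator first, then global listen addresses, then external addresses
/// discovered by the network. Duplicates are dropped and at most
/// `MAX_ADDRESSES_TO_PUBLISH` are kept. The local peer id is appended to every
/// address, e.g. a hypothetical `/ip4/198.51.100.7/tcp/30333` is published as
/// `/ip4/198.51.100.7/tcp/30333/p2p/<local peer id>`.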
fn addresses_to_publish(&mut self) -> impl Iterator<Item = Multiaddr> {
let local_peer_id = self.network.local_peer_id();
let publish_non_global_ips = self.publish_non_global_ips;
let address_is_global = |address: &Multiaddr| {
address.iter().all(|protocol| match protocol {
multiaddr::Protocol::Ip4(ip) => IpNetwork::from(ip).is_global(),
multiaddr::Protocol::Ip6(ip) => IpNetwork::from(ip).is_global(),
_ => true,
})
};
let mut global_listen_addresses = self
.network
.listen_addresses()
.into_iter()
.filter_map(|address| {
address_is_global(&address)
.then(|| AddressType::GlobalListenAddress(address).without_p2p(local_peer_id))
})
.take(MAX_GLOBAL_LISTEN_ADDRESSES)
.peekable();
let mut external_addresses = self
.network
.external_addresses()
.into_iter()
.filter_map(|address| {
(publish_non_global_ips || address_is_global(&address))
.then(|| AddressType::ExternalAddress(address).without_p2p(local_peer_id))
})
.peekable();
let has_global_listen_addresses = global_listen_addresses.peek().is_some();
trace!(
target: LOG_TARGET,
"Node has public addresses: {}, global listen addresses: {}, external addresses: {}",
!self.public_addresses.is_empty(),
has_global_listen_addresses,
external_addresses.peek().is_some(),
);
let mut seen_addresses = HashSet::new();
let addresses = self
.public_addresses
.clone()
.into_iter()
.chain(global_listen_addresses)
.chain(external_addresses)
.filter(|address| seen_addresses.insert(address.clone()))
.take(MAX_ADDRESSES_TO_PUBLISH)
.collect::<Vec<_>>();
if !addresses.is_empty() {
debug!(
target: LOG_TARGET,
"Publishing authority DHT record peer_id='{local_peer_id}' with addresses='{addresses:?}'",
);
if !self.warn_public_addresses &&
self.public_addresses.is_empty() &&
!has_global_listen_addresses
{
self.warn_public_addresses = true;
error!(
target: LOG_TARGET,
"No public addresses configured and no global listen addresses found. \
Authority DHT record may contain unreachable addresses. \
Consider setting `--public-addr` to the public IP address of this node. \
This will become a hard requirement in future versions for authorities."
);
}
}
addresses
.into_iter()
.map(move |a| a.with(multiaddr::Protocol::P2p(*local_peer_id.as_ref())))
}
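/// Publish own public addresses.
///
/// If `only_if_changed` is true, this is a no-op when the set of keys to publish
/// equals `self.latest_published_keys`.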
async fn publish_ext_addresses(&mut self, only_if_changed: bool) -> Result<()> {
let key_store = match &self.role {
Role::PublishAndDiscover(key_store) => key_store,
Role::Discover => return Ok(()),
}
.clone();
let addresses = serialize_addresses(self.addresses_to_publish());
if addresses.is_empty() {
trace!(
target: LOG_TARGET,
"No addresses to publish. Skipping publication."
);
self.publish_interval.set_to_start();
return Ok(())
}
let keys =
Worker::<Client, Block, DhtEventStream>::get_own_public_keys_within_authority_set(
key_store.clone(),
self.client.as_ref(),
)
.await?
.into_iter()
.collect::<HashSet<_>>();
if only_if_changed {
if keys == self.latest_published_keys {
return Ok(())
}
self.publish_interval.set_to_start();
self.query_interval.set_to_start();
}
if let Some(metrics) = &self.metrics {
metrics.publish.inc();
metrics
.amount_addresses_last_published
.set(addresses.len().try_into().unwrap_or(std::u64::MAX));
}
let serialized_record = serialize_authority_record(addresses, Some(build_creation_time()))?;
let peer_signature = sign_record_with_peer_id(&serialized_record, &self.network)?;
let keys_vec = keys.iter().cloned().collect::<Vec<_>>();
let kv_pairs = sign_record_with_authority_ids(
serialized_record,
Some(peer_signature),
key_store.as_ref(),
keys_vec,
)?;
self.latest_published_kad_keys = kv_pairs.iter().map(|(k, _)| k.clone()).collect();
for (key, value) in kv_pairs.into_iter() {
self.network.put_value(key, value);
}
self.latest_published_keys = keys;
Ok(())
}
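/// Refill the queue of pending lookups: fetch the current authority set from the
/// runtime (excluding our own authority ids), drop cached data for authorities that
/// left the set, shuffle the rest and reset the in-flight and known lookup tables.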
async fn refill_pending_lookups_queue(&mut self) -> Result<()> {
let best_hash = self.client.best_hash().await?;
let local_keys = match &self.role {
Role::PublishAndDiscover(key_store) => key_store
.sr25519_public_keys(key_types::AUTHORITY_DISCOVERY)
.into_iter()
.collect::<HashSet<_>>(),
Role::Discover => HashSet::new(),
};
let mut authorities = self
.client
.authorities(best_hash)
.await
.map_err(|e| Error::CallingRuntime(e.into()))?
.into_iter()
.filter(|id| !local_keys.contains(id.as_ref()))
.collect::<Vec<_>>();
self.known_authorities = authorities
.clone()
.into_iter()
.map(|authority| (hash_authority_id(authority.as_ref()), authority))
.collect::<HashMap<_, _>>();
self.authorities_queried_at = Some(best_hash);
self.addr_cache.retain_ids(&authorities);
let now = Instant::now();
self.last_known_records.retain(|k, value| {
self.known_authorities.contains_key(k) && !value.record.is_expired(now)
});
authorities.shuffle(&mut thread_rng());
self.pending_lookups = authorities;
self.in_flight_lookups.clear();
self.known_lookups.clear();
if let Some(metrics) = &self.metrics {
metrics
.requests_pending
.set(self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX));
}
Ok(())
}
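/// Pop pending lookups and fire DHT queries, keeping at most
/// `MAX_IN_FLIGHT_LOOKUPS` in flight.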
fn start_new_lookups(&mut self) {
while self.in_flight_lookups.len() < MAX_IN_FLIGHT_LOOKUPS {
let authority_id = match self.pending_lookups.pop() {
Some(authority) => authority,
None => return,
};
let hash = hash_authority_id(authority_id.as_ref());
self.network.get_value(&hash);
self.in_flight_lookups.insert(hash, authority_id);
if let Some(metrics) = &self.metrics {
metrics.requests.inc();
metrics
.requests_pending
.set(self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX));
}
}
}
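/// Handle incoming Dht events.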
async fn handle_dht_event(&mut self, event: DhtEvent) {
match event {
DhtEvent::ValueFound(v) => {
if let Some(metrics) = &self.metrics {
metrics.dht_event_received.with_label_values(&["value_found"]).inc();
}
debug!(target: LOG_TARGET, "Value for hash '{:?}' found on Dht.", v.record.key);
if let Err(e) = self.handle_dht_value_found_event(v) {
if let Some(metrics) = &self.metrics {
metrics.handle_value_found_event_failure.inc();
}
debug!(target: LOG_TARGET, "Failed to handle Dht value found event: {}", e);
}
},
DhtEvent::ValueNotFound(hash) => {
if let Some(metrics) = &self.metrics {
metrics.dht_event_received.with_label_values(&["value_not_found"]).inc();
}
if self.in_flight_lookups.remove(&hash).is_some() {
debug!(target: LOG_TARGET, "Value for hash '{:?}' not found on Dht.", hash)
} else {
debug!(
target: LOG_TARGET,
"Received 'ValueNotFound' for unexpected hash '{:?}'.", hash
)
}
},
DhtEvent::ValuePut(hash) => {
if !self.latest_published_kad_keys.contains(&hash) {
return;
}
self.publish_interval.set_to_max();
if let Some(metrics) = &self.metrics {
metrics.dht_event_received.with_label_values(&["value_put"]).inc();
}
debug!(target: LOG_TARGET, "Successfully put hash '{:?}' on Dht.", hash)
},
DhtEvent::ValuePutFailed(hash) => {
if !self.latest_published_kad_keys.contains(&hash) {
return;
}
if let Some(metrics) = &self.metrics {
metrics.dht_event_received.with_label_values(&["value_put_failed"]).inc();
}
debug!(target: LOG_TARGET, "Failed to put hash '{:?}' on Dht.", hash)
},
DhtEvent::PutRecordRequest(record_key, record_value, publisher, expires) => {
if let Err(e) = self
.handle_put_record_requested(record_key, record_value, publisher, expires)
.await
{
debug!(target: LOG_TARGET, "Failed to handle put record request: {}", e)
}
if let Some(metrics) = &self.metrics {
metrics.dht_event_received.with_label_values(&["put_record_req"]).inc();
}
},
DhtEvent::StartProvidingFailed(..) => {},
DhtEvent::ProvidersFound(..) => {},
DhtEvent::ProvidersNotFound(..) => {},
}
}
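/// Handle a DHT put-record request from a remote peer: verify that the record is
/// signed by a known authority and by the publisher's network key, and store it
/// only if it is not older than the best record already known for that key.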
async fn handle_put_record_requested(
&mut self,
record_key: Key,
record_value: Vec<u8>,
publisher: Option<PeerId>,
expires: Option<std::time::Instant>,
) -> Result<()> {
let publisher = publisher.ok_or(Error::MissingPublisher)?;
let best_hash = self.client.best_hash().await?;
if !self.known_authorities.contains_key(&record_key) &&
self.authorities_queried_at
.map(|authorities_queried_at| authorities_queried_at != best_hash)
.unwrap_or(true)
{
let authorities = self
.client
.authorities(best_hash)
.await
.map_err(|e| Error::CallingRuntime(e.into()))?
.into_iter()
.collect::<Vec<_>>();
self.known_authorities = authorities
.into_iter()
.map(|authority| (hash_authority_id(authority.as_ref()), authority))
.collect::<HashMap<_, _>>();
self.authorities_queried_at = Some(best_hash);
}
let authority_id =
self.known_authorities.get(&record_key).ok_or(Error::UnknownAuthority)?;
let signed_record =
Self::check_record_signed_with_authority_id(record_value.as_slice(), authority_id)?;
self.check_record_signed_with_network_key(
&signed_record.record,
signed_record.peer_signature,
publisher,
authority_id,
)?;
let records_creation_time: u128 =
schema::AuthorityRecord::decode(signed_record.record.as_slice())
.map_err(Error::DecodingProto)?
.creation_time
.map(|creation_time| {
u128::decode(&mut &creation_time.timestamp[..]).unwrap_or_default()
})
.unwrap_or_default();
let current_record_info = self.last_known_records.get(&record_key);
if let Some(current_record_info) = current_record_info {
if records_creation_time < current_record_info.creation_time {
debug!(
target: LOG_TARGET,
"Skip storing because record creation time {:?} is older than the current known record {:?}",
records_creation_time,
current_record_info.creation_time
);
return Ok(());
}
}
self.network.store_record(record_key, record_value, Some(publisher), expires);
Ok(())
}
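/// Decode a signed authority record and verify the authority signature over the payload.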
fn check_record_signed_with_authority_id(
record: &[u8],
authority_id: &AuthorityId,
) -> Result<schema::SignedAuthorityRecord> {
let signed_record: schema::SignedAuthorityRecord =
schema::SignedAuthorityRecord::decode(record).map_err(Error::DecodingProto)?;
let auth_signature = AuthoritySignature::decode(&mut &signed_record.auth_signature[..])
.map_err(Error::EncodingDecodingScale)?;
if !AuthorityPair::verify(&auth_signature, &signed_record.record, &authority_id) {
return Err(Error::VerifyingDhtPayload)
}
Ok(signed_record)
}
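/// Verify the optional libp2p peer signature over the record. A missing signature is
/// only an error when `strict_record_validation` is enabled.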
fn check_record_signed_with_network_key(
&self,
record: &Vec<u8>,
peer_signature: Option<PeerSignature>,
remote_peer_id: PeerId,
authority_id: &AuthorityId,
) -> Result<()> {
if let Some(peer_signature) = peer_signature {
match self.network.verify(
remote_peer_id.into(),
&peer_signature.public_key,
&peer_signature.signature,
record,
) {
Ok(true) => {},
Ok(false) => return Err(Error::VerifyingDhtPayload),
Err(error) => return Err(Error::ParsingLibp2pIdentity(error)),
}
} else if self.strict_record_validation {
return Err(Error::MissingPeerIdSignature)
} else {
debug!(
target: LOG_TARGET,
"Received unsigned authority discovery record from {}", authority_id
);
}
Ok(())
}
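/// Process a record received in response to one of our lookups: verify both
/// signatures, extract the published addresses and update the address cache if
/// this is the freshest record seen for the authority.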
fn handle_dht_value_found_event(&mut self, peer_record: PeerRecord) -> Result<()> {
let remote_key = peer_record.record.key.clone();
let authority_id: AuthorityId =
if let Some(authority_id) = self.in_flight_lookups.remove(&remote_key) {
self.known_lookups.insert(remote_key.clone(), authority_id.clone());
authority_id
} else if let Some(authority_id) = self.known_lookups.get(&remote_key) {
authority_id.clone()
} else {
return Err(Error::ReceivingUnexpectedRecord);
};
let local_peer_id = self.network.local_peer_id();
let schema::SignedAuthorityRecord { record, peer_signature, .. } =
Self::check_record_signed_with_authority_id(
peer_record.record.value.as_slice(),
&authority_id,
)?;
let authority_record =
schema::AuthorityRecord::decode(record.as_slice()).map_err(Error::DecodingProto)?;
let records_creation_time: u128 = authority_record
.creation_time
.as_ref()
.map(|creation_time| {
u128::decode(&mut &creation_time.timestamp[..]).unwrap_or_default()
})
.unwrap_or_default();
let addresses: Vec<Multiaddr> = authority_record
.addresses
.into_iter()
.map(|a| a.try_into())
.collect::<std::result::Result<_, _>>()
.map_err(Error::ParsingMultiaddress)?;
let get_peer_id = |a: &Multiaddr| match a.iter().last() {
Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key).ok(),
_ => None,
};
let addresses: Vec<Multiaddr> = addresses
.into_iter()
.filter(|a| get_peer_id(&a).filter(|p| *p != local_peer_id).is_some())
.collect();
let remote_peer_id = single(addresses.iter().map(|a| get_peer_id(&a)))
.map_err(|_| Error::ReceivingDhtValueFoundEventWithDifferentPeerIds)?
.flatten()
.ok_or(Error::ReceivingDhtValueFoundEventWithNoPeerIds)?;
self.check_record_signed_with_network_key(
&record,
peer_signature,
remote_peer_id,
&authority_id,
)?;
let remote_addresses: Vec<Multiaddr> =
addresses.into_iter().take(MAX_ADDRESSES_PER_AUTHORITY).collect();
let answering_peer_id = peer_record.peer.map(|peer| peer.into());
let addr_cache_needs_update = self.handle_new_record(
&authority_id,
remote_key.clone(),
RecordInfo {
creation_time: records_creation_time,
peers_with_record: answering_peer_id.into_iter().collect(),
record: peer_record.record,
},
);
if !remote_addresses.is_empty() && addr_cache_needs_update {
self.addr_cache.insert(authority_id, remote_addresses);
if let Some(metrics) = &self.metrics {
metrics
.known_authorities_count
.set(self.addr_cache.num_authority_ids().try_into().unwrap_or(std::u64::MAX));
}
}
Ok(())
}
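/// Compare a received record against the last known one for the same key.
/// Returns `true` (remembering the record) if it is at least as fresh as the
/// current one, in which case the address cache should be updated; otherwise the
/// newer record we already hold is pushed back to the peers serving the stale copy.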
fn handle_new_record(
&mut self,
authority_id: &AuthorityId,
kademlia_key: KademliaKey,
new_record: RecordInfo,
) -> bool {
let current_record_info = self
.last_known_records
.entry(kademlia_key.clone())
.or_insert_with(|| new_record.clone());
if new_record.creation_time > current_record_info.creation_time {
let peers_that_need_updating = current_record_info.peers_with_record.clone();
self.network.put_record_to(
new_record.record.clone(),
peers_that_need_updating.clone(),
current_record_info.peers_with_record.is_empty(),
);
debug!(
target: LOG_TARGET,
"Found a newer record for {:?} new record creation time {:?} old record creation time {:?}",
authority_id, new_record.creation_time, current_record_info.creation_time
);
self.last_known_records.insert(kademlia_key, new_record);
return true
}
if new_record.creation_time == current_record_info.creation_time {
debug!(
target: LOG_TARGET,
"Found same record for {:?} record creation time {:?}",
authority_id, new_record.creation_time
);
if current_record_info.peers_with_record.len() + new_record.peers_with_record.len() <=
DEFAULT_KADEMLIA_REPLICATION_FACTOR
{
current_record_info.peers_with_record.extend(new_record.peers_with_record);
}
return true
}
debug!(
target: LOG_TARGET,
"Found old record for {:?} received record creation time {:?} current record creation time {:?}",
authority_id, new_record.creation_time, current_record_info.creation_time,
);
self.network.put_record_to(
current_record_info.record.clone().into(),
new_record.peers_with_record.clone(),
new_record.peers_with_record.is_empty(),
);
return false
}
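/// Retrieve our public keys within the current authority set.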
async fn get_own_public_keys_within_authority_set(
key_store: KeystorePtr,
client: &Client,
) -> Result<HashSet<AuthorityId>> {
let local_pub_keys = key_store
.sr25519_public_keys(key_types::AUTHORITY_DISCOVERY)
.into_iter()
.collect::<HashSet<_>>();
let best_hash = client.best_hash().await?;
let authorities = client
.authorities(best_hash)
.await
.map_err(|e| Error::CallingRuntime(e.into()))?
.into_iter()
.map(Into::into)
.collect::<HashSet<_>>();
let intersection =
local_pub_keys.intersection(&authorities).cloned().map(Into::into).collect();
Ok(intersection)
}
}
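/// A [`Multiaddr`] tagged with its origin, used for logging when the
/// `/p2p/...` suffix is stripped before publishing.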
#[derive(Debug, Clone, PartialEq, Eq)]
enum AddressType {
PublicAddress(Multiaddr),
GlobalListenAddress(Multiaddr),
ExternalAddress(Multiaddr),
}
impl AddressType {
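/// Removes the `/p2p/...` suffix from the address if present, logging an error
/// when the embedded peer id does not match the local one.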
fn without_p2p(self, local_peer_id: PeerId) -> Multiaddr {
let (mut address, source) = match self {
AddressType::PublicAddress(address) => (address, "public address"),
AddressType::GlobalListenAddress(address) => (address, "global listen address"),
AddressType::ExternalAddress(address) => (address, "external address"),
};
if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() {
if peer_id != *local_peer_id.as_ref() {
error!(
target: LOG_TARGET,
"Network returned '{source}' '{address}' with peer id \
not matching the local peer id '{local_peer_id}'.",
);
}
address.pop();
}
address
}
}
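/// NetworkProvider provides [`Worker`] with all necessary hooks into the underlying
/// Substrate networking. Using this trait abstraction instead of
/// [`sc_network::NetworkService`] directly is necessary to unit test [`Worker`].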
pub trait NetworkProvider:
NetworkDHTProvider + NetworkStateInfo + NetworkSigner + Send + Sync
{
}
impl<T> NetworkProvider for T where
T: NetworkDHTProvider + NetworkStateInfo + NetworkSigner + Send + Sync
{
}
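/// Derive the Kademlia key for an authority id by SHA-256-hashing its raw bytes.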
fn hash_authority_id(id: &[u8]) -> KademliaKey {
KademliaKey::new(&Code::Sha2_256.digest(id).digest())
}
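/// Makes sure all values are the same and returns it.
///
/// Returns `Err(())` if not all values are equal. Returns `Ok(None)` if there are
/// no values. An illustrative sketch of the contract (not a compiled doctest):
///
/// ```ignore
/// assert_eq!(single(vec![1, 1, 1]), Ok(Some(1)));
/// assert_eq!(single(Vec::<u8>::new()), Ok(None));
/// assert_eq!(single(vec![1, 2]), Err(()));
/// ```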
fn single<T>(values: impl IntoIterator<Item = T>) -> std::result::Result<Option<T>, ()>
where
T: PartialEq<T>,
{
values.into_iter().try_fold(None, |acc, item| match acc {
None => Ok(Some(item)),
Some(ref prev) if *prev != item => Err(()),
Some(x) => Ok(Some(x)),
})
}
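/// Serialize each [`Multiaddr`] to its binary representation for inclusion in the
/// protobuf record.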
fn serialize_addresses(addresses: impl Iterator<Item = Multiaddr>) -> Vec<Vec<u8>> {
addresses.map(|a| a.to_vec()).collect()
}
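/// Build a record timestamp: nanoseconds since `UNIX_EPOCH`, SCALE-encoded as a
/// `u128`. The decoding side (see `handle_dht_value_found_event`) reverses this;
/// an illustrative sketch (not a compiled doctest):
///
/// ```ignore
/// let ts = build_creation_time();
/// let nanos = u128::decode(&mut &ts.timestamp[..]).unwrap_or_default();
/// ```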
fn build_creation_time() -> schema::TimestampInfo {
let creation_time = SystemTime::now()
.duration_since(UNIX_EPOCH)
.map(|time| time.as_nanos())
.unwrap_or_default();
schema::TimestampInfo { timestamp: creation_time.encode() }
}
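/// Encode an `AuthorityRecord` protobuf message from the given addresses and
/// optional creation time.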
fn serialize_authority_record(
addresses: Vec<Vec<u8>>,
creation_time: Option<schema::TimestampInfo>,
) -> Result<Vec<u8>> {
let mut serialized_record = vec![];
schema::AuthorityRecord { addresses, creation_time }
.encode(&mut serialized_record)
.map_err(Error::EncodingProto)?;
Ok(serialized_record)
}
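/// Sign the serialized record with the local libp2p identity key, returning the
/// signature together with the protobuf-encoded public key.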
fn sign_record_with_peer_id(
serialized_record: &[u8],
network: &impl NetworkSigner,
) -> Result<schema::PeerSignature> {
let signature = network
.sign_with_local_identity(serialized_record.to_vec())
.map_err(|e| Error::CannotSign(format!("{} (network packet)", e)))?;
let public_key = signature.public_key.encode_protobuf();
let signature = signature.bytes;
Ok(schema::PeerSignature { signature, public_key })
}
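/// Sign the serialized record with each of the given authority keys and build the
/// `(KademliaKey, signed record)` pairs to publish on the DHT.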
fn sign_record_with_authority_ids(
serialized_record: Vec<u8>,
peer_signature: Option<schema::PeerSignature>,
key_store: &dyn Keystore,
keys: Vec<AuthorityId>,
) -> Result<Vec<(KademliaKey, Vec<u8>)>> {
let mut result = Vec::with_capacity(keys.len());
for key in keys.iter() {
let auth_signature = key_store
.sr25519_sign(key_types::AUTHORITY_DISCOVERY, key.as_ref(), &serialized_record)
.map_err(|e| Error::CannotSign(format!("{}. Key: {:?}", e, key)))?
.ok_or_else(|| {
Error::CannotSign(format!("Could not find key in keystore. Key: {:?}", key))
})?;
let auth_signature = auth_signature.encode();
let signed_record = schema::SignedAuthorityRecord {
record: serialized_record.clone(),
auth_signature,
peer_signature: peer_signature.clone(),
}
.encode_to_vec();
result.push((hash_authority_id(key.as_slice()), signed_record));
}
Ok(result)
}
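/// Prometheus metrics kept by the authority discovery worker.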
#[derive(Clone)]
pub(crate) struct Metrics {
publish: Counter<U64>,
amount_addresses_last_published: Gauge<U64>,
requests: Counter<U64>,
requests_pending: Gauge<U64>,
dht_event_received: CounterVec<U64>,
handle_value_found_event_failure: Counter<U64>,
known_authorities_count: Gauge<U64>,
}
impl Metrics {
pub(crate) fn register(registry: &prometheus_endpoint::Registry) -> Result<Self> {
Ok(Self {
publish: register(
Counter::new(
"substrate_authority_discovery_times_published_total",
"Number of times authority discovery has published external addresses.",
)?,
registry,
)?,
amount_addresses_last_published: register(
Gauge::new(
"substrate_authority_discovery_amount_external_addresses_last_published",
"Number of external addresses published when authority discovery last \
published addresses.",
)?,
registry,
)?,
requests: register(
Counter::new(
"substrate_authority_discovery_authority_addresses_requested_total",
"Number of times authority discovery has requested external addresses of a \
single authority.",
)?,
registry,
)?,
requests_pending: register(
Gauge::new(
"substrate_authority_discovery_authority_address_requests_pending",
"Number of pending authority address requests.",
)?,
registry,
)?,
dht_event_received: register(
CounterVec::new(
Opts::new(
"substrate_authority_discovery_dht_event_received",
"Number of dht events received by authority discovery.",
),
&["name"],
)?,
registry,
)?,
handle_value_found_event_failure: register(
Counter::new(
"substrate_authority_discovery_handle_value_found_event_failure",
"Number of times handling a dht value found event failed.",
)?,
registry,
)?,
known_authorities_count: register(
Gauge::new(
"substrate_authority_discovery_known_authorities_count",
"Number of authorities known by authority discovery.",
)?,
registry,
)?,
})
}
}
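// Test helper to inject addresses directly into the address cache.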
#[cfg(test)]
impl<Block: BlockT, Client, DhtEventStream> Worker<Client, Block, DhtEventStream> {
pub(crate) fn inject_addresses(&mut self, authority: AuthorityId, addresses: Vec<Multiaddr>) {
self.addr_cache.insert(authority, addresses);
}
}