// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Defines traits which represent a common interface for Substrate pallets which want to
//! incorporate bridge functionality.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
use crate::justification::{
GrandpaJustification, JustificationVerificationContext, JustificationVerificationError,
};
use bp_runtime::{
BasicOperatingMode, BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf, RawStorageProof,
StorageProofChecker, StorageProofError, UnderlyingChainProvider,
};
use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen};
use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug};
use frame_support::PalletError;
use scale_info::TypeInfo;
use serde::{Deserialize, Serialize};
use sp_consensus_grandpa::{
AuthorityList, ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID,
};
use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug, SaturatedConversion};
use sp_std::{boxed::Box, vec::Vec};
pub use call_info::{BridgeGrandpaCall, BridgeGrandpaCallOf, SubmitFinalityProofInfo};
mod call_info;
pub mod justification;
pub mod storage_keys;
/// Header chain error.
#[derive(Clone, Decode, Encode, Eq, PartialEq, PalletError, Debug, TypeInfo)]
pub enum HeaderChainError {
/// Header with given hash is missing from the chain.
UnknownHeader,
/// Error generated by the `storage_proof` module.
StorageProof(StorageProofError),
}
/// Header data that we're storing on-chain.
///
/// Even though we may store the full header, our applications (XCM) only use a couple of header
/// fields. Extracting those values keeps on-chain storage and the PoV smaller, which is good.
#[derive(Clone, Decode, Encode, Eq, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo)]
pub struct StoredHeaderData<Number, Hash> {
/// Header number.
pub number: Number,
/// Header state root.
pub state_root: Hash,
}
/// Stored header data builder.
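///
/// A minimal sketch of how the blanket implementation below is used (illustrative only, marked
/// `ignore` so it is not compiled as a doctest; it assumes this crate is imported as
/// `bp_header_chain`):
///
/// ```ignore
/// use bp_header_chain::StoredHeaderDataBuilder;
/// use sp_runtime::{generic, traits::{BlakeTwo256, Header as _}};
///
/// type TestHeader = generic::Header<u32, BlakeTwo256>;
///
/// // number, extrinsics root, state root, parent hash, digest
/// let header = TestHeader::new(
/// 	1,
/// 	Default::default(),
/// 	Default::default(),
/// 	Default::default(),
/// 	Default::default(),
/// );
///
/// // only the number and the state root are retained for on-chain storage
/// let stored = header.build();
/// assert_eq!(stored.number, 1);
/// assert_eq!(stored.state_root, *header.state_root());
/// ```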
pub trait StoredHeaderDataBuilder<Number, Hash> {
/// Build header data from self.
fn build(&self) -> StoredHeaderData<Number, Hash>;
}
impl<H: HeaderT> StoredHeaderDataBuilder<H::Number, H::Hash> for H {
fn build(&self) -> StoredHeaderData<H::Number, H::Hash> {
StoredHeaderData { number: *self.number(), state_root: *self.state_root() }
}
}
/// Substrate header chain, abstracted from the way it is stored.
pub trait HeaderChain<C: Chain> {
/// Returns the state (storage) root of the given finalized header.
fn finalized_header_state_root(header_hash: HashOf<C>) -> Option<HashOf<C>>;
/// Get a storage proof checker for the given finalized header.
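///
/// A rough sketch of how a downstream pallet could use this method (illustrative only, marked
/// `ignore` so it is not compiled as a doctest; `BridgedHeaderChain`, `header_hash`, `proof`,
/// `key` and the `read_value` accessor name are assumptions, not part of this trait):
///
/// ```ignore
/// // `BridgedHeaderChain` is some type implementing `HeaderChain<BridgedChain>`
/// let mut checker = BridgedHeaderChain::verify_storage_proof(header_hash, proof)?;
/// // read a raw storage value that is covered by the proof, using the checker's accessor
/// let maybe_raw_value = checker.read_value(&key)?;
/// ```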
fn verify_storage_proof(
header_hash: HashOf<C>,
storage_proof: RawStorageProof,
) -> Result<StorageProofChecker<HasherOf<C>>, HeaderChainError> {
let state_root = Self::finalized_header_state_root(header_hash)
.ok_or(HeaderChainError::UnknownHeader)?;
StorageProofChecker::new(state_root, storage_proof).map_err(HeaderChainError::StorageProof)
}
}
/// A type that can be used as a parameter in a dispatchable function.
///
/// When using `decl_module`, all arguments for call functions must implement this trait.
pub trait Parameter: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {}
impl<T> Parameter for T where T: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {}
/// A GRANDPA Authority List and ID.
#[derive(Default, Encode, Eq, Decode, RuntimeDebug, PartialEq, Clone, TypeInfo)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct AuthoritySet {
/// List of GRANDPA authorities for the current round.
pub authorities: AuthorityList,
/// Monotonic identifier of the current GRANDPA authority set.
pub set_id: SetId,
}
impl AuthoritySet {
/// Create a new GRANDPA Authority Set.
pub fn new(authorities: AuthorityList, set_id: SetId) -> Self {
Self { authorities, set_id }
}
}
/// Data required for initializing the GRANDPA bridge pallet.
///
/// The bridge needs to know where to start its sync from, and this provides that initial context.
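///
/// A minimal construction sketch (illustrative only, marked `ignore` so it is not compiled as a
/// doctest; `header` and `authorities` are assumed to describe the bridged chain at the block
/// you want to start syncing from):
///
/// ```ignore
/// let init_data = InitializationData {
/// 	// sync starts at this finalized header of the bridged chain
/// 	header: Box::new(header),
/// 	// GRANDPA authorities that will sign justifications for descendants of `header`
/// 	authority_list: authorities,
/// 	set_id: 0,
/// 	operating_mode: BasicOperatingMode::Normal,
/// };
/// ```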
#[derive(
Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, Clone, TypeInfo, Serialize, Deserialize,
)]
pub struct InitializationData<H: HeaderT> {
/// The header from which we should start syncing.
pub header: Box<H>,
/// The initial authorities of the pallet.
pub authority_list: AuthorityList,
/// The ID of the initial authority set.
pub set_id: SetId,
/// Pallet operating mode.
pub operating_mode: BasicOperatingMode,
}
/// Abstract finality proof that is justifying block finality.
pub trait FinalityProof<Hash, Number>: Clone + Send + Sync + Debug {
/// Return hash of header that this proof is generated for.
fn target_header_hash(&self) -> Hash;
/// Return number of header that this proof is generated for.
fn target_header_number(&self) -> Number;
}
/// A trait that provides helper methods for querying the consensus log.
pub trait ConsensusLogReader {
/// Returns true if the digest contains an item that schedules an authority set change.
fn schedules_authorities_change(digest: &Digest) -> bool;
}
/// A struct that provides helper methods for querying the GRANDPA consensus log.
pub struct GrandpaConsensusLogReader<Number>(sp_std::marker::PhantomData<Number>);
impl<Number: Codec> GrandpaConsensusLogReader<Number> {
/// Find and return scheduled (regular) change digest item.
pub fn find_scheduled_change(digest: &Digest) -> Option<ScheduledChange<Number>> {
use sp_runtime::generic::OpaqueDigestItemId;
let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID);
let filter_log = |log: ConsensusLog<Number>| match log {
ConsensusLog::ScheduledChange(change) => Some(change),
_ => None,
};
// find the first consensus digest with the right ID which converts to
// the right kind of consensus log.
digest.convert_first(|l| l.try_to(id).and_then(filter_log))
}
/// Find and return the forced change digest item. Our light client can't do anything
/// with forced changes, so we can't accept headers with a forced change digest.
pub fn find_forced_change(digest: &Digest) -> Option<(Number, ScheduledChange<Number>)> {
// find the first consensus digest with the right ID which converts to
// the right kind of consensus log.
digest
.convert_first(|log| log.consensus_try_to(&GRANDPA_ENGINE_ID))
.and_then(|log| match log {
ConsensusLog::ForcedChange(delay, change) => Some((delay, change)),
_ => None,
})
}
}
impl<Number: Codec> ConsensusLogReader for GrandpaConsensusLogReader<Number> {
fn schedules_authorities_change(digest: &Digest) -> bool {
GrandpaConsensusLogReader::<Number>::find_scheduled_change(digest).is_some()
}
}
/// The finality-related info associated to a header.
#[derive(Encode, Decode, Debug, PartialEq, Clone, TypeInfo)]
pub struct HeaderFinalityInfo<FinalityProof, FinalityVerificationContext> {
/// The header finality proof.
pub finality_proof: FinalityProof,
/// The new verification context introduced by the header.
pub new_verification_context: Option<FinalityVerificationContext>,
}
/// Grandpa-related info associated to a header. This info can be saved to events.
pub type StoredHeaderGrandpaInfo<Header> =
HeaderFinalityInfo<GrandpaJustification<Header>, AuthoritySet>;
/// Processed Grandpa-related info associated to a header.
pub type HeaderGrandpaInfo<Header> =
HeaderFinalityInfo<GrandpaJustification<Header>, JustificationVerificationContext>;
impl<Header: HeaderT> TryFrom<StoredHeaderGrandpaInfo<Header>> for HeaderGrandpaInfo<Header> {
type Error = JustificationVerificationError;
fn try_from(grandpa_info: StoredHeaderGrandpaInfo<Header>) -> Result<Self, Self::Error> {
Ok(Self {
finality_proof: grandpa_info.finality_proof,
new_verification_context: match grandpa_info.new_verification_context {
Some(authority_set) => Some(authority_set.try_into()?),
None => None,
},
})
}
}
/// Helper trait for finding equivocations in finality proofs.
pub trait FindEquivocations<FinalityProof, FinalityVerificationContext, EquivocationProof> {
/// The type returned when encountering an error while looking for equivocations.
type Error: Debug;
/// Find equivocations.
fn find_equivocations(
verification_context: &FinalityVerificationContext,
synced_proof: &FinalityProof,
source_proofs: &[FinalityProof],
) -> Result<Vec<EquivocationProof>, Self::Error>;
}
/// Substrate-based chain that is using direct GRANDPA finality.
///
/// Keep in mind that parachains rely on the relay chain's GRANDPA finality, so they should not
/// implement this trait.
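///
/// A sketch of a typical implementation (illustrative only, marked `ignore` so it is not
/// compiled as a doctest; `RialtoLikeChain` and all constant values are made-up placeholders,
/// see `TestChain` in this crate's tests for a complete, compiling example):
///
/// ```ignore
/// impl ChainWithGrandpa for RialtoLikeChain {
/// 	const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "BridgeRialtoLikeGrandpa";
/// 	const MAX_AUTHORITIES_COUNT: u32 = 1_024;
/// 	const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8;
/// 	const MAX_MANDATORY_HEADER_SIZE: u32 = 256 * 1_024;
/// 	const AVERAGE_HEADER_SIZE: u32 = 4 * 1_024;
/// }
/// ```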
pub trait ChainWithGrandpa: Chain {
/// Name of the bridge GRANDPA pallet (used in `construct_runtime` macro call) that is deployed
/// at some other chain to bridge with this `ChainWithGrandpa`.
///
/// We assume that all chains that are bridging with this `ChainWithGrandpa` are using
/// the same name.
const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str;
/// Max number of GRANDPA authorities at the chain.
///
/// This is a strict constant. If the bridged chain ever has more authorities than that,
/// the GRANDPA bridge pallet may halt.
const MAX_AUTHORITIES_COUNT: u32;
/// Max reasonable number of headers in `votes_ancestries` vector of the GRANDPA justification.
///
/// This isn't a strict limit. The relay may submit justifications with more headers in the
/// ancestry and the pallet will accept such justifications. The limit is only used to compute
/// the maximal refund amount; submitting justifications which exceed the limit may be costly
/// to the submitter.
const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32;
/// Maximal size of the mandatory chain header. A mandatory header is a header that enacts a new
/// GRANDPA authority set (so it has a large digest inside).
///
/// This isn't a strict limit. The relay may submit larger headers and the pallet will accept
/// the call. The limit is only used to compute the maximal refund amount; calls which exceed
/// the limit may be costly to the submitter.
const MAX_MANDATORY_HEADER_SIZE: u32;
/// Average size of the chain header. We don't expect to see headers that change the GRANDPA
/// authority set here (GRANDPA will probably be able to finalize at least one additional header
/// per session on non-test chains), so this is the average size of headers that aren't changing
/// the set.
///
/// This isn't a strict limit. The relay may submit justifications with larger headers and the
/// pallet will accept the call. However, if the total size of all `submit_finality_proof`
/// arguments exceeds the maximal size, computed using this average size, the relayer will only
/// get a partial refund.
///
/// We expect some headers on production chains to be above this size. But they are rare, and
/// if the relayer cares about its profitability, we expect it'll select other headers for
/// submission.
const AVERAGE_HEADER_SIZE: u32;
}
impl<T> ChainWithGrandpa for T
where
T: Chain + UnderlyingChainProvider,
T::Chain: ChainWithGrandpa,
{
const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str =
<T::Chain as ChainWithGrandpa>::WITH_CHAIN_GRANDPA_PALLET_NAME;
const MAX_AUTHORITIES_COUNT: u32 = <T::Chain as ChainWithGrandpa>::MAX_AUTHORITIES_COUNT;
const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 =
<T::Chain as ChainWithGrandpa>::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY;
const MAX_MANDATORY_HEADER_SIZE: u32 =
<T::Chain as ChainWithGrandpa>::MAX_MANDATORY_HEADER_SIZE;
const AVERAGE_HEADER_SIZE: u32 = <T::Chain as ChainWithGrandpa>::AVERAGE_HEADER_SIZE;
}
/// Result of checking maximal expected submit finality proof call weight and size.
#[derive(Debug)]
pub struct SubmitFinalityProofCallExtras {
/// If true, the call weight is larger than what we have assumed.
///
/// We have some assumptions about headers and justifications of the bridged chain.
/// We know that if our assumptions are correct, then the call weight must not exceed
/// some limit. The fee paid for weight above that limit is never refunded.
pub is_weight_limit_exceeded: bool,
/// Extra size (in bytes) that the call includes on top of what we have assumed.
///
/// We have some assumptions about headers and justifications of the bridged chain.
/// We know that if our assumptions are correct, then the call size must not exceed
/// some limit. The fee paid for bytes above that limit is never refunded.
pub extra_size: u32,
/// A flag that is true if the header is a mandatory header, i.e. one that enacts a new
/// authority set.
pub is_mandatory_finality_target: bool,
}
/// Checks whether the given `header` and its finality `proof` fit the maximal expected
/// call limits (size and weight). The submission may sometimes be refunded (see the pallet
/// configuration for details), but it should fit within some limits. If the call includes
/// some extra weight and/or size, though, the refund will be partial or withheld entirely.
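///
/// A rough usage sketch (illustrative only, marked `ignore` so it is not compiled as a doctest;
/// `BridgedChain`, `header` and `justification` stand in for the real bridged chain types):
///
/// ```ignore
/// let extras = submit_finality_proof_limits_extras::<BridgedChain>(&header, &justification);
/// if extras.is_weight_limit_exceeded || extras.extra_size > 0 {
/// 	// the call is heavier and/or larger than assumed - the submitter won't be fully refunded
/// }
/// ```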
pub fn submit_finality_proof_limits_extras<C: ChainWithGrandpa>(
header: &C::Header,
proof: &justification::GrandpaJustification<C::Header>,
) -> SubmitFinalityProofCallExtras {
// the `submit_finality_proof` call will reject justifications with invalid, duplicate,
// unknown and extra signatures. It'll also reject justifications with fewer signatures
// than necessary. So we do not care about extra weight caused by additional signatures here.
let precommits_len = proof.commit.precommits.len().saturated_into();
let required_precommits = precommits_len;
// the weight check is simple - we assume that there are no more than the `limit`
// headers in the ancestry proof
let votes_ancestries_len: u32 = proof.votes_ancestries.len().saturated_into();
let is_weight_limit_exceeded =
votes_ancestries_len > C::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY;
// check if the `finality_target` is a mandatory header. If so, we are ready to refund a
// larger size
let is_mandatory_finality_target =
GrandpaConsensusLogReader::<BlockNumberOf<C>>::find_scheduled_change(header.digest())
.is_some();
// we can estimate the extra call size easily, without any significant additional overhead
let actual_call_size: u32 =
header.encoded_size().saturating_add(proof.encoded_size()).saturated_into();
let max_expected_call_size = max_expected_submit_finality_proof_arguments_size::<C>(
is_mandatory_finality_target,
required_precommits,
);
let extra_size = actual_call_size.saturating_sub(max_expected_call_size);
SubmitFinalityProofCallExtras {
is_weight_limit_exceeded,
extra_size,
is_mandatory_finality_target,
}
}
/// Returns maximal expected size of `submit_finality_proof` call arguments.
pub fn max_expected_submit_finality_proof_arguments_size<C: ChainWithGrandpa>(
is_mandatory_finality_target: bool,
precommits: u32,
) -> u32 {
let max_expected_justification_size =
GrandpaJustification::<HeaderOf<C>>::max_reasonable_size::<C>(precommits);
// call arguments are header and justification
let max_expected_finality_target_size = if is_mandatory_finality_target {
C::MAX_MANDATORY_HEADER_SIZE
} else {
C::AVERAGE_HEADER_SIZE
};
max_expected_finality_target_size.saturating_add(max_expected_justification_size)
}
#[cfg(test)]
mod tests {
use super::*;
use bp_runtime::ChainId;
use frame_support::weights::Weight;
use sp_runtime::{
testing::H256, traits::BlakeTwo256, DigestItem, MultiSignature, StateVersion,
};
struct TestChain;
impl Chain for TestChain {
const ID: ChainId = *b"test";
type BlockNumber = u32;
type Hash = H256;
type Hasher = BlakeTwo256;
type Header = sp_runtime::generic::Header<u32, BlakeTwo256>;
type AccountId = u64;
type Balance = u64;
type Nonce = u64;
type Signature = MultiSignature;
const STATE_VERSION: StateVersion = StateVersion::V1;
fn max_extrinsic_size() -> u32 {
0
}
fn max_extrinsic_weight() -> Weight {
Weight::zero()
}
}
impl ChainWithGrandpa for TestChain {
const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "Test";
const MAX_AUTHORITIES_COUNT: u32 = 128;
const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 2;
const MAX_MANDATORY_HEADER_SIZE: u32 = 100_000;
const AVERAGE_HEADER_SIZE: u32 = 1_024;
}
#[test]
fn max_expected_submit_finality_proof_arguments_size_respects_mandatory_argument() {
assert!(
max_expected_submit_finality_proof_arguments_size::<TestChain>(true, 100) >
max_expected_submit_finality_proof_arguments_size::<TestChain>(false, 100),
);
}
#[test]
fn find_scheduled_change_works() {
let scheduled_change = ScheduledChange { next_authorities: vec![], delay: 0 };
// first
let mut digest = Digest::default();
digest.push(DigestItem::Consensus(
GRANDPA_ENGINE_ID,
ConsensusLog::ScheduledChange(scheduled_change.clone()).encode(),
));
assert_eq!(
GrandpaConsensusLogReader::find_scheduled_change(&digest),
Some(scheduled_change.clone())
);
// not first
let mut digest = Digest::default();
digest.push(DigestItem::Consensus(
GRANDPA_ENGINE_ID,
ConsensusLog::<u64>::OnDisabled(0).encode(),
));
digest.push(DigestItem::Consensus(
GRANDPA_ENGINE_ID,
ConsensusLog::ScheduledChange(scheduled_change.clone()).encode(),
));
assert_eq!(
GrandpaConsensusLogReader::find_scheduled_change(&digest),
Some(scheduled_change.clone())
);
}
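// Illustrative companion to `find_scheduled_change_works`: checks that `find_forced_change`
// returns the delay together with the change, and ignores non-forced change logs. Uses only
// types already imported in this module.
#[test]
fn find_forced_change_works() {
	let scheduled_change = ScheduledChange { next_authorities: vec![], delay: 0u64 };
	// a forced change digest item is found and returned together with its delay
	let mut digest = Digest::default();
	digest.push(DigestItem::Consensus(
		GRANDPA_ENGINE_ID,
		ConsensusLog::ForcedChange(5, scheduled_change.clone()).encode(),
	));
	assert_eq!(
		GrandpaConsensusLogReader::find_forced_change(&digest),
		Some((5, scheduled_change.clone()))
	);
	// a scheduled (non-forced) change is not reported as a forced change
	let mut digest = Digest::default();
	digest.push(DigestItem::Consensus(
		GRANDPA_ENGINE_ID,
		ConsensusLog::ScheduledChange(scheduled_change.clone()).encode(),
	));
	assert_eq!(GrandpaConsensusLogReader::<u64>::find_forced_change(&digest), None);
}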
}