// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]

//! A BEEFY+MMR pallet combo.
//!
//! While both BEEFY and the Merkle Mountain Range (MMR) can be used separately,
//! these tools were designed to work together.
//!
//! The pallet provides a standardized MMR Leaf format that can be used
//! to bridge BEEFY+MMR-based networks (both standalone and Polkadot-like).
//!
//! The MMR leaf contains:
//! 1. Parent block number and parent block hash.
//! 2. Merkle Tree Root Hash of the next BEEFY validator set.
//! 3. Arbitrary extra leaf data that downstream pallets can use to include custom data.
//!
//! and, thanks to versioning, it can easily be updated in the future (see the illustrative
//! sketch below).
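//!
//! A rough sketch of the leaf contents (illustrative only; the concrete type is
//! [`MmrLeaf`] and the values below are stand-ins for the runtime's actual types):
//!
//! ```ignore
//! MmrLeaf {
//! 	// Leaf format version (major, minor); see `MmrLeafVersion`.
//! 	version: MmrLeafVersion::new(0, 0),
//! 	// Parent block number and parent block hash.
//! 	parent_number_and_hash: (parent_number, parent_hash),
//! 	// Id, length and Merkle root of the next BEEFY authority set.
//! 	beefy_next_authority_set: BeefyNextAuthoritySet { id, len, keyset_commitment },
//! 	// Arbitrary extra data provided by the configured `BeefyDataProvider`.
//! 	leaf_extra: extra_data,
//! }
//! ```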

extern crate alloc;

use sp_runtime::{
	generic::OpaqueDigestItemId,
	traits::{Convert, Header, Member},
	SaturatedConversion,
};

use alloc::vec::Vec;
use codec::Decode;
use pallet_mmr::{primitives::AncestryProof, LeafDataProvider, NodesUtils, ParentNumberAndHash};
use sp_consensus_beefy::{
	known_payloads,
	mmr::{BeefyAuthoritySet, BeefyDataProvider, BeefyNextAuthoritySet, MmrLeaf, MmrLeafVersion},
	AncestryHelper, AncestryHelperWeightInfo, Commitment, ConsensusLog,
	ValidatorSet as BeefyValidatorSet,
};

use frame_support::{crypto::ecdsa::ECDSAExt, pallet_prelude::Weight, traits::Get};
use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor};

pub use pallet::*;
pub use weights::WeightInfo;

mod benchmarking;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
mod weights;

/// A hook that deposits a BEEFY consensus digest item containing the MMR root hash whenever a
/// new MMR root is computed.
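///
/// A minimal sketch of how a runtime would typically plug this hook into `pallet_mmr`
/// (the `Runtime` name and the elided items are assumptions, not part of this pallet):
///
/// ```ignore
/// impl pallet_mmr::Config for Runtime {
/// 	// ... other associated types ...
/// 	type Hashing = sp_consensus_beefy::MmrHashing;
/// 	type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest<Runtime>;
/// 	// ...
/// }
/// ```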
pub struct DepositBeefyDigest<T>(core::marker::PhantomData<T>);

impl<T> pallet_mmr::primitives::OnNewRoot<sp_consensus_beefy::MmrRootHash> for DepositBeefyDigest<T>
where
	T: pallet_mmr::Config<Hashing = sp_consensus_beefy::MmrHashing>,
	T: pallet_beefy::Config,
{
	fn on_new_root(root: &sp_consensus_beefy::MmrRootHash) {
		let digest = sp_runtime::generic::DigestItem::Consensus(
			sp_consensus_beefy::BEEFY_ENGINE_ID,
			codec::Encode::encode(&sp_consensus_beefy::ConsensusLog::<
				<T as pallet_beefy::Config>::BeefyId,
			>::MmrRoot(*root)),
		);
		frame_system::Pallet::<T>::deposit_log(digest);
	}
}

/// Convert BEEFY secp256k1 public keys into Ethereum addresses.
///
/// Keys that fail to convert are mapped to an empty `Vec` (a debug message is logged).
pub struct BeefyEcdsaToEthereum;
impl Convert<sp_consensus_beefy::ecdsa_crypto::AuthorityId, Vec<u8>> for BeefyEcdsaToEthereum {
	fn convert(beefy_id: sp_consensus_beefy::ecdsa_crypto::AuthorityId) -> Vec<u8> {
		sp_core::ecdsa::Public::from(beefy_id)
			.to_eth_address()
			.map(|v| v.to_vec())
			.map_err(|_| {
				log::debug!(target: "runtime::beefy", "Failed to convert BEEFY PublicKey to ETH address!");
			})
			.unwrap_or_default()
	}
}

type MerkleRootOf<T> = <<T as pallet_mmr::Config>::Hashing as sp_runtime::traits::Hash>::Output;

#[frame_support::pallet]
pub mod pallet {
	#![allow(missing_docs)]

	use super::*;
	use frame_support::pallet_prelude::*;

	/// BEEFY-MMR pallet.
	#[pallet::pallet]
	pub struct Pallet<T>(_);

	/// The module's configuration trait.
	#[pallet::config]
	#[pallet::disable_frame_system_supertrait_check]
	pub trait Config: pallet_mmr::Config + pallet_beefy::Config {
		/// Current leaf version.
		///
		/// Specifies the version number added to every leaf that gets appended to the MMR.
		/// Read more about versioning leaves in the [`MmrLeafVersion`] docs.
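		///
		/// A minimal sketch of a runtime-side definition (the chosen version numbers are an
		/// assumption):
		///
		/// ```ignore
		/// parameter_types! {
		/// 	pub LeafVersion: MmrLeafVersion = MmrLeafVersion::new(0, 0);
		/// }
		/// ```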
		type LeafVersion: Get<MmrLeafVersion>;

		/// Convert BEEFY AuthorityId to a form that would end up in the Merkle Tree.
		///
		/// For instance, for ECDSA (secp256k1) we want to store uncompressed public keys
		/// (65 bytes) and later convert them to Ethereum addresses (160 bits) to simplify
		/// their use on the Ethereum chain, while the rest of the Substrate codebase stores
		/// them compressed (33 bytes) for efficiency reasons.
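		///
		/// A minimal sketch, assuming ECDSA BEEFY keys (the [`BeefyEcdsaToEthereum`] converter
		/// provided by this crate fits this associated type):
		///
		/// ```ignore
		/// type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum;
		/// ```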
		type BeefyAuthorityToMerkleLeaf: Convert<<Self as pallet_beefy::Config>::BeefyId, Vec<u8>>;

		/// The type expected for the leaf extra data.
		type LeafExtra: Member + codec::FullCodec;

		/// Retrieve arbitrary data that should be added to the MMR leaf.
		type BeefyDataProvider: BeefyDataProvider<Self::LeafExtra>;

		/// Weight information for this pallet.
		type WeightInfo: WeightInfo;
	}

	/// Details of current BEEFY authority set.
	#[pallet::storage]
	pub type BeefyAuthorities<T: Config> =
		StorageValue<_, BeefyAuthoritySet<MerkleRootOf<T>>, ValueQuery>;

	/// Details of next BEEFY authority set.
	///
	/// This storage entry is used as a cache of the next authority set, computed in
	/// `Pallet::compute_authority_set`.
	#[pallet::storage]
	pub type BeefyNextAuthorities<T: Config> =
		StorageValue<_, BeefyNextAuthoritySet<MerkleRootOf<T>>, ValueQuery>;
}

impl<T: Config> LeafDataProvider for Pallet<T> {
	type LeafData = MmrLeaf<
		BlockNumberFor<T>,
		<T as frame_system::Config>::Hash,
		MerkleRootOf<T>,
		T::LeafExtra,
	>;

	fn leaf_data() -> Self::LeafData {
		MmrLeaf {
			version: T::LeafVersion::get(),
			parent_number_and_hash: ParentNumberAndHash::<T>::leaf_data(),
			leaf_extra: T::BeefyDataProvider::extra_data(),
			beefy_next_authority_set: BeefyNextAuthorities::<T>::get(),
		}
	}
}

impl<T> sp_consensus_beefy::OnNewValidatorSet<<T as pallet_beefy::Config>::BeefyId> for Pallet<T>
where
	T: pallet::Config,
{
	/// Compute and cache BEEFY authority sets based on updated BEEFY validator sets.
	fn on_new_validator_set(
		current_set: &BeefyValidatorSet<<T as pallet_beefy::Config>::BeefyId>,
		next_set: &BeefyValidatorSet<<T as pallet_beefy::Config>::BeefyId>,
	) {
		let current = Pallet::<T>::compute_authority_set(current_set);
		let next = Pallet::<T>::compute_authority_set(next_set);
		// cache the result
		BeefyAuthorities::<T>::put(&current);
		BeefyNextAuthorities::<T>::put(&next);
	}
}

impl<T: Config> AncestryHelper<HeaderFor<T>> for Pallet<T>
where
	T: pallet_mmr::Config<Hashing = sp_consensus_beefy::MmrHashing>,
{
	type Proof = AncestryProof<MerkleRootOf<T>>;
	type ValidationContext = MerkleRootOf<T>;

	fn generate_proof(
		prev_block_number: BlockNumberFor<T>,
		best_known_block_number: Option<BlockNumberFor<T>>,
	) -> Option<Self::Proof> {
		pallet_mmr::Pallet::<T>::generate_ancestry_proof(prev_block_number, best_known_block_number)
			.map_err(|e| {
				log::error!(
					target: "runtime::beefy",
					"Failed to generate ancestry proof for block {:?} at {:?}: {:?}",
					prev_block_number,
					best_known_block_number,
					e
				);
				e
			})
			.ok()
	}

	fn extract_validation_context(header: HeaderFor<T>) -> Option<Self::ValidationContext> {
		// Check if the provided header is canonical.
		let expected_hash = frame_system::Pallet::<T>::block_hash(header.number());
		if expected_hash != header.hash() {
			return None;
		}

		// Extract the MMR root from the header digest
		header.digest().convert_first(|l| {
			l.try_to(OpaqueDigestItemId::Consensus(&sp_consensus_beefy::BEEFY_ENGINE_ID))
				.and_then(|log: ConsensusLog<<T as pallet_beefy::Config>::BeefyId>| match log {
					ConsensusLog::MmrRoot(mmr_root) => Some(mmr_root),
					_ => None,
				})
		})
	}

	fn is_non_canonical(
		commitment: &Commitment<BlockNumberFor<T>>,
		proof: Self::Proof,
		context: Self::ValidationContext,
	) -> bool {
		let commitment_leaf_count =
			match pallet_mmr::Pallet::<T>::block_num_to_leaf_count(commitment.block_number) {
				Ok(commitment_leaf_count) => commitment_leaf_count,
				Err(_) => {
					// We can't prove that the commitment is non-canonical if the
					// `commitment.block_number` is invalid.
					return false
				},
			};
		if commitment_leaf_count != proof.prev_leaf_count {
			// Can't prove that the commitment is non-canonical if the `commitment.block_number`
			// doesn't match the ancestry proof.
			return false;
		}

		let canonical_mmr_root = context;
		let canonical_prev_root =
			match pallet_mmr::Pallet::<T>::verify_ancestry_proof(canonical_mmr_root, proof) {
				Ok(canonical_prev_root) => canonical_prev_root,
				Err(_) => {
					// Can't prove that the commitment is non-canonical if the proof
					// is invalid.
					return false
				},
			};

		let mut found_commitment_root = false;
		let commitment_roots = commitment
			.payload
			.get_all_decoded::<MerkleRootOf<T>>(&known_payloads::MMR_ROOT_ID);
		for maybe_commitment_root in commitment_roots {
			match maybe_commitment_root {
				Some(commitment_root) => {
					found_commitment_root = true;
					if canonical_prev_root != commitment_root {
						// If the commitment contains an MMR root that is not equal to
						// `canonical_prev_root`, the commitment is invalid.
						return true;
					}
				},
				None => {
					// If the commitment contains an MMR root that can't be decoded,
					// the commitment is invalid.
					return true;
				},
			}
		}
		if !found_commitment_root {
			// If the commitment doesn't contain any MMR root, even though the proof is
			// valid, the commitment is invalid.
			return true;
		}

		false
	}
}

impl<T: Config> AncestryHelperWeightInfo<HeaderFor<T>> for Pallet<T>
where
	T: pallet_mmr::Config<Hashing = sp_consensus_beefy::MmrHashing>,
{
	fn extract_validation_context() -> Weight {
		<T as Config>::WeightInfo::extract_validation_context()
	}

	fn is_non_canonical(proof: &<Self as AncestryHelper<HeaderFor<T>>>::Proof) -> Weight {
		let mmr_utils = NodesUtils::new(proof.leaf_count);
		let num_peaks = mmr_utils.number_of_peaks();

		// The approximated cost of verifying an ancestry proof with `n` nodes.
		// We add the previous peaks to the total number of nodes,
		// since they have to be processed as well.
		<T as Config>::WeightInfo::n_items_proof_is_non_canonical(
			proof.items.len().saturating_add(proof.prev_peaks.len()).saturated_into(),
		)
		// `n_items_proof_is_non_canonical()` uses inflated proofs that contain all the leaves,
		// where no peak needs to be read. So we also need to add the cost of reading the peaks.
		.saturating_add(<T as Config>::WeightInfo::read_peak().saturating_mul(num_peaks))
	}
}

impl<T: Config> Pallet<T> {
	/// Return the currently active BEEFY authority set proof.
	pub fn authority_set_proof() -> BeefyAuthoritySet<MerkleRootOf<T>> {
		BeefyAuthorities::<T>::get()
	}

	/// Return the next/queued BEEFY authority set proof.
	pub fn next_authority_set_proof() -> BeefyNextAuthoritySet<MerkleRootOf<T>> {
		BeefyNextAuthorities::<T>::get()
	}

	/// Returns details of a BEEFY authority set.
	///
	/// Details contain the authority set id, the authority set length and a Merkle root
	/// constructed from the authorities' public keys after conversion via
	/// `Config::BeefyAuthorityToMerkleLeaf` (e.g. uncompressed secp256k1 public keys
	/// converted to Ethereum addresses).
	fn compute_authority_set(
		validator_set: &BeefyValidatorSet<<T as pallet_beefy::Config>::BeefyId>,
	) -> BeefyAuthoritySet<MerkleRootOf<T>> {
		let id = validator_set.id();
		let beefy_addresses = validator_set
			.validators()
			.into_iter()
			.cloned()
			.map(T::BeefyAuthorityToMerkleLeaf::convert)
			.collect::<Vec<_>>();
		let default_eth_addr = [0u8; 20];
		let len = beefy_addresses.len() as u32;
		let uninitialized_addresses = beefy_addresses
			.iter()
			.filter(|&addr| addr.as_slice().eq(&default_eth_addr))
			.count();
		if uninitialized_addresses > 0 {
			log::error!(
				target: "runtime::beefy",
				"Failed to convert {} out of {} BEEFY PublicKeys to ETH addresses!",
				uninitialized_addresses,
				len,
			);
		}
		let keyset_commitment = binary_merkle_tree::merkle_root::<
			<T as pallet_mmr::Config>::Hashing,
			_,
		>(beefy_addresses)
		.into();
		BeefyAuthoritySet { id, len, keyset_commitment }
	}
}

sp_api::decl_runtime_apis! {
	/// API useful for BEEFY light clients.
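	///
	/// A minimal sketch of a runtime implementation delegating to this pallet (the `Runtime`,
	/// `Block`, `Hash` and `BeefyMmrLeaf` pallet-instance names are assumptions):
	///
	/// ```ignore
	/// impl pallet_beefy_mmr::BeefyMmrApi<Block, Hash> for Runtime {
	/// 	fn authority_set_proof() -> BeefyAuthoritySet<Hash> {
	/// 		BeefyMmrLeaf::authority_set_proof()
	/// 	}
	///
	/// 	fn next_authority_set_proof() -> BeefyNextAuthoritySet<Hash> {
	/// 		BeefyMmrLeaf::next_authority_set_proof()
	/// 	}
	/// }
	/// ```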
	pub trait BeefyMmrApi<H>
	where
		BeefyAuthoritySet<H>: Decode,
	{
		/// Return the currently active BEEFY authority set proof.
		fn authority_set_proof() -> BeefyAuthoritySet<H>;

		/// Return the next/queued BEEFY authority set proof.
		fn next_authority_set_proof() -> BeefyNextAuthoritySet<H>;
	}
}