
snowbridge_pallet_ethereum_client/lib.rs

// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: 2023 Snowfork <hello@snowfork.com>
//! Ethereum Beacon Client
//!
//! A light client that verifies consensus updates signed by the sync committee of the beacon chain.
//!
//! # Extrinsics
//!
//! ## Governance
//!
//! * [`Call::force_checkpoint`]: Set the initial trusted consensus checkpoint.
//! * [`Call::set_operating_mode`]: Set the operating mode of the pallet. Can be used to disable
//!   processing of consensus updates.
//!
//! ## Consensus Updates
//!
//! * [`Call::submit`]: Submit a finalized beacon header with an optional sync committee update
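//!
//! # Example Configuration
//!
//! A minimal sketch of how a runtime might configure this pallet. The `Runtime` type, the fork
//! epochs/versions and the constants below are illustrative placeholders rather than values for
//! any real network, and the example is not compiled.
//!
//! ```ignore
//! use snowbridge_beacon_primitives::{Fork, ForkVersions};
//!
//! frame_support::parameter_types! {
//! 	pub ChainForkVersions: ForkVersions = ForkVersions {
//! 		genesis: Fork { version: [0, 0, 0, 0], epoch: 0 },
//! 		altair: Fork { version: [1, 0, 0, 0], epoch: 0 },
//! 		bellatrix: Fork { version: [2, 0, 0, 0], epoch: 0 },
//! 		capella: Fork { version: [3, 0, 0, 0], epoch: 0 },
//! 		deneb: Fork { version: [4, 0, 0, 0], epoch: 0 },
//! 		electra: Fork { version: [5, 0, 0, 0], epoch: 0 },
//! 	};
//! 	pub const FreeHeadersInterval: u32 = 32;
//! }
//!
//! impl snowbridge_pallet_ethereum_client::Config for Runtime {
//! 	type RuntimeEvent = RuntimeEvent;
//! 	type ForkVersions = ChainForkVersions;
//! 	type FreeHeadersInterval = FreeHeadersInterval;
//! 	type WeightInfo = ();
//! }
//! ```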
#![cfg_attr(not(feature = "std"), no_std)]

pub mod config;
pub mod functions;
pub mod impls;
pub mod types;
pub mod weights;

#[cfg(any(test, feature = "fuzzing"))]
pub mod mock;

#[cfg(test)]
mod tests;

#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;

use frame_support::{
	dispatch::{DispatchResult, PostDispatchInfo},
	pallet_prelude::OptionQuery,
	traits::Get,
	transactional,
};
use frame_system::ensure_signed;
use snowbridge_beacon_primitives::{
	fast_aggregate_verify,
	merkle_proof::{generalized_index_length, subtree_index},
	verify_merkle_branch, verify_receipt_proof, BeaconHeader, BlsError, CompactBeaconState,
	ForkData, ForkVersion, ForkVersions, PublicKeyPrepared, SigningData,
};
use snowbridge_core::{BasicOperatingMode, RingBufferMap};
use sp_core::H256;
use sp_std::prelude::*;
pub use weights::WeightInfo;

use functions::{
	compute_epoch, compute_period, decompress_sync_committee_bits, sync_committee_sum,
};
use types::{CheckpointUpdate, FinalizedBeaconStateBuffer, SyncCommitteePrepared, Update};

pub use pallet::*;

pub use config::SLOTS_PER_HISTORICAL_ROOT;

pub const LOG_TARGET: &str = "ethereum-client";

#[frame_support::pallet]
pub mod pallet {
	use super::*;

	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

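	/// Upper bound on the number of finalized headers kept in the ring buffer.
	/// As a rough worked example, with the mainnet preset of 256 epochs per sync committee
	/// period (an assumption; see `config::EPOCHS_PER_SYNC_COMMITTEE_PERIOD`), this allows
	/// up to 256 * 20 = 5120 entries.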
	#[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)]
	#[codec(mel_bound(T: Config))]
	#[scale_info(skip_type_params(T))]
	pub struct MaxFinalizedHeadersToKeep<T: Config>(PhantomData<T>);
	impl<T: Config> Get<u32> for MaxFinalizedHeadersToKeep<T> {
		fn get() -> u32 {
			const MAX_REDUNDANCY: u32 = 20;
			config::EPOCHS_PER_SYNC_COMMITTEE_PERIOD as u32 * MAX_REDUNDANCY
		}
	}

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {
		#[allow(deprecated)]
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
		#[pallet::constant]
		type ForkVersions: Get<ForkVersions>;
		/// Minimum gap between finalized headers for an update to be free.
		#[pallet::constant]
		type FreeHeadersInterval: Get<u32>;
		type WeightInfo: WeightInfo;
	}

	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		BeaconHeaderImported {
			block_hash: H256,
			slot: u64,
		},
		SyncCommitteeUpdated {
			period: u64,
		},
		/// Set OperatingMode
		OperatingModeChanged {
			mode: BasicOperatingMode,
		},
	}

	#[pallet::error]
	pub enum Error<T> {
		SkippedSyncCommitteePeriod,
		SyncCommitteeUpdateRequired,
		/// Attested header is older than latest finalized header.
		IrrelevantUpdate,
		NotBootstrapped,
		SyncCommitteeParticipantsNotSupermajority,
		InvalidHeaderMerkleProof,
		InvalidSyncCommitteeMerkleProof,
		InvalidExecutionHeaderProof,
		InvalidAncestryMerkleProof,
		InvalidBlockRootsRootMerkleProof,
		/// The gap between the finalized headers is larger than the sync committee period,
		/// rendering execution headers unprovable using ancestry proofs (the `block_roots`
		/// buffer covers the same number of slots as a sync committee period).
		InvalidFinalizedHeaderGap,
		HeaderNotFinalized,
		BlockBodyHashTreeRootFailed,
		HeaderHashTreeRootFailed,
		SyncCommitteeHashTreeRootFailed,
		SigningRootHashTreeRootFailed,
		ForkDataHashTreeRootFailed,
		ExpectedFinalizedHeaderNotStored,
		BLSPreparePublicKeysFailed,
		BLSVerificationFailed(BlsError),
		InvalidUpdateSlot,
		/// The given update is not in the expected period, or the given next sync committee does
		/// not match the next sync committee in storage.
		InvalidSyncCommitteeUpdate,
		ExecutionHeaderTooFarBehind,
		ExecutionHeaderSkippedBlock,
		Halted,
	}

	/// Latest imported checkpoint root
	#[pallet::storage]
	#[pallet::getter(fn initial_checkpoint_root)]
	pub type InitialCheckpointRoot<T: Config> = StorageValue<_, H256, ValueQuery>;

	/// Latest imported finalized block root
	#[pallet::storage]
	#[pallet::getter(fn latest_finalized_block_root)]
	pub type LatestFinalizedBlockRoot<T: Config> = StorageValue<_, H256, ValueQuery>;

	/// Beacon state by finalized block root
	#[pallet::storage]
	#[pallet::getter(fn finalized_beacon_state)]
	pub type FinalizedBeaconState<T: Config> =
		StorageMap<_, Identity, H256, CompactBeaconState, OptionQuery>;

	/// Finalized Headers: Current position in ring buffer
	#[pallet::storage]
	pub type FinalizedBeaconStateIndex<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// Finalized Headers: Mapping of ring buffer index to a pruning candidate
	#[pallet::storage]
	pub type FinalizedBeaconStateMapping<T: Config> =
		StorageMap<_, Identity, u32, H256, ValueQuery>;

	#[pallet::storage]
	#[pallet::getter(fn validators_root)]
	pub type ValidatorsRoot<T: Config> = StorageValue<_, H256, ValueQuery>;

	/// Sync committee for current period
	#[pallet::storage]
	pub type CurrentSyncCommittee<T: Config> = StorageValue<_, SyncCommitteePrepared, ValueQuery>;

	/// Sync committee for next period
	#[pallet::storage]
	pub type NextSyncCommittee<T: Config> = StorageValue<_, SyncCommitteePrepared, ValueQuery>;

	/// The last period where the next sync committee was updated for free.
	#[pallet::storage]
	pub type LatestSyncCommitteeUpdatePeriod<T: Config> = StorageValue<_, u64, ValueQuery>;

	/// The current operating mode of the pallet.
	#[pallet::storage]
	#[pallet::getter(fn operating_mode)]
	pub type OperatingMode<T: Config> = StorageValue<_, BasicOperatingMode, ValueQuery>;

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		#[pallet::call_index(0)]
		#[pallet::weight(T::WeightInfo::force_checkpoint())]
		#[transactional]
		/// Used for pallet initialization and light client resetting. Needs to be called by
		/// the root origin.
		pub fn force_checkpoint(
			origin: OriginFor<T>,
			update: Box<CheckpointUpdate>,
		) -> DispatchResult {
			ensure_root(origin)?;
			Self::process_checkpoint_update(&update)?;
			Ok(())
		}

		#[pallet::call_index(1)]
		#[pallet::weight({
			match update.next_sync_committee_update {
				None => T::WeightInfo::submit(),
				Some(_) => T::WeightInfo::submit_with_sync_committee(),
			}
		})]
		#[transactional]
		/// Submits a new finalized beacon header update. The update may contain the next
		/// sync committee.
		pub fn submit(origin: OriginFor<T>, update: Box<Update>) -> DispatchResultWithPostInfo {
			ensure_signed(origin)?;
			ensure!(!Self::operating_mode().is_halted(), Error::<T>::Halted);
			Self::process_update(&update)
		}

		/// Halt or resume all pallet operations. May only be called by root.
		#[pallet::call_index(3)]
		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
		pub fn set_operating_mode(
			origin: OriginFor<T>,
			mode: BasicOperatingMode,
		) -> DispatchResult {
			ensure_root(origin)?;
			OperatingMode::<T>::set(mode);
			Self::deposit_event(Event::OperatingModeChanged { mode });
			Ok(())
		}
	}

	impl<T: Config> Pallet<T> {
		/// Forces a finalized beacon header checkpoint update. The current sync committee,
		/// along with a header attesting to it, should be provided.
		/// A `block_roots` proof should also be provided; it is used for ancestry proofs
		/// in execution header updates.
		pub(crate) fn process_checkpoint_update(update: &CheckpointUpdate) -> DispatchResult {
			let sync_committee_root = update
				.current_sync_committee
				.hash_tree_root()
				.map_err(|_| Error::<T>::SyncCommitteeHashTreeRootFailed)?;

			let fork_versions = T::ForkVersions::get();
			let sync_committee_gindex = Self::current_sync_committee_gindex_at_slot(
				update.header.slot,
				fork_versions.clone(),
			);
			// Verifies the sync committee in the Beacon state.
			ensure!(
				verify_merkle_branch(
					sync_committee_root,
					&update.current_sync_committee_branch,
					subtree_index(sync_committee_gindex),
					generalized_index_length(sync_committee_gindex),
					update.header.state_root
				),
				Error::<T>::InvalidSyncCommitteeMerkleProof
			);

			let header_root: H256 = update
				.header
				.hash_tree_root()
				.map_err(|_| Error::<T>::HeaderHashTreeRootFailed)?;

			// This is used for ancestry proofs in ExecutionHeader updates. This verifies the
			// BeaconState: the beacon state root is the tree root; the `block_roots` hash is the
			// tree leaf.
			let block_roots_gindex =
				Self::block_roots_gindex_at_slot(update.header.slot, fork_versions);
			ensure!(
				verify_merkle_branch(
					update.block_roots_root,
					&update.block_roots_branch,
					subtree_index(block_roots_gindex),
					generalized_index_length(block_roots_gindex),
					update.header.state_root
				),
				Error::<T>::InvalidBlockRootsRootMerkleProof
			);

			let sync_committee_prepared: SyncCommitteePrepared = (&update.current_sync_committee)
				.try_into()
				.map_err(|_| <Error<T>>::BLSPreparePublicKeysFailed)?;
			<CurrentSyncCommittee<T>>::set(sync_committee_prepared);
			<NextSyncCommittee<T>>::kill();
			InitialCheckpointRoot::<T>::set(header_root);

			Self::store_validators_root(update.validators_root);
			Self::store_finalized_header(update.header, update.block_roots_root)?;

			Ok(())
		}

		pub(crate) fn process_update(update: &Update) -> DispatchResultWithPostInfo {
			Self::verify_update(update)?;
			Self::apply_update(update)
		}

		/// References and strictly follows <https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#validate_light_client_update>.
		/// Verifies that the provided update (including any next sync committee) is valid through
		/// a series of checks, including checking that a sync committee period isn't skipped and
		/// that the header is signed by the current sync committee.
		fn verify_update(update: &Update) -> DispatchResult {
			// Verify sync committee has sufficient participants.
			let participation =
				decompress_sync_committee_bits(update.sync_aggregate.sync_committee_bits);
			Self::sync_committee_participation_is_supermajority(&participation)?;

			// Verify update does not skip a sync committee period.
			ensure!(
				update.signature_slot > update.attested_header.slot &&
					update.attested_header.slot >= update.finalized_header.slot,
				Error::<T>::InvalidUpdateSlot
			);
			// Retrieve latest finalized state.
			let latest_finalized_state =
				FinalizedBeaconState::<T>::get(LatestFinalizedBlockRoot::<T>::get())
					.ok_or(Error::<T>::NotBootstrapped)?;
			let store_period = compute_period(latest_finalized_state.slot);
			let signature_period = compute_period(update.signature_slot);
			if <NextSyncCommittee<T>>::exists() {
				ensure!(
					(store_period..=store_period + 1).contains(&signature_period),
					Error::<T>::SkippedSyncCommitteePeriod
				)
			} else {
				ensure!(signature_period == store_period, Error::<T>::SkippedSyncCommitteePeriod)
			}

			// Verify update is relevant.
			let update_attested_period = compute_period(update.attested_header.slot);
			let update_finalized_period = compute_period(update.finalized_header.slot);
			let update_has_next_sync_committee = !<NextSyncCommittee<T>>::exists() &&
				(update.next_sync_committee_update.is_some() &&
					update_attested_period == store_period);
			ensure!(
				update.attested_header.slot > latest_finalized_state.slot ||
					update_has_next_sync_committee,
				Error::<T>::IrrelevantUpdate
			);

			// Verify that the gap between the current finalized header and the newly imported
			// header is not larger than the sync committee period, otherwise we cannot do
			// ancestry proofs for execution headers in the gap.
			ensure!(
				latest_finalized_state
					.slot
					.saturating_add(config::SLOTS_PER_HISTORICAL_ROOT as u64) >=
					update.finalized_header.slot,
				Error::<T>::InvalidFinalizedHeaderGap
			);

			let fork_versions = T::ForkVersions::get();
			let finalized_root_gindex = Self::finalized_root_gindex_at_slot(
				update.attested_header.slot,
				fork_versions.clone(),
			);
			// Verify that the `finality_branch`, if present, confirms `finalized_header` to match
			// the finalized checkpoint root saved in the state of `attested_header`.
			let finalized_block_root: H256 = update
				.finalized_header
				.hash_tree_root()
				.map_err(|_| Error::<T>::HeaderHashTreeRootFailed)?;
			ensure!(
				verify_merkle_branch(
					finalized_block_root,
					&update.finality_branch,
					subtree_index(finalized_root_gindex),
					generalized_index_length(finalized_root_gindex),
					update.attested_header.state_root
				),
				Error::<T>::InvalidHeaderMerkleProof
			);

			// Though the following check does not belong to the ALC spec, we verify that
			// `block_roots_root` matches the `block_roots` root saved in the state of
			// `finalized_header`, so that it can be cached for later use in `verify_ancestry_proof`.
			let block_roots_gindex = Self::block_roots_gindex_at_slot(
				update.finalized_header.slot,
				fork_versions.clone(),
			);
			ensure!(
				verify_merkle_branch(
					update.block_roots_root,
					&update.block_roots_branch,
					subtree_index(block_roots_gindex),
					generalized_index_length(block_roots_gindex),
					update.finalized_header.state_root
				),
				Error::<T>::InvalidBlockRootsRootMerkleProof
			);

			// Verify that the `next_sync_committee`, if present, actually is the next sync
			// committee saved in the state of the `attested_header`.
			if let Some(next_sync_committee_update) = &update.next_sync_committee_update {
				let sync_committee_root = next_sync_committee_update
					.next_sync_committee
					.hash_tree_root()
					.map_err(|_| Error::<T>::SyncCommitteeHashTreeRootFailed)?;
				if update_attested_period == store_period && <NextSyncCommittee<T>>::exists() {
					let next_committee_root = <NextSyncCommittee<T>>::get().root;
					ensure!(
						sync_committee_root == next_committee_root,
						Error::<T>::InvalidSyncCommitteeUpdate
					);
				}
				let next_sync_committee_gindex = Self::next_sync_committee_gindex_at_slot(
					update.attested_header.slot,
					fork_versions,
				);
				ensure!(
					verify_merkle_branch(
						sync_committee_root,
						&next_sync_committee_update.next_sync_committee_branch,
						subtree_index(next_sync_committee_gindex),
						generalized_index_length(next_sync_committee_gindex),
						update.attested_header.state_root
					),
					Error::<T>::InvalidSyncCommitteeMerkleProof
				);
			} else {
				ensure!(
					update_finalized_period == store_period,
					Error::<T>::SyncCommitteeUpdateRequired
				);
			}

			// Verify sync committee aggregate signature.
			let sync_committee = if signature_period == store_period {
				<CurrentSyncCommittee<T>>::get()
			} else {
				<NextSyncCommittee<T>>::get()
			};
			let absent_pubkeys =
				Self::find_pubkeys(&participation, (*sync_committee.pubkeys).as_ref(), false);
			let signing_root = Self::signing_root(
				&update.attested_header,
				Self::validators_root(),
				update.signature_slot,
			)?;
			// As suggested in <https://eth2book.info/capella/part2/building_blocks/signatures/#sync-aggregates>,
			// start from the aggregate_pubkey of the full set and then subtract the absent
			// minority that did not participate.
			fast_aggregate_verify(
				&sync_committee.aggregate_pubkey,
				&absent_pubkeys,
				signing_root,
				&update.sync_aggregate.sync_committee_signature,
			)
			.map_err(|e| Error::<T>::BLSVerificationFailed(e))?;

			Ok(())
		}

		/// References and strictly follows <https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#apply_light_client_update>.
		/// Applies a finalized beacon header update to the beacon client. If a next sync committee
		/// is present in the update, verifies the sync committee by converting it to a
		/// `SyncCommitteePrepared` type. Stores the provided finalized header. Updates are free
		/// if certain conditions specified in `check_refundable` are met.
		fn apply_update(update: &Update) -> DispatchResultWithPostInfo {
			let latest_finalized_state =
				FinalizedBeaconState::<T>::get(LatestFinalizedBlockRoot::<T>::get())
					.ok_or(Error::<T>::NotBootstrapped)?;

			let pays_fee = Self::check_refundable(update, latest_finalized_state.slot);
			let actual_weight = match update.next_sync_committee_update {
				None => T::WeightInfo::submit(),
				Some(_) => T::WeightInfo::submit_with_sync_committee(),
			};

			if let Some(next_sync_committee_update) = &update.next_sync_committee_update {
				let store_period = compute_period(latest_finalized_state.slot);
				let update_finalized_period = compute_period(update.finalized_header.slot);
				let sync_committee_prepared: SyncCommitteePrepared = (&next_sync_committee_update
					.next_sync_committee)
					.try_into()
					.map_err(|_| <Error<T>>::BLSPreparePublicKeysFailed)?;

				if !<NextSyncCommittee<T>>::exists() {
					ensure!(
						update_finalized_period == store_period,
						<Error<T>>::InvalidSyncCommitteeUpdate
					);
					<NextSyncCommittee<T>>::set(sync_committee_prepared);
				} else if update_finalized_period == store_period + 1 {
					<CurrentSyncCommittee<T>>::set(<NextSyncCommittee<T>>::get());
					<NextSyncCommittee<T>>::set(sync_committee_prepared);
				}
				log::info!(
					target: LOG_TARGET,
					"💫 SyncCommitteeUpdated at period {}.",
					update_finalized_period
				);
				<LatestSyncCommitteeUpdatePeriod<T>>::set(update_finalized_period);
				Self::deposit_event(Event::SyncCommitteeUpdated {
					period: update_finalized_period,
				});
			};

			if update.finalized_header.slot > latest_finalized_state.slot {
				Self::store_finalized_header(update.finalized_header, update.block_roots_root)?;
			}

			Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee })
		}

		/// Computes the signing root for a given beacon header and domain. The hash tree root
		/// of the beacon header is computed, and then the combination of the beacon header hash
		/// and the domain makes up the signing root.
		pub(super) fn compute_signing_root(
			beacon_header: &BeaconHeader,
			domain: H256,
		) -> Result<H256, DispatchError> {
			let beacon_header_root = beacon_header
				.hash_tree_root()
				.map_err(|_| Error::<T>::HeaderHashTreeRootFailed)?;

			let hash_root = SigningData { object_root: beacon_header_root, domain }
				.hash_tree_root()
				.map_err(|_| Error::<T>::SigningRootHashTreeRootFailed)?;

			Ok(hash_root)
		}

		/// Stores a compact beacon state (the slot and the `block_roots` root, i.e. the hash of
		/// the `block_roots` beacon state field used for ancestry proofs) in a ring buffer map,
		/// with the header root as map key.
		pub fn store_finalized_header(
			header: BeaconHeader,
			block_roots_root: H256,
		) -> DispatchResult {
			let slot = header.slot;

			let header_root: H256 =
				header.hash_tree_root().map_err(|_| Error::<T>::HeaderHashTreeRootFailed)?;

			<FinalizedBeaconStateBuffer<T>>::insert(
				header_root,
				CompactBeaconState { slot: header.slot, block_roots_root },
			);
			<LatestFinalizedBlockRoot<T>>::set(header_root);

			log::info!(
				target: LOG_TARGET,
				"💫 Updated latest finalized block root {} at slot {}.",
				header_root,
				slot
			);

			Self::deposit_event(Event::BeaconHeaderImported { block_hash: header_root, slot });

			Ok(())
		}

		/// Stores the validators root in storage. The validators root is the hash tree root of all
		/// the validators at genesis and is used to identify the chain that we are on
		/// (in conjunction with the fork version).
		/// <https://eth2book.info/capella/part3/containers/state/#genesis_validators_root>
		fn store_validators_root(validators_root: H256) {
			<ValidatorsRoot<T>>::set(validators_root);
		}

		/// Returns the domain for the domain_type and fork_version. The domain is used to
		/// distinguish between the different players in the chain (see DomainTypes
		/// <https://eth2book.info/capella/part3/config/constants/#domain-types>) and to ensure we are
		/// addressing the correct chain.
		/// <https://eth2book.info/capella/part3/helper/misc/#compute_domain>
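		///
		/// A sketch of the resulting 32-byte layout, matching the slicing done below (the
		/// `DOMAIN_SYNC_COMMITTEE` constant is the domain type used by `signing_root`):
		///
		/// ```text
		/// domain[0..4]  = domain_type            (e.g. DOMAIN_SYNC_COMMITTEE)
		/// domain[4..32] = fork_data_root[0..28]  (truncated fork data root)
		/// ```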
		pub(super) fn compute_domain(
			domain_type: Vec<u8>,
			fork_version: ForkVersion,
			genesis_validators_root: H256,
		) -> Result<H256, DispatchError> {
			let fork_data_root =
				Self::compute_fork_data_root(fork_version, genesis_validators_root)?;

			let mut domain = [0u8; 32];
			domain[0..4].copy_from_slice(&(domain_type));
			domain[4..32].copy_from_slice(&(fork_data_root.0[..28]));

			Ok(domain.into())
		}

		/// Computes the fork data root. The fork data root is a merkleization of the current
		/// fork version and the genesis validators root.
		fn compute_fork_data_root(
			current_version: ForkVersion,
			genesis_validators_root: H256,
		) -> Result<H256, DispatchError> {
			let hash_root = ForkData {
				current_version,
				genesis_validators_root: genesis_validators_root.into(),
			}
			.hash_tree_root()
			.map_err(|_| Error::<T>::ForkDataHashTreeRootFailed)?;

			Ok(hash_root)
		}

		/// Checks that the sync committee bits (the votes of the sync committee members,
		/// represented by bits 0 and 1) represent a supermajority (at least 2/3 of the votes
		/// are positive).
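		///
		/// As an illustrative worked example, for a 512-member sync committee the check
		/// `sum * 3 >= len * 2` requires at least 342 participating bits, since
		/// 341 * 3 = 1023 < 1024 while 342 * 3 = 1026 >= 1024.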
		pub(super) fn sync_committee_participation_is_supermajority(
			sync_committee_bits: &[u8],
		) -> DispatchResult {
			let sync_committee_sum = sync_committee_sum(sync_committee_bits);
			ensure!(
				((sync_committee_sum * 3) as usize) >= sync_committee_bits.len() * 2,
				Error::<T>::SyncCommitteeParticipantsNotSupermajority
			);

			Ok(())
		}

		/// Returns the fork version based on the current epoch. The hard fork versions
		/// are defined in pallet config.
		pub(super) fn compute_fork_version(epoch: u64) -> ForkVersion {
			Self::select_fork_version(&T::ForkVersions::get(), epoch)
		}

		/// Returns the fork version based on the current epoch.
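		///
		/// A hypothetical example of the selection order (the epochs below are made up and not
		/// taken from any real network configuration):
		///
		/// ```ignore
		/// // Given altair.epoch = 10, bellatrix.epoch = 20, capella.epoch = 30,
		/// // deneb.epoch = 40 and electra.epoch = 50:
		/// assert_eq!(Pallet::<T>::select_fork_version(&forks, 25), forks.bellatrix.version);
		/// assert_eq!(Pallet::<T>::select_fork_version(&forks, 50), forks.electra.version);
		/// ```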
		pub(super) fn select_fork_version(fork_versions: &ForkVersions, epoch: u64) -> ForkVersion {
			if epoch >= fork_versions.electra.epoch {
				return fork_versions.electra.version
			}
			if epoch >= fork_versions.deneb.epoch {
				return fork_versions.deneb.version
			}
			if epoch >= fork_versions.capella.epoch {
				return fork_versions.capella.version
			}
			if epoch >= fork_versions.bellatrix.epoch {
				return fork_versions.bellatrix.version
			}
			if epoch >= fork_versions.altair.epoch {
				return fork_versions.altair.version
			}
			fork_versions.genesis.version
		}

		/// Returns a vector of public keys of members that did or did not participate in signing
		/// the sync committee block. The sync committee bits are an array of 0s and 1s: 0 means
		/// the corresponding sync committee member did not participate in the vote, 1 means they
		/// participated. This method can find either the absent or the participating members,
		/// based on the `participant` parameter: `participant = false` returns absent members,
		/// `participant = true` returns participating members.
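		///
		/// A minimal usage sketch mirroring `verify_update` (the `update` and `sync_committee`
		/// bindings are assumed to be in scope):
		///
		/// ```ignore
		/// let participation =
		/// 	decompress_sync_committee_bits(update.sync_aggregate.sync_committee_bits);
		/// // Collect the pubkeys of members that did NOT sign, so they can be subtracted from
		/// // the aggregate pubkey during BLS verification.
		/// let absent_pubkeys =
		/// 	Pallet::<T>::find_pubkeys(&participation, (*sync_committee.pubkeys).as_ref(), false);
		/// ```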
		pub fn find_pubkeys(
			sync_committee_bits: &[u8],
			sync_committee_pubkeys: &[PublicKeyPrepared],
			participant: bool,
		) -> Vec<PublicKeyPrepared> {
			let mut pubkeys: Vec<PublicKeyPrepared> = Vec::new();
			for (bit, pubkey) in sync_committee_bits.iter().zip(sync_committee_pubkeys.iter()) {
				if *bit == u8::from(participant) {
					pubkeys.push(pubkey.clone());
				}
			}
			pubkeys
		}

		/// Calculates signing root for BeaconHeader. The signing root is used for the message
		/// value in BLS signature verification.
		pub fn signing_root(
			header: &BeaconHeader,
			validators_root: H256,
			signature_slot: u64,
		) -> Result<H256, DispatchError> {
			let fork_version = Self::compute_fork_version(compute_epoch(
				signature_slot,
				config::SLOTS_PER_EPOCH as u64,
			));
			let domain_type = config::DOMAIN_SYNC_COMMITTEE.to_vec();
			// Domains are used for seeds, for signatures, and for selecting aggregators.
			let domain = Self::compute_domain(domain_type, fork_version, validators_root)?;
			// Hash tree root of SigningData - object root + domain
			let signing_root = Self::compute_signing_root(header, domain)?;
			Ok(signing_root)
		}

		/// Updates are free if the update is successful and the interval between the latest
		/// finalized header in storage and the newly imported header is large enough. Sync
		/// committee updates are free if they provide a next sync committee that is not yet
		/// known, or one for a period newer than the last free sync committee update.
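		///
		/// An illustrative scenario (the numbers are made up): with `FreeHeadersInterval = 32`
		/// and the latest finalized slot at 1000, importing a finalized header at slot 1032 or
		/// later is free (`Pays::No`), while a header at slot 1016 pays the regular fee unless
		/// it also carries a qualifying sync committee update.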
		pub(super) fn check_refundable(update: &Update, latest_slot: u64) -> Pays {
			// If the sync committee was successfully updated, the update may be free.
			let update_period = compute_period(update.finalized_header.slot);
			let latest_free_update_period = LatestSyncCommitteeUpdatePeriod::<T>::get();
			// If the next sync committee is not known and this update sets it, the update is free.
			// If the sync committee update is in a period that we have not received an update for,
			// the update is free.
			let refundable =
				!<NextSyncCommittee<T>>::exists() || update_period > latest_free_update_period;
			if update.next_sync_committee_update.is_some() && refundable {
				return Pays::No;
			}

			// If the new finalized header is at least the minimum slot interval ahead of the
			// latest finalized header in storage, the header import transaction is free.
			if update.finalized_header.slot >=
				latest_slot.saturating_add(T::FreeHeadersInterval::get() as u64)
			{
				return Pays::No;
			}

			Pays::Yes
		}

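		/// Generalized index of the finalized checkpoint root in the beacon state. The index
		/// changed with the Electra fork; the other `*_gindex_at_slot` helpers below follow the
		/// same pre/post-Electra pattern.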
		pub fn finalized_root_gindex_at_slot(slot: u64, fork_versions: ForkVersions) -> usize {
			let epoch = compute_epoch(slot, config::SLOTS_PER_EPOCH as u64);

			if epoch >= fork_versions.electra.epoch {
				return config::electra::FINALIZED_ROOT_INDEX;
			}

			config::altair::FINALIZED_ROOT_INDEX
		}

		pub fn current_sync_committee_gindex_at_slot(
			slot: u64,
			fork_versions: ForkVersions,
		) -> usize {
			let epoch = compute_epoch(slot, config::SLOTS_PER_EPOCH as u64);

			if epoch >= fork_versions.electra.epoch {
				return config::electra::CURRENT_SYNC_COMMITTEE_INDEX;
			}

			config::altair::CURRENT_SYNC_COMMITTEE_INDEX
		}

		pub fn next_sync_committee_gindex_at_slot(slot: u64, fork_versions: ForkVersions) -> usize {
			let epoch = compute_epoch(slot, config::SLOTS_PER_EPOCH as u64);

			if epoch >= fork_versions.electra.epoch {
				return config::electra::NEXT_SYNC_COMMITTEE_INDEX;
			}

			config::altair::NEXT_SYNC_COMMITTEE_INDEX
		}

		pub fn block_roots_gindex_at_slot(slot: u64, fork_versions: ForkVersions) -> usize {
			let epoch = compute_epoch(slot, config::SLOTS_PER_EPOCH as u64);

			if epoch >= fork_versions.electra.epoch {
				return config::electra::BLOCK_ROOTS_INDEX;
			}

			config::altair::BLOCK_ROOTS_INDEX
		}

		pub fn execution_header_gindex() -> usize {
			config::altair::EXECUTION_HEADER_INDEX
		}
	}
}