// pallet_sassafras/lib.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! Extension module for Sassafras consensus.
19//!
20//! [Sassafras](https://research.web3.foundation/Polkadot/protocols/block-production/SASSAFRAS)
21//! is a constant-time block production protocol that aims to ensure that there is
22//! exactly one block produced with constant time intervals rather than multiple or none.
23//!
24//! We run a lottery to distribute block production slots in an epoch and to fix the
25//! order validators produce blocks in, by the beginning of an epoch.
26//!
27//! Each validator signs the same VRF input and publishes the output on-chain. This
28//! value is their lottery ticket that can be validated against their public key.
29//!
30//! We want to keep lottery winners secret, i.e. do not publish their public keys.
31//! At the beginning of the epoch all the validators tickets are published but not
32//! their public keys.
33//!
34//! A valid tickets is validated when an honest validator reclaims it on block
35//! production.
36//!
37//! To prevent submission of fake tickets, resulting in empty slots, the validator
38//! when submitting the ticket accompanies it with a SNARK of the statement: "Here's
39//! my VRF output that has been generated using the given VRF input and my secret
40//! key. I'm not telling you my keys, but my public key is among those of the
41//! nominated validators", that is validated before the lottery.
42//!
43//! To anonymously publish the ticket to the chain a validator sends their tickets
44//! to a random validator who later puts it on-chain as a transaction.
45
46#![deny(warnings)]
47#![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)]
48#![cfg_attr(not(feature = "std"), no_std)]
49
50extern crate alloc;
51
52use codec::{Decode, Encode, MaxEncodedLen};
53use log::{debug, error, trace, warn};
54use scale_info::TypeInfo;
55
56use alloc::vec::Vec;
57use frame_support::{
58	dispatch::{DispatchResultWithPostInfo, Pays},
59	traits::{Defensive, Get},
60	weights::Weight,
61	BoundedVec, WeakBoundedVec,
62};
63use frame_system::{
64	offchain::{CreateBare, SubmitTransaction},
65	pallet_prelude::BlockNumberFor,
66};
67use sp_consensus_sassafras::{
68	digests::{ConsensusLog, NextEpochDescriptor, SlotClaim},
69	vrf, AuthorityId, Epoch, EpochConfiguration, Randomness, Slot, TicketBody, TicketEnvelope,
70	TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID,
71};
72use sp_io::hashing;
73use sp_runtime::{
74	generic::DigestItem,
75	traits::{One, Zero},
76	BoundToRuntimeAppPublic,
77};
78
79#[cfg(feature = "runtime-benchmarks")]
80mod benchmarking;
81#[cfg(all(feature = "std", test))]
82mod mock;
83#[cfg(all(feature = "std", test))]
84mod tests;
85
86pub mod weights;
87pub use weights::WeightInfo;
88
89pub use pallet::*;
90
// Logging target used by all log entries emitted by this pallet.
const LOG_TARGET: &str = "sassafras::runtime";

// Max length for segments holding unsorted tickets.
const SEGMENT_MAX_SIZE: u32 = 128;

/// Authorities bounded vector convenience type.
pub type AuthoritiesVec<T> = WeakBoundedVec<AuthorityId, <T as Config>::MaxAuthorities>;

/// Epoch length defined by the configuration.
pub type EpochLengthFor<T> = <T as Config>::EpochLength;
101
/// Tickets metadata.
///
/// NOTE: fields are SCALE-encoded in declaration order; changing the order or the
/// types changes the on-chain encoding.
#[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)]
pub struct TicketsMetadata {
	/// Number of outstanding next epoch tickets requiring to be sorted.
	///
	/// These tickets are held by the [`UnsortedSegments`] storage map in segments
	/// containing at most `SEGMENT_MAX_SIZE` items.
	pub unsorted_tickets_count: u32,

	/// Number of tickets available for current and next epoch.
	///
	/// These tickets are held by the [`TicketsIds`] storage map.
	///
	/// The array entry to be used for the current epoch is computed as epoch index modulo 2.
	pub tickets_count: [u32; 2],
}
118
#[frame_support::pallet]
pub mod pallet {
	use super::*;
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	/// The Sassafras pallet.
	#[pallet::pallet]
	pub struct Pallet<T>(_);

	/// Configuration parameters.
	#[pallet::config]
	pub trait Config: frame_system::Config + CreateBare<Call<Self>> {
		/// Amount of slots that each epoch should last.
		#[pallet::constant]
		type EpochLength: Get<u32>;

		/// Max number of authorities allowed.
		#[pallet::constant]
		type MaxAuthorities: Get<u32>;

		/// Epoch change trigger.
		///
		/// Logic to be triggered on every block to query for whether an epoch has ended
		/// and to perform the transition to the next epoch.
		type EpochChangeTrigger: EpochChangeTrigger;

		/// Weight information for all calls of this pallet.
		type WeightInfo: WeightInfo;
	}

	/// Sassafras runtime errors.
	#[pallet::error]
	pub enum Error<T> {
		/// Submitted configuration is invalid.
		InvalidConfiguration,
	}

	/// Current epoch index.
	#[pallet::storage]
	#[pallet::getter(fn epoch_index)]
	pub type EpochIndex<T> = StorageValue<_, u64, ValueQuery>;

	/// Current epoch authorities.
	#[pallet::storage]
	#[pallet::getter(fn authorities)]
	pub type Authorities<T: Config> = StorageValue<_, AuthoritiesVec<T>, ValueQuery>;

	/// Next epoch authorities.
	#[pallet::storage]
	#[pallet::getter(fn next_authorities)]
	pub type NextAuthorities<T: Config> = StorageValue<_, AuthoritiesVec<T>, ValueQuery>;

	/// First block slot number.
	///
	/// As the slots may not be zero-based, we record the slot value for the first block.
	/// This allows to always compute relative indices for epochs and slots.
	#[pallet::storage]
	#[pallet::getter(fn genesis_slot)]
	pub type GenesisSlot<T> = StorageValue<_, Slot, ValueQuery>;

	/// Current block slot number.
	#[pallet::storage]
	#[pallet::getter(fn current_slot)]
	pub type CurrentSlot<T> = StorageValue<_, Slot, ValueQuery>;

	/// Current epoch randomness.
	#[pallet::storage]
	#[pallet::getter(fn randomness)]
	pub type CurrentRandomness<T> = StorageValue<_, Randomness, ValueQuery>;

	/// Next epoch randomness.
	#[pallet::storage]
	#[pallet::getter(fn next_randomness)]
	pub type NextRandomness<T> = StorageValue<_, Randomness, ValueQuery>;

	/// Randomness accumulator.
	///
	/// Excluded the first imported block, its value is updated on block finalization.
	#[pallet::storage]
	#[pallet::getter(fn randomness_accumulator)]
	pub(crate) type RandomnessAccumulator<T> = StorageValue<_, Randomness, ValueQuery>;

	/// Per slot randomness used to feed the randomness accumulator.
	///
	/// The value is ephemeral and is cleared on block finalization.
	#[pallet::storage]
	pub(crate) type SlotRandomness<T> = StorageValue<_, Randomness>;

	/// The configuration for the current epoch.
	#[pallet::storage]
	#[pallet::getter(fn config)]
	pub type EpochConfig<T> = StorageValue<_, EpochConfiguration, ValueQuery>;

	/// The configuration for the next epoch.
	#[pallet::storage]
	#[pallet::getter(fn next_config)]
	pub type NextEpochConfig<T> = StorageValue<_, EpochConfiguration>;

	/// Pending epoch configuration change that will be set as `NextEpochConfig` when the next
	/// epoch is enacted.
	///
	/// In other words, a configuration change submitted during epoch N will be enacted on epoch
	/// N+2. This is to maintain coherence for already submitted tickets for epoch N+1 that where
	/// computed using configuration parameters stored for epoch N+1.
	#[pallet::storage]
	pub type PendingEpochConfigChange<T> = StorageValue<_, EpochConfiguration>;

	/// Stored tickets metadata.
	#[pallet::storage]
	pub type TicketsMeta<T> = StorageValue<_, TicketsMetadata, ValueQuery>;

	/// Tickets identifiers map.
	///
	/// The map holds tickets ids for the current and next epoch.
	///
	/// The key is a tuple composed by:
	/// - `u8` equal to epoch's index modulo 2;
	/// - `u32` equal to the ticket's index in a sorted list of epoch's tickets.
	///
	/// Epoch X first N-th ticket has key (X mod 2, N)
	///
	/// Note that the ticket's index doesn't directly correspond to the slot index within the epoch.
	/// The assignment is computed dynamically using an *outside-in* strategy.
	///
	/// Be aware that entries within this map are never removed, only overwritten.
	/// Last element index should be fetched from the [`TicketsMeta`] value.
	#[pallet::storage]
	pub type TicketsIds<T> = StorageMap<_, Identity, (u8, u32), TicketId>;

	/// Tickets to be used for current and next epoch.
	#[pallet::storage]
	pub type TicketsData<T> = StorageMap<_, Identity, TicketId, TicketBody>;

	/// Next epoch tickets unsorted segments.
	///
	/// Contains lists of tickets where each list represents a batch of tickets
	/// received via the `submit_tickets` extrinsic.
	///
	/// Each segment has max length [`SEGMENT_MAX_SIZE`].
	#[pallet::storage]
	pub type UnsortedSegments<T: Config> =
		StorageMap<_, Identity, u32, BoundedVec<TicketId, ConstU32<SEGMENT_MAX_SIZE>>, ValueQuery>;

	/// The most recently set of tickets which are candidates to become the next
	/// epoch tickets.
	#[pallet::storage]
	pub type SortedCandidates<T> =
		StorageValue<_, BoundedVec<TicketId, EpochLengthFor<T>>, ValueQuery>;

	/// Parameters used to construct the epoch's ring verifier.
	///
	/// In practice: Updatable Universal Reference String and the seed.
	#[pallet::storage]
	#[pallet::getter(fn ring_context)]
	pub type RingContext<T: Config> = StorageValue<_, vrf::RingContext>;

	/// Ring verifier data for the current epoch.
	#[pallet::storage]
	pub type RingVerifierData<T: Config> = StorageValue<_, vrf::RingVerifierKey>;

	/// Genesis configuration for Sassafras protocol.
	#[pallet::genesis_config]
	#[derive(frame_support::DefaultNoBound)]
	pub struct GenesisConfig<T: Config> {
		/// Genesis authorities.
		pub authorities: Vec<AuthorityId>,
		/// Genesis epoch configuration.
		pub epoch_config: EpochConfiguration,
		/// Phantom config
		#[serde(skip)]
		pub _phantom: core::marker::PhantomData<T>,
	}

	#[pallet::genesis_build]
	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
		fn build(&self) {
			EpochConfig::<T>::put(self.epoch_config);
			Pallet::<T>::genesis_authorities_initialize(&self.authorities);

			// Testing-only convenience: build a dummy ring context so the verifier key
			// can be constructed without an externally provided URS.
			#[cfg(feature = "construct-dummy-ring-context")]
			{
				debug!(target: LOG_TARGET, "Constructing dummy ring context");
				let ring_ctx = vrf::RingContext::new_testing();
				RingContext::<T>::put(ring_ctx);
				Pallet::<T>::update_ring_verifier(&self.authorities);
			}
		}
	}

	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		fn on_initialize(block_num: BlockNumberFor<T>) -> Weight {
			debug_assert_eq!(block_num, frame_system::Pallet::<T>::block_number());

			// Extract the slot claim deposited by the block author as a pre-runtime digest.
			// A block without a claim is invalid by construction, hence the `expect`.
			let claim = <frame_system::Pallet<T>>::digest()
				.logs
				.iter()
				.find_map(|item| item.pre_runtime_try_to::<SlotClaim>(&SASSAFRAS_ENGINE_ID))
				.expect("Valid block must have a slot claim. qed");

			CurrentSlot::<T>::put(claim.slot);

			if block_num == One::one() {
				Self::post_genesis_initialize(claim.slot);
			}

			// Stash the per-slot randomness; it is folded into the accumulator on finalization.
			let randomness = claim.vrf_signature.pre_output.make_bytes();
			SlotRandomness::<T>::put(randomness);

			let trigger_weight = T::EpochChangeTrigger::trigger::<T>(block_num);

			T::WeightInfo::on_initialize() + trigger_weight
		}

		fn on_finalize(_: BlockNumberFor<T>) {
			// At the end of the block, we can safely include the current slot randomness
			// to the accumulator. If we've determined that this block was the first in
			// a new epoch, the changeover logic has already occurred at this point
			// (i.e. `enact_epoch_change` has already been called).
			let randomness = SlotRandomness::<T>::take()
				.expect("Unconditionally populated in `on_initialize`; `on_finalize` is always called after; qed");
			Self::deposit_slot_randomness(&randomness);

			// Check if we are in the epoch's second half.
			// If so, start sorting the next epoch tickets.
			let epoch_length = T::EpochLength::get();
			let current_slot_idx = Self::current_slot_index();
			if current_slot_idx >= epoch_length / 2 {
				let mut metadata = TicketsMeta::<T>::get();
				if metadata.unsorted_tickets_count != 0 {
					let next_epoch_idx = EpochIndex::<T>::get() + 1;
					let next_epoch_tag = (next_epoch_idx & 1) as u8;
					// Spread the sorting work over the epoch's remaining slots:
					// process roughly `segments / slots_left` segments per block.
					let slots_left = epoch_length.checked_sub(current_slot_idx).unwrap_or(1);
					Self::sort_segments(
						metadata
							.unsorted_tickets_count
							.div_ceil(SEGMENT_MAX_SIZE * slots_left as u32),
						next_epoch_tag,
						&mut metadata,
					);
					TicketsMeta::<T>::set(metadata);
				}
			}
		}
	}

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		/// Submit next epoch tickets candidates.
		///
		/// The number of tickets allowed to be submitted in one call is equal to the epoch length.
		#[pallet::call_index(0)]
		#[pallet::weight(T::WeightInfo::submit_tickets(tickets.len() as u32))]
		pub fn submit_tickets(
			origin: OriginFor<T>,
			tickets: BoundedVec<TicketEnvelope, EpochLengthFor<T>>,
		) -> DispatchResultWithPostInfo {
			ensure_none(origin)?;

			debug!(target: LOG_TARGET, "Received {} tickets", tickets.len());

			// Tickets are only accepted during the first half of the epoch; the second
			// half is reserved for sorting the already received ones.
			let epoch_length = T::EpochLength::get();
			let current_slot_idx = Self::current_slot_index();
			if current_slot_idx > epoch_length / 2 {
				warn!(target: LOG_TARGET, "Tickets shall be submitted in the first epoch half",);
				return Err("Tickets shall be submitted in the first epoch half".into())
			}

			let Some(verifier) =
				RingVerifierData::<T>::get().map(|vk| vrf::RingContext::verifier_no_context(vk))
			else {
				warn!(target: LOG_TARGET, "Ring verifier key not initialized");
				return Err("Ring verifier key not initialized".into())
			};

			let next_authorities = Self::next_authorities();

			// Compute tickets threshold
			let next_config = Self::next_config().unwrap_or_else(|| Self::config());
			let ticket_threshold = sp_consensus_sassafras::ticket_id_threshold(
				next_config.redundancy_factor,
				epoch_length as u32,
				next_config.attempts_number,
				next_authorities.len() as u32,
			);

			// Get next epoch params
			let randomness = NextRandomness::<T>::get();
			let epoch_idx = EpochIndex::<T>::get() + 1;

			let mut valid_tickets = BoundedVec::with_bounded_capacity(tickets.len());

			for ticket in tickets {
				debug!(target: LOG_TARGET, "Checking ring proof");

				// Check threshold constraint
				let ticket_id = vrf::make_ticket_id(&ticket.signature.pre_output);
				if ticket_id >= ticket_threshold {
					debug!(target: LOG_TARGET, "Ignoring ticket over threshold ({:032x} >= {:032x})", ticket_id, ticket_threshold);
					continue
				}

				// Check for duplicates
				if TicketsData::<T>::contains_key(ticket_id) {
					debug!(target: LOG_TARGET, "Ignoring duplicate ticket ({:032x})", ticket_id);
					continue
				}

				// Check ring signature
				let ticket_id_input =
					vrf::ticket_id_input(&randomness, ticket.body.attempt_idx, epoch_idx);
				let sign_data = vrf::ticket_body_sign_data(&ticket.body, ticket_id_input);
				if !ticket.signature.ring_vrf_verify(&sign_data, &verifier) {
					debug!(target: LOG_TARGET, "Proof verification failure for ticket ({:032x})", ticket_id);
					continue
				}

				if let Ok(_) = valid_tickets.try_push(ticket_id).defensive_proof(
					"Input segment has same length as bounded destination vector; qed",
				) {
					TicketsData::<T>::set(ticket_id, Some(ticket.body));
				}
			}

			if !valid_tickets.is_empty() {
				Self::append_tickets(valid_tickets);
			}

			// Valid unsigned ticket submissions are fee-less.
			Ok(Pays::No.into())
		}

		/// Plan an epoch configuration change.
		///
		/// The epoch configuration change is recorded and will be announced at the beginning
		/// of the next epoch together with next epoch authorities information.
		/// In other words, the configuration will be enacted one epoch later.
		///
		/// Multiple calls to this method will replace any existing planned config change
		/// that has not been enacted yet.
		#[pallet::call_index(1)]
		#[pallet::weight(T::WeightInfo::plan_config_change())]
		pub fn plan_config_change(
			origin: OriginFor<T>,
			config: EpochConfiguration,
		) -> DispatchResult {
			ensure_root(origin)?;

			// Zero redundancy or zero attempts would make the ticket threshold degenerate.
			ensure!(
				config.redundancy_factor != 0 && config.attempts_number != 0,
				Error::<T>::InvalidConfiguration
			);
			PendingEpochConfigChange::<T>::put(config);
			Ok(())
		}
	}

	#[pallet::validate_unsigned]
	impl<T: Config> ValidateUnsigned for Pallet<T> {
		type Call = Call<T>;

		fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity {
			// Only `submit_tickets` is allowed as an unsigned call.
			let Call::submit_tickets { tickets } = call else {
				return InvalidTransaction::Call.into()
			};

			// Discard tickets not coming from the local node or that are not included in a block
			if source == TransactionSource::External {
				warn!(
					target: LOG_TARGET,
					"Rejecting unsigned `submit_tickets` transaction from external source",
				);
				return InvalidTransaction::BadSigner.into()
			}

			// Current slot should be less than half of epoch length.
			let epoch_length = T::EpochLength::get();
			let current_slot_idx = Self::current_slot_index();
			if current_slot_idx > epoch_length / 2 {
				warn!(target: LOG_TARGET, "Tickets shall be proposed in the first epoch half",);
				return InvalidTransaction::Stale.into()
			}

			// This should be set such that it is discarded after the first epoch half
			let tickets_longevity = epoch_length / 2 - current_slot_idx;
			let tickets_tag = tickets.using_encoded(|bytes| hashing::blake2_256(bytes));

			ValidTransaction::with_tag_prefix("Sassafras")
				.priority(TransactionPriority::max_value())
				.longevity(tickets_longevity as u64)
				.and_provides(tickets_tag)
				.propagate(true)
				.build()
		}
	}
}
515
516// Inherent methods
517impl<T: Config> Pallet<T> {
518	/// Determine whether an epoch change should take place at this block.
519	///
520	/// Assumes that initialization has already taken place.
521	pub(crate) fn should_end_epoch(block_num: BlockNumberFor<T>) -> bool {
522		// The epoch has technically ended during the passage of time between this block and the
523		// last, but we have to "end" the epoch now, since there is no earlier possible block we
524		// could have done it.
525		//
526		// The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having
527		// started at the slot of block 1. We want to use the same randomness and validator set as
528		// signalled in the genesis, so we don't rotate the epoch.
529		block_num > One::one() && Self::current_slot_index() >= T::EpochLength::get()
530	}
531
	/// Current slot index relative to the current epoch.
	///
	/// Derived from the `CurrentSlot` storage value; saturates to `u32::MAX` on
	/// anomalous values (see [`Self::slot_index`]).
	fn current_slot_index() -> u32 {
		Self::slot_index(CurrentSlot::<T>::get())
	}
536
537	/// Slot index relative to the current epoch.
538	fn slot_index(slot: Slot) -> u32 {
539		slot.checked_sub(*Self::current_epoch_start())
540			.and_then(|v| v.try_into().ok())
541			.unwrap_or(u32::MAX)
542	}
543
	/// Finds the start slot of the current epoch.
	///
	/// Only guaranteed to give correct results after `initialize` of the first
	/// block in the chain (as its result is based off of `GenesisSlot`).
	fn current_epoch_start() -> Slot {
		Self::epoch_start(EpochIndex::<T>::get())
	}
551
552	/// Get the epoch's first slot.
553	fn epoch_start(epoch_index: u64) -> Slot {
554		const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \
555							 if u64 is not enough we should crash for safety; qed.";
556
557		let epoch_start = epoch_index.checked_mul(T::EpochLength::get() as u64).expect(PROOF);
558		GenesisSlot::<T>::get().checked_add(epoch_start).expect(PROOF).into()
559	}
560
561	pub(crate) fn update_ring_verifier(authorities: &[AuthorityId]) {
562		debug!(target: LOG_TARGET, "Loading ring context");
563		let Some(ring_ctx) = RingContext::<T>::get() else {
564			debug!(target: LOG_TARGET, "Ring context not initialized");
565			return
566		};
567
568		let pks: Vec<_> = authorities.iter().map(|auth| *auth.as_ref()).collect();
569
570		debug!(target: LOG_TARGET, "Building ring verifier (ring size: {})", pks.len());
571		let verifier_data = ring_ctx.verifier_key(&pks);
572
573		RingVerifierData::<T>::put(verifier_data);
574	}
575
	/// Enact an epoch change.
	///
	/// WARNING: Should be called on every block once and if and only if [`should_end_epoch`]
	/// has returned `true`.
	///
	/// If we detect one or more skipped epochs the policy is to use the authorities and values
	/// from the first skipped epoch. The tickets data is invalidated.
	pub(crate) fn enact_epoch_change(
		authorities: WeakBoundedVec<AuthorityId, T::MaxAuthorities>,
		next_authorities: WeakBoundedVec<AuthorityId, T::MaxAuthorities>,
	) {
		// The ring verifier only needs rebuilding when the upcoming set differs.
		if next_authorities != authorities {
			Self::update_ring_verifier(&next_authorities);
		}

		// Update authorities
		Authorities::<T>::put(&authorities);
		NextAuthorities::<T>::put(&next_authorities);

		// Update epoch index
		let mut epoch_idx = EpochIndex::<T>::get() + 1;

		let slot_idx = CurrentSlot::<T>::get().saturating_sub(Self::epoch_start(epoch_idx));
		if slot_idx >= T::EpochLength::get() {
			// Detected one or more skipped epochs, clear tickets data and recompute epoch index.
			Self::reset_tickets_data();
			let skipped_epochs = *slot_idx / T::EpochLength::get() as u64;
			epoch_idx += skipped_epochs;
			warn!(
				target: LOG_TARGET,
				"Detected {} skipped epochs, resuming from epoch {}",
				skipped_epochs,
				epoch_idx
			);
		}

		let mut metadata = TicketsMeta::<T>::get();
		let mut metadata_dirty = false;

		EpochIndex::<T>::put(epoch_idx);

		let next_epoch_idx = epoch_idx + 1;

		// Updates current epoch randomness and computes the *next* epoch randomness.
		let next_randomness = Self::update_epoch_randomness(next_epoch_idx);

		// Promote the previously scheduled configuration (if any) to be the current one.
		if let Some(config) = NextEpochConfig::<T>::take() {
			EpochConfig::<T>::put(config);
		}

		// A pending config change (submitted two epochs ago) becomes the next config.
		let next_config = PendingEpochConfigChange::<T>::take();
		if let Some(next_config) = next_config {
			NextEpochConfig::<T>::put(next_config);
		}

		// After we update the current epoch, we signal the *next* epoch change
		// so that nodes can track changes.
		let next_epoch = NextEpochDescriptor {
			randomness: next_randomness,
			authorities: next_authorities.into_inner(),
			config: next_config,
		};
		Self::deposit_next_epoch_descriptor_digest(next_epoch);

		let epoch_tag = (epoch_idx & 1) as u8;

		// Optionally finish sorting
		if metadata.unsorted_tickets_count != 0 {
			Self::sort_segments(u32::MAX, epoch_tag, &mut metadata);
			metadata_dirty = true;
		}

		// Clear the "prev ≡ next (mod 2)" epoch tickets counter and bodies.
		// Ids are left since are just cyclically overwritten on-the-go.
		let prev_epoch_tag = epoch_tag ^ 1;
		let prev_epoch_tickets_count = &mut metadata.tickets_count[prev_epoch_tag as usize];
		if *prev_epoch_tickets_count != 0 {
			for idx in 0..*prev_epoch_tickets_count {
				if let Some(ticket_id) = TicketsIds::<T>::get((prev_epoch_tag, idx)) {
					TicketsData::<T>::remove(ticket_id);
				}
			}
			*prev_epoch_tickets_count = 0;
			metadata_dirty = true;
		}

		// Persist metadata only if something actually changed.
		if metadata_dirty {
			TicketsMeta::<T>::set(metadata);
		}
	}
666
667	// Call this function on epoch change to enact current epoch randomness.
668	//
669	// Returns the next epoch randomness.
670	fn update_epoch_randomness(next_epoch_index: u64) -> Randomness {
671		let curr_epoch_randomness = NextRandomness::<T>::get();
672		CurrentRandomness::<T>::put(curr_epoch_randomness);
673
674		let accumulator = RandomnessAccumulator::<T>::get();
675
676		let mut buf = [0; RANDOMNESS_LENGTH + 8];
677		buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]);
678		buf[RANDOMNESS_LENGTH..].copy_from_slice(&next_epoch_index.to_le_bytes());
679
680		let next_randomness = hashing::blake2_256(&buf);
681		NextRandomness::<T>::put(&next_randomness);
682
683		next_randomness
684	}
685
686	// Deposit per-slot randomness.
687	fn deposit_slot_randomness(randomness: &Randomness) {
688		let accumulator = RandomnessAccumulator::<T>::get();
689
690		let mut buf = [0; 2 * RANDOMNESS_LENGTH];
691		buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]);
692		buf[RANDOMNESS_LENGTH..].copy_from_slice(&randomness[..]);
693
694		let accumulator = hashing::blake2_256(&buf);
695		RandomnessAccumulator::<T>::put(accumulator);
696	}
697
698	// Deposit next epoch descriptor in the block header digest.
699	fn deposit_next_epoch_descriptor_digest(desc: NextEpochDescriptor) {
700		let item = ConsensusLog::NextEpochData(desc);
701		let log = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, item.encode());
702		<frame_system::Pallet<T>>::deposit_log(log)
703	}
704
705	// Initialize authorities on genesis phase.
706	//
707	// Genesis authorities may have been initialized via other means (e.g. via session pallet).
708	//
709	// If this function has already been called with some authorities, then the new list
710	// should match the previously set one.
711	fn genesis_authorities_initialize(authorities: &[AuthorityId]) {
712		let prev_authorities = Authorities::<T>::get();
713
714		if !prev_authorities.is_empty() {
715			// This function has already been called.
716			if prev_authorities.as_slice() == authorities {
717				return
718			} else {
719				panic!("Authorities were already initialized");
720			}
721		}
722
723		let authorities = WeakBoundedVec::try_from(authorities.to_vec())
724			.expect("Initial number of authorities should be lower than T::MaxAuthorities");
725		Authorities::<T>::put(&authorities);
726		NextAuthorities::<T>::put(&authorities);
727	}
728
	// Method to be called on first block `on_initialize` to properly populate some key parameters.
	fn post_genesis_initialize(slot: Slot) {
		// Keep track of the actual first slot used (may not be zero based).
		GenesisSlot::<T>::put(slot);

		// Properly initialize randomness using genesis hash and current slot.
		// This is important to guarantee that a different set of tickets are produced for:
		// - different chains which share the same ring parameters and
		// - same chain started with a different slot base.
		// Note: called from block #1's `on_initialize`, so `parent_hash` is the genesis hash.
		let genesis_hash = frame_system::Pallet::<T>::parent_hash();
		let mut buf = genesis_hash.as_ref().to_vec();
		buf.extend_from_slice(&slot.to_le_bytes());
		let randomness = hashing::blake2_256(buf.as_slice());
		RandomnessAccumulator::<T>::put(randomness);

		// Derive epoch 1's randomness from the freshly seeded accumulator.
		let next_randomness = Self::update_epoch_randomness(1);

		// Deposit a log as this is the first block in first epoch.
		let next_epoch = NextEpochDescriptor {
			randomness: next_randomness,
			authorities: Self::next_authorities().into_inner(),
			config: None,
		};
		Self::deposit_next_epoch_descriptor_digest(next_epoch);
	}
754
755	/// Current epoch information.
756	pub fn current_epoch() -> Epoch {
757		let index = EpochIndex::<T>::get();
758		Epoch {
759			index,
760			start: Self::epoch_start(index),
761			length: T::EpochLength::get(),
762			authorities: Self::authorities().into_inner(),
763			randomness: Self::randomness(),
764			config: Self::config(),
765		}
766	}
767
768	/// Next epoch information.
769	pub fn next_epoch() -> Epoch {
770		let index = EpochIndex::<T>::get() + 1;
771		Epoch {
772			index,
773			start: Self::epoch_start(index),
774			length: T::EpochLength::get(),
775			authorities: Self::next_authorities().into_inner(),
776			randomness: Self::next_randomness(),
777			config: Self::next_config().unwrap_or_else(|| Self::config()),
778		}
779	}
780
	/// Fetch expected ticket-id for the given slot according to an "outside-in" sorting strategy.
	///
	/// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots,
	/// with n >= k, then the tickets are assigned to the slots according to the following
	/// strategy:
	///
	/// slot-index  : [ 0,  1,  2, ............ , n ]
	/// tickets     : [ t1, t3, t5, ... , t4, t2, t0 ].
	///
	/// With slot-index computed as `slot - epoch_start()`.
	///
	/// If `slot` value falls within the current epoch then we fetch tickets from the current epoch
	/// tickets list.
	///
	/// If `slot` value falls within the next epoch then we fetch tickets from the next epoch
	/// tickets ids list. Note that in this case we may have not finished receiving all the tickets
	/// for that epoch yet. The next epoch tickets should be considered "stable" only after the
	/// current epoch first half slots were elapsed (see `submit_tickets_unsigned_extrinsic`).
	///
	/// Returns `None` if, according to the sorting strategy, there is no ticket associated to the
	/// specified slot-index (happens if a ticket falls in the middle of an epoch and n > k),
	/// or if the slot falls beyond the next epoch.
	///
	/// Before importing the first block this returns `None`.
	pub fn slot_ticket_id(slot: Slot) -> Option<TicketId> {
		if frame_system::Pallet::<T>::block_number().is_zero() {
			return None
		}
		let epoch_idx = EpochIndex::<T>::get();
		let epoch_len = T::EpochLength::get();
		let mut slot_idx = Self::slot_index(slot);
		let mut metadata = TicketsMeta::<T>::get();

		// Outside-in mapping: first-half slots take odd ticket indices (1, 3, 5, ...),
		// second-half slots take even ones counted from the sequence end (..., 4, 2, 0).
		let get_ticket_idx = |slot_idx| {
			let ticket_idx = if slot_idx < epoch_len / 2 {
				2 * slot_idx + 1
			} else {
				2 * (epoch_len - (slot_idx + 1))
			};
			debug!(
				target: LOG_TARGET,
				"slot-idx {} <-> ticket-idx {}",
				slot_idx,
				ticket_idx
			);
			ticket_idx as u32
		};

		let mut epoch_tag = (epoch_idx & 1) as u8;

		if epoch_len <= slot_idx && slot_idx < 2 * epoch_len {
			// Try to get a ticket for the next epoch. Since its state values were not enacted yet,
			// we may have to finish sorting the tickets.
			epoch_tag ^= 1;
			slot_idx -= epoch_len;
			if metadata.unsorted_tickets_count != 0 {
				Self::sort_segments(u32::MAX, epoch_tag, &mut metadata);
				TicketsMeta::<T>::set(metadata);
			}
		} else if slot_idx >= 2 * epoch_len {
			// Beyond the next epoch: no association can be made.
			return None
		}

		let ticket_idx = get_ticket_idx(slot_idx);
		if ticket_idx < metadata.tickets_count[epoch_tag as usize] {
			TicketsIds::<T>::get((epoch_tag, ticket_idx))
		} else {
			None
		}
	}
851
852	/// Returns ticket id and data associated with the given `slot`.
853	///
854	/// Refer to the `slot_ticket_id` documentation for the slot-ticket association
855	/// criteria.
856	pub fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketBody)> {
857		Self::slot_ticket_id(slot).and_then(|id| TicketsData::<T>::get(id).map(|body| (id, body)))
858	}
859
860	// Sort and truncate candidate tickets, cleanup storage.
861	fn sort_and_truncate(candidates: &mut Vec<u128>, max_tickets: usize) -> u128 {
862		candidates.sort_unstable();
863		candidates.drain(max_tickets..).for_each(TicketsData::<T>::remove);
864		candidates[max_tickets - 1]
865	}
866
867	/// Sort the tickets which belong to the epoch with the specified `epoch_tag`.
868	///
869	/// At most `max_segments` are taken from the `UnsortedSegments` structure.
870	///
871	/// The tickets of the removed segments are merged with the tickets on the `SortedCandidates`
872	/// which is then sorted an truncated to contain at most `MaxTickets` entries.
873	///
874	/// If all the entries in `UnsortedSegments` are consumed, then `SortedCandidates` is elected
875	/// as the next epoch tickets, else it is saved to be used by next calls of this function.
876	pub(crate) fn sort_segments(max_segments: u32, epoch_tag: u8, metadata: &mut TicketsMetadata) {
877		let unsorted_segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE);
878		let max_segments = max_segments.min(unsorted_segments_count);
879		let max_tickets = Self::epoch_length() as usize;
880
881		// Fetch the sorted candidates (if any).
882		let mut candidates = SortedCandidates::<T>::take().into_inner();
883
884		// There is an upper bound to check only if we already sorted the max number
885		// of allowed tickets.
886		let mut upper_bound = *candidates.get(max_tickets - 1).unwrap_or(&TicketId::MAX);
887
888		let mut require_sort = false;
889
890		// Consume at most `max_segments` segments.
891		// During the process remove every stale ticket from `TicketsData` storage.
892		for segment_idx in (0..unsorted_segments_count).rev().take(max_segments as usize) {
893			let segment = UnsortedSegments::<T>::take(segment_idx);
894			metadata.unsorted_tickets_count -= segment.len() as u32;
895
896			// Push only ids with a value less than the current `upper_bound`.
897			let prev_len = candidates.len();
898			for ticket_id in segment {
899				if ticket_id < upper_bound {
900					candidates.push(ticket_id);
901				} else {
902					TicketsData::<T>::remove(ticket_id);
903				}
904			}
905			require_sort = candidates.len() != prev_len;
906
907			// As we approach the tail of the segments buffer the `upper_bound` value is expected
908			// to decrease (fast). We thus expect the number of tickets pushed into the
909			// `candidates` vector to follow an exponential drop.
910			//
911			// Given this, sorting and truncating after processing each segment may be an overkill
912			// as we may find pushing few tickets more and more often. Is preferable to perform
913			// the sort and truncation operations only when we reach some bigger threshold
914			// (currently set as twice the capacity of `SortCandidate`).
915			//
916			// The more is the protocol's redundancy factor (i.e. the ratio between tickets allowed
917			// to be submitted and the epoch length) the more this check becomes relevant.
918			if candidates.len() > 2 * max_tickets {
919				upper_bound = Self::sort_and_truncate(&mut candidates, max_tickets);
920				require_sort = false;
921			}
922		}
923
924		if candidates.len() > max_tickets {
925			Self::sort_and_truncate(&mut candidates, max_tickets);
926		} else if require_sort {
927			candidates.sort_unstable();
928		}
929
930		if metadata.unsorted_tickets_count == 0 {
931			// Sorting is over, write to next epoch map.
932			candidates.iter().enumerate().for_each(|(i, id)| {
933				TicketsIds::<T>::insert((epoch_tag, i as u32), id);
934			});
935			metadata.tickets_count[epoch_tag as usize] = candidates.len() as u32;
936		} else {
937			// Keep the partial result for the next calls.
938			SortedCandidates::<T>::set(BoundedVec::truncate_from(candidates));
939		}
940	}
941
942	/// Append a set of tickets to the segments map.
943	pub(crate) fn append_tickets(mut tickets: BoundedVec<TicketId, EpochLengthFor<T>>) {
944		debug!(target: LOG_TARGET, "Appending batch with {} tickets", tickets.len());
945		tickets.iter().for_each(|t| trace!(target: LOG_TARGET, "  + {t:032x}"));
946
947		let mut metadata = TicketsMeta::<T>::get();
948		let mut segment_idx = metadata.unsorted_tickets_count / SEGMENT_MAX_SIZE;
949
950		while !tickets.is_empty() {
951			let rem = metadata.unsorted_tickets_count % SEGMENT_MAX_SIZE;
952			let to_be_added = tickets.len().min((SEGMENT_MAX_SIZE - rem) as usize);
953
954			let mut segment = UnsortedSegments::<T>::get(segment_idx);
955			let _ = segment
956				.try_extend(tickets.drain(..to_be_added))
957				.defensive_proof("We don't add more than `SEGMENT_MAX_SIZE` and this is the maximum bound for the vector.");
958			UnsortedSegments::<T>::insert(segment_idx, segment);
959
960			metadata.unsorted_tickets_count += to_be_added as u32;
961			segment_idx += 1;
962		}
963
964		TicketsMeta::<T>::set(metadata);
965	}
966
967	/// Remove all tickets related data.
968	///
969	/// May not be efficient as the calling places may repeat some of this operations
970	/// but is a very extraordinary operation (hopefully never happens in production)
971	/// and better safe than sorry.
972	fn reset_tickets_data() {
973		let metadata = TicketsMeta::<T>::get();
974
975		// Remove even/odd-epoch data.
976		for epoch_tag in 0..=1 {
977			for idx in 0..metadata.tickets_count[epoch_tag] {
978				if let Some(id) = TicketsIds::<T>::get((epoch_tag as u8, idx)) {
979					TicketsData::<T>::remove(id);
980				}
981			}
982		}
983
984		// Remove all unsorted tickets segments.
985		let segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE);
986		(0..segments_count).for_each(UnsortedSegments::<T>::remove);
987
988		// Reset sorted candidates
989		SortedCandidates::<T>::kill();
990
991		// Reset tickets metadata
992		TicketsMeta::<T>::kill();
993	}
994
995	/// Submit next epoch validator tickets via an unsigned extrinsic constructed with a call to
996	/// `submit_unsigned_transaction`.
997	///
998	/// The submitted tickets are added to the next epoch outstanding tickets as long as the
999	/// extrinsic is called within the first half of the epoch. Tickets received during the
1000	/// second half are dropped.
1001	pub fn submit_tickets_unsigned_extrinsic(tickets: Vec<TicketEnvelope>) -> bool {
1002		let tickets = BoundedVec::truncate_from(tickets);
1003		let call = Call::submit_tickets { tickets };
1004		let xt = T::create_bare(call.into());
1005		match SubmitTransaction::<T, Call<T>>::submit_transaction(xt) {
1006			Ok(_) => true,
1007			Err(e) => {
1008				error!(target: LOG_TARGET, "Error submitting tickets {:?}", e);
1009				false
1010			},
1011		}
1012	}
1013
/// Protocol epoch length in slots, as configured via `T::EpochLength`.
pub fn epoch_length() -> u32 {
	T::EpochLength::get()
}
1018}
1019
/// Trigger an epoch change, if any should take place.
pub trait EpochChangeTrigger {
	/// May trigger an epoch change, if any should take place.
	///
	/// Returns the `Weight` consumed by the epoch change procedure
	/// (`Weight::zero()` when no epoch change has been triggered).
	///
	/// This should be called during every block, after initialization is done.
	fn trigger<T: Config>(_: BlockNumberFor<T>) -> Weight;
}
1029
/// An `EpochChangeTrigger` which does nothing.
///
/// In practice this means that the epoch change logic is left to some external component
/// (e.g. pallet-session).
pub struct EpochChangeExternalTrigger;

impl EpochChangeTrigger for EpochChangeExternalTrigger {
	fn trigger<T: Config>(_: BlockNumberFor<T>) -> Weight {
		// nothing - trigger is external, hence no weight is consumed here.
		Weight::zero()
	}
}
1042
1043/// An `EpochChangeTrigger` which recycle the same authorities set forever.
1044///
1045/// The internal trigger should only be used when no other module is responsible for
1046/// changing authority set.
1047pub struct EpochChangeInternalTrigger;
1048
1049impl EpochChangeTrigger for EpochChangeInternalTrigger {
1050	fn trigger<T: Config>(block_num: BlockNumberFor<T>) -> Weight {
1051		if Pallet::<T>::should_end_epoch(block_num) {
1052			let authorities = Pallet::<T>::next_authorities();
1053			let next_authorities = authorities.clone();
1054			let len = next_authorities.len() as u32;
1055			Pallet::<T>::enact_epoch_change(authorities, next_authorities);
1056			T::WeightInfo::enact_epoch_change(len, T::EpochLength::get())
1057		} else {
1058			Weight::zero()
1059		}
1060	}
1061}
1062
// Bind the pallet to the Sassafras authority (runtime application) public key type.
impl<T: Config> BoundToRuntimeAppPublic for Pallet<T> {
	type Public = AuthorityId;
}