// pallet_staking_async/session_rotation.rs
1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! Manages all era rotation logic based on session increments.
19//!
20//! # Lifecycle:
21//!
22//! When a session ends in RC, a session report is sent to AH with the ending session index. Given
23//! there are 6 sessions per Era, and we configure the PlanningEraOffset to be 1, the following
24//! happens.
25//!
26//! ## Idle Sessions
27//! In the happy path, first 3 sessions are idle. Nothing much happens in these sessions.
28//!
29//!
30//! ## Planning New Era Session
31//! In the happy path, `planning new era` session is initiated when 3rd session ends and the 4th
32//! starts in the active era.
33//!
34//! **Triggers**
35//! 1. `SessionProgress == SessionsPerEra - PlanningEraOffset`
36//! 2. Forcing is set to `ForceNew` or `ForceAlways`
37//!
38//! **Actions**
39//! 1. Triggers the election process,
40//! 2. Updates the CurrentEra.
41//!
42//! **SkipIf**
43//! CurrentEra = ActiveEra + 1 // this implies planning session has already been triggered.
44//!
45//! **FollowUp**
46//! When the election process is over, we send the new validator set, with the CurrentEra index
47//! as the id of the validator set.
48//!
49//!
50//! ## Era Rotation Session
51//! In the happy path, this happens when the 5th session ends and the 6th starts in the active era.
52//!
53//! **Triggers**
54//! When we receive an activation timestamp from RC.
55//!
56//! **Assertions**
57//! 1. CurrentEra must be ActiveEra + 1.
58//! 2. Id of the activation timestamp same as CurrentEra.
59//!
60//! **Actions**
61//! - Finalize the currently active era.
62//! - Increment ActiveEra by 1.
63//! - Cleanup the old era information.
64//!
65//! **Exceptional Scenarios**
66//! - Delay in exporting validator set: Triggered in a session later than 7th.
//! - Forcing Era: May be triggered in a session earlier than the 7th.
68//!
69//! ## Example Flow of a happy path
70//!
71//! * end 0, start 1, plan 2
72//! * end 1, start 2, plan 3
73//! * end 2, start 3, plan 4
74//! * end 3, start 4, plan 5 // `Plan new era` session. Current Era++. Trigger Election.
75//! * **** Somewhere here: Election set is sent to RC, keyed with Current Era
76//! * end 4, start 5, plan 6 // RC::session receives and queues this set.
77//! * end 5, start 6, plan 7 // Session report contains activation timestamp with Current Era.
78
79use crate::*;
80use alloc::{boxed::Box, vec::Vec};
81use frame_election_provider_support::{BoundedSupportsOf, ElectionProvider, PageIndex};
82use frame_support::{
83	pallet_prelude::*,
84	traits::{Defensive, DefensiveMax, DefensiveSaturating, OnUnbalanced, TryCollect},
85	weights::WeightMeter,
86};
87use pallet_staking_async_rc_client::RcClientInterface;
88use sp_runtime::{Perbill, Percent, Saturating};
89use sp_staking::{
90	currency_to_vote::CurrencyToVote, Exposure, Page, PagedExposureMetadata, SessionIndex,
91};
92
/// A handler for all era-based storage items.
///
/// This type is the single owner of the era-keyed storage; all reads and writes of the
/// following items must go through it:
///
/// [`ErasValidatorPrefs`]
/// [`ClaimedRewards`]
/// [`ErasStakersPaged`]
/// [`ErasStakersOverview`]
/// [`ErasValidatorReward`]
/// [`ErasRewardPoints`]
/// [`ErasTotalStake`]
pub struct Eras<T: Config>(core::marker::PhantomData<T>);
105
106impl<T: Config> Eras<T> {
107	pub(crate) fn set_validator_prefs(era: EraIndex, stash: &T::AccountId, prefs: ValidatorPrefs) {
108		debug_assert_eq!(era, Rotator::<T>::planned_era(), "we only set prefs for planning era");
109		<ErasValidatorPrefs<T>>::insert(era, stash, prefs);
110	}
111
112	pub(crate) fn get_validator_prefs(era: EraIndex, stash: &T::AccountId) -> ValidatorPrefs {
113		<ErasValidatorPrefs<T>>::get(era, stash)
114	}
115
116	/// Returns validator commission for this era and page.
117	pub(crate) fn get_validator_commission(era: EraIndex, stash: &T::AccountId) -> Perbill {
118		Self::get_validator_prefs(era, stash).commission
119	}
120
121	/// Returns true if validator has one or more page of era rewards not claimed yet.
122	pub(crate) fn pending_rewards(era: EraIndex, validator: &T::AccountId) -> bool {
123		<ErasStakersOverview<T>>::get(&era, validator)
124			.map(|overview| {
125				ClaimedRewards::<T>::get(era, validator).len() < overview.page_count as usize
126			})
127			.unwrap_or(false)
128	}
129
130	/// Get exposure for a validator at a given era and page.
131	///
132	/// This is mainly used for rewards and slashing. Validator's self-stake is only returned in
133	/// page 0.
134	///
135	/// This builds a paged exposure from `PagedExposureMetadata` and `ExposurePage` of the
136	/// validator.
137	pub(crate) fn get_paged_exposure(
138		era: EraIndex,
139		validator: &T::AccountId,
140		page: Page,
141	) -> Option<PagedExposure<T::AccountId, BalanceOf<T>>> {
142		let overview = <ErasStakersOverview<T>>::get(&era, validator)?;
143
144		// validator stake is added only in page zero.
145		let validator_stake = if page == 0 { overview.own } else { Zero::zero() };
146
147		// since overview is present, paged exposure will always be present except when a
148		// validator has only own stake and no nominator stake.
149		let exposure_page = <ErasStakersPaged<T>>::get((era, validator, page)).unwrap_or_default();
150
151		// build the exposure
152		Some(PagedExposure {
153			exposure_metadata: PagedExposureMetadata { own: validator_stake, ..overview },
154			exposure_page: exposure_page.into(),
155		})
156	}
157
158	/// Get full exposure of the validator at a given era.
159	pub(crate) fn get_full_exposure(
160		era: EraIndex,
161		validator: &T::AccountId,
162	) -> Exposure<T::AccountId, BalanceOf<T>> {
163		let Some(overview) = <ErasStakersOverview<T>>::get(&era, validator) else {
164			return Exposure::default();
165		};
166
167		let mut others = Vec::with_capacity(overview.nominator_count as usize);
168		for page in 0..overview.page_count {
169			let nominators = <ErasStakersPaged<T>>::get((era, validator, page));
170			others.append(&mut nominators.map(|n| n.others.clone()).defensive_unwrap_or_default());
171		}
172
173		Exposure { total: overview.total, own: overview.own, others }
174	}
175
176	/// Returns the number of pages of exposure a validator has for the given era.
177	///
178	/// For eras where paged exposure does not exist, this returns 1 to keep backward compatibility.
179	pub(crate) fn exposure_page_count(era: EraIndex, validator: &T::AccountId) -> Page {
180		<ErasStakersOverview<T>>::get(&era, validator)
181			.map(|overview| {
182				if overview.page_count == 0 && overview.own > Zero::zero() {
183					// Even though there are no nominator pages, there is still validator's own
184					// stake exposed which needs to be paid out in a page.
185					1
186				} else {
187					overview.page_count
188				}
189			})
190			// Always returns 1 page for older non-paged exposure.
191			// FIXME: Can be cleaned up with issue #13034.
192			.unwrap_or(1)
193	}
194
195	/// Returns the next page that can be claimed or `None` if nothing to claim.
196	pub(crate) fn get_next_claimable_page(era: EraIndex, validator: &T::AccountId) -> Option<Page> {
197		// Find next claimable page of paged exposure.
198		let page_count = Self::exposure_page_count(era, validator);
199		let all_claimable_pages: Vec<Page> = (0..page_count).collect();
200		let claimed_pages = ClaimedRewards::<T>::get(era, validator);
201
202		all_claimable_pages.into_iter().find(|p| !claimed_pages.contains(p))
203	}
204
205	/// Creates an entry to track validator reward has been claimed for a given era and page.
206	/// Noop if already claimed.
207	pub(crate) fn set_rewards_as_claimed(era: EraIndex, validator: &T::AccountId, page: Page) {
208		let mut claimed_pages = ClaimedRewards::<T>::get(era, validator).into_inner();
209
210		// this should never be called if the reward has already been claimed
211		if claimed_pages.contains(&page) {
212			defensive!("Trying to set an already claimed reward");
213			// nevertheless don't do anything since the page already exist in claimed rewards.
214			return
215		}
216
217		// add page to claimed entries
218		claimed_pages.push(page);
219		ClaimedRewards::<T>::insert(
220			era,
221			validator,
222			WeakBoundedVec::<_, _>::force_from(claimed_pages, Some("set_rewards_as_claimed")),
223		);
224	}
225
	/// Store exposure for elected validators at start of an era.
	///
	/// If the exposure does not exist yet for the tuple (era, validator), it sets it. Otherwise,
	/// it updates the existing record by ensuring *intermediate* exposure pages are filled up with
	/// `T::MaxExposurePageSize` number of backers per page and the remaining exposures are added
	/// to new exposure pages.
	pub fn upsert_exposure(
		era: EraIndex,
		validator: &T::AccountId,
		mut exposure: Exposure<T::AccountId, BalanceOf<T>>,
	) {
		// page size of 0 makes no sense; defensively clamp to at least 1.
		let page_size = T::MaxExposurePageSize::get().defensive_max(1);
		if cfg!(debug_assertions) && cfg!(not(feature = "runtime-benchmarks")) {
			// sanitize the exposure in case some test data from this pallet is wrong.
			// ignore benchmarks as other pallets might do weird things.
			let expected_total = exposure
				.others
				.iter()
				.map(|ie| ie.value)
				.fold::<BalanceOf<T>, _>(Default::default(), |acc, x| acc + x)
				.saturating_add(exposure.own);
			debug_assert_eq!(expected_total, exposure.total, "exposure total must equal own + sum(others) for (era: {:?}, validator: {:?}, exposure: {:?})", era, validator, exposure);
		}

		if let Some(overview) = ErasStakersOverview::<T>::get(era, &validator) {
			// --- update path: an overview already exists for (era, validator). ---
			// collect some info from the un-touched overview for later use.
			let last_page_idx = overview.page_count.saturating_sub(1);
			let mut last_page =
				ErasStakersPaged::<T>::get((era, validator, last_page_idx)).unwrap_or_default();
			let last_page_empty_slots =
				T::MaxExposurePageSize::get().saturating_sub(last_page.others.len() as u32);

			// update nominator-count, page-count, and total stake in overview (done in
			// `update_with`).
			let new_stake_added = exposure.total;
			let new_nominators_added = exposure.others.len() as u32;
			let mut updated_overview = overview
				.update_with::<T::MaxExposurePageSize>(new_stake_added, new_nominators_added);

			// update own stake, if applicable.
			match (updated_overview.own.is_zero(), exposure.own.is_zero()) {
				(true, false) => {
					// first time we see own exposure -- good.
					// note: `total` is already updated above.
					updated_overview.own = exposure.own;
				},
				(true, true) | (false, true) => {
					// no new own exposure is added, nothing to do
				},
				(false, false) => {
					// own stake was already recorded AND the incoming exposure carries own
					// stake again -- this should never happen.
					debug_assert!(
						false,
						"validator own stake already set in overview for (era: {:?}, validator: {:?}, current overview: {:?}, new exposure: {:?})",
						era,
						validator,
						updated_overview,
						exposure,
					);
					defensive!("duplicate validator self stake in election");
				},
			};

			ErasStakersOverview::<T>::insert(era, &validator, updated_overview);
			// we are done updating the overview now, `updated_overview` should not be used anymore.
			// We've updated:
			// * nominator count
			// * total stake
			// * own stake (if applicable)
			// * page count
			//
			// next step:
			// * new-keys or updates in `ErasStakersPaged`
			//
			// we don't need the information about own stake anymore -- drop it.
			exposure.total = exposure.total.saturating_sub(exposure.own);
			exposure.own = Zero::zero();

			// splits the exposure so that `append_to_last_page` will fit within the last exposure
			// page, up to the max exposure page size. The remaining individual exposures in
			// `put_in_new_pages` will be added to new pages.
			let append_to_last_page = exposure.split_others(last_page_empty_slots);
			let put_in_new_pages = exposure;

			// handle last page first.

			// fill up last page with exposures.
			last_page.page_total = last_page.page_total.saturating_add(append_to_last_page.total);
			last_page.others.extend(append_to_last_page.others);
			ErasStakersPaged::<T>::insert((era, &validator, last_page_idx), last_page);

			// now handle the remaining exposures and append the exposure pages. The metadata update
			// has been already handled above.
			let (_unused_metadata, put_in_new_pages_chunks) =
				put_in_new_pages.into_pages(page_size);

			// new pages are keyed right after the (old) last page index.
			put_in_new_pages_chunks
				.into_iter()
				.enumerate()
				.for_each(|(idx, paged_exposure)| {
					let append_at =
						(last_page_idx.saturating_add(1).saturating_add(idx as u32)) as Page;
					<ErasStakersPaged<T>>::insert((era, &validator, append_at), paged_exposure);
				});
		} else {
			// --- insert path: first exposure for (era, validator). ---
			// expected page count is the number of nominators divided by the page size, rounded up.
			let expected_page_count = exposure
				.others
				.len()
				.defensive_saturating_add((page_size as usize).defensive_saturating_sub(1))
				.saturating_div(page_size as usize);

			// no exposures yet for this (era, validator) tuple, calculate paged exposure pages and
			// metadata from a blank slate.
			let (exposure_metadata, exposure_pages) = exposure.into_pages(page_size);
			defensive_assert!(exposure_pages.len() == expected_page_count, "unexpected page count");

			// insert metadata.
			ErasStakersOverview::<T>::insert(era, &validator, exposure_metadata);

			// insert validator's overview.
			exposure_pages.into_iter().enumerate().for_each(|(idx, paged_exposure)| {
				let append_at = idx as Page;
				<ErasStakersPaged<T>>::insert((era, &validator, append_at), paged_exposure);
			});
		};
	}
352
353	pub(crate) fn set_validators_reward(era: EraIndex, amount: BalanceOf<T>) {
354		ErasValidatorReward::<T>::insert(era, amount);
355	}
356
357	pub(crate) fn get_validators_reward(era: EraIndex) -> Option<BalanceOf<T>> {
358		ErasValidatorReward::<T>::get(era)
359	}
360
361	/// Update the total exposure for all the elected validators in the era.
362	pub(crate) fn add_total_stake(era: EraIndex, stake: BalanceOf<T>) {
363		<ErasTotalStake<T>>::mutate(era, |total_stake| {
364			*total_stake += stake;
365		});
366	}
367
368	/// Check if the rewards for the given era and page index have been claimed.
369	pub(crate) fn is_rewards_claimed(era: EraIndex, validator: &T::AccountId, page: Page) -> bool {
370		ClaimedRewards::<T>::get(era, validator).contains(&page)
371	}
372
373	/// Add reward points to validators using their stash account ID.
374	pub(crate) fn reward_active_era(
375		validators_points: impl IntoIterator<Item = (T::AccountId, u32)>,
376	) {
377		if let Some(active_era) = ActiveEra::<T>::get() {
378			<ErasRewardPoints<T>>::mutate(active_era.index, |era_rewards| {
379				for (validator, points) in validators_points.into_iter() {
380					match era_rewards.individual.get_mut(&validator) {
381						Some(individual) => individual.saturating_accrue(points),
382						None => {
383							// not much we can do -- validators should always be less than
384							// `MaxValidatorSet`.
385							let _ =
386								era_rewards.individual.try_insert(validator, points).defensive();
387						},
388					}
389					era_rewards.total.saturating_accrue(points);
390				}
391			});
392		}
393	}
394
395	pub(crate) fn get_reward_points(era: EraIndex) -> EraRewardPoints<T> {
396		ErasRewardPoints::<T>::get(era)
397	}
398}
399
400#[cfg(any(feature = "try-runtime", test, feature = "runtime-benchmarks"))]
401#[allow(unused)]
402impl<T: Config> Eras<T> {
	/// Ensure the given era's data is fully present (all storage intact and not being pruned).
	pub(crate) fn era_fully_present(era: EraIndex) -> Result<(), sp_runtime::TryRuntimeError> {
		// these two are only set if we have some validators in an era.
		let e0 = ErasValidatorPrefs::<T>::iter_prefix_values(era).count() != 0;
		// note: we don't check `ErasStakersPaged` as a validator can have no backers.
		let e1 = ErasStakersOverview::<T>::iter_prefix_values(era).count() != 0;
		ensure!(e0 == e1, "ErasValidatorPrefs and ErasStakersOverview should be consistent");

		// these two must always be set
		let e2 = ErasTotalStake::<T>::contains_key(era);

		let active_era = Rotator::<T>::active_era();
		// `e4` checks `ErasValidatorReward` for `era - 1` (it is set one era late), but only
		// when `era - 1` is non-zero and still inside the history window; otherwise it is
		// forced equal to `e2` so that the consistency check below degenerates to a no-op.
		let e4 = if era.saturating_sub(1) > 0 &&
			era.saturating_sub(1) > active_era.saturating_sub(T::HistoryDepth::get() + 1)
		{
			// `ErasValidatorReward` is set at active era n for era n-1, and is not set for era 0 in
			// our tests. Moreover, it cannot be checked for presence in the oldest present era
			// (`active_era.saturating_sub(1)`)
			ErasValidatorReward::<T>::contains_key(era.saturating_sub(1))
		} else {
			// ignore
			e2
		};

		ensure!(e2 == e4, "era info presence not consistent");

		if e2 {
			Ok(())
		} else {
			Err("era presence mismatch".into())
		}
	}
435
436	/// Check if the given era is currently being pruned.
437	pub(crate) fn era_pruning_in_progress(era: EraIndex) -> bool {
438		EraPruningState::<T>::contains_key(era)
439	}
440
441	/// Ensure the given era is either absent or currently being pruned.
442	pub(crate) fn era_absent_or_pruning(era: EraIndex) -> Result<(), sp_runtime::TryRuntimeError> {
443		if Self::era_pruning_in_progress(era) {
444			Ok(())
445		} else {
446			Self::era_absent(era)
447		}
448	}
449
450	/// Ensure the given era has indeed been already pruned. This is called by the main pallet in
451	/// do_prune_era_step.
452	pub(crate) fn era_absent(era: EraIndex) -> Result<(), sp_runtime::TryRuntimeError> {
453		// check double+ maps
454		let e0 = ErasValidatorPrefs::<T>::iter_prefix_values(era).count() != 0;
455		let e1 = ErasStakersPaged::<T>::iter_prefix_values((era,)).count() != 0;
456		let e2 = ErasStakersOverview::<T>::iter_prefix_values(era).count() != 0;
457
458		// check maps
459		// `ErasValidatorReward` is set at active era n for era n-1
460		let e3 = ErasValidatorReward::<T>::contains_key(era);
461		let e4 = ErasTotalStake::<T>::contains_key(era);
462
463		// these two are only populated conditionally, so we only check them for lack of existence
464		let e6 = ClaimedRewards::<T>::iter_prefix_values(era).count() != 0;
465		let e7 = ErasRewardPoints::<T>::contains_key(era);
466
467		// Check if era info is consistent - if not, era is in partial pruning state
468		if !vec![e0, e1, e2, e3, e4, e6, e7].windows(2).all(|w| w[0] == w[1]) {
469			return Err("era info absence not consistent - partial pruning state".into());
470		}
471
472		if !e0 {
473			Ok(())
474		} else {
475			Err("era absence mismatch".into())
476		}
477	}
478
479	pub(crate) fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> {
480		// pruning window works.
481		let active_era = Rotator::<T>::active_era();
482		// we max with 1 as in active era 0 we don't do an election and therefore we don't have some
483		// of the maps populated.
484		let oldest_present_era = active_era.saturating_sub(T::HistoryDepth::get()).max(1);
485
486		for e in oldest_present_era..=active_era {
487			Self::era_fully_present(e)?
488		}
489
490		// Ensure all eras older than oldest_present_era are either fully pruned or marked for
491		// pruning
492		ensure!(
493			(1..oldest_present_era).all(|e| Self::era_absent_or_pruning(e).is_ok()),
494			"All old eras must be either fully pruned or marked for pruning"
495		);
496
497		Ok(())
498	}
499}
500
/// Manages session rotation logic.
///
/// This controls the following storage items in FULL, meaning that they should not be accessed
/// directly from anywhere else in this pallet:
///
/// * `CurrentEra`: The current planning era
/// * `ActiveEra`: The current active era
/// * `BondedEras`: the list of ACTIVE eras and their session index
pub struct Rotator<T: Config>(core::marker::PhantomData<T>);
510
511impl<T: Config> Rotator<T> {
512	#[cfg(feature = "runtime-benchmarks")]
513	pub(crate) fn legacy_insta_plan_era() -> Vec<T::AccountId> {
514		// Plan the era,
515		Self::plan_new_era();
516		// signal that we are about to call into elect asap.
517		<<T as Config>::ElectionProvider as ElectionProvider>::asap();
518		// immediately call into the election provider to fetch and process the results. We assume
519		// we are using an instant, onchain election here.
520		let msp = <T::ElectionProvider as ElectionProvider>::msp();
521		let lsp = 0;
522		for p in (lsp..=msp).rev() {
523			EraElectionPlanner::<T>::do_elect_paged(p);
524		}
525
526		crate::ElectableStashes::<T>::take().into_iter().collect()
527	}
528
529	#[cfg(any(feature = "try-runtime", test))]
530	pub(crate) fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> {
531		// Check planned era vs active era relationship
532		let active_era = ActiveEra::<T>::get();
533		let planned_era = CurrentEra::<T>::get();
534
535		let bonded = BondedEras::<T>::get();
536
537		match (&active_era, &planned_era) {
538			(None, None) => {
539				// Uninitialized state - both should be None
540				ensure!(bonded.is_empty(), "BondedEras must be empty when ActiveEra is None");
541			},
542			(Some(active), Some(planned)) => {
543				// Normal state - planned can be at most one more than active
544				ensure!(
545					*planned == active.index || *planned == active.index + 1,
546					"planned era is always equal or one more than active"
547				);
548
549				// If we have an active era, bonded eras must always be the range
550				// [active - bonding_duration .. active_era]
551				ensure!(
552					bonded.into_iter().map(|(era, _sess)| era).collect::<Vec<_>>() ==
553						(active.index.saturating_sub(T::BondingDuration::get())..=active.index)
554							.collect::<Vec<_>>(),
555					"BondedEras range incorrect"
556				);
557			},
558			_ => {
559				ensure!(false, "ActiveEra and CurrentEra must both be None or both be Some");
560			},
561		}
562
563		Ok(())
564	}
565
566	#[cfg(any(feature = "try-runtime", feature = "std", feature = "runtime-benchmarks", test))]
567	pub fn assert_election_ongoing() {
568		assert!(Self::is_planning().is_some(), "planning era must exist");
569		assert!(
570			T::ElectionProvider::status().is_ok(),
571			"Election provider must be in a good state during election"
572		);
573	}
574
575	/// Latest era that was planned.
576	///
577	/// The returned value does not necessarily indicate that planning for the era with this index
578	/// is underway, but rather the last era that was planned. If `Self::active_era()` is equal to
579	/// this value, it means that the era is currently active and no new era is planned.
580	///
581	/// See [`Self::is_planning()`] to only get the next index if planning in progress.
582	pub fn planned_era() -> EraIndex {
583		CurrentEra::<T>::get().unwrap_or(0)
584	}
585
586	pub fn active_era() -> EraIndex {
587		ActiveEra::<T>::get().map(|a| a.index).defensive_unwrap_or(0)
588	}
589
590	/// Next era that is planned to be started.
591	///
592	/// Returns None if no era is planned.
593	pub fn is_planning() -> Option<EraIndex> {
594		let (active, planned) = (Self::active_era(), Self::planned_era());
595		if planned.defensive_saturating_sub(active) > 1 {
596			defensive!("planned era must always be equal or one more than active");
597		}
598
599		(planned > active).then_some(planned)
600	}
601
	/// End the session `end_index` and start the next one, as reported by the relay chain.
	///
	/// `activation_timestamp` is `Some((timestamp, id))` when the RC has activated a new
	/// validator set with the given `id`: if `id` matches the currently planned era, the
	/// active era is rotated. Afterwards, this decides (based on session progress and
	/// `ForceEra`) whether a new era should be planned. Returns the weight consumed.
	pub(crate) fn end_session(
		end_index: SessionIndex,
		activation_timestamp: Option<(u64, u32)>,
	) -> Weight {
		// baseline weight for processing the relay chain session report
		let weight = T::WeightInfo::rc_on_session_report();

		let Some(active_era) = ActiveEra::<T>::get() else {
			defensive!("Active era must always be available.");
			return weight;
		};
		let current_planned_era = Self::is_planning();
		let starting = end_index + 1;
		// the session after the starting session.
		let planning = starting + 1;

		log!(
			info,
			"Session: end {:?}, start {:?} (ts: {:?}), planning {:?}",
			end_index,
			starting,
			activation_timestamp,
			planning
		);
		log!(info, "Era: active {:?}, planned {:?}", active_era.index, current_planned_era);

		match activation_timestamp {
			Some((time, id)) if Some(id) == current_planned_era => {
				// We rotate the era if we have the activation timestamp.
				Self::start_era(active_era, starting, time);
			},
			Some((_time, id)) => {
				// RC has done something wrong -- we received the wrong ID. Don't start a new era.
				crate::log!(
					warn,
					"received wrong ID with activation timestamp. Got {}, expected {:?}",
					id,
					current_planned_era
				);
				Pallet::<T>::deposit_event(Event::Unexpected(
					UnexpectedKind::UnknownValidatorActivation,
				));
			},
			// no activation timestamp -- nothing to rotate.
			None => (),
		}

		// check if we should plan new era.
		let should_plan_era = match ForceEra::<T>::get() {
			// see if it's good time to plan a new era.
			Forcing::NotForcing => Self::is_plan_era_deadline(starting),
			// Force plan new era only once; reset the flag back to `NotForcing`.
			Forcing::ForceNew => {
				ForceEra::<T>::put(Forcing::NotForcing);
				true
			},
			// always plan the new era.
			Forcing::ForceAlways => true,
			// never force.
			Forcing::ForceNone => false,
		};

		// Note: we call `is_planning` again, as a new era might have started (above) since
		// we checked it last.
		let has_pending_era = Self::is_planning().is_some();
		match (should_plan_era, has_pending_era) {
			(false, _) => {
				// nothing to consider
			},
			(true, false) => {
				// happy path
				Self::plan_new_era();
			},
			(true, true) => {
				// we are waiting to start the previously planned era, so we cannot plan a new era
				// now.
				crate::log!(
					debug,
					"time to plan a new era {:?}, but waiting for the activation of the previous.",
					current_planned_era
				);
			},
		}

		Pallet::<T>::deposit_event(Event::SessionRotated {
			starting_session: starting,
			active_era: Self::active_era(),
			planned_era: Self::planned_era(),
		});

		weight
	}
694
	/// Rotate into the era following `ending_era`.
	///
	/// Called once the RC reports the activation timestamp of the new validator set:
	/// finalizes the ending era, bumps `ActiveEra`, updates `BondedEras`, resets the
	/// election state, and marks the era falling out of `HistoryDepth` for lazy pruning.
	pub(crate) fn start_era(
		ending_era: ActiveEraInfo,
		starting_session: SessionIndex,
		new_era_start_timestamp: u64,
	) {
		// verify that a new era was planned
		debug_assert!(CurrentEra::<T>::get().unwrap_or(0) == ending_era.index + 1);

		let starting_era = ending_era.index + 1;

		// finalize the ending era (computes and records its payout).
		Self::end_era(&ending_era, new_era_start_timestamp);

		// start the next era.
		Self::start_era_inc_active_era(new_era_start_timestamp);
		Self::start_era_update_bonded_eras(starting_era, starting_session);

		// cleanup election state
		EraElectionPlanner::<T>::cleanup();

		// Mark ancient era for lazy pruning instead of immediately pruning it.
		if let Some(old_era) = starting_era.checked_sub(T::HistoryDepth::get() + 1) {
			log!(debug, "Marking era {:?} for lazy pruning", old_era);
			EraPruningState::<T>::insert(old_era, PruningStep::ErasStakersPaged);
		}
	}
721
722	fn start_era_inc_active_era(start_timestamp: u64) {
723		ActiveEra::<T>::mutate(|active_era| {
724			let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0);
725			log!(
726				debug,
727				"starting active era {:?} with RC-provided timestamp {:?}",
728				new_index,
729				start_timestamp
730			);
731			*active_era = Some(ActiveEraInfo { index: new_index, start: Some(start_timestamp) });
732		});
733	}
734
735	/// The session index of the current active era.
736	///
737	/// This must always exist in the `BondedEras` storage item, ergo the function is infallible.
738	pub fn active_era_start_session_index() -> SessionIndex {
739		Self::era_start_session_index(Self::active_era()).defensive_unwrap_or(0)
740	}
741
742	/// The session index of a given era.
743	pub fn era_start_session_index(era: EraIndex) -> Option<SessionIndex> {
744		BondedEras::<T>::get()
745			.into_iter()
746			.rev()
747			.find_map(|(e, s)| if e == era { Some(s) } else { None })
748	}
749
	/// Append `(starting_era, start_session)` to `BondedEras`, evicting the oldest entry
	/// (and clearing its slashing metadata) if the bounded list is already full.
	fn start_era_update_bonded_eras(starting_era: EraIndex, start_session: SessionIndex) {
		let bonding_duration = T::BondingDuration::get();

		BondedEras::<T>::mutate(|bonded| {
			if bonded.is_full() {
				// remove oldest
				let (era_removed, _) = bonded.remove(0);
				debug_assert!(
					era_removed <= (starting_era.saturating_sub(bonding_duration)),
					"should not delete an era that is not older than bonding duration"
				);
				// the evicted era is now fully unbonded -- drop its slashing metadata.
				slashing::clear_era_metadata::<T>(era_removed);
			}

			// must work -- we were not full, or just removed the oldest era.
			let _ = bonded.try_push((starting_era, start_session)).defensive();
		});
	}
768
769	fn end_era(ending_era: &ActiveEraInfo, new_era_start: u64) {
770		let previous_era_start = ending_era.start.defensive_unwrap_or(new_era_start);
771		let uncapped_era_duration = new_era_start.saturating_sub(previous_era_start);
772
773		// maybe cap the era duration to the maximum allowed by the runtime.
774		let cap = T::MaxEraDuration::get();
775		let era_duration = if cap == 0 {
776			// if the cap is zero (not set), we don't cap the era duration.
777			uncapped_era_duration
778		} else if uncapped_era_duration > cap {
779			Pallet::<T>::deposit_event(Event::Unexpected(UnexpectedKind::EraDurationBoundExceeded));
780
781			// if the cap is set, and era duration exceeds the cap, we cap the era duration to the
782			// maximum allowed.
783			log!(
784				warn,
785				"capping era duration for era {:?} from {:?} to max allowed {:?}",
786				ending_era.index,
787				uncapped_era_duration,
788				cap
789			);
790			cap
791		} else {
792			uncapped_era_duration
793		};
794
795		Self::end_era_compute_payout(ending_era, era_duration);
796	}
797
	/// Compute and record the payout of `ending_era`, given its (already capped) duration.
	///
	/// Derives the payout via `T::EraPayout`, caps the validators' share by
	/// `MaxStakedRewards` (the difference flows into the remainder), records the validator
	/// reward for later claiming, and routes the remainder to `T::RewardRemainder`.
	fn end_era_compute_payout(ending_era: &ActiveEraInfo, era_duration: u64) {
		let staked = ErasTotalStake::<T>::get(ending_era.index);
		let issuance = asset::total_issuance::<T>();

		log!(
			debug,
			"computing inflation for era {:?} with duration {:?}",
			ending_era.index,
			era_duration
		);
		let (validator_payout, remainder) =
			T::EraPayout::era_payout(staked, issuance, era_duration);

		let total_payout = validator_payout.saturating_add(remainder);
		// an unset `MaxStakedRewards` means "no cap", i.e. 100%.
		let max_staked_rewards = MaxStakedRewards::<T>::get().unwrap_or(Percent::from_percent(100));

		// apply cap to validators payout and add difference to remainder.
		let validator_payout = validator_payout.min(max_staked_rewards * total_payout);
		let remainder = total_payout.saturating_sub(validator_payout);

		Pallet::<T>::deposit_event(Event::<T>::EraPaid {
			era_index: ending_era.index,
			validator_payout,
			remainder,
		});

		// Set ending era reward.
		Eras::<T>::set_validators_reward(ending_era.index, validator_payout);
		T::RewardRemainder::on_unbalanced(asset::issue::<T>(remainder));
	}
828
829	/// Plans a new era by kicking off the election process.
830	///
831	/// The newly planned era is targeted to activate in the next session.
832	fn plan_new_era() {
833		let _ = CurrentEra::<T>::try_mutate(|x| {
834			log!(info, "Planning new era: {:?}, sending election start signal", x.unwrap_or(0));
835			let could_start_election = EraElectionPlanner::<T>::plan_new_election();
836			*x = Some(x.unwrap_or(0) + 1);
837			could_start_election
838		});
839	}
840
841	/// Returns whether we are at the session where we should plan the new era.
842	fn is_plan_era_deadline(start_session: SessionIndex) -> bool {
843		let planning_era_offset = T::PlanningEraOffset::get().min(T::SessionsPerEra::get());
844		// session at which we should plan the new era.
845		let target_plan_era_session = T::SessionsPerEra::get().saturating_sub(planning_era_offset);
846		let era_start_session = Self::active_era_start_session_index();
847
848		// progress of the active era in sessions.
849		let session_progress = start_session.defensive_saturating_sub(era_start_session);
850
851		log!(
852			debug,
853			"Session progress within era: {:?}, target_plan_era_session: {:?}",
854			session_progress,
855			target_plan_era_session
856		);
857		session_progress >= target_plan_era_session
858	}
859}
860
/// Manager type which collects the election results from [`Config::ElectionProvider`] and
/// finalizes the planning of a new era.
///
/// This type manages 3 storage items:
///
/// * [`crate::VoterSnapshotStatus`]
/// * [`crate::NextElectionPage`]
/// * [`crate::ElectableStashes`]
///
/// A new election is fetched over multiple pages, and finalized upon fetching the last page.
///
/// * The intermediate state of fetching the election result is kept in [`NextElectionPage`]. If
///   `Some(_)` something is ongoing, otherwise not.
/// * We fully trust [`Config::ElectionProvider`] to give us a full set of validators, with enough
///   backing after all calls to `maybe_fetch_election_results` are done. Note that older versions
///   of this pallet had a `MinimumValidatorCount` to double-check this, but we don't check it
///   anymore.
/// * `maybe_fetch_election_results` returns no weight. Its weight should be taken into account in
///   the e2e benchmarking of the [`Config::ElectionProvider`].
///   (NOTE(review): the current signature of `maybe_fetch_election_results` does return a
///   `Weight` — the sentence above looks outdated; confirm and update.)
///
/// TODOs:
///
/// * Add a try-state check based on the 3 storage items
/// * Move snapshot creation functions here as well.
pub(crate) struct EraElectionPlanner<T: Config>(PhantomData<T>);
886impl<T: Config> EraElectionPlanner<T> {
887	/// Cleanup all associated storage items.
888	pub(crate) fn cleanup() {
889		VoterSnapshotStatus::<T>::kill();
890		NextElectionPage::<T>::kill();
891		ElectableStashes::<T>::kill();
892		Pallet::<T>::register_weight(T::DbWeight::get().writes(3));
893	}
894
895	/// Fetches the number of pages configured by the election provider.
896	pub(crate) fn election_pages() -> u32 {
897		<<T as Config>::ElectionProvider as ElectionProvider>::Pages::get()
898	}
899
900	/// Plan a new election
901	pub(crate) fn plan_new_election() -> Result<(), <T::ElectionProvider as ElectionProvider>::Error>
902	{
903		T::ElectionProvider::start()
904			.inspect_err(|e| log!(warn, "Election provider failed to start: {:?}", e))
905	}
906
907	pub(crate) fn maybe_fetch_election_results() -> (Weight, Box<dyn Fn(&mut WeightMeter)>) {
908		let Ok(Some(mut required_weight)) = T::ElectionProvider::status() else {
909			// no election ongoing
910			let weight = T::DbWeight::get().reads(1);
911			return (weight, Box::new(move |meter: &mut WeightMeter| meter.consume(weight)))
912		};
913		let exec = Box::new(move |meter: &mut WeightMeter| {
914			crate::log!(
915				debug,
916				"Election provider is ready, our status is {:?}",
917				NextElectionPage::<T>::get()
918			);
919
920			debug_assert!(
921				CurrentEra::<T>::get().unwrap_or(0) ==
922					ActiveEra::<T>::get().map_or(0, |a| a.index) + 1,
923				"Next era must be already planned."
924			);
925
926			let current_page = NextElectionPage::<T>::get()
927				.unwrap_or(Self::election_pages().defensive_saturating_sub(1));
928			let maybe_next_page = current_page.checked_sub(1);
929			crate::log!(debug, "fetching page {:?}, next {:?}", current_page, maybe_next_page);
930
931			Self::do_elect_paged(current_page);
932			NextElectionPage::<T>::set(maybe_next_page);
933
934			if maybe_next_page.is_none() {
935				let id = CurrentEra::<T>::get().defensive_unwrap_or(0);
936				let prune_up_to = Self::get_prune_up_to();
937				let rc_validators = ElectableStashes::<T>::take().into_iter().collect::<Vec<_>>();
938
939				crate::log!(
940					info,
941					"Sending new validator set of size {:?} to RC. ID: {:?}, prune_up_to: {:?}",
942					rc_validators.len(),
943					id,
944					prune_up_to
945				);
946				T::RcClientInterface::validator_set(rc_validators, id, prune_up_to);
947			}
948
949			// consume the reported worst case weight.
950			meter.consume(required_weight)
951		});
952
953		// Add a few things to the required weights that are not captured in `do_elect_paged`, which
954		// is benchmarked via `fetch_page`.
955		// * 1 extra read and write for `NextElectionPage`
956		// * 1 extra write for `RcClientInterface::validator_set` (implementation leak -- we assume
957		//   that we know this writes one storage item under the hood)
958		// * 1 extra read for `CurrentEra`
959		// * 1 extra read for `BondedEras` in `get_prune_up_to`
960		// ElectableStashes already read in `do_elect_paged`
961		required_weight.saturating_accrue(T::DbWeight::get().reads_writes(3, 2));
962
963		(required_weight, exec)
964	}
965
966	/// Get the right value of the first session that needs to be pruned on the RC's historical
967	/// session pallet.
968	fn get_prune_up_to() -> Option<SessionIndex> {
969		let bonded_eras = BondedEras::<T>::get();
970
971		// get the first session of the oldest era in the bonded eras.
972		if bonded_eras.is_full() {
973			bonded_eras.first().map(|(_, first_session)| first_session.saturating_sub(1))
974		} else {
975			None
976		}
977	}
978
979	/// Paginated elect.
980	///
981	/// Fetches the election page with index `page` from the election provider.
982	///
983	/// The results from the elect call should be stored in the `ElectableStashes` storage. In
984	/// addition, it stores stakers' information for next planned era based on the paged
985	/// solution data returned.
986	///
987	/// If any new election winner does not fit in the electable stashes storage, it truncates
988	/// the result of the election. We ensure that only the winners that are part of the
989	/// electable stashes have exposures collected for the next era.
990	pub(crate) fn do_elect_paged(page: PageIndex) {
991		let election_result = T::ElectionProvider::elect(page);
992		match election_result {
993			Ok(supports) => {
994				let inner_processing_results = Self::do_elect_paged_inner(supports);
995				if let Err(not_included) = inner_processing_results {
996					defensive!(
997						"electable stashes exceeded limit, unexpected but election proceeds.\
998                		{} stashes from election result discarded",
999						not_included
1000					);
1001				};
1002
1003				Pallet::<T>::deposit_event(Event::PagedElectionProceeded {
1004					page,
1005					result: inner_processing_results.map(|x| x as u32).map_err(|x| x as u32),
1006				});
1007			},
1008			Err(e) => {
1009				log!(warn, "election provider page failed due to {:?} (page: {})", e, page);
1010				Pallet::<T>::deposit_event(Event::PagedElectionProceeded { page, result: Err(0) });
1011			},
1012		}
1013	}
1014
1015	/// Inner implementation of [`Self::do_elect_paged`].
1016	///
1017	/// Returns an error if adding election winners to the electable stashes storage fails due
1018	/// to exceeded bounds. In case of error, it returns the index of the first stash that
1019	/// failed to be included.
1020	pub(crate) fn do_elect_paged_inner(
1021		mut supports: BoundedSupportsOf<T::ElectionProvider>,
1022	) -> Result<usize, usize> {
1023		let planning_era = Rotator::<T>::planned_era();
1024
1025		match Self::add_electables(supports.iter().map(|(s, _)| s.clone())) {
1026			Ok(added) => {
1027				let exposures = Self::collect_exposures(supports);
1028				let _ = Self::store_stakers_info(exposures, planning_era);
1029				Ok(added)
1030			},
1031			Err(not_included_idx) => {
1032				let not_included = supports.len().saturating_sub(not_included_idx);
1033
1034				log!(
1035					warn,
1036					"not all winners fit within the electable stashes, excluding {:?} accounts from solution.",
1037					not_included,
1038				);
1039
1040				// filter out supports of stashes that do not fit within the electable stashes
1041				// storage bounds to prevent collecting their exposures.
1042				supports.truncate(not_included_idx);
1043				let exposures = Self::collect_exposures(supports);
1044				let _ = Self::store_stakers_info(exposures, planning_era);
1045
1046				Err(not_included)
1047			},
1048		}
1049	}
1050
1051	/// Process the output of a paged election.
1052	///
1053	/// Store staking information for the new planned era of a single election page.
1054	pub(crate) fn store_stakers_info(
1055		exposures: BoundedExposuresOf<T>,
1056		new_planned_era: EraIndex,
1057	) -> BoundedVec<T::AccountId, MaxWinnersPerPageOf<T::ElectionProvider>> {
1058		// populate elected stash, stakers, exposures, and the snapshot of validator prefs.
1059		let mut total_stake_page: BalanceOf<T> = Zero::zero();
1060		let mut elected_stashes_page = Vec::with_capacity(exposures.len());
1061		let mut total_backers = 0u32;
1062
1063		exposures.into_iter().for_each(|(stash, exposure)| {
1064			log!(
1065				trace,
1066				"storing exposure for stash {:?} with {:?} own-stake and {:?} backers",
1067				stash,
1068				exposure.own,
1069				exposure.others.len()
1070			);
1071			// build elected stash.
1072			elected_stashes_page.push(stash.clone());
1073			// accumulate total stake and backer count for bookkeeping.
1074			total_stake_page = total_stake_page.saturating_add(exposure.total);
1075			total_backers += exposure.others.len() as u32;
1076			// set or update staker exposure for this era.
1077			Eras::<T>::upsert_exposure(new_planned_era, &stash, exposure);
1078		});
1079
1080		let elected_stashes: BoundedVec<_, MaxWinnersPerPageOf<T::ElectionProvider>> =
1081			elected_stashes_page
1082				.try_into()
1083				.expect("both types are bounded by MaxWinnersPerPageOf; qed");
1084
1085		// adds to total stake in this era.
1086		Eras::<T>::add_total_stake(new_planned_era, total_stake_page);
1087
1088		// collect or update the pref of all winners.
1089		// TODO: rather inefficient, we can do this once at the last page across all entries in
1090		// `ElectableStashes`.
1091		for stash in &elected_stashes {
1092			let pref = Validators::<T>::get(stash);
1093			Eras::<T>::set_validator_prefs(new_planned_era, stash, pref);
1094		}
1095
1096		log!(
1097			debug,
1098			"stored a page of stakers with {:?} validators and {:?} total backers for era {:?}",
1099			elected_stashes.len(),
1100			total_backers,
1101			new_planned_era,
1102		);
1103
1104		elected_stashes
1105	}
1106
1107	/// Consume a set of [`BoundedSupports`] from [`sp_npos_elections`] and collect them into a
1108	/// [`Exposure`].
1109	///
1110	/// Returns vec of all the exposures of a validator in `paged_supports`, bounded by the
1111	/// number of max winners per page returned by the election provider.
1112	fn collect_exposures(
1113		supports: BoundedSupportsOf<T::ElectionProvider>,
1114	) -> BoundedExposuresOf<T> {
1115		let total_issuance = asset::total_issuance::<T>();
1116		let to_currency = |e: frame_election_provider_support::ExtendedBalance| {
1117			T::CurrencyToVote::to_currency(e, total_issuance)
1118		};
1119
1120		supports
1121			.into_iter()
1122			.map(|(validator, support)| {
1123				// Build `struct exposure` from `support`.
1124				let mut others = Vec::with_capacity(support.voters.len());
1125				let mut own: BalanceOf<T> = Zero::zero();
1126				let mut total: BalanceOf<T> = Zero::zero();
1127				support
1128					.voters
1129					.into_iter()
1130					.map(|(nominator, weight)| (nominator, to_currency(weight)))
1131					.for_each(|(nominator, stake)| {
1132						if nominator == validator {
1133							defensive_assert!(own == Zero::zero(), "own stake should be unique");
1134							own = own.saturating_add(stake);
1135						} else {
1136							others.push(IndividualExposure { who: nominator, value: stake });
1137						}
1138						total = total.saturating_add(stake);
1139					});
1140
1141				let exposure = Exposure { own, others, total };
1142				(validator, exposure)
1143			})
1144			.try_collect()
1145			.expect("we only map through support vector which cannot change the size; qed")
1146	}
1147
1148	/// Adds a new set of stashes to the electable stashes.
1149	///
1150	/// Returns:
1151	///
1152	/// `Ok(newly_added)` if all stashes were added successfully.
1153	/// `Err(first_un_included)` if some stashes cannot be added due to bounds.
1154	pub(crate) fn add_electables(
1155		new_stashes: impl Iterator<Item = T::AccountId>,
1156	) -> Result<usize, usize> {
1157		ElectableStashes::<T>::mutate(|electable| {
1158			let pre_size = electable.len();
1159
1160			for (idx, stash) in new_stashes.enumerate() {
1161				if electable.try_insert(stash).is_err() {
1162					return Err(idx);
1163				}
1164			}
1165
1166			Ok(electable.len() - pre_size)
1167		})
1168	}
1169}