referrerpolicy=no-referrer-when-downgrade

pallet_people/
lib.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! # People Pallet
19//!
20//! A pallet managing the registry of proven individuals.
21//!
22//! ## Overview
23//!
24//! The People pallet stores and manages identifiers of individuals who have proven their
25//! personhood. It tracks their personal IDs, organizes their cryptographic keys into rings, and
26//! allows them to use contextual aliases through authentication in extensions. When transactions
27//! include cryptographic proofs of belonging to the people set, the pallet's transaction extension
28//! verifies these proofs before allowing the transaction to proceed. This enables other pallets to
29//! check if actions come from unique persons while preserving privacy through the ring-based
30//! structure.
31//!
32//! The pallet accepts new persons after they prove their uniqueness elsewhere, stores their
33//! information, and supports removing persons via suspensions. While other systems (e.g., wallets)
34//! generate the proofs, this pallet handles the storage of all necessary data and verifies the
35//! proofs when used.
36//!
37//! ## Key Features
38//!
39//! - **Stores Identity Data**: Tracks personal IDs and cryptographic keys of proven persons
40//! - **Organizes Keys**: Groups keys into rings to enable privacy-preserving proofs
41//! - **Verifies Proofs**: Checks personhood proofs attached to transactions
42//! - **Links Accounts**: Allows connecting blockchain accounts to contextual aliases
43//! - **Manages Registry**: Adds proven persons and will support removing them
44//!
45//! ## Interface
46//!
47//! ### Dispatchable Functions
48//!
//! - `set_alias_account(origin, account)`: Link an account to a contextual alias. Once linked, this
50//!   allows the account to dispatch transactions as a person with the alias origin using a regular
51//!   signed transaction with a nonce, providing a simpler alternative to attaching full proofs.
52//! - `unset_alias_account(origin)`: Remove an account-alias link.
53//! - `merge_rings`: Merge the people in two rings into a single, new ring.
54//! - `force_recognize_personhood`: Recognize a set of people without any additional checks.
55//! - `set_personal_id_account`: Set a personal id account.
56//! - `unset_personal_id_account`: Unset the personal id account.
57//! - `migrate_included_key`: Migrate the key for a person who was onboarded and is currently
58//!   included in a ring.
59//! - `migrate_onboarding_key`: Migrate the key for a person who is currently onboarding. The
60//!   operation is instant, replacing the old key in the onboarding queue.
61//! - `set_onboarding_size`: Force set the onboarding size for new people. This call requires root
62//!   privileges.
63//! - `build_ring_manual`: Manually build a ring root by including registered people. The
64//!   transaction fee is refunded on a successful call.
65//! - `onboard_people_manual`: Manually onboard people into a ring. The transaction fee is refunded
66//!   on a successful call.
67//!
68//! ### Automated tasks performed by the pallet in hooks
69//!
70//! - Ring building: Build or update a ring's cryptographic commitment. This task processes queued
71//!   keys into a ring commitment that enables proof generation and verification. Since ring
72//!   construction, or rather adding keys to the ring, is computationally expensive, it's performed
73//!   periodically in batches rather than processing each key immediately. The batch size needs to
74//!   be reasonably large to enhance privacy by obscuring the exact timing of when individuals' keys
75//!   were added to the ring, making it more difficult to correlate specific persons with their
76//!   keys.
77//! - People onboarding: Onboard people from the onboarding queue into a ring. This task takes the
78//!   unincluded keys of recognized people from the onboarding queue and registers them into the
79//!   ring. People can be onboarded only in batches of at least `OnboardingSize` and when the
80//!   remaining open slots in a ring are at least `OnboardingSize`. This does not compute the root,
81//!   that is done using `build_ring`.
82//! - Cleaning of suspended people: Remove people's keys marked as suspended or inactive from rings.
83//!   The keys are stored in the `PendingSuspensions` map and they are removed from rings and their
//!   roots are reset. The ring roots will subsequently be rebuilt sequentially from scratch in the
//!   ring building phase.
86//! - Key migration: Migrate the keys for people who were onboarded and are currently included in
87//!   rings. The migration is not instant as the key replacement and subsequent inclusion in a new
88//!   ring root will happen only after the next mutation session.
89//! - Onboarding queue page merging: Merge the two pages at the front of the onboarding queue. After
90//!   a round of suspensions, it is possible for the second page of the onboarding queue to be left
91//!   with few members such that, if the first page also has few members, the total count is below
92//!   the required onboarding size, thus stalling the queue. This function fixes this by moving the
93//!   people from the first page to the front of the second page, defragmenting the queue.
94//!
95//! ### Transaction Extension
96//!
97//! The pallet provides the `AsPerson` transaction extension that allows transactions to be
98//! dispatched with special origins: `PersonalIdentity` and `PersonalAlias`. These origins prove the
99//! transaction comes from a unique person, either through their identity or through a contextual
100//! alias. To make use of the personhood system, other pallets should check for these origins.
101//!
102//! The extension verifies the proof of personhood during transaction validation and, if valid,
103//! transforms the transaction's origin into one of these special origins.
104//!
105//! ## Usage
106//!
107//! Other pallets can verify personhood through origin checks:
108//!
109//! - `EnsurePersonalIdentity`: Verifies the origin represents a specific person using their
110//!   PersonalId
111//! - `EnsurePersonalAlias`: Verifies the origin has a valid alias for any context
112//! - `EnsurePersonalAliasInContext`: Verifies the origin has a valid alias for a specific context
113
114#![cfg_attr(not(feature = "std"), no_std)]
115#![recursion_limit = "128"]
116#![allow(clippy::borrowed_box)]
117extern crate alloc;
118use alloc::{boxed::Box, vec::Vec};
119
120#[cfg(test)]
121mod mock;
122#[cfg(test)]
123mod tests;
124
125#[cfg(feature = "runtime-benchmarks")]
126pub mod benchmarking;
127pub mod extension;
128pub mod types;
129pub mod weights;
130pub use pallet::*;
131pub use types::*;
132pub use weights::WeightInfo;
133
134use codec::{Decode, Encode, MaxEncodedLen};
135use core::{
136	cmp::{self},
137	ops::Range,
138};
139use frame_support::{
140	dispatch::{
141		extract_actual_weight, DispatchInfo, DispatchResultWithPostInfo, GetDispatchInfo,
142		PostDispatchInfo,
143	},
144	storage::with_storage_layer,
145	traits::{
146		reality::{
147			AddOnlyPeopleTrait, Context, ContextualAlias, CountedMembers, PeopleTrait, PersonalId,
148			RingIndex,
149		},
150		Defensive, EnsureOriginWithArg, IsSubType, OriginTrait,
151	},
152	transactional,
153	weights::WeightMeter,
154};
155use scale_info::TypeInfo;
156use sp_runtime::{
157	traits::{BadOrigin, Dispatchable},
158	ArithmeticError, Debug, SaturatedConversion, Saturating,
159};
160use verifiable::{Alias, GenerateVerifiable};
161
162#[cfg(feature = "runtime-benchmarks")]
163pub use benchmarking::BenchmarkHelper;
164
165#[frame_support::pallet]
166pub mod pallet {
167	use super::*;
168	use frame_support::{pallet_prelude::*, traits::Contains};
169	use frame_system::pallet_prelude::{BlockNumberFor, *};
170
	/// Target string used for this pallet's `log` output (see `on_idle` logging).
	const LOG_TARGET: &str = "runtime::people";
172
	/// The pallet type; see the module documentation for an overview.
	#[pallet::pallet]
	pub struct Pallet<T>(_);
175
	/// Configuration trait for the pallet.
	///
	/// The `frame_system::Config` super-trait carries associated-type bounds so that:
	/// - the runtime origin can be constructed from this pallet's [`Origin`] and its pallets-origin
	///   can be converted back into it (needed by the origin-transforming transaction extension),
	///   and
	/// - the runtime call exposes dispatch info and can be inspected for this pallet's calls via
	///   `IsSubType`.
	#[pallet::config]
	pub trait Config:
		frame_system::Config<
		RuntimeOrigin: From<Origin>
		                   + From<<Self::RuntimeOrigin as OriginTrait>::PalletsOrigin>
		                   + OriginTrait<
			PalletsOrigin: From<Origin>
			                   + TryInto<
				Origin,
				Error = <Self::RuntimeOrigin as OriginTrait>::PalletsOrigin,
			>,
		>,
		RuntimeCall: Parameter
		                 + GetDispatchInfo
		                 + IsSubType<Call<Self>>
		                 + Dispatchable<
			RuntimeOrigin = Self::RuntimeOrigin,
			Info = DispatchInfo,
			PostInfo = PostDispatchInfo,
		>,
	>
	{
		/// Weight information for extrinsics in this pallet.
		type WeightInfo: WeightInfo;

		/// The runtime event type.
		#[allow(deprecated)]
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// Trait allowing cryptographic proof of membership without exposing the underlying member.
		/// Normally a Ring-VRF.
		///
		/// The `DecodeWithMemTracking` bounds let proofs, signatures and members travel inside
		/// decoded extrinsics.
		type Crypto: GenerateVerifiable<
			Proof: Send + Sync + DecodeWithMemTracking,
			Signature: Send + Sync + DecodeWithMemTracking,
			Member: DecodeWithMemTracking,
		>;

		/// Contexts which may validly have an account alias behind it for everyone.
		type AccountContexts: Contains<Context>;

		/// Number of chunks per page.
		#[pallet::constant]
		type ChunkPageSize: Get<u32>;

		/// Maximum number of people included in a ring before a new one is created.
		#[pallet::constant]
		type MaxRingSize: Get<u32>;

		/// Maximum number of people included in an onboarding queue page before a new one is
		/// created.
		#[pallet::constant]
		type OnboardingQueuePageSize: Get<u32>;

		/// Helper for benchmarks.
		#[cfg(feature = "runtime-benchmarks")]
		type BenchmarkHelper: BenchmarkHelper<<Self::Crypto as GenerateVerifiable>::StaticChunk>;
	}
233
	/// The ring root (cryptographic commitment plus revision and intermediate state) of each ring.
	///
	/// NOTE(review): the previous comment ("The current individuals we recognise") looks like a
	/// copy-paste from the `Keys`/`People` maps; this map stores `RingRoot` values per ring index —
	/// confirm wording against the original repository.
	#[pallet::storage]
	pub type Root<T> = StorageMap<_, Blake2_128Concat, RingIndex, RingRoot<T>>;

	/// Keeps track of the ring index currently being populated.
	#[pallet::storage]
	pub type CurrentRingIndex<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// The number of queued people required before a batch is onboarded into a ring.
	///
	/// NOTE(review): per the module docs ("batches of at least `OnboardingSize`") this acts as a
	/// minimum batch size, not a maximum — confirm and align the wording.
	#[pallet::storage]
	pub type OnboardingSize<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// Hint for the maximum number of people that can be included in a ring through a single root
	/// building call. If no value is set, then the onboarding size will be used instead.
	#[pallet::storage]
	pub type RingBuildingPeopleLimit<T: Config> = StorageValue<_, u32, OptionQuery>;

	/// Both the keys that are included in built rings
	/// and the keys that will be used in future rings.
	#[pallet::storage]
	pub type RingKeys<T: Config> = StorageMap<
		_,
		Blake2_128Concat,
		RingIndex,
		BoundedVec<MemberOf<T>, T::MaxRingSize>,
		ValueQuery,
	>;

	/// Stores the meta information for each ring, the number of keys and how many are actually
	/// included in the root.
	#[pallet::storage]
	pub type RingKeysStatus<T: Config> =
		StorageMap<_, Blake2_128Concat, RingIndex, RingStatus, ValueQuery>;

	/// A map of all rings which currently have pending suspensions and need cleaning, along with
	/// their respective suspended keys which need to be removed.
	///
	/// The `u32` entries appear to be ring positions of the suspended keys (see
	/// `Error::KeyAlreadySuspended`) — confirm against `remove_suspended_keys`.
	#[pallet::storage]
	pub type PendingSuspensions<T: Config> =
		StorageMap<_, Twox64Concat, RingIndex, BoundedVec<u32, T::MaxRingSize>, ValueQuery>;

	/// The number of people currently included in a ring.
	#[pallet::storage]
	pub type ActiveMembers<T: Config> = StorageValue<_, u32, ValueQuery>;
277
	/// The current individuals we recognise, but not necessarily yet included in a ring.
	///
	/// Look-up from the crypto (public) key to the immutable ID of the individual (`PersonalId`). A
	/// person can have two different entries in this map if they queued a key migration which
	/// hasn't been enacted yet.
	#[pallet::storage]
	pub type Keys<T> = CountedStorageMap<_, Blake2_128Concat, MemberOf<T>, PersonalId>;

	/// A map of all the people who have declared their intent to migrate their keys and are waiting
	/// for the next mutation session.
	///
	/// Maps the person's `PersonalId` to the replacement key.
	#[pallet::storage]
	pub type KeyMigrationQueue<T: Config> =
		StorageMap<_, Blake2_128Concat, PersonalId, MemberOf<T>>;

	/// The current individuals we recognise, but not necessarily yet included in a ring.
	///
	/// Immutable ID of the individual (`PersonalId`) to information about their key and status.
	#[pallet::storage]
	pub type People<T: Config> =
		StorageMap<_, Blake2_128Concat, PersonalId, PersonRecord<MemberOf<T>, T::AccountId>>;

	/// Conversion of a contextual alias to an account ID.
	///
	/// See `AccountToAlias` for the account-keyed direction (which stores a
	/// `RevisedContextualAlias`).
	#[pallet::storage]
	pub type AliasToAccount<T> = StorageMap<
		_,
		Blake2_128Concat,
		ContextualAlias,
		<T as frame_system::Config>::AccountId,
		OptionQuery,
	>;

	/// Conversion of an account ID to a contextual alias.
	#[pallet::storage]
	pub type AccountToAlias<T> = StorageMap<
		_,
		Blake2_128Concat,
		<T as frame_system::Config>::AccountId,
		RevisedContextualAlias,
		OptionQuery,
	>;

	/// Association of an account ID to a personal ID.
	///
	/// Managed with `set_personal_id_account` and `unset_personal_id_account`.
	/// Reverse lookup is inside `People` storage, inside the record.
	#[pallet::storage]
	pub type AccountToPersonalId<T> = StorageMap<
		_,
		Blake2_128Concat,
		<T as frame_system::Config>::AccountId,
		PersonalId,
		OptionQuery,
	>;
331
	/// Paginated collection of static chunks used by the verifiable crypto.
	///
	/// Populated at genesis from `GenesisConfig::encoded_chunks`, `ChunkPageSize` entries per page.
	#[pallet::storage]
	pub type Chunks<T> = StorageMap<_, Twox64Concat, PageIndex, ChunksOf<T>, OptionQuery>;

	/// The next free and never reserved personal ID.
	#[pallet::storage]
	pub type NextPersonalId<T> = StorageValue<_, PersonalId, ValueQuery>;

	/// The state of the pallet regarding the actions that are currently allowed to be performed on
	/// all existing rings.
	#[pallet::storage]
	pub type RingsState<T> = StorageValue<_, RingMembersState, ValueQuery>;

	/// Candidates' reserved identities which we track.
	#[pallet::storage]
	pub type ReservedPersonalId<T: Config> =
		StorageMap<_, Twox64Concat, PersonalId, (), OptionQuery>;

	/// Keeps track of the page indices of the head and tail of the onboarding queue.
	///
	/// Stored as `(head, tail)`; page indices wrap around to 0 on overflow (see
	/// `onboard_people_manual`).
	#[pallet::storage]
	pub type QueuePageIndices<T: Config> = StorageValue<_, (PageIndex, PageIndex), ValueQuery>;

	/// Paginated collection of people public keys ready to be included in a ring.
	#[pallet::storage]
	pub type OnboardingQueue<T> = StorageMap<
		_,
		Twox64Concat,
		PageIndex,
		BoundedVec<MemberOf<T>, <T as Config>::OnboardingQueuePageSize>,
		ValueQuery,
	>;
363
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// An individual has had their personhood recognised and indexed.
		PersonhoodRecognized { who: PersonalId, key: MemberOf<T> },
		/// An individual is being onboarded (queued for inclusion in a ring).
		///
		/// NOTE(review): the previous comment duplicated `PersonhoodRecognized`'s wording
		/// ("recognised again and indexed") — confirm intended semantics against the emitting call
		/// sites.
		PersonOnboarding { who: PersonalId, key: MemberOf<T> },
	}
372
373	#[pallet::extra_constants]
374	impl<T: Config> Pallet<T> {
375		/// The amount of block number tolerance we allow for a setup account transaction.
376		///
377		/// `set_alias_account` and `set_personal_id_account` calls contains
378		/// `call_valid_at` as a parameter, those calls are valid if the block number is within
379		/// the tolerance period.
380		pub fn account_setup_time_tolerance() -> BlockNumberFor<T> {
381			600u32.into()
382		}
383	}
384
	#[pallet::error]
	pub enum Error<T> {
		/// The supplied identifier does not represent a person.
		NotPerson,
		/// The given person has no associated key.
		NoKey,
		/// The context is not a member of those allowed to have account aliases held.
		InvalidContext,
		/// The account is not known.
		InvalidAccount,
		/// The account is already in use under another alias.
		AccountInUse,
		/// The proof is invalid.
		InvalidProof,
		/// The signature is invalid.
		InvalidSignature,
		/// There are not yet any members of our personhood set.
		NoMembers,
		/// The root cannot be finalized as there are still unpushed members.
		Incomplete,
		/// The root is still fresh.
		StillFresh,
		/// Too many members have been pushed.
		TooManyMembers,
		/// Key already in use by another person.
		KeyAlreadyInUse,
		/// The old key was not found when expected.
		KeyNotFound,
		/// Could not push member into the ring.
		CouldNotPush,
		/// The record is already using this key.
		SameKey,
		/// Personal Id was not reserved.
		PersonalIdNotReserved,
		/// Personal Id has never been reserved, so its reservation cannot be renewed.
		PersonalIdReservationCannotRenew,
		/// Personal Id was not reserved or not already recognized.
		PersonalIdNotReservedOrNotRecognized,
		/// Ring cannot be merged if it's the top ring.
		InvalidRing,
		/// Ring cannot be built while there are suspensions pending.
		SuspensionsPending,
		/// Ring cannot be merged if it's not below 1/2 capacity.
		RingAboveMergeThreshold,
		/// Suspension indices provided are invalid.
		InvalidSuspensions,
		/// A mutating action was queued when there was no mutation session in progress.
		NoMutationSession,
		/// A mutating session could not be started.
		CouldNotStartMutationSession,
		/// Cannot merge rings while a suspension session is in progress.
		SuspensionSessionInProgress,
		/// Call is too late or too early.
		TimeOutOfRange,
		/// Alias <-> Account is already set and up to date.
		AliasAccountAlreadySet,
		/// Personhood cannot be resumed if it is not suspended.
		NotSuspended,
		/// Personhood is suspended.
		Suspended,
		/// Invalid state for attempted key migration.
		InvalidKeyMigration,
		/// Invalid suspension of a key belonging to a person whose index in the ring has already
		/// been included in the pending suspensions list.
		KeyAlreadySuspended,
		/// The onboarding size must not exceed the maximum ring size.
		InvalidOnboardingSize,
	}
453
	/// Special dispatch origins provided by this pallet; both prove the transaction comes from a
	/// unique person (see the `AsPerson` transaction extension in the module docs).
	#[pallet::origin]
	#[derive(
		Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo, DecodeWithMemTracking,
	)]
	pub enum Origin {
		/// The call is dispatched by a specific recognised person, identified by `PersonalId`.
		PersonalIdentity(PersonalId),
		/// The call is dispatched under a contextual alias of some recognised person.
		PersonalAlias(RevisedContextualAlias),
	}
462
463	#[pallet::hooks]
464	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
465		fn integrity_test() {
466			assert!(
467				<T as Config>::ChunkPageSize::get() > 0,
468				"chunk page size must hold at least one element"
469			);
470			assert!(<T as Config>::MaxRingSize::get() > 0, "rings must hold at least one person");
471			assert!(
472				<T as Config>::MaxRingSize::get() <= <T as Config>::OnboardingQueuePageSize::get(),
473				"onboarding queue page size must greater than or equal to max ring size"
474			);
475		}
476
477		fn on_poll(_: BlockNumberFor<T>, weight_meter: &mut WeightMeter) {
478			// Check if there are any keys to migrate.
479			if weight_meter.try_consume(T::WeightInfo::on_poll_base()).is_err() {
480				return;
481			}
482			if RingsState::<T>::get().key_migration() {
483				Self::migrate_keys(weight_meter);
484			}
485
486			// Check if there are any rings with suspensions and try to clean the first one.
487			if let Some(ring_index) = PendingSuspensions::<T>::iter_keys().next() {
488				if Self::should_remove_suspended_keys(ring_index, true) &&
489					weight_meter.can_consume(T::WeightInfo::remove_suspended_people(
490						T::MaxRingSize::get(),
491					)) {
492					let actual = Self::remove_suspended_keys(ring_index);
493					weight_meter.consume(actual)
494				}
495			}
496
497			let merge_weight = T::WeightInfo::merge_queue_pages();
498			if !weight_meter.can_consume(merge_weight) {
499				return;
500			}
501			let merge_action = Self::should_merge_queue_pages();
502			if let QueueMergeAction::Merge {
503				initial_head,
504				new_head,
505				first_key_page,
506				second_key_page,
507			} = merge_action
508			{
509				Self::merge_queue_pages(initial_head, new_head, first_key_page, second_key_page);
510				weight_meter.consume(merge_weight);
511			}
512		}
513
514		fn on_idle(_block: BlockNumberFor<T>, limit: Weight) -> Weight {
515			let mut weight_meter = WeightMeter::with_limit(limit.saturating_div(2));
516			let on_idle_weight = T::WeightInfo::on_idle_base();
517			if !weight_meter.can_consume(on_idle_weight) {
518				return weight_meter.consumed();
519			}
520			weight_meter.consume(on_idle_weight);
521
522			let max_ring_size = T::MaxRingSize::get();
523			let remove_people_weight = T::WeightInfo::remove_suspended_people(max_ring_size);
524			let rings_state = RingsState::<T>::get();
525
526			// Check if there are any rings with suspensions and try to clean as many as possible.
527			// First check the state of the rings allow for removals.
528			if !rings_state.append_only() {
529				return weight_meter.consumed();
530			}
531			// Account for the first iteration of the loop.
532			let suspension_step_weight = T::WeightInfo::pending_suspensions_iteration();
533			if !weight_meter.can_consume(suspension_step_weight) {
534				return weight_meter.consumed();
535			}
536			// Always renew the iterator because in each iteration we remove a key, which would make
537			// the old iterator unstable.
538			while let Some(ring_index) = PendingSuspensions::<T>::iter_keys().next() {
539				weight_meter.consume(suspension_step_weight);
540				// Break the loop if we run out of weight.
541				if !weight_meter.can_consume(remove_people_weight) {
542					return weight_meter.consumed();
543				}
544				if Self::should_remove_suspended_keys(ring_index, false) {
545					let actual = Self::remove_suspended_keys(ring_index);
546					weight_meter.consume(actual)
547				}
548				// Break the loop if we run out of weight.
549				if !weight_meter.can_consume(suspension_step_weight) {
550					return weight_meter.consumed();
551				}
552			}
553
554			// Ring state must be append only for both onboarding and ring building, but it is
555			// already checked above.
556
557			let onboard_people_weight = T::WeightInfo::onboard_people();
558			if !weight_meter.can_consume(onboard_people_weight) {
559				return weight_meter.consumed();
560			}
561			let op_res = with_storage_layer::<(), DispatchError, _>(|| Self::onboard_people());
562			weight_meter.consume(onboard_people_weight);
563			if let Err(e) = op_res {
564				log::debug!(target: LOG_TARGET, "failed to onboard people: {:?}", e);
565			}
566
567			let current_ring = CurrentRingIndex::<T>::get();
568			let should_build_ring_weight = T::WeightInfo::should_build_ring(max_ring_size);
569			let build_ring_weight = T::WeightInfo::build_ring(max_ring_size);
570			for ring_index in (0..=current_ring).rev() {
571				if !weight_meter.can_consume(should_build_ring_weight) {
572					return weight_meter.consumed();
573				}
574
575				let maybe_to_include = Self::should_build_ring(ring_index, max_ring_size);
576				weight_meter.consume(should_build_ring_weight);
577				let Some(to_include) = maybe_to_include else { continue };
578				if !weight_meter.can_consume(build_ring_weight) {
579					return weight_meter.consumed();
580				}
581				let op_res = with_storage_layer::<(), DispatchError, _>(|| {
582					Self::build_ring(ring_index, to_include)
583				});
584				weight_meter.consume(build_ring_weight);
585				if let Err(e) = op_res {
586					log::error!(target: LOG_TARGET, "failed to build ring: {:?}", e);
587				}
588			}
589
590			weight_meter.consumed()
591		}
592	}
593
	#[pallet::genesis_config]
	pub struct GenesisConfig<T: Config> {
		/// SCALE-encoded `Vec` of the crypto's static chunks; decoded and paginated into the
		/// `Chunks` storage map at genesis.
		pub encoded_chunks: Vec<u8>,
		#[serde(skip)]
		pub _phantom_data: core::marker::PhantomData<T>,
		/// Initial value for the `OnboardingSize` storage item.
		pub onboarding_size: u32,
	}
601
602	impl<T: Config> Default for GenesisConfig<T> {
603		fn default() -> Self {
604			// The default genesis config will put in the chunks that pertain to the ring vrf
605			// implementation in the `verifiable` crate. This default config will not work for other
606			// custom `GenerateVerifiable` implementations.
607			use verifiable::ring_vrf_impl::StaticChunk;
608			let params = verifiable::ring_vrf_impl::ring_verifier_builder_params();
609			let chunks: Vec<StaticChunk> = params.0.iter().map(|c| StaticChunk(*c)).collect();
610			Self {
611				encoded_chunks: chunks.encode(),
612				_phantom_data: PhantomData,
613				onboarding_size: T::MaxRingSize::get(),
614			}
615		}
616	}
617
618	#[pallet::genesis_build]
619	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
620		fn build(&self) {
621			let chunks: Vec<<<T as Config>::Crypto as GenerateVerifiable>::StaticChunk> =
622				Decode::decode(&mut &(self.encoded_chunks.clone())[..])
623					.expect("couldn't decode chunks");
624			assert_eq!(chunks.len(), 1 << 9);
625			let page_size = <T as Config>::ChunkPageSize::get();
626
627			let mut page_idx = 0;
628			let mut chunk_idx = 0;
629			while chunk_idx < chunks.len() {
630				let chunk_idx_end = cmp::min(chunk_idx + page_size as usize, chunks.len());
631				let chunk_page: ChunksOf<T> = chunks[chunk_idx..chunk_idx_end]
632					.to_vec()
633					.try_into()
634					.expect("page size was checked against the array length; qed");
635				Chunks::<T>::insert(page_idx, chunk_page);
636				page_idx += 1;
637				chunk_idx = chunk_idx_end;
638			}
639
640			OnboardingSize::<T>::set(self.onboarding_size);
641		}
642	}
643
644	#[pallet::call(weight = <T as Config>::WeightInfo)]
645	impl<T: Config> Pallet<T> {
		/// Build a ring root by including registered people.
		///
		/// This task is performed automatically by the pallet through the `on_idle` hook whenever
		/// there is leftover weight in a block. This call is meant to be a backup in case of
		/// extreme congestion and should be submitted by signed origins.
		///
		/// - `ring_index`: the ring whose root should be (re)built.
		/// - `limit`: optional cap on how many keys to include; defaults to `MaxRingSize`.
		///
		/// Fails with `StillFresh` if there is nothing to include, `CouldNotPush` if the crypto
		/// rejects the members. The transaction fee is refunded on success (`Pays::No`).
		#[pallet::weight(
			T::WeightInfo::should_build_ring(
				limit.unwrap_or_else(T::MaxRingSize::get)
			).saturating_add(T::WeightInfo::build_ring(limit.unwrap_or_else(T::MaxRingSize::get))))]
		#[pallet::call_index(100)]
		pub fn build_ring_manual(
			origin: OriginFor<T>,
			ring_index: RingIndex,
			limit: Option<u32>,
		) -> DispatchResultWithPostInfo {
			ensure_signed(origin)?;

			// Get the keys for this ring, and make sure that the ring is full before we build it.
			let (keys, mut ring_status) = Self::ring_keys_and_info(ring_index);
			// `None` from `should_build_ring` means there is nothing to include yet.
			let to_include =
				Self::should_build_ring(ring_index, limit.unwrap_or_else(T::MaxRingSize::get))
					.ok_or(Error::<T>::StillFresh)?;

			// Get the current ring, and check it should be rebuilt.
			// Return the next revision.
			let (next_revision, mut intermediate) =
				if let Some(existing_root) = Root::<T>::get(ring_index) {
					// We should build a new ring. Return the new revision number we should use.
					(
						existing_root.revision.checked_add(1).ok_or(ArithmeticError::Overflow)?,
						existing_root.intermediate,
					)
				} else {
					// No ring has been built at this index, so we start at revision 0.
					(0, T::Crypto::start_members())
				};

			// Push the members. Only keys not yet included in the root are pushed, up to
			// `to_include` of them, continuing from the previous intermediate state.
			T::Crypto::push_members(
				&mut intermediate,
				keys.iter()
					.skip(ring_status.included as usize)
					.take(to_include as usize)
					.cloned(),
				Self::fetch_chunks,
			)
			.map_err(|_| Error::<T>::CouldNotPush)?;

			// By the end of the loop, we have included the maximum number of keys in the vector.
			ring_status.included = ring_status.included.saturating_add(to_include);
			RingKeysStatus::<T>::insert(ring_index, ring_status);

			// We create the root after pushing all members.
			let root = T::Crypto::finish_members(intermediate.clone());
			let ring_root = RingRoot { root, revision: next_revision, intermediate };
			Root::<T>::insert(ring_index, ring_root);

			// Successful manual builds are free of charge.
			Ok(Pays::No.into())
		}
705
		/// Onboard people into a ring by taking their keys from the onboarding queue and
		/// registering them into the ring. This does not compute the root, that is done using
		/// `build_ring`.
		///
		/// This task is performed automatically by the pallet through the `on_idle` hook whenever
		/// there is leftover weight in a block. This call is meant to be a backup in case of
		/// extreme congestion and should be submitted by signed origins.
		///
		/// Fails with `Incomplete` when the available batch does not satisfy the onboarding
		/// conditions. The transaction fee is refunded on success (`Pays::No`).
		#[pallet::weight(T::WeightInfo::onboard_people())]
		#[pallet::call_index(101)]
		pub fn onboard_people_manual(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
			ensure_signed(origin)?;

			// Get the keys for this ring, and make sure that the ring is full before we build it.
			let (top_ring_index, mut keys) = Self::available_ring();
			let mut ring_status = RingKeysStatus::<T>::get(top_ring_index);
			defensive_assert!(
				keys.len() == ring_status.total as usize,
				"Stored key count doesn't match the actual length"
			);

			let keys_len = keys.len() as u32;
			let open_slots = T::MaxRingSize::get().saturating_sub(keys_len);

			// Take the head page of the onboarding queue out of storage.
			let (mut head, tail) = QueuePageIndices::<T>::get();
			let old_head = head;
			let mut keys_to_include: Vec<MemberOf<T>> =
				OnboardingQueue::<T>::take(head).into_inner();

			// A `head != tail` condition should mean that there is at least one key in the page
			// following this one.
			if keys_to_include.len() < open_slots as usize && head != tail {
				// Page indices wrap around to 0 on overflow.
				head = head.checked_add(1).unwrap_or(0);
				let second_key_page = OnboardingQueue::<T>::take(head);
				defensive_assert!(!second_key_page.is_empty());
				keys_to_include.extend(second_key_page.into_iter());
			}

			let onboarding_size = OnboardingSize::<T>::get();

			// Decide how many people can be onboarded now and whether this fills the ring;
			// `None` means the batch is too small (see `OnboardingSize`).
			let (to_include, ring_filled) = Self::should_onboard_people(
				top_ring_index,
				&ring_status,
				open_slots,
				keys_to_include.len().saturated_into(),
				onboarding_size,
			)
			.ok_or(Error::<T>::Incomplete)?;

			// Everything past `to_include` stays in the queue.
			let mut remaining_keys = keys_to_include.split_off(to_include as usize);
			for key in keys_to_include.into_iter() {
				let personal_id = Keys::<T>::get(&key).defensive().ok_or(Error::<T>::NotPerson)?;
				let mut record =
					People::<T>::get(personal_id).defensive().ok_or(Error::<T>::KeyNotFound)?;
				// Record the person's position inside the ring for later suspension/migration.
				record.position = RingPosition::Included {
					ring_index: top_ring_index,
					ring_position: keys.len().saturated_into(),
					scheduled_for_removal: false,
				};
				People::<T>::insert(personal_id, record);
				keys.try_push(key).map_err(|_| Error::<T>::TooManyMembers)?;
			}
			RingKeys::<T>::insert(top_ring_index, keys);
			ActiveMembers::<T>::mutate(|active| *active = active.saturating_add(to_include));
			ring_status.total = ring_status.total.saturating_add(to_include);
			RingKeysStatus::<T>::insert(top_ring_index, ring_status);

			// Update the top ring index if this onboarding round filled the current ring.
			if ring_filled {
				CurrentRingIndex::<T>::mutate(|i| i.saturating_inc());
			}

			// Put the leftover keys back, re-splitting into two pages if they no longer fit one.
			if remaining_keys.len() > T::OnboardingQueuePageSize::get() as usize {
				let split_idx =
					remaining_keys.len().saturating_sub(T::OnboardingQueuePageSize::get() as usize);
				let second_page_keys: BoundedVec<MemberOf<T>, T::OnboardingQueuePageSize> =
					remaining_keys
						.split_off(split_idx)
						.try_into()
						.expect("the list shrunk so it must fit; qed");
				let remaining_keys: BoundedVec<MemberOf<T>, T::OnboardingQueuePageSize> =
					remaining_keys.try_into().expect("the list shrunk so it must fit; qed");
				OnboardingQueue::<T>::insert(old_head, remaining_keys);
				OnboardingQueue::<T>::insert(head, second_page_keys);
				QueuePageIndices::<T>::put((old_head, tail));
			} else if !remaining_keys.is_empty() {
				let remaining_keys: BoundedVec<MemberOf<T>, T::OnboardingQueuePageSize> =
					remaining_keys.try_into().expect("the list shrunk so it must fit; qed");
				OnboardingQueue::<T>::insert(head, remaining_keys);
				QueuePageIndices::<T>::put((head, tail));
			} else {
				// We have nothing to put back into the queue, so if this isn't the last page, move
				// the head to the next page of the queue.
				if head != tail {
					head = head.checked_add(1).unwrap_or(0);
				}
				QueuePageIndices::<T>::put((head, tail));
			}

			// Successful manual onboarding is free of charge.
			Ok(Pays::No.into())
		}
806
807		/// Merge the people in two rings into a single, new ring. In order for the rings to be
808		/// eligible for merging, they must be below 1/2 of max capacity, have no pending
809		/// suspensions and not be the top ring used for onboarding.
810		#[pallet::call_index(102)]
811		pub fn merge_rings(
812			origin: OriginFor<T>,
813			base_ring_index: RingIndex,
814			target_ring_index: RingIndex,
815		) -> DispatchResultWithPostInfo {
816			let _ = ensure_signed(origin)?;
817
818			ensure!(RingsState::<T>::get().append_only(), Error::<T>::SuspensionSessionInProgress);
819			// Top ring that onboards new candidates cannot be merged. Identical rings cannot be
820			// merged.
821			let current_ring_index = CurrentRingIndex::<T>::get();
822			ensure!(
823				base_ring_index != target_ring_index &&
824					base_ring_index != current_ring_index &&
825					target_ring_index != current_ring_index,
826				Error::<T>::InvalidRing
827			);
828
829			// Enforce eligibility criteria.
830			let (mut base_keys, mut base_ring_status) = Self::ring_keys_and_info(base_ring_index);
831			ensure!(
832				base_keys.len() < T::MaxRingSize::get() as usize / 2,
833				Error::<T>::RingAboveMergeThreshold
834			);
835			ensure!(
836				PendingSuspensions::<T>::decode_len(base_ring_index).unwrap_or(0) == 0,
837				Error::<T>::SuspensionsPending
838			);
839			let target_keys = RingKeys::<T>::get(target_ring_index);
840			RingKeysStatus::<T>::remove(target_ring_index);
841			ensure!(
842				target_keys.len() < T::MaxRingSize::get() as usize / 2,
843				Error::<T>::RingAboveMergeThreshold
844			);
845			ensure!(
846				PendingSuspensions::<T>::decode_len(target_ring_index).unwrap_or(0) == 0,
847				Error::<T>::SuspensionsPending
848			);
849
850			// Update the status of the ring to reflect the newly added keys.
851			base_ring_status.total =
852				base_ring_status.total.saturating_add(target_keys.len().saturated_into());
853
854			for key in target_keys {
855				let personal_id =
856					Keys::<T>::get(&key).defensive().ok_or(Error::<T>::KeyNotFound)?;
857				let mut record =
858					People::<T>::get(personal_id).defensive().ok_or(Error::<T>::NotPerson)?;
859				record.position = RingPosition::Included {
860					ring_index: base_ring_index,
861					ring_position: base_keys.len().saturated_into(),
862					scheduled_for_removal: false,
863				};
864				base_keys.try_push(key).map_err(|_| Error::<T>::TooManyMembers)?;
865				People::<T>::insert(personal_id, record)
866			}
867
868			// Newly added keys are not yet included.
869			RingKeys::<T>::insert(base_ring_index, base_keys);
870			RingKeysStatus::<T>::insert(base_ring_index, base_ring_status);
871			// Remove the stale ring root of the target ring. The keys in the target ring will be
872			// part of a valid ring root again when the base ring is rebuilt.
873			Root::<T>::remove(target_ring_index);
874			RingKeys::<T>::remove(target_ring_index);
875			RingKeysStatus::<T>::remove(target_ring_index);
876
877			Ok(Pays::No.into())
878		}
879
880		/// Dispatch a call under an alias using the `account <-> alias` mapping.
881		///
882		/// This is a call version of the transaction extension `AsPersonalAliasWithAccount`.
883		/// It is recommended to use the transaction extension instead when suitable.
884		#[pallet::call_index(0)]
885		#[pallet::weight(T::WeightInfo::under_alias().saturating_add(call.get_dispatch_info().call_weight))]
886		pub fn under_alias(
887			origin: OriginFor<T>,
888			call: Box<<T as frame_system::Config>::RuntimeCall>,
889		) -> DispatchResultWithPostInfo {
890			let account = ensure_signed(origin.clone())?;
891			let rev_ca = AccountToAlias::<T>::get(&account).ok_or(Error::<T>::InvalidAccount)?;
892			ensure!(
893				Root::<T>::get(rev_ca.ring).is_some_and(|ring| ring.revision == rev_ca.revision),
894				DispatchError::BadOrigin,
895			);
896
897			let derivation_weight = T::WeightInfo::under_alias();
898			let local_origin = Origin::PersonalAlias(rev_ca);
899			Self::derivative_call(origin, local_origin, *call, derivation_weight)
900		}
901
	/// Map an account to the caller's personal alias so the account can act on the alias'
	/// behalf (see `under_alias`).
	///
	/// This transaction is refunded if successful and no alias was previously set.
	///
	/// The call is valid from `call_valid_at` until
	/// `call_valid_at + account_setup_time_tolerance`.
	/// `account_setup_time_tolerance` is a constant available in the metadata.
	///
	/// Parameters:
	/// - `account`: The account to set the alias for.
	/// - `call_valid_at`: The block number when the call becomes valid.
	#[pallet::call_index(1)]
	pub fn set_alias_account(
		origin: OriginFor<T>,
		account: T::AccountId,
		call_valid_at: BlockNumberFor<T>,
	) -> DispatchResultWithPostInfo {
		// Origin must prove a personal alias along with its ring revision.
		let rev_ca = Self::ensure_revised_personal_alias(origin)?;
		// Enforce the validity window: `call_valid_at <= now <= call_valid_at + tolerance`.
		let now = frame_system::Pallet::<T>::block_number();
		let time_tolerance = Self::account_setup_time_tolerance();
		ensure!(
			call_valid_at <= now && now <= call_valid_at.saturating_add(time_tolerance),
			Error::<T>::TimeOutOfRange
		);
		// Only contexts explicitly allowed for account aliases are accepted.
		ensure!(T::AccountContexts::contains(&rev_ca.ca.context), Error::<T>::InvalidContext);
		// The account must not already be bound to a personal id.
		ensure!(!AccountToPersonalId::<T>::contains_key(&account), Error::<T>::AccountInUse);

		// Look up the account previously bound to this alias (if any) and the revised alias
		// stored for it.
		let old_account = AliasToAccount::<T>::get(&rev_ca.ca);
		let old_rev_ca = old_account.as_ref().and_then(AccountToAlias::<T>::get);

		// The existing binding is stale when it refers to a different ring or revision.
		let needs_revision = old_rev_ca.is_some_and(|old_rev_ca| {
			old_rev_ca.revision != rev_ca.revision || old_rev_ca.ring != rev_ca.ring
		});

		// Ensure it changes the account associated, or it needs revision.
		ensure!(
			old_account.as_ref() != Some(&account) || needs_revision,
			Error::<T>::AliasAccountAlreadySet
		);

		// If the old account is different from the new one:
		// * decrease the sufficients of the old account
		// * increase the sufficients of the new account
		// * check new account is not already in use
		if old_account.as_ref() != Some(&account) {
			ensure!(!AccountToAlias::<T>::contains_key(&account), Error::<T>::AccountInUse);
			if let Some(old_account) = &old_account {
				frame_system::Pallet::<T>::dec_sufficients(old_account);
				AccountToAlias::<T>::remove(old_account);
			}
			frame_system::Pallet::<T>::inc_sufficients(&account);
		}

		// Write both directions of the `account <-> alias` mapping.
		AccountToAlias::<T>::insert(&account, &rev_ca);
		AliasToAccount::<T>::insert(&rev_ca.ca, &account);

		// Refund when this is a first-time binding or a refresh of a stale revision.
		if old_account.is_none() || needs_revision {
			Ok(Pays::No.into())
		} else {
			Ok(Pays::Yes.into())
		}
	}
962
963		/// Remove the mapping from a particular alias to its registered account.
964		#[pallet::call_index(2)]
965		pub fn unset_alias_account(origin: OriginFor<T>) -> DispatchResult {
966			let alias = Self::ensure_personal_alias(origin)?;
967			let account = AliasToAccount::<T>::take(&alias).ok_or(Error::<T>::InvalidAccount)?;
968			AccountToAlias::<T>::remove(&account);
969			frame_system::Pallet::<T>::dec_sufficients(&account);
970
971			Ok(())
972		}
973
974		/// Recognize a set of people without any additional checks.
975		///
976		/// The people are identified by the provided list of keys and will each be assigned, in
977		/// order, the next available personal ID.
978		///
979		/// The origin for this call must have root privileges.
980		#[pallet::call_index(3)]
981		pub fn force_recognize_personhood(
982			origin: OriginFor<T>,
983			people: Vec<MemberOf<T>>,
984		) -> DispatchResultWithPostInfo {
985			ensure_root(origin)?;
986			for key in people {
987				let personal_id = Self::reserve_new_id();
988				Self::recognize_personhood(personal_id, Some(key))?;
989			}
990			Ok(().into())
991		}
992
	/// Set a personal id account.
	///
	/// The account can then be used to sign transactions on behalf of the personal id, and
	/// provide replay protection with the nonce.
	///
	/// This transaction is refunded if successful and no account was previously set for the
	/// personal id.
	///
	/// The call is valid from `call_valid_at` until
	/// `call_valid_at + account_setup_time_tolerance`.
	/// `account_setup_time_tolerance` is a constant available in the metadata.
	///
	/// Parameters:
	/// - `account`: The account to set the alias for.
	/// - `call_valid_at`: The block number when the call becomes valid.
	#[pallet::call_index(4)]
	pub fn set_personal_id_account(
		origin: OriginFor<T>,
		account: T::AccountId,
		call_valid_at: BlockNumberFor<T>,
	) -> DispatchResultWithPostInfo {
		// Origin must be a recognized person; yields their personal id.
		let id = Self::ensure_personal_identity(origin)?;
		// Enforce the validity window: `call_valid_at <= now <= call_valid_at + tolerance`.
		let now = frame_system::Pallet::<T>::block_number();
		let time_tolerance = Self::account_setup_time_tolerance();
		ensure!(
			call_valid_at <= now && now <= call_valid_at.saturating_add(time_tolerance),
			Error::<T>::TimeOutOfRange
		);
		// The account must be free: bound neither to a personal id nor to an alias.
		ensure!(!AccountToPersonalId::<T>::contains_key(&account), Error::<T>::AccountInUse);
		ensure!(!AccountToAlias::<T>::contains_key(&account), Error::<T>::AccountInUse);
		let mut record = People::<T>::get(id).ok_or(Error::<T>::NotPerson)?;
		// Replacing an existing account releases its sufficient reference and reverse
		// mapping, and the caller pays; first-time setup is refunded.
		let pays = if let Some(old_account) = record.account {
			frame_system::Pallet::<T>::dec_sufficients(&old_account);
			AccountToPersonalId::<T>::remove(&old_account);
			Pays::Yes
		} else {
			Pays::No
		};
		record.account = Some(account.clone());
		// Keep the new account alive in the system via a sufficient reference.
		frame_system::Pallet::<T>::inc_sufficients(&account);
		AccountToPersonalId::<T>::insert(&account, id);
		People::<T>::insert(id, &record);

		Ok(pays.into())
	}
1038
1039		/// Unset the personal id account.
1040		#[pallet::call_index(5)]
1041		pub fn unset_personal_id_account(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
1042			let id = Self::ensure_personal_identity(origin)?;
1043			let mut record = People::<T>::get(id).ok_or(Error::<T>::NotPerson)?;
1044			let account = record.account.take().ok_or(Error::<T>::InvalidAccount)?;
1045			AccountToPersonalId::<T>::take(&account).ok_or(Error::<T>::InvalidAccount)?;
1046			frame_system::Pallet::<T>::dec_sufficients(&account);
1047			People::<T>::insert(id, &record);
1048
1049			Ok(Pays::Yes.into())
1050		}
1051
	/// Migrate the key for a person who was onboarded and is currently included in a ring. The
	/// migration is not instant as the key replacement and subsequent inclusion in a new ring
	/// root will happen only after the next mutation session.
	#[pallet::call_index(6)]
	pub fn migrate_included_key(
		origin: OriginFor<T>,
		new_key: MemberOf<T>,
	) -> DispatchResultWithPostInfo {
		// Origin must be a recognized person.
		let id = Self::ensure_personal_identity(origin)?;
		// The new key must not already belong to anyone, including the caller.
		ensure!(!Keys::<T>::contains_key(&new_key), Error::<T>::KeyAlreadyInUse);
		let mut record = People::<T>::get(id).ok_or(Error::<T>::NotPerson)?;
		ensure!(record.key != new_key, Error::<T>::SameKey);
		match &record.position {
			// If the key is already included in a ring, enqueue it for migration during the
			// next mutation session.
			RingPosition::Included { ring_index, ring_position, .. } => {
				// If the person scheduled another migration before, remove the key we are
				// replacing from the key registry.
				if let Some(old_migrated_key) = KeyMigrationQueue::<T>::get(id) {
					Keys::<T>::remove(old_migrated_key);
				}
				// Add this new key to the migration queue.
				KeyMigrationQueue::<T>::insert(id, &new_key);
				// Mark this record as stale so the next ring rebuild drops the old key.
				record.position = RingPosition::Included {
					ring_index: *ring_index,
					ring_position: *ring_position,
					scheduled_for_removal: true,
				};
				// Update the record.
				People::<T>::insert(id, record);
			},
			// This call accepts migrations only for included keys; onboarding keys use
			// `migrate_onboarding_key` instead.
			RingPosition::Onboarding { .. } =>
				return Err(Error::<T>::InvalidKeyMigration.into()),
			// Suspended people shouldn't be able to call this, but protect against this case
			// anyway.
			RingPosition::Suspended => return Err(Error::<T>::Suspended.into()),
		}
		// Register the new key as belonging to this person immediately; the old key remains
		// registered until the migration is applied.
		Keys::<T>::insert(new_key, id);

		Ok(().into())
	}
1095
	/// Migrate the key for a person who is currently onboarding. The operation is instant,
	/// replacing the old key in the onboarding queue.
	#[pallet::call_index(7)]
	pub fn migrate_onboarding_key(
		origin: OriginFor<T>,
		new_key: MemberOf<T>,
	) -> DispatchResultWithPostInfo {
		// Origin must be a recognized person.
		let id = Self::ensure_personal_identity(origin)?;
		// The new key must not already belong to anyone, including the caller.
		ensure!(!Keys::<T>::contains_key(&new_key), Error::<T>::KeyAlreadyInUse);
		let mut record = People::<T>::get(id).ok_or(Error::<T>::NotPerson)?;
		ensure!(record.key != new_key, Error::<T>::SameKey);
		match &record.position {
			// If it's still onboarding, just replace the old key in the queue.
			RingPosition::Onboarding { queue_page } => {
				let mut keys = OnboardingQueue::<T>::get(queue_page);
				if let Some(idx) = keys.iter().position(|k| *k == record.key) {
					// Remove the key that never made it into a ring.
					Keys::<T>::remove(&keys[idx]);
					// Update the key in the queue.
					keys[idx] = new_key.clone();
					OnboardingQueue::<T>::insert(queue_page, keys);
					// Replace the key in the record.
					record.key = new_key.clone();
					// Update the record.
					People::<T>::insert(id, record);
				} else {
					// The record claims the key is on this page but it is missing: report
					// the broken invariant without failing the extrinsic.
					defensive!("No key found at the position in the person record of {}", id);
				}
			},
			// This call accepts migrations only for keys still in the onboarding queue;
			// included keys must use `migrate_included_key` instead.
			RingPosition::Included { .. } => return Err(Error::<T>::InvalidKeyMigration.into()),
			// Suspended people shouldn't be able to call this, but protect against this case
			// anyway.
			RingPosition::Suspended => return Err(Error::<T>::Suspended.into()),
		}
		// Register the new key as belonging to this person.
		Keys::<T>::insert(new_key, id);

		Ok(().into())
	}
1135
1136		/// Force set the onboarding size for new people. This call requires root privileges.
1137		#[pallet::call_index(8)]
1138		pub fn set_onboarding_size(
1139			origin: OriginFor<T>,
1140			onboarding_size: u32,
1141		) -> DispatchResultWithPostInfo {
1142			ensure_root(origin)?;
1143			ensure!(
1144				onboarding_size <= <T as Config>::MaxRingSize::get(),
1145				Error::<T>::InvalidOnboardingSize
1146			);
1147			OnboardingSize::<T>::put(onboarding_size);
1148			Ok(Pays::No.into())
1149		}
1150	}
1151
1152	impl<T: Config> Pallet<T> {
1153		/// If the conditions to build a ring are met, this function returns the number of people to
1154		/// be included in a `build_ring` call. Otherwise, this function returns `None`.
1155		pub(crate) fn should_build_ring(ring_index: RingIndex, limit: u32) -> Option<u32> {
1156			// Ring root cannot be built while there are people to remove.
1157			if !RingsState::<T>::get().append_only() {
1158				return None;
1159			}
1160			// Suspended people should be removed from the ring before building it.
1161			if PendingSuspensions::<T>::contains_key(ring_index) {
1162				return None;
1163			}
1164
1165			let ring_status = RingKeysStatus::<T>::get(ring_index);
1166			let not_included_count = ring_status.total.saturating_sub(ring_status.included);
1167			let to_include = not_included_count.min(limit);
1168			// There must be at least one person waiting to be included to build the ring.
1169			if to_include == 0 {
1170				return None;
1171			}
1172
1173			Some(to_include)
1174		}
1175
1176		/// If the conditions to onboard new people into rings are met, this function returns the
1177		/// number of people to be onboarded from the queue in a `onboard_people` call along with a
1178		/// flag which states whether the call will completely populate the ring. Otherwise, this
1179		/// function returns `None`.
1180		fn should_onboard_people(
1181			ring_index: RingIndex,
1182			ring_status: &RingStatus,
1183			open_slots: u32,
1184			available_for_inclusion: u32,
1185			onboarding_size: u32,
1186		) -> Option<(u32, bool)> {
1187			// People cannot be onboarded while suspensions are ongoing.
1188			if !RingsState::<T>::get().append_only() {
1189				return None;
1190			}
1191
1192			// Suspended people should be removed from the ring before building it.
1193			if PendingSuspensions::<T>::contains_key(ring_index) {
1194				return None;
1195			}
1196
1197			let to_include = available_for_inclusion.min(open_slots);
1198			// If everything is already included, nothing to do.
1199			if to_include == 0 {
1200				return None;
1201			}
1202
1203			// Here we check we have enough items in the queue so that the onboarding group size is
1204			// respected, but also that we can support another queue of at least onboarding size
1205			// in a future call.
1206			let can_onboard_with_cohort = to_include >= onboarding_size &&
1207				ring_status.total.saturating_add(to_include.saturated_into()) <=
1208					T::MaxRingSize::get().saturating_sub(onboarding_size);
1209			// If this call completely fills the ring, no onboarding rule enforcement will be
1210			// necessary.
1211			let ring_filled = open_slots == to_include;
1212
1213			let should_onboard = ring_filled || can_onboard_with_cohort;
1214			if !should_onboard {
1215				return None;
1216			}
1217
1218			Some((to_include, ring_filled))
1219		}
1220
1221		/// Returns whether suspensions are allowed and necessary for a given ring index.
1222		pub(crate) fn should_remove_suspended_keys(
1223			ring_index: RingIndex,
1224			check_rings_state: bool,
1225		) -> bool {
1226			if check_rings_state && !RingsState::<T>::get().append_only() {
1227				return false;
1228			}
1229			let suspended_count = PendingSuspensions::<T>::decode_len(ring_index).unwrap_or(0);
1230			// There must be keys to suspend.
1231			if suspended_count == 0 {
1232				return false;
1233			}
1234
1235			true
1236		}
1237
1238		/// Function that checks if the top two onboarding queue pages can be merged into a single
1239		/// page to defragment the list. This function returns an action to take following the
1240		/// check. In case a merge is needed, the following information is provided, in order:
1241		/// * The initial `head` of the queue - will need to remove the page at this index in case
1242		///   the merge is performed.
1243		/// * The new `head` of the queue.
1244		/// * The keys on the first page of the queue.
1245		/// * The keys on the second page of the queue.
1246		pub(crate) fn should_merge_queue_pages() -> QueueMergeAction<T> {
1247			let (initial_head, tail) = QueuePageIndices::<T>::get();
1248			let first_key_page = OnboardingQueue::<T>::get(initial_head);
1249			// A `head != tail` condition should mean that there is at least one more page
1250			// following this one.
1251			if initial_head == tail {
1252				return QueueMergeAction::NoAction;
1253			}
1254			let new_head = initial_head.checked_add(1).unwrap_or(0);
1255			let second_key_page = OnboardingQueue::<T>::get(new_head);
1256
1257			let page_size = T::OnboardingQueuePageSize::get();
1258			// Make sure the pages can be merged.
1259			if first_key_page.len().saturating_add(second_key_page.len()) > page_size as usize {
1260				return QueueMergeAction::NoAction;
1261			}
1262
1263			QueueMergeAction::Merge { initial_head, new_head, first_key_page, second_key_page }
1264		}
1265
	/// Build a ring root by adding all people who were assigned to this ring but not yet
	/// included into the root.
	///
	/// Pushes up to `to_include` not-yet-included keys into the ring's cryptographic
	/// commitment and stores the resulting root under the next revision number.
	pub(crate) fn build_ring(ring_index: RingIndex, to_include: u32) -> DispatchResult {
		let (keys, mut ring_status) = Self::ring_keys_and_info(ring_index);
		// Get the current ring, and check it should be rebuilt.
		// Return the next revision.
		let (next_revision, mut intermediate) =
			if let Some(existing_root) = Root::<T>::get(ring_index) {
				// We should build a new ring. Return the new revision number we should use.
				(
					existing_root.revision.checked_add(1).ok_or(ArithmeticError::Overflow)?,
					existing_root.intermediate,
				)
			} else {
				// No ring has been built at this index, so we start at revision 0.
				(0, T::Crypto::start_members())
			};

		// Push the members: only keys past the already-included prefix, at most `to_include`
		// of them, extending the stored intermediate state.
		T::Crypto::push_members(
			&mut intermediate,
			keys.iter()
				.skip(ring_status.included as usize)
				.take(to_include as usize)
				.cloned(),
			Self::fetch_chunks,
		)
		.defensive()
		.map_err(|_| Error::<T>::CouldNotPush)?;

		// By the end of the loop, we have included the maximum number of keys in the vector.
		ring_status.included = ring_status.included.saturating_add(to_include);
		RingKeysStatus::<T>::insert(ring_index, ring_status);

		// We create the root after pushing all members. The intermediate state is stored
		// alongside the root so future builds can extend it incrementally.
		let root = T::Crypto::finish_members(intermediate.clone());
		let ring_root = RingRoot { root, revision: next_revision, intermediate };
		Root::<T>::insert(ring_index, ring_root);
		Ok(())
	}
1306
	/// Onboard as many people as possible into the available ring.
	///
	/// This function returns an error if there aren't enough people in the onboarding queue to
	/// complete the operation, or if the number of remaining open slots in the ring would be
	/// below the minimum onboarding size allowed.
	#[transactional]
	pub(crate) fn onboard_people() -> DispatchResult {
		// Get the keys for this ring, and make sure that the ring is full before we build it.
		let (top_ring_index, mut keys) = Self::available_ring();
		let mut ring_status = RingKeysStatus::<T>::get(top_ring_index);
		defensive_assert!(
			keys.len() == ring_status.total as usize,
			"Stored key count doesn't match the actual length"
		);

		let keys_len = keys.len() as u32;
		let open_slots = T::MaxRingSize::get().saturating_sub(keys_len);

		// Take the head page of candidates off the onboarding queue.
		let (mut head, tail) = QueuePageIndices::<T>::get();
		let old_head = head;
		let mut keys_to_include: Vec<MemberOf<T>> =
			OnboardingQueue::<T>::take(head).into_inner();

		// A `head != tail` condition should mean that there is at least one key in the page
		// following this one.
		if keys_to_include.len() < open_slots as usize && head != tail {
			// Page indices wrap around to zero on overflow.
			head = head.checked_add(1).unwrap_or(0);
			let second_key_page = OnboardingQueue::<T>::take(head);
			defensive_assert!(!second_key_page.is_empty());
			keys_to_include.extend(second_key_page.into_iter());
		}

		let onboarding_size = OnboardingSize::<T>::get();

		// Check the onboarding rules; bail out if this batch is not allowed yet.
		let (to_include, ring_filled) = Self::should_onboard_people(
			top_ring_index,
			&ring_status,
			open_slots,
			keys_to_include.len().saturated_into(),
			onboarding_size,
		)
		.ok_or(Error::<T>::Incomplete)?;

		// Keys beyond `to_include` are returned to the queue below.
		let mut remaining_keys = keys_to_include.split_off(to_include as usize);
		for key in keys_to_include.into_iter() {
			let personal_id = Keys::<T>::get(&key).defensive().ok_or(Error::<T>::NotPerson)?;
			let mut record =
				People::<T>::get(personal_id).defensive().ok_or(Error::<T>::KeyNotFound)?;
			// `keys.len()` before the push below is this member's position in the ring.
			record.position = RingPosition::Included {
				ring_index: top_ring_index,
				ring_position: keys.len().saturated_into(),
				scheduled_for_removal: false,
			};
			People::<T>::insert(personal_id, record);
			keys.try_push(key).defensive().map_err(|_| Error::<T>::TooManyMembers)?;
		}
		RingKeys::<T>::insert(top_ring_index, keys);
		ActiveMembers::<T>::mutate(|active| *active = active.saturating_add(to_include));
		ring_status.total = ring_status.total.saturating_add(to_include);
		RingKeysStatus::<T>::insert(top_ring_index, ring_status);

		// Update the top ring index if this onboarding round filled the current ring.
		if ring_filled {
			CurrentRingIndex::<T>::mutate(|i| i.saturating_inc());
		}

		// Return leftover keys to the queue, splitting them across two pages if they no
		// longer fit into one (possible when two pages were drained above).
		if remaining_keys.len() > T::OnboardingQueuePageSize::get() as usize {
			let split_idx =
				remaining_keys.len().saturating_sub(T::OnboardingQueuePageSize::get() as usize);
			let second_page_keys: BoundedVec<MemberOf<T>, T::OnboardingQueuePageSize> =
				remaining_keys
					.split_off(split_idx)
					.try_into()
					.expect("the list shrunk so it must fit; qed");
			let remaining_keys: BoundedVec<MemberOf<T>, T::OnboardingQueuePageSize> =
				remaining_keys.try_into().expect("the list shrunk so it must fit; qed");
			OnboardingQueue::<T>::insert(old_head, remaining_keys);
			OnboardingQueue::<T>::insert(head, second_page_keys);
			QueuePageIndices::<T>::put((old_head, tail));
		} else if !remaining_keys.is_empty() {
			let remaining_keys: BoundedVec<MemberOf<T>, T::OnboardingQueuePageSize> =
				remaining_keys.try_into().expect("the list shrunk so it must fit; qed");
			OnboardingQueue::<T>::insert(head, remaining_keys);
			QueuePageIndices::<T>::put((head, tail));
		} else {
			// We have nothing to put back into the queue, so if this isn't the last page, move
			// the head to the next page of the queue.
			if head != tail {
				head = head.checked_add(1).unwrap_or(0);
			}
			QueuePageIndices::<T>::put((head, tail));
		}
		Ok(())
	}
1401
1402		fn derivative_call(
1403			mut origin: OriginFor<T>,
1404			local_origin: Origin,
1405			call: <T as frame_system::Config>::RuntimeCall,
1406			derivation_weight: Weight,
1407		) -> DispatchResultWithPostInfo {
1408			origin.set_caller_from(<T::RuntimeOrigin as OriginTrait>::PalletsOrigin::from(
1409				local_origin,
1410			));
1411			let info = call.get_dispatch_info();
1412			let result = call.dispatch(origin);
1413			let weight = derivation_weight.saturating_add(extract_actual_weight(&result, &info));
1414			result
1415				.map(|p| PostDispatchInfo { actual_weight: Some(weight), pays_fee: p.pays_fee })
1416				.map_err(|mut err| {
1417					err.post_info = Some(weight).into();
1418					err
1419				})
1420		}
1421
1422		/// Ensure that the origin `o` represents a person.
1423		/// Returns `Ok` with the base identity of the person on success.
1424		pub fn ensure_personal_identity(
1425			origin: T::RuntimeOrigin,
1426		) -> Result<PersonalId, DispatchError> {
1427			Ok(ensure_personal_identity(origin.into_caller())?)
1428		}
1429
1430		/// Ensure that the origin `o` represents a person.
1431		/// Returns `Ok` with the alias of the person together with the context in which it can
1432		/// be used on success.
1433		pub fn ensure_personal_alias(
1434			origin: T::RuntimeOrigin,
1435		) -> Result<ContextualAlias, DispatchError> {
1436			Ok(ensure_personal_alias(origin.into_caller())?)
1437		}
1438
1439		/// Ensure that the origin `o` represents a person.
1440		/// On success returns `Ok` with the revised alias of the person together with the context
1441		/// in which it can be used and the revision of the ring the person is in.
1442		pub fn ensure_revised_personal_alias(
1443			origin: T::RuntimeOrigin,
1444		) -> Result<RevisedContextualAlias, DispatchError> {
1445			Ok(ensure_revised_personal_alias(origin.into_caller())?)
1446		}
1447
1448		// This function always returns the ring index and the keys for the ring which is currently
1449		// accepting new members.
1450		pub fn available_ring() -> (RingIndex, BoundedVec<MemberOf<T>, T::MaxRingSize>) {
1451			let mut current_ring_index = CurrentRingIndex::<T>::get();
1452			let mut current_keys = RingKeys::<T>::get(current_ring_index);
1453
1454			defensive_assert!(
1455				!current_keys.is_full(),
1456				"Something bad happened inside the STF, where the current keys are full, but we should have incremented in that case."
1457			);
1458
1459			// This condition shouldn't be reached, but we handle the error just in case.
1460			if current_keys.is_full() {
1461				current_ring_index.saturating_inc();
1462				CurrentRingIndex::<T>::put(current_ring_index);
1463				current_keys = RingKeys::<T>::get(current_ring_index);
1464			}
1465
1466			defensive_assert!(
1467				!current_keys.is_full(),
1468				"Something bad happened inside the STF, where the current key and next key are both full. Nothing we can do here."
1469			);
1470
1471			(current_ring_index, current_keys)
1472		}
1473
1474		// This allows us to associate a key with a person.
1475		pub fn do_insert_key(who: PersonalId, key: MemberOf<T>) -> DispatchResult {
1476			// If the key is already in use by another person then error.
1477			ensure!(!Keys::<T>::contains_key(&key), Error::<T>::KeyAlreadyInUse);
1478			// This is a first time key, so it must be reserved.
1479			ensure!(
1480				ReservedPersonalId::<T>::take(who).is_some(),
1481				Error::<T>::PersonalIdNotReservedOrNotRecognized
1482			);
1483
1484			Self::push_to_onboarding_queue(who, key, None)
1485		}
1486
	// Enqueue personhood suspensions. This function can be called multiple times until all
	// people are marked as suspended, but it can only happen while there is a mutation session
	// in progress.
	pub fn queue_personhood_suspensions(suspensions: &[PersonalId]) -> DispatchResult {
		ensure!(RingsState::<T>::get().mutating(), Error::<T>::NoMutationSession);
		for who in suspensions {
			let mut record = People::<T>::get(who).ok_or(Error::<T>::InvalidSuspensions)?;
			match record.position {
				RingPosition::Included { ring_index, ring_position, .. } => {
					// Record the ring position in the sorted pending-suspension list; a
					// successful `binary_search` means it was already queued.
					let mut suspended_indices = PendingSuspensions::<T>::get(ring_index);
					let Err(insert_idx) = suspended_indices.binary_search(&ring_position)
					else {
						return Err(Error::<T>::KeyAlreadySuspended.into())
					};
					suspended_indices
						.try_insert(insert_idx, ring_position)
						.defensive()
						.map_err(|_| Error::<T>::TooManyMembers)?;
					PendingSuspensions::<T>::insert(ring_index, suspended_indices);
				},
				RingPosition::Onboarding { queue_page } => {
					let mut keys = OnboardingQueue::<T>::get(queue_page);
					let queue_idx = keys.iter().position(|k| *k == record.key);
					if let Some(idx) = queue_idx {
						// It is expensive to shift the whole vec in the worst case to remove a
						// suspended person from onboarding, but the pages will be small and
						// suspension of people who are not yet onboarded is supposed to be
						// extremely rare if not impossible as the pallet hooks should have
						// plenty of time to include someone recognized before the beginning of
						// the next suspension round. The only legitimate case when this could
						// happen is if someone is sitting in the onboarding queue for a long
						// time and cannot be included because not enough people are joining,
						// but it should be a rare case.
						keys.remove(idx);
						OnboardingQueue::<T>::insert(queue_page, keys);
					} else {
						defensive!(
							"No key found at the position in the person record of {}",
							who
						);
					}
				},
				RingPosition::Suspended => {
					defensive!("Suspension queued for person {} while already suspended", who);
				},
			}

			// Mark the person suspended and detach any bound personal-id account.
			// NOTE(review): the account mapping is removed without a matching
			// `dec_sufficients`, unlike `unset_personal_id_account` — confirm the sufficient
			// reference is released elsewhere or this leaks one per suspension.
			record.position = RingPosition::Suspended;
			if let Some(account) = record.account {
				AccountToPersonalId::<T>::remove(account);
				record.account = None;
			}

			People::<T>::insert(who, record);
		}

		Ok(())
	}
1545
1546		// Resume someone's personhood. This assumes that their personhood is currently suspended,
1547		// so the person was previously recognized.
1548		pub fn resume_personhood(who: PersonalId) -> DispatchResult {
1549			let record = People::<T>::get(who).ok_or(Error::<T>::NotPerson)?;
1550			ensure!(record.position.suspended(), Error::<T>::NotSuspended);
1551			ensure!(Keys::<T>::get(&record.key) == Some(who), Error::<T>::NoKey);
1552
1553			Self::push_to_onboarding_queue(who, record.key, record.account)
1554		}
1555
1556		fn push_to_onboarding_queue(
1557			who: PersonalId,
1558			key: MemberOf<T>,
1559			account: Option<T::AccountId>,
1560		) -> DispatchResult {
1561			let (head, mut tail) = QueuePageIndices::<T>::get();
1562			let mut keys = OnboardingQueue::<T>::get(tail);
1563			if let Err(k) = keys.try_push(key.clone()) {
1564				tail = tail.checked_add(1).unwrap_or(0);
1565				ensure!(tail != head, Error::<T>::TooManyMembers);
1566				keys = alloc::vec![k].try_into().expect("must be able to hold one key; qed");
1567			};
1568
1569			let record = PersonRecord {
1570				key,
1571				position: RingPosition::Onboarding { queue_page: tail },
1572				account,
1573			};
1574			Keys::<T>::insert(&record.key, who);
1575			People::<T>::insert(who, &record);
1576			Self::deposit_event(Event::<T>::PersonOnboarding { who, key: record.key });
1577
1578			QueuePageIndices::<T>::put((head, tail));
1579			OnboardingQueue::<T>::insert(tail, keys);
1580			Ok(())
1581		}
1582
1583		/// Fetch the keys in a ring along with stored inclusion information.
1584		pub fn ring_keys_and_info(
1585			ring_index: RingIndex,
1586		) -> (BoundedVec<MemberOf<T>, T::MaxRingSize>, RingStatus) {
1587			let keys = RingKeys::<T>::get(ring_index);
1588			let ring_status = RingKeysStatus::<T>::get(ring_index);
1589			defensive_assert!(
1590				keys.len() == ring_status.total as usize,
1591				"Stored key count doesn't match the actual length"
1592			);
1593			(keys, ring_status)
1594		}
1595
		// Given a range, returns the list of chunks that maps to the keys at those indices.
		// Errors if the configured chunk page size is zero, if a required chunk page is missing
		// from storage, or on page index overflow.
		pub(crate) fn fetch_chunks(
			range: Range<usize>,
		) -> Result<Vec<<T::Crypto as GenerateVerifiable>::StaticChunk>, ()> {
			let chunk_page_size = T::ChunkPageSize::get();
			let expected_len = range.end.saturating_sub(range.start);
			// `checked_div` only fails when the page size is zero.
			let mut page_idx = range.start.checked_div(chunk_page_size as usize).ok_or(())?;
			// Read the first page, skipping the in-page offset of `range.start`.
			let mut chunks: Vec<_> = Chunks::<T>::get(page_idx.saturated_into::<u32>())
				.defensive()
				.ok_or(())?
				.into_iter()
				.skip(range.start % chunk_page_size as usize)
				.take(expected_len)
				.collect();
			// Keep pulling whole pages until the requested amount is gathered; a missing page
			// aborts with an error, which also bounds the loop.
			while chunks.len() < expected_len {
				// Condition to eventually break out of a possible infinite loop in case
				// storage is full of empty chunk pages.
				page_idx = page_idx.checked_add(1).ok_or(())?;
				let page =
					Chunks::<T>::get(page_idx.saturated_into::<u32>()).defensive().ok_or(())?;
				chunks.extend(
					page.into_inner().into_iter().take(expected_len.saturating_sub(chunks.len())),
				);
			}

			Ok(chunks)
		}
1623
		/// Migrates keys that people intend to replace with other keys, if possible. As this
		/// function mutates a fair amount of storage, it comes with a weight meter to limit on the
		/// number of keys to migrate in one call.
		///
		/// Stops early (without closing the migration phase) when the meter cannot afford another
		/// step; subsequent calls resume draining `KeyMigrationQueue`.
		pub(crate) fn migrate_keys(meter: &mut WeightMeter) {
			let mut drain = KeyMigrationQueue::<T>::drain();
			loop {
				// Ensure we have enough weight to look into `KeyMigrationQueue` and perform a
				// removal.
				let weight = T::WeightInfo::migrate_keys_single_included_key()
					.saturating_add(T::DbWeight::get().reads_writes(1, 1));
				if !meter.can_consume(weight) {
					return;
				}

				// Each step runs in its own storage layer so a failed migration rolls back
				// cleanly. `Ok(true)` signals the queue has been fully drained and the key
				// migration phase of the rings state has been ended.
				let op_res = with_storage_layer::<bool, DispatchError, _>(|| match drain.next() {
					Some((id, new_key)) =>
						Self::migrate_keys_single_included_key(id, new_key).map(|_| false),
					None => {
						// Queue exhausted: close the key migration phase.
						let rings_state = RingsState::<T>::get()
							.end_key_migration()
							.map_err(|_| Error::<T>::NoMutationSession)?;
						RingsState::<T>::put(rings_state);
						meter.consume(T::DbWeight::get().reads_writes(1, 1));
						Ok(true)
					},
				});
				match op_res {
					Ok(false) => meter.consume(weight),
					Ok(true) => {
						// Read on `KeyMigrationQueue`.
						meter.consume(T::DbWeight::get().reads(1));
						break
					},
					Err(e) => {
						// The failed step was rolled back by the storage layer; stop and report.
						meter.consume(weight);
						log::error!(target: LOG_TARGET, "failed to migrate keys: {:?}", e);
						break;
					},
				}
			}
		}
1665
		/// A single iteration of the key migration process where an included key marked for
		/// suspension is being removed from a ring.
		///
		/// If the person is not an included key scheduled for removal (or no record exists), the
		/// pending `new_key` mapping is discarded and the call succeeds as a no-op.
		pub(crate) fn migrate_keys_single_included_key(
			id: PersonalId,
			new_key: MemberOf<T>,
		) -> DispatchResult {
			if let Some(record) = People::<T>::get(id) {
				let RingPosition::Included {
					ring_index,
					ring_position,
					scheduled_for_removal: true,
				} = record.position
				else {
					// Not eligible for migration anymore: drop the staged new key mapping.
					Keys::<T>::remove(new_key);
					return Ok(())
				};
				// Queue the old ring slot for suspension, keeping the pending list sorted.
				let mut suspended_indices = PendingSuspensions::<T>::get(ring_index);
				let Err(insert_idx) = suspended_indices.binary_search(&ring_position) else {
					log::info!(target: LOG_TARGET, "key migration for person {} skipped as the person's key was already suspended", id);
					return Ok(());
				};
				suspended_indices
					.try_insert(insert_idx, ring_position)
					.map_err(|_| Error::<T>::TooManyMembers)?;
				PendingSuspensions::<T>::insert(ring_index, suspended_indices);
				// Drop the old key mapping and re-onboard the person under the new key.
				Keys::<T>::remove(&record.key);
				Self::push_to_onboarding_queue(id, new_key, record.account)?;
			} else {
				log::info!(target: LOG_TARGET, "key migration for person {} skipped as no record was found", id);
			}
			Ok(())
		}
1698
		/// Removes people's keys marked as suspended or inactive from a ring with a given index.
		///
		/// Returns the weight consumed by the operation.
		pub(crate) fn remove_suspended_keys(ring_index: RingIndex) -> Weight {
			let keys = RingKeys::<T>::get(ring_index);
			let keys_len = keys.len();
			// `PendingSuspensions` is kept sorted on insertion, so a single forward scan with a
			// cursor `j` is enough to skip every suspended position.
			let suspended_indices = PendingSuspensions::<T>::get(ring_index);
			// Construct the new keys map by skipping the suspended keys. This should prevent
			// reallocations in the `Vec` which happens with `remove`.
			let mut new_keys: BoundedVec<MemberOf<T>, T::MaxRingSize> = Default::default();
			let mut j = 0;
			for (i, key) in keys.into_iter().enumerate() {
				if j < suspended_indices.len() && i == suspended_indices[j] as usize {
					// This slot is suspended: drop the key and advance the suspension cursor.
					j += 1;
				} else if new_keys
					.try_push(key)
					.defensive_proof("cannot move more ring members than the max ring size; qed")
					.is_err()
				{
					// Defensive path (should be impossible): bail out without touching storage.
					return T::WeightInfo::remove_suspended_people(
						keys_len.try_into().unwrap_or(u32::MAX),
					);
				}
			}

			// Record how many members were dropped and reset the inclusion counter.
			let suspended_count = RingKeysStatus::<T>::mutate(ring_index, |ring_status| {
				let new_total = new_keys.len().saturated_into();
				let suspended_count = ring_status.total.saturating_sub(new_total);
				ring_status.total = new_total;
				ring_status.included = 0;
				suspended_count
			});
			ActiveMembers::<T>::mutate(|active| *active = active.saturating_sub(suspended_count));
			RingKeys::<T>::insert(ring_index, new_keys);
			Root::<T>::mutate(ring_index, |maybe_root| {
				if let Some(root) = maybe_root {
					// The revision will be incremented on the next call of `build_ring`. The
					// current root is preserved.
					root.intermediate = T::Crypto::start_members();
				}
			});

			// Make sure to remove the entry from the map so that the pallet hooks don't iterate
			// over it.
			PendingSuspensions::<T>::remove(ring_index);
			T::WeightInfo::remove_suspended_people(keys_len.try_into().unwrap_or(u32::MAX))
		}
1744
		/// Merges the two pages at the front of the onboarding queue. After a round of suspensions,
		/// it is possible for the second page of the onboarding queue to be left with few members
		/// such that, if the first page also has few members, the total count is below the required
		/// onboarding size, thus stalling the queue. This function fixes this by moving the people
		/// from the first page to the front of the second page, defragmenting the queue.
		///
		/// If the operation fails, the storage is rolled back.
		pub(crate) fn merge_queue_pages(
			initial_head: u32,
			new_head: u32,
			mut first_key_page: BoundedVec<MemberOf<T>, T::OnboardingQueuePageSize>,
			second_key_page: BoundedVec<MemberOf<T>, T::OnboardingQueuePageSize>,
		) {
			// All changes happen inside one transactional layer so a failure leaves the queue
			// untouched; the error is only logged.
			let op_res = with_storage_layer::<(), DispatchError, _>(|| {
				// Update the records of the people in the first page.
				for key in first_key_page.iter() {
					let personal_id =
						Keys::<T>::get(key).defensive().ok_or(Error::<T>::NotPerson)?;
					let mut record =
						People::<T>::get(personal_id).defensive().ok_or(Error::<T>::KeyNotFound)?;
					record.position = RingPosition::Onboarding { queue_page: new_head };
					People::<T>::insert(personal_id, record);
				}

				// Extend the first page with the second page's keys, then store the combined
				// page under the new head index and retire the old head page.
				first_key_page
					.try_extend(second_key_page.into_iter())
					.defensive()
					.map_err(|_| Error::<T>::TooManyMembers)?;
				OnboardingQueue::<T>::remove(initial_head);
				OnboardingQueue::<T>::insert(new_head, first_key_page);
				QueuePageIndices::<T>::mutate(|(h, _)| *h = new_head);
				Ok(())
			});
			if let Err(e) = op_res {
				log::error!(target: LOG_TARGET, "failed to merge queue pages: {:?}", e);
			}
		}
1782	}
1783
1784	impl<T: Config> AddOnlyPeopleTrait for Pallet<T> {
1785		type Member = MemberOf<T>;
1786
1787		fn reserve_new_id() -> PersonalId {
1788			let new_id = NextPersonalId::<T>::mutate(|id| {
1789				let new_id = *id;
1790				id.saturating_inc();
1791				new_id
1792			});
1793			ReservedPersonalId::<T>::insert(new_id, ());
1794			new_id
1795		}
1796
1797		fn cancel_id_reservation(personal_id: PersonalId) -> Result<(), DispatchError> {
1798			ReservedPersonalId::<T>::take(personal_id).ok_or(Error::<T>::PersonalIdNotReserved)?;
1799			Ok(())
1800		}
1801
1802		fn renew_id_reservation(personal_id: PersonalId) -> Result<(), DispatchError> {
1803			if NextPersonalId::<T>::get() <= personal_id ||
1804				People::<T>::contains_key(personal_id) ||
1805				ReservedPersonalId::<T>::contains_key(personal_id)
1806			{
1807				return Err(Error::<T>::PersonalIdReservationCannotRenew.into());
1808			}
1809			ReservedPersonalId::<T>::insert(personal_id, ());
1810			Ok(())
1811		}
1812
1813		fn recognize_personhood(
1814			who: PersonalId,
1815			maybe_key: Option<MemberOf<T>>,
1816		) -> Result<(), DispatchError> {
1817			match maybe_key {
1818				Some(key) => Self::do_insert_key(who, key),
1819				None => Self::resume_personhood(who),
1820			}
1821		}
1822
1823		#[cfg(feature = "runtime-benchmarks")]
1824		type Secret = <<T as Config>::Crypto as GenerateVerifiable>::Secret;
1825
1826		#[cfg(feature = "runtime-benchmarks")]
1827		fn mock_key(who: PersonalId) -> (Self::Member, Self::Secret) {
1828			let mut buf = [0u8; 32];
1829			buf[..core::mem::size_of::<PersonalId>()].copy_from_slice(&who.to_le_bytes()[..]);
1830			let secret = T::Crypto::new_secret(buf);
1831			(T::Crypto::member_from_secret(&secret), secret)
1832		}
1833	}
1834
1835	impl<T: Config> PeopleTrait for Pallet<T> {
1836		fn suspend_personhood(suspensions: &[PersonalId]) -> DispatchResult {
1837			Self::queue_personhood_suspensions(suspensions)
1838		}
1839		fn start_people_set_mutation_session() -> DispatchResult {
1840			let current_state = RingsState::<T>::get();
1841			RingsState::<T>::put(
1842				current_state
1843					.start_mutation_session()
1844					.map_err(|_| Error::<T>::CouldNotStartMutationSession)?,
1845			);
1846			Ok(())
1847		}
1848		fn end_people_set_mutation_session() -> DispatchResult {
1849			let current_state = RingsState::<T>::get();
1850			RingsState::<T>::put(
1851				current_state
1852					.end_mutation_session()
1853					.map_err(|_| Error::<T>::NoMutationSession)?,
1854			);
1855			Ok(())
1856		}
1857	}
1858
1859	/// Ensure that the origin `o` represents an extrinsic (i.e. transaction) from a personal
1860	/// identity. Returns `Ok` with the personal identity that signed the extrinsic or an `Err`
1861	/// otherwise.
1862	pub fn ensure_personal_identity<OuterOrigin>(o: OuterOrigin) -> Result<PersonalId, BadOrigin>
1863	where
1864		OuterOrigin: TryInto<Origin, Error = OuterOrigin>,
1865	{
1866		match o.try_into() {
1867			Ok(Origin::PersonalIdentity(m)) => Ok(m),
1868			_ => Err(BadOrigin),
1869		}
1870	}
1871
1872	/// Ensure that the origin `o` represents an extrinsic (i.e. transaction) from a personal alias.
1873	/// Returns `Ok` with the personal alias that signed the extrinsic or an `Err` otherwise.
1874	pub fn ensure_personal_alias<OuterOrigin>(o: OuterOrigin) -> Result<ContextualAlias, BadOrigin>
1875	where
1876		OuterOrigin: TryInto<Origin, Error = OuterOrigin>,
1877	{
1878		match o.try_into() {
1879			Ok(Origin::PersonalAlias(rev_ca)) => Ok(rev_ca.ca),
1880			_ => Err(BadOrigin),
1881		}
1882	}
1883
1884	/// Guard to ensure that the given origin is a person. The underlying identity of the person is
1885	/// provided on success.
1886	pub struct EnsurePersonalIdentity<T>(PhantomData<T>);
1887	impl<T: Config> EnsureOrigin<OriginFor<T>> for EnsurePersonalIdentity<T> {
1888		type Success = PersonalId;
1889
1890		fn try_origin(o: OriginFor<T>) -> Result<Self::Success, OriginFor<T>> {
1891			ensure_personal_identity(o.clone().into_caller()).map_err(|_| o)
1892		}
1893
1894		#[cfg(feature = "runtime-benchmarks")]
1895		fn try_successful_origin() -> Result<OriginFor<T>, ()> {
1896			Ok(Origin::PersonalIdentity(0).into())
1897		}
1898	}
1899
	// Also let `EnsurePersonalIdentity` serve as an `EnsureOriginWithArg` by ignoring the
	// extra argument.
	frame_support::impl_ensure_origin_with_arg_ignoring_arg! {
		impl<{ T: Config, A }>
			EnsureOriginWithArg< OriginFor<T>, A> for EnsurePersonalIdentity<T>
		{}
	}

	impl<T: Config> CountedMembers for EnsurePersonalIdentity<T> {
		// NOTE(review): this counts all entries in `Keys`, unlike the alias guards in this file
		// which report `ActiveMembers` — confirm this distinction is intentional.
		fn active_count(&self) -> u32 {
			Keys::<T>::count()
		}
	}
1911
1912	/// Guard to ensure that the given origin is a person. The contextual alias of the person is
1913	/// provided on success.
1914	pub struct EnsurePersonalAlias<T>(PhantomData<T>);
1915	impl<T: Config> EnsureOrigin<OriginFor<T>> for EnsurePersonalAlias<T> {
1916		type Success = ContextualAlias;
1917
1918		fn try_origin(o: OriginFor<T>) -> Result<Self::Success, OriginFor<T>> {
1919			ensure_personal_alias(o.clone().into_caller()).map_err(|_| o)
1920		}
1921
1922		#[cfg(feature = "runtime-benchmarks")]
1923		fn try_successful_origin() -> Result<OriginFor<T>, ()> {
1924			Ok(Origin::PersonalAlias(RevisedContextualAlias {
1925				revision: 0,
1926				ring: 0,
1927				ca: ContextualAlias { alias: [1; 32], context: [0; 32] },
1928			})
1929			.into())
1930		}
1931	}
1932
	// Also let `EnsurePersonalAlias` serve as an `EnsureOriginWithArg` by ignoring the
	// extra argument.
	frame_support::impl_ensure_origin_with_arg_ignoring_arg! {
		impl<{ T: Config, A }>
			EnsureOriginWithArg< OriginFor<T>, A> for EnsurePersonalAlias<T>
		{}
	}

	impl<T: Config> CountedMembers for EnsurePersonalAlias<T> {
		// Reports the stored count of members currently active in rings.
		fn active_count(&self) -> u32 {
			ActiveMembers::<T>::get()
		}
	}
1944
1945	/// Guard to ensure that the given origin is a person. The alias of the person within the
1946	/// context provided as an argument is returned on success.
1947	pub struct EnsurePersonalAliasInContext<T>(PhantomData<T>);
1948	impl<T: Config> EnsureOriginWithArg<OriginFor<T>, Context> for EnsurePersonalAliasInContext<T> {
1949		type Success = Alias;
1950
1951		fn try_origin(o: OriginFor<T>, arg: &Context) -> Result<Self::Success, OriginFor<T>> {
1952			match ensure_personal_alias(o.clone().into_caller()) {
1953				Ok(ca) if &ca.context == arg => Ok(ca.alias),
1954				_ => Err(o),
1955			}
1956		}
1957
1958		#[cfg(feature = "runtime-benchmarks")]
1959		fn try_successful_origin(context: &Context) -> Result<OriginFor<T>, ()> {
1960			Ok(Origin::PersonalAlias(RevisedContextualAlias {
1961				revision: 0,
1962				ring: 0,
1963				ca: ContextualAlias { alias: [1; 32], context: *context },
1964			})
1965			.into())
1966		}
1967	}
1968
	impl<T: Config> CountedMembers for EnsurePersonalAliasInContext<T> {
		// Reports the stored count of members currently active in rings.
		fn active_count(&self) -> u32 {
			ActiveMembers::<T>::get()
		}
	}
1974
1975	/// Ensure that the origin `o` represents an extrinsic (i.e. transaction) from a personal alias
1976	/// with revision information.
1977	///
1978	/// Returns `Ok` with the revised personal alias that signed the extrinsic or an `Err`
1979	/// otherwise.
1980	pub fn ensure_revised_personal_alias<OuterOrigin>(
1981		o: OuterOrigin,
1982	) -> Result<RevisedContextualAlias, BadOrigin>
1983	where
1984		OuterOrigin: TryInto<Origin, Error = OuterOrigin>,
1985	{
1986		match o.try_into() {
1987			Ok(Origin::PersonalAlias(rev_ca)) => Ok(rev_ca),
1988			_ => Err(BadOrigin),
1989		}
1990	}
1991
1992	/// Guard to ensure that the given origin is a person.
1993	///
1994	/// The revised contextual alias of the person is provided on success. The revision can be used
1995	/// to tell in the future if an alias may have been suspended. See [`RevisedContextualAlias`].
1996	pub struct EnsureRevisedPersonalAlias<T>(PhantomData<T>);
1997	impl<T: Config> EnsureOrigin<OriginFor<T>> for EnsureRevisedPersonalAlias<T> {
1998		type Success = RevisedContextualAlias;
1999
2000		fn try_origin(o: OriginFor<T>) -> Result<Self::Success, OriginFor<T>> {
2001			ensure_revised_personal_alias(o.clone().into_caller()).map_err(|_| o)
2002		}
2003
2004		#[cfg(feature = "runtime-benchmarks")]
2005		fn try_successful_origin() -> Result<OriginFor<T>, ()> {
2006			Ok(Origin::PersonalAlias(RevisedContextualAlias {
2007				revision: 0,
2008				ring: 0,
2009				ca: ContextualAlias { alias: [1; 32], context: [0; 32] },
2010			})
2011			.into())
2012		}
2013	}
2014
	// Also let `EnsureRevisedPersonalAlias` serve as an `EnsureOriginWithArg` by ignoring the
	// extra argument.
	frame_support::impl_ensure_origin_with_arg_ignoring_arg! {
		impl<{ T: Config, A }>
			EnsureOriginWithArg< OriginFor<T>, A> for EnsureRevisedPersonalAlias<T>
		{}
	}

	impl<T: Config> CountedMembers for EnsureRevisedPersonalAlias<T> {
		// Reports the stored count of members currently active in rings.
		fn active_count(&self) -> u32 {
			ActiveMembers::<T>::get()
		}
	}
2026
2027	/// Guard to ensure that the given origin is a person.
2028	///
2029	/// The revised alias of the person within the context provided as an argument is returned on
2030	/// success. The revision can be used to tell in the future if an alias may have been suspended.
2031	/// See [`RevisedAlias`].
2032	pub struct EnsureRevisedPersonalAliasInContext<T>(PhantomData<T>);
2033	impl<T: Config> EnsureOriginWithArg<OriginFor<T>, Context>
2034		for EnsureRevisedPersonalAliasInContext<T>
2035	{
2036		type Success = RevisedAlias;
2037
2038		fn try_origin(o: OriginFor<T>, arg: &Context) -> Result<Self::Success, OriginFor<T>> {
2039			match ensure_revised_personal_alias(o.clone().into_caller()) {
2040				Ok(ca) if &ca.ca.context == arg =>
2041					Ok(RevisedAlias { revision: ca.revision, ring: ca.ring, alias: ca.ca.alias }),
2042				_ => Err(o),
2043			}
2044		}
2045
2046		#[cfg(feature = "runtime-benchmarks")]
2047		fn try_successful_origin(context: &Context) -> Result<OriginFor<T>, ()> {
2048			Ok(Origin::PersonalAlias(RevisedContextualAlias {
2049				revision: 0,
2050				ring: 0,
2051				ca: ContextualAlias { alias: [1; 32], context: *context },
2052			})
2053			.into())
2054		}
2055	}
2056
	impl<T: Config> CountedMembers for EnsureRevisedPersonalAliasInContext<T> {
		// Reports the stored count of members currently active in rings.
		fn active_count(&self) -> u32 {
			ActiveMembers::<T>::get()
		}
	}
2062}