// polkadot_runtime_parachains/inclusion/mod.rs
1// Copyright (C) Parity Technologies (UK) Ltd.
2// This file is part of Polkadot.
3
4// Polkadot is free software: you can redistribute it and/or modify
5// it under the terms of the GNU General Public License as published by
6// the Free Software Foundation, either version 3 of the License, or
7// (at your option) any later version.
8
9// Polkadot is distributed in the hope that it will be useful,
10// but WITHOUT ANY WARRANTY; without even the implied warranty of
11// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12// GNU General Public License for more details.
13
14// You should have received a copy of the GNU General Public License
15// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
16
17//! The inclusion pallet is responsible for inclusion and availability of scheduled parachains.
18//!
19//! It is responsible for carrying candidates from being backable to being backed, and then from
20//! backed to included.
21
22use crate::{
23	configuration::{self, HostConfiguration},
24	disputes, dmp, hrmp,
25	paras::{self, UpgradeStrategy},
26	scheduler,
27	shared::{self, AllowedRelayParentsTracker},
28	util::make_persisted_validation_data_with_parent,
29};
30use alloc::{
31	collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque},
32	vec,
33	vec::Vec,
34};
35use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
36use codec::{Decode, DecodeWithMemTracking, Encode};
37use core::fmt;
38use frame_support::{
39	defensive,
40	pallet_prelude::*,
41	traits::{EnqueueMessage, Footprint, QueueFootprint, QueueFootprintQuery},
42	BoundedSlice,
43};
44use frame_system::pallet_prelude::*;
45use pallet_message_queue::OnQueueChanged;
46use polkadot_primitives::{
47	effective_minimum_backing_votes, skip_ump_signals, supermajority_threshold, well_known_keys,
48	BackedCandidate, CandidateCommitments, CandidateDescriptorV2 as CandidateDescriptor,
49	CandidateHash, CandidateReceiptV2 as CandidateReceipt,
50	CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreIndex, GroupIndex, HeadData,
51	Id as ParaId, SignedAvailabilityBitfields, SigningContext, UpwardMessage, ValidatorId,
52	ValidatorIndex, ValidityAttestation,
53};
54use scale_info::TypeInfo;
55use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating};
56
57pub use pallet::*;
58
59#[cfg(test)]
60pub(crate) mod tests;
61
62#[cfg(feature = "runtime-benchmarks")]
63mod benchmarking;
64
65pub mod migration;
66
/// Weight information for this pallet's operations.
pub trait WeightInfo {
	/// Weight for `enact_candidate` extrinsic given the number of sent messages
	/// (ump, hrmp) and whether there is a new code for a runtime upgrade.
	///
	/// NOTE: due to a shortcoming of the current benchmarking framework,
	/// we use `u32` for the code upgrade, even though it is a `bool`.
	fn enact_candidate(u: u32, h: u32, c: u32) -> Weight;
}

/// A weight-info implementation for tests: every operation is free.
pub struct TestWeightInfo;
impl WeightInfo for TestWeightInfo {
	fn enact_candidate(_u: u32, _h: u32, _c: u32) -> Weight {
		Weight::zero()
	}
}

// Fallback implementation mirroring `TestWeightInfo`: charges no weight.
impl WeightInfo for () {
	fn enact_candidate(_u: u32, _h: u32, _c: u32) -> Weight {
		Weight::zero()
	}
}

/// Maximum value that `config.max_upward_message_size` can be set to.
///
/// This is used for benchmarking sanely bounding relevant storage items. It is expected from the
/// `configuration` pallet to check these values before setting.
pub const MAX_UPWARD_MESSAGE_SIZE_BOUND: u32 = 128 * 1024; // 128 KiB
94
/// A backed candidate pending availability.
///
/// Stored per para in the `PendingAvailability` storage map; entries of the same para form a
/// dependency chain (see the storage item's documentation).
#[derive(Encode, Decode, PartialEq, TypeInfo, Clone)]
#[cfg_attr(test, derive(Debug))]
pub struct CandidatePendingAvailability<H, N> {
	/// The availability core this is assigned to.
	core: CoreIndex,
	/// The candidate hash.
	hash: CandidateHash,
	/// The candidate descriptor.
	descriptor: CandidateDescriptor<H>,
	/// The candidate commitments.
	commitments: CandidateCommitments,
	/// The received availability votes. One bit per validator.
	availability_votes: BitVec<u8, BitOrderLsb0>,
	/// The backers of the candidate pending availability. One bit per validator.
	backers: BitVec<u8, BitOrderLsb0>,
	/// The block number of the relay-parent of the receipt.
	relay_parent_number: N,
	/// The block number of the relay-chain block this was backed in.
	backed_in_number: N,
	/// The group index backing this block.
	backing_group: GroupIndex,
}
118
impl<H, N> CandidatePendingAvailability<H, N> {
	/// Get the availability votes on the candidate.
	pub(crate) fn availability_votes(&self) -> &BitVec<u8, BitOrderLsb0> {
		&self.availability_votes
	}

	/// Get the relay-chain block number this was backed in.
	pub(crate) fn backed_in_number(&self) -> N
	where
		N: Clone,
	{
		self.backed_in_number.clone()
	}

	/// Get the core index.
	pub(crate) fn core_occupied(&self) -> CoreIndex {
		self.core
	}

	/// Get the candidate hash.
	pub(crate) fn candidate_hash(&self) -> CandidateHash {
		self.hash
	}

	/// Get the candidate descriptor.
	pub(crate) fn candidate_descriptor(&self) -> &CandidateDescriptor<H> {
		&self.descriptor
	}

	/// Get the candidate commitments.
	pub(crate) fn candidate_commitments(&self) -> &CandidateCommitments {
		&self.commitments
	}

	/// Get the candidate's relay parent's number.
	pub(crate) fn relay_parent_number(&self) -> N
	where
		N: Clone,
	{
		self.relay_parent_number.clone()
	}

	/// Construct a pending-availability record directly from its parts.
	/// Only available to benchmarks and tests; production code builds records
	/// in `process_candidates`.
	#[cfg(any(feature = "runtime-benchmarks", test))]
	pub(crate) fn new(
		core: CoreIndex,
		hash: CandidateHash,
		descriptor: CandidateDescriptor<H>,
		commitments: CandidateCommitments,
		availability_votes: BitVec<u8, BitOrderLsb0>,
		backers: BitVec<u8, BitOrderLsb0>,
		relay_parent_number: N,
		backed_in_number: N,
		backing_group: GroupIndex,
	) -> Self {
		Self {
			core,
			hash,
			descriptor,
			commitments,
			availability_votes,
			backers,
			relay_parent_number,
			backed_in_number,
			backing_group,
		}
	}
}
186
/// A hook for applying validator rewards
pub trait RewardValidators {
	/// Reward the validators with the given indices for issuing backing statements.
	fn reward_backing(validators: impl IntoIterator<Item = ValidatorIndex>);
	/// Reward the validators with the given indices for issuing availability bitfields.
	/// Validators are sent to this hook when they have contributed to the availability
	/// of a candidate by setting a bit in their bitfield.
	fn reward_bitfields(validators: impl IntoIterator<Item = ValidatorIndex>);
}

/// No-op rewards implementation, e.g. for tests.
impl RewardValidators for () {
	fn reward_backing(_: impl IntoIterator<Item = ValidatorIndex>) {}
	fn reward_bitfields(_: impl IntoIterator<Item = ValidatorIndex>) {}
}
201
/// Reads the footprint of queues for a specific origin type.
pub trait QueueFootprinter {
	/// The type identifying which queue is being queried.
	type Origin;

	/// Number of messages currently queued for the given origin.
	fn message_count(origin: Self::Origin) -> u64;
}

/// No-op footprinter: reports every queue as empty.
impl QueueFootprinter for () {
	type Origin = UmpQueueId;

	fn message_count(_: Self::Origin) -> u64 {
		0
	}
}
216
/// Aggregate message origin for the `MessageQueue` pallet.
///
/// Can be extended to serve further use-cases besides just UMP. Is stored in storage, so any change
/// to existing values will require a migration.
#[derive(
	Encode, Decode, DecodeWithMemTracking, Clone, MaxEncodedLen, Eq, PartialEq, Debug, TypeInfo,
)]
pub enum AggregateMessageOrigin {
	/// Inbound upward message.
	// Explicit codec index keeps the stored encoding stable if variants are reordered.
	#[codec(index = 0)]
	Ump(UmpQueueId),
}

/// Identifies a UMP queue inside the `MessageQueue` pallet.
///
/// It is written in verbose form since future variants like `Here` and `Bridged` are already
/// foreseeable.
#[derive(
	Encode, Decode, DecodeWithMemTracking, Clone, MaxEncodedLen, Eq, PartialEq, Debug, TypeInfo,
)]
pub enum UmpQueueId {
	/// The message originated from this parachain.
	// Explicit codec index keeps the stored encoding stable if variants are reordered.
	#[codec(index = 0)]
	Para(ParaId),
}
242
// Benchmarks need to synthesize arbitrary message origins from a plain integer.
#[cfg(feature = "runtime-benchmarks")]
impl From<u32> for AggregateMessageOrigin {
	fn from(n: u32) -> Self {
		// Some dummy for the benchmarks.
		Self::Ump(UmpQueueId::Para(n.into()))
	}
}

/// The maximal length of a UMP message.
pub type MaxUmpMessageLenOf<T> =
	<<T as Config>::MessageQueue as EnqueueMessage<AggregateMessageOrigin>>::MaxMessageLen;
254
#[frame_support::pallet]
pub mod pallet {
	use super::*;

	/// The in-code storage version of this pallet (see the v0 -> v1 note on
	/// `PendingAvailability` below).
	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
	#[pallet::pallet]
	#[pallet::without_storage_info]
	#[pallet::storage_version(STORAGE_VERSION)]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config:
		frame_system::Config
		+ shared::Config
		+ paras::Config
		+ dmp::Config
		+ hrmp::Config
		+ configuration::Config
		+ scheduler::Config
	{
		/// The overarching runtime event type.
		#[allow(deprecated)]
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
		/// Handler for dispute logic, keyed by relay-chain block number.
		type DisputesHandler: disputes::DisputesHandler<BlockNumberFor<Self>>;
		/// Hook used to reward backers and availability-bitfield voters.
		type RewardValidators: RewardValidators;

		/// The system message queue.
		///
		/// The message queue provides general queueing and processing functionality. Currently it
		/// replaces the old `UMP` dispatch queue. Other use-cases can be implemented as well by
		/// adding new variants to `AggregateMessageOrigin`.
		type MessageQueue: EnqueueMessage<AggregateMessageOrigin>
			+ QueueFootprintQuery<AggregateMessageOrigin, MaxMessageLen = MaxUmpMessageLenOf<Self>>;

		/// Weight info for the calls of this pallet.
		type WeightInfo: WeightInfo;
	}

	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// A candidate was backed. `[candidate, head_data]`
		CandidateBacked(CandidateReceipt<T::Hash>, HeadData, CoreIndex, GroupIndex),
		/// A candidate was included. `[candidate, head_data]`
		CandidateIncluded(CandidateReceipt<T::Hash>, HeadData, CoreIndex, GroupIndex),
		/// A candidate timed out. `[candidate, head_data]`
		CandidateTimedOut(CandidateReceipt<T::Hash>, HeadData, CoreIndex),
		/// Some upward messages have been received and will be processed.
		UpwardMessagesReceived { from: ParaId, count: u32 },
	}

	#[pallet::error]
	pub enum Error<T> {
		/// Validator index out of bounds.
		ValidatorIndexOutOfBounds,
		/// Candidate submitted but para not scheduled.
		UnscheduledCandidate,
		/// Head data exceeds the configured maximum.
		HeadDataTooLarge,
		/// Code upgrade prematurely.
		PrematureCodeUpgrade,
		/// Output code is too large
		NewCodeTooLarge,
		/// The candidate's relay-parent was not allowed. Either it was
		/// not recent enough or it didn't advance based on the last parachain block.
		DisallowedRelayParent,
		/// Failed to compute group index for the core: either it's out of bounds
		/// or the relay parent doesn't belong to the current session.
		InvalidAssignment,
		/// Invalid group index in core assignment.
		InvalidGroupIndex,
		/// Insufficient (non-majority) backing.
		InsufficientBacking,
		/// Invalid (bad signature, unknown validator, etc.) backing.
		InvalidBacking,
		/// The validation data hash does not match expected.
		ValidationDataHashMismatch,
		/// The downward message queue is not processed correctly.
		IncorrectDownwardMessageHandling,
		/// At least one upward message sent does not pass the acceptance criteria.
		InvalidUpwardMessages,
		/// The candidate didn't follow the rules of HRMP watermark advancement.
		HrmpWatermarkMishandling,
		/// The HRMP messages sent by the candidate is not valid.
		InvalidOutboundHrmp,
		/// The validation code hash of the candidate is not valid.
		InvalidValidationCodeHash,
		/// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual
		/// para head in the commitments.
		ParaHeadMismatch,
	}

	/// Candidates pending availability by `ParaId`. They form a chain starting from the latest
	/// included head of the para.
	/// Use a different prefix post-migration to v1, since the v0 `PendingAvailability` storage
	/// would otherwise have the exact same prefix which could cause undefined behaviour when doing
	/// the migration.
	#[pallet::storage]
	#[pallet::storage_prefix = "V1"]
	pub(crate) type PendingAvailability<T: Config> = StorageMap<
		_,
		Twox64Concat,
		ParaId,
		VecDeque<CandidatePendingAvailability<T::Hash, BlockNumberFor<T>>>,
	>;

	// This pallet currently exposes no dispatchable calls; all entry points are
	// `pub(crate)` functions invoked by other runtime-parachains pallets.
	#[pallet::call]
	impl<T: Config> Pallet<T> {}
}
363
/// Log target for this pallet's `log` macro invocations.
const LOG_TARGET: &str = "runtime::inclusion";
365
/// The reason that a candidate's outputs were rejected for.
#[derive(Debug)]
enum AcceptanceCheckErr {
	/// Head data exceeds the configured maximum.
	HeadDataTooLarge,
	/// Code upgrades are not permitted at the current time.
	PrematureCodeUpgrade,
	/// The new runtime blob is too large.
	NewCodeTooLarge,
	/// The candidate violated this DMP acceptance criteria.
	ProcessedDownwardMessages,
	/// The candidate violated this UMP acceptance criteria.
	UpwardMessages,
	/// The candidate violated this HRMP watermark acceptance criteria.
	HrmpWatermark,
	/// The candidate violated this outbound HRMP acceptance criteria.
	OutboundHrmp,
}
383
// The conversions below are intentionally lossy: the detailed sub-pallet error is
// collapsed into the coarse `AcceptanceCheckErr` category it belongs to.

impl From<dmp::ProcessedDownwardMessagesAcceptanceErr> for AcceptanceCheckErr {
	fn from(_: dmp::ProcessedDownwardMessagesAcceptanceErr) -> Self {
		Self::ProcessedDownwardMessages
	}
}

impl From<UmpAcceptanceCheckErr> for AcceptanceCheckErr {
	fn from(_: UmpAcceptanceCheckErr) -> Self {
		Self::UpwardMessages
	}
}

impl<BlockNumber> From<hrmp::HrmpWatermarkAcceptanceErr<BlockNumber>> for AcceptanceCheckErr {
	fn from(_: hrmp::HrmpWatermarkAcceptanceErr<BlockNumber>) -> Self {
		Self::HrmpWatermark
	}
}

impl From<hrmp::OutboundHrmpAcceptanceErr> for AcceptanceCheckErr {
	fn from(_: hrmp::OutboundHrmpAcceptanceErr) -> Self {
		Self::OutboundHrmp
	}
}
407
/// An error returned by [`Pallet::check_upward_messages`] that indicates a violation of one of
/// acceptance criteria rules.
#[cfg_attr(test, derive(PartialEq))]
// NOTE(review): the `dead_code` allow presumably covers build configurations in which some
// variants/fields are never constructed — confirm before removing.
#[allow(dead_code)]
pub(crate) enum UmpAcceptanceCheckErr {
	/// The maximal number of messages that can be submitted in one batch was exceeded.
	MoreMessagesThanPermitted { sent: u32, permitted: u32 },
	/// The maximal size of a single message was exceeded.
	MessageSize { idx: u32, msg_size: u32, max_size: u32 },
	/// The allowed number of messages in the queue was exceeded.
	CapacityExceeded { count: u64, limit: u64 },
	/// The allowed combined message size in the queue was exceeded.
	TotalSizeExceeded { total_size: u64, limit: u64 },
	/// A para-chain cannot send UMP messages while it is offboarding.
	IsOffboarding,
}
424
425impl fmt::Debug for UmpAcceptanceCheckErr {
426	fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
427		match *self {
428			UmpAcceptanceCheckErr::MoreMessagesThanPermitted { sent, permitted } => write!(
429				fmt,
430				"more upward messages than permitted by config ({} > {})",
431				sent, permitted,
432			),
433			UmpAcceptanceCheckErr::MessageSize { idx, msg_size, max_size } => write!(
434				fmt,
435				"upward message idx {} larger than permitted by config ({} > {})",
436				idx, msg_size, max_size,
437			),
438			UmpAcceptanceCheckErr::CapacityExceeded { count, limit } => write!(
439				fmt,
440				"the ump queue would have more items than permitted by config ({} > {})",
441				count, limit,
442			),
443			UmpAcceptanceCheckErr::TotalSizeExceeded { total_size, limit } => write!(
444				fmt,
445				"the ump queue would have grown past the max size permitted by config ({} > {})",
446				total_size, limit,
447			),
448			UmpAcceptanceCheckErr::IsOffboarding => {
449				write!(fmt, "upward message rejected because the para is off-boarding")
450			},
451		}
452	}
453}
454
455impl<T: Config> Pallet<T> {
	/// Block initialization logic, called by initializer.
	///
	/// Currently a no-op consuming zero weight.
	pub(crate) fn initializer_initialize(_now: BlockNumberFor<T>) -> Weight {
		Weight::zero()
	}

	/// Block finalization logic, called by initializer. Currently a no-op.
	pub(crate) fn initializer_finalize() {}
463
	/// Handle an incoming session change.
	///
	/// Drops all candidates pending availability and sweeps the UMP dispatch queues of the
	/// paras going out of scope.
	pub(crate) fn initializer_on_new_session(
		_notification: &crate::initializer::SessionChangeNotification<BlockNumberFor<T>>,
		outgoing_paras: &[ParaId],
	) {
		// unlike most drain methods, drained elements are not cleared on `Drop` of the iterator
		// and require consumption.
		for _ in PendingAvailability::<T>::drain() {}

		Self::cleanup_outgoing_ump_dispatch_queues(outgoing_paras);
	}
475
476	pub(crate) fn cleanup_outgoing_ump_dispatch_queues(outgoing: &[ParaId]) {
477		for outgoing_para in outgoing {
478			Self::cleanup_outgoing_ump_dispatch_queue(*outgoing_para);
479		}
480	}
481
482	pub(crate) fn cleanup_outgoing_ump_dispatch_queue(para: ParaId) {
483		T::MessageQueue::sweep_queue(AggregateMessageOrigin::Ump(UmpQueueId::Para(para)));
484	}
485
486	pub(crate) fn get_occupied_cores(
487	) -> impl Iterator<Item = (CoreIndex, CandidatePendingAvailability<T::Hash, BlockNumberFor<T>>)>
488	{
489		PendingAvailability::<T>::iter_values().flat_map(|pending_candidates| {
490			pending_candidates.into_iter().map(|c| (c.core, c.clone()))
491		})
492	}
493
494	/// Extract the freed cores based on cores that became available.
495	///
496	/// Bitfields are expected to have been sanitized already. E.g. via `sanitize_bitfields`!
497	///
498	/// Updates storage items `PendingAvailability`.
499	///
500	/// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became
501	/// available, and cores free.
502	pub(crate) fn update_pending_availability_and_get_freed_cores(
503		validators: &[ValidatorId],
504		signed_bitfields: SignedAvailabilityBitfields,
505	) -> (Weight, Vec<(CoreIndex, CandidateHash)>) {
506		let threshold = availability_threshold(validators.len());
507
508		let mut votes_per_core: BTreeMap<CoreIndex, BTreeSet<ValidatorIndex>> = BTreeMap::new();
509
510		for (checked_bitfield, validator_index) in
511			signed_bitfields.into_iter().map(|signed_bitfield| {
512				let validator_idx = signed_bitfield.validator_index();
513				let checked_bitfield = signed_bitfield.into_payload();
514				(checked_bitfield, validator_idx)
515			}) {
516			for (bit_idx, _) in checked_bitfield.0.iter().enumerate().filter(|(_, is_av)| **is_av) {
517				let core_index = CoreIndex(bit_idx as u32);
518				votes_per_core
519					.entry(core_index)
520					.or_insert_with(|| BTreeSet::new())
521					.insert(validator_index);
522			}
523		}
524
525		let mut freed_cores = vec![];
526		let mut weight = Weight::zero();
527
528		let pending_paraids: Vec<_> = PendingAvailability::<T>::iter_keys().collect();
529		for paraid in pending_paraids {
530			PendingAvailability::<T>::mutate(paraid, |candidates| {
531				if let Some(candidates) = candidates {
532					let mut last_enacted_index: Option<usize> = None;
533
534					for (candidate_index, candidate) in candidates.iter_mut().enumerate() {
535						if let Some(validator_indices) = votes_per_core.remove(&candidate.core) {
536							for validator_index in validator_indices.iter() {
537								// defensive check - this is constructed by loading the
538								// availability bitfield record, which is always `Some` if
539								// the core is occupied - that's why we're here.
540								if let Some(mut bit) =
541									candidate.availability_votes.get_mut(validator_index.0 as usize)
542								{
543									*bit = true;
544								}
545							}
546						}
547
548						// We check for the candidate's availability even if we didn't get any new
549						// bitfields for its core, as it may have already been available at a
550						// previous block but wasn't enacted due to its predecessors not being
551						// available.
552						if candidate.availability_votes.count_ones() >= threshold {
553							// We can only enact a candidate if we've enacted all of its
554							// predecessors already.
555							let can_enact = if candidate_index == 0 {
556								last_enacted_index == None
557							} else {
558								let prev_candidate_index = usize::try_from(candidate_index - 1)
559									.expect("Previous `if` would have caught a 0 candidate index.");
560								matches!(last_enacted_index, Some(old_index) if old_index == prev_candidate_index)
561							};
562
563							if can_enact {
564								last_enacted_index = Some(candidate_index);
565							}
566						}
567					}
568
569					// Trim the pending availability candidates storage and enact candidates of this
570					// para now.
571					if let Some(last_enacted_index) = last_enacted_index {
572						let evicted_candidates = candidates.drain(0..=last_enacted_index);
573						for candidate in evicted_candidates {
574							freed_cores.push((candidate.core, candidate.hash));
575
576							let receipt = CommittedCandidateReceipt {
577								descriptor: candidate.descriptor,
578								commitments: candidate.commitments,
579							};
580
581							let has_runtime_upgrade =
582								receipt.commitments.new_validation_code.as_ref().map_or(0, |_| 1);
583							let u = receipt.commitments.upward_messages.len() as u32;
584							let h = receipt.commitments.horizontal_messages.len() as u32;
585							let enact_weight = <T as Config>::WeightInfo::enact_candidate(
586								u,
587								h,
588								has_runtime_upgrade,
589							);
590							Self::enact_candidate(
591								candidate.relay_parent_number,
592								receipt,
593								candidate.backers,
594								candidate.availability_votes,
595								candidate.core,
596								candidate.backing_group,
597							);
598							weight.saturating_accrue(enact_weight);
599						}
600					}
601				}
602			});
603		}
604		// For relay chain blocks, we're (ab)using the proof size
605		// to limit the raw transaction size of `ParaInherent` and
606		// there's no state proof (aka PoV) associated with it.
607		// Since we already accounted for bitfields size, we should
608		// not include `enact_candidate` PoV impact here.
609		(weight.set_proof_size(0), freed_cores)
610	}
611
	/// Process candidates that have been backed. Provide a set of
	/// candidates along with their scheduled cores.
	///
	/// Candidates of the same paraid should be sorted according to their dependency order (they
	/// should form a chain). If this condition is not met, this function will return an error.
	/// (This really should not happen here, if the candidates were properly sanitised in
	/// paras_inherent).
	///
	/// On success, records each candidate as pending availability, deposits a
	/// `CandidateBacked` event per candidate and returns the receipts together with
	/// the attesting backers.
	pub(crate) fn process_candidates<GV>(
		allowed_relay_parents: &AllowedRelayParentsTracker<T::Hash, BlockNumberFor<T>>,
		candidates: &BTreeMap<ParaId, Vec<(BackedCandidate<T::Hash>, CoreIndex)>>,
		group_validators: GV,
	) -> Result<
		Vec<(CandidateReceipt<T::Hash>, Vec<(ValidatorIndex, ValidityAttestation)>)>,
		DispatchError,
	>
	where
		GV: Fn(GroupIndex) -> Option<Vec<ValidatorIndex>>,
	{
		if candidates.is_empty() {
			return Ok(Default::default())
		}

		let now = frame_system::Pallet::<T>::block_number();
		let validators = shared::ActiveValidatorKeys::<T>::get();

		// Collect candidate receipts with backers.
		let mut candidate_receipt_with_backing_validator_indices =
			Vec::with_capacity(candidates.len());

		for (para_id, para_candidates) in candidates {
			// Start from the para's latest head and advance it candidate-by-candidate, so
			// each candidate in the chain is verified against its predecessor's output head.
			let mut latest_head_data = match Self::para_latest_head_data(para_id) {
				None => {
					defensive!("Latest included head data for paraid {:?} is None", para_id);
					continue
				},
				Some(latest_head_data) => latest_head_data,
			};

			for (candidate, core) in para_candidates.iter() {
				let candidate_hash = candidate.candidate().hash();

				// The previous context is None, as it's already checked during candidate
				// sanitization.
				let check_ctx = CandidateCheckContext::<T>::new(None);
				let relay_parent_number = check_ctx.verify_backed_candidate(
					&allowed_relay_parents,
					candidate.candidate(),
					latest_head_data.clone(),
				)?;

				// The candidate based upon relay parent `N` should be backed by a
				// group assigned to core at block `N + 1`. Thus,
				// `relay_parent_number + 1` will always land in the current
				// session.
				let group_idx = scheduler::Pallet::<T>::group_assigned_to_core(
					*core,
					relay_parent_number + One::one(),
				)
				.ok_or_else(|| {
					log::warn!(
						target: LOG_TARGET,
						"Failed to compute group index for candidate {:?}",
						candidate_hash
					);
					Error::<T>::InvalidAssignment
				})?;
				let group_vals =
					group_validators(group_idx).ok_or_else(|| Error::<T>::InvalidGroupIndex)?;

				// Check backing vote count and validity.
				let (backers, backer_idx_and_attestation) =
					Self::check_backing_votes(candidate, &validators, group_vals)?;

				// Found a valid candidate.
				latest_head_data = candidate.candidate().commitments.head_data.clone();
				candidate_receipt_with_backing_validator_indices
					.push((candidate.receipt(), backer_idx_and_attestation));

				// Update storage now
				PendingAvailability::<T>::mutate(&para_id, |pending_availability| {
					let new_candidate = CandidatePendingAvailability {
						core: *core,
						hash: candidate_hash,
						descriptor: candidate.candidate().descriptor.clone(),
						commitments: candidate.candidate().commitments.clone(),
						// initialize all availability votes to 0.
						availability_votes: bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()],
						relay_parent_number,
						backers: backers.to_bitvec(),
						backed_in_number: now,
						backing_group: group_idx,
					};

					if let Some(pending_availability) = pending_availability {
						pending_availability.push_back(new_candidate);
					} else {
						*pending_availability =
							Some([new_candidate].into_iter().collect::<VecDeque<_>>())
					}
				});

				// Deposit backed event.
				Self::deposit_event(Event::<T>::CandidateBacked(
					candidate.candidate().to_plain(),
					candidate.candidate().commitments.head_data.clone(),
					*core,
					group_idx,
				));
			}
		}

		Ok(candidate_receipt_with_backing_validator_indices)
	}
725
726	// Get the latest backed output head data of this para (including pending availability).
727	pub(crate) fn para_latest_head_data(para_id: &ParaId) -> Option<HeadData> {
728		match PendingAvailability::<T>::get(para_id).and_then(|pending_candidates| {
729			pending_candidates.back().map(|x| x.commitments.head_data.clone())
730		}) {
731			Some(head_data) => Some(head_data),
732			None => paras::Heads::<T>::get(para_id),
733		}
734	}
735
736	// Get the relay parent number of the most recent candidate (including pending availability).
737	pub(crate) fn para_most_recent_context(para_id: &ParaId) -> Option<BlockNumberFor<T>> {
738		match PendingAvailability::<T>::get(para_id)
739			.and_then(|pending_candidates| pending_candidates.back().map(|x| x.relay_parent_number))
740		{
741			Some(relay_parent_number) => Some(relay_parent_number),
742			None => paras::MostRecentContext::<T>::get(para_id),
743		}
744	}
745
746	fn check_backing_votes(
747		backed_candidate: &BackedCandidate<T::Hash>,
748		validators: &[ValidatorId],
749		group_vals: Vec<ValidatorIndex>,
750	) -> Result<(BitVec<u8, BitOrderLsb0>, Vec<(ValidatorIndex, ValidityAttestation)>), Error<T>> {
751		let minimum_backing_votes = configuration::ActiveConfig::<T>::get().minimum_backing_votes;
752
753		let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()];
754		let signing_context = SigningContext {
755			parent_hash: backed_candidate.descriptor().relay_parent(),
756			session_index: shared::CurrentSessionIndex::<T>::get(),
757		};
758
759		let (validator_indices, _) = backed_candidate.validator_indices_and_core_index();
760
761		// check the signatures in the backing and that it is a majority.
762		let maybe_amount_validated = polkadot_primitives::check_candidate_backing(
763			backed_candidate.candidate().hash(),
764			backed_candidate.validity_votes(),
765			validator_indices,
766			&signing_context,
767			group_vals.len(),
768			|intra_group_vi| {
769				group_vals
770					.get(intra_group_vi)
771					.and_then(|vi| validators.get(vi.0 as usize))
772					.map(|v| v.clone())
773			},
774		);
775
776		match maybe_amount_validated {
777			Ok(amount_validated) => ensure!(
778				amount_validated >=
779					effective_minimum_backing_votes(group_vals.len(), minimum_backing_votes),
780				Error::<T>::InsufficientBacking,
781			),
782			Err(()) => {
783				Err(Error::<T>::InvalidBacking)?;
784			},
785		}
786
787		let mut backer_idx_and_attestation =
788			Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity(
789				validator_indices.count_ones(),
790			);
791
792		for ((bit_idx, _), attestation) in validator_indices
793			.iter()
794			.enumerate()
795			.filter(|(_, signed)| **signed)
796			.zip(backed_candidate.validity_votes().iter().cloned())
797		{
798			let val_idx = group_vals.get(bit_idx).expect("this query succeeded above; qed");
799			backer_idx_and_attestation.push((*val_idx, attestation));
800
801			backers.set(val_idx.0 as _, true);
802		}
803
804		Ok((backers, backer_idx_and_attestation))
805	}
806
807	/// Run the acceptance criteria checks on the given candidate commitments.
808	pub(crate) fn check_validation_outputs_for_runtime_api(
809		para_id: ParaId,
810		relay_parent_number: BlockNumberFor<T>,
811		validation_outputs: polkadot_primitives::CandidateCommitments,
812	) -> bool {
813		let prev_context = Self::para_most_recent_context(&para_id);
814		let check_ctx = CandidateCheckContext::<T>::new(prev_context);
815
816		if let Err(err) = check_ctx.check_validation_outputs(
817			para_id,
818			relay_parent_number,
819			&validation_outputs.head_data,
820			&validation_outputs.new_validation_code,
821			validation_outputs.processed_downward_messages,
822			&validation_outputs.upward_messages,
823			BlockNumberFor::<T>::from(validation_outputs.hrmp_watermark),
824			&validation_outputs.horizontal_messages,
825		) {
826			log::debug!(
827				target: LOG_TARGET,
828				"Validation outputs checking for parachain `{}` failed, error: {:?}",
829				u32::from(para_id), err
830			);
831			false
832		} else {
833			true
834		}
835	}
836
	/// Enact a candidate that has become fully available: reward its backers and availability
	/// voters, schedule any committed code upgrade, enact its messaging output and note the
	/// new para head.
	fn enact_candidate(
		relay_parent_number: BlockNumberFor<T>,
		receipt: CommittedCandidateReceipt<T::Hash>,
		backers: BitVec<u8, BitOrderLsb0>,
		availability_votes: BitVec<u8, BitOrderLsb0>,
		core_index: CoreIndex,
		backing_group: GroupIndex,
	) {
		let plain = receipt.to_plain();
		let commitments = receipt.commitments;
		let config = configuration::ActiveConfig::<T>::get();

		// Reward every validator whose bit is set in the backers bitfield.
		T::RewardValidators::reward_backing(
			backers
				.iter()
				.enumerate()
				.filter(|(_, backed)| **backed)
				.map(|(i, _)| ValidatorIndex(i as _)),
		);

		// Reward every validator that voted for availability of this candidate.
		T::RewardValidators::reward_bitfields(
			availability_votes
				.iter()
				.enumerate()
				.filter(|(_, voted)| **voted)
				.map(|(i, _)| ValidatorIndex(i as _)),
		);

		if let Some(new_code) = commitments.new_validation_code {
			// Block number of candidate's inclusion.
			let now = frame_system::Pallet::<T>::block_number();

			paras::Pallet::<T>::schedule_code_upgrade(
				receipt.descriptor.para_id(),
				new_code,
				now,
				&config,
				UpgradeStrategy::SetGoAheadSignal,
			);
		}

		// enact the messaging facet of the candidate.
		// NOTE(review): the order (DMP prune, UMP receive, HRMP prune, HRMP queue) appears
		// deliberate — confirm before reordering.
		dmp::Pallet::<T>::prune_dmq(
			receipt.descriptor.para_id(),
			commitments.processed_downward_messages,
		);
		Self::receive_upward_messages(
			receipt.descriptor.para_id(),
			commitments.upward_messages.as_slice(),
		);
		hrmp::Pallet::<T>::prune_hrmp(
			receipt.descriptor.para_id(),
			BlockNumberFor::<T>::from(commitments.hrmp_watermark),
		);
		hrmp::Pallet::<T>::queue_outbound_hrmp(
			receipt.descriptor.para_id(),
			commitments.horizontal_messages,
		);

		Self::deposit_event(Event::<T>::CandidateIncluded(
			plain,
			commitments.head_data.clone(),
			core_index,
			backing_group,
		));

		paras::Pallet::<T>::note_new_head(
			receipt.descriptor.para_id(),
			commitments.head_data,
			relay_parent_number,
		);
	}
909
910	pub(crate) fn relay_dispatch_queue_size(para_id: ParaId) -> (u32, u32) {
911		let fp = T::MessageQueue::footprint(AggregateMessageOrigin::Ump(UmpQueueId::Para(para_id)));
912		(fp.storage.count as u32, fp.storage.size as u32)
913	}
914
915	/// Check that all the upward messages sent by a candidate pass the acceptance criteria.
916	pub(crate) fn check_upward_messages(
917		config: &HostConfiguration<BlockNumberFor<T>>,
918		para: ParaId,
919		upward_messages: &[UpwardMessage],
920	) -> Result<(), UmpAcceptanceCheckErr> {
921		// Filter any pending UMP signals and the separator.
922		let upward_messages = skip_ump_signals(upward_messages.iter()).collect::<Vec<_>>();
923
924		// Cannot send UMP messages while off-boarding.
925		if paras::Pallet::<T>::is_offboarding(para) {
926			ensure!(upward_messages.is_empty(), UmpAcceptanceCheckErr::IsOffboarding);
927		}
928
929		let additional_msgs = upward_messages.len() as u32;
930		if additional_msgs > config.max_upward_message_num_per_candidate {
931			return Err(UmpAcceptanceCheckErr::MoreMessagesThanPermitted {
932				sent: additional_msgs,
933				permitted: config.max_upward_message_num_per_candidate,
934			})
935		}
936
937		let (para_queue_count, mut para_queue_size) = Self::relay_dispatch_queue_size(para);
938
939		if para_queue_count.saturating_add(additional_msgs) > config.max_upward_queue_count {
940			return Err(UmpAcceptanceCheckErr::CapacityExceeded {
941				count: para_queue_count.saturating_add(additional_msgs).into(),
942				limit: config.max_upward_queue_count.into(),
943			})
944		}
945
946		for (idx, msg) in upward_messages.into_iter().enumerate() {
947			let msg_size = msg.len() as u32;
948			if msg_size > config.max_upward_message_size {
949				return Err(UmpAcceptanceCheckErr::MessageSize {
950					idx: idx as u32,
951					msg_size,
952					max_size: config.max_upward_message_size,
953				})
954			}
955			// make sure that the queue is not overfilled.
956			// we do it here only once since returning false invalidates the whole relay-chain
957			// block.
958			if para_queue_size.saturating_add(msg_size) > config.max_upward_queue_size {
959				return Err(UmpAcceptanceCheckErr::TotalSizeExceeded {
960					total_size: para_queue_size.saturating_add(msg_size).into(),
961					limit: config.max_upward_queue_size.into(),
962				})
963			}
964			para_queue_size.saturating_accrue(msg_size);
965		}
966
967		Ok(())
968	}
969
970	/// Enqueues `upward_messages` from a `para`'s accepted candidate block.
971	///
972	/// This function is infallible since the candidate was already accepted and we therefore need
973	/// to deal with the messages as given. Messages that are too long will be ignored since such
974	/// candidates should have already been rejected in [`Self::check_upward_messages`].
975	pub(crate) fn receive_upward_messages(para: ParaId, upward_messages: &[Vec<u8>]) {
976		let bounded = skip_ump_signals(upward_messages.iter())
977			.filter_map(|d| {
978				BoundedSlice::try_from(&d[..])
979					.inspect_err(|_| {
980						defensive!("Accepted candidate contains too long msg, len=", d.len());
981					})
982					.ok()
983			})
984			.collect();
985		Self::receive_bounded_upward_messages(para, bounded)
986	}
987
988	/// Enqueues storage-bounded `upward_messages` from a `para`'s accepted candidate block.
989	pub(crate) fn receive_bounded_upward_messages(
990		para: ParaId,
991		messages: Vec<BoundedSlice<'_, u8, MaxUmpMessageLenOf<T>>>,
992	) {
993		let count = messages.len() as u32;
994		if count == 0 {
995			return
996		}
997
998		T::MessageQueue::enqueue_messages(
999			messages.into_iter(),
1000			AggregateMessageOrigin::Ump(UmpQueueId::Para(para)),
1001		);
1002		Self::deposit_event(Event::UpwardMessagesReceived { from: para, count });
1003	}
1004
1005	/// Cleans up all timed out candidates as well as their descendant candidates.
1006	///
1007	/// Returns a vector of cleaned-up core IDs.
1008	pub(crate) fn free_timedout() -> Vec<CoreIndex> {
1009		let timeout_pred = scheduler::Pallet::<T>::availability_timeout_predicate();
1010
1011		let timed_out: Vec<_> = Self::free_failed_cores(
1012			|candidate| timeout_pred(candidate.backed_in_number).timed_out,
1013			None,
1014		)
1015		.collect();
1016
1017		let mut timed_out_cores = Vec::with_capacity(timed_out.len());
1018		for candidate in timed_out.iter() {
1019			timed_out_cores.push(candidate.core);
1020
1021			let receipt = CandidateReceipt {
1022				descriptor: candidate.descriptor.clone(),
1023				commitments_hash: candidate.commitments.hash(),
1024			};
1025
1026			Self::deposit_event(Event::<T>::CandidateTimedOut(
1027				receipt,
1028				candidate.commitments.head_data.clone(),
1029				candidate.core,
1030			));
1031		}
1032
1033		timed_out_cores
1034	}
1035
1036	/// Cleans up all cores pending availability occupied by one of the disputed candidates or which
1037	/// are descendants of a disputed candidate.
1038	///
1039	/// Returns a vector of cleaned-up core IDs, along with the evicted candidate hashes.
1040	pub(crate) fn free_disputed(
1041		disputed: &BTreeSet<CandidateHash>,
1042	) -> Vec<(CoreIndex, CandidateHash)> {
1043		Self::free_failed_cores(
1044			|candidate| disputed.contains(&candidate.hash),
1045			Some(disputed.len()),
1046		)
1047		.map(|candidate| (candidate.core, candidate.hash))
1048		.collect()
1049	}
1050
1051	// Clean up cores whose candidates are deemed as failed by the predicate. `pred` returns true if
1052	// a candidate is considered failed.
1053	// A failed candidate also frees all subsequent cores which hold descendants of said candidate.
1054	fn free_failed_cores<
1055		P: Fn(&CandidatePendingAvailability<T::Hash, BlockNumberFor<T>>) -> bool,
1056	>(
1057		pred: P,
1058		capacity_hint: Option<usize>,
1059	) -> impl Iterator<Item = CandidatePendingAvailability<T::Hash, BlockNumberFor<T>>> {
1060		let mut earliest_dropped_indices: BTreeMap<ParaId, usize> = BTreeMap::new();
1061
1062		for (para_id, pending_candidates) in PendingAvailability::<T>::iter() {
1063			// We assume that pending candidates are stored in dependency order. So we need to store
1064			// the earliest dropped candidate. All others that follow will get freed as well.
1065			let mut earliest_dropped_idx = None;
1066			for (index, candidate) in pending_candidates.iter().enumerate() {
1067				if pred(candidate) {
1068					earliest_dropped_idx = Some(index);
1069					// Since we're looping the candidates in dependency order, we've found the
1070					// earliest failed index for this paraid.
1071					break;
1072				}
1073			}
1074
1075			if let Some(earliest_dropped_idx) = earliest_dropped_idx {
1076				earliest_dropped_indices.insert(para_id, earliest_dropped_idx);
1077			}
1078		}
1079
1080		let mut cleaned_up_cores =
1081			if let Some(capacity) = capacity_hint { Vec::with_capacity(capacity) } else { vec![] };
1082
1083		for (para_id, earliest_dropped_idx) in earliest_dropped_indices {
1084			// Do cleanups and record the cleaned up cores
1085			PendingAvailability::<T>::mutate(&para_id, |record| {
1086				if let Some(record) = record {
1087					let cleaned_up = record.drain(earliest_dropped_idx..);
1088					cleaned_up_cores.extend(cleaned_up);
1089				}
1090			});
1091		}
1092
1093		cleaned_up_cores.into_iter()
1094	}
1095
1096	/// Forcibly enact the pending candidates of the given paraid as though they had been deemed
1097	/// available by bitfields.
1098	///
1099	/// Is a no-op if there is no candidate pending availability for this para-id.
1100	/// If there are multiple candidates pending availability for this para-id, it will enact all of
1101	/// them. This should generally not be used but it is useful during execution of Runtime APIs,
1102	/// where the changes to the state are expected to be discarded directly after.
1103	pub(crate) fn force_enact(para: ParaId) {
1104		PendingAvailability::<T>::mutate(&para, |candidates| {
1105			if let Some(candidates) = candidates {
1106				for candidate in candidates.drain(..) {
1107					let receipt = CommittedCandidateReceipt {
1108						descriptor: candidate.descriptor,
1109						commitments: candidate.commitments,
1110					};
1111
1112					Self::enact_candidate(
1113						candidate.relay_parent_number,
1114						receipt,
1115						candidate.backers,
1116						candidate.availability_votes,
1117						candidate.core,
1118						candidate.backing_group,
1119					);
1120				}
1121			}
1122		});
1123	}
1124
1125	/// Returns the first `CommittedCandidateReceipt` pending availability for the para provided, if
1126	/// any.
1127	/// A para_id could have more than one candidates pending availability, if it's using elastic
1128	/// scaling. These candidates form a chain. This function returns the first in the chain.
1129	pub(crate) fn first_candidate_pending_availability(
1130		para: ParaId,
1131	) -> Option<CommittedCandidateReceipt<T::Hash>> {
1132		PendingAvailability::<T>::get(&para).and_then(|p| {
1133			p.get(0).map(|p| CommittedCandidateReceipt {
1134				descriptor: p.descriptor.clone(),
1135				commitments: p.commitments.clone(),
1136			})
1137		})
1138	}
1139
1140	/// Returns all the `CommittedCandidateReceipt` pending availability for the para provided, if
1141	/// any.
1142	pub(crate) fn candidates_pending_availability(
1143		para: ParaId,
1144	) -> Vec<CommittedCandidateReceipt<T::Hash>> {
1145		<PendingAvailability<T>>::get(&para)
1146			.map(|candidates| {
1147				candidates
1148					.into_iter()
1149					.map(|candidate| CommittedCandidateReceipt {
1150						descriptor: candidate.descriptor.clone(),
1151						commitments: candidate.commitments.clone(),
1152					})
1153					.collect()
1154			})
1155			.unwrap_or_default()
1156	}
1157}
1158
/// The minimum number of availability votes (out of `n_validators`) needed for a candidate to be
/// considered available: a supermajority threshold.
const fn availability_threshold(n_validators: usize) -> usize {
	supermajority_threshold(n_validators)
}
1162
1163impl AcceptanceCheckErr {
1164	/// Returns the same error so that it can be threaded through a needle of `DispatchError` and
1165	/// ultimately returned from a `Dispatchable`.
1166	fn strip_into_dispatch_err<T: Config>(self) -> Error<T> {
1167		use AcceptanceCheckErr::*;
1168		match self {
1169			HeadDataTooLarge => Error::<T>::HeadDataTooLarge,
1170			PrematureCodeUpgrade => Error::<T>::PrematureCodeUpgrade,
1171			NewCodeTooLarge => Error::<T>::NewCodeTooLarge,
1172			ProcessedDownwardMessages => Error::<T>::IncorrectDownwardMessageHandling,
1173			UpwardMessages => Error::<T>::InvalidUpwardMessages,
1174			HrmpWatermark => Error::<T>::HrmpWatermarkMishandling,
1175			OutboundHrmp => Error::<T>::InvalidOutboundHrmp,
1176		}
1177	}
1178}
1179
1180impl<T: Config> OnQueueChanged<AggregateMessageOrigin> for Pallet<T> {
1181	// Write back the remaining queue capacity into `relay_dispatch_queue_remaining_capacity`.
1182	fn on_queue_changed(origin: AggregateMessageOrigin, fp: QueueFootprint) {
1183		let para = match origin {
1184			AggregateMessageOrigin::Ump(UmpQueueId::Para(p)) => p,
1185		};
1186		let QueueFootprint { storage: Footprint { count, size }, .. } = fp;
1187		let (count, size) = (count.saturated_into(), size.saturated_into());
1188		// TODO paritytech/polkadot#6283: Remove all usages of `relay_dispatch_queue_size`
1189		#[allow(deprecated)]
1190		well_known_keys::relay_dispatch_queue_size_typed(para).set((count, size));
1191
1192		let config = configuration::ActiveConfig::<T>::get();
1193		let remaining_count = config.max_upward_queue_count.saturating_sub(count);
1194		let remaining_size = config.max_upward_queue_size.saturating_sub(size);
1195		well_known_keys::relay_dispatch_queue_remaining_capacity(para)
1196			.set((remaining_count, remaining_size));
1197	}
1198}
1199
/// A collection of data required for checking a candidate.
pub(crate) struct CandidateCheckContext<T: Config> {
	// Snapshot of the active host configuration taken at construction time.
	config: configuration::HostConfiguration<BlockNumberFor<T>>,
	// Relay-parent number of the para's most recent context, if any; passed to
	// `AllowedRelayParentsTracker::acquire_info` when validating a candidate's relay-parent.
	prev_context: Option<BlockNumberFor<T>>,
}
1205
impl<T: Config> CandidateCheckContext<T> {
	/// Create a new check context, snapshotting the currently active host configuration.
	///
	/// `prev_context` is the relay-parent number of the para's most recent context, if any.
	pub(crate) fn new(prev_context: Option<BlockNumberFor<T>>) -> Self {
		Self { config: configuration::ActiveConfig::<T>::get(), prev_context }
	}

	/// Execute verification of the candidate.
	///
	/// Assures:
	///  * relay-parent in-bounds
	///  * code hash of commitments matches current code hash
	///  * para head in the descriptor and commitments match
	///
	/// Returns the relay-parent block number.
	pub(crate) fn verify_backed_candidate(
		&self,
		allowed_relay_parents: &AllowedRelayParentsTracker<T::Hash, BlockNumberFor<T>>,
		backed_candidate_receipt: &CommittedCandidateReceipt<<T as frame_system::Config>::Hash>,
		parent_head_data: HeadData,
	) -> Result<BlockNumberFor<T>, Error<T>> {
		let para_id = backed_candidate_receipt.descriptor.para_id();
		let relay_parent = backed_candidate_receipt.descriptor.relay_parent();

		// Check that the relay-parent is one of the allowed relay-parents.
		let (state_root, relay_parent_number) = {
			match allowed_relay_parents.acquire_info(relay_parent, self.prev_context) {
				None => return Err(Error::<T>::DisallowedRelayParent),
				Some((info, relay_parent_number)) => (info.state_root, relay_parent_number),
			}
		};

		{
			// The descriptor commits to the persisted validation data by hash; recompute
			// it from the relay-parent info and the provided parent head data and compare.
			let persisted_validation_data = make_persisted_validation_data_with_parent::<T>(
				relay_parent_number,
				state_root,
				parent_head_data,
			);

			let expected = persisted_validation_data.hash();

			ensure!(
				expected == backed_candidate_receipt.descriptor.persisted_validation_data_hash(),
				Error::<T>::ValidationDataHashMismatch,
			);
		}

		// The candidate must have been built against the para's current validation code.
		let validation_code_hash = paras::CurrentCodeHash::<T>::get(para_id)
			// A candidate for a parachain without current validation code is not scheduled.
			.ok_or_else(|| Error::<T>::UnscheduledCandidate)?;
		ensure!(
			backed_candidate_receipt.descriptor.validation_code_hash() == validation_code_hash,
			Error::<T>::InvalidValidationCodeHash,
		);

		// The head data committed to must hash to the para head in the descriptor.
		ensure!(
			backed_candidate_receipt.descriptor.para_head() ==
				backed_candidate_receipt.commitments.head_data.hash(),
			Error::<T>::ParaHeadMismatch,
		);

		// Finally, run the full validation-outputs acceptance checks on the commitments.
		if let Err(err) = self.check_validation_outputs(
			para_id,
			relay_parent_number,
			&backed_candidate_receipt.commitments.head_data,
			&backed_candidate_receipt.commitments.new_validation_code,
			backed_candidate_receipt.commitments.processed_downward_messages,
			&backed_candidate_receipt.commitments.upward_messages,
			BlockNumberFor::<T>::from(backed_candidate_receipt.commitments.hrmp_watermark),
			&backed_candidate_receipt.commitments.horizontal_messages,
		) {
			log::debug!(
				target: LOG_TARGET,
				"Validation outputs checking during inclusion of a candidate {:?} for parachain `{}` failed, error: {:?}",
				backed_candidate_receipt.hash(),
				u32::from(para_id),
				err
			);
			Err(err.strip_into_dispatch_err::<T>())?;
		};
		Ok(relay_parent_number)
	}

	/// Check the given outputs after candidate validation on whether it passes the acceptance
	/// criteria.
	///
	/// The things that are checked can be roughly divided into limits and minimums.
	///
	/// Limits are things like max message queue sizes and max head data size.
	///
	/// Minimums are things like the minimum amount of messages that must be processed
	/// by the parachain block.
	///
	/// Limits are checked against the current state. The parachain block must be acceptable
	/// by the current relay-chain state regardless of whether it was acceptable at some relay-chain
	/// state in the past.
	///
	/// Minimums are checked against the current state but modulated by
	/// considering the information available at the relay-parent of the parachain block.
	fn check_validation_outputs(
		&self,
		para_id: ParaId,
		relay_parent_number: BlockNumberFor<T>,
		head_data: &HeadData,
		new_validation_code: &Option<polkadot_primitives::ValidationCode>,
		processed_downward_messages: u32,
		upward_messages: &[polkadot_primitives::UpwardMessage],
		hrmp_watermark: BlockNumberFor<T>,
		horizontal_messages: &[polkadot_primitives::OutboundHrmpMessage<ParaId>],
	) -> Result<(), AcceptanceCheckErr> {
		// Safe convertions when `self.config.max_head_data_size` is in bounds of `usize` type.
		let max_head_data_size = usize::try_from(self.config.max_head_data_size)
			.map_err(|_| AcceptanceCheckErr::HeadDataTooLarge)?;
		ensure!(head_data.0.len() <= max_head_data_size, AcceptanceCheckErr::HeadDataTooLarge);

		// if any, the code upgrade attempt is allowed.
		if let Some(new_validation_code) = new_validation_code {
			// Safe convertions when `self.config.max_code_size` is in bounds of `usize` type.
			let max_code_size = usize::try_from(self.config.max_code_size)
				.map_err(|_| AcceptanceCheckErr::NewCodeTooLarge)?;

			ensure!(
				paras::Pallet::<T>::can_upgrade_validation_code(para_id),
				AcceptanceCheckErr::PrematureCodeUpgrade,
			);
			ensure!(
				new_validation_code.0.len() <= max_code_size,
				AcceptanceCheckErr::NewCodeTooLarge,
			);
		}

		// check if the candidate passes the messaging acceptance criteria
		dmp::Pallet::<T>::check_processed_downward_messages(
			para_id,
			relay_parent_number,
			processed_downward_messages,
		)
		.map_err(|e| {
			log::debug!(
				target: LOG_TARGET,
				"Check processed downward messages for parachain `{}` on relay parent number `{:?}` failed, error: {:?}",
				u32::from(para_id),
				relay_parent_number,
				e
			);
			e
		})?;
		Pallet::<T>::check_upward_messages(&self.config, para_id, upward_messages).map_err(
			|e| {
				log::debug!(
					target: LOG_TARGET,
					"Check upward messages for parachain `{}` failed, error: {:?}",
					u32::from(para_id),
					e
				);
				e
			},
		)?;
		hrmp::Pallet::<T>::check_hrmp_watermark(para_id, relay_parent_number, hrmp_watermark)
			.map_err(|e| {
				log::debug!(
					target: LOG_TARGET,
					"Check hrmp watermark for parachain `{}` on relay parent number `{:?}` failed, error: {:?}",
					u32::from(para_id),
					relay_parent_number,
					e
				);
				e
			})?;
		hrmp::Pallet::<T>::check_outbound_hrmp(&self.config, para_id, horizontal_messages)
			.map_err(|e| {
				log::debug!(
					target: LOG_TARGET,
					"Check outbound hrmp for parachain `{}` failed, error: {:?}",
					u32::from(para_id),
					e
				);
				e
			})?;

		Ok(())
	}
}
1387
1388impl<T: Config> QueueFootprinter for Pallet<T> {
1389	type Origin = UmpQueueId;
1390
1391	fn message_count(origin: Self::Origin) -> u64 {
1392		T::MessageQueue::footprint(AggregateMessageOrigin::Ump(origin)).storage.count
1393	}
1394}