1#![cfg_attr(not(feature = "std"), no_std)]
18
19extern crate alloc;
31
32use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
33use codec::{Decode, DecodeLimit, Encode};
34use core::cmp;
35use cumulus_primitives_core::{
36 relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo,
37 CumulusDigestItem, GetChannelInfo, ListChannelInfos, MessageSendError, OutboundHrmpMessage,
38 ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler,
39 XcmpMessageSource,
40};
41use cumulus_primitives_parachain_inherent::{v0, MessageQueueChain, ParachainInherentData};
42use frame_support::{
43 dispatch::{DispatchClass, DispatchResult},
44 ensure,
45 inherent::{InherentData, InherentIdentifier, ProvideInherent},
46 traits::{Get, HandleMessage},
47 weights::Weight,
48};
49use frame_system::{ensure_none, ensure_root, pallet_prelude::HeaderFor};
50use parachain_inherent::{
51 deconstruct_parachain_inherent_data, AbridgedInboundDownwardMessages,
52 AbridgedInboundHrmpMessages, BasicParachainInherentData, InboundMessageId, InboundMessagesData,
53};
54use polkadot_parachain_primitives::primitives::RelayChainBlockNumber;
55use polkadot_runtime_parachains::{FeeTracker, GetMinFeeFactor};
56use scale_info::TypeInfo;
57use sp_runtime::{
58 traits::{Block as BlockT, BlockNumberProvider, Hash},
59 FixedU128, RuntimeDebug, SaturatedConversion,
60};
61use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, MAX_XCM_DECODE_DEPTH};
62use xcm_builder::InspectMessageQueues;
63
64mod benchmarking;
65pub mod migration;
66mod mock;
67#[cfg(test)]
68mod tests;
69pub mod weights;
70
71pub use weights::WeightInfo;
72
73mod unincluded_segment;
74
75pub mod consensus_hook;
76pub mod relay_state_snapshot;
77#[macro_use]
78pub mod validate_block;
79mod descendant_validation;
80pub mod parachain_inherent;
81
82use unincluded_segment::{
83 HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker,
84};
85
86pub use consensus_hook::{ConsensusHook, ExpectParentIncluded};
87pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block;
110pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof};
111pub use unincluded_segment::{Ancestor, UsedBandwidth};
112
113pub use pallet::*;
114
115const LOG_TARGET: &str = "parachain-system";
116
/// Validates the relationship between the relay-chain block number of the
/// current parachain block (`current`) and that of the previous parachain
/// block (`previous`).
///
/// Implementations in this file panic when the relationship is violated,
/// aborting block building/import (see the implementors below).
pub trait CheckAssociatedRelayNumber {
 /// Check `current` against `previous`; expected to panic on violation.
 fn check_associated_relay_number(
  current: RelayChainBlockNumber,
  previous: RelayChainBlockNumber,
 );
}
134
135pub struct RelayNumberStrictlyIncreases;
140
141impl CheckAssociatedRelayNumber for RelayNumberStrictlyIncreases {
142 fn check_associated_relay_number(
143 current: RelayChainBlockNumber,
144 previous: RelayChainBlockNumber,
145 ) {
146 if current <= previous {
147 panic!("Relay chain block number needs to strictly increase between Parachain blocks!")
148 }
149 }
150}
151
152pub struct AnyRelayNumber;
157
158impl CheckAssociatedRelayNumber for AnyRelayNumber {
159 fn check_associated_relay_number(_: RelayChainBlockNumber, _: RelayChainBlockNumber) {}
160}
161
162pub struct RelayNumberMonotonicallyIncreases;
167
168impl CheckAssociatedRelayNumber for RelayNumberMonotonicallyIncreases {
169 fn check_associated_relay_number(
170 current: RelayChainBlockNumber,
171 previous: RelayChainBlockNumber,
172 ) {
173 if current < previous {
174 panic!("Relay chain block number needs to monotonically increase between Parachain blocks!")
175 }
176 }
177}
178
/// Convenience alias for the maximum DMP message length accepted by the
/// configured [`Config::DmpQueue`] handler.
pub type MaxDmpMessageLenOf<T> = <<T as Config>::DmpQueue as HandleMessage>::MaxMessageLen;

/// Constants used by the UMP backpressure / fee logic.
pub mod ump_constants {
 /// Divisor applied to `max_upward_queue_size` to derive the queue-size
 /// threshold below which the UMP delivery fee factor is decreased again
 /// (see `on_finalize`).
 pub const THRESHOLD_FACTOR: u32 = 2;
}
188
189#[frame_support::pallet]
190pub mod pallet {
191 use super::*;
192 use cumulus_primitives_core::CoreInfoExistsAtMaxOnce;
193 use frame_support::pallet_prelude::*;
194 use frame_system::pallet_prelude::*;
195
 /// The parachain-system pallet type; functionality is provided via the
 /// hooks, calls, and storage items declared in this module.
 #[pallet::pallet]
 #[pallet::storage_version(migration::STORAGE_VERSION)]
 #[pallet::without_storage_info]
 pub struct Pallet<T>(_);
200
 #[pallet::config]
 pub trait Config: frame_system::Config<OnSetCode = ParachainSetCode<Self>> {
  /// The overarching event type.
  #[allow(deprecated)]
  type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

  /// Callbacks invoked by this pallet, e.g. when validation data is set or a
  /// new validation code is applied (see `set_validation_data`).
  type OnSystemEvent: OnSystemEvent;

  /// The para id of this parachain; used e.g. to verify the relay state proof.
  #[pallet::constant]
  type SelfParaId: Get<ParaId>;

  /// Source drained in `on_finalize` for outbound HRMP (XCMP) messages.
  type OutboundXcmpMessageSource: XcmpMessageSource;

  /// Queue that inbound downward messages are handed to.
  type DmpQueue: HandleMessage;

  /// Weight reserved for processing downward messages.
  /// NOTE(review): not referenced in this chunk — presumably the default for
  /// `ReservedDmpWeightOverride`; confirm against the rest of the file.
  type ReservedDmpWeight: Get<Weight>;

  /// Handler that inbound HRMP (XCMP) messages are handed to.
  type XcmpMessageHandler: XcmpMessageHandler;

  /// Maximum weight for XCMP handling, unless `ReservedXcmpWeightOverride`
  /// is set (see `enqueue_inbound_horizontal_messages`).
  type ReservedXcmpWeight: Get<Weight>;

  /// Policy validating the relay-parent-number progression between blocks.
  type CheckAssociatedRelayNumber: CheckAssociatedRelayNumber;

  /// Benchmarked weight information for this pallet.
  type WeightInfo: WeightInfo;

  /// Consensus hook consulted with the relay state proof; determines the
  /// unincluded-segment capacity (see `set_validation_data`).
  type ConsensusHook: ConsensusHook;

  /// Expected offset from the relay parent: number of relay-parent
  /// descendants to verify and the required claim-queue offset
  /// (see `on_initialize` / `set_validation_data`).
  type RelayParentOffset: Get<u32>;
 }
268
 #[pallet::hooks]
 impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
  fn on_finalize(_: BlockNumberFor<T>) {
   // Reset per-block flags/signals; `UpgradeGoAhead` is *taken* so it can be
   // attributed to exactly one ancestor of the unincluded segment below.
   <DidSetValidationCode<T>>::kill();
   <UpgradeRestrictionSignal<T>>::kill();
   let relay_upgrade_go_ahead = <UpgradeGoAhead<T>>::take();

   let vfp = <ValidationData<T>>::get().expect(
    r"Missing required set_validation_data inherent. This inherent must be
    present in every block. This error typically occurs when the set_validation_data
    execution failed and was rejected by the block builder. Check earlier log entries
    for the specific cause of the failure.",
   );

   LastRelayChainBlockNumber::<T>::put(vfp.relay_parent_number);

   let host_config = match HostConfiguration::<T>::get() {
    Some(ok) => ok,
    None => {
     debug_assert!(
      false,
      "host configuration is promised to set until `on_finalize`; qed",
     );
     return
    },
   };

   // Outbound bandwidth limits as reported by the relay chain, before this
   // block's own usage is accounted.
   let total_bandwidth_out = match RelevantMessagingState::<T>::get() {
    Some(s) => OutboundBandwidthLimits::from_relay_chain_state(&s),
    None => {
     debug_assert!(
      false,
      "relevant messaging state is promised to be set until `on_finalize`; \
      qed",
     );
     return
    },
   };

   // Subtract the bandwidth already consumed by the unincluded segment from
   // the messaging state before computing what this block may still send.
   Self::adjust_egress_bandwidth_limits();

   // Move as many pending upward messages into `UpwardMessages` (the output
   // the collator reads) as the remaining UMP count/size budget allows.
   let (ump_msg_count, ump_total_bytes) = <PendingUpwardMessages<T>>::mutate(|up| {
    let (available_capacity, available_size) = match RelevantMessagingState::<T>::get()
    {
     Some(limits) => (
      limits.relay_dispatch_queue_remaining_capacity.remaining_count,
      limits.relay_dispatch_queue_remaining_capacity.remaining_size,
     ),
     None => {
      debug_assert!(
       false,
       "relevant messaging state is promised to be set until `on_finalize`; \
       qed",
      );
      return (0, 0)
     },
    };

    // Additionally capped by the per-candidate limit from the host config.
    let available_capacity =
     cmp::min(available_capacity, host_config.max_upward_message_num_per_candidate);

    // Greedily count the prefix of messages that fits in both budgets;
    // `scan` yields `None` (stopping) at the first message that would
    // overflow either the count or the size budget.
    let (num, total_size) = up
     .iter()
     .scan((0u32, 0u32), |state, msg| {
      let (cap_used, size_used) = *state;
      let new_cap = cap_used.saturating_add(1);
      let new_size = size_used.saturating_add(msg.len() as u32);
      match available_capacity
       .checked_sub(new_cap)
       .and(available_size.checked_sub(new_size))
      {
       Some(_) => {
        *state = (new_cap, new_size);
        Some(*state)
       },
       _ => None,
      }
     })
     .last()
     .unwrap_or_default();

    // Publish the accepted prefix; keep the rest pending.
    UpwardMessages::<T>::put(&up[..num as usize]);
    *up = up.split_off(num as usize);

    #[cfg(feature = "experimental-ump-signals")]
    Self::send_ump_signal();

    // If the remaining queue dropped below `max_upward_queue_size /
    // THRESHOLD_FACTOR`, relax the UMP delivery fee factor again.
    let threshold = host_config
     .max_upward_queue_size
     .saturating_div(ump_constants::THRESHOLD_FACTOR);
    let remaining_total_size: usize = up.iter().map(UpwardMessage::len).sum();
    if remaining_total_size <= threshold as usize {
     Self::decrease_fee_factor(());
    }

    (num, total_size)
   });

   // HRMP: bounded by the per-candidate channel limit announced during
   // `on_initialize` (and the current host configuration).
   let maximum_channels = host_config
    .hrmp_max_message_num_per_candidate
    .min(<AnnouncedHrmpMessagesPerCandidate<T>>::take())
    as usize;

   let outbound_messages =
    T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels)
     .into_iter()
     .map(|(recipient, data)| OutboundHrmpMessage { recipient, data })
     .collect::<Vec<_>>();

   // Record this block's bandwidth usage (UMP + HRMP) as a new ancestor of
   // the unincluded segment.
   {
    let hrmp_outgoing = outbound_messages
     .iter()
     .map(|msg| {
      (
       msg.recipient,
       HrmpChannelUpdate { msg_count: 1, total_bytes: msg.data.len() as u32 },
      )
     })
     .collect();
    let used_bandwidth =
     UsedBandwidth { ump_msg_count, ump_total_bytes, hrmp_outgoing };

    let mut aggregated_segment =
     AggregatedUnincludedSegment::<T>::get().unwrap_or_default();
    // Attribute the go-ahead signal to this ancestor only if no earlier
    // ancestor in the segment already consumed one.
    let consumed_go_ahead_signal =
     if aggregated_segment.consumed_go_ahead_signal().is_some() {
      None
     } else {
      relay_upgrade_go_ahead
     };
    let ancestor = Ancestor::new_unchecked(used_bandwidth, consumed_go_ahead_signal);

    let watermark = HrmpWatermark::<T>::get();
    let watermark_update = HrmpWatermarkUpdate::new(watermark, vfp.relay_parent_number);

    // Appending verifies the segment stays within the relay-side limits.
    aggregated_segment
     .append(&ancestor, watermark_update, &total_bandwidth_out)
     .expect("unincluded segment limits exceeded");
    AggregatedUnincludedSegment::<T>::put(aggregated_segment);
    UnincludedSegment::<T>::append(ancestor);
   }
   HrmpOutboundMessages::<T>::put(outbound_messages);
  }

  fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
   let mut weight = Weight::zero();

   // If the previous block did not set new validation code, drop any stale
   // `NewValidationCode` so it is not announced again.
   if !<DidSetValidationCode<T>>::get() {
    NewValidationCode::<T>::kill();
    weight += T::DbWeight::get().writes(1);
   }

   {
    // The parent's para head hash is only known now; backfill it into the
    // newest ancestor of the unincluded segment.
    <UnincludedSegment<T>>::mutate(|chain| {
     if let Some(ancestor) = chain.last_mut() {
      let parent = frame_system::Pallet::<T>::parent_hash();
      ancestor.replace_para_head_hash(parent);
     }
    });
    weight += T::DbWeight::get().reads_writes(1, 1);

    weight += T::DbWeight::get().reads_writes(3, 2);
   }

   // Remove the per-block data of the previous block; it is repopulated by
   // the `set_validation_data` inherent.
   ValidationData::<T>::kill();
   ProcessedDownwardMessages::<T>::kill();
   UpwardMessages::<T>::kill();
   HrmpOutboundMessages::<T>::kill();
   CustomValidationHeadData::<T>::kill();
   // NOTE(review): result discarded — presumably only accounted as a read in
   // the weight below; confirm the intent.
   HrmpWatermark::<T>::get();
   weight += T::DbWeight::get().reads_writes(1, 5);

   weight += T::DbWeight::get().reads_writes(1, 1);
   // Snapshot the HRMP per-candidate limit for `on_finalize`, defaulting to 0
   // if no host configuration is available yet.
   let hrmp_max_message_num_per_candidate = HostConfiguration::<T>::get()
    .map(|cfg| cfg.hrmp_max_message_num_per_candidate)
    .unwrap_or(0);
   <AnnouncedHrmpMessagesPerCandidate<T>>::put(hrmp_max_message_num_per_candidate);

   weight += T::DbWeight::get().reads_writes(
    3 + hrmp_max_message_num_per_candidate as u64,
    4 + hrmp_max_message_num_per_candidate as u64,
   );

   weight += T::DbWeight::get().reads_writes(1, 1);

   weight += T::DbWeight::get().reads_writes(6, 3);

   weight += T::DbWeight::get().reads(1);

   // A `CoreInfo` digest, when present, must occur at most once and its claim
   // queue offset must equal the configured `RelayParentOffset`.
   match CumulusDigestItem::core_info_exists_at_max_once(
    &frame_system::Pallet::<T>::digest(),
   ) {
    CoreInfoExistsAtMaxOnce::Once(core_info) => {
     assert_eq!(
      core_info.claim_queue_offset.0,
      T::RelayParentOffset::get() as u8,
      "Only {} is supported as valid claim queue offset",
      T::RelayParentOffset::get()
     );
    },
    CoreInfoExistsAtMaxOnce::NotFound => {},
    CoreInfoExistsAtMaxOnce::MoreThanOnce => {
     panic!("`CumulusDigestItem::CoreInfo` must exist at max once.");
    },
   }

   weight
  }
 }
552
 #[pallet::call]
 impl<T: Config> Pallet<T> {
  /// Set the current validation data.
  ///
  /// The mandatory inherent of this pallet: verifies the relay state proof,
  /// processes upgrade signals, stores the messaging state, and enqueues the
  /// inbound downward/horizontal messages. May only run once per block.
  #[pallet::call_index(0)]
  #[pallet::weight((0, DispatchClass::Mandatory))]
  pub fn set_validation_data(
   origin: OriginFor<T>,
   data: BasicParachainInherentData,
   inbound_messages_data: InboundMessagesData,
  ) -> DispatchResult {
   ensure_none(origin)?;
   assert!(
    !<ValidationData<T>>::exists(),
    "ValidationData must be updated only once in a block",
   );

   let mut total_weight = Weight::zero();

   let BasicParachainInherentData {
    validation_data: vfp,
    relay_chain_state,
    relay_parent_descendants,
    collator_peer_id: _,
   } = data;

   // Panics (via the configured policy) if the relay parent number regressed.
   T::CheckAssociatedRelayNumber::check_associated_relay_number(
    vfp.relay_parent_number,
    LastRelayChainBlockNumber::<T>::get(),
   );

   let relay_state_proof = RelayChainStateProof::new(
    T::SelfParaId::get(),
    vfp.relay_parent_storage_root,
    relay_chain_state.clone(),
   )
   .expect("Invalid relay chain state proof");

   // With a non-zero offset, the collator must supply (and we must verify)
   // that many descendants of the relay parent.
   let expected_rp_descendants_num = T::RelayParentOffset::get();

   if expected_rp_descendants_num > 0 {
    if let Err(err) = descendant_validation::verify_relay_parent_descendants(
     &relay_state_proof,
     relay_parent_descendants,
     vfp.relay_parent_storage_root,
     expected_rp_descendants_num,
    ) {
     panic!(
      "Unable to verify provided relay parent descendants. \
      expected_rp_descendants_num: {expected_rp_descendants_num} \
      error: {err:?}"
     );
    };
   }

   let (consensus_hook_weight, capacity) =
    T::ConsensusHook::on_state_proof(&relay_state_proof);
   total_weight += consensus_hook_weight;
   total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity);
   // Expose the relay parent storage root via a digest item.
   frame_system::Pallet::<T>::deposit_log(
    cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item(
     vfp.relay_parent_storage_root,
     vfp.relay_parent_number,
    ),
   );

   let upgrade_go_ahead_signal = relay_state_proof
    .read_upgrade_go_ahead_signal()
    .expect("Invalid upgrade go ahead signal");

   // If an ancestor in the unincluded segment already consumed a signal, the
   // relay chain must still be reporting that same signal.
   let upgrade_signal_in_segment = AggregatedUnincludedSegment::<T>::get()
    .as_ref()
    .and_then(SegmentTracker::consumed_go_ahead_signal);
   if let Some(signal_in_segment) = upgrade_signal_in_segment.as_ref() {
    assert_eq!(upgrade_go_ahead_signal, Some(*signal_in_segment));
   }
   match upgrade_go_ahead_signal {
    // Already handled by an ancestor in the segment — nothing to do.
    Some(_signal) if upgrade_signal_in_segment.is_some() => {
    },
    Some(relay_chain::UpgradeGoAhead::GoAhead) => {
     assert!(
      <PendingValidationCode<T>>::exists(),
      "No new validation function found in storage, GoAhead signal is not expected",
     );
     let validation_code = <PendingValidationCode<T>>::take();

     frame_system::Pallet::<T>::update_code_in_storage(&validation_code);
     <T::OnSystemEvent as OnSystemEvent>::on_validation_code_applied();
     Self::deposit_event(Event::ValidationFunctionApplied {
      relay_chain_block_num: vfp.relay_parent_number,
     });
    },
    Some(relay_chain::UpgradeGoAhead::Abort) => {
     <PendingValidationCode<T>>::kill();
     Self::deposit_event(Event::ValidationFunctionDiscarded);
    },
    None => {},
   }
   <UpgradeRestrictionSignal<T>>::put(
    relay_state_proof
     .read_upgrade_restriction_signal()
     .expect("Invalid upgrade restriction signal"),
   );
   <UpgradeGoAhead<T>>::put(upgrade_go_ahead_signal);

   let host_config = relay_state_proof
    .read_abridged_host_configuration()
    .expect("Invalid host configuration in relay chain state proof");

   let relevant_messaging_state = relay_state_proof
    .read_messaging_state_snapshot(&host_config)
    .expect("Invalid messaging state in relay chain state proof");

   // Persist the per-block data consumed elsewhere in this pallet.
   <ValidationData<T>>::put(&vfp);
   <RelayStateProof<T>>::put(relay_chain_state);
   <RelevantMessagingState<T>>::put(relevant_messaging_state.clone());
   <HostConfiguration<T>>::put(host_config);

   <T::OnSystemEvent as OnSystemEvent>::on_validation_data(&vfp);

   total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages(
    relevant_messaging_state.dmq_mqc_head,
    inbound_messages_data.downward_messages,
   ));
   total_weight.saturating_accrue(Self::enqueue_inbound_horizontal_messages(
    &relevant_messaging_state.ingress_channels,
    inbound_messages_data.horizontal_messages,
    vfp.relay_parent_number,
   ));

   frame_system::Pallet::<T>::register_extra_weight_unchecked(
    total_weight,
    DispatchClass::Mandatory,
   );

   Ok(())
  }

  /// Send an upward message to the relay chain; root only. Errors from the
  /// underlying `send_upward_message` are deliberately ignored.
  #[pallet::call_index(1)]
  #[pallet::weight((1_000, DispatchClass::Operational))]
  pub fn sudo_send_upward_message(
   origin: OriginFor<T>,
   message: UpwardMessage,
  ) -> DispatchResult {
   ensure_root(origin)?;
   let _ = Self::send_upward_message(message);
   Ok(())
  }

 }
732
 #[pallet::event]
 #[pallet::generate_deposit(pub(super) fn deposit_event)]
 pub enum Event<T: Config> {
  /// A new validation function was stored, pending relay-chain approval.
  ValidationFunctionStored,
  /// The validation function was applied as of the contained relay chain block number.
  ValidationFunctionApplied { relay_chain_block_num: RelayChainBlockNumber },
  /// The pending validation function was discarded (relay chain sent `Abort`).
  ValidationFunctionDiscarded,
  /// Some downward messages have been received and will be processed.
  DownwardMessagesReceived { count: u32 },
  /// Downward messages were processed using the given weight; `dmq_head` is
  /// the resulting message-queue-chain head.
  DownwardMessagesProcessed { weight_used: Weight, dmq_head: relay_chain::Hash },
  /// An upward message was sent to the relay chain.
  UpwardMessageSent { message_hash: Option<XcmHash> },
 }
749
 #[pallet::error]
 pub enum Error<T> {
  /// Attempt to schedule an upgrade while another one is already pending
  /// (see `schedule_code_upgrade`).
  OverlappingUpgrades,
  /// The relay chain currently prohibits this parachain from upgrading its
  /// validation function (`UpgradeRestrictionSignal` is set).
  ProhibitedByPolkadot,
  /// The supplied validation code exceeds the relay chain's `max_code_size`.
  TooBig,
  /// The inherent that supplies the validation data did not run this block.
  ValidationDataNotAvailable,
  /// The host configuration from the relay chain is not available.
  HostConfigurationNotAvailable,
  /// No validation-function upgrade is currently scheduled.
  NotScheduled,
 }
766
 /// Ancestors of the current block that are not yet included on the relay chain.
 #[pallet::storage]
 pub type UnincludedSegment<T: Config> = StorageValue<_, Vec<Ancestor<T::Hash>>, ValueQuery>;

 /// Aggregated bookkeeping (bandwidth, watermark, consumed signals) over the
 /// whole [`UnincludedSegment`].
 #[pallet::storage]
 pub type AggregatedUnincludedSegment<T: Config> =
  StorageValue<_, SegmentTracker<T::Hash>, OptionQuery>;

 /// Validation code that is stored but not yet applied — applied or discarded
 /// upon the relay chain's go-ahead/abort signal.
 #[pallet::storage]
 pub type PendingValidationCode<T: Config> = StorageValue<_, Vec<u8>, ValueQuery>;

 /// Validation code set in the current block, to be announced to the relay
 /// chain; cleared in `on_initialize` if stale.
 #[pallet::storage]
 pub type NewValidationCode<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;

 /// The [`PersistedValidationData`] set for this block via the inherent;
 /// killed in `on_initialize`, so only available after inherent execution.
 #[pallet::storage]
 pub type ValidationData<T: Config> = StorageValue<_, PersistedValidationData>;

 /// Whether new validation code was set in this block.
 #[pallet::storage]
 pub type DidSetValidationCode<T: Config> = StorageValue<_, bool, ValueQuery>;

 /// Relay parent block number of the previous parachain block.
 #[pallet::storage]
 pub type LastRelayChainBlockNumber<T: Config> =
  StorageValue<_, RelayChainBlockNumber, ValueQuery>;

 /// Upgrade restriction signal read from the relay state proof; when `Some`,
 /// scheduling a code upgrade is prohibited.
 #[pallet::storage]
 pub type UpgradeRestrictionSignal<T: Config> =
  StorageValue<_, Option<relay_chain::UpgradeRestriction>, ValueQuery>;

 /// Go-ahead/abort signal read from the relay state proof; consumed (taken)
 /// in `on_finalize` and attributed to one ancestor of the unincluded segment.
 #[pallet::storage]
 pub type UpgradeGoAhead<T: Config> =
  StorageValue<_, Option<relay_chain::UpgradeGoAhead>, ValueQuery>;

 /// The relay chain state proof supplied with this block's inherent.
 #[pallet::storage]
 pub type RelayStateProof<T: Config> = StorageValue<_, sp_trie::StorageProof>;

 /// Snapshot of the messaging state (channels, queue capacities, MQC heads)
 /// relevant to this parachain; adjusted for unincluded-segment usage.
 #[pallet::storage]
 pub type RelevantMessagingState<T: Config> = StorageValue<_, MessagingStateSnapshot>;

 /// The abridged host configuration read from the relay state proof.
 #[pallet::storage]
 #[pallet::disable_try_decode_storage]
 pub type HostConfiguration<T: Config> = StorageValue<_, AbridgedHostConfiguration>;

 /// Last processed downward message queue chain head.
 #[pallet::storage]
 pub type LastDmqMqcHead<T: Config> = StorageValue<_, MessageQueueChain, ValueQuery>;

 /// Last processed HRMP message queue chain head, per sender.
 #[pallet::storage]
 pub type LastHrmpMqcHeads<T: Config> =
  StorageValue<_, BTreeMap<ParaId, MessageQueueChain>, ValueQuery>;

 /// Number of downward messages processed in this block.
 #[pallet::storage]
 pub type ProcessedDownwardMessages<T: Config> = StorageValue<_, u32, ValueQuery>;

 /// Identifier of the last fully processed downward message; used by
 /// `do_create_inherent` to skip already-processed messages.
 #[pallet::storage]
 pub type LastProcessedDownwardMessage<T: Config> = StorageValue<_, InboundMessageId>;

 /// HRMP watermark: relay chain block number up to which all inbound HRMP
 /// messages have been processed.
 #[pallet::storage]
 pub type HrmpWatermark<T: Config> = StorageValue<_, relay_chain::BlockNumber, ValueQuery>;

 /// Identifier of the last fully processed HRMP message; used by
 /// `do_create_inherent` to skip already-processed messages.
 #[pallet::storage]
 pub type LastProcessedHrmpMessage<T: Config> = StorageValue<_, InboundMessageId>;

 /// Outbound HRMP messages produced in this block, consumed by the collator.
 #[pallet::storage]
 pub type HrmpOutboundMessages<T: Config> =
  StorageValue<_, Vec<OutboundHrmpMessage>, ValueQuery>;

 /// Upward messages produced in this block, consumed by the collator.
 #[pallet::storage]
 pub type UpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

 /// Upward messages waiting for UMP capacity; drained in `on_finalize`.
 #[pallet::storage]
 pub type PendingUpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

 /// Multiplicative fee factor for UMP delivery; decreased again once the
 /// pending queue drops below the threshold (see `on_finalize`).
 #[pallet::storage]
 pub type UpwardDeliveryFeeFactor<T: Config> =
  StorageValue<_, FixedU128, ValueQuery, GetMinFeeFactor<Pallet<T>>>;

 /// HRMP per-candidate message limit announced in `on_initialize` and
 /// consumed (taken) in `on_finalize`.
 #[pallet::storage]
 pub type AnnouncedHrmpMessagesPerCandidate<T: Config> = StorageValue<_, u32, ValueQuery>;

 /// Optional override for `Config::ReservedXcmpWeight`.
 #[pallet::storage]
 pub type ReservedXcmpWeightOverride<T: Config> = StorageValue<_, Weight>;

 /// Optional override for `Config::ReservedDmpWeight`.
 #[pallet::storage]
 pub type ReservedDmpWeightOverride<T: Config> = StorageValue<_, Weight>;

 /// Optional custom head data to report instead of the default; killed each
 /// block in `on_initialize`.
 #[pallet::storage]
 pub type CustomValidationHeadData<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
945
946 #[pallet::inherent]
947 impl<T: Config> ProvideInherent for Pallet<T> {
948 type Call = Call<T>;
949 type Error = sp_inherents::MakeFatalError<()>;
950 const INHERENT_IDENTIFIER: InherentIdentifier =
951 cumulus_primitives_parachain_inherent::INHERENT_IDENTIFIER;
952
953 fn create_inherent(data: &InherentData) -> Option<Self::Call> {
954 let data = match data
955 .get_data::<ParachainInherentData>(&Self::INHERENT_IDENTIFIER)
956 .ok()
957 .flatten()
958 {
959 None => {
960 let data = data
965 .get_data::<v0::ParachainInherentData>(
966 &cumulus_primitives_parachain_inherent::PARACHAIN_INHERENT_IDENTIFIER_V0,
967 )
968 .ok()
969 .flatten()?;
970 data.into()
971 },
972 Some(data) => data,
973 };
974
975 Some(Self::do_create_inherent(data))
976 }
977
978 fn is_inherent(call: &Self::Call) -> bool {
979 matches!(call, Call::set_validation_data { .. })
980 }
981 }
982
 #[pallet::genesis_config]
 #[derive(frame_support::DefaultNoBound)]
 pub struct GenesisConfig<T: Config> {
  /// Marker only — this pallet has no configurable genesis state.
  #[serde(skip)]
  pub _config: core::marker::PhantomData<T>,
 }
989
 #[pallet::genesis_build]
 impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
  fn build(&self) {
   // Insert a marker value under the well-known `:c` key at genesis.
   // NOTE(review): the consumer of this key is not visible in this file —
   // presumably relay-chain / validate_block related; confirm before changing.
   sp_io::storage::set(b":c", &[]);
  }
 }
997}
998
999impl<T: Config> Pallet<T> {
1000 pub fn unincluded_segment_size_after(included_hash: T::Hash) -> u32 {
1008 let segment = UnincludedSegment::<T>::get();
1009 crate::unincluded_segment::size_after_included(included_hash, &segment)
1010 }
1011}
1012
1013impl<T: Config> FeeTracker for Pallet<T> {
1014 type Id = ();
1015
1016 fn get_fee_factor(_id: Self::Id) -> FixedU128 {
1017 UpwardDeliveryFeeFactor::<T>::get()
1018 }
1019
1020 fn set_fee_factor(_id: Self::Id, val: FixedU128) {
1021 UpwardDeliveryFeeFactor::<T>::set(val);
1022 }
1023}
1024
1025impl<T: Config> ListChannelInfos for Pallet<T> {
1026 fn outgoing_channels() -> Vec<ParaId> {
1027 let Some(state) = RelevantMessagingState::<T>::get() else { return Vec::new() };
1028 state.egress_channels.into_iter().map(|(id, _)| id).collect()
1029 }
1030}
1031
1032impl<T: Config> GetChannelInfo for Pallet<T> {
1033 fn get_channel_status(id: ParaId) -> ChannelStatus {
1034 let channels = match RelevantMessagingState::<T>::get() {
1049 None => {
1050 log::warn!("calling `get_channel_status` with no RelevantMessagingState?!");
1051 return ChannelStatus::Closed
1052 },
1053 Some(d) => d.egress_channels,
1054 };
1055 let index = match channels.binary_search_by_key(&id, |item| item.0) {
1062 Err(_) => return ChannelStatus::Closed,
1063 Ok(i) => i,
1064 };
1065 let meta = &channels[index].1;
1066 if meta.msg_count + 1 > meta.max_capacity {
1067 return ChannelStatus::Full
1069 }
1070 let max_size_now = meta.max_total_size - meta.total_size;
1071 let max_size_ever = meta.max_message_size;
1072 ChannelStatus::Ready(max_size_now as usize, max_size_ever as usize)
1073 }
1074
1075 fn get_channel_info(id: ParaId) -> Option<ChannelInfo> {
1076 let channels = RelevantMessagingState::<T>::get()?.egress_channels;
1077 let index = channels.binary_search_by_key(&id, |item| item.0).ok()?;
1078 let info = ChannelInfo {
1079 max_capacity: channels[index].1.max_capacity,
1080 max_total_size: channels[index].1.max_total_size,
1081 max_message_size: channels[index].1.max_message_size,
1082 msg_count: channels[index].1.msg_count,
1083 total_size: channels[index].1.total_size,
1084 };
1085 Some(info)
1086 }
1087}
1088
1089impl<T: Config> Pallet<T> {
1090 fn messages_collection_size_limit() -> usize {
1100 let max_block_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
1101 let max_block_pov = max_block_weight.proof_size();
1102 (max_block_pov / 6).saturated_into()
1103 }
1104
 /// Turn the raw [`ParachainInherentData`] into the `set_validation_data`
 /// call: drop messages already processed in previous blocks and abridge the
 /// remainder to fit the PoV size budget.
 fn do_create_inherent(data: ParachainInherentData) -> Call<T> {
  let (data, mut downward_messages, mut horizontal_messages) =
   deconstruct_parachain_inherent_data(data);
  let last_relay_block_number = LastRelayChainBlockNumber::<T>::get();

  let messages_collection_size_limit = Self::messages_collection_size_limit();
  // DMP: skip everything up to and including the last processed message.
  let last_processed_msg = LastProcessedDownwardMessage::<T>::get()
   .unwrap_or(InboundMessageId { sent_at: last_relay_block_number, reverse_idx: 0 });
  downward_messages.drop_processed_messages(&last_processed_msg);
  let mut size_limit = messages_collection_size_limit;
  let downward_messages = downward_messages.into_abridged(&mut size_limit);

  // HRMP: same procedure; the leftover DMP budget carries over since
  // `into_abridged` decremented `size_limit` in place.
  let last_processed_msg = LastProcessedHrmpMessage::<T>::get()
   .unwrap_or(InboundMessageId { sent_at: last_relay_block_number, reverse_idx: 0 });
  horizontal_messages.drop_processed_messages(&last_processed_msg);
  size_limit = size_limit.saturating_add(messages_collection_size_limit);
  let horizontal_messages = horizontal_messages.into_abridged(&mut size_limit);

  let inbound_messages_data =
   InboundMessagesData::new(downward_messages, horizontal_messages);

  Call::set_validation_data { data, inbound_messages_data }
 }
1135
 /// Process inbound downward messages: extend the local DMQ message-queue
 /// chain, hand the full messages to `T::DmpQueue`, and verify the resulting
 /// head against `expected_dmq_mqc_head` from the relay chain.
 ///
 /// Panics on an MQC head mismatch, which invalidates the block.
 fn enqueue_inbound_downward_messages(
  expected_dmq_mqc_head: relay_chain::Hash,
  downward_messages: AbridgedInboundDownwardMessages,
 ) -> Weight {
  downward_messages.check_enough_messages_included("DMQ");

  let mut dmq_head = <LastDmqMqcHead<T>>::get();

  // `messages` carry full payloads; `hashed_messages` were size-abridged and
  // only contribute their hash to the MQC.
  let (messages, hashed_messages) = downward_messages.messages();
  let message_count = messages.len() as u32;
  let weight_used = T::WeightInfo::enqueue_inbound_downward_messages(message_count);
  if let Some(last_msg) = messages.last() {
   Self::deposit_event(Event::DownwardMessagesReceived { count: message_count });

   // Extend and persist the head for the fully processed messages only.
   for msg in messages {
    dmq_head.extend_downward(msg);
   }
   <LastDmqMqcHead<T>>::put(&dmq_head);
   Self::deposit_event(Event::DownwardMessagesProcessed {
    weight_used,
    dmq_head: dmq_head.head(),
   });

   // Track where processing stopped within `last_msg.sent_at`, counting how
   // many hashed (unprocessed) messages share that relay block number.
   let mut last_processed_msg =
    InboundMessageId { sent_at: last_msg.sent_at, reverse_idx: 0 };
   for msg in hashed_messages {
    dmq_head.extend_with_hashed_msg(msg);

    if msg.sent_at == last_processed_msg.sent_at {
     last_processed_msg.reverse_idx += 1;
    }
   }
   LastProcessedDownwardMessage::<T>::put(last_processed_msg);

   T::DmpQueue::handle_messages(downward_messages.bounded_msgs_iter());
  }

  // After including the hashed messages, the head must match the relay view.
  assert_eq!(dmq_head.head(), expected_dmq_mqc_head, "DMQ head mismatch");

  ProcessedDownwardMessages::<T>::put(message_count);

  weight_used
 }
1194
1195 fn check_hrmp_mcq_heads(
1196 ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
1197 mqc_heads: &mut BTreeMap<ParaId, MessageQueueChain>,
1198 ) {
1199 for (sender, channel) in ingress_channels {
1207 let cur_head = mqc_heads.entry(*sender).or_default().head();
1208 let target_head = channel.mqc_head.unwrap_or_default();
1209 assert_eq!(cur_head, target_head, "HRMP head mismatch");
1210 }
1211 }
1212
 /// Validate one inbound HRMP message's metadata `(sent_at, sender)`:
 /// messages must arrive in non-decreasing `(sent_at, sender)` order, and
 /// the sender must have an open ingress channel to this parachain.
 ///
 /// Updates `maybe_prev_msg_metadata` to the current message for the next
 /// call. Panics on violation, invalidating the block.
 fn check_hrmp_message_metadata(
  ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
  maybe_prev_msg_metadata: &mut Option<(u32, ParaId)>,
  msg_metadata: (u32, ParaId),
 ) {
  if let Some(prev_msg) = maybe_prev_msg_metadata {
   // Tuple comparison: ordered by `sent_at`, then by sender para id.
   assert!(&msg_metadata >= prev_msg, "[HRMP] Messages order violation");
  }
  *maybe_prev_msg_metadata = Some(msg_metadata);

  let sender = msg_metadata.1;
  // `ingress_channels` is sorted by sender, hence binary search.
  let maybe_channel_idx =
   ingress_channels.binary_search_by_key(&sender, |&(channel_sender, _)| channel_sender);
  assert!(
   maybe_channel_idx.is_ok(),
   "One of the messages submitted by the collator was sent from a sender ({}) \
   that doesn't have a channel opened to this parachain",
   <ParaId as Into<u32>>::into(sender)
  );
 }
1240
 /// Process inbound HRMP messages: validate ordering and channels, extend the
 /// per-sender MQC heads, hand full messages to `T::XcmpMessageHandler`, and
 /// advance the HRMP watermark and `LastProcessedHrmpMessage` accordingly.
 ///
 /// Panics on metadata/MQC violations, invalidating the block.
 fn enqueue_inbound_horizontal_messages(
  ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
  horizontal_messages: AbridgedInboundHrmpMessages,
  relay_parent_number: relay_chain::BlockNumber,
 ) -> Weight {
  horizontal_messages.check_enough_messages_included("HRMP");

  // `messages` carry full payloads; `hashed_messages` were size-abridged and
  // only contribute their hash to the MQCs.
  let (messages, hashed_messages) = horizontal_messages.messages();
  let mut mqc_heads = <LastHrmpMqcHeads<T>>::get();

  if messages.is_empty() {
   // Nothing to process: heads must already match, and the watermark can
   // advance straight to the relay parent.
   Self::check_hrmp_mcq_heads(ingress_channels, &mut mqc_heads);
   let last_processed_msg =
    InboundMessageId { sent_at: relay_parent_number, reverse_idx: 0 };
   LastProcessedHrmpMessage::<T>::put(last_processed_msg);
   HrmpWatermark::<T>::put(relay_parent_number);
   return T::DbWeight::get().reads_writes(1, 2);
  }

  let mut prev_msg_metadata = None;
  let mut last_processed_block = HrmpWatermark::<T>::get();
  let mut last_processed_msg = InboundMessageId { sent_at: 0, reverse_idx: 0 };
  for (sender, msg) in messages {
   Self::check_hrmp_message_metadata(
    ingress_channels,
    &mut prev_msg_metadata,
    (msg.sent_at, *sender),
   );
   mqc_heads.entry(*sender).or_default().extend_hrmp(msg);

   // The watermark may only advance to a relay block whose messages were
   // all fully processed, i.e. once a *later* `sent_at` shows up.
   if msg.sent_at > last_processed_msg.sent_at && last_processed_msg.sent_at > 0 {
    last_processed_block = last_processed_msg.sent_at;
   }
   last_processed_msg.sent_at = msg.sent_at;
  }
  // Persist heads covering only the fully processed messages; the hashed
  // extensions below are local-only (presumably those messages get fully
  // processed — and their MQC extension persisted — in a later block).
  <LastHrmpMqcHeads<T>>::put(&mqc_heads);
  for (sender, msg) in hashed_messages {
   Self::check_hrmp_message_metadata(
    ingress_channels,
    &mut prev_msg_metadata,
    (msg.sent_at, *sender),
   );
   mqc_heads.entry(*sender).or_default().extend_with_hashed_msg(msg);

   // Count hashed (unprocessed) messages sharing the last processed block.
   if msg.sent_at == last_processed_msg.sent_at {
    last_processed_msg.reverse_idx += 1;
   }
  }
  // No hashed leftovers in the last block => that block is fully processed.
  if last_processed_msg.sent_at > 0 && last_processed_msg.reverse_idx == 0 {
   last_processed_block = last_processed_msg.sent_at;
  }
  LastProcessedHrmpMessage::<T>::put(&last_processed_msg);
  Self::check_hrmp_mcq_heads(ingress_channels, &mut mqc_heads);

  let max_weight =
   <ReservedXcmpWeightOverride<T>>::get().unwrap_or_else(T::ReservedXcmpWeight::get);
  let weight_used = T::XcmpMessageHandler::handle_xcmp_messages(
   horizontal_messages.flat_msgs_iter(),
   max_weight,
  );

  HrmpWatermark::<T>::put(last_processed_block);

  weight_used.saturating_add(T::DbWeight::get().reads_writes(2, 3))
 }
1318
 /// Drop every ancestor of the unincluded segment up to (and including) the
 /// para head the relay chain reports as included, subtracting their usage
 /// from the aggregated segment. Returns the weight used.
 ///
 /// Panics if the segment would have no room left for the current block.
 fn maybe_drop_included_ancestors(
  relay_state_proof: &RelayChainStateProof,
  capacity: consensus_hook::UnincludedSegmentCapacity,
 ) -> Weight {
  let mut weight_used = Weight::zero();
  // Hash of the included para head, if present in the proof.
  let para_head =
   relay_state_proof.read_included_para_head().ok().map(|h| T::Hashing::hash(&h.0));

  let unincluded_segment_len = <UnincludedSegment<T>>::decode_len().unwrap_or(0);
  weight_used += T::DbWeight::get().reads(1);

  let included_head = match (para_head, capacity.is_expecting_included_parent()) {
   (Some(h), true) => {
    // The consensus hook expects the parent to be the included block.
    assert_eq!(
     h,
     frame_system::Pallet::<T>::parent_hash(),
     "expected parent to be included"
    );

    h
   },
   (Some(h), false) => h,
   (None, true) => {
    // Head not in the proof, but the hook vouches for the parent.
    frame_system::Pallet::<T>::parent_hash()
   },
   (None, false) => panic!("included head not present in relay storage proof"),
  };

  let new_len = {
   let para_head_hash = included_head;
   // Remove everything up to and including the included head; if it is not
   // found in the segment, nothing is dropped (`map_or(0, ..)`).
   let dropped: Vec<Ancestor<T::Hash>> = <UnincludedSegment<T>>::mutate(|chain| {
    let idx = chain
     .iter()
     .position(|block| {
      let head_hash = block
       .para_head_hash()
       .expect("para head hash is updated during block initialization; qed");
      head_hash == &para_head_hash
     })
     .map_or(0, |idx| idx + 1);
    chain.drain(..idx).collect()
   });
   weight_used += T::DbWeight::get().reads_writes(1, 1);

   let new_len = unincluded_segment_len - dropped.len();
   if !dropped.is_empty() {
    // Give the dropped ancestors' bandwidth back to the aggregate.
    <AggregatedUnincludedSegment<T>>::mutate(|agg| {
     let agg = agg.as_mut().expect(
      "dropped part of the segment wasn't empty, hence value exists; qed",
     );
     for block in dropped {
      agg.subtract(&block);
     }
    });
    weight_used += T::DbWeight::get().reads_writes(1, 1);
   }

   new_len as u32
  };

  // Strict `<`: the current block itself still needs a slot.
  assert!(new_len < capacity.get(), "no space left for the block in the unincluded segment");
  weight_used
 }
1394
	/// Shrink the egress limits in `RelevantMessagingState` by the bandwidth already
	/// consumed by blocks in the unincluded segment, so the current block cannot
	/// overcommit HRMP or UMP capacity.
	///
	/// No-op when there is no aggregated segment or no messaging state stored.
	fn adjust_egress_bandwidth_limits() {
		let unincluded_segment = match AggregatedUnincludedSegment::<T>::get() {
			None => return,
			Some(s) => s,
		};

		<RelevantMessagingState<T>>::mutate(|messaging_state| {
			let messaging_state = match messaging_state {
				None => return,
				Some(s) => s,
			};

			let used_bandwidth = unincluded_segment.used_bandwidth();

			let channels = &mut messaging_state.egress_channels;
			for (para_id, used) in used_bandwidth.hrmp_outgoing.iter() {
				// `egress_channels` is kept sorted by para id, hence binary search.
				let i = match channels.binary_search_by_key(para_id, |item| item.0) {
					Ok(i) => i,
					// The channel is gone from the relay state; nothing to adjust.
					Err(_) => continue,
				};

				let c = &mut channels[i].1;

				// Account for bytes/messages already enqueued by unincluded ancestors,
				// capped at the channel maxima.
				c.total_size = (c.total_size + used.total_bytes).min(c.max_total_size);
				c.msg_count = (c.msg_count + used.msg_count).min(c.max_capacity);
			}

			// Subtract the upward-message bandwidth consumed by the segment.
			let upward_capacity = &mut messaging_state.relay_dispatch_queue_remaining_capacity;
			upward_capacity.remaining_count =
				upward_capacity.remaining_count.saturating_sub(used_bandwidth.ump_msg_count);
			upward_capacity.remaining_size =
				upward_capacity.remaining_size.saturating_sub(used_bandwidth.ump_total_bytes);
		});
	}
1434
1435 fn notify_polkadot_of_pending_upgrade(code: &[u8]) {
1439 NewValidationCode::<T>::put(code);
1440 <DidSetValidationCode<T>>::put(true);
1441 }
1442
1443 pub fn max_code_size() -> Option<u32> {
1447 <HostConfiguration<T>>::get().map(|cfg| cfg.max_code_size)
1448 }
1449
1450 pub fn schedule_code_upgrade(validation_function: Vec<u8>) -> DispatchResult {
1452 ensure!(<ValidationData<T>>::exists(), Error::<T>::ValidationDataNotAvailable,);
1456 ensure!(<UpgradeRestrictionSignal<T>>::get().is_none(), Error::<T>::ProhibitedByPolkadot);
1457
1458 ensure!(!<PendingValidationCode<T>>::exists(), Error::<T>::OverlappingUpgrades);
1459 let cfg = HostConfiguration::<T>::get().ok_or(Error::<T>::HostConfigurationNotAvailable)?;
1460 ensure!(validation_function.len() <= cfg.max_code_size as usize, Error::<T>::TooBig);
1461
1462 Self::notify_polkadot_of_pending_upgrade(&validation_function);
1470 <PendingValidationCode<T>>::put(validation_function);
1471 Self::deposit_event(Event::ValidationFunctionStored);
1472
1473 Ok(())
1474 }
1475
1476 pub fn collect_collation_info(header: &HeaderFor<T>) -> CollationInfo {
1484 CollationInfo {
1485 hrmp_watermark: HrmpWatermark::<T>::get(),
1486 horizontal_messages: HrmpOutboundMessages::<T>::get(),
1487 upward_messages: UpwardMessages::<T>::get(),
1488 processed_downward_messages: ProcessedDownwardMessages::<T>::get(),
1489 new_validation_code: NewValidationCode::<T>::get().map(Into::into),
1490 head_data: CustomValidationHeadData::<T>::get()
1493 .map_or_else(|| header.encode(), |v| v)
1494 .into(),
1495 }
1496 }
1497
1498 pub fn set_custom_validation_head_data(head_data: Vec<u8>) {
1511 CustomValidationHeadData::<T>::put(head_data);
1512 }
1513
1514 #[cfg(feature = "experimental-ump-signals")]
1516 fn send_ump_signal() {
1517 use cumulus_primitives_core::relay_chain::{UMPSignal, UMP_SEPARATOR};
1518
1519 UpwardMessages::<T>::mutate(|up| {
1520 if let Some(core_info) =
1521 CumulusDigestItem::find_core_info(&frame_system::Pallet::<T>::digest())
1522 {
1523 up.push(UMP_SEPARATOR);
1524
1525 up.push(
1527 UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset)
1528 .encode(),
1529 );
1530 }
1531 });
1532 }
1533
1534 #[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1539 pub fn open_outbound_hrmp_channel_for_benchmarks_or_tests(target_parachain: ParaId) {
1540 RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1541 dmq_mqc_head: Default::default(),
1542 relay_dispatch_queue_remaining_capacity: Default::default(),
1543 ingress_channels: Default::default(),
1544 egress_channels: vec![(
1545 target_parachain,
1546 cumulus_primitives_core::AbridgedHrmpChannel {
1547 max_capacity: 10,
1548 max_total_size: 10_000_000_u32,
1549 max_message_size: 10_000_000_u32,
1550 msg_count: 5,
1551 total_size: 5_000_000_u32,
1552 mqc_head: None,
1553 },
1554 )],
1555 })
1556 }
1557
1558 #[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1563 pub fn open_custom_outbound_hrmp_channel_for_benchmarks_or_tests(
1564 target_parachain: ParaId,
1565 channel: cumulus_primitives_core::AbridgedHrmpChannel,
1566 ) {
1567 RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1568 dmq_mqc_head: Default::default(),
1569 relay_dispatch_queue_remaining_capacity: Default::default(),
1570 ingress_channels: Default::default(),
1571 egress_channels: vec![(target_parachain, channel)],
1572 })
1573 }
1574
1575 #[cfg(feature = "runtime-benchmarks")]
1577 pub fn initialize_for_set_code_benchmark(max_code_size: u32) {
1578 let vfp = PersistedValidationData {
1580 parent_head: polkadot_parachain_primitives::primitives::HeadData(Default::default()),
1581 relay_parent_number: 1,
1582 relay_parent_storage_root: Default::default(),
1583 max_pov_size: 1_000,
1584 };
1585 <ValidationData<T>>::put(&vfp);
1586
1587 let host_config = AbridgedHostConfiguration {
1589 max_code_size,
1590 max_head_data_size: 32 * 1024,
1591 max_upward_queue_count: 8,
1592 max_upward_queue_size: 1024 * 1024,
1593 max_upward_message_size: 4 * 1024,
1594 max_upward_message_num_per_candidate: 2,
1595 hrmp_max_message_num_per_candidate: 2,
1596 validation_upgrade_cooldown: 2,
1597 validation_upgrade_delay: 2,
1598 async_backing_params: relay_chain::AsyncBackingParams {
1599 allowed_ancestry_len: 0,
1600 max_candidate_depth: 0,
1601 },
1602 };
1603 <HostConfiguration<T>>::put(host_config);
1604 }
1605}
1606
/// An implementation of `frame_system::SetCode` that routes runtime code changes
/// through this pallet's upgrade-scheduling logic.
pub struct ParachainSetCode<T>(core::marker::PhantomData<T>);

impl<T: Config> frame_system::SetCode<T> for ParachainSetCode<T> {
	fn set_code(code: Vec<u8>) -> DispatchResult {
		// Defer to the pallet so relay-chain restrictions and size limits apply.
		Pallet::<T>::schedule_code_upgrade(code)
	}
}
1614
impl<T: Config> Pallet<T> {
	/// Queue an upward message in `PendingUpwardMessages` and return `(0, hash)` of
	/// the enqueued message.
	///
	/// When the host configuration is available, the message is size-checked against
	/// `max_upward_message_size`, and the delivery fee factor is increased if the
	/// pending queue exceeds a threshold fraction (see
	/// `ump_constants::THRESHOLD_FACTOR`) of `max_upward_queue_size`. Without a host
	/// configuration the message is queued without any checks.
	pub fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
		let message_len = message.len();
		if let Some(cfg) = HostConfiguration::<T>::get() {
			if message_len > cfg.max_upward_message_size as usize {
				return Err(MessageSendError::TooBig);
			}
			// Congestion threshold: a fixed fraction of the maximum queue size.
			let threshold =
				cfg.max_upward_queue_size.saturating_div(ump_constants::THRESHOLD_FACTOR);
			<PendingUpwardMessages<T>>::append(message.clone());
			// Total size of the queue *including* the message just appended.
			let pending_messages = PendingUpwardMessages::<T>::get();
			let total_size: usize = pending_messages.iter().map(UpwardMessage::len).sum();
			if total_size > threshold as usize {
				// Queue is congested: raise the delivery fee factor.
				Self::increase_fee_factor((), message_len as u128);
			}
		} else {
			// Host configuration not available; queue without size or congestion checks.
			<PendingUpwardMessages<T>>::append(message.clone());
		};

		// Hash of the message, reported in the event and returned to the caller.
		let hash = sp_io::hashing::blake2_256(&message);
		Self::deposit_event(Event::UpwardMessageSent { message_hash: Some(hash) });
		Ok((0, hash))
	}

	/// The relay-chain block number recorded by the last processed validation data.
	pub fn last_relay_block_number() -> RelayChainBlockNumber {
		LastRelayChainBlockNumber::<T>::get()
	}
}
1676
1677impl<T: Config> UpwardMessageSender for Pallet<T> {
1678 fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1679 Self::send_upward_message(message)
1680 }
1681
1682 fn can_send_upward_message(message: &UpwardMessage) -> Result<(), MessageSendError> {
1683 let max_upward_message_size = HostConfiguration::<T>::get()
1684 .map(|cfg| cfg.max_upward_message_size)
1685 .ok_or(MessageSendError::Other)?;
1686 if message.len() > max_upward_message_size as usize {
1687 Err(MessageSendError::TooBig)
1688 } else {
1689 Ok(())
1690 }
1691 }
1692
1693 #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
1694 fn ensure_successful_delivery() {
1695 const MAX_UPWARD_MESSAGE_SIZE: u32 = 65_531 * 3;
1696 const MAX_CODE_SIZE: u32 = 3 * 1024 * 1024;
1697 HostConfiguration::<T>::mutate(|cfg| match cfg {
1698 Some(cfg) => cfg.max_upward_message_size = MAX_UPWARD_MESSAGE_SIZE,
1699 None =>
1700 *cfg = Some(AbridgedHostConfiguration {
1701 max_code_size: MAX_CODE_SIZE,
1702 max_head_data_size: 32 * 1024,
1703 max_upward_queue_count: 8,
1704 max_upward_queue_size: 1024 * 1024,
1705 max_upward_message_size: MAX_UPWARD_MESSAGE_SIZE,
1706 max_upward_message_num_per_candidate: 2,
1707 hrmp_max_message_num_per_candidate: 2,
1708 validation_upgrade_cooldown: 2,
1709 validation_upgrade_delay: 2,
1710 async_backing_params: relay_chain::AsyncBackingParams {
1711 allowed_ancestry_len: 0,
1712 max_candidate_depth: 0,
1713 },
1714 }),
1715 })
1716 }
1717}
1718
1719impl<T: Config> InspectMessageQueues for Pallet<T> {
1720 fn clear_messages() {
1721 PendingUpwardMessages::<T>::kill();
1722 }
1723
1724 fn get_messages() -> Vec<(VersionedLocation, Vec<VersionedXcm<()>>)> {
1725 use xcm::prelude::*;
1726
1727 let messages: Vec<VersionedXcm<()>> = PendingUpwardMessages::<T>::get()
1728 .iter()
1729 .map(|encoded_message| {
1730 VersionedXcm::<()>::decode_all_with_depth_limit(
1731 MAX_XCM_DECODE_DEPTH,
1732 &mut &encoded_message[..],
1733 )
1734 .unwrap()
1735 })
1736 .collect();
1737
1738 if messages.is_empty() {
1739 vec![]
1740 } else {
1741 vec![(VersionedLocation::from(Location::parent()), messages)]
1742 }
1743 }
1744}
1745
#[cfg(feature = "runtime-benchmarks")]
impl<T: Config> polkadot_runtime_parachains::EnsureForParachain for Pallet<T> {
	fn ensure(para_id: ParaId) {
		// Benchmarks need an open egress channel; create a default one if closed.
		if matches!(Self::get_channel_status(para_id), ChannelStatus::Closed) {
			Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id)
		}
	}
}
1754
#[deprecated(note = "This trait is deprecated and will be removed by September 2024. \
	Consider switching to `cumulus-pallet-parachain-system::ConsensusHook`")]
pub trait CheckInherents<Block: BlockT> {
	/// Check the inherents of `block` against the relay-chain state proof in
	/// `validation_data`.
	fn check_inherents(
		block: &Block,
		validation_data: &RelayChainStateProof,
	) -> frame_support::inherent::CheckInherentsResult;
}
1768
// Placeholder implementor of the deprecated `CheckInherents` trait.
#[doc(hidden)]
pub struct DummyCheckInherents<Block>(core::marker::PhantomData<Block>);

#[allow(deprecated)]
impl<Block: BlockT> CheckInherents<Block> for DummyCheckInherents<Block> {
	// No-op check: always reports an empty (i.e. passing) result.
	fn check_inherents(
		_: &Block,
		_: &RelayChainStateProof,
	) -> frame_support::inherent::CheckInherentsResult {
		sp_inherents::CheckInherentsResult::new()
	}
}
1782
/// Hooks for notable system events. Implemented for tuples of up to 30 elements so
/// multiple listeners can be chained.
#[impl_trait_for_tuples::impl_for_tuples(30)]
pub trait OnSystemEvent {
	/// Called with the block's `PersistedValidationData`.
	// NOTE(review): call site not visible in this chunk — presumably invoked when the
	// pallet processes the validation-data inherent; confirm against the caller.
	fn on_validation_data(data: &PersistedValidationData);
	/// Called in relation to a validation-code upgrade being applied.
	// NOTE(review): exact timing not visible here; confirm against the caller.
	fn on_validation_code_applied();
}
1798
/// A snapshot of the relay chain state at a particular block.
#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Default, RuntimeDebug)]
pub struct RelayChainState {
	// The relay chain block number.
	pub number: relay_chain::BlockNumber,
	// The relay chain storage root at that block.
	pub state_root: relay_chain::Hash,
}
1807
/// A source of the current relay chain state (block number and storage root).
pub trait RelaychainStateProvider {
	/// Returns the current relay chain state.
	fn current_relay_chain_state() -> RelayChainState;

	/// Overrides the relay chain state, for benchmarking only. Default: no-op.
	#[cfg(feature = "runtime-benchmarks")]
	fn set_current_relay_chain_state(_state: RelayChainState) {}
}
1824
// Legacy name kept for downstream compatibility.
#[deprecated = "Use `RelaychainDataProvider` instead"]
pub type RelaychainBlockNumberProvider<T> = RelaychainDataProvider<T>;

/// Provides relay-chain data (block number and state) to the runtime, backed by this
/// pallet's `ValidationData` storage.
pub struct RelaychainDataProvider<T>(core::marker::PhantomData<T>);
1845
1846impl<T: Config> BlockNumberProvider for RelaychainDataProvider<T> {
1847 type BlockNumber = relay_chain::BlockNumber;
1848
1849 fn current_block_number() -> relay_chain::BlockNumber {
1850 ValidationData::<T>::get()
1851 .map(|d| d.relay_parent_number)
1852 .unwrap_or_else(|| Pallet::<T>::last_relay_block_number())
1853 }
1854
1855 #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
1856 fn set_block_number(block: Self::BlockNumber) {
1857 let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1858 PersistedValidationData {
1860 parent_head: vec![].into(),
1861 relay_parent_number: Default::default(),
1862 max_pov_size: Default::default(),
1863 relay_parent_storage_root: Default::default(),
1864 });
1865 validation_data.relay_parent_number = block;
1866 ValidationData::<T>::put(validation_data)
1867 }
1868}
1869
1870impl<T: Config> RelaychainStateProvider for RelaychainDataProvider<T> {
1871 fn current_relay_chain_state() -> RelayChainState {
1872 ValidationData::<T>::get()
1873 .map(|d| RelayChainState {
1874 number: d.relay_parent_number,
1875 state_root: d.relay_parent_storage_root,
1876 })
1877 .unwrap_or_default()
1878 }
1879
1880 #[cfg(feature = "runtime-benchmarks")]
1881 fn set_current_relay_chain_state(state: RelayChainState) {
1882 let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1883 PersistedValidationData {
1885 parent_head: vec![].into(),
1886 relay_parent_number: Default::default(),
1887 max_pov_size: Default::default(),
1888 relay_parent_storage_root: Default::default(),
1889 });
1890 validation_data.relay_parent_number = state.number;
1891 validation_data.relay_parent_storage_root = state.state_root;
1892 ValidationData::<T>::put(validation_data)
1893 }
1894}