1#![cfg_attr(not(feature = "std"), no_std)]
18
19extern crate alloc;
31
32use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
33use codec::{Decode, DecodeLimit, Encode};
34use core::cmp;
35use cumulus_primitives_core::{
36 relay_chain::{self, UMPSignal, UMP_SEPARATOR},
37 AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, CumulusDigestItem,
38 GetChannelInfo, ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId,
39 PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler,
40 XcmpMessageSource,
41};
42use cumulus_primitives_parachain_inherent::{v0, MessageQueueChain, ParachainInherentData};
43use frame_support::{
44 dispatch::{DispatchClass, DispatchResult},
45 ensure,
46 inherent::{InherentData, InherentIdentifier, ProvideInherent},
47 traits::{Get, HandleMessage},
48 weights::Weight,
49};
50use frame_system::{ensure_none, ensure_root, pallet_prelude::HeaderFor};
51use parachain_inherent::{
52 deconstruct_parachain_inherent_data, AbridgedInboundDownwardMessages,
53 AbridgedInboundHrmpMessages, BasicParachainInherentData, InboundMessageId, InboundMessagesData,
54};
55use polkadot_parachain_primitives::primitives::RelayChainBlockNumber;
56use polkadot_runtime_parachains::{FeeTracker, GetMinFeeFactor};
57use scale_info::TypeInfo;
58use sp_runtime::{
59 traits::{BlockNumberProvider, Hash},
60 Debug, FixedU128, SaturatedConversion,
61};
62use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, MAX_XCM_DECODE_DEPTH};
63use xcm_builder::InspectMessageQueues;
64
65mod benchmarking;
66pub mod migration;
67mod mock;
68#[cfg(test)]
69mod tests;
70pub mod weights;
71
72pub use weights::WeightInfo;
73
74mod unincluded_segment;
75
76pub mod consensus_hook;
77pub mod relay_state_snapshot;
78#[macro_use]
79pub mod validate_block;
80mod descendant_validation;
81pub mod parachain_inherent;
82
83use unincluded_segment::{
84 HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker,
85};
86
87pub use consensus_hook::{ConsensusHook, ExpectParentIncluded};
88pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block;
109pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof};
110pub use unincluded_segment::{Ancestor, UsedBandwidth};
111
112pub use pallet::*;
113
/// Log target used by this pallet's `log` statements.
const LOG_TARGET: &str = "parachain-system";
115
/// Validates the relationship between the relay chain block number associated with the
/// current parachain block and the one associated with the previous parachain block.
pub trait CheckAssociatedRelayNumber {
	/// Check `current` (relay parent number of this block) against `previous` (relay parent
	/// number of the last block). Implementations are expected to panic when the pair
	/// violates their invariant, rejecting the block.
	fn check_associated_relay_number(
		current: RelayChainBlockNumber,
		previous: RelayChainBlockNumber,
	);
}
133
134pub struct RelayNumberStrictlyIncreases;
139
140impl CheckAssociatedRelayNumber for RelayNumberStrictlyIncreases {
141 fn check_associated_relay_number(
142 current: RelayChainBlockNumber,
143 previous: RelayChainBlockNumber,
144 ) {
145 if current <= previous {
146 panic!("Relay chain block number needs to strictly increase between Parachain blocks!")
147 }
148 }
149}
150
151pub struct AnyRelayNumber;
156
157impl CheckAssociatedRelayNumber for AnyRelayNumber {
158 fn check_associated_relay_number(_: RelayChainBlockNumber, _: RelayChainBlockNumber) {}
159}
160
161pub struct RelayNumberMonotonicallyIncreases;
166
167impl CheckAssociatedRelayNumber for RelayNumberMonotonicallyIncreases {
168 fn check_associated_relay_number(
169 current: RelayChainBlockNumber,
170 previous: RelayChainBlockNumber,
171 ) {
172 if current < previous {
173 panic!("Relay chain block number needs to monotonically increase between Parachain blocks!")
174 }
175 }
176}
177
/// The maximal length of a DMP message that the configured [`Config::DmpQueue`] accepts.
pub type MaxDmpMessageLenOf<T> = <<T as Config>::DmpQueue as HandleMessage>::MaxMessageLen;
180
/// Constants used by the UMP delivery-fee adjustment logic.
pub mod ump_constants {
	/// `host_config.max_upward_queue_size / THRESHOLD_FACTOR` is the queue-size threshold
	/// below which the fee factor is allowed to decrease again (see `on_finalize`).
	pub const THRESHOLD_FACTOR: u32 = 2;
}
187
188#[frame_support::pallet]
189pub mod pallet {
190 use super::*;
191 use cumulus_primitives_core::CoreInfoExistsAtMaxOnce;
192 use frame_support::pallet_prelude::*;
193 use frame_system::pallet_prelude::*;
194
	/// The pallet struct; the `#[frame_support::pallet]` macro attaches the generated
	/// storage and hook glue to it.
	#[pallet::pallet]
	#[pallet::storage_version(migration::STORAGE_VERSION)]
	#[pallet::without_storage_info]
	pub struct Pallet<T>(_);
199
	/// Configuration trait of this pallet.
	#[pallet::config]
	pub trait Config: frame_system::Config<OnSetCode = ParachainSetCode<Self>> {
		/// The overarching event type.
		#[allow(deprecated)]
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// Something that gets notified on system events (validation data set, validation
		/// code applied).
		type OnSystemEvent: OnSystemEvent;

		/// The para ID this chain runs with.
		#[pallet::constant]
		type SelfParaId: Get<ParaId>;

		/// Source of outbound XCMP messages; drained in `on_finalize`.
		type OutboundXcmpMessageSource: XcmpMessageSource;

		/// Queue that inbound downward messages are handed to.
		type DmpQueue: HandleMessage;

		/// Weight reserved for processing DMP messages
		/// (overridable via `ReservedDmpWeightOverride`).
		type ReservedDmpWeight: Get<Weight>;

		/// Handler invoked for inbound XCMP (HRMP) messages.
		type XcmpMessageHandler: XcmpMessageHandler;

		/// Weight reserved for processing XCMP messages
		/// (overridable via `ReservedXcmpWeightOverride`).
		type ReservedXcmpWeight: Get<Weight>;

		/// Policy validating the relay parent number against the previous block's.
		type CheckAssociatedRelayNumber: CheckAssociatedRelayNumber;

		/// Weight info for the pallet's calls and internal functions.
		type WeightInfo: WeightInfo;

		/// Hook managing the unincluded segment capacity / authorship rights.
		type ConsensusHook: ConsensusHook;

		/// Offset applied to the relay parent; when non-zero, that many relay parent
		/// descendants must be proven in the inherent and the claim queue offset in the
		/// `CoreInfo` digest must match it.
		type RelayParentOffset: Get<u32>;
	}
267
	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		/// Finalize the parachain-specific state of the block: consume the pending upgrade
		/// signal, select the upward/HRMP messages to send, and extend the unincluded
		/// segment with this block.
		fn on_finalize(_: BlockNumberFor<T>) {
			// Reset per-block flags/signals. The go-ahead signal is consumed here and
			// recorded in the unincluded segment below (at most once per segment).
			<DidSetValidationCode<T>>::kill();
			<UpgradeRestrictionSignal<T>>::kill();
			let relay_upgrade_go_ahead = <UpgradeGoAhead<T>>::take();

			let vfp = <ValidationData<T>>::get().expect(
				r"Missing required set_validation_data inherent. This inherent must be
				present in every block. This error typically occurs when the set_validation_data
				execution failed and was rejected by the block builder. Check earlier log entries
				for the specific cause of the failure.",
			);

			LastRelayChainBlockNumber::<T>::put(vfp.relay_parent_number);

			let host_config = match HostConfiguration::<T>::get() {
				Some(ok) => ok,
				None => {
					debug_assert!(
						false,
						"host configuration is promised to set until `on_finalize`; qed",
					);
					return
				},
			};

			// Bandwidth limits for the whole unincluded segment, derived from relay state.
			let total_bandwidth_out = match RelevantMessagingState::<T>::get() {
				Some(s) => OutboundBandwidthLimits::from_relay_chain_state(&s),
				None => {
					debug_assert!(
						false,
						"relevant messaging state is promised to be set until `on_finalize`; \
						qed",
					);
					return
				},
			};

			// Shrink the egress limits by what the unincluded segment already used.
			Self::adjust_egress_bandwidth_limits();

			let (ump_msg_count, ump_total_bytes) = <PendingUpwardMessages<T>>::mutate(|up| {
				let (available_capacity, available_size) = match RelevantMessagingState::<T>::get()
				{
					Some(limits) => (
						limits.relay_dispatch_queue_remaining_capacity.remaining_count,
						limits.relay_dispatch_queue_remaining_capacity.remaining_size,
					),
					None => {
						debug_assert!(
							false,
							"relevant messaging state is promised to be set until `on_finalize`; \
							qed",
						);
						return (0, 0)
					},
				};

				let available_capacity =
					cmp::min(available_capacity, host_config.max_upward_message_num_per_candidate);

				// Count how many pending messages fit the remaining count/size budget; the
				// scan stops at the first message that would exceed either limit.
				let (num, total_size) = up
					.iter()
					.scan((0u32, 0u32), |state, msg| {
						let (cap_used, size_used) = *state;
						let new_cap = cap_used.saturating_add(1);
						let new_size = size_used.saturating_add(msg.len() as u32);
						match available_capacity
							.checked_sub(new_cap)
							.and(available_size.checked_sub(new_size))
						{
							Some(_) => {
								*state = (new_cap, new_size);
								Some(*state)
							},
							_ => None,
						}
					})
					.last()
					.unwrap_or_default();

				// Publish the prefix that fits; the remainder stays pending.
				UpwardMessages::<T>::put(&up[..num as usize]);
				*up = up.split_off(num as usize);

				// Queue the core selection signal if the block author put one in the digest.
				if let Some(core_info) =
					CumulusDigestItem::find_core_info(&frame_system::Pallet::<T>::digest())
				{
					PendingUpwardSignals::<T>::append(
						UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset)
							.encode(),
					);
				}

				Self::send_ump_signals();

				// If the remaining backlog dropped below half of the max queue size, the
				// delivery fee factor may decrease again.
				let threshold = host_config
					.max_upward_queue_size
					.saturating_div(ump_constants::THRESHOLD_FACTOR);
				let remaining_total_size: usize = up.iter().map(UpwardMessage::len).sum();
				if remaining_total_size <= threshold as usize {
					Self::decrease_fee_factor(());
				}

				(num, total_size)
			});

			let maximum_channels = host_config
				.hrmp_max_message_num_per_candidate
				.min(<AnnouncedHrmpMessagesPerCandidate<T>>::take())
				as usize;

			let outbound_messages =
				T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels)
					.into_iter()
					.map(|(recipient, data)| OutboundHrmpMessage { recipient, data })
					.collect::<Vec<_>>();

			// Record this block in the unincluded segment with the bandwidth it used.
			{
				let hrmp_outgoing = outbound_messages
					.iter()
					.map(|msg| {
						(
							msg.recipient,
							HrmpChannelUpdate { msg_count: 1, total_bytes: msg.data.len() as u32 },
						)
					})
					.collect();
				let used_bandwidth =
					UsedBandwidth { ump_msg_count, ump_total_bytes, hrmp_outgoing };

				let mut aggregated_segment =
					AggregatedUnincludedSegment::<T>::get().unwrap_or_default();
				// The go-ahead signal is attributed to at most one block per segment.
				let consumed_go_ahead_signal =
					if aggregated_segment.consumed_go_ahead_signal().is_some() {
						None
					} else {
						relay_upgrade_go_ahead
					};
				let ancestor = Ancestor::new_unchecked(used_bandwidth, consumed_go_ahead_signal);

				let watermark = HrmpWatermark::<T>::get();
				let watermark_update = HrmpWatermarkUpdate::new(watermark, vfp.relay_parent_number);

				aggregated_segment
					.append(&ancestor, watermark_update, &total_bandwidth_out)
					.expect("unincluded segment limits exceeded");
				AggregatedUnincludedSegment::<T>::put(aggregated_segment);
				UnincludedSegment::<T>::append(ancestor);
			}
			HrmpOutboundMessages::<T>::put(outbound_messages);
		}

		/// Clear per-block state left over from the previous block and account for the
		/// storage traffic of the block lifecycle with static weight estimates.
		fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
			let mut weight = Weight::zero();

			// If the previous block did not set new validation code, drop any stale entry.
			if !<DidSetValidationCode<T>>::get() {
				NewValidationCode::<T>::kill();
				weight += T::DbWeight::get().writes(1);
			}

			{
				// The newest ancestor in the unincluded segment learns its head hash now —
				// it only becomes available as this block's parent hash.
				<UnincludedSegment<T>>::mutate(|chain| {
					if let Some(ancestor) = chain.last_mut() {
						let parent = frame_system::Pallet::<T>::parent_hash();
						ancestor.replace_para_head_hash(parent);
					}
				});
				weight += T::DbWeight::get().reads_writes(1, 1);

				weight += T::DbWeight::get().reads_writes(3, 2);
			}

			// Remove transient state from the previous block.
			ValidationData::<T>::kill();
			ProcessedDownwardMessages::<T>::kill();
			UpwardMessages::<T>::kill();
			HrmpOutboundMessages::<T>::kill();
			CustomValidationHeadData::<T>::kill();
			HrmpWatermark::<T>::get();
			weight += T::DbWeight::get().reads_writes(1, 5);

			weight += T::DbWeight::get().reads_writes(1, 1);
			let hrmp_max_message_num_per_candidate = HostConfiguration::<T>::get()
				.map(|cfg| cfg.hrmp_max_message_num_per_candidate)
				.unwrap_or(0);
			<AnnouncedHrmpMessagesPerCandidate<T>>::put(hrmp_max_message_num_per_candidate);

			// Rough static estimates for the rest of the block's storage traffic.
			weight += T::DbWeight::get().reads_writes(
				3 + hrmp_max_message_num_per_candidate as u64,
				4 + hrmp_max_message_num_per_candidate as u64,
			);

			weight += T::DbWeight::get().reads_writes(1, 1);

			weight += T::DbWeight::get().reads_writes(6, 3);

			weight += T::DbWeight::get().reads(1);

			// A `CoreInfo` digest item may appear at most once and must carry the claim
			// queue offset matching the configured relay parent offset.
			match CumulusDigestItem::core_info_exists_at_max_once(
				&frame_system::Pallet::<T>::digest(),
			) {
				CoreInfoExistsAtMaxOnce::Once(core_info) => {
					assert_eq!(
						core_info.claim_queue_offset.0,
						T::RelayParentOffset::get() as u8,
						"Only {} is supported as valid claim queue offset",
						T::RelayParentOffset::get()
					);
				},
				CoreInfoExistsAtMaxOnce::NotFound => {},
				CoreInfoExistsAtMaxOnce::MoreThanOnce => {
					panic!("`CumulusDigestItem::CoreInfo` must exist at max once.");
				},
			}

			weight
		}
	}
558
	#[pallet::call]
	impl<T: Config> Pallet<T> {
		/// Set the current validation data.
		///
		/// This mandatory inherent verifies the relay chain state proof, acts on the
		/// upgrade go-ahead/restriction signals, persists the relay-chain-derived state for
		/// the block and enqueues the inbound downward/horizontal messages.
		///
		/// It must run exactly once per block; inconsistent input causes a panic, which
		/// rejects the block.
		#[pallet::call_index(0)]
		#[pallet::weight((0, DispatchClass::Mandatory))]
		pub fn set_validation_data(
			origin: OriginFor<T>,
			data: BasicParachainInherentData,
			inbound_messages_data: InboundMessagesData,
		) -> DispatchResult {
			ensure_none(origin)?;
			assert!(
				!<ValidationData<T>>::exists(),
				"ValidationData must be updated only once in a block",
			);

			let mut total_weight = Weight::zero();

			let BasicParachainInherentData {
				validation_data: vfp,
				relay_chain_state,
				relay_parent_descendants,
				collator_peer_id,
			} = data;

			// The configured policy decides which relay parent numbers are acceptable
			// relative to the previous block's.
			T::CheckAssociatedRelayNumber::check_associated_relay_number(
				vfp.relay_parent_number,
				LastRelayChainBlockNumber::<T>::get(),
			);

			let relay_state_proof = RelayChainStateProof::new(
				T::SelfParaId::get(),
				vfp.relay_parent_storage_root,
				relay_chain_state.clone(),
			)
			.expect("Invalid relay chain state proof");

			// With a non-zero relay parent offset, the descendants between the relay parent
			// and the best relay block must be verified against the proof.
			let expected_rp_descendants_num = T::RelayParentOffset::get();

			if expected_rp_descendants_num > 0 {
				if let Err(err) = descendant_validation::verify_relay_parent_descendants(
					&relay_state_proof,
					relay_parent_descendants,
					vfp.relay_parent_storage_root,
					expected_rp_descendants_num,
				) {
					panic!(
						"Unable to verify provided relay parent descendants. \
						expected_rp_descendants_num: {expected_rp_descendants_num} \
						error: {err:?}"
					);
				};
			}

			let (consensus_hook_weight, capacity) =
				T::ConsensusHook::on_state_proof(&relay_state_proof);
			total_weight += consensus_hook_weight;
			total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity);
			frame_system::Pallet::<T>::deposit_log(
				cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item(
					vfp.relay_parent_storage_root,
					vfp.relay_parent_number,
				),
			);

			let upgrade_go_ahead_signal = relay_state_proof
				.read_upgrade_go_ahead_signal()
				.expect("Invalid upgrade go ahead signal");

			// A go-ahead signal already consumed by an unincluded ancestor must match the
			// one in the current proof and is not acted upon again.
			let upgrade_signal_in_segment = AggregatedUnincludedSegment::<T>::get()
				.as_ref()
				.and_then(SegmentTracker::consumed_go_ahead_signal);
			if let Some(signal_in_segment) = upgrade_signal_in_segment.as_ref() {
				assert_eq!(upgrade_go_ahead_signal, Some(*signal_in_segment));
			}
			match upgrade_go_ahead_signal {
				// Already handled by a block in the unincluded segment.
				Some(_signal) if upgrade_signal_in_segment.is_some() => {
				},
				Some(relay_chain::UpgradeGoAhead::GoAhead) => {
					assert!(
						<PendingValidationCode<T>>::exists(),
						"No new validation function found in storage, GoAhead signal is not expected",
					);
					let validation_code = <PendingValidationCode<T>>::take();

					frame_system::Pallet::<T>::update_code_in_storage(&validation_code);
					<T::OnSystemEvent as OnSystemEvent>::on_validation_code_applied();
					Self::deposit_event(Event::ValidationFunctionApplied {
						relay_chain_block_num: vfp.relay_parent_number,
					});
				},
				Some(relay_chain::UpgradeGoAhead::Abort) => {
					<PendingValidationCode<T>>::kill();
					Self::deposit_event(Event::ValidationFunctionDiscarded);
				},
				None => {},
			}
			<UpgradeRestrictionSignal<T>>::put(
				relay_state_proof
					.read_upgrade_restriction_signal()
					.expect("Invalid upgrade restriction signal"),
			);
			<UpgradeGoAhead<T>>::put(upgrade_go_ahead_signal);

			let host_config = relay_state_proof
				.read_abridged_host_configuration()
				.expect("Invalid host configuration in relay chain state proof");

			let relevant_messaging_state = relay_state_proof
				.read_messaging_state_snapshot(&host_config)
				.expect("Invalid messaging state in relay chain state proof");

			// Persist the relay-chain-derived state for the rest of the block.
			<ValidationData<T>>::put(&vfp);
			<RelayStateProof<T>>::put(relay_chain_state);
			<RelevantMessagingState<T>>::put(relevant_messaging_state.clone());
			<HostConfiguration<T>>::put(host_config);

			<T::OnSystemEvent as OnSystemEvent>::on_validation_data(&vfp);

			// Queue the `ApprovedPeer` UMP signal when the collator provided a peer id.
			if let Some(collator_peer_id) = collator_peer_id {
				PendingUpwardSignals::<T>::append(
					UMPSignal::ApprovedPeer(collator_peer_id).encode(),
				);
			}

			total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages(
				relevant_messaging_state.dmq_mqc_head,
				inbound_messages_data.downward_messages,
			));
			total_weight.saturating_accrue(Self::enqueue_inbound_horizontal_messages(
				&relevant_messaging_state.ingress_channels,
				inbound_messages_data.horizontal_messages,
				vfp.relay_parent_number,
			));

			frame_system::Pallet::<T>::register_extra_weight_unchecked(
				total_weight,
				DispatchClass::Mandatory,
			);

			Ok(())
		}

		/// Send an upward message to the relay chain; root origin only. The send result is
		/// deliberately ignored (best effort).
		#[pallet::call_index(1)]
		#[pallet::weight((1_000, DispatchClass::Operational))]
		pub fn sudo_send_upward_message(
			origin: OriginFor<T>,
			message: UpwardMessage,
		) -> DispatchResult {
			ensure_root(origin)?;
			let _ = Self::send_upward_message(message);
			Ok(())
		}

	}
744
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// The validation function has been stored, pending application.
		ValidationFunctionStored,
		/// The validation function was applied as of the contained relay chain block number.
		ValidationFunctionApplied { relay_chain_block_num: RelayChainBlockNumber },
		/// The pending validation function was discarded (relay chain sent an abort signal).
		ValidationFunctionDiscarded,
		/// Some downward messages have been received and will be processed.
		DownwardMessagesReceived { count: u32 },
		/// Downward messages were processed using the given weight; `dmq_head` is the
		/// resulting message queue chain head.
		DownwardMessagesProcessed { weight_used: Weight, dmq_head: relay_chain::Hash },
		/// An upward message was sent to the relay chain.
		UpwardMessageSent { message_hash: Option<XcmHash> },
	}
761
	#[pallet::error]
	pub enum Error<T> {
		/// An upgrade is already in progress.
		OverlappingUpgrades,
		/// The relay chain currently prohibits this upgrade.
		ProhibitedByPolkadot,
		/// The given code/message is too big.
		TooBig,
		/// The validation data is not available.
		ValidationDataNotAvailable,
		/// The host configuration is not available.
		HostConfigurationNotAvailable,
		/// No upgrade is currently scheduled.
		NotScheduled,
	}
778
	/// The segment of parachain blocks built on top of the latest included block that are
	/// not themselves included yet. Extended in `on_finalize`, pruned in
	/// `maybe_drop_included_ancestors`.
	#[pallet::storage]
	pub type UnincludedSegment<T: Config> = StorageValue<_, Vec<Ancestor<T::Hash>>, ValueQuery>;

	/// Aggregated bandwidth usage and watermark tracking over the whole
	/// [`UnincludedSegment`].
	#[pallet::storage]
	pub type AggregatedUnincludedSegment<T: Config> =
		StorageValue<_, SegmentTracker<T::Hash>, OptionQuery>;

	/// Validation code waiting for the relay chain's go-ahead signal before being applied.
	#[pallet::storage]
	pub type PendingValidationCode<T: Config> = StorageValue<_, Vec<u8>, ValueQuery>;

	/// Validation code staged in the current block; removed in `on_initialize` if the
	/// previous block did not actually set code.
	#[pallet::storage]
	pub type NewValidationCode<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;

	/// The [`PersistedValidationData`] supplied by the `set_validation_data` inherent for
	/// the current block; cleared in `on_initialize`.
	#[pallet::storage]
	pub type ValidationData<T: Config> = StorageValue<_, PersistedValidationData>;

	/// Whether new validation code was staged in this block.
	#[pallet::storage]
	pub type DidSetValidationCode<T: Config> = StorageValue<_, bool, ValueQuery>;

	/// Relay parent number of the last parachain block, checked by
	/// [`Config::CheckAssociatedRelayNumber`].
	#[pallet::storage]
	pub type LastRelayChainBlockNumber<T: Config> =
		StorageValue<_, RelayChainBlockNumber, ValueQuery>;

	/// Upgrade restriction signal read from the relay state proof, if any.
	#[pallet::storage]
	pub type UpgradeRestrictionSignal<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeRestriction>, ValueQuery>;

	/// Upgrade go-ahead signal read from the relay state proof; consumed in `on_finalize`.
	#[pallet::storage]
	pub type UpgradeGoAhead<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeGoAhead>, ValueQuery>;

	/// The relay chain state proof supplied for the current block.
	#[pallet::storage]
	pub type RelayStateProof<T: Config> = StorageValue<_, sp_trie::StorageProof>;

	/// Snapshot of the relay chain messaging state relevant to this para, read from the
	/// state proof; egress limits are adjusted for the unincluded segment in `on_finalize`.
	#[pallet::storage]
	pub type RelevantMessagingState<T: Config> = StorageValue<_, MessagingStateSnapshot>;

	/// The abridged relay chain host configuration, read from the state proof.
	#[pallet::storage]
	#[pallet::disable_try_decode_storage]
	pub type HostConfiguration<T: Config> = StorageValue<_, AbridgedHostConfiguration>;

	/// Message queue chain head of the downward message queue after the last processed
	/// message.
	#[pallet::storage]
	pub type LastDmqMqcHead<T: Config> = StorageValue<_, MessageQueueChain, ValueQuery>;

	/// Per-sender message queue chain heads for the inbound HRMP channels.
	#[pallet::storage]
	pub type LastHrmpMqcHeads<T: Config> =
		StorageValue<_, BTreeMap<ParaId, MessageQueueChain>, ValueQuery>;

	/// Number of downward messages processed in this block; cleared in `on_initialize`.
	#[pallet::storage]
	pub type ProcessedDownwardMessages<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// Cursor of the last fully processed downward message, used to resume processing in
	/// the next block.
	#[pallet::storage]
	pub type LastProcessedDownwardMessage<T: Config> = StorageValue<_, InboundMessageId>;

	/// HRMP watermark, advanced as inbound HRMP messages are processed.
	#[pallet::storage]
	pub type HrmpWatermark<T: Config> = StorageValue<_, relay_chain::BlockNumber, ValueQuery>;

	/// Cursor of the last fully processed HRMP message, used to resume processing in the
	/// next block.
	#[pallet::storage]
	pub type LastProcessedHrmpMessage<T: Config> = StorageValue<_, InboundMessageId>;

	/// Outbound HRMP messages selected for this block in `on_finalize`.
	#[pallet::storage]
	pub type HrmpOutboundMessages<T: Config> =
		StorageValue<_, Vec<OutboundHrmpMessage>, ValueQuery>;

	/// Upward messages selected for this block in `on_finalize`.
	#[pallet::storage]
	pub type UpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

	/// Upward messages queued for sending in a future block.
	#[pallet::storage]
	pub type PendingUpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

	/// Encoded [`UMPSignal`]s pending to be sent upward.
	#[pallet::storage]
	pub type PendingUpwardSignals<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

	/// Fee factor for upward message delivery, managed through the [`FeeTracker`]
	/// implementation.
	#[pallet::storage]
	pub type UpwardDeliveryFeeFactor<T: Config> =
		StorageValue<_, FixedU128, ValueQuery, GetMinFeeFactor<Pallet<T>>>;

	/// Number of HRMP messages we announced for this candidate; set in `on_initialize` and
	/// taken in `on_finalize`.
	#[pallet::storage]
	pub type AnnouncedHrmpMessagesPerCandidate<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// Optional override for the weight reserved for XCMP message processing.
	#[pallet::storage]
	pub type ReservedXcmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// Optional override for the weight reserved for DMP message processing.
	#[pallet::storage]
	pub type ReservedDmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// Optional custom head data; cleared in `on_initialize`.
	#[pallet::storage]
	pub type CustomValidationHeadData<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
	#[pallet::inherent]
	impl<T: Config> ProvideInherent for Pallet<T> {
		type Call = Call<T>;
		type Error = sp_inherents::MakeFatalError<()>;
		const INHERENT_IDENTIFIER: InherentIdentifier =
			cumulus_primitives_parachain_inherent::INHERENT_IDENTIFIER;

		fn create_inherent(data: &InherentData) -> Option<Self::Call> {
			// Prefer the current inherent data format; fall back to decoding the legacy v0
			// format under its own identifier when the current one is absent.
			let data = match data
				.get_data::<ParachainInherentData>(&Self::INHERENT_IDENTIFIER)
				.ok()
				.flatten()
			{
				None => {
					let data = data
						.get_data::<v0::ParachainInherentData>(
							&cumulus_primitives_parachain_inherent::PARACHAIN_INHERENT_IDENTIFIER_V0,
						)
						.ok()
						.flatten()?;
					data.into()
				},
				Some(data) => data,
			};

			Some(Self::do_create_inherent(data))
		}

		fn is_inherent(call: &Self::Call) -> bool {
			matches!(call, Call::set_validation_data { .. })
		}
	}
1000
	/// Genesis configuration of this pallet. There is nothing to configure; the marker
	/// field only keeps the type generic over `T`.
	#[pallet::genesis_config]
	#[derive(frame_support::DefaultNoBound)]
	pub struct GenesisConfig<T: Config> {
		#[serde(skip)]
		pub _config: core::marker::PhantomData<T>,
	}

	#[pallet::genesis_build]
	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
		fn build(&self) {
			// Writes the well-known `:c` key with an empty value.
			// NOTE(review): presumably this marks the chain as Cumulus-based for tooling
			// that checks the key's existence — confirm against upstream docs.
			sp_io::storage::set(b":c", &[]);
		}
	}
1015}
1016
1017impl<T: Config> Pallet<T> {
1018 pub fn unincluded_segment_size_after(included_hash: T::Hash) -> u32 {
1026 let segment = UnincludedSegment::<T>::get();
1027 crate::unincluded_segment::size_after_included(included_hash, &segment)
1028 }
1029}
1030
1031impl<T: Config> FeeTracker for Pallet<T> {
1032 type Id = ();
1033
1034 fn get_fee_factor(_id: Self::Id) -> FixedU128 {
1035 UpwardDeliveryFeeFactor::<T>::get()
1036 }
1037
1038 fn set_fee_factor(_id: Self::Id, val: FixedU128) {
1039 UpwardDeliveryFeeFactor::<T>::set(val);
1040 }
1041}
1042
1043impl<T: Config> ListChannelInfos for Pallet<T> {
1044 fn outgoing_channels() -> Vec<ParaId> {
1045 let Some(state) = RelevantMessagingState::<T>::get() else { return Vec::new() };
1046 state.egress_channels.into_iter().map(|(id, _)| id).collect()
1047 }
1048}
1049
1050impl<T: Config> GetChannelInfo for Pallet<T> {
1051 fn get_channel_status(id: ParaId) -> ChannelStatus {
1052 let channels = match RelevantMessagingState::<T>::get() {
1067 None => {
1068 log::warn!("calling `get_channel_status` with no RelevantMessagingState?!");
1069 return ChannelStatus::Closed
1070 },
1071 Some(d) => d.egress_channels,
1072 };
1073 let index = match channels.binary_search_by_key(&id, |item| item.0) {
1080 Err(_) => return ChannelStatus::Closed,
1081 Ok(i) => i,
1082 };
1083 let meta = &channels[index].1;
1084 if meta.msg_count + 1 > meta.max_capacity {
1085 return ChannelStatus::Full
1087 }
1088 let max_size_now = meta.max_total_size - meta.total_size;
1089 let max_size_ever = meta.max_message_size;
1090 ChannelStatus::Ready(max_size_now as usize, max_size_ever as usize)
1091 }
1092
1093 fn get_channel_info(id: ParaId) -> Option<ChannelInfo> {
1094 let channels = RelevantMessagingState::<T>::get()?.egress_channels;
1095 let index = channels.binary_search_by_key(&id, |item| item.0).ok()?;
1096 let info = ChannelInfo {
1097 max_capacity: channels[index].1.max_capacity,
1098 max_total_size: channels[index].1.max_total_size,
1099 max_message_size: channels[index].1.max_message_size,
1100 msg_count: channels[index].1.msg_count,
1101 total_size: channels[index].1.total_size,
1102 };
1103 Some(info)
1104 }
1105}
1106
1107impl<T: Config> Pallet<T> {
1108 fn messages_collection_size_limit() -> usize {
1118 let max_block_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
1119 let max_block_pov = max_block_weight.proof_size();
1120 (max_block_pov / 6).saturated_into()
1121 }
1122
	/// Build the `set_validation_data` call from the raw inherent data.
	///
	/// Drops inbound messages already processed in previous blocks (based on the stored
	/// `LastProcessedDownwardMessage` / `LastProcessedHrmpMessage` cursors) and abridges
	/// the remaining ones so their total size stays within the per-collection budget;
	/// unused downward budget rolls over to the horizontal messages.
	fn do_create_inherent(data: ParachainInherentData) -> Call<T> {
		let (data, mut downward_messages, mut horizontal_messages) =
			deconstruct_parachain_inherent_data(data);
		let last_relay_block_number = LastRelayChainBlockNumber::<T>::get();

		let messages_collection_size_limit = Self::messages_collection_size_limit();
		let last_processed_msg = LastProcessedDownwardMessage::<T>::get()
			.unwrap_or(InboundMessageId { sent_at: last_relay_block_number, reverse_idx: 0 });
		downward_messages.drop_processed_messages(&last_processed_msg);
		let mut size_limit = messages_collection_size_limit;
		let downward_messages = downward_messages.into_abridged(&mut size_limit);

		let last_processed_msg = LastProcessedHrmpMessage::<T>::get()
			.unwrap_or(InboundMessageId { sent_at: last_relay_block_number, reverse_idx: 0 });
		horizontal_messages.drop_processed_messages(&last_processed_msg);
		// Whatever is left of the downward budget is added on top of the HRMP budget.
		size_limit = size_limit.saturating_add(messages_collection_size_limit);
		let horizontal_messages = horizontal_messages.into_abridged(&mut size_limit);

		let inbound_messages_data =
			InboundMessagesData::new(downward_messages, horizontal_messages);

		Call::set_validation_data { data, inbound_messages_data }
	}
1153
	/// Enqueue all inbound downward messages and verify the resulting message queue chain
	/// head against the relay chain's expectation.
	///
	/// Runs the `check_enough_messages_included` sanity check, folds every full and hashed
	/// message into the locally tracked MQC, records the resume cursor and hands the full
	/// messages over to `T::DmpQueue`. Panics if the recomputed MQC head does not match
	/// `expected_dmq_mqc_head`.
	fn enqueue_inbound_downward_messages(
		expected_dmq_mqc_head: relay_chain::Hash,
		downward_messages: AbridgedInboundDownwardMessages,
	) -> Weight {
		downward_messages.check_enough_messages_included("DMQ");

		let mut dmq_head = <LastDmqMqcHead<T>>::get();

		let (messages, hashed_messages) = downward_messages.messages();
		let message_count = messages.len() as u32;
		let weight_used = T::WeightInfo::enqueue_inbound_downward_messages(message_count);
		if let Some(last_msg) = messages.last() {
			Self::deposit_event(Event::DownwardMessagesReceived { count: message_count });

			// Fold every fully included message into the MQC.
			for msg in messages {
				dmq_head.extend_downward(msg);
			}
			<LastDmqMqcHead<T>>::put(&dmq_head);
			Self::deposit_event(Event::DownwardMessagesProcessed {
				weight_used,
				dmq_head: dmq_head.head(),
			});

			// Hashed (not fully included) messages still advance the MQC; count how many
			// of them share the last full message's `sent_at` so processing can resume at
			// the right offset next block.
			let mut last_processed_msg =
				InboundMessageId { sent_at: last_msg.sent_at, reverse_idx: 0 };
			for msg in hashed_messages {
				dmq_head.extend_with_hashed_msg(msg);

				if msg.sent_at == last_processed_msg.sent_at {
					last_processed_msg.reverse_idx += 1;
				}
			}
			LastProcessedDownwardMessage::<T>::put(last_processed_msg);

			T::DmpQueue::handle_messages(downward_messages.bounded_msgs_iter());
		}

		// The MQC head must match what the relay chain advertised, with or without new
		// messages.
		assert_eq!(dmq_head.head(), expected_dmq_mqc_head, "DMQ head mismatch");

		ProcessedDownwardMessages::<T>::put(message_count);

		weight_used
	}
1212
1213 fn check_hrmp_mcq_heads(
1214 ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
1215 mqc_heads: &mut BTreeMap<ParaId, MessageQueueChain>,
1216 ) {
1217 for (sender, channel) in ingress_channels {
1225 let cur_head = mqc_heads.entry(*sender).or_default().head();
1226 let target_head = channel.mqc_head.unwrap_or_default();
1227 assert_eq!(cur_head, target_head, "HRMP head mismatch");
1228 }
1229 }
1230
1231 fn check_hrmp_message_metadata(
1236 ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
1237 maybe_prev_msg_metadata: &mut Option<(u32, ParaId)>,
1238 msg_metadata: (u32, ParaId),
1239 ) {
1240 if let Some(prev_msg) = maybe_prev_msg_metadata {
1242 assert!(&msg_metadata >= prev_msg, "[HRMP] Messages order violation");
1243 }
1244 *maybe_prev_msg_metadata = Some(msg_metadata);
1245
1246 let sender = msg_metadata.1;
1249 let maybe_channel_idx =
1250 ingress_channels.binary_search_by_key(&sender, |&(channel_sender, _)| channel_sender);
1251 assert!(
1252 maybe_channel_idx.is_ok(),
1253 "One of the messages submitted by the collator was sent from a sender ({}) \
1254 that doesn't have a channel opened to this parachain",
1255 <ParaId as Into<u32>>::into(sender)
1256 );
1257 }
1258
	/// Enqueue all inbound horizontal (HRMP) messages, verify the per-sender MQC heads and
	/// advance the HRMP watermark.
	///
	/// Every message's metadata is validated (ordering + open ingress channel) and folded
	/// into the per-sender MQC. Full messages are handed to `T::XcmpMessageHandler`; hashed
	/// (not fully included) messages only advance the MQCs and the resume cursor stored in
	/// `LastProcessedHrmpMessage`.
	fn enqueue_inbound_horizontal_messages(
		ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
		horizontal_messages: AbridgedInboundHrmpMessages,
		relay_parent_number: relay_chain::BlockNumber,
	) -> Weight {
		horizontal_messages.check_enough_messages_included("HRMP");

		let (messages, hashed_messages) = horizontal_messages.messages();
		let mut mqc_heads = <LastHrmpMqcHeads<T>>::get();

		// No new messages: just check the stored heads and move the watermark to the
		// relay parent.
		if messages.is_empty() {
			Self::check_hrmp_mcq_heads(ingress_channels, &mut mqc_heads);
			let last_processed_msg =
				InboundMessageId { sent_at: relay_parent_number, reverse_idx: 0 };
			LastProcessedHrmpMessage::<T>::put(last_processed_msg);
			HrmpWatermark::<T>::put(relay_parent_number);
			return T::DbWeight::get().reads_writes(1, 2);
		}

		let mut prev_msg_metadata = None;
		let mut last_processed_block = HrmpWatermark::<T>::get();
		let mut last_processed_msg = InboundMessageId { sent_at: 0, reverse_idx: 0 };
		for (sender, msg) in messages {
			Self::check_hrmp_message_metadata(
				ingress_channels,
				&mut prev_msg_metadata,
				(msg.sent_at, *sender),
			);
			mqc_heads.entry(*sender).or_default().extend_hrmp(msg);

			// A relay block is considered fully processed once a later `sent_at` shows up.
			if msg.sent_at > last_processed_msg.sent_at && last_processed_msg.sent_at > 0 {
				last_processed_block = last_processed_msg.sent_at;
			}
			last_processed_msg.sent_at = msg.sent_at;
		}
		// Persist the heads before folding in the hashed messages: the stored heads only
		// reflect fully processed messages.
		<LastHrmpMqcHeads<T>>::put(&mqc_heads);
		// Hashed messages still advance the MQCs; count how many of them share the last
		// processed `sent_at` so processing can resume at the right offset.
		for (sender, msg) in hashed_messages {
			Self::check_hrmp_message_metadata(
				ingress_channels,
				&mut prev_msg_metadata,
				(msg.sent_at, *sender),
			);
			mqc_heads.entry(*sender).or_default().extend_with_hashed_msg(msg);

			if msg.sent_at == last_processed_msg.sent_at {
				last_processed_msg.reverse_idx += 1;
			}
		}
		if last_processed_msg.sent_at > 0 && last_processed_msg.reverse_idx == 0 {
			last_processed_block = last_processed_msg.sent_at;
		}
		LastProcessedHrmpMessage::<T>::put(&last_processed_msg);
		Self::check_hrmp_mcq_heads(ingress_channels, &mut mqc_heads);

		let max_weight =
			<ReservedXcmpWeightOverride<T>>::get().unwrap_or_else(T::ReservedXcmpWeight::get);
		let weight_used = T::XcmpMessageHandler::handle_xcmp_messages(
			horizontal_messages.flat_msgs_iter(),
			max_weight,
		);

		HrmpWatermark::<T>::put(last_processed_block);

		weight_used.saturating_add(T::DbWeight::get().reads_writes(2, 3))
	}
1336
	/// Drop the prefix of the unincluded segment that the relay chain reports as included
	/// and return the weight consumed.
	///
	/// `capacity` (from the consensus hook) determines whether the parent block is required
	/// to already be included, and bounds the segment length after pruning.
	fn maybe_drop_included_ancestors(
		relay_state_proof: &RelayChainStateProof,
		capacity: consensus_hook::UnincludedSegmentCapacity,
	) -> Weight {
		let mut weight_used = Weight::zero();
		// The included para head per the relay chain, re-hashed with our hashing scheme.
		let para_head =
			relay_state_proof.read_included_para_head().ok().map(|h| T::Hashing::hash(&h.0));

		let unincluded_segment_len = <UnincludedSegment<T>>::decode_len().unwrap_or(0);
		weight_used += T::DbWeight::get().reads(1);

		let included_head = match (para_head, capacity.is_expecting_included_parent()) {
			(Some(h), true) => {
				assert_eq!(
					h,
					frame_system::Pallet::<T>::parent_hash(),
					"expected parent to be included"
				);

				h
			},
			(Some(h), false) => h,
			(None, true) => {
				// The proof lacks the head, but the hook expects the parent to be included,
				// so fall back to the parent hash.
				frame_system::Pallet::<T>::parent_hash()
			},
			(None, false) => panic!("included head not present in relay storage proof"),
		};

		let new_len = {
			let para_head_hash = included_head;
			let dropped: Vec<Ancestor<T::Hash>> = <UnincludedSegment<T>>::mutate(|chain| {
				// Everything up to and including the included head leaves the segment.
				let idx = chain
					.iter()
					.position(|block| {
						let head_hash = block
							.para_head_hash()
							.expect("para head hash is updated during block initialization; qed");
						head_hash == &para_head_hash
					})
					.map_or(0, |idx| idx + 1);
				chain.drain(..idx).collect()
			});
			weight_used += T::DbWeight::get().reads_writes(1, 1);

			let new_len = unincluded_segment_len - dropped.len();
			if !dropped.is_empty() {
				// Subtract the dropped blocks' usage from the aggregated tracker.
				<AggregatedUnincludedSegment<T>>::mutate(|agg| {
					let agg = agg.as_mut().expect(
						"dropped part of the segment wasn't empty, hence value exists; qed",
					);
					for block in dropped {
						agg.subtract(&block);
					}
				});
				weight_used += T::DbWeight::get().reads_writes(1, 1);
			}

			new_len as u32
		};

		// The current block must still fit into the allowed segment capacity.
		assert!(new_len < capacity.get(), "no space left for the block in the unincluded segment");
		weight_used
	}
1412
1413 fn adjust_egress_bandwidth_limits() {
1419 let unincluded_segment = match AggregatedUnincludedSegment::<T>::get() {
1420 None => return,
1421 Some(s) => s,
1422 };
1423
1424 <RelevantMessagingState<T>>::mutate(|messaging_state| {
1425 let messaging_state = match messaging_state {
1426 None => return,
1427 Some(s) => s,
1428 };
1429
1430 let used_bandwidth = unincluded_segment.used_bandwidth();
1431
1432 let channels = &mut messaging_state.egress_channels;
1433 for (para_id, used) in used_bandwidth.hrmp_outgoing.iter() {
1434 let i = match channels.binary_search_by_key(para_id, |item| item.0) {
1435 Ok(i) => i,
1436 Err(_) => continue, };
1438
1439 let c = &mut channels[i].1;
1440
1441 c.total_size = (c.total_size + used.total_bytes).min(c.max_total_size);
1442 c.msg_count = (c.msg_count + used.msg_count).min(c.max_capacity);
1443 }
1444
1445 let upward_capacity = &mut messaging_state.relay_dispatch_queue_remaining_capacity;
1446 upward_capacity.remaining_count =
1447 upward_capacity.remaining_count.saturating_sub(used_bandwidth.ump_msg_count);
1448 upward_capacity.remaining_size =
1449 upward_capacity.remaining_size.saturating_sub(used_bandwidth.ump_total_bytes);
1450 });
1451 }
1452
1453 fn notify_polkadot_of_pending_upgrade(code: &[u8]) {
1457 NewValidationCode::<T>::put(code);
1458 <DidSetValidationCode<T>>::put(true);
1459 }
1460
1461 pub fn max_code_size() -> Option<u32> {
1465 <HostConfiguration<T>>::get().map(|cfg| cfg.max_code_size)
1466 }
1467
1468 pub fn schedule_code_upgrade(validation_function: Vec<u8>) -> DispatchResult {
1470 ensure!(<ValidationData<T>>::exists(), Error::<T>::ValidationDataNotAvailable,);
1474 ensure!(<UpgradeRestrictionSignal<T>>::get().is_none(), Error::<T>::ProhibitedByPolkadot);
1475
1476 ensure!(!<PendingValidationCode<T>>::exists(), Error::<T>::OverlappingUpgrades);
1477 let cfg = HostConfiguration::<T>::get().ok_or(Error::<T>::HostConfigurationNotAvailable)?;
1478 ensure!(validation_function.len() <= cfg.max_code_size as usize, Error::<T>::TooBig);
1479
1480 Self::notify_polkadot_of_pending_upgrade(&validation_function);
1488 <PendingValidationCode<T>>::put(validation_function);
1489 Self::deposit_event(Event::ValidationFunctionStored);
1490
1491 Ok(())
1492 }
1493
1494 pub fn collect_collation_info(header: &HeaderFor<T>) -> CollationInfo {
1502 CollationInfo {
1503 hrmp_watermark: HrmpWatermark::<T>::get(),
1504 horizontal_messages: HrmpOutboundMessages::<T>::get(),
1505 upward_messages: UpwardMessages::<T>::get(),
1506 processed_downward_messages: ProcessedDownwardMessages::<T>::get(),
1507 new_validation_code: NewValidationCode::<T>::get().map(Into::into),
1508 head_data: CustomValidationHeadData::<T>::get()
1511 .map_or_else(|| header.encode(), |v| v)
1512 .into(),
1513 }
1514 }
1515
1516 pub fn set_custom_validation_head_data(head_data: Vec<u8>) {
1529 CustomValidationHeadData::<T>::put(head_data);
1530 }
1531
1532 fn send_ump_signals() {
1534 let ump_signals = PendingUpwardSignals::<T>::take();
1535 if !ump_signals.is_empty() {
1536 UpwardMessages::<T>::append(UMP_SEPARATOR);
1537 ump_signals.into_iter().for_each(|s| UpwardMessages::<T>::append(s));
1538 }
1539 }
1540
1541 #[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1546 pub fn open_outbound_hrmp_channel_for_benchmarks_or_tests(target_parachain: ParaId) {
1547 RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1548 dmq_mqc_head: Default::default(),
1549 relay_dispatch_queue_remaining_capacity: Default::default(),
1550 ingress_channels: Default::default(),
1551 egress_channels: vec![(
1552 target_parachain,
1553 cumulus_primitives_core::AbridgedHrmpChannel {
1554 max_capacity: 10,
1555 max_total_size: 10_000_000_u32,
1556 max_message_size: 10_000_000_u32,
1557 msg_count: 5,
1558 total_size: 5_000_000_u32,
1559 mqc_head: None,
1560 },
1561 )],
1562 })
1563 }
1564
1565 #[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1570 pub fn open_custom_outbound_hrmp_channel_for_benchmarks_or_tests(
1571 target_parachain: ParaId,
1572 channel: cumulus_primitives_core::AbridgedHrmpChannel,
1573 ) {
1574 RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1575 dmq_mqc_head: Default::default(),
1576 relay_dispatch_queue_remaining_capacity: Default::default(),
1577 ingress_channels: Default::default(),
1578 egress_channels: vec![(target_parachain, channel)],
1579 })
1580 }
1581
1582 #[cfg(feature = "runtime-benchmarks")]
1584 pub fn initialize_for_set_code_benchmark(max_code_size: u32) {
1585 let vfp = PersistedValidationData {
1587 parent_head: polkadot_parachain_primitives::primitives::HeadData(Default::default()),
1588 relay_parent_number: 1,
1589 relay_parent_storage_root: Default::default(),
1590 max_pov_size: 1_000,
1591 };
1592 <ValidationData<T>>::put(&vfp);
1593
1594 let host_config = AbridgedHostConfiguration {
1596 max_code_size,
1597 max_head_data_size: 32 * 1024,
1598 max_upward_queue_count: 8,
1599 max_upward_queue_size: 1024 * 1024,
1600 max_upward_message_size: 4 * 1024,
1601 max_upward_message_num_per_candidate: 2,
1602 hrmp_max_message_num_per_candidate: 2,
1603 validation_upgrade_cooldown: 2,
1604 validation_upgrade_delay: 2,
1605 async_backing_params: relay_chain::AsyncBackingParams {
1606 allowed_ancestry_len: 0,
1607 max_candidate_depth: 0,
1608 },
1609 };
1610 <HostConfiguration<T>>::put(host_config);
1611 }
1612}
1613
/// An implementation of `frame_system::SetCode` that routes code changes through the
/// parachain upgrade flow ([`Pallet::schedule_code_upgrade`]) instead of writing `:code`
/// directly, so the relay chain can authorize and apply the upgrade.
pub struct ParachainSetCode<T>(core::marker::PhantomData<T>);
impl<T: Config> frame_system::SetCode<T> for ParachainSetCode<T> {
	fn set_code(code: Vec<u8>) -> DispatchResult {
		Pallet::<T>::schedule_code_upgrade(code)
	}
}
1621
1622impl<T: Config> Pallet<T> {
1623 pub fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1629 let message_len = message.len();
1630 if let Some(cfg) = HostConfiguration::<T>::get() {
1643 if message_len > cfg.max_upward_message_size as usize {
1644 return Err(MessageSendError::TooBig);
1645 }
1646 let threshold =
1647 cfg.max_upward_queue_size.saturating_div(ump_constants::THRESHOLD_FACTOR);
1648 <PendingUpwardMessages<T>>::append(message.clone());
1651 let pending_messages = PendingUpwardMessages::<T>::get();
1652 let total_size: usize = pending_messages.iter().map(UpwardMessage::len).sum();
1653 if total_size > threshold as usize {
1654 Self::increase_fee_factor((), message_len as u128);
1656 }
1657 } else {
1658 <PendingUpwardMessages<T>>::append(message.clone());
1668 };
1669
1670 let hash = sp_io::hashing::blake2_256(&message);
1673 Self::deposit_event(Event::UpwardMessageSent { message_hash: Some(hash) });
1674 Ok((0, hash))
1675 }
1676
1677 pub fn last_relay_block_number() -> RelayChainBlockNumber {
1680 LastRelayChainBlockNumber::<T>::get()
1681 }
1682}
1683
1684impl<T: Config> UpwardMessageSender for Pallet<T> {
1685 fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1686 Self::send_upward_message(message)
1687 }
1688
1689 fn can_send_upward_message(message: &UpwardMessage) -> Result<(), MessageSendError> {
1690 let max_upward_message_size = HostConfiguration::<T>::get()
1691 .map(|cfg| cfg.max_upward_message_size)
1692 .ok_or(MessageSendError::Other)?;
1693 if message.len() > max_upward_message_size as usize {
1694 Err(MessageSendError::TooBig)
1695 } else {
1696 Ok(())
1697 }
1698 }
1699
1700 #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
1701 fn ensure_successful_delivery() {
1702 const MAX_UPWARD_MESSAGE_SIZE: u32 = 65_531 * 3;
1703 const MAX_CODE_SIZE: u32 = 3 * 1024 * 1024;
1704 HostConfiguration::<T>::mutate(|cfg| match cfg {
1705 Some(cfg) => cfg.max_upward_message_size = MAX_UPWARD_MESSAGE_SIZE,
1706 None =>
1707 *cfg = Some(AbridgedHostConfiguration {
1708 max_code_size: MAX_CODE_SIZE,
1709 max_head_data_size: 32 * 1024,
1710 max_upward_queue_count: 8,
1711 max_upward_queue_size: 1024 * 1024,
1712 max_upward_message_size: MAX_UPWARD_MESSAGE_SIZE,
1713 max_upward_message_num_per_candidate: 2,
1714 hrmp_max_message_num_per_candidate: 2,
1715 validation_upgrade_cooldown: 2,
1716 validation_upgrade_delay: 2,
1717 async_backing_params: relay_chain::AsyncBackingParams {
1718 allowed_ancestry_len: 0,
1719 max_candidate_depth: 0,
1720 },
1721 }),
1722 })
1723 }
1724}
1725
1726impl<T: Config> InspectMessageQueues for Pallet<T> {
1727 fn clear_messages() {
1728 PendingUpwardMessages::<T>::kill();
1729 }
1730
1731 fn get_messages() -> Vec<(VersionedLocation, Vec<VersionedXcm<()>>)> {
1732 use xcm::prelude::*;
1733
1734 let messages: Vec<VersionedXcm<()>> = PendingUpwardMessages::<T>::get()
1735 .iter()
1736 .map(|encoded_message| {
1737 VersionedXcm::<()>::decode_all_with_depth_limit(
1738 MAX_XCM_DECODE_DEPTH,
1739 &mut &encoded_message[..],
1740 )
1741 .unwrap()
1742 })
1743 .collect();
1744
1745 if messages.is_empty() {
1746 vec![]
1747 } else {
1748 vec![(VersionedLocation::from(Location::parent()), messages)]
1749 }
1750 }
1751}
1752
#[cfg(feature = "runtime-benchmarks")]
impl<T: Config> polkadot_runtime_parachains::EnsureForParachain for Pallet<T> {
	fn ensure(para_id: ParaId) {
		// Only open a fresh outbound channel when none exists yet.
		if matches!(Self::get_channel_status(para_id), ChannelStatus::Closed) {
			Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id)
		}
	}
}
1761
/// Hooks for runtime components that want to be notified of parachain-system events.
/// Implemented for tuples of up to 30 elements, which all get invoked in order.
#[impl_trait_for_tuples::impl_for_tuples(30)]
pub trait OnSystemEvent {
	/// Called with the [`PersistedValidationData`] of the current block.
	fn on_validation_data(data: &PersistedValidationData);
	/// Called once a scheduled validation code upgrade has been applied.
	fn on_validation_code_applied();
}
1777
/// A snapshot of relay chain state: block number and storage root.
#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Default, Debug)]
pub struct RelayChainState {
	/// The relay chain block number.
	pub number: relay_chain::BlockNumber,
	/// The relay chain storage root at that block.
	pub state_root: relay_chain::Hash,
}
1786
/// A source of the current relay chain state.
pub trait RelaychainStateProvider {
	/// Returns the current [`RelayChainState`].
	fn current_relay_chain_state() -> RelayChainState;

	/// Overrides the relay chain state, for benchmarking purposes only.
	/// The default implementation is a no-op.
	#[cfg(feature = "runtime-benchmarks")]
	fn set_current_relay_chain_state(_state: RelayChainState) {}
}
1803
/// Deprecated alias of [`RelaychainDataProvider`], kept for backwards compatibility.
#[deprecated = "Use `RelaychainDataProvider` instead"]
pub type RelaychainBlockNumberProvider<T> = RelaychainDataProvider<T>;
1813
1814pub struct RelaychainDataProvider<T>(core::marker::PhantomData<T>);
1824
1825impl<T: Config> BlockNumberProvider for RelaychainDataProvider<T> {
1826 type BlockNumber = relay_chain::BlockNumber;
1827
1828 fn current_block_number() -> relay_chain::BlockNumber {
1829 ValidationData::<T>::get()
1830 .map(|d| d.relay_parent_number)
1831 .unwrap_or_else(|| Pallet::<T>::last_relay_block_number())
1832 }
1833
1834 #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
1835 fn set_block_number(block: Self::BlockNumber) {
1836 let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1837 PersistedValidationData {
1839 parent_head: vec![].into(),
1840 relay_parent_number: Default::default(),
1841 max_pov_size: Default::default(),
1842 relay_parent_storage_root: Default::default(),
1843 });
1844 validation_data.relay_parent_number = block;
1845 ValidationData::<T>::put(validation_data)
1846 }
1847}
1848
1849impl<T: Config> RelaychainStateProvider for RelaychainDataProvider<T> {
1850 fn current_relay_chain_state() -> RelayChainState {
1851 ValidationData::<T>::get()
1852 .map(|d| RelayChainState {
1853 number: d.relay_parent_number,
1854 state_root: d.relay_parent_storage_root,
1855 })
1856 .unwrap_or_default()
1857 }
1858
1859 #[cfg(feature = "runtime-benchmarks")]
1860 fn set_current_relay_chain_state(state: RelayChainState) {
1861 let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1862 PersistedValidationData {
1864 parent_head: vec![].into(),
1865 relay_parent_number: Default::default(),
1866 max_pov_size: Default::default(),
1867 relay_parent_storage_root: Default::default(),
1868 });
1869 validation_data.relay_parent_number = state.number;
1870 validation_data.relay_parent_storage_root = state.state_root;
1871 ValidationData::<T>::put(validation_data)
1872 }
1873}