1#![cfg_attr(not(feature = "std"), no_std)]
18
19extern crate alloc;
31
32use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
33use codec::{Decode, DecodeLimit, Encode};
34use core::cmp;
35use cumulus_primitives_core::{
36 relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo,
37 CumulusDigestItem, GetChannelInfo, ListChannelInfos, MessageSendError, OutboundHrmpMessage,
38 ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler,
39 XcmpMessageSource,
40};
41use cumulus_primitives_parachain_inherent::{v0, MessageQueueChain, ParachainInherentData};
42use frame_support::{
43 dispatch::{DispatchClass, DispatchResult},
44 ensure,
45 inherent::{InherentData, InherentIdentifier, ProvideInherent},
46 traits::{Get, HandleMessage},
47 weights::Weight,
48};
49use frame_system::{ensure_none, ensure_root, pallet_prelude::HeaderFor};
50use parachain_inherent::{
51 deconstruct_parachain_inherent_data, AbridgedInboundDownwardMessages,
52 AbridgedInboundHrmpMessages, BasicParachainInherentData, InboundMessageId, InboundMessagesData,
53};
54use polkadot_parachain_primitives::primitives::RelayChainBlockNumber;
55use polkadot_runtime_parachains::{FeeTracker, GetMinFeeFactor};
56use scale_info::TypeInfo;
57use sp_runtime::{
58 traits::{BlockNumberProvider, Hash},
59 FixedU128, RuntimeDebug, SaturatedConversion,
60};
61use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, MAX_XCM_DECODE_DEPTH};
62use xcm_builder::InspectMessageQueues;
63
64mod benchmarking;
65pub mod migration;
66mod mock;
67#[cfg(test)]
68mod tests;
69pub mod weights;
70
71pub use weights::WeightInfo;
72
73mod unincluded_segment;
74
75pub mod consensus_hook;
76pub mod relay_state_snapshot;
77#[macro_use]
78pub mod validate_block;
79mod descendant_validation;
80pub mod parachain_inherent;
81
82use unincluded_segment::{
83 HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker,
84};
85
86pub use consensus_hook::{ConsensusHook, ExpectParentIncluded};
87pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block;
108pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof};
109pub use unincluded_segment::{Ancestor, UsedBandwidth};
110
111pub use pallet::*;
112
113const LOG_TARGET: &str = "parachain-system";
114
/// Policy for validating the relay chain block number associated with the
/// current parachain block against the one of its parent.
pub trait CheckAssociatedRelayNumber {
	/// Check the relay parent number of the current block (`current`) against
	/// that of the previous block (`previous`).
	///
	/// Implementations are expected to panic — aborting block execution — when
	/// the pair violates their policy.
	fn check_associated_relay_number(
		current: RelayChainBlockNumber,
		previous: RelayChainBlockNumber,
	);
}
132
133pub struct RelayNumberStrictlyIncreases;
138
139impl CheckAssociatedRelayNumber for RelayNumberStrictlyIncreases {
140 fn check_associated_relay_number(
141 current: RelayChainBlockNumber,
142 previous: RelayChainBlockNumber,
143 ) {
144 if current <= previous {
145 panic!("Relay chain block number needs to strictly increase between Parachain blocks!")
146 }
147 }
148}
149
/// Policy that accepts any relay chain block number pairing, i.e. performs no
/// check at all.
pub struct AnyRelayNumber;

impl CheckAssociatedRelayNumber for AnyRelayNumber {
	// Intentionally a no-op: every (current, previous) combination is allowed.
	fn check_associated_relay_number(_: RelayChainBlockNumber, _: RelayChainBlockNumber) {}
}
159
160pub struct RelayNumberMonotonicallyIncreases;
165
166impl CheckAssociatedRelayNumber for RelayNumberMonotonicallyIncreases {
167 fn check_associated_relay_number(
168 current: RelayChainBlockNumber,
169 previous: RelayChainBlockNumber,
170 ) {
171 if current < previous {
172 panic!("Relay chain block number needs to monotonically increase between Parachain blocks!")
173 }
174 }
175}
176
/// Convenience alias for the maximum DMP message length of the runtime's
/// configured `DmpQueue` handler.
pub type MaxDmpMessageLenOf<T> = <<T as Config>::DmpQueue as HandleMessage>::MaxMessageLen;
179
/// Constants used when working with the upward message (UMP) queue.
pub mod ump_constants {
	/// Divisor of `max_upward_queue_size`: once the pending queue shrinks to or
	/// below `max_upward_queue_size / THRESHOLD_FACTOR`, the delivery fee factor
	/// is allowed to decrease again (see `on_finalize`).
	pub const THRESHOLD_FACTOR: u32 = 2;
}
186
187#[frame_support::pallet]
188pub mod pallet {
189 use super::*;
190 use cumulus_primitives_core::CoreInfoExistsAtMaxOnce;
191 use frame_support::pallet_prelude::*;
192 use frame_system::pallet_prelude::*;
193
	/// The parachain-system pallet type.
	#[pallet::pallet]
	#[pallet::storage_version(migration::STORAGE_VERSION)]
	#[pallet::without_storage_info]
	pub struct Pallet<T>(_);
198
	/// Configuration of this pallet.
	#[pallet::config]
	pub trait Config: frame_system::Config<OnSetCode = ParachainSetCode<Self>> {
		/// The overarching event type of the runtime.
		#[allow(deprecated)]
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// Hook notified about validation data and applied validation code (see
		/// its usage in `set_validation_data`).
		type OnSystemEvent: OnSystemEvent;

		/// The `ParaId` of this parachain, used e.g. when verifying the relay
		/// chain state proof.
		#[pallet::constant]
		type SelfParaId: Get<ParaId>;

		/// Source of outbound XCMP (HRMP) messages, drained in `on_finalize`.
		type OutboundXcmpMessageSource: XcmpMessageSource;

		/// Queue that inbound downward messages are handed to.
		type DmpQueue: HandleMessage;

		/// Weight reserved for processing downward messages.
		// NOTE(review): presumably overridable via `ReservedDmpWeightOverride`
		// storage — the override read is not visible in this chunk; confirm.
		type ReservedDmpWeight: Get<Weight>;

		/// Handler for inbound XCMP (HRMP) messages.
		type XcmpMessageHandler: XcmpMessageHandler;

		/// Weight reserved for processing XCMP messages, unless overridden via
		/// the `ReservedXcmpWeightOverride` storage item.
		type ReservedXcmpWeight: Get<Weight>;

		/// Policy checking the relay parent number of consecutive blocks.
		type CheckAssociatedRelayNumber: CheckAssociatedRelayNumber;

		/// Weight information for the extrinsics of this pallet.
		type WeightInfo: WeightInfo;

		/// Consensus hook consulted with the relay chain state proof; also
		/// yields the unincluded segment capacity.
		type ConsensusHook: ConsensusHook;

		/// Expected offset between the relay parent used by this chain and the
		/// best relay block, enforced via relay parent descendant verification
		/// in `set_validation_data`.
		type RelayParentOffset: Get<u32>;
	}
266
267 #[pallet::hooks]
268 impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		/// Block finalization: records the relay parent number, drains pending
		/// upward and outbound HRMP messages within the relay-imposed limits,
		/// and appends this block's bandwidth usage to the unincluded segment.
		fn on_finalize(_: BlockNumberFor<T>) {
			// Reset per-block flags/signals; the go-ahead signal is consumed
			// further below when building the segment ancestor.
			<DidSetValidationCode<T>>::kill();
			<UpgradeRestrictionSignal<T>>::kill();
			let relay_upgrade_go_ahead = <UpgradeGoAhead<T>>::take();

			let vfp = <ValidationData<T>>::get().expect(
				r"Missing required set_validation_data inherent. This inherent must be
			present in every block. This error typically occurs when the set_validation_data
			execution failed and was rejected by the block builder. Check earlier log entries
			for the specific cause of the failure.",
			);

			LastRelayChainBlockNumber::<T>::put(vfp.relay_parent_number);

			let host_config = match HostConfiguration::<T>::get() {
				Some(ok) => ok,
				None => {
					debug_assert!(
						false,
						"host configuration is promised to set until `on_finalize`; qed",
					);
					return
				},
			};

			let total_bandwidth_out = match RelevantMessagingState::<T>::get() {
				Some(s) => OutboundBandwidthLimits::from_relay_chain_state(&s),
				None => {
					debug_assert!(
						false,
						"relevant messaging state is promised to be set until `on_finalize`; \
						qed",
					);
					return
				},
			};

			// Shrink the egress limits by what the unincluded segment already
			// consumed before draining any queues.
			Self::adjust_egress_bandwidth_limits();

			let (ump_msg_count, ump_total_bytes) = <PendingUpwardMessages<T>>::mutate(|up| {
				let (available_capacity, available_size) = match RelevantMessagingState::<T>::get()
				{
					Some(limits) => (
						limits.relay_dispatch_queue_remaining_capacity.remaining_count,
						limits.relay_dispatch_queue_remaining_capacity.remaining_size,
					),
					None => {
						debug_assert!(
							false,
							"relevant messaging state is promised to be set until `on_finalize`; \
							qed",
						);
						return (0, 0)
					},
				};

				let available_capacity =
					cmp::min(available_capacity, host_config.max_upward_message_num_per_candidate);

				// Find the longest prefix of pending messages that fits both the
				// remaining message-count and total-size budgets.
				let (num, total_size) = up
					.iter()
					.scan((0u32, 0u32), |state, msg| {
						let (cap_used, size_used) = *state;
						let new_cap = cap_used.saturating_add(1);
						let new_size = size_used.saturating_add(msg.len() as u32);
						match available_capacity
							.checked_sub(new_cap)
							.and(available_size.checked_sub(new_size))
						{
							Some(_) => {
								*state = (new_cap, new_size);
								Some(*state)
							},
							_ => None,
						}
					})
					.last()
					.unwrap_or_default();

				// Publish that prefix for this candidate; keep the rest pending.
				UpwardMessages::<T>::put(&up[..num as usize]);
				*up = up.split_off(num as usize);

				#[cfg(feature = "experimental-ump-signals")]
				Self::send_ump_signal();

				// Once the pending queue shrinks to half (THRESHOLD_FACTOR) of
				// the maximum size or less, let the UMP fee factor decay.
				let threshold = host_config
					.max_upward_queue_size
					.saturating_div(ump_constants::THRESHOLD_FACTOR);
				let remaining_total_size: usize = up.iter().map(UpwardMessage::len).sum();
				if remaining_total_size <= threshold as usize {
					Self::decrease_fee_factor(());
				}

				(num, total_size)
			});

			let maximum_channels = host_config
				.hrmp_max_message_num_per_candidate
				.min(<AnnouncedHrmpMessagesPerCandidate<T>>::take())
				as usize;

			let outbound_messages =
				T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels)
					.into_iter()
					.map(|(recipient, data)| OutboundHrmpMessage { recipient, data })
					.collect::<Vec<_>>();

			// Record this block in the unincluded segment, accounting the
			// bandwidth it used and, at most once per segment, a consumed
			// upgrade go-ahead signal.
			{
				let hrmp_outgoing = outbound_messages
					.iter()
					.map(|msg| {
						(
							msg.recipient,
							HrmpChannelUpdate { msg_count: 1, total_bytes: msg.data.len() as u32 },
						)
					})
					.collect();
				let used_bandwidth =
					UsedBandwidth { ump_msg_count, ump_total_bytes, hrmp_outgoing };

				let mut aggregated_segment =
					AggregatedUnincludedSegment::<T>::get().unwrap_or_default();
				let consumed_go_ahead_signal =
					if aggregated_segment.consumed_go_ahead_signal().is_some() {
						None
					} else {
						relay_upgrade_go_ahead
					};
				let ancestor = Ancestor::new_unchecked(used_bandwidth, consumed_go_ahead_signal);

				let watermark = HrmpWatermark::<T>::get();
				let watermark_update = HrmpWatermarkUpdate::new(watermark, vfp.relay_parent_number);

				aggregated_segment
					.append(&ancestor, watermark_update, &total_bandwidth_out)
					.expect("unincluded segment limits exceeded");
				AggregatedUnincludedSegment::<T>::put(aggregated_segment);
				UnincludedSegment::<T>::append(ancestor);
			}
			HrmpOutboundMessages::<T>::put(outbound_messages);
		}
446
		/// Block initialization: clears leftover per-block state from the parent
		/// block, fixes up the unincluded segment's parent head hash, validates
		/// the `CoreInfo` digest, and accounts weight for the work done here and
		/// later in `on_finalize`.
		fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
			let mut weight = Weight::zero();

			// If the parent block did not set new validation code, make sure
			// none is advertised to the relay chain.
			if !<DidSetValidationCode<T>>::get() {
				NewValidationCode::<T>::kill();
				weight += T::DbWeight::get().writes(1);
			}

			{
				// The para head hash of the newest segment entry only becomes
				// known now: it is this block's parent hash.
				<UnincludedSegment<T>>::mutate(|chain| {
					if let Some(ancestor) = chain.last_mut() {
						let parent = frame_system::Pallet::<T>::parent_hash();
						ancestor.replace_para_head_hash(parent);
					}
				});
				weight += T::DbWeight::get().reads_writes(1, 1);

				weight += T::DbWeight::get().reads_writes(3, 2);
			}

			// Drop state that must be freshly supplied by this block's inherent.
			ValidationData::<T>::kill();
			ProcessedDownwardMessages::<T>::kill();
			UpwardMessages::<T>::kill();
			HrmpOutboundMessages::<T>::kill();
			CustomValidationHeadData::<T>::kill();
			HrmpWatermark::<T>::get();
			weight += T::DbWeight::get().reads_writes(1, 5);

			weight += T::DbWeight::get().reads_writes(1, 1);
			// Remember how many HRMP messages were announced for this candidate;
			// consumed (`take`n) again in `on_finalize`.
			let hrmp_max_message_num_per_candidate = HostConfiguration::<T>::get()
				.map(|cfg| cfg.hrmp_max_message_num_per_candidate)
				.unwrap_or(0);
			<AnnouncedHrmpMessagesPerCandidate<T>>::put(hrmp_max_message_num_per_candidate);

			weight += T::DbWeight::get().reads_writes(
				3 + hrmp_max_message_num_per_candidate as u64,
				4 + hrmp_max_message_num_per_candidate as u64,
			);

			weight += T::DbWeight::get().reads_writes(1, 1);

			weight += T::DbWeight::get().reads_writes(6, 3);

			weight += T::DbWeight::get().reads(1);

			// A `CoreInfo` digest item may appear at most once; when present its
			// claim queue offset must match the configured `RelayParentOffset`.
			match CumulusDigestItem::core_info_exists_at_max_once(
				&frame_system::Pallet::<T>::digest(),
			) {
				CoreInfoExistsAtMaxOnce::Once(core_info) => {
					assert_eq!(
						core_info.claim_queue_offset.0,
						T::RelayParentOffset::get() as u8,
						"Only {} is supported as valid claim queue offset",
						T::RelayParentOffset::get()
					);
				},
				CoreInfoExistsAtMaxOnce::NotFound => {},
				CoreInfoExistsAtMaxOnce::MoreThanOnce => {
					panic!("`CumulusDigestItem::CoreInfo` must exist at max once.");
				},
			}

			weight
		}
549 }
550
551 #[pallet::call]
552 impl<T: Config> Pallet<T> {
		/// Provide the parachain inherent for this block: persisted validation
		/// data, relay chain state proof, relay parent descendants and the
		/// inbound DMP/HRMP messages.
		///
		/// As an inherent this is unsigned and must execute exactly once per
		/// block — enforced by the `ValidationData` existence assert below.
		#[pallet::call_index(0)]
		#[pallet::weight((0, DispatchClass::Mandatory))]
		pub fn set_validation_data(
			origin: OriginFor<T>,
			data: BasicParachainInherentData,
			inbound_messages_data: InboundMessagesData,
		) -> DispatchResult {
			ensure_none(origin)?;
			assert!(
				!<ValidationData<T>>::exists(),
				"ValidationData must be updated only once in a block",
			);

			let mut total_weight = Weight::zero();

			let BasicParachainInherentData {
				validation_data: vfp,
				relay_chain_state,
				relay_parent_descendants,
				collator_peer_id: _,
			} = data;

			// Enforce the configured relationship between this block's relay
			// parent number and the previous block's.
			T::CheckAssociatedRelayNumber::check_associated_relay_number(
				vfp.relay_parent_number,
				LastRelayChainBlockNumber::<T>::get(),
			);

			let relay_state_proof = RelayChainStateProof::new(
				T::SelfParaId::get(),
				vfp.relay_parent_storage_root,
				relay_chain_state.clone(),
			)
			.expect("Invalid relay chain state proof");

			// When a relay parent offset is configured, the provided chain of
			// descendant headers must be verified against the state proof.
			let expected_rp_descendants_num = T::RelayParentOffset::get();

			if expected_rp_descendants_num > 0 {
				if let Err(err) = descendant_validation::verify_relay_parent_descendants(
					&relay_state_proof,
					relay_parent_descendants,
					vfp.relay_parent_storage_root,
					expected_rp_descendants_num,
				) {
					panic!(
						"Unable to verify provided relay parent descendants. \
						expected_rp_descendants_num: {expected_rp_descendants_num} \
						error: {err:?}"
					);
				};
			}

			let (consensus_hook_weight, capacity) =
				T::ConsensusHook::on_state_proof(&relay_state_proof);
			total_weight += consensus_hook_weight;
			total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity);
			frame_system::Pallet::<T>::deposit_log(
				cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item(
					vfp.relay_parent_storage_root,
					vfp.relay_parent_number,
				),
			);

			let upgrade_go_ahead_signal = relay_state_proof
				.read_upgrade_go_ahead_signal()
				.expect("Invalid upgrade go ahead signal");

			// A signal already consumed by an ancestor in the unincluded segment
			// must agree with what the relay chain reports now.
			let upgrade_signal_in_segment = AggregatedUnincludedSegment::<T>::get()
				.as_ref()
				.and_then(SegmentTracker::consumed_go_ahead_signal);
			if let Some(signal_in_segment) = upgrade_signal_in_segment.as_ref() {
				assert_eq!(upgrade_go_ahead_signal, Some(*signal_in_segment));
			}
			match upgrade_go_ahead_signal {
				// Already handled by an ancestor in the segment; nothing to do.
				Some(_signal) if upgrade_signal_in_segment.is_some() => {
				},
				Some(relay_chain::UpgradeGoAhead::GoAhead) => {
					assert!(
						<PendingValidationCode<T>>::exists(),
						"No new validation function found in storage, GoAhead signal is not expected",
					);
					let validation_code = <PendingValidationCode<T>>::take();

					frame_system::Pallet::<T>::update_code_in_storage(&validation_code);
					<T::OnSystemEvent as OnSystemEvent>::on_validation_code_applied();
					Self::deposit_event(Event::ValidationFunctionApplied {
						relay_chain_block_num: vfp.relay_parent_number,
					});
				},
				Some(relay_chain::UpgradeGoAhead::Abort) => {
					<PendingValidationCode<T>>::kill();
					Self::deposit_event(Event::ValidationFunctionDiscarded);
				},
				None => {},
			}
			<UpgradeRestrictionSignal<T>>::put(
				relay_state_proof
					.read_upgrade_restriction_signal()
					.expect("Invalid upgrade restriction signal"),
			);
			<UpgradeGoAhead<T>>::put(upgrade_go_ahead_signal);

			let host_config = relay_state_proof
				.read_abridged_host_configuration()
				.expect("Invalid host configuration in relay chain state proof");

			let relevant_messaging_state = relay_state_proof
				.read_messaging_state_snapshot(&host_config)
				.expect("Invalid messaging state in relay chain state proof");

			<ValidationData<T>>::put(&vfp);
			<RelayStateProof<T>>::put(relay_chain_state);
			<RelevantMessagingState<T>>::put(relevant_messaging_state.clone());
			<HostConfiguration<T>>::put(host_config);

			<T::OnSystemEvent as OnSystemEvent>::on_validation_data(&vfp);

			// Enqueue the inbound messages, verifying the MQC heads on the way.
			total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages(
				relevant_messaging_state.dmq_mqc_head,
				inbound_messages_data.downward_messages,
			));
			total_weight.saturating_accrue(Self::enqueue_inbound_horizontal_messages(
				&relevant_messaging_state.ingress_channels,
				inbound_messages_data.horizontal_messages,
				vfp.relay_parent_number,
			));

			frame_system::Pallet::<T>::register_extra_weight_unchecked(
				total_weight,
				DispatchClass::Mandatory,
			);

			Ok(())
		}
715
		/// Send an upward message to the relay chain, as root.
		///
		/// The result of `send_upward_message` is deliberately discarded: a
		/// failure to queue the message is not surfaced to the caller.
		#[pallet::call_index(1)]
		#[pallet::weight((1_000, DispatchClass::Operational))]
		pub fn sudo_send_upward_message(
			origin: OriginFor<T>,
			message: UpwardMessage,
		) -> DispatchResult {
			ensure_root(origin)?;
			let _ = Self::send_upward_message(message);
			Ok(())
		}
726
727 }
730
	/// Events emitted by this pallet.
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// The validation function has been stored.
		ValidationFunctionStored,
		/// The validation function was applied as of the contained relay chain
		/// block number.
		ValidationFunctionApplied { relay_chain_block_num: RelayChainBlockNumber },
		/// The relay-chain aborted the upgrade process.
		ValidationFunctionDiscarded,
		/// Some downward messages have been received and will be processed.
		DownwardMessagesReceived { count: u32 },
		/// Downward messages were processed using the given weight, resulting
		/// in the given DMQ MQC head.
		DownwardMessagesProcessed { weight_used: Weight, dmq_head: relay_chain::Hash },
		/// An upward message was sent to the relay chain.
		UpwardMessageSent { message_hash: Option<XcmHash> },
	}
747
	/// Errors of this pallet.
	#[pallet::error]
	pub enum Error<T> {
		/// Attempt to upgrade the validation function while an upgrade is
		/// already pending.
		OverlappingUpgrades,
		/// Polkadot currently prohibits this parachain from upgrading its
		/// validation function (see `UpgradeRestrictionSignal`).
		ProhibitedByPolkadot,
		/// The supplied validation function exceeds the maximum code size the
		/// relay chain allows.
		TooBig,
		/// The inherent which supplies the validation data did not run this
		/// block.
		ValidationDataNotAvailable,
		/// The inherent which supplies the host configuration did not run this
		/// block.
		HostConfigurationNotAvailable,
		/// No validation function upgrade is currently scheduled.
		NotScheduled,
	}
764
	/// The unincluded segment: blocks built on top of the latest included block
	/// that the relay chain has not included yet, oldest first.
	#[pallet::storage]
	pub type UnincludedSegment<T: Config> = StorageValue<_, Vec<Ancestor<T::Hash>>, ValueQuery>;

	/// Aggregated bandwidth/signal tracking over the whole [`UnincludedSegment`].
	#[pallet::storage]
	pub type AggregatedUnincludedSegment<T: Config> =
		StorageValue<_, SegmentTracker<T::Hash>, OptionQuery>;

	/// Validation code scheduled to be applied once the relay chain signals
	/// `GoAhead` (see `set_validation_data`).
	#[pallet::storage]
	pub type PendingValidationCode<T: Config> = StorageValue<_, Vec<u8>, ValueQuery>;

	/// New validation code advertised to the relay chain for this block, if
	/// any; cleared in `on_initialize` unless `DidSetValidationCode` was set.
	#[pallet::storage]
	pub type NewValidationCode<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;

	/// The [`PersistedValidationData`] provided by this block's inherent.
	#[pallet::storage]
	pub type ValidationData<T: Config> = StorageValue<_, PersistedValidationData>;

	/// Whether new validation code was set in this block (see
	/// `notify_polkadot_of_pending_upgrade`).
	#[pallet::storage]
	pub type DidSetValidationCode<T: Config> = StorageValue<_, bool, ValueQuery>;

	/// Relay parent number of the previous parachain block; checked against the
	/// current one via `CheckAssociatedRelayNumber`.
	#[pallet::storage]
	pub type LastRelayChainBlockNumber<T: Config> =
		StorageValue<_, RelayChainBlockNumber, ValueQuery>;

	/// Upgrade restriction signal read from the relay state proof; `Some` means
	/// code upgrades are currently prohibited (see `schedule_code_upgrade`).
	#[pallet::storage]
	pub type UpgradeRestrictionSignal<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeRestriction>, ValueQuery>;

	/// Upgrade go-ahead signal read from the relay state proof.
	#[pallet::storage]
	pub type UpgradeGoAhead<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeGoAhead>, ValueQuery>;

	/// The relay chain state proof provided by this block's inherent.
	#[pallet::storage]
	pub type RelayStateProof<T: Config> = StorageValue<_, sp_trie::StorageProof>;

	/// Snapshot of the messaging-related relay chain state, adjusted by
	/// `adjust_egress_bandwidth_limits` for unincluded-segment usage.
	#[pallet::storage]
	pub type RelevantMessagingState<T: Config> = StorageValue<_, MessagingStateSnapshot>;

	/// The abridged host configuration read from the relay state proof.
	#[pallet::storage]
	#[pallet::disable_try_decode_storage]
	pub type HostConfiguration<T: Config> = StorageValue<_, AbridgedHostConfiguration>;

	/// Message queue chain head of the downward message queue, extended as
	/// messages are enqueued.
	#[pallet::storage]
	pub type LastDmqMqcHead<T: Config> = StorageValue<_, MessageQueueChain, ValueQuery>;

	/// Message queue chain heads, one per HRMP ingress channel sender.
	#[pallet::storage]
	pub type LastHrmpMqcHeads<T: Config> =
		StorageValue<_, BTreeMap<ParaId, MessageQueueChain>, ValueQuery>;

	/// Number of downward messages processed in this block.
	#[pallet::storage]
	pub type ProcessedDownwardMessages<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// Identifier of the last processed downward message; used by
	/// `do_create_inherent` to drop already-processed messages.
	#[pallet::storage]
	pub type LastProcessedDownwardMessage<T: Config> = StorageValue<_, InboundMessageId>;

	/// HRMP watermark: relay block number up to which inbound HRMP messages
	/// have been fully processed.
	#[pallet::storage]
	pub type HrmpWatermark<T: Config> = StorageValue<_, relay_chain::BlockNumber, ValueQuery>;

	/// Identifier of the last processed HRMP message; used by
	/// `do_create_inherent` to drop already-processed messages.
	#[pallet::storage]
	pub type LastProcessedHrmpMessage<T: Config> = StorageValue<_, InboundMessageId>;

	/// Outbound HRMP messages selected for this candidate in `on_finalize`.
	#[pallet::storage]
	pub type HrmpOutboundMessages<T: Config> =
		StorageValue<_, Vec<OutboundHrmpMessage>, ValueQuery>;

	/// Upward messages selected for this candidate in `on_finalize`.
	#[pallet::storage]
	pub type UpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

	/// Upward messages waiting for relay dispatch queue capacity.
	#[pallet::storage]
	pub type PendingUpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

	/// Multiplicative fee factor for upward message delivery (see the
	/// [`FeeTracker`] impl).
	#[pallet::storage]
	pub type UpwardDeliveryFeeFactor<T: Config> =
		StorageValue<_, FixedU128, ValueQuery, GetMinFeeFactor<Pallet<T>>>;

	/// Number of HRMP messages announced for this candidate; set in
	/// `on_initialize`, consumed (`take`n) in `on_finalize`.
	#[pallet::storage]
	pub type AnnouncedHrmpMessagesPerCandidate<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// Optional override for `Config::ReservedXcmpWeight`.
	#[pallet::storage]
	pub type ReservedXcmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// Optional override for `Config::ReservedDmpWeight`.
	#[pallet::storage]
	pub type ReservedDmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// Custom head data set for this block, if any.
	#[pallet::storage]
	pub type CustomValidationHeadData<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
943
	/// Provides the `set_validation_data` inherent.
	#[pallet::inherent]
	impl<T: Config> ProvideInherent for Pallet<T> {
		type Call = Call<T>;
		type Error = sp_inherents::MakeFatalError<()>;
		const INHERENT_IDENTIFIER: InherentIdentifier =
			cumulus_primitives_parachain_inherent::INHERENT_IDENTIFIER;

		/// Build the `set_validation_data` call from the inherent data, falling
		/// back to the legacy v0 inherent identifier when data under the current
		/// identifier is absent.
		fn create_inherent(data: &InherentData) -> Option<Self::Call> {
			let data = match data
				.get_data::<ParachainInherentData>(&Self::INHERENT_IDENTIFIER)
				.ok()
				.flatten()
			{
				None => {
					// Try the v0 format and convert it to the current one.
					let data = data
						.get_data::<v0::ParachainInherentData>(
							&cumulus_primitives_parachain_inherent::PARACHAIN_INHERENT_IDENTIFIER_V0,
						)
						.ok()
						.flatten()?;
					data.into()
				},
				Some(data) => data,
			};

			Some(Self::do_create_inherent(data))
		}

		fn is_inherent(call: &Self::Call) -> bool {
			matches!(call, Call::set_validation_data { .. })
		}
	}
980
	/// Genesis config: carries no data of its own.
	#[pallet::genesis_config]
	#[derive(frame_support::DefaultNoBound)]
	pub struct GenesisConfig<T: Config> {
		// Not serialized; only ties the config to the runtime type `T`.
		#[serde(skip)]
		pub _config: core::marker::PhantomData<T>,
	}

	#[pallet::genesis_build]
	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
		fn build(&self) {
			// Writes an empty value under the well-known `:c` key.
			// NOTE(review): presumably marks the chain state as Cumulus-based —
			// confirm against the consumers of this key.
			sp_io::storage::set(b":c", &[]);
		}
	}
995}
996
997impl<T: Config> Pallet<T> {
998 pub fn unincluded_segment_size_after(included_hash: T::Hash) -> u32 {
1006 let segment = UnincludedSegment::<T>::get();
1007 crate::unincluded_segment::size_after_included(included_hash, &segment)
1008 }
1009}
1010
/// UMP delivery fee tracking, backed by the [`UpwardDeliveryFeeFactor`]
/// storage item.
impl<T: Config> FeeTracker for Pallet<T> {
	// Only a single, anonymous destination (the relay chain) is tracked.
	type Id = ();

	fn get_fee_factor(_id: Self::Id) -> FixedU128 {
		UpwardDeliveryFeeFactor::<T>::get()
	}

	fn set_fee_factor(_id: Self::Id, val: FixedU128) {
		UpwardDeliveryFeeFactor::<T>::set(val);
	}
}
1022
1023impl<T: Config> ListChannelInfos for Pallet<T> {
1024 fn outgoing_channels() -> Vec<ParaId> {
1025 let Some(state) = RelevantMessagingState::<T>::get() else { return Vec::new() };
1026 state.egress_channels.into_iter().map(|(id, _)| id).collect()
1027 }
1028}
1029
1030impl<T: Config> GetChannelInfo for Pallet<T> {
1031 fn get_channel_status(id: ParaId) -> ChannelStatus {
1032 let channels = match RelevantMessagingState::<T>::get() {
1047 None => {
1048 log::warn!("calling `get_channel_status` with no RelevantMessagingState?!");
1049 return ChannelStatus::Closed
1050 },
1051 Some(d) => d.egress_channels,
1052 };
1053 let index = match channels.binary_search_by_key(&id, |item| item.0) {
1060 Err(_) => return ChannelStatus::Closed,
1061 Ok(i) => i,
1062 };
1063 let meta = &channels[index].1;
1064 if meta.msg_count + 1 > meta.max_capacity {
1065 return ChannelStatus::Full
1067 }
1068 let max_size_now = meta.max_total_size - meta.total_size;
1069 let max_size_ever = meta.max_message_size;
1070 ChannelStatus::Ready(max_size_now as usize, max_size_ever as usize)
1071 }
1072
1073 fn get_channel_info(id: ParaId) -> Option<ChannelInfo> {
1074 let channels = RelevantMessagingState::<T>::get()?.egress_channels;
1075 let index = channels.binary_search_by_key(&id, |item| item.0).ok()?;
1076 let info = ChannelInfo {
1077 max_capacity: channels[index].1.max_capacity,
1078 max_total_size: channels[index].1.max_total_size,
1079 max_message_size: channels[index].1.max_message_size,
1080 msg_count: channels[index].1.msg_count,
1081 total_size: channels[index].1.total_size,
1082 };
1083 Some(info)
1084 }
1085}
1086
1087impl<T: Config> Pallet<T> {
1088 fn messages_collection_size_limit() -> usize {
1098 let max_block_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
1099 let max_block_pov = max_block_weight.proof_size();
1100 (max_block_pov / 6).saturated_into()
1101 }
1102
	/// Turn the raw [`ParachainInherentData`] into the `set_validation_data`
	/// call: drops messages already processed by earlier blocks and abridges
	/// the collections to the configured size limit.
	fn do_create_inherent(data: ParachainInherentData) -> Call<T> {
		let (data, mut downward_messages, mut horizontal_messages) =
			deconstruct_parachain_inherent_data(data);
		let last_relay_block_number = LastRelayChainBlockNumber::<T>::get();

		let messages_collection_size_limit = Self::messages_collection_size_limit();
		// Skip DMP messages processed in previous blocks, then abridge to the
		// size budget (`into_abridged` decrements `size_limit` as it consumes).
		let last_processed_msg = LastProcessedDownwardMessage::<T>::get()
			.unwrap_or(InboundMessageId { sent_at: last_relay_block_number, reverse_idx: 0 });
		downward_messages.drop_processed_messages(&last_processed_msg);
		let mut size_limit = messages_collection_size_limit;
		let downward_messages = downward_messages.into_abridged(&mut size_limit);

		// Same for HRMP; any budget left over from the DMP collection carries
		// over on top of the HRMP collection's own limit.
		let last_processed_msg = LastProcessedHrmpMessage::<T>::get()
			.unwrap_or(InboundMessageId { sent_at: last_relay_block_number, reverse_idx: 0 });
		horizontal_messages.drop_processed_messages(&last_processed_msg);
		size_limit = size_limit.saturating_add(messages_collection_size_limit);
		let horizontal_messages = horizontal_messages.into_abridged(&mut size_limit);

		let inbound_messages_data =
			InboundMessagesData::new(downward_messages, horizontal_messages);

		Call::set_validation_data { data, inbound_messages_data }
	}
1133
	/// Enqueue all inbound downward messages, extending the stored DMQ MQC head
	/// and asserting that it ends up equal to `expected_dmq_mqc_head` as read
	/// from the relay chain.
	///
	/// Returns the weight used according to `T::WeightInfo`.
	fn enqueue_inbound_downward_messages(
		expected_dmq_mqc_head: relay_chain::Hash,
		downward_messages: AbridgedInboundDownwardMessages,
	) -> Weight {
		downward_messages.check_enough_messages_included("DMQ");

		let mut dmq_head = <LastDmqMqcHead<T>>::get();

		let (messages, hashed_messages) = downward_messages.messages();
		let message_count = messages.len() as u32;
		let weight_used = T::WeightInfo::enqueue_inbound_downward_messages(message_count);
		if let Some(last_msg) = messages.last() {
			Self::deposit_event(Event::DownwardMessagesReceived { count: message_count });

			// Extend the MQC head with every full message...
			for msg in messages {
				dmq_head.extend_downward(msg);
			}
			<LastDmqMqcHead<T>>::put(&dmq_head);
			Self::deposit_event(Event::DownwardMessagesProcessed {
				weight_used,
				dmq_head: dmq_head.head(),
			});

			// ...and with the hash-only messages, counting how many of them
			// share the relay block of the last full message.
			let mut last_processed_msg =
				InboundMessageId { sent_at: last_msg.sent_at, reverse_idx: 0 };
			for msg in hashed_messages {
				dmq_head.extend_with_hashed_msg(msg);

				if msg.sent_at == last_processed_msg.sent_at {
					last_processed_msg.reverse_idx += 1;
				}
			}
			LastProcessedDownwardMessage::<T>::put(last_processed_msg);

			T::DmpQueue::handle_messages(downward_messages.bounded_msgs_iter());
		}

		// The locally reconstructed head must match the relay chain's view.
		assert_eq!(dmq_head.head(), expected_dmq_mqc_head, "DMQ head mismatch");

		ProcessedDownwardMessages::<T>::put(message_count);

		weight_used
	}
1192
1193 fn check_hrmp_mcq_heads(
1194 ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
1195 mqc_heads: &mut BTreeMap<ParaId, MessageQueueChain>,
1196 ) {
1197 for (sender, channel) in ingress_channels {
1205 let cur_head = mqc_heads.entry(*sender).or_default().head();
1206 let target_head = channel.mqc_head.unwrap_or_default();
1207 assert_eq!(cur_head, target_head, "HRMP head mismatch");
1208 }
1209 }
1210
	/// Validate the metadata `(sent_at, sender)` of one inbound HRMP message:
	/// messages must arrive in non-decreasing `(sent_at, sender)` order, and the
	/// sender must have an ingress channel open to this parachain.
	///
	/// Updates `maybe_prev_msg_metadata` to the current message for the next
	/// call. Panics on violation, aborting block execution.
	fn check_hrmp_message_metadata(
		ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
		maybe_prev_msg_metadata: &mut Option<(u32, ParaId)>,
		msg_metadata: (u32, ParaId),
	) {
		if let Some(prev_msg) = maybe_prev_msg_metadata {
			assert!(&msg_metadata >= prev_msg, "[HRMP] Messages order violation");
		}
		*maybe_prev_msg_metadata = Some(msg_metadata);

		let sender = msg_metadata.1;
		// `ingress_channels` is searched by sender `ParaId`.
		let maybe_channel_idx =
			ingress_channels.binary_search_by_key(&sender, |&(channel_sender, _)| channel_sender);
		assert!(
			maybe_channel_idx.is_ok(),
			"One of the messages submitted by the collator was sent from a sender ({}) \
			that doesn't have a channel opened to this parachain",
			<ParaId as Into<u32>>::into(sender)
		);
	}
1238
	/// Enqueue all inbound HRMP messages: extends the per-channel MQC heads,
	/// checks them against the relay chain channel metadata, and advances the
	/// [`HrmpWatermark`] and [`LastProcessedHrmpMessage`].
	///
	/// Returns the weight used.
	fn enqueue_inbound_horizontal_messages(
		ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
		horizontal_messages: AbridgedInboundHrmpMessages,
		relay_parent_number: relay_chain::BlockNumber,
	) -> Weight {
		horizontal_messages.check_enough_messages_included("HRMP");

		let (messages, hashed_messages) = horizontal_messages.messages();
		let mut mqc_heads = <LastHrmpMqcHeads<T>>::get();

		// Without messages the heads must already match, and the watermark can
		// advance straight to the relay parent.
		if messages.is_empty() {
			Self::check_hrmp_mcq_heads(ingress_channels, &mut mqc_heads);
			let last_processed_msg =
				InboundMessageId { sent_at: relay_parent_number, reverse_idx: 0 };
			LastProcessedHrmpMessage::<T>::put(last_processed_msg);
			HrmpWatermark::<T>::put(relay_parent_number);
			return T::DbWeight::get().reads_writes(1, 2);
		}

		let mut prev_msg_metadata = None;
		let mut last_processed_block = HrmpWatermark::<T>::get();
		let mut last_processed_msg = InboundMessageId { sent_at: 0, reverse_idx: 0 };
		// Full messages: validate ordering and channel, extend the MQC heads,
		// and track the last relay block whose messages were fully processed.
		for (sender, msg) in messages {
			Self::check_hrmp_message_metadata(
				ingress_channels,
				&mut prev_msg_metadata,
				(msg.sent_at, *sender),
			);
			mqc_heads.entry(*sender).or_default().extend_hrmp(msg);

			if msg.sent_at > last_processed_msg.sent_at && last_processed_msg.sent_at > 0 {
				last_processed_block = last_processed_msg.sent_at;
			}
			last_processed_msg.sent_at = msg.sent_at;
		}
		<LastHrmpMqcHeads<T>>::put(&mqc_heads);
		// Hash-only messages: also extend the heads, counting how many share
		// the relay block of the last full message.
		for (sender, msg) in hashed_messages {
			Self::check_hrmp_message_metadata(
				ingress_channels,
				&mut prev_msg_metadata,
				(msg.sent_at, *sender),
			);
			mqc_heads.entry(*sender).or_default().extend_with_hashed_msg(msg);

			if msg.sent_at == last_processed_msg.sent_at {
				last_processed_msg.reverse_idx += 1;
			}
		}
		// If no hash-only message shares the last full message's relay block,
		// that block is fully processed and becomes the watermark.
		if last_processed_msg.sent_at > 0 && last_processed_msg.reverse_idx == 0 {
			last_processed_block = last_processed_msg.sent_at;
		}
		LastProcessedHrmpMessage::<T>::put(&last_processed_msg);
		Self::check_hrmp_mcq_heads(ingress_channels, &mut mqc_heads);

		let max_weight =
			<ReservedXcmpWeightOverride<T>>::get().unwrap_or_else(T::ReservedXcmpWeight::get);
		let weight_used = T::XcmpMessageHandler::handle_xcmp_messages(
			horizontal_messages.flat_msgs_iter(),
			max_weight,
		);

		HrmpWatermark::<T>::put(last_processed_block);

		weight_used.saturating_add(T::DbWeight::get().reads_writes(2, 3))
	}
1316
	/// Drop from the unincluded segment every ancestor up to and including the
	/// para head that the relay chain reports as included, then assert there is
	/// capacity left for the current block.
	///
	/// Returns the weight used by the involved storage operations.
	fn maybe_drop_included_ancestors(
		relay_state_proof: &RelayChainStateProof,
		capacity: consensus_hook::UnincludedSegmentCapacity,
	) -> Weight {
		let mut weight_used = Weight::zero();
		// The included para head from the proof, re-hashed with this runtime's
		// hasher so it can be compared with locally tracked head hashes.
		let para_head =
			relay_state_proof.read_included_para_head().ok().map(|h| T::Hashing::hash(&h.0));

		let unincluded_segment_len = <UnincludedSegment<T>>::decode_len().unwrap_or(0);
		weight_used += T::DbWeight::get().reads(1);

		let included_head = match (para_head, capacity.is_expecting_included_parent()) {
			(Some(h), true) => {
				assert_eq!(
					h,
					frame_system::Pallet::<T>::parent_hash(),
					"expected parent to be included"
				);

				h
			},
			(Some(h), false) => h,
			(None, true) => {
				// Head absent from the proof, but the consensus hook expects the
				// parent to be included — use the parent hash directly.
				frame_system::Pallet::<T>::parent_hash()
			},
			(None, false) => panic!("included head not present in relay storage proof"),
		};

		let new_len = {
			let para_head_hash = included_head;
			let dropped: Vec<Ancestor<T::Hash>> = <UnincludedSegment<T>>::mutate(|chain| {
				// Drain everything up to (and including) the included head.
				let idx = chain
					.iter()
					.position(|block| {
						let head_hash = block
							.para_head_hash()
							.expect("para head hash is updated during block initialization; qed");
						head_hash == &para_head_hash
					})
					.map_or(0, |idx| idx + 1); chain.drain(..idx).collect()
			});
			weight_used += T::DbWeight::get().reads_writes(1, 1);

			let new_len = unincluded_segment_len - dropped.len();
			if !dropped.is_empty() {
				// Subtract the dropped blocks' bandwidth from the aggregate.
				<AggregatedUnincludedSegment<T>>::mutate(|agg| {
					let agg = agg.as_mut().expect(
						"dropped part of the segment wasn't empty, hence value exists; qed",
					);
					for block in dropped {
						agg.subtract(&block);
					}
				});
				weight_used += T::DbWeight::get().reads_writes(1, 1);
			}

			new_len as u32
		};

		assert!(new_len < capacity.get(), "no space left for the block in the unincluded segment");
		weight_used
	}
1392
1393 fn adjust_egress_bandwidth_limits() {
1399 let unincluded_segment = match AggregatedUnincludedSegment::<T>::get() {
1400 None => return,
1401 Some(s) => s,
1402 };
1403
1404 <RelevantMessagingState<T>>::mutate(|messaging_state| {
1405 let messaging_state = match messaging_state {
1406 None => return,
1407 Some(s) => s,
1408 };
1409
1410 let used_bandwidth = unincluded_segment.used_bandwidth();
1411
1412 let channels = &mut messaging_state.egress_channels;
1413 for (para_id, used) in used_bandwidth.hrmp_outgoing.iter() {
1414 let i = match channels.binary_search_by_key(para_id, |item| item.0) {
1415 Ok(i) => i,
1416 Err(_) => continue, };
1418
1419 let c = &mut channels[i].1;
1420
1421 c.total_size = (c.total_size + used.total_bytes).min(c.max_total_size);
1422 c.msg_count = (c.msg_count + used.msg_count).min(c.max_capacity);
1423 }
1424
1425 let upward_capacity = &mut messaging_state.relay_dispatch_queue_remaining_capacity;
1426 upward_capacity.remaining_count =
1427 upward_capacity.remaining_count.saturating_sub(used_bandwidth.ump_msg_count);
1428 upward_capacity.remaining_size =
1429 upward_capacity.remaining_size.saturating_sub(used_bandwidth.ump_total_bytes);
1430 });
1431 }
1432
1433 fn notify_polkadot_of_pending_upgrade(code: &[u8]) {
1437 NewValidationCode::<T>::put(code);
1438 <DidSetValidationCode<T>>::put(true);
1439 }
1440
1441 pub fn max_code_size() -> Option<u32> {
1445 <HostConfiguration<T>>::get().map(|cfg| cfg.max_code_size)
1446 }
1447
1448 pub fn schedule_code_upgrade(validation_function: Vec<u8>) -> DispatchResult {
1450 ensure!(<ValidationData<T>>::exists(), Error::<T>::ValidationDataNotAvailable,);
1454 ensure!(<UpgradeRestrictionSignal<T>>::get().is_none(), Error::<T>::ProhibitedByPolkadot);
1455
1456 ensure!(!<PendingValidationCode<T>>::exists(), Error::<T>::OverlappingUpgrades);
1457 let cfg = HostConfiguration::<T>::get().ok_or(Error::<T>::HostConfigurationNotAvailable)?;
1458 ensure!(validation_function.len() <= cfg.max_code_size as usize, Error::<T>::TooBig);
1459
1460 Self::notify_polkadot_of_pending_upgrade(&validation_function);
1468 <PendingValidationCode<T>>::put(validation_function);
1469 Self::deposit_event(Event::ValidationFunctionStored);
1470
1471 Ok(())
1472 }
1473
1474 pub fn collect_collation_info(header: &HeaderFor<T>) -> CollationInfo {
1482 CollationInfo {
1483 hrmp_watermark: HrmpWatermark::<T>::get(),
1484 horizontal_messages: HrmpOutboundMessages::<T>::get(),
1485 upward_messages: UpwardMessages::<T>::get(),
1486 processed_downward_messages: ProcessedDownwardMessages::<T>::get(),
1487 new_validation_code: NewValidationCode::<T>::get().map(Into::into),
1488 head_data: CustomValidationHeadData::<T>::get()
1491 .map_or_else(|| header.encode(), |v| v)
1492 .into(),
1493 }
1494 }
1495
	/// Store custom head data that [`Self::collect_collation_info`] will return in
	/// place of the encoded block header.
	///
	/// NOTE(review): callers are presumably responsible for ensuring the bytes encode
	/// a valid header — nothing here validates them; confirm against call sites.
	pub fn set_custom_validation_head_data(head_data: Vec<u8>) {
		CustomValidationHeadData::<T>::put(head_data);
	}
1511
1512 #[cfg(feature = "experimental-ump-signals")]
1514 fn send_ump_signal() {
1515 use cumulus_primitives_core::relay_chain::{UMPSignal, UMP_SEPARATOR};
1516
1517 UpwardMessages::<T>::mutate(|up| {
1518 if let Some(core_info) =
1519 CumulusDigestItem::find_core_info(&frame_system::Pallet::<T>::digest())
1520 {
1521 up.push(UMP_SEPARATOR);
1522
1523 up.push(
1525 UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset)
1526 .encode(),
1527 );
1528 }
1529 });
1530 }
1531
1532 #[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1537 pub fn open_outbound_hrmp_channel_for_benchmarks_or_tests(target_parachain: ParaId) {
1538 RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1539 dmq_mqc_head: Default::default(),
1540 relay_dispatch_queue_remaining_capacity: Default::default(),
1541 ingress_channels: Default::default(),
1542 egress_channels: vec![(
1543 target_parachain,
1544 cumulus_primitives_core::AbridgedHrmpChannel {
1545 max_capacity: 10,
1546 max_total_size: 10_000_000_u32,
1547 max_message_size: 10_000_000_u32,
1548 msg_count: 5,
1549 total_size: 5_000_000_u32,
1550 mqc_head: None,
1551 },
1552 )],
1553 })
1554 }
1555
1556 #[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1561 pub fn open_custom_outbound_hrmp_channel_for_benchmarks_or_tests(
1562 target_parachain: ParaId,
1563 channel: cumulus_primitives_core::AbridgedHrmpChannel,
1564 ) {
1565 RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1566 dmq_mqc_head: Default::default(),
1567 relay_dispatch_queue_remaining_capacity: Default::default(),
1568 ingress_channels: Default::default(),
1569 egress_channels: vec![(target_parachain, channel)],
1570 })
1571 }
1572
1573 #[cfg(feature = "runtime-benchmarks")]
1575 pub fn initialize_for_set_code_benchmark(max_code_size: u32) {
1576 let vfp = PersistedValidationData {
1578 parent_head: polkadot_parachain_primitives::primitives::HeadData(Default::default()),
1579 relay_parent_number: 1,
1580 relay_parent_storage_root: Default::default(),
1581 max_pov_size: 1_000,
1582 };
1583 <ValidationData<T>>::put(&vfp);
1584
1585 let host_config = AbridgedHostConfiguration {
1587 max_code_size,
1588 max_head_data_size: 32 * 1024,
1589 max_upward_queue_count: 8,
1590 max_upward_queue_size: 1024 * 1024,
1591 max_upward_message_size: 4 * 1024,
1592 max_upward_message_num_per_candidate: 2,
1593 hrmp_max_message_num_per_candidate: 2,
1594 validation_upgrade_cooldown: 2,
1595 validation_upgrade_delay: 2,
1596 async_backing_params: relay_chain::AsyncBackingParams {
1597 allowed_ancestry_len: 0,
1598 max_candidate_depth: 0,
1599 },
1600 };
1601 <HostConfiguration<T>>::put(host_config);
1602 }
1603}
1604
/// Implements [`frame_system::SetCode`] by forwarding to
/// [`Pallet::schedule_code_upgrade`], so `set_code` goes through the parachain
/// upgrade-scheduling checks instead of being applied directly.
pub struct ParachainSetCode<T>(core::marker::PhantomData<T>);
impl<T: Config> frame_system::SetCode<T> for ParachainSetCode<T> {
	fn set_code(code: Vec<u8>) -> DispatchResult {
		Pallet::<T>::schedule_code_upgrade(code)
	}
}
1612
1613impl<T: Config> Pallet<T> {
1614 pub fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1620 let message_len = message.len();
1621 if let Some(cfg) = HostConfiguration::<T>::get() {
1634 if message_len > cfg.max_upward_message_size as usize {
1635 return Err(MessageSendError::TooBig);
1636 }
1637 let threshold =
1638 cfg.max_upward_queue_size.saturating_div(ump_constants::THRESHOLD_FACTOR);
1639 <PendingUpwardMessages<T>>::append(message.clone());
1642 let pending_messages = PendingUpwardMessages::<T>::get();
1643 let total_size: usize = pending_messages.iter().map(UpwardMessage::len).sum();
1644 if total_size > threshold as usize {
1645 Self::increase_fee_factor((), message_len as u128);
1647 }
1648 } else {
1649 <PendingUpwardMessages<T>>::append(message.clone());
1659 };
1660
1661 let hash = sp_io::hashing::blake2_256(&message);
1664 Self::deposit_event(Event::UpwardMessageSent { message_hash: Some(hash) });
1665 Ok((0, hash))
1666 }
1667
1668 pub fn last_relay_block_number() -> RelayChainBlockNumber {
1671 LastRelayChainBlockNumber::<T>::get()
1672 }
1673}
1674
1675impl<T: Config> UpwardMessageSender for Pallet<T> {
1676 fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1677 Self::send_upward_message(message)
1678 }
1679
1680 fn can_send_upward_message(message: &UpwardMessage) -> Result<(), MessageSendError> {
1681 let max_upward_message_size = HostConfiguration::<T>::get()
1682 .map(|cfg| cfg.max_upward_message_size)
1683 .ok_or(MessageSendError::Other)?;
1684 if message.len() > max_upward_message_size as usize {
1685 Err(MessageSendError::TooBig)
1686 } else {
1687 Ok(())
1688 }
1689 }
1690
1691 #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
1692 fn ensure_successful_delivery() {
1693 const MAX_UPWARD_MESSAGE_SIZE: u32 = 65_531 * 3;
1694 const MAX_CODE_SIZE: u32 = 3 * 1024 * 1024;
1695 HostConfiguration::<T>::mutate(|cfg| match cfg {
1696 Some(cfg) => cfg.max_upward_message_size = MAX_UPWARD_MESSAGE_SIZE,
1697 None =>
1698 *cfg = Some(AbridgedHostConfiguration {
1699 max_code_size: MAX_CODE_SIZE,
1700 max_head_data_size: 32 * 1024,
1701 max_upward_queue_count: 8,
1702 max_upward_queue_size: 1024 * 1024,
1703 max_upward_message_size: MAX_UPWARD_MESSAGE_SIZE,
1704 max_upward_message_num_per_candidate: 2,
1705 hrmp_max_message_num_per_candidate: 2,
1706 validation_upgrade_cooldown: 2,
1707 validation_upgrade_delay: 2,
1708 async_backing_params: relay_chain::AsyncBackingParams {
1709 allowed_ancestry_len: 0,
1710 max_candidate_depth: 0,
1711 },
1712 }),
1713 })
1714 }
1715}
1716
1717impl<T: Config> InspectMessageQueues for Pallet<T> {
1718 fn clear_messages() {
1719 PendingUpwardMessages::<T>::kill();
1720 }
1721
1722 fn get_messages() -> Vec<(VersionedLocation, Vec<VersionedXcm<()>>)> {
1723 use xcm::prelude::*;
1724
1725 let messages: Vec<VersionedXcm<()>> = PendingUpwardMessages::<T>::get()
1726 .iter()
1727 .map(|encoded_message| {
1728 VersionedXcm::<()>::decode_all_with_depth_limit(
1729 MAX_XCM_DECODE_DEPTH,
1730 &mut &encoded_message[..],
1731 )
1732 .unwrap()
1733 })
1734 .collect();
1735
1736 if messages.is_empty() {
1737 vec![]
1738 } else {
1739 vec![(VersionedLocation::from(Location::parent()), messages)]
1740 }
1741 }
1742}
1743
#[cfg(feature = "runtime-benchmarks")]
impl<T: Config> polkadot_runtime_parachains::EnsureForParachain for Pallet<T> {
	/// Make sure an outbound HRMP channel to `para_id` exists when benchmarking.
	fn ensure(para_id: ParaId) {
		if matches!(Self::get_channel_status(para_id), ChannelStatus::Closed) {
			Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id)
		}
	}
}
1752
/// A hook for reacting to system-level events of this pallet.
///
/// Implemented for tuples of up to 30 members, invoking each member in order.
#[impl_trait_for_tuples::impl_for_tuples(30)]
pub trait OnSystemEvent {
	/// Receives the block's [`PersistedValidationData`].
	// NOTE(review): presumably invoked once per block when the inherent sets the
	// validation data — confirm against the pallet's call sites.
	fn on_validation_data(data: &PersistedValidationData);
	/// Notified when a scheduled validation-code upgrade has been applied.
	fn on_validation_code_applied();
}
1768
/// A snapshot of a relay chain block: its number and state root.
#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Default, RuntimeDebug)]
pub struct RelayChainState {
	// The relay chain block number.
	pub number: relay_chain::BlockNumber,
	// The relay chain storage (state) root at that block.
	pub state_root: relay_chain::Hash,
}
1777
/// Provides access to the current state of the relay chain.
pub trait RelaychainStateProvider {
	/// Return the current [`RelayChainState`].
	fn current_relay_chain_state() -> RelayChainState;

	/// Override the relay chain state; for benchmarking only.
	///
	/// The default implementation is a no-op.
	#[cfg(feature = "runtime-benchmarks")]
	fn set_current_relay_chain_state(_state: RelayChainState) {}
}
1794
// Deprecated alias kept for backwards compatibility with older downstream code.
#[deprecated = "Use `RelaychainDataProvider` instead"]
pub type RelaychainBlockNumberProvider<T> = RelaychainDataProvider<T>;

/// Provides relay chain data (block number and state) sourced from this pallet's
/// stored [`PersistedValidationData`].
pub struct RelaychainDataProvider<T>(core::marker::PhantomData<T>);
1815
1816impl<T: Config> BlockNumberProvider for RelaychainDataProvider<T> {
1817 type BlockNumber = relay_chain::BlockNumber;
1818
1819 fn current_block_number() -> relay_chain::BlockNumber {
1820 ValidationData::<T>::get()
1821 .map(|d| d.relay_parent_number)
1822 .unwrap_or_else(|| Pallet::<T>::last_relay_block_number())
1823 }
1824
1825 #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
1826 fn set_block_number(block: Self::BlockNumber) {
1827 let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1828 PersistedValidationData {
1830 parent_head: vec![].into(),
1831 relay_parent_number: Default::default(),
1832 max_pov_size: Default::default(),
1833 relay_parent_storage_root: Default::default(),
1834 });
1835 validation_data.relay_parent_number = block;
1836 ValidationData::<T>::put(validation_data)
1837 }
1838}
1839
1840impl<T: Config> RelaychainStateProvider for RelaychainDataProvider<T> {
1841 fn current_relay_chain_state() -> RelayChainState {
1842 ValidationData::<T>::get()
1843 .map(|d| RelayChainState {
1844 number: d.relay_parent_number,
1845 state_root: d.relay_parent_storage_root,
1846 })
1847 .unwrap_or_default()
1848 }
1849
1850 #[cfg(feature = "runtime-benchmarks")]
1851 fn set_current_relay_chain_state(state: RelayChainState) {
1852 let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1853 PersistedValidationData {
1855 parent_head: vec![].into(),
1856 relay_parent_number: Default::default(),
1857 max_pov_size: Default::default(),
1858 relay_parent_storage_root: Default::default(),
1859 });
1860 validation_data.relay_parent_number = state.number;
1861 validation_data.relay_parent_storage_root = state.state_root;
1862 ValidationData::<T>::put(validation_data)
1863 }
1864}