referrerpolicy=no-referrer-when-downgrade

pallet_scheduler/
lib.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! > Made with *Substrate*, for *Polkadot*.
19//!
20//! [![github]](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/scheduler) -
21//! [![polkadot]](https://polkadot.com)
22//!
23//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white
24//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
25//!
26//! # Scheduler Pallet
27//!
28//! A Pallet for scheduling runtime calls.
29//!
30//! ## Overview
31//!
32//! This Pallet exposes capabilities for scheduling runtime calls to occur at a specified block
33//! number or at a specified period. These scheduled runtime calls may be named or anonymous and may
34//! be canceled.
35//!
36//! __NOTE:__ Instead of using the filter contained in the origin to call `fn schedule`, scheduled
37//! runtime calls will be dispatched with the default filter for the origin: namely
38//! `frame_system::Config::BaseCallFilter` for all origin types (except root which will get no
39//! filter).
40//!
41//! If a call is scheduled using proxy or whatever mechanism which adds filter, then those filter
42//! will not be used when dispatching the schedule runtime call.
43//!
44//! ### Examples
45//!
46//! 1. Scheduling a runtime call at a specific block.
47#![doc = docify::embed!("src/tests.rs", basic_scheduling_works)]
48//!
49//! 2. Scheduling a preimage hash of a runtime call at a specific block
50#![doc = docify::embed!("src/tests.rs", scheduling_with_preimages_works)]
51
52//!
53//! ## Pallet API
54//!
55//! See the [`pallet`] module for more information about the interfaces this pallet exposes,
56//! including its configuration trait, dispatchables, storage items, events and errors.
57//!
58//! ## Warning
59//!
60//! This Pallet executes all scheduled runtime calls in the [`on_initialize`] hook. Do not execute
61//! any runtime calls which should not be considered mandatory.
62//!
63//! Please be aware that any scheduled runtime calls executed in a future block may __fail__ or may
64//! result in __undefined behavior__ since the runtime could have upgraded between the time of
65//! scheduling and execution. For example, the runtime upgrade could have:
66//!
67//! * Modified the implementation of the runtime call (runtime specification upgrade).
68//!     * Could lead to undefined behavior.
69//! * Removed or changed the ordering/index of the runtime call.
70//!     * Could fail due to the runtime call index not being part of the `Call`.
71//!     * Could lead to undefined behavior, such as executing another runtime call with the same
72//!       index.
73//!
74//! [`on_initialize`]: frame_support::traits::Hooks::on_initialize
75
76// Ensure we're `no_std` when compiling for Wasm.
77#![cfg_attr(not(feature = "std"), no_std)]
78
79#[cfg(feature = "runtime-benchmarks")]
80mod benchmarking;
81pub mod migration;
82#[cfg(test)]
83mod mock;
84#[cfg(test)]
85mod tests;
86pub mod weights;
87
88extern crate alloc;
89
90use alloc::{boxed::Box, vec::Vec};
91use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
92use core::{borrow::Borrow, cmp::Ordering, marker::PhantomData};
93use frame_support::{
94	dispatch::{DispatchResult, GetDispatchInfo, Parameter, RawOrigin},
95	ensure,
96	traits::{
97		schedule::{self, DispatchTime, MaybeHashed},
98		Bounded, CallerTrait, EnsureOrigin, Get, IsType, OriginTrait, PalletInfoAccess,
99		PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage,
100	},
101	weights::{Weight, WeightMeter},
102};
103use frame_system::{self as system};
104use scale_info::TypeInfo;
105use sp_io::hashing::blake2_256;
106use sp_runtime::{
107	traits::{BadOrigin, BlockNumberProvider, Dispatchable, One, Saturating, Zero},
108	BoundedVec, Debug, DispatchError,
109};
110
111pub use pallet::*;
112pub use weights::WeightInfo;
113
/// Just a simple index for naming period tasks.
pub type PeriodicIndex = u32;
/// The location of a scheduled task that can be used to remove it.
pub type TaskAddress<BlockNumber> = (BlockNumber, u32);

/// A runtime call, either given inline or by the hash under which it can be looked up.
/// Used by the pre-V4 storage layouts.
pub type CallOrHashOf<T> =
	MaybeHashed<<T as Config>::RuntimeCall, <T as frame_system::Config>::Hash>;

/// A runtime call in bounded form: stored inline when small enough, otherwise as a lookup
/// into the preimage store (see `Config::Preimages`).
pub type BoundedCallOf<T> =
	Bounded<<T as Config>::RuntimeCall, <T as frame_system::Config>::Hashing>;

/// The block number type the scheduler operates on, as supplied by
/// `Config::BlockNumberProvider` (which may differ from the local system block number).
pub type BlockNumberFor<T> =
	<<T as Config>::BlockNumberProvider as BlockNumberProvider>::BlockNumber;
127
/// The configuration of the retry mechanism for a given task along with its current state.
#[derive(
	Clone,
	Copy,
	Debug,
	PartialEq,
	Eq,
	Encode,
	Decode,
	DecodeWithMemTracking,
	MaxEncodedLen,
	TypeInfo,
)]
pub struct RetryConfig<Period> {
	/// Initial amount of retries allowed.
	pub total_retries: u8,
	/// Amount of retries left.
	///
	/// Starts out equal to `total_retries` (see `set_retry`) and is lower for the retry
	/// clones of a task that has already consumed attempts.
	pub remaining: u8,
	/// Period of time between retry attempts.
	pub period: Period,
}
149
/// Information regarding an item to be executed in the future — V1 storage layout.
///
/// Kept only so migrations can decode pre-V2 agendas. Note the absence of a dispatch
/// origin: V1 tasks are migrated with the root origin (see `migrate_v1_to_v4`).
#[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))]
#[derive(Clone, Debug, Encode, Decode)]
struct ScheduledV1<Call, BlockNumber> {
	/// The unbounded (pre-V4) identity for this task, if any.
	maybe_id: Option<Vec<u8>>,
	/// This task's priority.
	priority: schedule::Priority,
	/// The call to be dispatched, stored inline.
	call: Call,
	/// If the call is periodic, the period and remaining repetitions.
	maybe_periodic: Option<schedule::Period<BlockNumber>>,
}
158
/// Information regarding an item to be executed in the future.
#[derive(
	Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo, DecodeWithMemTracking,
)]
pub struct Scheduled<Name, Call, BlockNumber, PalletsOrigin, AccountId> {
	/// The unique identity for this task, if there is one.
	pub maybe_id: Option<Name>,
	/// This task's priority.
	pub priority: schedule::Priority,
	/// The call to be dispatched.
	pub call: Call,
	/// If the call is periodic, then this points to the information concerning that.
	pub maybe_periodic: Option<schedule::Period<BlockNumber>>,
	/// The origin with which to dispatch the call.
	pub origin: PalletsOrigin,
	// Ties the otherwise-unused `AccountId` type parameter to the struct.
	#[doc(hidden)]
	pub _phantom: PhantomData<AccountId>,
}
177
178impl<Name, Call, BlockNumber, PalletsOrigin, AccountId>
179	Scheduled<Name, Call, BlockNumber, PalletsOrigin, AccountId>
180where
181	Call: Clone,
182	PalletsOrigin: Clone,
183{
184	/// Create a new task to be used for retry attempts of the original one. The cloned task will
185	/// have the same `priority`, `call` and `origin`, but will always be non-periodic and unnamed.
186	pub fn as_retry(&self) -> Self {
187		Self {
188			maybe_id: None,
189			priority: self.priority,
190			call: self.call.clone(),
191			maybe_periodic: None,
192			origin: self.origin.clone(),
193			_phantom: Default::default(),
194		}
195	}
196}
197
198use crate::{Scheduled as ScheduledV3, Scheduled as ScheduledV2};
199
/// A scheduled task in the V2 storage layout: raw `Vec<u8>` identity, call stored inline.
pub type ScheduledV2Of<T> = ScheduledV2<
	Vec<u8>,
	<T as Config>::RuntimeCall,
	BlockNumberFor<T>,
	<T as Config>::PalletsOrigin,
	<T as frame_system::Config>::AccountId,
>;

/// A scheduled task in the V3 storage layout: raw `Vec<u8>` identity, call stored either
/// inline or as a hash.
pub type ScheduledV3Of<T> = ScheduledV3<
	Vec<u8>,
	CallOrHashOf<T>,
	BlockNumberFor<T>,
	<T as Config>::PalletsOrigin,
	<T as frame_system::Config>::AccountId,
>;

/// A scheduled task in the current (V4) layout: hashed `TaskName` identity and a bounded
/// call.
pub type ScheduledOf<T> = Scheduled<
	TaskName,
	BoundedCallOf<T>,
	BlockNumberFor<T>,
	<T as Config>::PalletsOrigin,
	<T as frame_system::Config>::AccountId,
>;
223
224pub(crate) trait MarginalWeightInfo: WeightInfo {
225	fn service_task(maybe_lookup_len: Option<usize>, named: bool, periodic: bool) -> Weight {
226		let base = Self::service_task_base();
227		let mut total = match maybe_lookup_len {
228			None => base,
229			Some(l) => Self::service_task_fetched(l as u32),
230		};
231		if named {
232			total.saturating_accrue(Self::service_task_named().saturating_sub(base));
233		}
234		if periodic {
235			total.saturating_accrue(Self::service_task_periodic().saturating_sub(base));
236		}
237		total
238	}
239}
240impl<T: WeightInfo> MarginalWeightInfo for T {}
241
242#[frame_support::pallet]
243pub mod pallet {
244	use super::*;
245	use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*};
246	use frame_system::pallet_prelude::{BlockNumberFor as SystemBlockNumberFor, OriginFor};
247
248	/// The in-code storage version.
249	const STORAGE_VERSION: StorageVersion = StorageVersion::new(4);
250
251	#[pallet::pallet]
252	#[pallet::storage_version(STORAGE_VERSION)]
253	pub struct Pallet<T>(_);
254
255	/// `system::Config` should always be included in our implied traits.
	/// `system::Config` should always be included in our implied traits.
	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// The overarching event type.
		// `allow(deprecated)` suppresses deprecation warnings emitted for this associated
		// type pattern.
		#[allow(deprecated)]
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// The aggregated origin which the dispatch will take.
		type RuntimeOrigin: OriginTrait<PalletsOrigin = Self::PalletsOrigin>
			+ From<Self::PalletsOrigin>
			+ IsType<<Self as system::Config>::RuntimeOrigin>;

		/// The caller origin, overarching type of all pallets origins.
		type PalletsOrigin: From<system::RawOrigin<Self::AccountId>>
			+ CallerTrait<Self::AccountId>
			+ MaxEncodedLen;

		/// The aggregated call type.
		type RuntimeCall: Parameter
			+ Dispatchable<
				RuntimeOrigin = <Self as Config>::RuntimeOrigin,
				PostInfo = PostDispatchInfo,
			> + GetDispatchInfo
			+ From<system::Call<Self>>;

		/// The maximum weight that may be scheduled per block for any dispatchables.
		#[pallet::constant]
		type MaximumWeight: Get<Weight>;

		/// Required origin to schedule or cancel calls.
		type ScheduleOrigin: EnsureOrigin<<Self as system::Config>::RuntimeOrigin>;

		/// Compare the privileges of origins.
		///
		/// This will be used when canceling a task, to ensure that the origin that tries
		/// to cancel has greater or equal privileges as the origin that created the scheduled task.
		///
		/// For simplicity the [`EqualPrivilegeOnly`](frame_support::traits::EqualPrivilegeOnly) can
		/// be used. This will only check if two given origins are equal.
		type OriginPrivilegeCmp: PrivilegeCmp<Self::PalletsOrigin>;

		/// The maximum number of scheduled calls in the queue for a single block.
		///
		/// NOTE:
		/// + Dependent pallets' benchmarks might require a higher limit for the setting. Set a
		/// higher limit under `runtime-benchmarks` feature.
		#[pallet::constant]
		type MaxScheduledPerBlock: Get<u32>;

		/// Weight information for extrinsics in this pallet.
		type WeightInfo: WeightInfo;

		/// The preimage provider with which we look up call hashes to get the call.
		type Preimages: QueryPreimage<H = Self::Hashing> + StorePreimage;

		/// Query the current block number.
		///
		/// Must return monotonically increasing values when called from consecutive blocks. It is
		/// generally expected that the values also do not differ "too much" between consecutive
		/// blocks. A future addition to this pallet will allow bigger difference between
		/// consecutive blocks to make it possible to be utilized by parachains with *Agile
		/// Coretime*. *Agile Coretime* parachains are currently not supported and must continue to
		/// use their local block number provider.
		///
		/// Can be configured to return either:
		/// - the local block number of the runtime via `frame_system::Pallet`
		/// - a remote block number, eg from the relay chain through `RelaychainDataProvider`
		/// - an arbitrary value through a custom implementation of the trait
		///
		/// Suggested values:
		/// - Solo- and Relay-chains should use `frame_system::Pallet`. There are no concerns with
		///   this configuration.
		/// - Parachains should also use `frame_system::Pallet` for the time being. The scheduler
		///   pallet is not yet ready for the case that big numbers of blocks are skipped. In an
		///   *Agile Coretime* chain with relay chain number provider configured, it could otherwise
		///   happen that the scheduler will not be able to catch up to its agendas, since too many
		///   relay blocks are missing if the parachain only produces blocks rarely.
		///
		/// There is currently no migration provided to "hot-swap" block number providers and it is
		/// therefore highly advised to stay with the default (local) values. If you still want to
		/// swap block number providers on the fly, then please at least ensure that you do not run
		/// any pallet migration in the same runtime upgrade.
		type BlockNumberProvider: BlockNumberProvider;
	}
339
	/// Block number at which the agenda began incomplete execution.
	#[pallet::storage]
	pub type IncompleteSince<T: Config> = StorageValue<_, BlockNumberFor<T>>;

	/// Items to be executed, indexed by the block number that they should be executed on.
	///
	/// A slot may hold `None`: tasks are addressed by `(block, index)` (see [`TaskAddress`]),
	/// so vacated slots keep the indices of the remaining tasks stable.
	#[pallet::storage]
	pub type Agenda<T: Config> = StorageMap<
		_,
		Twox64Concat,
		BlockNumberFor<T>,
		BoundedVec<Option<ScheduledOf<T>>, T::MaxScheduledPerBlock>,
		ValueQuery,
	>;

	/// Retry configurations for items to be executed, indexed by task address.
	#[pallet::storage]
	pub type Retries<T: Config> = StorageMap<
		_,
		Blake2_128Concat,
		TaskAddress<BlockNumberFor<T>>,
		RetryConfig<BlockNumberFor<T>>,
		OptionQuery,
	>;

	/// Lookup from a name to the block number and index of the task.
	///
	/// For v3 -> v4 the previously unbounded identities are Blake2-256 hashed to form the v4
	/// identities.
	#[pallet::storage]
	pub type Lookup<T: Config> =
		StorageMap<_, Twox64Concat, TaskName, TaskAddress<BlockNumberFor<T>>>;
371
	/// Events type.
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// Scheduled some task.
		Scheduled { when: BlockNumberFor<T>, index: u32 },
		/// Canceled some task.
		Canceled { when: BlockNumberFor<T>, index: u32 },
		/// Dispatched some task.
		Dispatched {
			task: TaskAddress<BlockNumberFor<T>>,
			id: Option<TaskName>,
			result: DispatchResult,
		},
		/// Set a retry configuration for some task.
		RetrySet {
			task: TaskAddress<BlockNumberFor<T>>,
			id: Option<TaskName>,
			period: BlockNumberFor<T>,
			retries: u8,
		},
		/// Cancel a retry configuration for some task.
		RetryCancelled { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
		/// The call for the provided hash was not found so the task has been aborted.
		CallUnavailable { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
		/// The given task was unable to be renewed since the agenda is full at that block.
		PeriodicFailed { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
		/// The given task was unable to be retried since the agenda is full at that block or there
		/// was not enough weight to reschedule it.
		RetryFailed { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
		/// The given task can never be executed since it is overweight.
		PermanentlyOverweight { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
		/// Agenda is incomplete from `when`.
		AgendaIncomplete { when: BlockNumberFor<T> },
	}

	// NOTE: variant doc comments end up in runtime metadata, so they are kept as-is.
	#[pallet::error]
	pub enum Error<T> {
		/// Failed to schedule a call
		FailedToSchedule,
		/// Cannot find the scheduled call.
		NotFound,
		/// Given target block number is in the past.
		TargetBlockNumberInPast,
		/// Reschedule failed because it does not change scheduled time.
		RescheduleNoChange,
		/// Attempt to use a non-named function on a named task.
		Named,
	}
421
	#[pallet::hooks]
	impl<T: Config> Hooks<SystemBlockNumberFor<T>> for Pallet<T> {
		/// Execute the scheduled calls
		///
		/// Note that the agendas are keyed by `T::BlockNumberProvider`'s block number, not by
		/// the local `_now` argument. The weight available for servicing is the remaining
		/// block weight, further capped at `T::MaximumWeight`.
		fn on_initialize(_now: SystemBlockNumberFor<T>) -> Weight {
			let now = T::BlockNumberProvider::current_block_number();
			let mut weight_counter = frame_system::Pallet::<T>::remaining_block_weight()
				.limit_to(T::MaximumWeight::get());
			// `u32::MAX` — no artificial cap on the number of serviced agendas; only the
			// weight meter limits the work done.
			Self::service_agendas(&mut weight_counter, now, u32::MAX);
			weight_counter.consumed()
		}

		#[cfg(feature = "std")]
		fn integrity_test() {
			/// Calculate the maximum weight that a lookup of a given size can take.
			fn lookup_weight<T: Config>(s: usize) -> Weight {
				T::WeightInfo::service_agendas_base() +
					T::WeightInfo::service_agenda_base(T::MaxScheduledPerBlock::get()) +
					T::WeightInfo::service_task(Some(s), true, true)
			}

			// A worst-case (named, periodic, fetched) task must fit within 90% of the
			// maximum scheduling weight for small, medium and large preimages alike.
			let limit = sp_runtime::Perbill::from_percent(90) * T::MaximumWeight::get();

			let small_lookup = lookup_weight::<T>(128);
			assert!(small_lookup.all_lte(limit), "Must be possible to submit a small lookup");

			let medium_lookup = lookup_weight::<T>(1024);
			assert!(medium_lookup.all_lte(limit), "Must be possible to submit a medium lookup");

			let large_lookup = lookup_weight::<T>(1024 * 1024);
			assert!(large_lookup.all_lte(limit), "Must be possible to submit a large lookup");
		}
	}
454
	#[pallet::call]
	impl<T: Config> Pallet<T> {
		/// Anonymously schedule a task.
		///
		/// The origin must pass `T::ScheduleOrigin`; its caller becomes the origin the task is
		/// later dispatched with.
		#[pallet::call_index(0)]
		#[pallet::weight(<T as Config>::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))]
		pub fn schedule(
			origin: OriginFor<T>,
			when: BlockNumberFor<T>,
			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
			priority: schedule::Priority,
			call: Box<<T as Config>::RuntimeCall>,
		) -> DispatchResult {
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
			let origin = <T as Config>::RuntimeOrigin::from(origin);
			Self::do_schedule(
				DispatchTime::At(when),
				maybe_periodic,
				priority,
				origin.caller().clone(),
				// Store the call in bounded form (inline or via the preimage store).
				T::Preimages::bound(*call)?,
			)?;
			Ok(())
		}

		/// Cancel a scheduled task (named or anonymous), by providing the block it is scheduled for
		/// execution in, as well as the index of the task in that block's agenda.
		///
		/// In the case of a named task, it will remove it from the lookup table as well.
		#[pallet::call_index(1)]
		#[pallet::weight(<T as Config>::WeightInfo::cancel(T::MaxScheduledPerBlock::get()))]
		pub fn cancel(origin: OriginFor<T>, when: BlockNumberFor<T>, index: u32) -> DispatchResult {
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
			let origin = <T as Config>::RuntimeOrigin::from(origin);
			// The caller is forwarded so `do_cancel` can compare privileges against the
			// task's origin (presumably via `OriginPrivilegeCmp` — confirm in `do_cancel`).
			Self::do_cancel(Some(origin.caller().clone()), (when, index))?;
			Ok(())
		}

		/// Schedule a named task.
		#[pallet::call_index(2)]
		#[pallet::weight(<T as Config>::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))]
		pub fn schedule_named(
			origin: OriginFor<T>,
			id: TaskName,
			when: BlockNumberFor<T>,
			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
			priority: schedule::Priority,
			call: Box<<T as Config>::RuntimeCall>,
		) -> DispatchResult {
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
			let origin = <T as Config>::RuntimeOrigin::from(origin);
			Self::do_schedule_named(
				id,
				DispatchTime::At(when),
				maybe_periodic,
				priority,
				origin.caller().clone(),
				T::Preimages::bound(*call)?,
			)?;
			Ok(())
		}

		/// Cancel a named scheduled task.
		#[pallet::call_index(3)]
		#[pallet::weight(<T as Config>::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))]
		pub fn cancel_named(origin: OriginFor<T>, id: TaskName) -> DispatchResult {
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
			let origin = <T as Config>::RuntimeOrigin::from(origin);
			Self::do_cancel_named(Some(origin.caller().clone()), id)?;
			Ok(())
		}

		/// Anonymously schedule a task after a delay.
		#[pallet::call_index(4)]
		#[pallet::weight(<T as Config>::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))]
		pub fn schedule_after(
			origin: OriginFor<T>,
			after: BlockNumberFor<T>,
			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
			priority: schedule::Priority,
			call: Box<<T as Config>::RuntimeCall>,
		) -> DispatchResult {
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
			let origin = <T as Config>::RuntimeOrigin::from(origin);
			Self::do_schedule(
				// Only difference to `schedule`: the dispatch time is relative.
				DispatchTime::After(after),
				maybe_periodic,
				priority,
				origin.caller().clone(),
				T::Preimages::bound(*call)?,
			)?;
			Ok(())
		}

		/// Schedule a named task after a delay.
		#[pallet::call_index(5)]
		#[pallet::weight(<T as Config>::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))]
		pub fn schedule_named_after(
			origin: OriginFor<T>,
			id: TaskName,
			after: BlockNumberFor<T>,
			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
			priority: schedule::Priority,
			call: Box<<T as Config>::RuntimeCall>,
		) -> DispatchResult {
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
			let origin = <T as Config>::RuntimeOrigin::from(origin);
			Self::do_schedule_named(
				id,
				DispatchTime::After(after),
				maybe_periodic,
				priority,
				origin.caller().clone(),
				T::Preimages::bound(*call)?,
			)?;
			Ok(())
		}

		/// Set a retry configuration for a task so that, in case its scheduled run fails, it will
		/// be retried after `period` blocks, for a total amount of `retries` retries or until it
		/// succeeds.
		///
		/// Tasks which need to be scheduled for a retry are still subject to weight metering and
		/// agenda space, same as a regular task. If a periodic task fails, it will be scheduled
		/// normally while the task is retrying.
		///
		/// Tasks scheduled as a result of a retry for a periodic task are unnamed, non-periodic
		/// clones of the original task. Their retry configuration will be derived from the
		/// original task's configuration, but will have a lower value for `remaining` than the
		/// original `total_retries`.
		///
		/// This call **cannot** be used to set a retry configuration for a named task.
		// NOTE(review): the doc above excludes named tasks, but no explicit
		// `maybe_id.is_none()` check is visible in this body — confirm enforcement (named
		// tasks should go through `set_retry_named`).
		#[pallet::call_index(6)]
		#[pallet::weight(<T as Config>::WeightInfo::set_retry())]
		pub fn set_retry(
			origin: OriginFor<T>,
			task: TaskAddress<BlockNumberFor<T>>,
			retries: u8,
			period: BlockNumberFor<T>,
		) -> DispatchResult {
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
			let origin = <T as Config>::RuntimeOrigin::from(origin);
			let (when, index) = task;
			let agenda = Agenda::<T>::get(when);
			// Vacant (`None`) agenda slots count as not found.
			let scheduled = agenda
				.get(index as usize)
				.and_then(Option::as_ref)
				.ok_or(Error::<T>::NotFound)?;
			Self::ensure_privilege(origin.caller(), &scheduled.origin)?;
			Retries::<T>::insert(
				(when, index),
				RetryConfig { total_retries: retries, remaining: retries, period },
			);
			Self::deposit_event(Event::RetrySet { task, id: None, period, retries });
			Ok(())
		}

		/// Set a retry configuration for a named task so that, in case its scheduled run fails, it
		/// will be retried after `period` blocks, for a total amount of `retries` retries or until
		/// it succeeds.
		///
		/// Tasks which need to be scheduled for a retry are still subject to weight metering and
		/// agenda space, same as a regular task. If a periodic task fails, it will be scheduled
		/// normally while the task is retrying.
		///
		/// Tasks scheduled as a result of a retry for a periodic task are unnamed, non-periodic
		/// clones of the original task. Their retry configuration will be derived from the
		/// original task's configuration, but will have a lower value for `remaining` than the
		/// original `total_retries`.
		///
		/// This is the only way to set a retry configuration for a named task.
		#[pallet::call_index(7)]
		#[pallet::weight(<T as Config>::WeightInfo::set_retry_named())]
		pub fn set_retry_named(
			origin: OriginFor<T>,
			id: TaskName,
			retries: u8,
			period: BlockNumberFor<T>,
		) -> DispatchResult {
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
			let origin = <T as Config>::RuntimeOrigin::from(origin);
			// Resolve the name to its `(block, index)` task address first.
			let (when, agenda_index) = Lookup::<T>::get(&id).ok_or(Error::<T>::NotFound)?;
			let agenda = Agenda::<T>::get(when);
			let scheduled = agenda
				.get(agenda_index as usize)
				.and_then(Option::as_ref)
				.ok_or(Error::<T>::NotFound)?;
			Self::ensure_privilege(origin.caller(), &scheduled.origin)?;
			Retries::<T>::insert(
				(when, agenda_index),
				RetryConfig { total_retries: retries, remaining: retries, period },
			);
			Self::deposit_event(Event::RetrySet {
				task: (when, agenda_index),
				id: Some(id),
				period,
				retries,
			});
			Ok(())
		}

		/// Removes the retry configuration of a task.
		#[pallet::call_index(8)]
		#[pallet::weight(<T as Config>::WeightInfo::cancel_retry())]
		pub fn cancel_retry(
			origin: OriginFor<T>,
			task: TaskAddress<BlockNumberFor<T>>,
		) -> DispatchResult {
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
			let origin = <T as Config>::RuntimeOrigin::from(origin);
			Self::do_cancel_retry(origin.caller(), task)?;
			Self::deposit_event(Event::RetryCancelled { task, id: None });
			Ok(())
		}

		/// Cancel the retry configuration of a named task.
		#[pallet::call_index(9)]
		#[pallet::weight(<T as Config>::WeightInfo::cancel_retry_named())]
		pub fn cancel_retry_named(origin: OriginFor<T>, id: TaskName) -> DispatchResult {
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
			let origin = <T as Config>::RuntimeOrigin::from(origin);
			let task = Lookup::<T>::get(&id).ok_or(Error::<T>::NotFound)?;
			Self::do_cancel_retry(origin.caller(), task)?;
			Self::deposit_event(Event::RetryCancelled { task, id: Some(id) });
			Ok(())
		}
	}
681}
682
683impl<T: Config> Pallet<T> {
684	/// Migrate storage format from V1 to V4.
685	///
686	/// Returns the weight consumed by this migration.
687	pub fn migrate_v1_to_v4() -> Weight {
688		use migration::v1 as old;
689		let mut weight = T::DbWeight::get().reads_writes(1, 1);
690
691		// Delete all undecodable values.
692		// `StorageMap::translate` is not enough since it just skips them and leaves the keys in.
693		let keys = old::Agenda::<T>::iter_keys().collect::<Vec<_>>();
694		for key in keys {
695			weight.saturating_accrue(T::DbWeight::get().reads(1));
696			if let Err(_) = old::Agenda::<T>::try_get(&key) {
697				weight.saturating_accrue(T::DbWeight::get().writes(1));
698				old::Agenda::<T>::remove(&key);
699				log::warn!("Deleted undecodable agenda");
700			}
701		}
702
703		Agenda::<T>::translate::<
704			Vec<Option<ScheduledV1<<T as Config>::RuntimeCall, BlockNumberFor<T>>>>,
705			_,
706		>(|_, agenda| {
707			Some(BoundedVec::truncate_from(
708				agenda
709					.into_iter()
710					.map(|schedule| {
711						weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
712
713						schedule.and_then(|schedule| {
714							if let Some(id) = schedule.maybe_id.as_ref() {
715								let name = blake2_256(id);
716								if let Some(item) = old::Lookup::<T>::take(id) {
717									Lookup::<T>::insert(name, item);
718								}
719								weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
720							}
721
722							let call = T::Preimages::bound(schedule.call).ok()?;
723
724							if call.lookup_needed() {
725								weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1));
726							}
727
728							Some(Scheduled {
729								maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])),
730								priority: schedule.priority,
731								call,
732								maybe_periodic: schedule.maybe_periodic,
733								origin: system::RawOrigin::Root.into(),
734								_phantom: Default::default(),
735							})
736						})
737					})
738					.collect::<Vec<_>>(),
739			))
740		});
741
742		#[allow(deprecated)]
743		frame_support::storage::migration::remove_storage_prefix(
744			Self::name().as_bytes(),
745			b"StorageVersion",
746			&[],
747		);
748
749		StorageVersion::new(4).put::<Self>();
750
751		weight + T::DbWeight::get().writes(2)
752	}
753
754	/// Migrate storage format from V2 to V4.
755	///
756	/// Returns the weight consumed by this migration.
757	pub fn migrate_v2_to_v4() -> Weight {
758		use migration::v2 as old;
759		let mut weight = T::DbWeight::get().reads_writes(1, 1);
760
761		// Delete all undecodable values.
762		// `StorageMap::translate` is not enough since it just skips them and leaves the keys in.
763		let keys = old::Agenda::<T>::iter_keys().collect::<Vec<_>>();
764		for key in keys {
765			weight.saturating_accrue(T::DbWeight::get().reads(1));
766			if let Err(_) = old::Agenda::<T>::try_get(&key) {
767				weight.saturating_accrue(T::DbWeight::get().writes(1));
768				old::Agenda::<T>::remove(&key);
769				log::warn!("Deleted undecodable agenda");
770			}
771		}
772
773		Agenda::<T>::translate::<Vec<Option<ScheduledV2Of<T>>>, _>(|_, agenda| {
774			Some(BoundedVec::truncate_from(
775				agenda
776					.into_iter()
777					.map(|schedule| {
778						weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
779						schedule.and_then(|schedule| {
780							if let Some(id) = schedule.maybe_id.as_ref() {
781								let name = blake2_256(id);
782								if let Some(item) = old::Lookup::<T>::take(id) {
783									Lookup::<T>::insert(name, item);
784								}
785								weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
786							}
787
788							let call = T::Preimages::bound(schedule.call).ok()?;
789							if call.lookup_needed() {
790								weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1));
791							}
792
793							Some(Scheduled {
794								maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])),
795								priority: schedule.priority,
796								call,
797								maybe_periodic: schedule.maybe_periodic,
798								origin: schedule.origin,
799								_phantom: Default::default(),
800							})
801						})
802					})
803					.collect::<Vec<_>>(),
804			))
805		});
806
807		#[allow(deprecated)]
808		frame_support::storage::migration::remove_storage_prefix(
809			Self::name().as_bytes(),
810			b"StorageVersion",
811			&[],
812		);
813
814		StorageVersion::new(4).put::<Self>();
815
816		weight + T::DbWeight::get().writes(2)
817	}
818
	/// Migrate storage format from V3 to V4.
	///
	/// Hashes the old free-form task ids into fixed-size `TaskName`s, re-keys the `Lookup`
	/// map accordingly and re-bounds every call via `T::Preimages`. Undecodable agendas and
	/// calls are deleted rather than carried over.
	///
	/// Returns the weight consumed by this migration.
	#[allow(deprecated)]
	pub fn migrate_v3_to_v4() -> Weight {
		use migration::v3 as old;
		// Upfront allowance for the version bookkeeping at the end of this function.
		let mut weight = T::DbWeight::get().reads_writes(2, 1);

		// Delete all undecodable values.
		// `StorageMap::translate` is not enough since it just skips them and leaves the keys in.
		let blocks = old::Agenda::<T>::iter_keys().collect::<Vec<_>>();
		for block in blocks {
			weight.saturating_accrue(T::DbWeight::get().reads(1));
			if let Err(_) = old::Agenda::<T>::try_get(&block) {
				weight.saturating_accrue(T::DbWeight::get().writes(1));
				old::Agenda::<T>::remove(&block);
				log::warn!("Deleted undecodable agenda of block: {:?}", block);
			}
		}

		// Re-encode every remaining agenda. Note that `translate` would delete the key if the
		// closure returned `None`; we always return `Some` and instead drop individual
		// unmigratable items inside the vector.
		Agenda::<T>::translate::<Vec<Option<ScheduledV3Of<T>>>, _>(|block, agenda| {
			log::info!("Migrating agenda of block: {:?}", &block);
			Some(BoundedVec::truncate_from(
				agenda
					.into_iter()
					.map(|schedule| {
						weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
						schedule
							.and_then(|schedule| {
								// Named task: move its `Lookup` entry to the hashed key.
								if let Some(id) = schedule.maybe_id.as_ref() {
									let name = blake2_256(id);
									if let Some(item) = old::Lookup::<T>::take(id) {
										Lookup::<T>::insert(name, item);
										log::info!("Migrated name for id: {:?}", id);
									} else {
										log::error!("No name in Lookup for id: {:?}", &id);
									}
									weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
								} else {
									log::info!("Schedule is unnamed");
								}

								let call = match schedule.call {
									MaybeHashed::Hash(h) => {
										let bounded = Bounded::from_legacy_hash(h);
										// Check that the call can be decoded in the new runtime.
										if let Err(err) = T::Preimages::peek::<
											<T as Config>::RuntimeCall,
										>(&bounded)
										{
											log::error!(
												"Dropping undecodable call {:?}: {:?}",
												&h,
												&err
											);
											return None
										}
										weight.saturating_accrue(T::DbWeight::get().reads(1));
										log::info!("Migrated call by hash, hash: {:?}", h);
										bounded
									},
									MaybeHashed::Value(v) => {
										let call = T::Preimages::bound(v)
											.map_err(|e| {
												log::error!("Could not bound Call: {:?}", e)
											})
											.ok()?;
										if call.lookup_needed() {
											weight.saturating_accrue(
												T::DbWeight::get().reads_writes(0, 1),
											);
										}
										log::info!(
											"Migrated call by value, hash: {:?}",
											call.hash()
										);
										call
									},
								};

								Some(Scheduled {
									// Ids change from free-form bytes to their blake2-256 hash.
									maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])),
									priority: schedule.priority,
									call,
									maybe_periodic: schedule.maybe_periodic,
									origin: schedule.origin,
									_phantom: Default::default(),
								})
							})
							.or_else(|| {
								log::info!("Schedule in agenda for block {:?} is empty - nothing to do here.", &block);
								None
							})
					})
					.collect::<Vec<_>>(),
			))
		});

		#[allow(deprecated)]
		frame_support::storage::migration::remove_storage_prefix(
			Self::name().as_bytes(),
			b"StorageVersion",
			&[],
		);

		StorageVersion::new(4).put::<Self>();

		weight + T::DbWeight::get().writes(2)
	}
928}
929
930impl<T: Config> Pallet<T> {
	/// Helper to migrate scheduler when the pallet origin type has changed.
	///
	/// Re-encodes every agenda item, converting its stored `OldOrigin` into the current
	/// `T::PalletsOrigin` via `Into`. All other fields are carried over unchanged.
	pub fn migrate_origin<OldOrigin: Into<T::PalletsOrigin> + codec::Decode>() {
		Agenda::<T>::translate::<
			Vec<
				Option<
					Scheduled<
						TaskName,
						BoundedCallOf<T>,
						BlockNumberFor<T>,
						OldOrigin,
						T::AccountId,
					>,
				>,
			>,
			_,
		>(|_, agenda| {
			// Re-bound the vector; items beyond `MaxScheduledPerBlock` would be truncated.
			Some(BoundedVec::truncate_from(
				agenda
					.into_iter()
					.map(|schedule| {
						schedule.map(|schedule| Scheduled {
							maybe_id: schedule.maybe_id,
							priority: schedule.priority,
							call: schedule.call,
							maybe_periodic: schedule.maybe_periodic,
							// The only actual conversion: old origin -> new origin.
							origin: schedule.origin.into(),
							_phantom: Default::default(),
						})
					})
					.collect::<Vec<_>>(),
			))
		});
	}
964
965	fn resolve_time(
966		when: DispatchTime<BlockNumberFor<T>>,
967	) -> Result<BlockNumberFor<T>, DispatchError> {
968		let now = T::BlockNumberProvider::current_block_number();
969		let when = match when {
970			DispatchTime::At(x) => x,
971			// The current block has already completed it's scheduled tasks, so
972			// Schedule the task at lest one block after this current block.
973			DispatchTime::After(x) => now.saturating_add(x).saturating_add(One::one()),
974		};
975
976		if when <= now {
977			return Err(Error::<T>::TargetBlockNumberInPast.into())
978		}
979
980		Ok(when)
981	}
982
983	fn place_task(
984		when: BlockNumberFor<T>,
985		what: ScheduledOf<T>,
986	) -> Result<TaskAddress<BlockNumberFor<T>>, (DispatchError, ScheduledOf<T>)> {
987		let maybe_name = what.maybe_id;
988		let index = Self::push_to_agenda(when, what)?;
989		let address = (when, index);
990		if let Some(name) = maybe_name {
991			Lookup::<T>::insert(name, address)
992		}
993		Self::deposit_event(Event::Scheduled { when: address.0, index: address.1 });
994		Ok(address)
995	}
996
997	fn push_to_agenda(
998		when: BlockNumberFor<T>,
999		what: ScheduledOf<T>,
1000	) -> Result<u32, (DispatchError, ScheduledOf<T>)> {
1001		let mut agenda = Agenda::<T>::get(when);
1002		let index = if (agenda.len() as u32) < T::MaxScheduledPerBlock::get() {
1003			// will always succeed due to the above check.
1004			let _ = agenda.try_push(Some(what));
1005			agenda.len() as u32 - 1
1006		} else {
1007			if let Some(hole_index) = agenda.iter().position(|i| i.is_none()) {
1008				agenda[hole_index] = Some(what);
1009				hole_index as u32
1010			} else {
1011				return Err((DispatchError::Exhausted, what))
1012			}
1013		};
1014		Agenda::<T>::insert(when, agenda);
1015		Ok(index)
1016	}
1017
1018	/// Remove trailing `None` items of an agenda at `when`. If all items are `None` remove the
1019	/// agenda record entirely.
1020	fn cleanup_agenda(when: BlockNumberFor<T>) {
1021		let mut agenda = Agenda::<T>::get(when);
1022		match agenda.iter().rposition(|i| i.is_some()) {
1023			// Note that `agenda.len() > i + 1` implies that the agenda ends on a sequence of at
1024			// least one `None` item(s).
1025			Some(i) if agenda.len() > i + 1 => {
1026				agenda.truncate(i + 1);
1027				Agenda::<T>::insert(when, agenda);
1028			},
1029			// This branch is taken if `agenda.len() <= i + 1 ==> agenda.len() == i + 1 <==>
1030			// agenda.len() - 1 == i` i.e. the agenda's last item is `Some`.
1031			Some(_) => {},
1032			// All items in the agenda are `None`.
1033			None => {
1034				Agenda::<T>::remove(when);
1035			},
1036		}
1037	}
1038
1039	fn do_schedule(
1040		when: DispatchTime<BlockNumberFor<T>>,
1041		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
1042		priority: schedule::Priority,
1043		origin: T::PalletsOrigin,
1044		call: BoundedCallOf<T>,
1045	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
1046		let when = Self::resolve_time(when)?;
1047
1048		let lookup_hash = call.lookup_hash();
1049
1050		// sanitize maybe_periodic
1051		let maybe_periodic = maybe_periodic
1052			.filter(|p| p.1 > 1 && !p.0.is_zero())
1053			// Remove one from the number of repetitions since we will schedule one now.
1054			.map(|(p, c)| (p, c - 1));
1055		let task = Scheduled {
1056			maybe_id: None,
1057			priority,
1058			call,
1059			maybe_periodic,
1060			origin,
1061			_phantom: PhantomData,
1062		};
1063		let res = Self::place_task(when, task).map_err(|x| x.0)?;
1064
1065		if let Some(hash) = lookup_hash {
1066			// Request the call to be made available.
1067			T::Preimages::request(&hash);
1068		}
1069
1070		Ok(res)
1071	}
1072
	/// Cancel the task at `(when, index)`.
	///
	/// If an `origin` is supplied it must have at least the privilege of the task's origin;
	/// `None` skips the check (internal usage). On success the call preimage is dropped, the
	/// `Lookup` and `Retries` entries are removed and a `Canceled` event is deposited. Fails
	/// with `NotFound` if the slot does not exist or is already empty.
	fn do_cancel(
		origin: Option<T::PalletsOrigin>,
		(when, index): TaskAddress<BlockNumberFor<T>>,
	) -> Result<(), DispatchError> {
		let scheduled = Agenda::<T>::try_mutate(when, |agenda| {
			agenda.get_mut(index as usize).map_or(
				// Out-of-range index: nothing taken; mapped to `NotFound` below.
				Ok(None),
				|s| -> Result<Option<Scheduled<_, _, _, _, _>>, DispatchError> {
					if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) {
						Self::ensure_privilege(o, &s.origin)?;
					};
					// Empty the slot; trailing `None`s are pruned by `cleanup_agenda` below.
					Ok(s.take())
				},
			)
		})?;
		if let Some(s) = scheduled {
			T::Preimages::drop(&s.call);
			if let Some(id) = s.maybe_id {
				Lookup::<T>::remove(id);
			}
			Retries::<T>::remove((when, index));
			Self::cleanup_agenda(when);
			Self::deposit_event(Event::Canceled { when, index });
			Ok(())
		} else {
			return Err(Error::<T>::NotFound.into())
		}
	}
1101
1102	fn do_reschedule(
1103		(when, index): TaskAddress<BlockNumberFor<T>>,
1104		new_time: DispatchTime<BlockNumberFor<T>>,
1105	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
1106		let new_time = Self::resolve_time(new_time)?;
1107
1108		if new_time == when {
1109			return Err(Error::<T>::RescheduleNoChange.into())
1110		}
1111
1112		let task = Agenda::<T>::try_mutate(when, |agenda| {
1113			let task = agenda.get_mut(index as usize).ok_or(Error::<T>::NotFound)?;
1114			ensure!(!matches!(task, Some(Scheduled { maybe_id: Some(_), .. })), Error::<T>::Named);
1115			task.take().ok_or(Error::<T>::NotFound)
1116		})?;
1117		Self::cleanup_agenda(when);
1118		Self::deposit_event(Event::Canceled { when, index });
1119
1120		Self::place_task(new_time, task).map_err(|x| x.0)
1121	}
1122
	/// Schedule a named task for execution at (or after) `when`.
	///
	/// Fails with `FailedToSchedule` if `id` is already registered. Otherwise behaves like
	/// `do_schedule`, additionally registering the task's address under `id` in `Lookup`
	/// (done inside `place_task`).
	fn do_schedule_named(
		id: TaskName,
		when: DispatchTime<BlockNumberFor<T>>,
		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
		priority: schedule::Priority,
		origin: T::PalletsOrigin,
		call: BoundedCallOf<T>,
	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
		// Ensure the id is unique.
		if Lookup::<T>::contains_key(&id) {
			return Err(Error::<T>::FailedToSchedule.into())
		}

		let when = Self::resolve_time(when)?;

		let lookup_hash = call.lookup_hash();

		// Sanitize `maybe_periodic`: drop zero-length periods and single executions.
		let maybe_periodic = maybe_periodic
			.filter(|p| p.1 > 1 && !p.0.is_zero())
			// Remove one from the number of repetitions since we will schedule one now.
			.map(|(p, c)| (p, c - 1));

		let task = Scheduled {
			maybe_id: Some(id),
			priority,
			call,
			maybe_periodic,
			origin,
			_phantom: Default::default(),
		};
		// `place_task` also inserts the `Lookup` entry for `id`.
		let res = Self::place_task(when, task).map_err(|x| x.0)?;

		if let Some(hash) = lookup_hash {
			// Request the call to be made available.
			T::Preimages::request(&hash);
		}

		Ok(res)
	}
1163
1164	fn do_cancel_named(origin: Option<T::PalletsOrigin>, id: TaskName) -> DispatchResult {
1165		Lookup::<T>::try_mutate_exists(id, |lookup| -> DispatchResult {
1166			if let Some((when, index)) = lookup.take() {
1167				let i = index as usize;
1168				Agenda::<T>::try_mutate(when, |agenda| -> DispatchResult {
1169					if let Some(s) = agenda.get_mut(i) {
1170						if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) {
1171							Self::ensure_privilege(o, &s.origin)?;
1172							Retries::<T>::remove((when, index));
1173							T::Preimages::drop(&s.call);
1174						}
1175						*s = None;
1176					}
1177					Ok(())
1178				})?;
1179				Self::cleanup_agenda(when);
1180				Self::deposit_event(Event::Canceled { when, index });
1181				Ok(())
1182			} else {
1183				return Err(Error::<T>::NotFound.into())
1184			}
1185		})
1186	}
1187
	/// Move the named task `id` to the block resolved from `new_time`.
	///
	/// Fails with `NotFound` if `id` is unknown or its agenda slot is empty and with
	/// `RescheduleNoChange` if the target block equals the current one.
	fn do_reschedule_named(
		id: TaskName,
		new_time: DispatchTime<BlockNumberFor<T>>,
	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
		let new_time = Self::resolve_time(new_time)?;

		let lookup = Lookup::<T>::get(id);
		let (when, index) = lookup.ok_or(Error::<T>::NotFound)?;

		if new_time == when {
			return Err(Error::<T>::RescheduleNoChange.into())
		}

		// Take the task out of its current agenda slot ...
		let task = Agenda::<T>::try_mutate(when, |agenda| {
			let task = agenda.get_mut(index as usize).ok_or(Error::<T>::NotFound)?;
			task.take().ok_or(Error::<T>::NotFound)
		})?;
		Self::cleanup_agenda(when);
		Self::deposit_event(Event::Canceled { when, index });
		// ... and place it in the new block; `place_task` re-inserts the `Lookup` entry with
		// the task's new address since the task keeps its `maybe_id`.
		Self::place_task(new_time, task).map_err(|x| x.0)
	}
1209
1210	fn do_cancel_retry(
1211		origin: &T::PalletsOrigin,
1212		(when, index): TaskAddress<BlockNumberFor<T>>,
1213	) -> Result<(), DispatchError> {
1214		let agenda = Agenda::<T>::get(when);
1215		let scheduled = agenda
1216			.get(index as usize)
1217			.and_then(Option::as_ref)
1218			.ok_or(Error::<T>::NotFound)?;
1219		Self::ensure_privilege(origin, &scheduled.origin)?;
1220		Retries::<T>::remove((when, index));
1221		Ok(())
1222	}
1223}
1224
/// Reason why servicing a single scheduled task failed.
enum ServiceTaskError {
	/// Could not be executed due to missing preimage.
	Unavailable,
	/// Could not be executed due to weight limitations.
	Overweight,
}
1231use ServiceTaskError::*;
1232
1233impl<T: Config> Pallet<T> {
	/// Service up to `max` agendas queue starting from earliest incompletely executed agenda.
	fn service_agendas(weight: &mut WeightMeter, now: BlockNumberFor<T>, max: u32) {
		if weight.try_consume(T::WeightInfo::service_agendas_base()).is_err() {
			// Not even the base weight fits; try again in a later block.
			return
		}

		let mut incomplete_since = now + One::one();
		// Resume from the oldest block whose agenda was not fully serviced yet.
		let mut when = IncompleteSince::<T>::take().unwrap_or(now);
		let mut is_first = true; // first task from the first agenda.

		let max_items = T::MaxScheduledPerBlock::get();
		let mut count_down = max;
		let service_agenda_base_weight = T::WeightInfo::service_agenda_base(max_items);
		while count_down > 0 && when <= now && weight.can_consume(service_agenda_base_weight) {
			if !Self::service_agenda(weight, is_first, now, when, u32::MAX) {
				// Agenda not fully processed; remember the earliest such block.
				incomplete_since = incomplete_since.min(when);
			}
			is_first = false;
			when.saturating_inc();
			count_down.saturating_dec();
		}
		// If the loop stopped early (weight or `max` exhausted), `when` is the first block
		// that was not visited at all.
		incomplete_since = incomplete_since.min(when);
		if incomplete_since <= now {
			Self::deposit_event(Event::AgendaIncomplete { when: incomplete_since });
			IncompleteSince::<T>::put(incomplete_since);
		} else {
			// The next scheduler iteration should typically start from `now + 1` (`next_iter_now`).
			// However, if the [`Config::BlockNumberProvider`] is not a local block number provider,
			// then `next_iter_now` could be `now + n` where `n > 1`. In this case, we want to start
			// from `now + 1` to ensure we don't miss any agendas.
			IncompleteSince::<T>::put(now + One::one());
		}
	}
1267
	/// Returns `true` if the agenda was fully completed, `false` if it should be revisited at a
	/// later block.
	fn service_agenda(
		weight: &mut WeightMeter,
		mut is_first: bool,
		now: BlockNumberFor<T>,
		when: BlockNumberFor<T>,
		max: u32,
	) -> bool {
		let mut agenda = Agenda::<T>::get(when);
		// Collect the occupied slots as `(index, priority)` and order them by priority so the
		// most urgent tasks are serviced first.
		let mut ordered = agenda
			.iter()
			.enumerate()
			.filter_map(|(index, maybe_item)| {
				maybe_item.as_ref().map(|item| (index as u32, item.priority))
			})
			.collect::<Vec<_>>();
		ordered.sort_by_key(|k| k.1);
		let within_limit = weight
			.try_consume(T::WeightInfo::service_agenda_base(ordered.len() as u32))
			.is_ok();
		debug_assert!(within_limit, "weight limit should have been checked in advance");

		// Items which we know can be executed and have postponed for execution in a later block.
		let mut postponed = (ordered.len() as u32).saturating_sub(max);
		// Items which we don't know can ever be executed.
		let mut dropped = 0;

		for (agenda_index, _) in ordered.into_iter().take(max as usize) {
			let Some(task) = agenda[agenda_index as usize].take() else { continue };
			let base_weight = T::WeightInfo::service_task(
				task.call.lookup_len().map(|x| x as usize),
				task.maybe_id.is_some(),
				task.maybe_periodic.is_some(),
			);
			if !weight.can_consume(base_weight) {
				// Not even the bookkeeping for this task fits: put it back and stop here.
				postponed += 1;
				agenda[agenda_index as usize] = Some(task);
				break
			}
			let result = Self::service_task(weight, now, when, agenda_index, is_first, task);
			// Put the slot back according to the outcome; `service_task` returns the task in
			// the error cases so it can be retained.
			agenda[agenda_index as usize] = match result {
				Err((Unavailable, slot)) => {
					dropped += 1;
					slot
				},
				Err((Overweight, slot)) => {
					postponed += 1;
					slot
				},
				Ok(()) => {
					is_first = false;
					None
				},
			};
		}
		// Keep the agenda around only if some slot still holds (or may hold) a task.
		if postponed > 0 || dropped > 0 {
			Agenda::<T>::insert(when, agenda);
		} else {
			Agenda::<T>::remove(when);
		}

		postponed == 0
	}
1332
	/// Service (i.e. execute) the given task, being careful not to overflow the `weight` counter.
	///
	/// This involves:
	/// - removing and potentially replacing the `Lookup` entry for the task.
	/// - realizing the task's call which can include a preimage lookup.
	/// - Rescheduling the task for execution in a later agenda if periodic.
	fn service_task(
		weight: &mut WeightMeter,
		now: BlockNumberFor<T>,
		when: BlockNumberFor<T>,
		agenda_index: u32,
		is_first: bool,
		mut task: ScheduledOf<T>,
	) -> Result<(), (ServiceTaskError, Option<ScheduledOf<T>>)> {
		if let Some(ref id) = task.maybe_id {
			// Remove the name mapping; a periodic follow-up re-registers it via `place_task`
			// below since the task keeps its `maybe_id`.
			Lookup::<T>::remove(id);
		}

		// Realize the call, potentially via a preimage lookup.
		let (call, lookup_len) = match T::Preimages::peek(&task.call) {
			Ok(c) => c,
			Err(_) => {
				Self::deposit_event(Event::CallUnavailable {
					task: (when, agenda_index),
					id: task.maybe_id,
				});

				// It was not available when we needed it, so we don't need to have requested it
				// anymore.
				T::Preimages::drop(&task.call);

				// We don't know why `peek` failed, thus we must account here for the "full weight".
				let _ = weight.try_consume(T::WeightInfo::service_task(
					task.call.lookup_len().map(|x| x as usize),
					task.maybe_id.is_some(),
					task.maybe_periodic.is_some(),
				));

				return Err((Unavailable, Some(task)))
			},
		};

		let _ = weight.try_consume(T::WeightInfo::service_task(
			lookup_len.map(|x| x as usize),
			task.maybe_id.is_some(),
			task.maybe_periodic.is_some(),
		));

		match Self::execute_dispatch(weight, task.origin.clone(), call) {
			Err(()) if is_first => {
				// First task of the block and it still does not fit: it can never execute.
				T::Preimages::drop(&task.call);
				Self::deposit_event(Event::PermanentlyOverweight {
					task: (when, agenda_index),
					id: task.maybe_id,
				});
				Err((Unavailable, Some(task)))
			},
			// Not the first task: it may fit into a later, emptier block.
			Err(()) => Err((Overweight, Some(task))),
			Ok(result) => {
				let failed = result.is_err();
				let maybe_retry_config = Retries::<T>::take((when, agenda_index));
				Self::deposit_event(Event::Dispatched {
					task: (when, agenda_index),
					id: task.maybe_id,
					result,
				});

				match maybe_retry_config {
					// Dispatch failed and a retry budget exists: try to schedule a retry.
					Some(retry_config) if failed => {
						Self::schedule_retry(weight, now, when, agenda_index, &task, retry_config);
					},
					_ => {},
				}

				if let &Some((period, count)) = &task.maybe_periodic {
					if count > 1 {
						task.maybe_periodic = Some((period, count - 1));
					} else {
						// Last repetition: the follow-up task becomes a one-shot.
						task.maybe_periodic = None;
					}
					let wake = now.saturating_add(period);
					match Self::place_task(wake, task) {
						Ok(new_address) =>
							if let Some(retry_config) = maybe_retry_config {
								// Carry the retry budget over to the task's new address.
								Retries::<T>::insert(new_address, retry_config);
							},
						Err((_, task)) => {
							// TODO: Leave task in storage somewhere for it to be rescheduled
							// manually.
							T::Preimages::drop(&task.call);
							Self::deposit_event(Event::PeriodicFailed {
								task: (when, agenda_index),
								id: task.maybe_id,
							});
						},
					}
				} else {
					// One-shot task is done; release the preimage.
					T::Preimages::drop(&task.call);
				}
				Ok(())
			},
		}
	}
1435
	/// Make a dispatch to the given `call` from the given `origin`, ensuring that the `weight`
	/// counter does not exceed its limit and that it is counted accurately (e.g. accounted using
	/// post info if available).
	///
	/// NOTE: Only the weight for this function will be counted (origin lookup, dispatch and the
	/// call itself).
	///
	/// Returns an error if the call is overweight.
	fn execute_dispatch(
		weight: &mut WeightMeter,
		origin: T::PalletsOrigin,
		call: <T as Config>::RuntimeCall,
	) -> Result<DispatchResult, ()> {
		// Signed origins have a different (heavier) dispatch overhead than unsigned/root ones.
		let base_weight = match origin.as_system_ref() {
			Some(&RawOrigin::Signed(_)) => T::WeightInfo::execute_dispatch_signed(),
			_ => T::WeightInfo::execute_dispatch_unsigned(),
		};
		let call_weight = call.get_dispatch_info().call_weight;
		// We only allow a scheduled call if it cannot push the weight past the limit.
		let max_weight = base_weight.saturating_add(call_weight);

		if !weight.can_consume(max_weight) {
			return Err(())
		}

		let dispatch_origin = origin.into();
		let (maybe_actual_call_weight, result) = match call.dispatch(dispatch_origin) {
			Ok(post_info) => (post_info.actual_weight, Ok(())),
			Err(error_and_info) =>
				(error_and_info.post_info.actual_weight, Err(error_and_info.error)),
		};
		// Prefer the post-dispatch weight when the call reported one.
		let call_weight = maybe_actual_call_weight.unwrap_or(call_weight);
		let _ = weight.try_consume(base_weight);
		let _ = weight.try_consume(call_weight);
		Ok(result)
	}
1472
	/// Check if a task has a retry configuration in place and, if so, try to reschedule it.
	///
	/// Possible causes for failure to schedule a retry for a task:
	/// - there wasn't enough weight to run the task reschedule logic
	/// - there was no retry configuration in place
	/// - there were no more retry attempts left
	/// - the agenda was full.
	fn schedule_retry(
		weight: &mut WeightMeter,
		now: BlockNumberFor<T>,
		when: BlockNumberFor<T>,
		agenda_index: u32,
		task: &ScheduledOf<T>,
		retry_config: RetryConfig<BlockNumberFor<T>>,
	) {
		if weight
			.try_consume(T::WeightInfo::schedule_retry(T::MaxScheduledPerBlock::get()))
			.is_err()
		{
			// No weight left for the reschedule logic itself.
			Self::deposit_event(Event::RetryFailed {
				task: (when, agenda_index),
				id: task.maybe_id,
			});
			return;
		}

		let RetryConfig { total_retries, mut remaining, period } = retry_config;
		// Consume one attempt; bail out silently if the budget is already exhausted.
		remaining = match remaining.checked_sub(1) {
			Some(n) => n,
			None => return,
		};
		let wake = now.saturating_add(period);
		match Self::place_task(wake, task.as_retry()) {
			Ok(address) => {
				// Reinsert the retry config to the new address of the task after it was
				// placed.
				Retries::<T>::insert(address, RetryConfig { total_retries, remaining, period });
			},
			Err((_, task)) => {
				// TODO: Leave task in storage somewhere for it to be
				// rescheduled manually.
				T::Preimages::drop(&task.call);
				Self::deposit_event(Event::RetryFailed {
					task: (when, agenda_index),
					id: task.maybe_id,
				});
			},
		}
	}
1522
1523	/// Ensure that `left` has at least the same level of privilege or higher than `right`.
1524	///
1525	/// Returns an error if `left` has a lower level of privilege or the two cannot be compared.
1526	fn ensure_privilege(
1527		left: &<T as Config>::PalletsOrigin,
1528		right: &<T as Config>::PalletsOrigin,
1529	) -> Result<(), DispatchError> {
1530		if matches!(T::OriginPrivilegeCmp::cmp_privilege(left, right), Some(Ordering::Less) | None)
1531		{
1532			return Err(BadOrigin.into());
1533		}
1534		Ok(())
1535	}
1536}
1537
#[allow(deprecated)]
impl<T: Config> schedule::v2::Anon<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
	for Pallet<T>
{
	type Address = TaskAddress<BlockNumberFor<T>>;
	type Hash = T::Hash;

	fn schedule(
		when: DispatchTime<BlockNumberFor<T>>,
		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
		priority: schedule::Priority,
		origin: T::PalletsOrigin,
		call: CallOrHashOf<T>,
	) -> Result<Self::Address, DispatchError> {
		// Only by-value calls can be bounded here; hash variants fail with `CannotLookup`.
		let call = call.as_value().ok_or(DispatchError::CannotLookup)?;
		let call = T::Preimages::bound(call)?.transmute();
		Self::do_schedule(when, maybe_periodic, priority, origin, call)
	}

	fn cancel((when, index): Self::Address) -> Result<(), ()> {
		// `None` origin: no privilege check for trait-based cancellations.
		Self::do_cancel(None, (when, index)).map_err(|_| ())
	}

	fn reschedule(
		address: Self::Address,
		when: DispatchTime<BlockNumberFor<T>>,
	) -> Result<Self::Address, DispatchError> {
		Self::do_reschedule(address, when)
	}

	fn next_dispatch_time((when, index): Self::Address) -> Result<BlockNumberFor<T>, ()> {
		// Only checks that the agenda slot exists; an emptied (`None`) slot still yields `Ok`.
		Agenda::<T>::get(when).get(index as usize).ok_or(()).map(|_| when)
	}
}
1572
// TODO: migrate `schedule::v2::Anon` to `v3`
#[allow(deprecated)]
impl<T: Config> schedule::v2::Named<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
	for Pallet<T>
{
	type Address = TaskAddress<BlockNumberFor<T>>;
	type Hash = T::Hash;

	fn schedule_named(
		id: Vec<u8>,
		when: DispatchTime<BlockNumberFor<T>>,
		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
		priority: schedule::Priority,
		origin: T::PalletsOrigin,
		call: CallOrHashOf<T>,
	) -> Result<Self::Address, ()> {
		// Only by-value calls can be bounded here.
		let call = call.as_value().ok_or(())?;
		let call = T::Preimages::bound(call).map_err(|_| ())?.transmute();
		// Legacy ids are free-form bytes; hash them into the fixed-size `TaskName`.
		let name = blake2_256(&id[..]);
		Self::do_schedule_named(name, when, maybe_periodic, priority, origin, call).map_err(|_| ())
	}

	fn cancel_named(id: Vec<u8>) -> Result<(), ()> {
		let name = blake2_256(&id[..]);
		Self::do_cancel_named(None, name).map_err(|_| ())
	}

	fn reschedule_named(
		id: Vec<u8>,
		when: DispatchTime<BlockNumberFor<T>>,
	) -> Result<Self::Address, DispatchError> {
		let name = blake2_256(&id[..]);
		Self::do_reschedule_named(name, when)
	}

	fn next_dispatch_time(id: Vec<u8>) -> Result<BlockNumberFor<T>, ()> {
		let name = blake2_256(&id[..]);
		// Only checks that the agenda slot exists; an emptied (`None`) slot still yields `Ok`.
		Lookup::<T>::get(name)
			.and_then(|(when, index)| Agenda::<T>::get(when).get(index as usize).map(|_| when))
			.ok_or(())
	}
}
1615
1616impl<T: Config> schedule::v3::Anon<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
1617	for Pallet<T>
1618{
1619	type Address = TaskAddress<BlockNumberFor<T>>;
1620	type Hasher = T::Hashing;
1621
1622	fn schedule(
1623		when: DispatchTime<BlockNumberFor<T>>,
1624		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
1625		priority: schedule::Priority,
1626		origin: T::PalletsOrigin,
1627		call: BoundedCallOf<T>,
1628	) -> Result<Self::Address, DispatchError> {
1629		Self::do_schedule(when, maybe_periodic, priority, origin, call)
1630	}
1631
1632	fn cancel((when, index): Self::Address) -> Result<(), DispatchError> {
1633		Self::do_cancel(None, (when, index)).map_err(map_err_to_v3_err::<T>)
1634	}
1635
1636	fn reschedule(
1637		address: Self::Address,
1638		when: DispatchTime<BlockNumberFor<T>>,
1639	) -> Result<Self::Address, DispatchError> {
1640		Self::do_reschedule(address, when).map_err(map_err_to_v3_err::<T>)
1641	}
1642
1643	fn next_dispatch_time(
1644		(when, index): Self::Address,
1645	) -> Result<BlockNumberFor<T>, DispatchError> {
1646		Agenda::<T>::get(when)
1647			.get(index as usize)
1648			.ok_or(DispatchError::Unavailable)
1649			.map(|_| when)
1650	}
1651}
1652
1653use schedule::v3::TaskName;
1654
1655impl<T: Config> schedule::v3::Named<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
1656	for Pallet<T>
1657{
1658	type Address = TaskAddress<BlockNumberFor<T>>;
1659	type Hasher = T::Hashing;
1660
1661	fn schedule_named(
1662		id: TaskName,
1663		when: DispatchTime<BlockNumberFor<T>>,
1664		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
1665		priority: schedule::Priority,
1666		origin: T::PalletsOrigin,
1667		call: BoundedCallOf<T>,
1668	) -> Result<Self::Address, DispatchError> {
1669		Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call)
1670	}
1671
1672	fn cancel_named(id: TaskName) -> Result<(), DispatchError> {
1673		Self::do_cancel_named(None, id).map_err(map_err_to_v3_err::<T>)
1674	}
1675
1676	fn reschedule_named(
1677		id: TaskName,
1678		when: DispatchTime<BlockNumberFor<T>>,
1679	) -> Result<Self::Address, DispatchError> {
1680		Self::do_reschedule_named(id, when).map_err(map_err_to_v3_err::<T>)
1681	}
1682
1683	fn next_dispatch_time(id: TaskName) -> Result<BlockNumberFor<T>, DispatchError> {
1684		Lookup::<T>::get(id)
1685			.and_then(|(when, index)| Agenda::<T>::get(when).get(index as usize).map(|_| when))
1686			.ok_or(DispatchError::Unavailable)
1687	}
1688}
1689
1690/// Maps a pallet error to an `schedule::v3` error.
1691fn map_err_to_v3_err<T: Config>(err: DispatchError) -> DispatchError {
1692	if err == DispatchError::from(Error::<T>::NotFound) {
1693		DispatchError::Unavailable
1694	} else {
1695		err
1696	}
1697}