pallet_broker/lib.rs
// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
18#![cfg_attr(not(feature = "std"), no_std)]
19#![doc = include_str!("../README.md")]
20
21pub use pallet::*;
22
23mod adapt_price;
24mod benchmarking;
25mod core_mask;
26mod coretime_interface;
27mod dispatchable_impls;
28#[cfg(test)]
29mod mock;
30mod nonfungible_impl;
31#[cfg(test)]
32mod test_fungibles;
33#[cfg(test)]
34mod tests;
35mod tick_impls;
36mod types;
37mod utility_impls;
38
39pub mod migration;
40pub mod runtime_api;
41
42pub mod weights;
43pub use weights::WeightInfo;
44
45pub use adapt_price::*;
46pub use core_mask::*;
47pub use coretime_interface::*;
48pub use types::*;
49
50extern crate alloc;
51
/// The log target for this pallet; filter its records with
/// `-lruntime::broker=trace` (or similar) on the node command line.
const LOG_TARGET: &str = "runtime::broker";
54
55#[frame_support::pallet]
56pub mod pallet {
57 use super::*;
58 use alloc::vec::Vec;
59 use frame_support::{
60 pallet_prelude::{DispatchResult, DispatchResultWithPostInfo, *},
61 traits::{
62 fungible::{Balanced, Credit, Mutate},
63 BuildGenesisConfig, EnsureOrigin, OnUnbalanced,
64 },
65 PalletId,
66 };
67 use frame_system::pallet_prelude::*;
68 use sp_runtime::traits::{Convert, ConvertBack, MaybeConvert};
69
70 const STORAGE_VERSION: StorageVersion = StorageVersion::new(4);
71
	/// The pallet type. Carries the in-code storage version, which
	/// `frame_support` compares against the on-chain version to detect when
	/// migrations must run.
	#[pallet::pallet]
	#[pallet::storage_version(STORAGE_VERSION)]
	pub struct Pallet<T>(_);
75
	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// The overarching runtime event type.
		#[allow(deprecated)]
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// Weight information for all calls of this pallet.
		type WeightInfo: WeightInfo;

		/// Currency used to pay for Coretime.
		type Currency: Mutate<Self::AccountId> + Balanced<Self::AccountId>;

		/// The origin check required for administering this pallet.
		type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;

		/// What to do with any revenues collected from the sale of Coretime.
		type OnRevenue: OnUnbalanced<Credit<Self::AccountId, Self::Currency>>;

		/// Relay chain's Coretime API used to interact with and instruct the low-level scheduling
		/// system.
		type Coretime: CoretimeInterface;

		/// The algorithm to determine the next price on the basis of market performance.
		type PriceAdapter: AdaptPrice<BalanceOf<Self>>;

		/// Reversible conversion from local balance to Relay-chain balance. This will typically be
		/// the `Identity`, but provided just in case the chains use different representations.
		type ConvertBalance: Convert<BalanceOf<Self>, RelayBalanceOf<Self>>
			+ ConvertBack<BalanceOf<Self>, RelayBalanceOf<Self>>;

		/// Type used for getting the associated account of a task. This account is controlled by
		/// the task itself.
		type SovereignAccountOf: MaybeConvert<TaskId, Self::AccountId>;

		/// Identifier from which the internal Pot is generated.
		#[pallet::constant]
		type PalletId: Get<PalletId>;

		/// Number of Relay-chain blocks per timeslice.
		#[pallet::constant]
		type TimeslicePeriod: Get<RelayBlockNumberOf<Self>>;

		/// Maximum number of legacy leases.
		#[pallet::constant]
		type MaxLeasedCores: Get<u32>;

		/// Maximum number of system cores.
		#[pallet::constant]
		type MaxReservedCores: Get<u32>;

		/// Given that we are performing all auto-renewals in a single block, it has to be limited.
		#[pallet::constant]
		type MaxAutoRenewals: Get<u32>;

		/// The smallest amount of credits a user can purchase.
		///
		/// Needed to prevent spam attacks.
		#[pallet::constant]
		type MinimumCreditPurchase: Get<BalanceOf<Self>>;
	}
135
	/// The current configuration of this pallet.
	#[pallet::storage]
	pub type Configuration<T> = StorageValue<_, ConfigRecordOf<T>, OptionQuery>;

	/// The Polkadot Core reservations (generally tasked with the maintenance of System Chains).
	#[pallet::storage]
	pub type Reservations<T> = StorageValue<_, ReservationsRecordOf<T>, ValueQuery>;

	/// The Polkadot Core legacy leases.
	#[pallet::storage]
	pub type Leases<T> = StorageValue<_, LeasesRecordOf<T>, ValueQuery>;

	/// The current status of miscellaneous subsystems of this pallet.
	#[pallet::storage]
	pub type Status<T> = StorageValue<_, StatusRecord, OptionQuery>;

	/// The details of the current sale, including its properties and status.
	#[pallet::storage]
	pub type SaleInfo<T> = StorageValue<_, SaleInfoRecordOf<T>, OptionQuery>;

	/// Records of potential renewals.
	///
	/// Renewals will only actually be allowed if `CompletionStatus` is actually `Complete`.
	#[pallet::storage]
	pub type PotentialRenewals<T> =
		StorageMap<_, Twox64Concat, PotentialRenewalId, PotentialRenewalRecordOf<T>, OptionQuery>;

	/// The current (unassigned or provisionally assigned) Regions.
	#[pallet::storage]
	pub type Regions<T> = StorageMap<_, Blake2_128Concat, RegionId, RegionRecordOf<T>, OptionQuery>;

	/// The work we plan on having each core do at a particular time in the future.
	#[pallet::storage]
	pub type Workplan<T> =
		StorageMap<_, Twox64Concat, (Timeslice, CoreIndex), Schedule, OptionQuery>;

	/// The current workload of each core. This gets updated with workplan as timeslices pass.
	#[pallet::storage]
	pub type Workload<T> = StorageMap<_, Twox64Concat, CoreIndex, Schedule, ValueQuery>;

	/// Record of a single contribution to the Instantaneous Coretime Pool.
	#[pallet::storage]
	pub type InstaPoolContribution<T> =
		StorageMap<_, Blake2_128Concat, RegionId, ContributionRecordOf<T>, OptionQuery>;

	/// Record of Coretime entering or leaving the Instantaneous Coretime Pool.
	#[pallet::storage]
	pub type InstaPoolIo<T> = StorageMap<_, Blake2_128Concat, Timeslice, PoolIoRecord, ValueQuery>;

	/// Total InstaPool rewards for each Timeslice and the number of core parts which contributed.
	#[pallet::storage]
	pub type InstaPoolHistory<T> =
		StorageMap<_, Blake2_128Concat, Timeslice, InstaPoolHistoryRecordOf<T>>;

	/// Received core count change from the relay chain.
	#[pallet::storage]
	pub type CoreCountInbox<T> = StorageValue<_, CoreIndex, OptionQuery>;

	/// Keeping track of cores which have auto-renewal enabled.
	///
	/// Sorted by `CoreIndex` to make the removal of cores from auto-renewal more efficient.
	#[pallet::storage]
	pub type AutoRenewals<T: Config> =
		StorageValue<_, BoundedVec<AutoRenewalRecord, T::MaxAutoRenewals>, ValueQuery>;

	/// Received revenue info from the relay chain.
	#[pallet::storage]
	pub type RevenueInbox<T> = StorageValue<_, OnDemandRevenueRecordOf<T>, OptionQuery>;
204
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// A Region of Bulk Coretime has been purchased.
		Purchased {
			/// The identity of the purchaser.
			who: T::AccountId,
			/// The identity of the Region.
			region_id: RegionId,
			/// The price paid for this Region.
			price: BalanceOf<T>,
			/// The duration of the Region.
			duration: Timeslice,
		},
		/// The workload of a core has become renewable.
		Renewable {
			/// The core whose workload can be renewed.
			core: CoreIndex,
			/// The price at which the workload can be renewed.
			price: BalanceOf<T>,
			/// The time at which the workload would recommence of this renewal. The call to renew
			/// cannot happen before the beginning of the interlude prior to the sale for regions
			/// which begin at this time.
			begin: Timeslice,
			/// The actual workload which can be renewed.
			workload: Schedule,
		},
		/// A workload has been renewed.
		Renewed {
			/// The identity of the renewer.
			who: T::AccountId,
			/// The price paid for this renewal.
			price: BalanceOf<T>,
			/// The index of the core on which the `workload` was previously scheduled.
			old_core: CoreIndex,
			/// The index of the core on which the renewed `workload` has been scheduled.
			core: CoreIndex,
			/// The time at which the `workload` will begin on the `core`.
			begin: Timeslice,
			/// The number of timeslices for which this `workload` is newly scheduled.
			duration: Timeslice,
			/// The workload which was renewed.
			workload: Schedule,
		},
		/// Ownership of a Region has been transferred.
		Transferred {
			/// The Region which has been transferred.
			region_id: RegionId,
			/// The duration of the Region.
			duration: Timeslice,
			/// The old owner of the Region.
			old_owner: Option<T::AccountId>,
			/// The new owner of the Region.
			owner: Option<T::AccountId>,
		},
		/// A Region has been split into two non-overlapping Regions.
		Partitioned {
			/// The Region which was split.
			old_region_id: RegionId,
			/// The new Regions into which it became.
			new_region_ids: (RegionId, RegionId),
		},
		/// A Region has been converted into two overlapping Regions each of lesser regularity.
		Interlaced {
			/// The Region which was interlaced.
			old_region_id: RegionId,
			/// The new Regions into which it became.
			new_region_ids: (RegionId, RegionId),
		},
		/// A Region has been assigned to a particular task.
		Assigned {
			/// The Region which was assigned.
			region_id: RegionId,
			/// The duration of the assignment.
			duration: Timeslice,
			/// The task to which the Region was assigned.
			task: TaskId,
		},
		/// An assignment has been removed from the workplan.
		AssignmentRemoved {
			/// The Region which was removed from the workplan.
			region_id: RegionId,
		},
		/// A Region has been added to the Instantaneous Coretime Pool.
		Pooled {
			/// The Region which was added to the Instantaneous Coretime Pool.
			region_id: RegionId,
			/// The duration of the Region.
			duration: Timeslice,
		},
		/// A new number of cores has been requested.
		CoreCountRequested {
			/// The number of cores requested.
			core_count: CoreIndex,
		},
		/// The number of cores available for scheduling has changed.
		CoreCountChanged {
			/// The new number of cores available for scheduling.
			core_count: CoreIndex,
		},
		/// There is a new reservation for a workload.
		ReservationMade {
			/// The index of the reservation.
			index: u32,
			/// The workload of the reservation.
			workload: Schedule,
		},
		/// A reservation for a workload has been cancelled.
		ReservationCancelled {
			/// The index of the reservation which was cancelled.
			index: u32,
			/// The workload of the now cancelled reservation.
			workload: Schedule,
		},
		/// A new sale has been initialized.
		SaleInitialized {
			/// The relay block number at which the sale will/did start.
			sale_start: RelayBlockNumberOf<T>,
			/// The length in relay chain blocks of the Leadin Period (where the price is
			/// decreasing).
			leadin_length: RelayBlockNumberOf<T>,
			/// The price of Bulk Coretime at the beginning of the Leadin Period.
			start_price: BalanceOf<T>,
			/// The price of Bulk Coretime after the Leadin Period.
			end_price: BalanceOf<T>,
			/// The first timeslice of the Regions which are being sold in this sale.
			region_begin: Timeslice,
			/// The timeslice on which the Regions which are being sold in the sale terminate.
			/// (i.e. One after the last timeslice which the Regions control.)
			region_end: Timeslice,
			/// The number of cores we want to sell, ideally.
			ideal_cores_sold: CoreIndex,
			/// Number of cores which are/have been offered for sale.
			cores_offered: CoreIndex,
		},
		/// A new lease has been created.
		Leased {
			/// The task to which a core will be assigned.
			task: TaskId,
			/// The timeslice contained in the sale period after which this lease will
			/// self-terminate (and therefore the earliest timeslice at which the lease may no
			/// longer apply).
			until: Timeslice,
		},
		/// A lease has been removed.
		LeaseRemoved {
			/// The task to which a core was assigned.
			task: TaskId,
		},
		/// A lease is about to end.
		LeaseEnding {
			/// The task to which a core was assigned.
			task: TaskId,
			/// The timeslice at which the task will no longer be scheduled.
			when: Timeslice,
		},
		/// The sale rotation has been started and a new sale is imminent.
		SalesStarted {
			/// The nominal price of a Region of Bulk Coretime.
			price: BalanceOf<T>,
			/// The maximum number of cores which this pallet will attempt to assign.
			core_count: CoreIndex,
		},
		/// The act of claiming revenue has begun.
		RevenueClaimBegun {
			/// The region to be claimed for.
			region: RegionId,
			/// The maximum number of timeslices which should be searched for claims.
			max_timeslices: Timeslice,
		},
		/// A particular timeslice has a non-zero claim.
		RevenueClaimItem {
			/// The timeslice whose claim is being processed.
			when: Timeslice,
			/// The amount which was claimed at this timeslice.
			amount: BalanceOf<T>,
		},
		/// A revenue claim has (possibly only in part) been paid.
		RevenueClaimPaid {
			/// The account to whom revenue has been paid.
			who: T::AccountId,
			/// The total amount of revenue claimed and paid.
			amount: BalanceOf<T>,
			/// The next region which should be claimed for the continuation of this contribution.
			next: Option<RegionId>,
		},
		/// Some Instantaneous Coretime Pool credit has been purchased.
		CreditPurchased {
			/// The account which purchased the credit.
			who: T::AccountId,
			/// The Relay-chain account to which the credit will be made.
			beneficiary: RelayAccountIdOf<T>,
			/// The amount of credit purchased.
			amount: BalanceOf<T>,
		},
		/// A Region has been dropped due to being out of date.
		RegionDropped {
			/// The Region which no longer exists.
			region_id: RegionId,
			/// The duration of the Region.
			duration: Timeslice,
		},
		/// Some historical Instantaneous Core Pool contribution record has been dropped.
		ContributionDropped {
			/// The Region whose contribution no longer exists.
			region_id: RegionId,
		},
		/// A region has been force-removed from the pool. This is usually due to a provisionally
		/// pooled region being redeployed.
		RegionUnpooled {
			/// The Region which has been force-removed from the pool.
			region_id: RegionId,
			/// The timeslice at which the region was force-removed.
			when: Timeslice,
		},
		/// Some historical Instantaneous Core Pool payment record has been initialized.
		HistoryInitialized {
			/// The timeslice whose history has been initialized.
			when: Timeslice,
			/// The amount of privately contributed Coretime to the Instantaneous Coretime Pool.
			private_pool_size: CoreMaskBitCount,
			/// The amount of Coretime contributed to the Instantaneous Coretime Pool by the
			/// Polkadot System.
			system_pool_size: CoreMaskBitCount,
		},
		/// Some historical Instantaneous Core Pool payment record has been dropped.
		HistoryDropped {
			/// The timeslice whose history is no longer available.
			when: Timeslice,
			/// The amount of revenue the system has taken.
			revenue: BalanceOf<T>,
		},
		/// Some historical Instantaneous Core Pool payment record has been ignored because the
		/// timeslice was already known. Governance may need to intervene.
		HistoryIgnored {
			/// The timeslice whose history was ignored.
			when: Timeslice,
			/// The amount of revenue which was ignored.
			revenue: BalanceOf<T>,
		},
		/// Some historical Instantaneous Core Pool Revenue is ready for payout claims.
		ClaimsReady {
			/// The timeslice whose history is available.
			when: Timeslice,
			/// The amount of revenue the Polkadot System has already taken.
			system_payout: BalanceOf<T>,
			/// The total amount of revenue remaining to be claimed.
			private_payout: BalanceOf<T>,
		},
		/// A Core has been assigned to one or more tasks and/or the Pool on the Relay-chain.
		CoreAssigned {
			/// The index of the Core which has been assigned.
			core: CoreIndex,
			/// The Relay-chain block at which this assignment should take effect.
			when: RelayBlockNumberOf<T>,
			/// The workload to be done on the Core.
			assignment: Vec<(CoreAssignment, PartsOf57600)>,
		},
		/// Some historical Instantaneous Core Pool payment record has been dropped.
		PotentialRenewalDropped {
			/// The timeslice whose renewal is no longer available.
			when: Timeslice,
			/// The core whose workload is no longer available to be renewed for `when`.
			core: CoreIndex,
		},
		/// Auto-renewal has been enabled for a core.
		AutoRenewalEnabled {
			/// The core for which the renewal was enabled.
			core: CoreIndex,
			/// The task for which the renewal was enabled.
			task: TaskId,
		},
		/// Auto-renewal has been disabled for a core.
		AutoRenewalDisabled {
			/// The core for which the renewal was disabled.
			core: CoreIndex,
			/// The task for which the renewal was disabled.
			task: TaskId,
		},
		/// Failed to auto-renew a core, likely due to the payer account not being sufficiently
		/// funded.
		AutoRenewalFailed {
			/// The core for which the renewal failed.
			core: CoreIndex,
			/// The account which was supposed to pay for renewal.
			///
			/// If `None` it indicates that we failed to get the sovereign account of a task.
			payer: Option<T::AccountId>,
		},
		/// The auto-renewal limit has been reached upon renewing cores.
		///
		/// This should never happen, given that enable_auto_renew checks for this before enabling
		/// auto-renewal.
		AutoRenewalLimitReached,
	}
498
	#[pallet::error]
	#[derive(PartialEq)]
	pub enum Error<T> {
		/// The given region identity is not known.
		UnknownRegion,
		/// The owner of the region is not the origin.
		NotOwner,
		/// The pivot point of the partition is at or after the end of the region.
		PivotTooLate,
		/// The pivot point of the partition is at the beginning of the region.
		PivotTooEarly,
		/// The pivot mask for the interlacing is not contained within the region's interlace mask.
		ExteriorPivot,
		/// The pivot mask for the interlacing is void (and therefore unschedulable).
		VoidPivot,
		/// The pivot mask for the interlacing is complete (and therefore not a strict subset).
		CompletePivot,
		/// The workplan of the pallet's state is invalid. This indicates a state corruption.
		CorruptWorkplan,
		/// There is no sale happening currently.
		NoSales,
		/// The price limit is exceeded.
		Overpriced,
		/// There are no cores available.
		Unavailable,
		/// The sale limit has been reached.
		SoldOut,
		/// The renewal operation is not valid at the current time (it may become valid in the next
		/// sale).
		WrongTime,
		/// Invalid attempt to renew.
		NotAllowed,
		/// This pallet has not yet been initialized.
		Uninitialized,
		/// The purchase cannot happen yet as the sale period is yet to begin.
		TooEarly,
		/// There is no work to be done.
		NothingToDo,
		/// The maximum amount of reservations has already been reached.
		TooManyReservations,
		/// The maximum amount of leases has already been reached.
		TooManyLeases,
		/// The lease does not exist.
		LeaseNotFound,
		/// The revenue for the Instantaneous Core Sales of this period is not (yet) known and thus
		/// this operation cannot proceed.
		UnknownRevenue,
		/// The identified contribution to the Instantaneous Core Pool is unknown.
		UnknownContribution,
		/// The workload assigned for renewal is incomplete. This is unexpected and indicates a
		/// logic error.
		IncompleteAssignment,
		/// An item cannot be dropped because it is still valid.
		StillValid,
		/// The history item does not exist.
		NoHistory,
		/// No reservation of the given index exists.
		UnknownReservation,
		/// The renewal record cannot be found.
		UnknownRenewal,
		/// The lease expiry time has already passed.
		AlreadyExpired,
		/// The configuration could not be applied because it is invalid.
		InvalidConfig,
		/// The revenue must be claimed for 1 or more timeslices.
		NoClaimTimeslices,
		/// The caller doesn't have the permission to enable or disable auto-renewal.
		NoPermission,
		/// We reached the limit for auto-renewals.
		TooManyAutoRenewals,
		/// Only cores which are assigned to a task can be auto-renewed.
		NonTaskAutoRenewal,
		/// Failed to get the sovereign account of a task.
		SovereignAccountNotFound,
		/// Attempted to disable auto-renewal for a core that didn't have it enabled.
		AutoRenewalNotEnabled,
		/// Attempted to force remove an assignment that doesn't exist.
		AssignmentNotFound,
		/// The amount of credits the user attempted to purchase is below
		/// `T::MinimumCreditPurchase`. Needed to prevent spam attacks.
		CreditPurchaseTooSmall,
	}
581
	/// Genesis configuration for this pallet. Carries no data; it exists so the
	/// genesis `build` hook can run (see the `BuildGenesisConfig` impl).
	#[derive(frame_support::DefaultNoBound)]
	#[pallet::genesis_config]
	pub struct GenesisConfig<T: Config> {
		// Marker only; nothing is configurable at genesis.
		#[serde(skip)]
		pub _config: core::marker::PhantomData<T>,
	}
588
	#[pallet::genesis_build]
	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
		fn build(&self) {
			// Register a provider reference for the pallet's pot account so that
			// frame_system keeps the account alive even if its balance drops
			// below the existential deposit.
			frame_system::Pallet::<T>::inc_providers(&Pallet::<T>::account_id());
		}
	}
595
	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		/// Run once at the start of every block; all periodic work (and the
		/// weight consumed by it) is delegated to `do_tick`.
		fn on_initialize(_now: BlockNumberFor<T>) -> Weight {
			Self::do_tick()
		}
	}
602
603 #[pallet::call(weight(<T as Config>::WeightInfo))]
604 impl<T: Config> Pallet<T> {
		/// Configure the pallet.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `config`: The configuration for this pallet.
		#[pallet::call_index(0)]
		pub fn configure(
			origin: OriginFor<T>,
			config: ConfigRecordOf<T>,
		) -> DispatchResultWithPostInfo {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_configure(config)?;
			// Administrative calls waive the fee once the origin check passes.
			Ok(Pays::No.into())
		}
618
		/// Reserve a core for a workload.
		///
		/// The workload will be given a reservation, but two sale period boundaries must pass
		/// before the core is actually assigned.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `workload`: The workload which should be permanently placed on a core.
		#[pallet::call_index(1)]
		pub fn reserve(origin: OriginFor<T>, workload: Schedule) -> DispatchResultWithPostInfo {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_reserve(workload)?;
			// Administrative calls waive the fee once the origin check passes.
			Ok(Pays::No.into())
		}
632
		/// Cancel a reservation for a workload.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `item_index`: The index of the reservation. Usually this will also be the index of the
		///   core on which the reservation has been scheduled. However, it is possible that if
		///   other cores are reserved or unreserved in the same sale rotation that they won't
		///   correspond, so it's better to look up the core properly in the `Reservations` storage.
		#[pallet::call_index(2)]
		pub fn unreserve(origin: OriginFor<T>, item_index: u32) -> DispatchResultWithPostInfo {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_unreserve(item_index)?;
			// Administrative calls waive the fee once the origin check passes.
			Ok(Pays::No.into())
		}
646
		/// Reserve a core for a single task workload for a limited period.
		///
		/// In the interlude and sale period where Bulk Coretime is sold for the period immediately
		/// after `until`, then the same workload may be renewed.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `task`: The workload which should be placed on a core.
		/// - `until`: The timeslice now earlier than which `task` should be placed as a workload on
		///   a core.
		#[pallet::call_index(3)]
		pub fn set_lease(
			origin: OriginFor<T>,
			task: TaskId,
			until: Timeslice,
		) -> DispatchResultWithPostInfo {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_set_lease(task, until)?;
			// Administrative calls waive the fee once the origin check passes.
			Ok(Pays::No.into())
		}
666
		/// Begin the Bulk Coretime sales rotation.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `end_price`: The price after the leadin period of Bulk Coretime in the first sale.
		/// - `extra_cores`: Number of extra cores that should be requested on top of the cores
		///   required for `Reservations` and `Leases`.
		///
		/// This will call [`Self::request_core_count`] internally to set the correct core count on
		/// the relay chain.
		#[pallet::call_index(4)]
		// Weight scales with the total number of cores the relay chain will be asked for.
		#[pallet::weight(T::WeightInfo::start_sales(
			T::MaxLeasedCores::get() + T::MaxReservedCores::get() + *extra_cores as u32
		))]
		pub fn start_sales(
			origin: OriginFor<T>,
			end_price: BalanceOf<T>,
			extra_cores: CoreIndex,
		) -> DispatchResultWithPostInfo {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_start_sales(end_price, extra_cores)?;
			// Administrative calls waive the fee once the origin check passes.
			Ok(Pays::No.into())
		}
689
		/// Purchase Bulk Coretime in the ongoing Sale.
		///
		/// - `origin`: Must be a Signed origin with at least enough funds to pay the current price
		///   of Bulk Coretime.
		/// - `price_limit`: An amount no more than which should be paid.
		#[pallet::call_index(5)]
		pub fn purchase(
			origin: OriginFor<T>,
			price_limit: BalanceOf<T>,
		) -> DispatchResultWithPostInfo {
			let who = ensure_signed(origin)?;
			Self::do_purchase(who, price_limit)?;
			// The buyer already pays the sale price; the tx fee is waived on success.
			Ok(Pays::No.into())
		}
704
		/// Renew Bulk Coretime in the ongoing Sale or its prior Interlude Period.
		///
		/// - `origin`: Must be a Signed origin with at least enough funds to pay the renewal price
		///   of the core.
		/// - `core`: The core which should be renewed.
		#[pallet::call_index(6)]
		pub fn renew(origin: OriginFor<T>, core: CoreIndex) -> DispatchResultWithPostInfo {
			let who = ensure_signed(origin)?;
			Self::do_renew(who, core)?;
			// The renewer already pays the renewal price; the tx fee is waived on success.
			Ok(Pays::No.into())
		}
716
		/// Transfer a Bulk Coretime Region to a new owner.
		///
		/// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
		/// - `region_id`: The Region whose ownership should change.
		/// - `new_owner`: The new owner for the Region.
		#[pallet::call_index(7)]
		pub fn transfer(
			origin: OriginFor<T>,
			region_id: RegionId,
			new_owner: T::AccountId,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;
			// `Some(who)` enforces the ownership check inside `do_transfer`.
			Self::do_transfer(region_id, Some(who), new_owner)?;
			Ok(())
		}
732
		/// Split a Bulk Coretime Region into two non-overlapping Regions at a particular time into
		/// the region.
		///
		/// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
		/// - `region_id`: The Region which should be partitioned into two non-overlapping Regions.
		/// - `pivot`: The offset in time into the Region at which to make the split.
		#[pallet::call_index(8)]
		pub fn partition(
			origin: OriginFor<T>,
			region_id: RegionId,
			pivot: Timeslice,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;
			// `Some(who)` enforces the ownership check inside `do_partition`.
			Self::do_partition(region_id, Some(who), pivot)?;
			Ok(())
		}
749
		/// Split a Bulk Coretime Region into two wholly-overlapping Regions with complementary
		/// interlace masks which together make up the original Region's interlace mask.
		///
		/// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
		/// - `region_id`: The Region which should become two interlaced Regions of incomplete
		///   regularity.
		/// - `pivot`: The interlace mask of one of the two new regions (the other is its partial
		///   complement).
		#[pallet::call_index(9)]
		pub fn interlace(
			origin: OriginFor<T>,
			region_id: RegionId,
			pivot: CoreMask,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;
			// `Some(who)` enforces the ownership check inside `do_interlace`.
			Self::do_interlace(region_id, Some(who), pivot)?;
			Ok(())
		}
768
769 /// Assign a Bulk Coretime Region to a task.
770 ///
771 /// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
772 /// - `region_id`: The Region which should be assigned to the task.
773 /// - `task`: The task to assign.
774 /// - `finality`: Indication of whether this assignment is final (in which case it may be
775 /// eligible for renewal) or provisional (in which case it may be manipulated and/or
776 /// reassigned at a later stage).
777 #[pallet::call_index(10)]
778 pub fn assign(
779 origin: OriginFor<T>,
780 region_id: RegionId,
781 task: TaskId,
782 finality: Finality,
783 ) -> DispatchResultWithPostInfo {
784 let who = ensure_signed(origin)?;
785 Self::do_assign(region_id, Some(who), task, finality)?;
786 Ok(if finality == Finality::Final { Pays::No } else { Pays::Yes }.into())
787 }
788
789 /// Place a Bulk Coretime Region into the Instantaneous Coretime Pool.
790 ///
791 /// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
792 /// - `region_id`: The Region which should be assigned to the Pool.
793 /// - `payee`: The account which is able to collect any revenue due for the usage of this
794 /// Coretime.
795 #[pallet::call_index(11)]
796 pub fn pool(
797 origin: OriginFor<T>,
798 region_id: RegionId,
799 payee: T::AccountId,
800 finality: Finality,
801 ) -> DispatchResultWithPostInfo {
802 let who = ensure_signed(origin)?;
803 Self::do_pool(region_id, Some(who), payee, finality)?;
804 Ok(if finality == Finality::Final { Pays::No } else { Pays::Yes }.into())
805 }
806
		/// Claim the revenue owed from inclusion in the Instantaneous Coretime Pool.
		///
		/// - `origin`: Must be a Signed origin.
		/// - `region_id`: The Region which was assigned to the Pool.
		/// - `max_timeslices`: The maximum number of timeslices which should be processed. This
		///   must be greater than 0. This may affect the weight of the call but should be ideally
		///   made equivalent to the length of the Region `region_id`. If less, further dispatches
		///   will be required with the same `region_id` to claim revenue for the remainder.
		#[pallet::call_index(12)]
		#[pallet::weight(T::WeightInfo::claim_revenue(*max_timeslices))]
		pub fn claim_revenue(
			origin: OriginFor<T>,
			region_id: RegionId,
			max_timeslices: Timeslice,
		) -> DispatchResultWithPostInfo {
			// Any signed account may trigger the claim; the payout goes to the
			// contribution's recorded payee, not necessarily the caller.
			ensure_signed(origin)?;
			Self::do_claim_revenue(region_id, max_timeslices)?;
			Ok(Pays::No.into())
		}
826
		/// Purchase credit for use in the Instantaneous Coretime Pool.
		///
		/// - `origin`: Must be a Signed origin able to pay at least `amount`.
		/// - `amount`: The amount of credit to purchase.
		/// - `beneficiary`: The account on the Relay-chain which controls the credit (generally
		///   this will be the collator's hot wallet).
		#[pallet::call_index(13)]
		pub fn purchase_credit(
			origin: OriginFor<T>,
			amount: BalanceOf<T>,
			beneficiary: RelayAccountIdOf<T>,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;
			Self::do_purchase_credit(who, amount, beneficiary)?;
			Ok(())
		}
843
		/// Drop an expired Region from the chain.
		///
		/// - `origin`: Can be any kind of origin.
		/// - `region_id`: The Region which has expired.
		#[pallet::call_index(14)]
		pub fn drop_region(
			_origin: OriginFor<T>,
			region_id: RegionId,
		) -> DispatchResultWithPostInfo {
			Self::do_drop_region(region_id)?;
			// Garbage-collection calls are fee-free to encourage state cleanup.
			Ok(Pays::No.into())
		}
856
857 /// Drop an expired Instantaneous Pool Contribution record from the chain.
858 ///
859 /// - `origin`: Can be any kind of origin.
860 /// - `region_id`: The Region identifying the Pool Contribution which has expired.
861 #[pallet::call_index(15)]
862 pub fn drop_contribution(
863 _origin: OriginFor<T>,
864 region_id: RegionId,
865 ) -> DispatchResultWithPostInfo {
866 Self::do_drop_contribution(region_id)?;
867 Ok(Pays::No.into())
868 }
869
870 /// Drop an expired Instantaneous Pool History record from the chain.
871 ///
872 /// - `origin`: Can be any kind of origin.
873 /// - `region_id`: The time of the Pool History record which has expired.
874 #[pallet::call_index(16)]
875 pub fn drop_history(_origin: OriginFor<T>, when: Timeslice) -> DispatchResultWithPostInfo {
876 Self::do_drop_history(when)?;
877 Ok(Pays::No.into())
878 }
879
880 /// Drop an expired Allowed Renewal record from the chain.
881 ///
882 /// - `origin`: Can be any kind of origin.
883 /// - `core`: The core to which the expired renewal refers.
884 /// - `when`: The timeslice to which the expired renewal refers. This must have passed.
885 #[pallet::call_index(17)]
886 pub fn drop_renewal(
887 _origin: OriginFor<T>,
888 core: CoreIndex,
889 when: Timeslice,
890 ) -> DispatchResultWithPostInfo {
891 Self::do_drop_renewal(core, when)?;
892 Ok(Pays::No.into())
893 }
894
895 /// Request a change to the number of cores available for scheduling work.
896 ///
897 /// - `origin`: Must be Root or pass `AdminOrigin`.
898 /// - `core_count`: The desired number of cores to be made available.
899 #[pallet::call_index(18)]
900 #[pallet::weight(T::WeightInfo::request_core_count((*core_count).into()))]
901 pub fn request_core_count(origin: OriginFor<T>, core_count: CoreIndex) -> DispatchResult {
902 T::AdminOrigin::ensure_origin_or_root(origin)?;
903 Self::do_request_core_count(core_count)?;
904 Ok(())
905 }
906
907 #[pallet::call_index(19)]
908 #[pallet::weight(T::WeightInfo::notify_core_count())]
909 pub fn notify_core_count(origin: OriginFor<T>, core_count: CoreIndex) -> DispatchResult {
910 T::AdminOrigin::ensure_origin_or_root(origin)?;
911 Self::do_notify_core_count(core_count)?;
912 Ok(())
913 }
914
915 #[pallet::call_index(20)]
916 #[pallet::weight(T::WeightInfo::notify_revenue())]
917 pub fn notify_revenue(
918 origin: OriginFor<T>,
919 revenue: OnDemandRevenueRecordOf<T>,
920 ) -> DispatchResult {
921 T::AdminOrigin::ensure_origin_or_root(origin)?;
922 Self::do_notify_revenue(revenue)?;
923 Ok(())
924 }
925
926 /// Extrinsic for enabling auto renewal.
927 ///
928 /// Callable by the sovereign account of the task on the specified core. This account
929 /// will be charged at the start of every bulk period for renewing core time.
930 ///
931 /// - `origin`: Must be the sovereign account of the task
932 /// - `core`: The core to which the task to be renewed is currently assigned.
933 /// - `task`: The task for which we want to enable auto renewal.
934 /// - `workload_end_hint`: should be used when enabling auto-renewal for a core that is not
935 /// expiring in the upcoming bulk period (e.g., due to holding a lease) since it would be
936 /// inefficient to look up when the core expires to schedule the next renewal.
937 #[pallet::call_index(21)]
938 #[pallet::weight(T::WeightInfo::enable_auto_renew())]
939 pub fn enable_auto_renew(
940 origin: OriginFor<T>,
941 core: CoreIndex,
942 task: TaskId,
943 workload_end_hint: Option<Timeslice>,
944 ) -> DispatchResult {
945 let who = ensure_signed(origin)?;
946
947 let sovereign_account = T::SovereignAccountOf::maybe_convert(task)
948 .ok_or(Error::<T>::SovereignAccountNotFound)?;
949 // Only the sovereign account of a task can enable auto renewal for its own core.
950 ensure!(who == sovereign_account, Error::<T>::NoPermission);
951
952 Self::do_enable_auto_renew(sovereign_account, core, task, workload_end_hint)?;
953 Ok(())
954 }
955
956 /// Extrinsic for disabling auto renewal.
957 ///
958 /// Callable by the sovereign account of the task on the specified core.
959 ///
960 /// - `origin`: Must be the sovereign account of the task.
961 /// - `core`: The core for which we want to disable auto renewal.
962 /// - `task`: The task for which we want to disable auto renewal.
963 #[pallet::call_index(22)]
964 #[pallet::weight(T::WeightInfo::disable_auto_renew())]
965 pub fn disable_auto_renew(
966 origin: OriginFor<T>,
967 core: CoreIndex,
968 task: TaskId,
969 ) -> DispatchResult {
970 let who = ensure_signed(origin)?;
971
972 let sovereign_account = T::SovereignAccountOf::maybe_convert(task)
973 .ok_or(Error::<T>::SovereignAccountNotFound)?;
974 // Only the sovereign account of the task can disable auto-renewal.
975 ensure!(who == sovereign_account, Error::<T>::NoPermission);
976
977 Self::do_disable_auto_renew(core, task)?;
978
979 Ok(())
980 }
981
982 /// Reserve a core for a workload immediately.
983 ///
984 /// - `origin`: Must be Root or pass `AdminOrigin`.
985 /// - `workload`: The workload which should be permanently placed on a core starting
986 /// immediately.
987 /// - `core`: The core to which the assignment should be made until the reservation takes
988 /// effect. It is left to the caller to either add this new core or reassign any other
989 /// tasks to this existing core.
990 ///
991 /// This reserves the workload and then injects the workload into the Workplan for the next
992 /// two sale periods. This overwrites any existing assignments for this core at the start of
993 /// the next sale period.
994 #[pallet::call_index(23)]
995 pub fn force_reserve(
996 origin: OriginFor<T>,
997 workload: Schedule,
998 core: CoreIndex,
999 ) -> DispatchResultWithPostInfo {
1000 T::AdminOrigin::ensure_origin_or_root(origin)?;
1001 Self::do_force_reserve(workload, core)?;
1002 Ok(Pays::No.into())
1003 }
1004
1005 /// Remove a lease.
1006 ///
1007 /// - `origin`: Must be Root or pass `AdminOrigin`.
1008 /// - `task`: The task id of the lease which should be removed.
1009 #[pallet::call_index(24)]
1010 pub fn remove_lease(origin: OriginFor<T>, task: TaskId) -> DispatchResult {
1011 T::AdminOrigin::ensure_origin_or_root(origin)?;
1012 Self::do_remove_lease(task)
1013 }
1014
1015 /// Remove an assignment from the Workplan.
1016 ///
1017 /// - `origin`: Must be Root or pass `AdminOrigin`.
1018 /// - `region_id`: The Region to be removed from the workplan.
1019 #[pallet::call_index(26)]
1020 pub fn remove_assignment(origin: OriginFor<T>, region_id: RegionId) -> DispatchResult {
1021 T::AdminOrigin::ensure_origin_or_root(origin)?;
1022 Self::do_remove_assignment(region_id)
1023 }
1024
1025 #[pallet::call_index(99)]
1026 #[pallet::weight(T::WeightInfo::swap_leases())]
1027 pub fn swap_leases(origin: OriginFor<T>, id: TaskId, other: TaskId) -> DispatchResult {
1028 T::AdminOrigin::ensure_origin_or_root(origin)?;
1029 Self::do_swap_leases(id, other)?;
1030 Ok(())
1031 }
1032 }
1033}