// pallet_broker/tick_impls.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use super::*;
use alloc::{vec, vec::Vec};
use frame_support::{pallet_prelude::*, traits::defensive_prelude::*, weights::WeightMeter};
use sp_arithmetic::traits::{One, SaturatedConversion, Saturating, Zero};
use sp_runtime::traits::{BlockNumberProvider, ConvertBack, MaybeConvert};
use CompletionStatus::Complete;

impl<T: Config> Pallet<T> {
	/// Attempt to tick things along.
	///
	/// This may do several things:
	/// - Process notifications of the core count changing
	/// - Process reports of Instantaneous Core Market Revenue
	/// - Commit a timeslice
	/// - Rotate the sale period
	/// - Request revenue information for a previous timeslice
	/// - Initialize an instantaneous core pool historical revenue record
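	///
	/// This is typically driven once per block from the pallet's `on_initialize` hook,
	/// along the lines of:
	///
	/// ```ignore
	/// fn on_initialize(_now: BlockNumberFor<T>) -> Weight {
	/// 	Self::do_tick()
	/// }
	/// ```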
	pub(crate) fn do_tick() -> Weight {
		let mut meter = WeightMeter::new();
		meter.consume(T::WeightInfo::do_tick_base());

		// Nothing to do until the pallet has been configured and sales have been started.
		let (mut status, config) = match (Status::<T>::get(), Configuration::<T>::get()) {
			(Some(s), Some(c)) => (s, c),
			_ => return meter.consumed(),
		};

		if Self::process_core_count(&mut status) {
			meter.consume(T::WeightInfo::process_core_count(status.core_count.into()));
		}

		if Self::process_revenue() {
			meter.consume(T::WeightInfo::process_revenue());
		}

		if let Some(commit_timeslice) = Self::next_timeslice_to_commit(&config, &status) {
			status.last_committed_timeslice = commit_timeslice;
			if let Some(sale) = SaleInfo::<T>::get() {
				if commit_timeslice >= sale.region_begin {
					// Sale can be rotated.
					Self::rotate_sale(sale, &config, &status);
					meter.consume(T::WeightInfo::rotate_sale(status.core_count.into()));
				}
			}

			Self::process_pool(commit_timeslice, &mut status);
			meter.consume(T::WeightInfo::process_pool());

			let timeslice_period = T::TimeslicePeriod::get();
			let rc_begin = RelayBlockNumberOf::<T>::from(commit_timeslice) * timeslice_period;
			for core in 0..status.core_count {
				Self::process_core_schedule(commit_timeslice, rc_begin, core);
				meter.consume(T::WeightInfo::process_core_schedule());
			}
		}

		let current_timeslice = Self::current_timeslice();
		if status.last_timeslice < current_timeslice {
			status.last_timeslice.saturating_inc();
			let rc_block = T::TimeslicePeriod::get() * status.last_timeslice.into();
			T::Coretime::request_revenue_info_at(rc_block);
			meter.consume(T::WeightInfo::request_revenue_info_at());
			T::Coretime::on_new_timeslice(status.last_timeslice);
			meter.consume(T::WeightInfo::on_new_timeslice());
		}

		Status::<T>::put(&status);

		meter.consumed()
	}

	/// Apply a pending core-count notification, if any.
	///
	/// Returns `true` if a new core count was applied.
	pub(crate) fn process_core_count(status: &mut StatusRecord) -> bool {
		if let Some(core_count) = CoreCountInbox::<T>::take() {
			status.core_count = core_count;
			Self::deposit_event(Event::<T>::CoreCountChanged { core_count });
			return true
		}
		false
	}

	/// Process a single pending revenue report from the relay chain, if any.
	///
	/// Returns `true` if a report was consumed, whether paid out, ignored or dropped.
	pub(crate) fn process_revenue() -> bool {
		let Some(OnDemandRevenueRecord { until, amount }) = RevenueInbox::<T>::take() else {
			return false
		};
		let when: Timeslice =
			(until / T::TimeslicePeriod::get()).saturating_sub(One::one()).saturated_into();
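		// The report covers revenue up to the relay-chain block `until`, so it is attributed
		// to the last complete timeslice before that point. E.g. with a timeslice period of
		// 80 relay-chain blocks, `until = 800` gives `when = 800 / 80 - 1 = 9`.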
		let mut revenue = T::ConvertBalance::convert_back(amount.clone());
		if revenue.is_zero() {
			Self::deposit_event(Event::<T>::HistoryDropped { when, revenue });
			InstaPoolHistory::<T>::remove(when);
			return true
		}

		log::debug!(
			target: "pallet_broker::process_revenue",
			"Received {amount:?} from RC, converted into {revenue:?} revenue",
		);

		let mut r = InstaPoolHistory::<T>::get(when).unwrap_or_default();
		if r.maybe_payout.is_some() {
			Self::deposit_event(Event::<T>::HistoryIgnored { when, revenue });
			return true
		}
		// Payout system InstaPool Cores.
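		// The system share is pro-rata by pool contribution: e.g. with revenue = 100,
		// system_contributions = 240 and private_contributions = 560 (total 800), the system
		// is paid 100 * 240 / 800 = 30, leaving 70 claimable by private contributors.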
		let total_contrib = r.system_contributions.saturating_add(r.private_contributions);
		let system_payout = if !total_contrib.is_zero() {
			let system_payout =
				revenue.saturating_mul(r.system_contributions.into()) / total_contrib.into();
			Self::charge(&Self::account_id(), system_payout).defensive_ok();
			revenue.saturating_reduce(system_payout);

			system_payout
		} else {
			Zero::zero()
		};

		log::debug!(
			target: "pallet_broker::process_revenue",
			"Charged {system_payout:?} for system payouts, {revenue:?} remaining for private contributions",
		);

		if !revenue.is_zero() && r.private_contributions > 0 {
			r.maybe_payout = Some(revenue);
			InstaPoolHistory::<T>::insert(when, &r);
			Self::deposit_event(Event::<T>::ClaimsReady {
				when,
				system_payout,
				private_payout: revenue,
			});
		} else {
			InstaPoolHistory::<T>::remove(when);
			Self::deposit_event(Event::<T>::HistoryDropped { when, revenue });
		}
		true
	}

	/// Begin selling for the next sale period.
	///
	/// Triggered by Relay-chain block number/timeslice.
	pub(crate) fn rotate_sale(
		old_sale: SaleInfoRecordOf<T>,
		config: &ConfigRecordOf<T>,
		status: &StatusRecord,
	) -> Option<()> {
		let now = RCBlockNumberProviderOf::<T::Coretime>::current_block_number();

		let pool_item =
			ScheduleItem { assignment: CoreAssignment::Pool, mask: CoreMask::complete() };
		let just_pool = Schedule::truncate_from(vec![pool_item]);

		// Clean up the old sale - we need to use up any unused cores by putting them into the
		// InstaPool.
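		// Each unsold core contributes a complete `CoreMask`, i.e. all 80 mask bits, hence
		// the `saturating_accrue(80)` per core below; the accrue at `region_begin` and the
		// matching reduce at `region_end` confine the contribution to the old sale's region.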
		let mut old_pooled: SignedCoreMaskBitCount = 0;
		for i in old_sale.cores_sold..old_sale.cores_offered {
			old_pooled.saturating_accrue(80);
			Workplan::<T>::insert((old_sale.region_begin, old_sale.first_core + i), &just_pool);
		}
		InstaPoolIo::<T>::mutate(old_sale.region_begin, |r| r.system.saturating_accrue(old_pooled));
		InstaPoolIo::<T>::mutate(old_sale.region_end, |r| r.system.saturating_reduce(old_pooled));

		// Calculate the start price for the upcoming sale.
		let new_prices = T::PriceAdapter::adapt_price(SalePerformance::from_sale(&old_sale));

		log::debug!(
			"Rotated sale, new prices: {:?}, {:?}",
			new_prices.end_price,
			new_prices.target_price
		);

		// Set workload for the reserved (system, probably) workloads.
		let region_begin = old_sale.region_end;
		let region_end = region_begin + config.region_length;

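		// Reserved workloads are placed first, ahead of leases and any cores for sale. Any
		// part of a reservation assigned to `Pool` counts towards the system pool, counting
		// only the pool-assigned mask bits: a reservation pooling half of its mask, for
		// instance, contributes 40 of the 80 bits.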
		let mut first_core = 0;
		let mut total_pooled: SignedCoreMaskBitCount = 0;
		for schedule in Reservations::<T>::get().into_iter() {
			let parts: u32 = schedule
				.iter()
				.filter(|i| matches!(i.assignment, CoreAssignment::Pool))
				.map(|i| i.mask.count_ones())
				.sum();
			total_pooled.saturating_accrue(parts as i32);

			Workplan::<T>::insert((region_begin, first_core), &schedule);
			first_core.saturating_inc();
		}
		InstaPoolIo::<T>::mutate(region_begin, |r| r.system.saturating_accrue(total_pooled));
		InstaPoolIo::<T>::mutate(region_end, |r| r.system.saturating_reduce(total_pooled));

		let mut leases = Leases::<T>::get();
		// A lease can morph into a renewable as long as its `until` is >= `region_begin`
		// and < `region_end`.
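		// An expiring lease is dropped from `Leases` (via `!expire` below) but leaves behind
		// a `PotentialRenewalRecord`, so its task can renew the core at the recorded price in
		// the next sale rather than competing for it on the open market.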
		leases.retain(|&LeaseRecordItem { until, task }| {
			let mask = CoreMask::complete();
			let assignment = CoreAssignment::Task(task);
			let schedule = BoundedVec::truncate_from(vec![ScheduleItem { mask, assignment }]);
			Workplan::<T>::insert((region_begin, first_core), &schedule);
			// Will the lease expire at the end of the period?
			let expire = until < region_end;
			if expire {
				// Last time for this one - make it renewable in the next sale.
				let renewal_id = PotentialRenewalId { core: first_core, when: region_end };
				let record = PotentialRenewalRecord {
					price: new_prices.target_price,
					completion: Complete(schedule),
				};
				PotentialRenewals::<T>::insert(renewal_id, &record);
				Self::deposit_event(Event::Renewable {
					core: first_core,
					price: new_prices.target_price,
					begin: region_end,
					workload: record.completion.drain_complete().unwrap_or_default(),
				});
				Self::deposit_event(Event::LeaseEnding { when: region_end, task });
			}

			first_core.saturating_inc();

			!expire
		});
		Leases::<T>::put(&leases);

		let max_possible_sales = status.core_count.saturating_sub(first_core);
		let limit_cores_offered = config.limit_cores_offered.unwrap_or(CoreIndex::max_value());
		let cores_offered = limit_cores_offered.min(max_possible_sales);
		let sale_start = now.saturating_add(config.interlude_length);
		let leadin_length = config.leadin_length;
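		// `ideal_bulk_proportion` is a `Perbill`, so e.g. an ideal proportion of 60% with 50
		// cores offered gives `ideal_cores_sold = 30`. Selling fewer than this signals weak
		// demand to the price adapter for the next sale; selling more signals strong demand.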
		let ideal_cores_sold = (config.ideal_bulk_proportion * cores_offered as u32) as u16;
		let sellout_price = if cores_offered > 0 {
			// No core sold -> price was too high -> we have to adjust downwards.
			Some(new_prices.end_price)
		} else {
			None
		};

		// Update SaleInfo
		let new_sale = SaleInfoRecord {
			sale_start,
			leadin_length,
			end_price: new_prices.end_price,
			sellout_price,
			region_begin,
			region_end,
			first_core,
			ideal_cores_sold,
			cores_offered,
			cores_sold: 0,
		};

		SaleInfo::<T>::put(&new_sale);

		Self::renew_cores(&new_sale);

		Self::deposit_event(Event::SaleInitialized {
			sale_start,
			leadin_length,
			start_price: Self::sale_price(&new_sale, now),
			end_price: new_prices.end_price,
			region_begin,
			region_end,
			ideal_cores_sold,
			cores_offered,
		});

		Some(())
	}

	/// Apply the queued pool size changes for timeslice `when` and open a fresh revenue
	/// history record for it.
	pub(crate) fn process_pool(when: Timeslice, status: &mut StatusRecord) {
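		// `InstaPoolIo` holds signed deltas which are accrued at a region's start and reduced
		// at its end; e.g. a +80 entry at timeslice 100 paired with a -80 entry at timeslice
		// 180 grows the pool by one full core for timeslices 100..180 exactly.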
		let pool_io = InstaPoolIo::<T>::take(when);
		status.private_pool_size = (status.private_pool_size as SignedCoreMaskBitCount)
			.saturating_add(pool_io.private) as CoreMaskBitCount;
		status.system_pool_size = (status.system_pool_size as SignedCoreMaskBitCount)
			.saturating_add(pool_io.system) as CoreMaskBitCount;
		let record = InstaPoolHistoryRecord {
			private_contributions: status.private_pool_size,
			system_contributions: status.system_pool_size,
			maybe_payout: None,
		};
		InstaPoolHistory::<T>::insert(when, record);
		Self::deposit_event(Event::<T>::HistoryInitialized {
			when,
			private_pool_size: status.private_pool_size,
			system_pool_size: status.system_pool_size,
		});
	}

	/// Schedule cores for the given `timeslice`.
	pub(crate) fn process_core_schedule(
		timeslice: Timeslice,
		rc_begin: RelayBlockNumberOf<T>,
		core: CoreIndex,
	) {
		let Some(workplan) = Workplan::<T>::take((timeslice, core)) else { return };
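		// Merge the incoming workplan with the core's current workload: new items take
		// precedence, and previous workload items are carried over only where their mask does
		// not overlap any part already claimed by the new plan.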
		let workload = Workload::<T>::get(core);
		let parts_used = workplan.iter().map(|i| i.mask).fold(CoreMask::void(), |a, i| a | i);
		let mut workplan = workplan.into_inner();
		workplan.extend(workload.into_iter().filter(|i| (i.mask & parts_used).is_void()));
		let workplan = Schedule::truncate_from(workplan);
		Workload::<T>::insert(core, &workplan);

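		// A core is divided into 57,600 parts and a `CoreMask` has 80 bits, so each mask bit
		// is worth 57_600 / 80 = 720 parts; e.g. an assignment covering 20 mask bits maps to
		// 20 * 720 = 14_400 parts. Any shortfall below 57,600 is padded out with `Idle`.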
		let mut total_used = 0;
		let mut intermediate = workplan
			.into_iter()
			.map(|i| (i.assignment, i.mask.count_ones() as u16 * (57_600 / 80)))
			.inspect(|i| total_used.saturating_accrue(i.1))
			.collect::<Vec<_>>();
		if total_used < 57_600 {
			intermediate.push((CoreAssignment::Idle, 57_600 - total_used));
		}
		// Sort so equal assignments are adjacent, then coalesce each run of equal
		// assignments into a single entry.
		intermediate.sort();
		let mut assignment: Vec<(CoreAssignment, PartsOf57600)> =
			Vec::with_capacity(intermediate.len());
		for i in intermediate.into_iter() {
			if let Some(ref mut last) = assignment.last_mut() {
				if last.0 == i.0 {
					last.1 += i.1;
					continue
				}
			}
			assignment.push(i);
		}
		T::Coretime::assign_core(core, rc_begin, assignment.clone(), None);
		Self::deposit_event(Event::<T>::CoreAssigned { core, when: rc_begin, assignment });
	}

	/// Renews all the cores which have auto-renewal enabled.
	pub(crate) fn renew_cores(sale: &SaleInfoRecordOf<T>) {
		let renewals = AutoRenewals::<T>::get();

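		// Build the updated renewal list in one pass: a record is kept as-is (renewal not yet
		// due), advanced to the next region (renewal succeeded), or dropped (renewal failed).
		// The result is converted back into the bounded `AutoRenewals` vector; if the bound
		// were somehow exceeded, the update is abandoned and an event is emitted.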
		let Ok(auto_renewals) = renewals
			.into_iter()
			.flat_map(|record| {
				// If the next renewal is scheduled beyond the start of the upcoming region,
				// skip the renewal for this core for now and keep its record unchanged.
				if sale.region_begin < record.next_renewal {
					return Some(record)
				}

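				// Renewals are funded from the task's sovereign account, derived via
				// `T::SovereignAccountOf`; if no account can be derived, the renewal fails.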
				let Some(payer) = T::SovereignAccountOf::maybe_convert(record.task) else {
					Self::deposit_event(Event::<T>::AutoRenewalFailed {
						core: record.core,
						payer: None,
					});
					return None
				};

				if let Ok(new_core_index) = Self::do_renew(payer.clone(), record.core) {
					Some(AutoRenewalRecord {
						core: new_core_index,
						task: record.task,
						next_renewal: sale.region_end,
					})
				} else {
					Self::deposit_event(Event::<T>::AutoRenewalFailed {
						core: record.core,
						payer: Some(payer),
					});

					None
				}
			})
			.collect::<Vec<AutoRenewalRecord>>()
			.try_into()
		else {
			Self::deposit_event(Event::<T>::AutoRenewalLimitReached);
			return;
		};

		AutoRenewals::<T>::set(auto_renewals);
	}
}