referrerpolicy=no-referrer-when-downgrade

pallet_staking_async_rc_client/
lib.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! The client for the relay chain, intended to be used in AssetHub.
19//!
20//! The counter-part for this pallet is `pallet-staking-async-ah-client` on the relay chain.
21//!
22//! This documentation is divided into the following sections:
23//!
//! 1. Incoming messages: the messages that we receive from the relay chain.
//! 2. Outgoing messages: the messages that we send to the relay chain.
26//! 3. Local interfaces: the interfaces that we expose to other pallets in the runtime.
27//!
28//! ## Incoming Messages
29//!
30//! All incoming messages are handled via [`Call`]. They are all gated to be dispatched only by the
31//! relay chain origin, as per [`Config::RelayChainOrigin`].
32//!
33//! After potential queuing, they are passed to pallet-staking-async via [`AHStakingInterface`].
34//!
35//! The calls are:
36//!
37//! * [`Call::relay_session_report`]: A report from the relay chain, indicating the end of a
38//!   session. We allow ourselves to know an implementation detail: **The ending of session `x`
39//!   always implies start of session `x+1` and planning of session `x+2`.** This allows us to have
40//!   just one message per session.
41//!
42//! > Note that in the code, due to historical reasons, planning of a new session is called
43//! > `new_session`.
44//!
45//! * [`Call::relay_new_offence`]: A report of one or more offences on the relay chain.
46//!
47//! ## Outgoing Messages
48//!
49//! The outgoing messages are expressed in [`SendToRelayChain`].
50//!
51//! ## Local Interfaces
52//!
53//! Within this pallet, we need to talk to the staking-async pallet in AH. This is done via
54//! [`AHStakingInterface`] trait.
55//!
//! The staking pallet in AH has no communication with the session pallet whatsoever, therefore its
//! implementation of `SessionManager`, and its associated type `SessionInterface`, no longer
//! exists. Moreover, pallet-staking-async no longer has a notion of timestamp locally, and only
//! relies on the timestamp passed in via the `SessionReport`.
60//!
61//! ## Shared Types
62//!
63//! Note that a number of types need to be shared between this crate and `ah-client`. For now, as a
64//! convention, they are kept in this crate. This can later be decoupled into a shared crate, or
65//! `sp-staking`.
66//!
67//! TODO: the rest should go to staking-async docs.
68//!
69//! ## Session Change
70//!
71//! Further details of how the session change works follows. These details are important to how
72//! `pallet-staking-async` should rotate sessions/eras going forward.
73//!
74//! ### Synchronous Model
75//!
76//! Let's first consider the old school model, when staking and session lived in the same runtime.
77//! Assume 3 sessions is one era.
78//!
79//! The session pallet issues the following events:
80//!
81//! end_session / start_session / new_session (plan session)
82//!
83//! * end 0, start 1, plan 2
84//! * end 1, start 2, plan 3 (new validator set returned)
85//! * end 2, start 3 (new validator set activated), plan 4
86//! * end 3, start 4, plan 5
87//! * end 4, start 5, plan 6 (ah-client to already return validator set) and so on.
88//!
89//! Staking should then do the following:
90//!
91//! * once a request to plan session 3 comes in, it must return a validator set. This is queued
92//!   internally in the session pallet, and is enacted later.
93//! * at the same time, staking increases its notion of `current_era` by 1. Yet, `active_era` is
94//!   intact. This is because the validator elected for era n+1 are not yet active in the session
95//!   pallet.
96//! * once a request to _start_ session 3 comes in, staking will rotate its `active_era` to also be
97//!   incremented to n+1.
98//!
99//! ### Asynchronous Model
100//!
101//! Now, if staking lives in AH and the session pallet lives in the relay chain, how will this look
102//! like?
103//!
104//! Staking knows that by the time the relay-chain session index `3` (and later on `6` and so on) is
105//! _planned_, it must have already returned a validator set via XCM.
106//!
107//! conceptually, staking must:
108//!
//! - listen to the [`SessionReport`]s coming in, and start a new staking election such that we can
//!   be sure it is delivered to the RC well before the message for planning session 3 is received.
111//! - Staking should know that, regardless of the timing, these validators correspond to session 3,
112//!   and an upcoming era.
113//! - Staking will keep these pending validators internally within its state.
114//! - Once the message to start session 3 is received, staking will act upon it locally.
115
116#![cfg_attr(not(feature = "std"), no_std)]
117
118extern crate alloc;
119use alloc::{vec, vec::Vec};
120use core::fmt::Display;
121use frame_support::pallet_prelude::*;
122use sp_runtime::{traits::Convert, Perbill};
123use sp_staking::SessionIndex;
124use xcm::latest::{send_xcm, Location, SendError, SendXcm, Xcm};
125
126/// Export everything needed for the pallet to be used in the runtime.
127pub use pallet::*;
128
129const LOG_TARGET: &str = "runtime::staking-async::rc-client";
130
// Syntactic sugar for logging: prefixes every message with the current block number and routes
// it to this crate's `LOG_TARGET`.
//
// NOTE(review): this macro references a generic `T` (as `frame_system::Pallet<T>`), so it can
// only be invoked where a `T: frame_system::Config` is in scope (e.g. inside the pallet's impl
// blocks below).
#[macro_export]
macro_rules! log {
	($level:tt, $patter:expr $(, $values:expr)* $(,)?) => {
		log::$level!(
			target: $crate::LOG_TARGET,
			concat!("[{:?}] โฌ†๏ธ ", $patter), <frame_system::Pallet<T>>::block_number() $(, $values)*
		)
	};
}
141
/// The communication trait of `pallet-staking-async-rc-client` -> `relay-chain`.
///
/// This trait should only encapsulate our _outgoing_ communication to the RC. Any incoming
/// communication comes in directly via our calls ([`Call::relay_session_report`] and
/// [`Call::relay_new_offence`]).
///
/// In a real runtime, this is implemented via XCM calls, much like how the core-time pallet works.
/// In a test runtime, it can be wired to direct function calls.
pub trait SendToRelayChain {
	/// The validator account ids.
	type AccountId;

	/// Send a new validator set report to relay chain.
	fn validator_set(report: ValidatorSetReport<Self::AccountId>);
}
156
#[derive(Encode, Decode, DecodeWithMemTracking, Clone, PartialEq, TypeInfo)]
/// A report about a new validator set. This is sent from AH -> RC.
pub struct ValidatorSetReport<AccountId> {
	/// The new validator set.
	pub new_validator_set: Vec<AccountId>,
	/// The id of this validator set.
	///
	/// Is an always incrementing identifier for this validator set, the activation of which can be
	/// later pointed to in a `SessionReport` (see [`SessionReport::activation_timestamp`]).
	///
	/// Implementation detail: within `pallet-staking-async`, this is always set to the
	/// `planning-era` (aka. `CurrentEra`).
	pub id: u32,
	/// Signal the relay chain that it can prune up to this session, and enough eras have passed.
	///
	/// This can always have a safety buffer. For example, whatever is a sane value, it can be
	/// `value - 5`.
	pub prune_up_to: Option<SessionIndex>,
	/// Same semantics as [`SessionReport::leftover`].
	pub leftover: bool,
}
178
179impl<AccountId: core::fmt::Debug> core::fmt::Debug for ValidatorSetReport<AccountId> {
180	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
181		f.debug_struct("ValidatorSetReport")
182			.field("new_validator_set", &self.new_validator_set)
183			.field("id", &self.id)
184			.field("prune_up_to", &self.prune_up_to)
185			.field("leftover", &self.leftover)
186			.finish()
187	}
188}
189
190impl<AccountId> core::fmt::Display for ValidatorSetReport<AccountId> {
191	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
192		f.debug_struct("ValidatorSetReport")
193			.field("new_validator_set", &self.new_validator_set.len())
194			.field("id", &self.id)
195			.field("prune_up_to", &self.prune_up_to)
196			.field("leftover", &self.leftover)
197			.finish()
198	}
199}
200
201impl<AccountId> ValidatorSetReport<AccountId> {
202	/// A new instance of self that is terminal. This is useful when we want to send everything in
203	/// one go.
204	pub fn new_terminal(
205		new_validator_set: Vec<AccountId>,
206		id: u32,
207		prune_up_to: Option<SessionIndex>,
208	) -> Self {
209		Self { new_validator_set, id, prune_up_to, leftover: false }
210	}
211
212	/// Merge oneself with another instance.
213	pub fn merge(mut self, other: Self) -> Result<Self, UnexpectedKind> {
214		if self.id != other.id || self.prune_up_to != other.prune_up_to {
215			// Must be some bug -- don't merge.
216			return Err(UnexpectedKind::ValidatorSetIntegrityFailed);
217		}
218		self.new_validator_set.extend(other.new_validator_set);
219		self.leftover = other.leftover;
220		Ok(self)
221	}
222
223	/// Split self into chunks of `chunk_size` element.
224	pub fn split(self, chunk_size: usize) -> Vec<Self>
225	where
226		AccountId: Clone,
227	{
228		let splitted_points = self.new_validator_set.chunks(chunk_size.max(1)).map(|x| x.to_vec());
229		let mut parts = splitted_points
230			.into_iter()
231			.map(|new_validator_set| Self { new_validator_set, leftover: true, ..self })
232			.collect::<Vec<_>>();
233		if let Some(x) = parts.last_mut() {
234			x.leftover = false
235		}
236		parts
237	}
238}
239
#[derive(Encode, Decode, DecodeWithMemTracking, Clone, PartialEq, TypeInfo, MaxEncodedLen)]
/// The information that is sent from RC -> AH on session end.
pub struct SessionReport<AccountId> {
	/// The session that is ending.
	///
	/// This always implies start of `end_index + 1`, and planning of `end_index + 2`.
	pub end_index: SessionIndex,
	/// All of the points that validators have accumulated.
	///
	/// This can be either from block authoring, or from parachain consensus, or anything else.
	pub validator_points: Vec<(AccountId, u32)>,
	/// If none, it means no new validator set was activated as a part of this session.
	///
	/// If `Some((timestamp, id))`, it means that the new validator set was activated at the given
	/// timestamp, and the id of the validator set is `id`.
	///
	/// This `id` is what was previously communicated to the RC as a part of
	/// [`ValidatorSetReport::id`].
	pub activation_timestamp: Option<(u64, u32)>,
	/// If this session report is self-contained, then it is false.
	///
	/// If this session report has some leftover, it should not be acted upon until a subsequent
	/// message with `leftover = true` comes in. The client pallets should handle this queuing.
	///
	/// This is in place to future proof us against possibly needing to send multiple rounds of
	/// messages to convey all of the `validator_points`.
	///
	/// Upon processing, this should always be false (a report is only acted upon once it is
	/// terminal), and it should be ignored.
	pub leftover: bool,
}
270
271impl<AccountId: core::fmt::Debug> core::fmt::Debug for SessionReport<AccountId> {
272	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
273		f.debug_struct("SessionReport")
274			.field("end_index", &self.end_index)
275			.field("validator_points", &self.validator_points)
276			.field("activation_timestamp", &self.activation_timestamp)
277			.field("leftover", &self.leftover)
278			.finish()
279	}
280}
281
282impl<AccountId> core::fmt::Display for SessionReport<AccountId> {
283	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
284		f.debug_struct("SessionReport")
285			.field("end_index", &self.end_index)
286			.field("validator_points", &self.validator_points.len())
287			.field("activation_timestamp", &self.activation_timestamp)
288			.field("leftover", &self.leftover)
289			.finish()
290	}
291}
292
293impl<AccountId> SessionReport<AccountId> {
294	/// A new instance of self that is terminal. This is useful when we want to send everything in
295	/// one go.
296	pub fn new_terminal(
297		end_index: SessionIndex,
298		validator_points: Vec<(AccountId, u32)>,
299		activation_timestamp: Option<(u64, u32)>,
300	) -> Self {
301		Self { end_index, validator_points, activation_timestamp, leftover: false }
302	}
303
304	/// Merge oneself with another instance.
305	pub fn merge(mut self, other: Self) -> Result<Self, UnexpectedKind> {
306		if self.end_index != other.end_index ||
307			self.activation_timestamp != other.activation_timestamp
308		{
309			// Must be some bug -- don't merge.
310			return Err(UnexpectedKind::SessionReportIntegrityFailed);
311		}
312		self.validator_points.extend(other.validator_points);
313		self.leftover = other.leftover;
314		Ok(self)
315	}
316
317	/// Split oneself into `count` number of pieces.
318	pub fn split(self, chunk_size: usize) -> Vec<Self>
319	where
320		AccountId: Clone,
321	{
322		let splitted_points = self.validator_points.chunks(chunk_size.max(1)).map(|x| x.to_vec());
323		let mut parts = splitted_points
324			.into_iter()
325			.map(|validator_points| Self { validator_points, leftover: true, ..self })
326			.collect::<Vec<_>>();
327		if let Some(x) = parts.last_mut() {
328			x.leftover = false
329		}
330		parts
331	}
332}
333
/// A trait to encapsulate messages between RC and AH that can be split into smaller chunks.
///
/// Implemented for [`SessionReport`] and [`ValidatorSetReport`].
#[allow(clippy::len_without_is_empty)]
pub trait SplittableMessage: Sized {
	/// Split yourself into pieces of `chunk_size` size.
	fn split_by(self, chunk_size: usize) -> Vec<Self>;

	/// Current length of the message, i.e. the number of splittable items it carries.
	fn len(&self) -> usize;
}
345
346impl<AccountId: Clone> SplittableMessage for SessionReport<AccountId> {
347	fn split_by(self, chunk_size: usize) -> Vec<Self> {
348		self.split(chunk_size)
349	}
350	fn len(&self) -> usize {
351		self.validator_points.len()
352	}
353}
354
355impl<AccountId: Clone> SplittableMessage for ValidatorSetReport<AccountId> {
356	fn split_by(self, chunk_size: usize) -> Vec<Self> {
357		self.split(chunk_size)
358	}
359	fn len(&self) -> usize {
360		self.new_validator_set.len()
361	}
362}
363
/// Common utility to send XCM messages that can use [`SplittableMessage`].
///
/// It can be used both in the RC and AH. `Message` is the splittable message type, and `ToXcm`
/// should be configured by the user, converting `message` to a valid `Xcm<()>`. It should utilize
/// the correct call indices, which we only know at the runtime level.
pub struct XCMSender<Sender, Destination, Message, ToXcm>(
	// zero-sized: all four parameters are compile-time configuration only.
	core::marker::PhantomData<(Sender, Destination, Message, ToXcm)>,
);
372
impl<Sender, Destination, Message, ToXcm> XCMSender<Sender, Destination, Message, ToXcm>
where
	Sender: SendXcm,
	Destination: Get<Location>,
	Message: SplittableMessage + Display + Clone + Encode,
	ToXcm: Convert<Message, Xcm<()>>,
{
	/// Safe send method to send a `message`, while validating it and using [`SplittableMessage`] to
	/// split it into smaller pieces if XCM validation fails with `ExceedsMaxMessageSize`. It will
	/// fail on other errors.
	///
	/// `maybe_max_steps` bounds how many halving steps [`Self::prepare`] may take before giving
	/// up; `None` means only the `chunk_size == 0` condition stops the search.
	///
	/// It will only emit some logs, and has no return value. This is used in the runtime, so it
	/// cannot deposit any events at this level.
	///
	/// NOTE(review): the log target here ("runtime::rc-client") differs from both the crate's
	/// `LOG_TARGET` and the target used in `prepare` ("runtime::staking-async::xcm") — confirm
	/// whether this divergence is intentional for log filtering.
	pub fn split_then_send(message: Message, maybe_max_steps: Option<u32>) {
		let message_type_name = core::any::type_name::<Message>();
		let dest = Destination::get();
		// Split the message into sendable chunks; on failure only log -- nothing else we can do.
		let xcms = match Self::prepare(message, maybe_max_steps) {
			Ok(x) => x,
			Err(e) => {
				log::error!(target: "runtime::rc-client", "๐Ÿ“จ Failed to split message {}: {:?}", message_type_name, e);
				return;
			},
		};

		// Send each chunk independently; failure of one part does not stop the remaining parts.
		for (idx, xcm) in xcms.into_iter().enumerate() {
			log::debug!(target: "runtime::rc-client", "๐Ÿ“จ sending {} message index {}, size: {:?}", message_type_name, idx, xcm.encoded_size());
			let result = send_xcm::<Sender>(dest.clone(), xcm);
			match result {
				Ok(_) => {
					log::debug!(target: "runtime::rc-client", "๐Ÿ“จ Successfully sent {} message part {} to relay chain", message_type_name,  idx)
				},
				Err(e) => {
					log::error!(target: "runtime::rc-client", "๐Ÿ“จ Failed to send {} message to relay chain: {:?}", message_type_name, e)
				},
			}
		}
	}

	/// Find the largest chunk size whose first (heaviest) chunk passes `Sender::validate`, by
	/// repeatedly halving `chunk_size` upon `ExceedsMaxMessageSize`, then convert all chunks to
	/// XCM. Any other validation error, or exhausting `maybe_max_steps`/reaching a zero chunk
	/// size, aborts with an `Err`.
	fn prepare(message: Message, maybe_max_steps: Option<u32>) -> Result<Vec<Xcm<()>>, SendError> {
		// initial chunk size is the entire thing, so it will be a vector of 1 item.
		let mut chunk_size = message.len();
		let mut steps = 0;

		loop {
			// re-split from the original message each iteration with the current chunk size.
			let current_messages = message.clone().split_by(chunk_size);

			// the first message is the heaviest, the last one might be smaller.
			let first_message = if let Some(r) = current_messages.first() {
				r
			} else {
				// an empty message splits into zero chunks; nothing to send.
				log::debug!(target: "runtime::staking-async::xcm", "๐Ÿ“จ unexpected: no messages to send");
				return Ok(vec![]);
			};

			log::debug!(
				target: "runtime::staking-async::xcm",
				"๐Ÿ“จ step: {:?}, chunk_size: {:?}, message_size: {:?}",
				steps,
				chunk_size,
				first_message.encoded_size(),
			);

			// only validate the heaviest chunk; if it fits, all chunks fit.
			let first_xcm = ToXcm::convert(first_message.clone());
			match <Sender as SendXcm>::validate(&mut Some(Destination::get()), &mut Some(first_xcm))
			{
				Ok((_ticket, price)) => {
					log::debug!(target: "runtime::staking-async::xcm", "๐Ÿ“จ validated, price: {:?}", price);
					return Ok(current_messages.into_iter().map(ToXcm::convert).collect::<Vec<_>>());
				},
				Err(SendError::ExceedsMaxMessageSize) => {
					log::debug!(target: "runtime::staking-async::xcm", "๐Ÿ“จ ExceedsMaxMessageSize -- reducing chunk_size");
					chunk_size = chunk_size.saturating_div(2);
					steps += 1;
					if maybe_max_steps.is_some_and(|max_steps| steps > max_steps) ||
						chunk_size.is_zero()
					{
						log::error!(target: "runtime::staking-async::xcm", "๐Ÿ“จ Exceeded max steps or chunk_size = 0");
						return Err(SendError::ExceedsMaxMessageSize);
					} else {
						// try again with the new `chunk_size`
						continue;
					}
				},
				Err(other) => {
					// any non-size-related error is unrecoverable here.
					log::error!(target: "runtime::staking-async::xcm", "๐Ÿ“จ other error -- cannot send XCM: {:?}", other);
					return Err(other);
				},
			}
		}
	}
}
464
/// Our communication trait of `pallet-staking-async-rc-client` -> `pallet-staking-async`.
///
/// This is merely a shorthand to avoid tightly-coupling the staking pallet to this pallet. It
/// limits what we can say to `pallet-staking-async` to only these functions.
pub trait AHStakingInterface {
	/// The validator account id type.
	type AccountId;
	/// Maximum number of validators that the staking system may have.
	type MaxValidatorSet: Get<u32>;

	/// New session report from the relay chain.
	fn on_relay_session_report(report: SessionReport<Self::AccountId>);

	/// Report one or more offences on the relay chain.
	///
	/// NOTE(review): an earlier doc claimed this "returns its consumed weight", but the signature
	/// returns nothing -- weight accounting is presumably handled by the implementor's own
	/// benchmarks; confirm against `pallet-staking-async`.
	fn on_new_offences(slash_session: SessionIndex, offences: Vec<Offence<Self::AccountId>>);
}
483
/// The communication trait of `pallet-staking-async` -> `pallet-staking-async-rc-client`.
pub trait RcClientInterface {
	/// The validator account ids.
	type AccountId;

	/// Report a new validator set to the relay chain.
	///
	/// - `new_validator_set`: the validators for the upcoming era.
	/// - `id`: an always-incrementing identifier for this set; see [`ValidatorSetReport::id`].
	/// - `prune_up_to`: if set, signals the relay chain that it may prune data up to this
	///   session; see [`ValidatorSetReport::prune_up_to`].
	// Fixed typo in the parameter name (`prune_up_tp` -> `prune_up_to`). Rust has no named
	// arguments and impls may use different parameter names, so this is fully compatible.
	fn validator_set(new_validator_set: Vec<Self::AccountId>, id: u32, prune_up_to: Option<u32>);
}
492
/// An offence on the relay chain. Based on [`sp_staking::offence::OffenceDetails`].
#[derive(Encode, Decode, DecodeWithMemTracking, Debug, Clone, PartialEq, TypeInfo)]
pub struct Offence<AccountId> {
	/// The offender.
	pub offender: AccountId,
	/// Those who have reported this offence.
	pub reporters: Vec<AccountId>,
	/// The amount that they should be slashed.
	pub slash_fraction: Perbill,
}
503
#[frame_support::pallet]
pub mod pallet {
	use super::*;
	use alloc::vec;
	use frame_system::pallet_prelude::*;

	/// The in-code storage version.
	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);

	/// An incomplete incoming session report that we have not acted upon yet.
	///
	/// Populated when a report with `leftover == true` arrives; merged with (and cleared by)
	/// subsequent chunks, and finally handed to staking once a terminal chunk completes it.
	// Note: this can remain unbounded, as the internals of `AHStakingInterface` is benchmarked, and
	// is worst case.
	#[pallet::storage]
	#[pallet::unbounded]
	pub type IncompleteSessionReport<T: Config> =
		StorageValue<_, SessionReport<T::AccountId>, OptionQuery>;

	/// The last session report's `end_index` that we have acted upon.
	///
	/// This allows this pallet to ensure a sequentially increasing sequence of session reports
	/// passed to staking.
	///
	/// Note that with the XCM being the backbone of communication, we have a guarantee on the
	/// ordering of messages. As long as the RC sends session reports in order, we _eventually_
	/// receive them in the same correct order as well.
	#[pallet::storage]
	pub type LastSessionReportEndingIndex<T: Config> = StorageValue<_, SessionIndex, OptionQuery>;

	#[pallet::pallet]
	#[pallet::storage_version(STORAGE_VERSION)]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// An origin type that allows us to be sure a call is being dispatched by the relay chain.
		///
		/// It can be configured to something like `Root` or relay chain or similar.
		type RelayChainOrigin: EnsureOrigin<Self::RuntimeOrigin>;

		/// Our communication handle to the local staking pallet.
		type AHStakingInterface: AHStakingInterface<AccountId = Self::AccountId>;

		/// Our communication handle to the relay chain.
		type SendToRelayChain: SendToRelayChain<AccountId = Self::AccountId>;
	}

	#[pallet::event]
	#[pallet::generate_deposit(pub(crate) fn deposit_event)]
	pub enum Event<T: Config> {
		/// A session report was received from the relay chain.
		SessionReportReceived {
			end_index: SessionIndex,
			activation_timestamp: Option<(u64, u32)>,
			validator_points_counts: u32,
			leftover: bool,
		},
		/// A new offence was reported.
		OffenceReceived { slash_session: SessionIndex, offences_count: u32 },
		/// Something occurred that should never happen under normal operation.
		/// Logged as an event for fail-safe observability.
		Unexpected(UnexpectedKind),
	}

	/// Represents unexpected or invariant-breaking conditions encountered during execution.
	///
	/// These variants are emitted as [`Event::Unexpected`] and indicate a defensive check has
	/// failed. While these should never occur under normal operation, they are useful for
	/// diagnosing issues in production or test environments.
	#[derive(Clone, Encode, Decode, DecodeWithMemTracking, PartialEq, TypeInfo, RuntimeDebug)]
	pub enum UnexpectedKind {
		/// We could not merge the chunks, and therefore dropped the session report.
		SessionReportIntegrityFailed,
		/// We could not merge the chunks, and therefore dropped the validator set.
		ValidatorSetIntegrityFailed,
		/// The received session index is more than what we expected.
		SessionSkipped,
		/// A session in the past was received. This will not raise any errors, just emit an event
		/// and stop processing the report.
		SessionAlreadyProcessed,
	}

	impl<T: Config> RcClientInterface for Pallet<T> {
		type AccountId = T::AccountId;

		// Wrap the raw data into a terminal (single-message) report and forward it to the RC.
		fn validator_set(
			new_validator_set: Vec<Self::AccountId>,
			id: u32,
			prune_up_tp: Option<u32>,
		) {
			let report = ValidatorSetReport::new_terminal(new_validator_set, id, prune_up_tp);
			T::SendToRelayChain::validator_set(report);
		}
	}

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		/// Called to indicate the start of a new session on the relay chain.
		///
		/// Gated by [`Config::RelayChainOrigin`] (or root). Enforces a sequentially increasing
		/// stream of session reports, buffers multi-chunk (`leftover`) reports, and forwards
		/// complete ones to [`Config::AHStakingInterface`].
		#[pallet::call_index(0)]
		#[pallet::weight(
			// `LastSessionReportEndingIndex`: rw
			// `IncompleteSessionReport`: rw
			// NOTE: what happens inside `AHStakingInterface` is benchmarked and registered in `pallet-staking-async`
			T::DbWeight::get().reads_writes(2, 2)
		)]
		pub fn relay_session_report(
			origin: OriginFor<T>,
			report: SessionReport<T::AccountId>,
		) -> DispatchResult {
			log!(debug, "Received session report: {}", report);
			T::RelayChainOrigin::ensure_origin_or_root(origin)?;

			// Sequencing check: gaps are tolerated with a warning event; reports at or before the
			// last processed index are dropped entirely.
			match LastSessionReportEndingIndex::<T>::get() {
				None => {
					// first session report post genesis, okay.
				},
				Some(last) if report.end_index == last + 1 => {
					// incremental -- good
				},
				Some(last) if report.end_index > last + 1 => {
					// deposit a warning event, but proceed
					Self::deposit_event(Event::Unexpected(UnexpectedKind::SessionSkipped));
					log!(
						warn,
						"Session report end index is more than expected. last_index={:?}, report.index={:?}",
						last,
						report.end_index
					);
				},
				Some(past) => {
					// Stale/duplicate report: drop it, and also clear any half-merged buffer,
					// since it can no longer be completed consistently.
					log!(
						error,
						"Session report end index is not valid. last_index={:?}, report.index={:?}",
						past,
						report.end_index
					);
					Self::deposit_event(Event::Unexpected(UnexpectedKind::SessionAlreadyProcessed));
					IncompleteSessionReport::<T>::kill();
					return Ok(());
				},
			}

			Self::deposit_event(Event::SessionReportReceived {
				end_index: report.end_index,
				activation_timestamp: report.activation_timestamp,
				validator_points_counts: report.validator_points.len() as u32,
				leftover: report.leftover,
			});

			// If we have anything previously buffered, then merge it.
			let maybe_new_session_report = match IncompleteSessionReport::<T>::take() {
				Some(old) => old.merge(report.clone()),
				None => Ok(report),
			};

			// A merge failure means chunk integrity was broken: emit the event and drop
			// everything (the `take()` above has already cleared the buffer).
			if let Err(e) = maybe_new_session_report {
				Self::deposit_event(Event::Unexpected(e));
				debug_assert!(
					IncompleteSessionReport::<T>::get().is_none(),
					"we have ::take() it above, we don't want to keep the old data"
				);
				return Ok(());
			}
			let new_session_report = maybe_new_session_report.expect("checked above; qed");

			if new_session_report.leftover {
				// this is still not final -- buffer it.
				IncompleteSessionReport::<T>::put(new_session_report);
			} else {
				// this is final, report it.
				LastSessionReportEndingIndex::<T>::put(new_session_report.end_index);
				T::AHStakingInterface::on_relay_session_report(new_session_report);
			}

			Ok(())
		}

		/// Called to report one or more new offenses on the relay chain.
		///
		/// Gated by [`Config::RelayChainOrigin`] (or root); forwards the offences to
		/// [`Config::AHStakingInterface`] after emitting an event.
		#[pallet::call_index(1)]
		#[pallet::weight(
			// `on_new_offences` is benchmarked by `pallet-staking-async`
			// events are free
			// origin check is negligible.
			Weight::default()
		)]
		pub fn relay_new_offence(
			origin: OriginFor<T>,
			slash_session: SessionIndex,
			offences: Vec<Offence<T::AccountId>>,
		) -> DispatchResult {
			log!(info, "Received new offence at slash_session: {:?}", slash_session);
			T::RelayChainOrigin::ensure_origin_or_root(origin)?;

			Self::deposit_event(Event::OffenceReceived {
				slash_session,
				offences_count: offences.len() as u32,
			});

			T::AHStakingInterface::on_new_offences(slash_session, offences);
			Ok(())
		}
	}
}