// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//! Slots functionality for Substrate.
//!
//! Some consensus algorithms have a concept of *slots*, which are intervals in
//! time during which certain events can and/or must occur. This crate
//! provides generic functionality for slots.

#![forbid(unsafe_code)]
#![warn(missing_docs)]

mod aux_schema;
mod slots;

pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND};
use slots::Slots;
pub use slots::{time_until_next_slot, SlotInfo};

use futures::{future::Either, Future, TryFutureExt};
use futures_timer::Delay;
use log::{debug, info, warn};
use sc_consensus::{BlockImport, JustificationSyncLink};
use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN};
use sp_arithmetic::traits::BaseArithmetic;
use sp_consensus::{Proposal, ProposeArgs, Proposer, SelectChain, SyncOracle};
use sp_consensus_slots::{Slot, SlotDuration};
use sp_inherents::CreateInherentDataProviders;
use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT};
use std::{
	ops::Deref,
	time::{Duration, Instant},
};

const LOG_TARGET: &str = "slots";
/// The changes that need to be applied to the storage to create the state for a block.
///
/// See [`sp_state_machine::StorageChanges`] for more information.
pub type StorageChanges<Block> = sp_state_machine::StorageChanges<HashingFor<Block>>;

/// A worker that should be invoked at every new slot.
///
/// The implementation should not make any assumptions about the slot being bound to time or
/// anything similar. The only valid assumption is that the slot number is always increasing.
#[async_trait::async_trait]
pub trait SlotWorker<B: BlockT> {
	/// Called when a new slot is triggered.
	///
	/// Returns a future that resolves to a block.
	///
	/// If block production failed, `None` is returned.
	async fn on_slot(&mut self, slot_info: SlotInfo<B>) -> Option<B>;
}

/// A skeleton implementation for `SlotWorker` which tries to claim a slot at
/// its beginning and tries to produce a block if successfully claimed, timing
/// out if block production takes too long.
#[async_trait::async_trait]
pub trait SimpleSlotWorker<B: BlockT> {
	/// A handle to a `BlockImport`.
	type BlockImport: BlockImport<B> + Send + 'static;

	/// A handle to a `SyncOracle`.
	type SyncOracle: SyncOracle;

	/// A handle to a `JustificationSyncLink`, allows hooking into the sync module to control the
	/// justification sync process.
	type JustificationSyncLink: JustificationSyncLink<B>;

	/// The type of future resolving to the proposer.
	type CreateProposer: Future<Output = Result<Self::Proposer, sp_consensus::Error>>
		+ Send
		+ Unpin
		+ 'static;

	/// The type of proposer to use to build blocks.
	type Proposer: Proposer<B> + Send;

	/// Data associated with a slot claim.
	type Claim: Send + Sync + 'static;

	/// Auxiliary data necessary for authoring.
	type AuxData: Send + Sync + 'static;

	/// The logging target to use when logging messages.
	fn logging_target(&self) -> &'static str;

	/// A handle to a `BlockImport`.
	fn block_import(&mut self) -> &mut Self::BlockImport;

	/// Returns the auxiliary data necessary for authoring.
	fn aux_data(
		&self,
		header: &B::Header,
		slot: Slot,
	) -> Result<Self::AuxData, sp_consensus::Error>;

	/// Returns the number of authorities.
	/// `None` indicates that the authority information is incomplete.
	fn authorities_len(&self, aux_data: &Self::AuxData) -> Option<usize>;

	/// Tries to claim the given slot, returning an object with claim data if successful.
	async fn claim_slot(
		&mut self,
		header: &B::Header,
		slot: Slot,
		aux_data: &Self::AuxData,
	) -> Option<Self::Claim>;

	/// Notifies the given slot. Similar to `claim_slot`, but will be called no matter whether we
	/// need to author blocks or not.
	fn notify_slot(&self, _header: &B::Header, _slot: Slot, _aux_data: &Self::AuxData) {}

	/// Return the pre digest data to include in a block authored with the given claim.
	fn pre_digest_data(&self, slot: Slot, claim: &Self::Claim) -> Vec<sp_runtime::DigestItem>;

	/// Creates the `BlockImportParams` for the block that was authored with the given claim.
	async fn block_import_params(
		&self,
		header: B::Header,
		header_hash: &B::Hash,
		body: Vec<B::Extrinsic>,
		storage_changes: StorageChanges<B>,
		public: Self::Claim,
		aux_data: Self::AuxData,
	) -> Result<sc_consensus::BlockImportParams<B>, sp_consensus::Error>;

	/// Whether to force authoring if offline.
	fn force_authoring(&self) -> bool;

	/// Returns whether the block production should back off.
	///
	/// By default this function always returns `false`.
	///
	/// An example strategy that backs off if the finalized head is lagging too far behind the tip
	/// is implemented by [`BackoffAuthoringOnFinalizedHeadLagging`].
	fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool {
		false
	}

	/// Returns a handle to a `SyncOracle`.
	fn sync_oracle(&mut self) -> &mut Self::SyncOracle;

	/// Returns a handle to a `JustificationSyncLink`.
	fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink;

	/// Returns a `Proposer` to author on top of the given block.
	fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer;

	/// Returns a [`TelemetryHandle`] if any.
	fn telemetry(&self) -> Option<TelemetryHandle>;

	/// Remaining duration for proposing.
	fn proposing_remaining_duration(&self, slot_info: &SlotInfo<B>) -> Duration;

	/// Propose a block by `Proposer`.
	async fn propose(
		&mut self,
		proposer: Self::Proposer,
		claim: &Self::Claim,
		slot_info: SlotInfo<B>,
		end_proposing_at: Instant,
	) -> Option<Proposal<B>> {
		let slot = slot_info.slot;
		let telemetry = self.telemetry();
		let log_target = self.logging_target();

		let inherent_data =
			Self::create_inherent_data(&slot_info, &log_target, end_proposing_at).await?;

		let proposing_remaining_duration =
			end_proposing_at.saturating_duration_since(Instant::now());
		let logs = self.pre_digest_data(slot, claim);

		// deadline our production to 98% of the total time left for proposing. As we deadline
		// the proposing below to the same total time left, the 2% margin should be enough for
		// the result to be returned.
		let propose_args = ProposeArgs {
			inherent_data,
			inherent_digests: sp_runtime::generic::Digest { logs },
			max_duration: proposing_remaining_duration.mul_f32(0.98),
			block_size_limit: slot_info.block_size_limit,
			storage_proof_recorder: slot_info.storage_proof_recorder,
			..Default::default()
		};

		let proposing = proposer
			.propose(propose_args)
			.map_err(|e| sp_consensus::Error::ClientImport(e.to_string()));

		let proposal = match futures::future::select(
			proposing,
			Delay::new(proposing_remaining_duration),
		)
		.await
		{
			Either::Left((Ok(p), _)) => p,
			Either::Left((Err(err), _)) => {
				warn!(target: log_target, "Proposing failed: {}", err);

				return None
			},
			Either::Right(_) => {
				info!(
					target: log_target,
					"⌛️ Discarding proposal for slot {}; block production took too long", slot,
				);
				// If the node was compiled with debug, tell the user to use release optimizations.
				#[cfg(build_profile = "debug")]
				info!(
					target: log_target,
					"👉 Recompile your node in `--release` mode to mitigate this problem.",
				);
				telemetry!(
					telemetry;
					CONSENSUS_INFO;
					"slots.discarding_proposal_took_too_long";
					"slot" => *slot,
				);

				return None
			},
		};

		Some(proposal)
	}

	/// Calls `create_inherent_data` and handles errors.
	async fn create_inherent_data(
		slot_info: &SlotInfo<B>,
		logging_target: &str,
		end_proposing_at: Instant,
	) -> Option<sp_inherents::InherentData> {
		let remaining_duration = end_proposing_at.saturating_duration_since(Instant::now());
		let delay = Delay::new(remaining_duration);
		let cid = slot_info.create_inherent_data.create_inherent_data();
		let inherent_data = match futures::future::select(delay, cid).await {
			Either::Right((Ok(data), _)) => data,
			Either::Right((Err(err), _)) => {
				warn!(
					target: logging_target,
					"Unable to create inherent data for block {:?}: {}",
					slot_info.chain_head.hash(),
					err,
				);

				return None
			},
			Either::Left(_) => {
				warn!(
					target: logging_target,
					"Creating inherent data took more time than we had left for slot {} for block {:?}.",
					slot_info.slot,
					slot_info.chain_head.hash(),
				);

				return None
			},
		};

		Some(inherent_data)
	}

	/// Implements [`SlotWorker::on_slot`].
	async fn on_slot(&mut self, slot_info: SlotInfo<B>) -> Option<B>
	where
		Self: Sync,
	{
		let slot = slot_info.slot;
		let telemetry = self.telemetry();
		let logging_target = self.logging_target();

		let proposing_remaining_duration = self.proposing_remaining_duration(&slot_info);

		let end_proposing_at = if proposing_remaining_duration == Duration::default() {
			debug!(
				target: logging_target,
				"Skipping proposal slot {} since there's no time left to propose", slot,
			);

			return None
		} else {
			Instant::now() + proposing_remaining_duration
		};

		let aux_data = match self.aux_data(&slot_info.chain_head, slot) {
			Ok(aux_data) => aux_data,
			Err(err) => {
				warn!(
					target: logging_target,
					"Unable to fetch auxiliary data for block {:?}: {}",
					slot_info.chain_head.hash(),
					err,
				);

				telemetry!(
					telemetry;
					CONSENSUS_WARN;
					"slots.unable_fetching_authorities";
					"slot" => ?slot_info.chain_head.hash(),
					"err" => ?err,
				);

				return None
			},
		};

		self.notify_slot(&slot_info.chain_head, slot, &aux_data);

		let authorities_len = self.authorities_len(&aux_data);

		if !self.force_authoring() &&
			self.sync_oracle().is_offline() &&
			authorities_len.map(|a| a > 1).unwrap_or(false)
		{
			debug!(target: logging_target, "Skipping proposal slot. Waiting for the network.");
			telemetry!(
				telemetry;
				CONSENSUS_DEBUG;
				"slots.skipping_proposal_slot";
				"authorities_len" => authorities_len,
			);

			return None
		}

		let claim = self.claim_slot(&slot_info.chain_head, slot, &aux_data).await?;

		if self.should_backoff(slot, &slot_info.chain_head) {
			return None
		}

		debug!(target: logging_target, "Starting authorship at slot: {slot}");

		telemetry!(telemetry; CONSENSUS_DEBUG; "slots.starting_authorship"; "slot_num" => slot);

		let proposer = match self.proposer(&slot_info.chain_head).await {
			Ok(p) => p,
			Err(err) => {
				warn!(target: logging_target, "Unable to author block in slot {slot:?}: {err}");

				telemetry!(
					telemetry;
					CONSENSUS_WARN;
					"slots.unable_authoring_block";
					"slot" => *slot,
					"err" => ?err
				);

				return None
			},
		};

		let proposal = self.propose(proposer, &claim, slot_info, end_proposing_at).await?;

		let block = proposal.block;
		let (header, body) = block.deconstruct();
		let header_num = *header.number();
		let header_hash = header.hash();
		let parent_hash = *header.parent_hash();

		let block_import_params = match self
			.block_import_params(
				header,
				&header_hash,
				body.clone(),
				proposal.storage_changes,
				claim,
				aux_data,
			)
			.await
		{
			Ok(bi) => bi,
			Err(err) => {
				warn!(target: logging_target, "Failed to create block import params: {}", err);

				return None
			},
		};

		info!(
			target: logging_target,
			"🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.",
			header_num,
			block_import_params.post_hash(),
			header_hash,
		);

		telemetry!(
			telemetry;
			CONSENSUS_INFO;
			"slots.pre_sealed_block";
			"header_num" => ?header_num,
			"hash_now" => ?block_import_params.post_hash(),
			"hash_previously" => ?header_hash,
		);

		let header = block_import_params.post_header();
		match self.block_import().import_block(block_import_params).await {
			Ok(res) => {
				res.handle_justification(
					&header.hash(),
					*header.number(),
					self.justification_sync_link(),
				);
			},
			Err(err) => {
				warn!(
					target: logging_target,
					"Error with block built on {:?}: {}", parent_hash, err,
				);

				telemetry!(
					telemetry;
					CONSENSUS_WARN;
					"slots.err_with_block_built_on";
					"hash" => ?parent_hash,
					"err" => ?err,
				);
			},
		}

		Some(B::new(header, body))
	}
}

/// A type that implements [`SlotWorker`] for a type that implements [`SimpleSlotWorker`].
///
/// This is basically a workaround for Rust not supporting specialization. Otherwise we could
/// implement [`SlotWorker`] for any `T` that implements [`SimpleSlotWorker`], but currently
/// that would prevent downstream users from implementing [`SlotWorker`] for their own types.
pub struct SimpleSlotWorkerToSlotWorker<T>(pub T);

#[async_trait::async_trait]
impl<T: SimpleSlotWorker<B> + Send + Sync, B: BlockT> SlotWorker<B>
	for SimpleSlotWorkerToSlotWorker<T>
{
	async fn on_slot(&mut self, slot_info: SlotInfo<B>) -> Option<B> {
		self.0.on_slot(slot_info).await
	}
}

/// Slot specific extension that the inherent data provider needs to implement.
pub trait InherentDataProviderExt {
	/// The current slot that will be found in the [`InherentData`](`sp_inherents::InherentData`).
	fn slot(&self) -> Slot;
}

/// Small macro for implementing `InherentDataProviderExt` for inherent data provider tuples.
macro_rules! impl_inherent_data_provider_ext_tuple {
	( S $(, $TN:ident)* $( , )?) => {
		impl<S, $( $TN ),*>  InherentDataProviderExt for (S, $($TN),*)
		where
			S: Deref<Target = Slot>,
		{
			fn slot(&self) -> Slot {
				*self.0.deref()
			}
		}
	}
}

impl_inherent_data_provider_ext_tuple!(S);
impl_inherent_data_provider_ext_tuple!(S, A);
impl_inherent_data_provider_ext_tuple!(S, A, B);
impl_inherent_data_provider_ext_tuple!(S, A, B, C);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I, J);

/// Start a new slot worker.
///
/// Every time a new slot is triggered, `worker.on_slot` is called and the future it returns is
/// polled until completion, unless we are major syncing.
pub async fn start_slot_worker<B, C, W, SO, CIDP>(
	slot_duration: SlotDuration,
	client: C,
	mut worker: W,
	sync_oracle: SO,
	create_inherent_data_providers: CIDP,
) where
	B: BlockT,
	C: SelectChain<B>,
	W: SlotWorker<B>,
	SO: SyncOracle + Send,
	CIDP: CreateInherentDataProviders<B, ()> + Send + 'static,
	CIDP::InherentDataProviders: InherentDataProviderExt + Send,
{
	let mut slots = Slots::new(
		slot_duration.as_duration(),
		create_inherent_data_providers,
		client,
		sync_oracle,
	);

	loop {
		let slot_info = slots.next_slot().await;
		let _ = worker.on_slot(slot_info).await;
	}
}

/// A header which has been checked.
pub enum CheckedHeader<H, S> {
	/// A header which has a slot in the future. This is the full header (not stripped)
	/// and the slot in which it should be processed.
	Deferred(H, Slot),
	/// A header which is fully checked, including signature. This is the pre-header
	/// accompanied by the seal components.
	///
	/// Includes the digest item that encoded the seal.
	Checked(H, S),
}

/// A unit type wrapper to express the proportion of a slot.
pub struct SlotProportion(f32);

impl SlotProportion {
	/// Create a new proportion.
	///
	/// The given value `inner` should be in the range `[0,1]`. If the value is not in the required
	/// range, it is clamped into the range.
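	///
	/// A minimal usage sketch (out-of-range values are clamped):
	///
	/// ```
	/// use sc_consensus_slots::SlotProportion;
	///
	/// assert_eq!(SlotProportion::new(1.5).get(), 1.0);
	/// assert_eq!(SlotProportion::new(-0.5).get(), 0.0);
	/// assert_eq!(SlotProportion::new(0.25).get(), 0.25);
	/// ```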
	pub fn new(inner: f32) -> Self {
		Self(inner.clamp(0.0, 1.0))
	}

	/// Returns the inner that is guaranteed to be in the range `[0,1]`.
	pub fn get(&self) -> f32 {
		self.0
	}
}

/// The strategy used to calculate the slot lenience used to increase the block proposal time when
/// slots have been skipped with no blocks authored.
pub enum SlotLenienceType {
	/// Increase the lenience linearly with the number of skipped slots.
	Linear,
	/// Increase the lenience exponentially with the number of skipped slots.
	Exponential,
}

impl SlotLenienceType {
	fn as_str(&self) -> &'static str {
		match self {
			SlotLenienceType::Linear => "linear",
			SlotLenienceType::Exponential => "exponential",
		}
	}
}

/// Calculate the remaining duration for block proposal taking into account whether any slots have
/// been skipped and applying the given lenience strategy. If `max_block_proposal_slot_portion` is
/// not `None`, this method guarantees that the returned duration is lower than or equal to
/// `slot_info.duration * max_block_proposal_slot_portion`.
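///
/// As a rough worked example (see also the tests at the bottom of this file): with a 6 second
/// slot duration, a `block_proposal_slot_portion` of `0.25`, a parent slot of `0` and the current
/// slot at `2`, one slot was skipped, so the linear lenience adds one extra slot scaled by the
/// same portion, giving `6s * 0.25 + 6s * 0.25 = 3s` of proposing time.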
pub fn proposing_remaining_duration<Block: BlockT>(
	parent_slot: Option<Slot>,
	slot_info: &SlotInfo<Block>,
	block_proposal_slot_portion: &SlotProportion,
	max_block_proposal_slot_portion: Option<&SlotProportion>,
	slot_lenience_type: SlotLenienceType,
	log_target: &str,
) -> Duration {
	use sp_runtime::traits::Zero;

	let proposing_duration = slot_info.duration.mul_f32(block_proposal_slot_portion.get());

	let slot_remaining = slot_info
		.ends_at
		.checked_duration_since(std::time::Instant::now())
		.unwrap_or_default();

	let proposing_duration = std::cmp::min(slot_remaining, proposing_duration);

	// If parent is genesis block, we don't require any lenience factor.
	if slot_info.chain_head.number().is_zero() {
		return proposing_duration
	}

	let parent_slot = match parent_slot {
		Some(parent_slot) => parent_slot,
		None => return proposing_duration,
	};

	let slot_lenience = match slot_lenience_type {
		SlotLenienceType::Exponential => slot_lenience_exponential(parent_slot, slot_info),
		SlotLenienceType::Linear => slot_lenience_linear(parent_slot, slot_info),
	};

	if let Some(slot_lenience) = slot_lenience {
		let lenient_proposing_duration =
			proposing_duration + slot_lenience.mul_f32(block_proposal_slot_portion.get());

		// if we defined a maximum portion of the slot for proposal then we must make sure the
		// lenience doesn't go over it
		let lenient_proposing_duration =
			if let Some(max_block_proposal_slot_portion) = max_block_proposal_slot_portion {
				std::cmp::min(
					lenient_proposing_duration,
					slot_info.duration.mul_f32(max_block_proposal_slot_portion.get()),
				)
			} else {
				lenient_proposing_duration
			};

		debug!(
			target: log_target,
			"No block for {} slots. Applying {} lenience, total proposing duration: {}ms",
			slot_info.slot.saturating_sub(parent_slot + 1),
			slot_lenience_type.as_str(),
			lenient_proposing_duration.as_millis(),
		);

		lenient_proposing_duration
	} else {
		proposing_duration
	}
}

/// Calculate a slot duration lenience based on the number of missed slots from current
/// to parent. If the number of skipped slots is greater than 0, this method will apply
/// an exponential backoff of at most `2^7 * slot_duration`; if no slots were skipped,
/// this method will return `None`.
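///
/// A rough sketch of the resulting lenience, assuming a 6 second slot duration: 1 skipped slot
/// gives `1 * 6s`, 2 or 3 skipped slots give `2 * 6s`, 4 or 5 give `4 * 6s`, and from 14 skipped
/// slots onwards the lenience stays at the `2^7 * 6s` cap.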
pub fn slot_lenience_exponential<Block: BlockT>(
	parent_slot: Slot,
	slot_info: &SlotInfo<Block>,
) -> Option<Duration> {
	// never give more than 2^this times the lenience.
	const BACKOFF_CAP: u64 = 7;

	// how many slots it takes before we double the lenience.
	const BACKOFF_STEP: u64 = 2;

	// we allow a lenience of the number of slots since the head of the
	// chain was produced, minus 1 (since there is always a difference of at least 1)
	//
	// exponential back-off.
	// in normal cases we only attempt to issue blocks up to the end of the slot.
	// when the chain has been stalled for a few slots, we give more lenience.
	let skipped_slots = *slot_info.slot.saturating_sub(parent_slot + 1);

	if skipped_slots == 0 {
		None
	} else {
		let slot_lenience = skipped_slots / BACKOFF_STEP;
		let slot_lenience = std::cmp::min(slot_lenience, BACKOFF_CAP);
		let slot_lenience = 1 << slot_lenience;
		Some(slot_lenience * slot_info.duration)
	}
}

/// Calculate a slot duration lenience based on the number of missed slots from current
/// to parent. If the number of skipped slots is greater than 0, this method will apply
/// a linear backoff of at most `20 * slot_duration`; if no slots were skipped,
/// this method will return `None`.
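///
/// A rough sketch of the resulting lenience, assuming a 6 second slot duration: 1 skipped slot
/// gives `1 * 6s`, 5 skipped slots give `5 * 6s`, and anything beyond 20 skipped slots is capped
/// at `20 * 6s`.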
pub fn slot_lenience_linear<Block: BlockT>(
	parent_slot: Slot,
	slot_info: &SlotInfo<Block>,
) -> Option<Duration> {
	// never give more than 20 times more lenience.
	const BACKOFF_CAP: u64 = 20;

	// we allow a lenience of the number of slots since the head of the
	// chain was produced, minus 1 (since there is always a difference of at least 1)
	//
	// linear back-off.
	// in normal cases we only attempt to issue blocks up to the end of the slot.
	// when the chain has been stalled for a few slots, we give more lenience.
	let skipped_slots = *slot_info.slot.saturating_sub(parent_slot + 1);

	if skipped_slots == 0 {
		None
	} else {
		let slot_lenience = std::cmp::min(skipped_slots, BACKOFF_CAP);
		// We cap `slot_lenience` to `20`, so it should always fit into a `u32`.
		Some(slot_info.duration * (slot_lenience as u32))
	}
}

/// Trait for providing the strategy for when to back off block authoring.
pub trait BackoffAuthoringBlocksStrategy<N> {
	/// Returns true if we should back off authoring new blocks.
	fn should_backoff(
		&self,
		chain_head_number: N,
		chain_head_slot: Slot,
		finalized_number: N,
		slot_now: Slot,
		logging_target: &str,
	) -> bool;
}

/// A simple default strategy for deciding when to back off authoring blocks if the number of
/// unfinalized blocks grows too large.
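///
/// Roughly, authoring backs off whenever
/// `slot_now <= chain_head_slot + min(max_interval, (unfinalized_blocks - unfinalized_slack) / authoring_bias)`.
/// For example, with the defaults below (`max_interval = 100`, `unfinalized_slack = 50`,
/// `authoring_bias = 2`) and 60 unfinalized blocks, the interval is `(60 - 50) / 2 = 5`, so a new
/// slot is only claimed once the current slot is more than 5 slots past the chain head's slot.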
#[derive(Clone)]
pub struct BackoffAuthoringOnFinalizedHeadLagging<N> {
	/// The maximum interval to back off when authoring blocks, regardless of delay in finality.
	pub max_interval: N,
	/// The number of unfinalized blocks allowed before we start considering backing off block
	/// authorship. Note that depending on the value for `authoring_bias`, there might still be an
	/// additional wait until block authorship starts getting declined.
	pub unfinalized_slack: N,
	/// Scales the backoff rate. A higher value effectively means we back off more slowly, taking
	/// longer to reach the maximum backoff as the unfinalized head of the chain grows.
	pub authoring_bias: N,
}
/// These parameters are supposed to be sensible defaults.
impl<N: BaseArithmetic> Default for BackoffAuthoringOnFinalizedHeadLagging<N> {
	fn default() -> Self {
		Self {
			// Never wait more than 100 slots before authoring blocks, regardless of delay in
			// finality.
			max_interval: 100.into(),
			// Start to consider backing off block authorship once we have 50 or more unfinalized
			// blocks at the head of the chain.
			unfinalized_slack: 50.into(),
			// A reasonable default for the authoring bias, or reciprocal interval scaling, is 2,
			// effectively meaning that we consider the unfinalized head suffix to grow half as
			// fast as it actually does.
			authoring_bias: 2.into(),
		}
	}
}

impl<N> BackoffAuthoringBlocksStrategy<N> for BackoffAuthoringOnFinalizedHeadLagging<N>
where
	N: BaseArithmetic + Copy,
{
	fn should_backoff(
		&self,
		chain_head_number: N,
		chain_head_slot: Slot,
		finalized_number: N,
		slot_now: Slot,
		logging_target: &str,
	) -> bool {
		// This should not happen, but we want to keep the previous behaviour if it does.
		if slot_now <= chain_head_slot {
			return false
		}

		// There can be a race between getting the finalized number and getting the best number.
		// So, better safe than sorry.
		let unfinalized_block_length = chain_head_number.saturating_sub(finalized_number);
		let interval =
			unfinalized_block_length.saturating_sub(self.unfinalized_slack) / self.authoring_bias;
		let interval = interval.min(self.max_interval);

		// We're doing arithmetic between block and slot numbers.
		let interval: u64 = interval.unique_saturated_into();

		// If the interval is nonzero, we back off if the current slot isn't far enough ahead of
		// the chain head.
		if *slot_now <= *chain_head_slot + interval {
			info!(
				target: logging_target,
				"Backing off claiming new slot for block authorship: finality is lagging.",
			);
			true
		} else {
			false
		}
	}
}

impl<N> BackoffAuthoringBlocksStrategy<N> for () {
	fn should_backoff(
		&self,
		_chain_head_number: N,
		_chain_head_slot: Slot,
		_finalized_number: N,
		_slot_now: Slot,
		_logging_target: &str,
	) -> bool {
		false
	}
}

#[cfg(test)]
mod test {
	use super::*;
	use sp_runtime::traits::NumberFor;
	use std::time::{Duration, Instant};
	use substrate_test_runtime_client::runtime::{Block, Header};

	const SLOT_DURATION: Duration = Duration::from_millis(6000);

	fn slot(slot: u64) -> super::slots::SlotInfo<Block> {
		super::slots::SlotInfo {
			slot: slot.into(),
			duration: SLOT_DURATION,
			create_inherent_data: Box::new(()),
			ends_at: Instant::now() + SLOT_DURATION,
			chain_head: Header::new(
				1,
				Default::default(),
				Default::default(),
				Default::default(),
				Default::default(),
			),
			block_size_limit: None,
			storage_proof_recorder: None,
		}
	}

	#[test]
	fn linear_slot_lenience() {
		// if no slots are skipped there should be no lenience
		assert_eq!(super::slot_lenience_linear(1u64.into(), &slot(2)), None);

		// otherwise the lenience is incremented linearly with
		// the number of skipped slots.
		for n in 3..=22 {
			assert_eq!(
				super::slot_lenience_linear(1u64.into(), &slot(n)),
				Some(SLOT_DURATION * (n - 2) as u32),
			);
		}

		// but we cap it to a maximum of 20 slots
		assert_eq!(super::slot_lenience_linear(1u64.into(), &slot(23)), Some(SLOT_DURATION * 20));
	}

	#[test]
	fn exponential_slot_lenience() {
		// if no slots are skipped there should be no lenience
		assert_eq!(super::slot_lenience_exponential(1u64.into(), &slot(2)), None);

		// otherwise the lenience is incremented exponentially every two slots
		for n in 3..=17 {
			assert_eq!(
				super::slot_lenience_exponential(1u64.into(), &slot(n)),
				Some(SLOT_DURATION * 2u32.pow((n / 2 - 1) as u32)),
			);
		}

		// but we cap it to a maximum of 2^7 slot durations once 14 or more slots are skipped
		assert_eq!(
			super::slot_lenience_exponential(1u64.into(), &slot(18)),
			Some(SLOT_DURATION * 2u32.pow(7)),
		);

		assert_eq!(
			super::slot_lenience_exponential(1u64.into(), &slot(19)),
			Some(SLOT_DURATION * 2u32.pow(7)),
		);
	}

	#[test]
	fn proposing_remaining_duration_should_apply_lenience_based_on_proposal_slot_proportion() {
		assert_eq!(
			proposing_remaining_duration(
				Some(0.into()),
				&slot(2),
				&SlotProportion(0.25),
				None,
				SlotLenienceType::Linear,
				"test",
			),
			SLOT_DURATION.mul_f32(0.25 * 2.0),
		);
	}

	#[test]
	fn proposing_remaining_duration_should_never_exceed_max_proposal_slot_proportion() {
		assert_eq!(
			proposing_remaining_duration(
				Some(0.into()),
				&slot(100),
				&SlotProportion(0.25),
				Some(SlotProportion(0.9)).as_ref(),
				SlotLenienceType::Exponential,
				"test",
			),
			SLOT_DURATION.mul_f32(0.9),
		);
	}

	#[derive(PartialEq, Debug)]
	struct HeadState {
		head_number: NumberFor<Block>,
		head_slot: u64,
		slot_now: NumberFor<Block>,
	}

	impl HeadState {
		fn author_block(&mut self) {
			// Add a block to the head, and set latest slot to the current
			self.head_number += 1;
			self.head_slot = self.slot_now;
			// Advance slot to next
			self.slot_now += 1;
		}

		fn dont_author_block(&mut self) {
			self.slot_now += 1;
		}
	}

	#[test]
	fn should_never_backoff_when_head_not_advancing() {
		let strategy = BackoffAuthoringOnFinalizedHeadLagging::<NumberFor<Block>> {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};

		let head_number = 1;
		let head_slot = 1;
		let finalized_number = 1;
		let slot_now = 2;

		let should_backoff: Vec<bool> = (slot_now..1000)
			.map(|s| {
				strategy.should_backoff(
					head_number,
					head_slot.into(),
					finalized_number,
					s.into(),
					"slots",
				)
			})
			.collect();

		// Should always be false, since the head isn't advancing
		let expected: Vec<bool> = (slot_now..1000).map(|_| false).collect();
		assert_eq!(should_backoff, expected);
	}

	#[test]
	fn should_stop_authoring_if_blocks_are_still_produced_when_finality_stalled() {
		let strategy = BackoffAuthoringOnFinalizedHeadLagging::<NumberFor<Block>> {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};

		let mut head_number = 1;
		let mut head_slot = 1;
		let finalized_number = 1;
		let slot_now = 2;

		let should_backoff: Vec<bool> = (slot_now..300)
			.map(move |s| {
				let b = strategy.should_backoff(
					head_number,
					head_slot.into(),
					finalized_number,
					s.into(),
					"slots",
				);
				// Chain is still advancing (by someone else)
				head_number += 1;
				head_slot = s;
				b
			})
			.collect();

		// Should always be true after a short while, since the chain is advancing but finality is
		// stalled
		let expected: Vec<bool> = (slot_now..300).map(|s| s > 8).collect();
		assert_eq!(should_backoff, expected);
	}

	#[test]
	fn should_never_backoff_if_max_interval_is_reached() {
		let strategy = BackoffAuthoringOnFinalizedHeadLagging::<NumberFor<Block>> {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};

		// The limit `max_interval` is used when the unfinalized chain grows to
		// 	`max_interval * authoring_bias + unfinalized_slack`,
		// which for the above parameters becomes
		// 	100 * 2 + 5 = 205.
		// Hence we trigger this with head_number > finalized_number + 205.
		let head_number = 207;
		let finalized_number = 1;

		// The limit is then used once the current slot is `max_interval` ahead of the slot of the head.
		let head_slot = 1;
		let slot_now = 2;
		let max_interval = strategy.max_interval;

		let should_backoff: Vec<bool> = (slot_now..200)
			.map(|s| {
				strategy.should_backoff(
					head_number,
					head_slot.into(),
					finalized_number,
					s.into(),
					"slots",
				)
			})
			.collect();

		// Should back off (true) until we are `max_interval` slots ahead of the chain head slot,
		// then we never back off (false).
		let expected: Vec<bool> = (slot_now..200).map(|s| s <= max_interval + head_slot).collect();
		assert_eq!(should_backoff, expected);
	}

	#[test]
	fn should_backoff_authoring_when_finality_stalled() {
		let param = BackoffAuthoringOnFinalizedHeadLagging {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};

		let finalized_number = 2;
		let mut head_state = HeadState { head_number: 4, head_slot: 10, slot_now: 11 };

		let should_backoff = |head_state: &HeadState| -> bool {
			<dyn BackoffAuthoringBlocksStrategy<NumberFor<Block>>>::should_backoff(
				&param,
				head_state.head_number,
				head_state.head_slot.into(),
				finalized_number,
				head_state.slot_now.into(),
				"slots",
			)
		};

		let backoff: Vec<bool> = (head_state.slot_now..200)
			.map(|_| {
				if should_backoff(&head_state) {
					head_state.dont_author_block();
					true
				} else {
					head_state.author_block();
					false
				}
			})
			.collect();

		// Gradually start to back off more and more frequently
		let expected = [
			false, false, false, false, false, // no effect
			true, false, true, false, // 1:1
			true, true, false, true, true, false, // 2:1
			true, true, true, false, true, true, true, false, // 3:1
			true, true, true, true, false, true, true, true, true, false, // 4:1
			true, true, true, true, true, false, true, true, true, true, true, false, // 5:1
			true, true, true, true, true, true, false, true, true, true, true, true, true,
			false, // 6:1
			true, true, true, true, true, true, true, false, true, true, true, true, true, true,
			true, false, // 7:1
			true, true, true, true, true, true, true, true, false, true, true, true, true, true,
			true, true, true, false, // 8:1
			true, true, true, true, true, true, true, true, true, false, true, true, true, true,
			true, true, true, true, true, false, // 9:1
			true, true, true, true, true, true, true, true, true, true, false, true, true, true,
			true, true, true, true, true, true, true, false, // 10:1
			true, true, true, true, true, true, true, true, true, true, true, false, true, true,
			true, true, true, true, true, true, true, true, true, false, // 11:1
			true, true, true, true, true, true, true, true, true, true, true, true, false, true,
			true, true, true, true, true, true, true, true, true, true, true, false, // 12:1
			true, true, true, true,
		];

		assert_eq!(backoff.as_slice(), &expected[..]);
	}

	#[test]
	fn should_never_wait_more_than_max_interval() {
		let param = BackoffAuthoringOnFinalizedHeadLagging {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};

		let finalized_number = 2;
		let starting_slot = 11;
		let mut head_state = HeadState { head_number: 4, head_slot: 10, slot_now: starting_slot };

		let should_backoff = |head_state: &HeadState| -> bool {
			<dyn BackoffAuthoringBlocksStrategy<NumberFor<Block>>>::should_backoff(
				&param,
				head_state.head_number,
				head_state.head_slot.into(),
				finalized_number,
				head_state.slot_now.into(),
				"slots",
			)
		};

		let backoff: Vec<bool> = (head_state.slot_now..40000)
			.map(|_| {
				if should_backoff(&head_state) {
					head_state.dont_author_block();
					true
				} else {
					head_state.author_block();
					false
				}
			})
			.collect();

		let slots_claimed: Vec<usize> = backoff
			.iter()
			.enumerate()
			.filter(|&(_i, x)| x == &false)
			.map(|(i, _x)| i + starting_slot as usize)
			.collect();

		let last_slot = backoff.len() + starting_slot as usize;
		let mut last_two_claimed = slots_claimed.iter().rev().take(2);

		// Check that we claimed all the way to the end. Check two slots for when we have an uneven
		// number of slots_claimed.
		let expected_distance = param.max_interval as usize + 1;
		assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92);
		assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92 + expected_distance);

		let intervals: Vec<_> = slots_claimed.windows(2).map(|x| x[1] - x[0]).collect();

		// The key thing is that the distance between claimed slots is capped to `max_interval + 1`
		// assert_eq!(max_observed_interval, Some(&expected_distance));
		assert_eq!(intervals.iter().max(), Some(&expected_distance));

		// But let's assert all distances, which we expect to grow linearly until `max_interval + 1`
		let expected_intervals: Vec<_> =
			(0..497).map(|i| (i / 2).clamp(1, expected_distance)).collect();

		assert_eq!(intervals, expected_intervals);
	}

	fn run_until_max_interval(param: BackoffAuthoringOnFinalizedHeadLagging<u64>) -> (u64, u64) {
		let finalized_number = 0;
		let mut head_state = HeadState { head_number: 0, head_slot: 0, slot_now: 1 };

		let should_backoff = |head_state: &HeadState| -> bool {
			<dyn BackoffAuthoringBlocksStrategy<NumberFor<Block>>>::should_backoff(
				&param,
				head_state.head_number,
				head_state.head_slot.into(),
				finalized_number,
				head_state.slot_now.into(),
				"slots",
			)
		};

		// Number of blocks until we reach the max interval
		let block_for_max_interval =
			param.max_interval * param.authoring_bias + param.unfinalized_slack;

		while head_state.head_number < block_for_max_interval {
			if should_backoff(&head_state) {
				head_state.dont_author_block();
			} else {
				head_state.author_block();
			}
		}

		let slot_time = 6;
		let time_to_reach_limit = slot_time * head_state.slot_now;
		(block_for_max_interval, time_to_reach_limit)
	}

	// Denoting
	// 	C: unfinalized_slack
	// 	M: authoring_bias
	// 	X: max_interval
	// then the number of slots to reach the max interval can be computed from
	// 	(start_slot + C) + M * sum(n, 1, X)
	// or
	// 	(start_slot + C) + M * X*(X+1)/2
	fn expected_time_to_reach_max_interval(
		param: &BackoffAuthoringOnFinalizedHeadLagging<u64>,
	) -> (u64, u64) {
		let c = param.unfinalized_slack;
		let m = param.authoring_bias;
		let x = param.max_interval;
		let slot_time = 6;

		let block_for_max_interval = x * m + c;

		// The 1 is because we start at slot_now = 1.
		let expected_number_of_slots = (1 + c) + m * x * (x + 1) / 2;
		let time_to_reach = expected_number_of_slots * slot_time;

		(block_for_max_interval, time_to_reach)
	}

	#[test]
	fn time_to_reach_upper_bound_for_smaller_slack() {
		let param = BackoffAuthoringOnFinalizedHeadLagging {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};
		let expected = expected_time_to_reach_max_interval(&param);
		let (block_for_max_interval, time_to_reach_limit) = run_until_max_interval(param);
		assert_eq!((block_for_max_interval, time_to_reach_limit), expected);
		// Note: 16 hours is 57600 sec
		assert_eq!((block_for_max_interval, time_to_reach_limit), (205, 60636));
	}

	#[test]
	fn time_to_reach_upper_bound_for_larger_slack() {
		let param = BackoffAuthoringOnFinalizedHeadLagging {
			max_interval: 100,
			unfinalized_slack: 50,
			authoring_bias: 2,
		};
		let expected = expected_time_to_reach_max_interval(&param);
		let (block_for_max_interval, time_to_reach_limit) = run_until_max_interval(param);
		assert_eq!((block_for_max_interval, time_to_reach_limit), expected);
		assert_eq!((block_for_max_interval, time_to_reach_limit), (250, 60906));
	}
}