sc_consensus_slots/lib.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//! Slots functionality for Substrate.
//!
//! Some consensus algorithms have a concept of *slots*, which are intervals in
//! time during which certain events can and/or must occur. This crate
//! provides generic functionality for slots.
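//!
//! # Example
//!
//! A minimal sketch of the slot arithmetic this crate is built around (the real computation
//! lives in [`time_until_next_slot`]); the 6 second slot duration is just an assumed example
//! value:
//!
//! ```
//! use std::time::{Duration, SystemTime, UNIX_EPOCH};
//!
//! let slot_duration = Duration::from_millis(6000);
//! let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis();
//! // The current slot is the elapsed wall-clock time divided by the slot duration ...
//! let slot = now / slot_duration.as_millis();
//! // ... and the next slot begins once the next multiple of the slot duration is reached.
//! let until_next_slot = slot_duration.as_millis() - now % slot_duration.as_millis();
//! assert!(now + until_next_slot == (slot + 1) * slot_duration.as_millis());
//! ```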

#![forbid(unsafe_code)]
#![warn(missing_docs)]

mod aux_schema;
mod slots;

pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND};
use slots::Slots;
pub use slots::{time_until_next_slot, SlotInfo};

use futures::{future::Either, Future, TryFutureExt};
use futures_timer::Delay;
use log::{debug, info, warn};
use sc_consensus::{BlockImport, JustificationSyncLink};
use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN};
use sp_arithmetic::traits::BaseArithmetic;
use sp_consensus::{Proposal, Proposer, SelectChain, SyncOracle};
use sp_consensus_slots::{Slot, SlotDuration};
use sp_inherents::CreateInherentDataProviders;
use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT};
use std::{
	fmt::Debug,
	ops::Deref,
	time::{Duration, Instant},
};

const LOG_TARGET: &str = "slots";

/// The changes that need to be applied to the storage to create the state for a block.
///
/// See [`sp_state_machine::StorageChanges`] for more information.
pub type StorageChanges<Block> = sp_state_machine::StorageChanges<HashingFor<Block>>;

/// The result of [`SlotWorker::on_slot`].
#[derive(Debug, Clone)]
pub struct SlotResult<Block: BlockT, Proof> {
	/// The block that was built.
	pub block: Block,
	/// The storage proof that was recorded while building the block.
	pub storage_proof: Proof,
}

/// A worker that should be invoked at every new slot.
///
/// The implementation should not make any assumptions about the slot being bound to wall-clock
/// time or similar. The only valid assumption is that the slot number is always increasing.
#[async_trait::async_trait]
pub trait SlotWorker<B: BlockT, Proof> {
	/// Called when a new slot is triggered.
	///
	/// Returns a future that resolves to a [`SlotResult`] iff a block was successfully built in
	/// the slot. Otherwise `None` is returned.
	async fn on_slot(&mut self, slot_info: SlotInfo<B>) -> Option<SlotResult<B, Proof>>;
}

/// A skeleton implementation for `SlotWorker` which tries to claim a slot at
/// its beginning and tries to produce a block if successfully claimed, timing
/// out if block production takes too long.
#[async_trait::async_trait]
pub trait SimpleSlotWorker<B: BlockT> {
	/// A handle to a `BlockImport`.
	type BlockImport: BlockImport<B> + Send + 'static;

	/// A handle to a `SyncOracle`.
	type SyncOracle: SyncOracle;

	/// A handle to a `JustificationSyncLink`, allows hooking into the sync module to control the
	/// justification sync process.
	type JustificationSyncLink: JustificationSyncLink<B>;

	/// The type of future resolving to the proposer.
	type CreateProposer: Future<Output = Result<Self::Proposer, sp_consensus::Error>>
		+ Send
		+ Unpin
		+ 'static;

	/// The type of proposer to use to build blocks.
	type Proposer: Proposer<B> + Send;

	/// Data associated with a slot claim.
	type Claim: Send + Sync + 'static;

	/// Auxiliary data necessary for authoring.
	type AuxData: Send + Sync + 'static;

	/// The logging target to use when logging messages.
	fn logging_target(&self) -> &'static str;

	/// A handle to a `BlockImport`.
	fn block_import(&mut self) -> &mut Self::BlockImport;

	/// Returns the auxiliary data necessary for authoring.
	fn aux_data(
		&self,
		header: &B::Header,
		slot: Slot,
	) -> Result<Self::AuxData, sp_consensus::Error>;

	/// Returns the number of authorities.
	///
	/// `None` indicates that the authority information is incomplete.
	fn authorities_len(&self, aux_data: &Self::AuxData) -> Option<usize>;

	/// Tries to claim the given slot, returning an object with claim data if successful.
	async fn claim_slot(
		&mut self,
		header: &B::Header,
		slot: Slot,
		aux_data: &Self::AuxData,
	) -> Option<Self::Claim>;

	/// Notifies of the given slot. Similar to `claim_slot`, but will be called no matter whether we
	/// need to author blocks or not.
	fn notify_slot(&self, _header: &B::Header, _slot: Slot, _aux_data: &Self::AuxData) {}

	/// Returns the pre-digest data to include in a block authored with the given claim.
	fn pre_digest_data(&self, slot: Slot, claim: &Self::Claim) -> Vec<sp_runtime::DigestItem>;

	/// Returns a `BlockImportParams` for the block that was built.
	async fn block_import_params(
		&self,
		header: B::Header,
		header_hash: &B::Hash,
		body: Vec<B::Extrinsic>,
		storage_changes: StorageChanges<B>,
		public: Self::Claim,
		aux_data: Self::AuxData,
	) -> Result<sc_consensus::BlockImportParams<B>, sp_consensus::Error>;

	/// Whether to force authoring if offline.
	fn force_authoring(&self) -> bool;

	/// Returns whether the block production should back off.
	///
	/// By default this function always returns `false`.
	///
	/// An example strategy that backs off if the finalized head is lagging too much behind the tip
	/// is implemented by [`BackoffAuthoringOnFinalizedHeadLagging`].
	fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool {
		false
	}

	/// Returns a handle to a `SyncOracle`.
	fn sync_oracle(&mut self) -> &mut Self::SyncOracle;

	/// Returns a handle to a `JustificationSyncLink`.
	fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink;

	/// Returns a `Proposer` to author on top of the given block.
	fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer;

	/// Returns a [`TelemetryHandle`] if any.
	fn telemetry(&self) -> Option<TelemetryHandle>;

	/// Remaining duration for proposing.
	fn proposing_remaining_duration(&self, slot_info: &SlotInfo<B>) -> Duration;

	/// Propose a block by `Proposer`.
	async fn propose(
		&mut self,
		proposer: Self::Proposer,
		claim: &Self::Claim,
		slot_info: SlotInfo<B>,
		end_proposing_at: Instant,
	) -> Option<Proposal<B, <Self::Proposer as Proposer<B>>::Proof>> {
		let slot = slot_info.slot;
		let telemetry = self.telemetry();
		let log_target = self.logging_target();

		let inherent_data =
			Self::create_inherent_data(&slot_info, &log_target, end_proposing_at).await?;

		let proposing_remaining_duration =
			end_proposing_at.saturating_duration_since(Instant::now());
		let logs = self.pre_digest_data(slot, claim);

		// deadline our production to 98% of the total time left for proposing. As we deadline
		// the proposing below to the same total time left, the 2% margin should be enough for
		// the result to be returned.
		let proposing = proposer
			.propose(
				inherent_data,
				sp_runtime::generic::Digest { logs },
				proposing_remaining_duration.mul_f32(0.98),
				slot_info.block_size_limit,
			)
			.map_err(|e| sp_consensus::Error::ClientImport(e.to_string()));

		let proposal = match futures::future::select(
			proposing,
			Delay::new(proposing_remaining_duration),
		)
		.await
		{
			Either::Left((Ok(p), _)) => p,
			Either::Left((Err(err), _)) => {
				warn!(target: log_target, "Proposing failed: {}", err);

				return None
			},
			Either::Right(_) => {
				info!(
					target: log_target,
					"⌛️ Discarding proposal for slot {}; block production took too long", slot,
				);
				// If the node was compiled with debug, tell the user to use release optimizations.
				#[cfg(build_profile = "debug")]
				info!(
					target: log_target,
					"👉 Recompile your node in `--release` mode to mitigate this problem.",
				);
				telemetry!(
					telemetry;
					CONSENSUS_INFO;
					"slots.discarding_proposal_took_too_long";
					"slot" => *slot,
				);

				return None
			},
		};

		Some(proposal)
	}

	/// Calls `create_inherent_data` and handles errors.
	async fn create_inherent_data(
		slot_info: &SlotInfo<B>,
		logging_target: &str,
		end_proposing_at: Instant,
	) -> Option<sp_inherents::InherentData> {
		let remaining_duration = end_proposing_at.saturating_duration_since(Instant::now());
		let delay = Delay::new(remaining_duration);
		let cid = slot_info.create_inherent_data.create_inherent_data();
		let inherent_data = match futures::future::select(delay, cid).await {
			Either::Right((Ok(data), _)) => data,
			Either::Right((Err(err), _)) => {
				warn!(
					target: logging_target,
					"Unable to create inherent data for block {:?}: {}",
					slot_info.chain_head.hash(),
					err,
				);

				return None
			},
			Either::Left(_) => {
				warn!(
					target: logging_target,
					"Creating inherent data took more time than we had left for slot {} for block {:?}.",
					slot_info.slot,
					slot_info.chain_head.hash(),
				);

				return None
			},
		};

		Some(inherent_data)
	}

	/// Implements [`SlotWorker::on_slot`].
	async fn on_slot(
		&mut self,
		slot_info: SlotInfo<B>,
	) -> Option<SlotResult<B, <Self::Proposer as Proposer<B>>::Proof>>
	where
		Self: Sync,
	{
		let slot = slot_info.slot;
		let telemetry = self.telemetry();
		let logging_target = self.logging_target();

		let proposing_remaining_duration = self.proposing_remaining_duration(&slot_info);

		let end_proposing_at = if proposing_remaining_duration == Duration::default() {
			debug!(
				target: logging_target,
				"Skipping proposal slot {} since there's no time left to propose", slot,
			);

			return None
		} else {
			Instant::now() + proposing_remaining_duration
		};

		let aux_data = match self.aux_data(&slot_info.chain_head, slot) {
			Ok(aux_data) => aux_data,
			Err(err) => {
				warn!(
					target: logging_target,
					"Unable to fetch auxiliary data for block {:?}: {}",
					slot_info.chain_head.hash(),
					err,
				);

				telemetry!(
					telemetry;
					CONSENSUS_WARN;
					"slots.unable_fetching_authorities";
					"slot" => ?slot_info.chain_head.hash(),
					"err" => ?err,
				);

				return None
			},
		};

		self.notify_slot(&slot_info.chain_head, slot, &aux_data);

		let authorities_len = self.authorities_len(&aux_data);

		if !self.force_authoring() &&
			self.sync_oracle().is_offline() &&
			authorities_len.map(|a| a > 1).unwrap_or(false)
		{
			debug!(target: logging_target, "Skipping proposal slot. Waiting for the network.");
			telemetry!(
				telemetry;
				CONSENSUS_DEBUG;
				"slots.skipping_proposal_slot";
				"authorities_len" => authorities_len,
			);

			return None
		}

		let claim = self.claim_slot(&slot_info.chain_head, slot, &aux_data).await?;

		if self.should_backoff(slot, &slot_info.chain_head) {
			return None
		}

		debug!(target: logging_target, "Starting authorship at slot: {slot}");

		telemetry!(telemetry; CONSENSUS_DEBUG; "slots.starting_authorship"; "slot_num" => slot);

		let proposer = match self.proposer(&slot_info.chain_head).await {
			Ok(p) => p,
			Err(err) => {
				warn!(target: logging_target, "Unable to author block in slot {slot:?}: {err}");

				telemetry!(
					telemetry;
					CONSENSUS_WARN;
					"slots.unable_authoring_block";
					"slot" => *slot,
					"err" => ?err
				);

				return None
			},
		};

		let proposal = self.propose(proposer, &claim, slot_info, end_proposing_at).await?;

		let (block, storage_proof) = (proposal.block, proposal.proof);
		let (header, body) = block.deconstruct();
		let header_num = *header.number();
		let header_hash = header.hash();
		let parent_hash = *header.parent_hash();

		let block_import_params = match self
			.block_import_params(
				header,
				&header_hash,
				body.clone(),
				proposal.storage_changes,
				claim,
				aux_data,
			)
			.await
		{
			Ok(bi) => bi,
			Err(err) => {
				warn!(target: logging_target, "Failed to create block import params: {}", err);

				return None
			},
		};

		info!(
			target: logging_target,
			"🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.",
			header_num,
			block_import_params.post_hash(),
			header_hash,
		);

		telemetry!(
			telemetry;
			CONSENSUS_INFO;
			"slots.pre_sealed_block";
			"header_num" => ?header_num,
			"hash_now" => ?block_import_params.post_hash(),
			"hash_previously" => ?header_hash,
		);

		let header = block_import_params.post_header();
		match self.block_import().import_block(block_import_params).await {
			Ok(res) => {
				res.handle_justification(
					&header.hash(),
					*header.number(),
					self.justification_sync_link(),
				);
			},
			Err(err) => {
				warn!(
					target: logging_target,
					"Error with block built on {:?}: {}", parent_hash, err,
				);

				telemetry!(
					telemetry;
					CONSENSUS_WARN;
					"slots.err_with_block_built_on";
					"hash" => ?parent_hash,
					"err" => ?err,
				);
			},
		}

		Some(SlotResult { block: B::new(header, body), storage_proof })
	}
}

/// A type that implements [`SlotWorker`] for a type that implements [`SimpleSlotWorker`].
///
/// This is basically a workaround for Rust not supporting specialization. Otherwise we could
/// implement [`SlotWorker`] for any `T` that implements [`SimpleSlotWorker`], but currently
/// that would prevent downstream users from implementing [`SlotWorker`] for their own types.
pub struct SimpleSlotWorkerToSlotWorker<T>(pub T);

#[async_trait::async_trait]
impl<T: SimpleSlotWorker<B> + Send + Sync, B: BlockT>
	SlotWorker<B, <T::Proposer as Proposer<B>>::Proof> for SimpleSlotWorkerToSlotWorker<T>
{
	async fn on_slot(
		&mut self,
		slot_info: SlotInfo<B>,
	) -> Option<SlotResult<B, <T::Proposer as Proposer<B>>::Proof>> {
		self.0.on_slot(slot_info).await
	}
}

/// Slot specific extension that the inherent data provider needs to implement.
pub trait InherentDataProviderExt {
	/// The current slot that will be found in the [`InherentData`](`sp_inherents::InherentData`).
	fn slot(&self) -> Slot;
}

/// Small macro for implementing `InherentDataProviderExt` for inherent data provider tuples.
macro_rules! impl_inherent_data_provider_ext_tuple {
	( S $(, $TN:ident)* $( , )?) => {
		impl<S, $( $TN ),*>  InherentDataProviderExt for (S, $($TN),*)
		where
			S: Deref<Target = Slot>,
		{
			fn slot(&self) -> Slot {
				*self.0.deref()
			}
		}
	}
}

impl_inherent_data_provider_ext_tuple!(S);
impl_inherent_data_provider_ext_tuple!(S, A);
impl_inherent_data_provider_ext_tuple!(S, A, B);
impl_inherent_data_provider_ext_tuple!(S, A, B, C);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I);
impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I, J);

/// Start a new slot worker.
///
/// Every time a new slot is triggered, `worker.on_slot` is called and the future it returns is
/// polled until completion, unless we are major syncing.
pub async fn start_slot_worker<B, C, W, SO, CIDP, Proof>(
	slot_duration: SlotDuration,
	client: C,
	mut worker: W,
	sync_oracle: SO,
	create_inherent_data_providers: CIDP,
) where
	B: BlockT,
	C: SelectChain<B>,
	W: SlotWorker<B, Proof>,
	SO: SyncOracle + Send,
	CIDP: CreateInherentDataProviders<B, ()> + Send + 'static,
	CIDP::InherentDataProviders: InherentDataProviderExt + Send,
{
	let mut slots = Slots::new(
		slot_duration.as_duration(),
		create_inherent_data_providers,
		client,
		sync_oracle,
	);

	loop {
		let slot_info = slots.next_slot().await;
		let _ = worker.on_slot(slot_info).await;
	}
}

/// A header which has been checked.
pub enum CheckedHeader<H, S> {
	/// A header whose slot is in the future. This is the full header (not stripped)
	/// and the slot in which it should be processed.
	Deferred(H, Slot),
	/// A header which is fully checked, including signature. This is the pre-header
	/// accompanied by the seal components.
	///
	/// Includes the digest item that encoded the seal.
	Checked(H, S),
}

/// A unit type wrapper to express the proportion of a slot.
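///
/// For example, `SlotProportion::new(0.5)` expresses half of a slot; out-of-range values are
/// clamped into `[0, 1]` by [`SlotProportion::new`].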
pub struct SlotProportion(f32);

impl SlotProportion {
	/// Create a new proportion.
	///
	/// The given value `inner` should be in the range `[0,1]`. If the value is not in the required
	/// range, it is clamped into the range.
	pub fn new(inner: f32) -> Self {
		Self(inner.clamp(0.0, 1.0))
	}

	/// Returns the inner that is guaranteed to be in the range `[0,1]`.
	pub fn get(&self) -> f32 {
		self.0
	}
}

/// The strategy used to calculate the slot lenience used to increase the block proposal time when
/// slots have been skipped with no blocks authored.
pub enum SlotLenienceType {
	/// Increase the lenience linearly with the number of skipped slots.
	Linear,
	/// Increase the lenience exponentially with the number of skipped slots.
	Exponential,
}

impl SlotLenienceType {
	fn as_str(&self) -> &'static str {
		match self {
			SlotLenienceType::Linear => "linear",
			SlotLenienceType::Exponential => "exponential",
		}
	}
}

/// Calculate the remaining duration for block proposal, taking into account whether any slots have
/// been skipped and applying the given lenience strategy. If `max_block_proposal_slot_portion` is
/// not `None`, this method guarantees that the returned duration is lower than or equal to
/// `slot_info.duration * max_block_proposal_slot_portion`.
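///
/// As a worked example (all numbers assumed purely for illustration): with a 6 second slot, a
/// `block_proposal_slot_portion` of `0.5` and 4 skipped slots under the linear lenience strategy,
/// the base proposing duration is `3s` and the lenience is `4 * 6s = 24s`, so the proposal
/// deadline becomes `3s + 24s * 0.5 = 15s`, before any `max_block_proposal_slot_portion` cap is
/// applied.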
pub fn proposing_remaining_duration<Block: BlockT>(
	parent_slot: Option<Slot>,
	slot_info: &SlotInfo<Block>,
	block_proposal_slot_portion: &SlotProportion,
	max_block_proposal_slot_portion: Option<&SlotProportion>,
	slot_lenience_type: SlotLenienceType,
	log_target: &str,
) -> Duration {
	use sp_runtime::traits::Zero;

	let proposing_duration = slot_info.duration.mul_f32(block_proposal_slot_portion.get());

	let slot_remaining = slot_info
		.ends_at
		.checked_duration_since(std::time::Instant::now())
		.unwrap_or_default();

	let proposing_duration = std::cmp::min(slot_remaining, proposing_duration);

	// If parent is genesis block, we don't require any lenience factor.
	if slot_info.chain_head.number().is_zero() {
		return proposing_duration
	}

	let parent_slot = match parent_slot {
		Some(parent_slot) => parent_slot,
		None => return proposing_duration,
	};

	let slot_lenience = match slot_lenience_type {
		SlotLenienceType::Exponential => slot_lenience_exponential(parent_slot, slot_info),
		SlotLenienceType::Linear => slot_lenience_linear(parent_slot, slot_info),
	};

	if let Some(slot_lenience) = slot_lenience {
		let lenient_proposing_duration =
			proposing_duration + slot_lenience.mul_f32(block_proposal_slot_portion.get());

		// if we defined a maximum portion of the slot for proposal then we must make sure the
		// lenience doesn't go over it
		let lenient_proposing_duration =
			if let Some(max_block_proposal_slot_portion) = max_block_proposal_slot_portion {
				std::cmp::min(
					lenient_proposing_duration,
					slot_info.duration.mul_f32(max_block_proposal_slot_portion.get()),
				)
			} else {
				lenient_proposing_duration
			};

		debug!(
			target: log_target,
			"No block for {} slots. Applying {} lenience, total proposing duration: {}ms",
			slot_info.slot.saturating_sub(parent_slot + 1),
			slot_lenience_type.as_str(),
			lenient_proposing_duration.as_millis(),
		);

		lenient_proposing_duration
	} else {
		proposing_duration
	}
}

/// Calculate a slot duration lenience based on the number of missed slots from current
/// to parent. If the number of skipped slots is greater than 0 this method will apply
/// an exponential backoff of at most `2^7 * slot_duration`; if no slots were skipped
/// this method will return `None`.
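///
/// For example (with an assumed 6 second slot duration): 5 skipped slots give a lenience of
/// `2^(5 / 2) * 6s = 24s`, while 14 or more skipped slots saturate the backoff at
/// `2^7 * 6s = 768s`.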
pub fn slot_lenience_exponential<Block: BlockT>(
	parent_slot: Slot,
	slot_info: &SlotInfo<Block>,
) -> Option<Duration> {
	// never give more than 2^this times the lenience.
	const BACKOFF_CAP: u64 = 7;

	// how many slots it takes before we double the lenience.
	const BACKOFF_STEP: u64 = 2;

	// we allow a lenience of the number of slots since the head of the
	// chain was produced, minus 1 (since there is always a difference of at least 1)
	//
	// exponential back-off.
	// in normal cases we only attempt to issue blocks up to the end of the slot.
	// when the chain has been stalled for a few slots, we give more lenience.
	let skipped_slots = *slot_info.slot.saturating_sub(parent_slot + 1);

	if skipped_slots == 0 {
		None
	} else {
		let slot_lenience = skipped_slots / BACKOFF_STEP;
		let slot_lenience = std::cmp::min(slot_lenience, BACKOFF_CAP);
		let slot_lenience = 1 << slot_lenience;
		Some(slot_lenience * slot_info.duration)
	}
}

/// Calculate a slot duration lenience based on the number of missed slots from current
/// to parent. If the number of skipped slots is greater than 0 this method will apply
/// a linear backoff of at most `20 * slot_duration`; if no slots were skipped
/// this method will return `None`.
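///
/// For example (with an assumed 6 second slot duration): 5 skipped slots give a lenience of
/// `5 * 6s = 30s`, while 20 or more skipped slots saturate the backoff at `20 * 6s = 120s`.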
pub fn slot_lenience_linear<Block: BlockT>(
	parent_slot: Slot,
	slot_info: &SlotInfo<Block>,
) -> Option<Duration> {
	// never give more than 20 times more lenience.
	const BACKOFF_CAP: u64 = 20;

	// we allow a lenience of the number of slots since the head of the
	// chain was produced, minus 1 (since there is always a difference of at least 1)
	//
	// linear back-off.
	// in normal cases we only attempt to issue blocks up to the end of the slot.
	// when the chain has been stalled for a few slots, we give more lenience.
	let skipped_slots = *slot_info.slot.saturating_sub(parent_slot + 1);

	if skipped_slots == 0 {
		None
	} else {
		let slot_lenience = std::cmp::min(skipped_slots, BACKOFF_CAP);
		// We cap `slot_lenience` to `20`, so it should always fit into a `u32`.
		Some(slot_info.duration * (slot_lenience as u32))
	}
}

/// Trait for providing the strategy for when to back off block authoring.
pub trait BackoffAuthoringBlocksStrategy<N> {
	/// Returns true if we should back off authoring new blocks.
	fn should_backoff(
		&self,
		chain_head_number: N,
		chain_head_slot: Slot,
		finalized_number: N,
		slot_now: Slot,
		logging_target: &str,
	) -> bool;
}

/// A simple default strategy for deciding when to back off authoring blocks if the number of
/// unfinalized blocks grows too large.
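///
/// The backoff interval grows with the length of the unfinalized chain suffix, roughly as
/// `min((unfinalized_blocks - unfinalized_slack) / authoring_bias, max_interval)`, and a slot is
/// skipped while the current slot is not more than that interval ahead of the chain head's slot.
/// For example (illustrative numbers only): with `unfinalized_slack = 50`, `authoring_bias = 2`
/// and 120 unfinalized blocks, the interval is `(120 - 50) / 2 = 35` slots.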
#[derive(Clone)]
pub struct BackoffAuthoringOnFinalizedHeadLagging<N> {
	/// The maximum interval to back off when authoring blocks, regardless of delay in finality.
	pub max_interval: N,
	/// The number of unfinalized blocks allowed before we start to consider backing off
	/// authoring blocks. Note that depending on the value for `authoring_bias`, there might
	/// still be an additional wait until block authorship starts getting declined.
	pub unfinalized_slack: N,
	/// Scales the backoff rate. A higher value effectively means we back off more slowly, taking
	/// longer time to reach the maximum backoff as the unfinalized head of the chain grows.
	pub authoring_bias: N,
}

/// These parameters are supposed to be sensible defaults.
impl<N: BaseArithmetic> Default for BackoffAuthoringOnFinalizedHeadLagging<N> {
	fn default() -> Self {
		Self {
			// Never wait more than 100 slots before authoring blocks, regardless of delay in
			// finality.
			max_interval: 100.into(),
			// Start to consider backing off block authorship once we have 50 or more unfinalized
			// blocks at the head of the chain.
			unfinalized_slack: 50.into(),
			// A reasonable default for the authoring bias, or reciprocal interval scaling, is 2.
			// Effectively this means we consider the unfinalized head suffix to grow half as fast
			// as it actually does.
			authoring_bias: 2.into(),
		}
	}
}

impl<N> BackoffAuthoringBlocksStrategy<N> for BackoffAuthoringOnFinalizedHeadLagging<N>
where
	N: BaseArithmetic + Copy,
{
	fn should_backoff(
		&self,
		chain_head_number: N,
		chain_head_slot: Slot,
		finalized_number: N,
		slot_now: Slot,
		logging_target: &str,
	) -> bool {
		// This should not happen, but we want to keep the previous behaviour if it does.
		if slot_now <= chain_head_slot {
			return false
		}

		// There can be a race between getting the finalized number and getting the best number.
		// So, better be safe than sorry.
		let unfinalized_block_length = chain_head_number.saturating_sub(finalized_number);
		let interval =
			unfinalized_block_length.saturating_sub(self.unfinalized_slack) / self.authoring_bias;
		let interval = interval.min(self.max_interval);

		// We're doing arithmetic between block and slot numbers.
		let interval: u64 = interval.unique_saturated_into();

		// If the interval is nonzero we back off if the current slot isn't far enough ahead of
		// the chain head.
		if *slot_now <= *chain_head_slot + interval {
			info!(
				target: logging_target,
				"Backing off claiming new slot for block authorship: finality is lagging.",
			);
			true
		} else {
			false
		}
	}
}

impl<N> BackoffAuthoringBlocksStrategy<N> for () {
	fn should_backoff(
		&self,
		_chain_head_number: N,
		_chain_head_slot: Slot,
		_finalized_number: N,
		_slot_now: Slot,
		_logging_target: &str,
	) -> bool {
		false
	}
}

#[cfg(test)]
mod test {
	use super::*;
	use sp_runtime::traits::NumberFor;
	use std::time::{Duration, Instant};
	use substrate_test_runtime_client::runtime::{Block, Header};

	const SLOT_DURATION: Duration = Duration::from_millis(6000);

	fn slot(slot: u64) -> super::slots::SlotInfo<Block> {
		super::slots::SlotInfo {
			slot: slot.into(),
			duration: SLOT_DURATION,
			create_inherent_data: Box::new(()),
			ends_at: Instant::now() + SLOT_DURATION,
			chain_head: Header::new(
				1,
				Default::default(),
				Default::default(),
				Default::default(),
				Default::default(),
			),
			block_size_limit: None,
		}
	}

	#[test]
	fn linear_slot_lenience() {
		// if no slots are skipped there should be no lenience
		assert_eq!(super::slot_lenience_linear(1u64.into(), &slot(2)), None);

		// otherwise the lenience is incremented linearly with
		// the number of skipped slots.
		for n in 3..=22 {
			assert_eq!(
				super::slot_lenience_linear(1u64.into(), &slot(n)),
				Some(SLOT_DURATION * (n - 2) as u32),
			);
		}

		// but we cap it to a maximum of 20 slots
		assert_eq!(super::slot_lenience_linear(1u64.into(), &slot(23)), Some(SLOT_DURATION * 20));
	}

	#[test]
	fn exponential_slot_lenience() {
		// if no slots are skipped there should be no lenience
		assert_eq!(super::slot_lenience_exponential(1u64.into(), &slot(2)), None);

		// otherwise the lenience is incremented exponentially every two slots
		for n in 3..=17 {
			assert_eq!(
				super::slot_lenience_exponential(1u64.into(), &slot(n)),
				Some(SLOT_DURATION * 2u32.pow((n / 2 - 1) as u32)),
			);
		}

		// but the lenience saturates at 2^7 slot durations once 14 or more slots are skipped
		assert_eq!(
			super::slot_lenience_exponential(1u64.into(), &slot(18)),
			Some(SLOT_DURATION * 2u32.pow(7)),
		);

		assert_eq!(
			super::slot_lenience_exponential(1u64.into(), &slot(19)),
			Some(SLOT_DURATION * 2u32.pow(7)),
		);
	}

	#[test]
	fn proposing_remaining_duration_should_apply_lenience_based_on_proposal_slot_proportion() {
		assert_eq!(
			proposing_remaining_duration(
				Some(0.into()),
				&slot(2),
				&SlotProportion(0.25),
				None,
				SlotLenienceType::Linear,
				"test",
			),
			SLOT_DURATION.mul_f32(0.25 * 2.0),
		);
	}

	#[test]
	fn proposing_remaining_duration_should_never_exceed_max_proposal_slot_proportion() {
		assert_eq!(
			proposing_remaining_duration(
				Some(0.into()),
				&slot(100),
				&SlotProportion(0.25),
				Some(SlotProportion(0.9)).as_ref(),
				SlotLenienceType::Exponential,
				"test",
			),
			SLOT_DURATION.mul_f32(0.9),
		);
	}

	#[derive(PartialEq, Debug)]
	struct HeadState {
		head_number: NumberFor<Block>,
		head_slot: u64,
		slot_now: NumberFor<Block>,
	}

	impl HeadState {
		fn author_block(&mut self) {
			// Add a block to the head, and set latest slot to the current
			self.head_number += 1;
			self.head_slot = self.slot_now;
			// Advance slot to next
			self.slot_now += 1;
		}

		fn dont_author_block(&mut self) {
			self.slot_now += 1;
		}
	}

	#[test]
	fn should_never_backoff_when_head_not_advancing() {
		let strategy = BackoffAuthoringOnFinalizedHeadLagging::<NumberFor<Block>> {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};

		let head_number = 1;
		let head_slot = 1;
		let finalized_number = 1;
		let slot_now = 2;

		let should_backoff: Vec<bool> = (slot_now..1000)
			.map(|s| {
				strategy.should_backoff(
					head_number,
					head_slot.into(),
					finalized_number,
					s.into(),
					"slots",
				)
			})
			.collect();

		// Should always be false, since the head isn't advancing
		let expected: Vec<bool> = (slot_now..1000).map(|_| false).collect();
		assert_eq!(should_backoff, expected);
	}

	#[test]
	fn should_stop_authoring_if_blocks_are_still_produced_when_finality_stalled() {
		let strategy = BackoffAuthoringOnFinalizedHeadLagging::<NumberFor<Block>> {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};

		let mut head_number = 1;
		let mut head_slot = 1;
		let finalized_number = 1;
		let slot_now = 2;

		let should_backoff: Vec<bool> = (slot_now..300)
			.map(move |s| {
				let b = strategy.should_backoff(
					head_number,
					head_slot.into(),
					finalized_number,
					s.into(),
					"slots",
				);
				// Chain is still advancing (by someone else)
				head_number += 1;
				head_slot = s;
				b
			})
			.collect();

		// Should always be true after a short while, since the chain is advancing but finality is
		// stalled
		let expected: Vec<bool> = (slot_now..300).map(|s| s > 8).collect();
		assert_eq!(should_backoff, expected);
	}

	#[test]
	fn should_never_backoff_if_max_interval_is_reached() {
		let strategy = BackoffAuthoringOnFinalizedHeadLagging::<NumberFor<Block>> {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};

		// The limit `max_interval` is used when the unfinalized chain grows to
		// 	`max_interval * authoring_bias + unfinalized_slack`,
		// which for the above parameters becomes
		// 	100 * 2 + 5 = 205.
		// Hence we trigger this with head_number > finalized_number + 205.
		let head_number = 207;
		let finalized_number = 1;

		// The limit is then used once the current slot is `max_interval` ahead of the slot of the
		// head.
		let head_slot = 1;
		let slot_now = 2;
		let max_interval = strategy.max_interval;

		let should_backoff: Vec<bool> = (slot_now..200)
			.map(|s| {
				strategy.should_backoff(
					head_number,
					head_slot.into(),
					finalized_number,
					s.into(),
					"slots",
				)
			})
			.collect();

		// Should back off (true) until we are `max_interval` slots ahead of the chain head slot,
		// then we never back off (false).
		let expected: Vec<bool> = (slot_now..200).map(|s| s <= max_interval + head_slot).collect();
		assert_eq!(should_backoff, expected);
	}

	#[test]
	fn should_backoff_authoring_when_finality_stalled() {
		let param = BackoffAuthoringOnFinalizedHeadLagging {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};

		let finalized_number = 2;
		let mut head_state = HeadState { head_number: 4, head_slot: 10, slot_now: 11 };

		let should_backoff = |head_state: &HeadState| -> bool {
			<dyn BackoffAuthoringBlocksStrategy<NumberFor<Block>>>::should_backoff(
				&param,
				head_state.head_number,
				head_state.head_slot.into(),
				finalized_number,
				head_state.slot_now.into(),
				"slots",
			)
		};

		let backoff: Vec<bool> = (head_state.slot_now..200)
			.map(|_| {
				if should_backoff(&head_state) {
					head_state.dont_author_block();
					true
				} else {
					head_state.author_block();
					false
				}
			})
			.collect();

		// Gradually start to back off more and more frequently
		let expected = [
			false, false, false, false, false, // no effect
			true, false, true, false, // 1:1
			true, true, false, true, true, false, // 2:1
			true, true, true, false, true, true, true, false, // 3:1
			true, true, true, true, false, true, true, true, true, false, // 4:1
			true, true, true, true, true, false, true, true, true, true, true, false, // 5:1
			true, true, true, true, true, true, false, true, true, true, true, true, true,
			false, // 6:1
			true, true, true, true, true, true, true, false, true, true, true, true, true, true,
			true, false, // 7:1
			true, true, true, true, true, true, true, true, false, true, true, true, true, true,
			true, true, true, false, // 8:1
			true, true, true, true, true, true, true, true, true, false, true, true, true, true,
			true, true, true, true, true, false, // 9:1
			true, true, true, true, true, true, true, true, true, true, false, true, true, true,
			true, true, true, true, true, true, true, false, // 10:1
			true, true, true, true, true, true, true, true, true, true, true, false, true, true,
			true, true, true, true, true, true, true, true, true, false, // 11:1
			true, true, true, true, true, true, true, true, true, true, true, true, false, true,
			true, true, true, true, true, true, true, true, true, true, true, false, // 12:1
			true, true, true, true,
		];

		assert_eq!(backoff.as_slice(), &expected[..]);
	}

	#[test]
	fn should_never_wait_more_than_max_interval() {
		let param = BackoffAuthoringOnFinalizedHeadLagging {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};

		let finalized_number = 2;
		let starting_slot = 11;
		let mut head_state = HeadState { head_number: 4, head_slot: 10, slot_now: starting_slot };

		let should_backoff = |head_state: &HeadState| -> bool {
			<dyn BackoffAuthoringBlocksStrategy<NumberFor<Block>>>::should_backoff(
				&param,
				head_state.head_number,
				head_state.head_slot.into(),
				finalized_number,
				head_state.slot_now.into(),
				"slots",
			)
		};

		let backoff: Vec<bool> = (head_state.slot_now..40000)
			.map(|_| {
				if should_backoff(&head_state) {
					head_state.dont_author_block();
					true
				} else {
					head_state.author_block();
					false
				}
			})
			.collect();

		let slots_claimed: Vec<usize> = backoff
			.iter()
			.enumerate()
			.filter(|&(_i, x)| x == &false)
			.map(|(i, _x)| i + starting_slot as usize)
			.collect();

		let last_slot = backoff.len() + starting_slot as usize;
		let mut last_two_claimed = slots_claimed.iter().rev().take(2);

		// Check that we claimed all the way to the end. Check two slots for when we have an uneven
		// number of slots_claimed.
		let expected_distance = param.max_interval as usize + 1;
		assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92);
		assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92 + expected_distance);

		let intervals: Vec<_> = slots_claimed.windows(2).map(|x| x[1] - x[0]).collect();

		// The key thing is that the distance between claimed slots is capped to `max_interval + 1`
		assert_eq!(intervals.iter().max(), Some(&expected_distance));

		// But let's assert all distances, which we expect to grow linearly until `max_interval + 1`
		let expected_intervals: Vec<_> =
			(0..497).map(|i| (i / 2).clamp(1, expected_distance)).collect();

		assert_eq!(intervals, expected_intervals);
	}

	fn run_until_max_interval(param: BackoffAuthoringOnFinalizedHeadLagging<u64>) -> (u64, u64) {
		let finalized_number = 0;
		let mut head_state = HeadState { head_number: 0, head_slot: 0, slot_now: 1 };

		let should_backoff = |head_state: &HeadState| -> bool {
			<dyn BackoffAuthoringBlocksStrategy<NumberFor<Block>>>::should_backoff(
				&param,
				head_state.head_number,
				head_state.head_slot.into(),
				finalized_number,
				head_state.slot_now.into(),
				"slots",
			)
		};

		// Number of blocks until we reach the max interval
		let block_for_max_interval =
			param.max_interval * param.authoring_bias + param.unfinalized_slack;

		while head_state.head_number < block_for_max_interval {
			if should_backoff(&head_state) {
				head_state.dont_author_block();
			} else {
				head_state.author_block();
			}
		}

		let slot_time = 6;
		let time_to_reach_limit = slot_time * head_state.slot_now;
		(block_for_max_interval, time_to_reach_limit)
	}

	// Denoting
	// 	C: unfinalized_slack
	// 	M: authoring_bias
	// 	X: max_interval
	// then the number of slots to reach the max interval can be computed from
	// 	(start_slot + C) + M * sum(n, 1, X)
	// or
	// 	(start_slot + C) + M * X*(X+1)/2
	fn expected_time_to_reach_max_interval(
		param: &BackoffAuthoringOnFinalizedHeadLagging<u64>,
	) -> (u64, u64) {
		let c = param.unfinalized_slack;
		let m = param.authoring_bias;
		let x = param.max_interval;
		let slot_time = 6;

		let block_for_max_interval = x * m + c;

		// The 1 is because we start at slot_now = 1.
		let expected_number_of_slots = (1 + c) + m * x * (x + 1) / 2;
		let time_to_reach = expected_number_of_slots * slot_time;

		(block_for_max_interval, time_to_reach)
	}

	#[test]
	fn time_to_reach_upper_bound_for_smaller_slack() {
		let param = BackoffAuthoringOnFinalizedHeadLagging {
			max_interval: 100,
			unfinalized_slack: 5,
			authoring_bias: 2,
		};
		let expected = expected_time_to_reach_max_interval(&param);
		let (block_for_max_interval, time_to_reach_limit) = run_until_max_interval(param);
		assert_eq!((block_for_max_interval, time_to_reach_limit), expected);
		// Note: 16 hours is 57600 sec
		assert_eq!((block_for_max_interval, time_to_reach_limit), (205, 60636));
	}

	#[test]
	fn time_to_reach_upper_bound_for_larger_slack() {
		let param = BackoffAuthoringOnFinalizedHeadLagging {
			max_interval: 100,
			unfinalized_slack: 50,
			authoring_bias: 2,
		};
		let expected = expected_time_to_reach_max_interval(&param);
		let (block_for_max_interval, time_to_reach_limit) = run_until_max_interval(param);
		assert_eq!((block_for_max_interval, time_to_reach_limit), expected);
		assert_eq!((block_for_max_interval, time_to_reach_limit), (250, 60906));
	}
}