// sc_basic_authorship — basic_authorship.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19//! A consensus proposer for "basic" chains which use the primitive inherent-data.
20
21// FIXME #1021 move this into sp-consensus
22
23use codec::Encode;
24use futures::{
25	channel::oneshot,
26	future,
27	future::{Future, FutureExt},
28};
29use log::{debug, error, info, trace, warn};
30use prometheus_endpoint::Registry as PrometheusRegistry;
31use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder};
32use sc_proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics};
33use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO};
34use sc_transaction_pool_api::{InPoolTransaction, TransactionPool, TxInvalidityReportMap};
35use sp_api::{ApiExt, CallApiAt, ProofRecorder, ProvideRuntimeApi};
36use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend};
37use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal};
38use sp_core::traits::SpawnNamed;
39use sp_inherents::InherentData;
40use sp_runtime::{
41	traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT},
42	Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion,
43};
44use sp_trie::recorder::IgnoredNodes;
45use std::{marker::PhantomData, pin::Pin, sync::Arc, time};
46
47/// Default block size limit in bytes used by [`Proposer`].
48///
49/// Can be overwritten by [`ProposerFactory::set_default_block_size_limit`].
50///
51/// Be aware that there is also an upper packet size on what the networking code
52/// will accept. If the block doesn't fit in such a package, it can not be
53/// transferred to other nodes.
54pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512;
55
56const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50);
57
58const LOG_TARGET: &'static str = "basic-authorship";
59
60/// [`Proposer`] factory.
/// [`Proposer`] factory.
///
/// Implements [`sp_consensus::Environment`]; a fresh [`Proposer`] is created for every
/// block that should be authored.
pub struct ProposerFactory<A, C, PR> {
	/// Handle used to spawn the blocking block-production task.
	spawn_handle: Box<dyn SpawnNamed>,
	/// The client instance.
	client: Arc<C>,
	/// The transaction pool.
	transaction_pool: Arc<A>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// The default block size limit.
	///
	/// If no `block_size_limit` is passed to [`sp_consensus::Proposer::propose`], this block size
	/// limit will be used.
	default_block_size_limit: usize,
	/// Soft deadline percentage of hard deadline.
	///
	/// The value is used to compute soft deadline during block production.
	/// The soft deadline indicates where we should stop attempting to add transactions
	/// to the block, which exhaust resources. After soft deadline is reached,
	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
	/// transactions which exhaust resources, we will conclude that the block is full.
	soft_deadline_percent: Percent,
	/// Optional telemetry handle for reporting authored blocks.
	telemetry: Option<TelemetryHandle>,
	/// When estimating the block size, should the proof be included?
	include_proof_in_block_size_estimation: bool,
	/// phantom member to pin the `ProofRecording` type.
	_phantom: PhantomData<PR>,
}
88
89impl<A, C, PR> Clone for ProposerFactory<A, C, PR> {
90	fn clone(&self) -> Self {
91		Self {
92			spawn_handle: self.spawn_handle.clone(),
93			client: self.client.clone(),
94			transaction_pool: self.transaction_pool.clone(),
95			metrics: self.metrics.clone(),
96			default_block_size_limit: self.default_block_size_limit,
97			soft_deadline_percent: self.soft_deadline_percent,
98			telemetry: self.telemetry.clone(),
99			include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
100			_phantom: self._phantom,
101		}
102	}
103}
104
105impl<A, C> ProposerFactory<A, C, DisableProofRecording> {
106	/// Create a new proposer factory.
107	///
108	/// Proof recording will be disabled when using proposers built by this instance to build
109	/// blocks.
110	pub fn new(
111		spawn_handle: impl SpawnNamed + 'static,
112		client: Arc<C>,
113		transaction_pool: Arc<A>,
114		prometheus: Option<&PrometheusRegistry>,
115		telemetry: Option<TelemetryHandle>,
116	) -> Self {
117		ProposerFactory {
118			spawn_handle: Box::new(spawn_handle),
119			transaction_pool,
120			metrics: PrometheusMetrics::new(prometheus),
121			default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
122			soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
123			telemetry,
124			client,
125			include_proof_in_block_size_estimation: false,
126			_phantom: PhantomData,
127		}
128	}
129}
130
131impl<A, C> ProposerFactory<A, C, EnableProofRecording> {
132	/// Create a new proposer factory with proof recording enabled.
133	///
134	/// Each proposer created by this instance will record a proof while building a block.
135	///
136	/// This will also include the proof into the estimation of the block size. This can be disabled
137	/// by calling [`ProposerFactory::disable_proof_in_block_size_estimation`].
138	pub fn with_proof_recording(
139		spawn_handle: impl SpawnNamed + 'static,
140		client: Arc<C>,
141		transaction_pool: Arc<A>,
142		prometheus: Option<&PrometheusRegistry>,
143		telemetry: Option<TelemetryHandle>,
144	) -> Self {
145		ProposerFactory {
146			client,
147			spawn_handle: Box::new(spawn_handle),
148			transaction_pool,
149			metrics: PrometheusMetrics::new(prometheus),
150			default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
151			soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
152			telemetry,
153			include_proof_in_block_size_estimation: true,
154			_phantom: PhantomData,
155		}
156	}
157
158	/// Disable the proof inclusion when estimating the block size.
159	pub fn disable_proof_in_block_size_estimation(&mut self) {
160		self.include_proof_in_block_size_estimation = false;
161	}
162}
163
164impl<A, C, PR> ProposerFactory<A, C, PR> {
165	/// Set the default block size limit in bytes.
166	///
167	/// The default value for the block size limit is:
168	/// [`DEFAULT_BLOCK_SIZE_LIMIT`].
169	///
170	/// If there is no block size limit passed to [`sp_consensus::Proposer::propose`], this value
171	/// will be used.
172	pub fn set_default_block_size_limit(&mut self, limit: usize) {
173		self.default_block_size_limit = limit;
174	}
175
176	/// Set soft deadline percentage.
177	///
178	/// The value is used to compute soft deadline during block production.
179	/// The soft deadline indicates where we should stop attempting to add transactions
180	/// to the block, which exhaust resources. After soft deadline is reached,
181	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
182	/// transactions which exhaust resources, we will conclude that the block is full.
183	///
184	/// Setting the value too low will significantly limit the amount of transactions
185	/// we try in case they exhaust resources. Setting the value too high can
186	/// potentially open a DoS vector, where many "exhaust resources" transactions
187	/// are being tried with no success, hence block producer ends up creating an empty block.
188	pub fn set_soft_deadline(&mut self, percent: Percent) {
189		self.soft_deadline_percent = percent;
190	}
191}
192
193impl<Block, C, A, PR> ProposerFactory<A, C, PR>
194where
195	A: TransactionPool<Block = Block> + 'static,
196	Block: BlockT,
197	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + Send + Sync + 'static,
198	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
199{
200	fn init_with_now(
201		&mut self,
202		parent_header: &<Block as BlockT>::Header,
203		now: Box<dyn Fn() -> time::Instant + Send + Sync>,
204	) -> Proposer<Block, C, A, PR> {
205		let parent_hash = parent_header.hash();
206
207		info!(
208			"๐Ÿ™Œ Starting consensus session on top of parent {:?} (#{})",
209			parent_hash,
210			parent_header.number()
211		);
212
213		let proposer = Proposer::<_, _, _, PR> {
214			spawn_handle: self.spawn_handle.clone(),
215			client: self.client.clone(),
216			parent_hash,
217			parent_number: *parent_header.number(),
218			transaction_pool: self.transaction_pool.clone(),
219			now,
220			metrics: self.metrics.clone(),
221			default_block_size_limit: self.default_block_size_limit,
222			soft_deadline_percent: self.soft_deadline_percent,
223			telemetry: self.telemetry.clone(),
224			_phantom: PhantomData,
225			include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
226		};
227
228		proposer
229	}
230}
231
232impl<A, Block, C, PR> sp_consensus::Environment<Block> for ProposerFactory<A, C, PR>
233where
234	A: TransactionPool<Block = Block> + 'static,
235	Block: BlockT,
236	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
237	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
238	PR: ProofRecording,
239{
240	type CreateProposer = future::Ready<Result<Self::Proposer, Self::Error>>;
241	type Proposer = Proposer<Block, C, A, PR>;
242	type Error = sp_blockchain::Error;
243
244	fn init(&mut self, parent_header: &<Block as BlockT>::Header) -> Self::CreateProposer {
245		future::ready(Ok(self.init_with_now(parent_header, Box::new(time::Instant::now))))
246	}
247}
248
/// The proposer logic.
pub struct Proposer<Block: BlockT, C, A: TransactionPool, PR> {
	/// Handle used to spawn the blocking block-production task.
	spawn_handle: Box<dyn SpawnNamed>,
	/// The client instance.
	client: Arc<C>,
	/// Hash of the block this proposer builds on top of.
	parent_hash: Block::Hash,
	/// Number of the parent block.
	parent_number: <<Block as BlockT>::Header as HeaderT>::Number,
	/// The transaction pool to draw ready extrinsics from.
	transaction_pool: Arc<A>,
	/// Clock used for deadline handling; injectable so tests can control time.
	now: Box<dyn Fn() -> time::Instant + Send + Sync>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// Block size limit used when the caller does not pass an explicit one.
	default_block_size_limit: usize,
	/// When estimating the block size, should the proof be included?
	include_proof_in_block_size_estimation: bool,
	/// Soft deadline as a fraction of the hard deadline (see [`ProposerFactory`]).
	soft_deadline_percent: Percent,
	/// Optional telemetry handle for reporting authored blocks.
	telemetry: Option<TelemetryHandle>,
	/// phantom member to pin the `ProofRecording` type.
	_phantom: PhantomData<PR>,
}
264
265impl<A, Block, C, PR> sp_consensus::Proposer<Block> for Proposer<Block, C, A, PR>
266where
267	A: TransactionPool<Block = Block> + 'static,
268	Block: BlockT,
269	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
270	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
271	PR: ProofRecording,
272{
273	type Proposal =
274		Pin<Box<dyn Future<Output = Result<Proposal<Block, PR::Proof>, Self::Error>> + Send>>;
275	type Error = sp_blockchain::Error;
276	type ProofRecording = PR;
277	type Proof = PR::Proof;
278
279	fn propose(
280		self,
281		inherent_data: InherentData,
282		inherent_digests: Digest,
283		max_duration: time::Duration,
284		block_size_limit: Option<usize>,
285	) -> Self::Proposal {
286		self.propose_block(ProposeArgs {
287			inherent_data,
288			inherent_digests,
289			max_duration,
290			block_size_limit,
291			ignored_nodes_by_proof_recording: None,
292		})
293		.boxed()
294	}
295}
296
/// Arguments for [`Proposer::propose_block`].
///
/// Use [`Default`] to obtain a baseline configuration and override individual fields via
/// struct-update syntax.
pub struct ProposeArgs<Block: BlockT> {
	/// The inherent data to pass to the block production.
	pub inherent_data: InherentData,
	/// The inherent digests to include in the produced block.
	pub inherent_digests: Digest,
	/// Max duration for building the block.
	pub max_duration: time::Duration,
	/// Optional size limit for the produced block.
	///
	/// When set, block production ends before hitting this limit. The limit includes the storage
	/// proof, when proof recording is activated.
	pub block_size_limit: Option<usize>,
	/// Trie nodes that should not be recorded.
	///
	/// Only applies when proof recording is enabled.
	pub ignored_nodes_by_proof_recording: Option<IgnoredNodes<Block::Hash>>,
}
315
316impl<Block: BlockT> Default for ProposeArgs<Block> {
317	fn default() -> Self {
318		Self {
319			inherent_data: Default::default(),
320			inherent_digests: Default::default(),
321			max_duration: Default::default(),
322			block_size_limit: None,
323			ignored_nodes_by_proof_recording: None,
324		}
325	}
326}
327
/// If the block is full we will attempt to push at most
/// this number of transactions before quitting for real.
/// It allows us to increase block utilization.
// Applied once the soft deadline has passed; see `apply_extrinsics`.
const MAX_SKIPPED_TRANSACTIONS: usize = 8;
332
333impl<A, Block, C, PR> Proposer<Block, C, A, PR>
334where
335	A: TransactionPool<Block = Block> + 'static,
336	Block: BlockT,
337	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
338	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
339	PR: ProofRecording,
340{
341	/// Propose a new block.
342	pub async fn propose_block(
343		self,
344		args: ProposeArgs<Block>,
345	) -> Result<Proposal<Block, PR::Proof>, sp_blockchain::Error> {
346		let (tx, rx) = oneshot::channel();
347		let spawn_handle = self.spawn_handle.clone();
348
349		// Spawn on a new thread, because block production is a blocking operation.
350		spawn_handle.spawn_blocking(
351			"basic-authorship-proposer",
352			None,
353			async move {
354				let res = self.propose_with(args).await;
355				if tx.send(res).is_err() {
356					trace!(
357						target: LOG_TARGET,
358						"Could not send block production result to proposer!"
359					);
360				}
361			}
362			.boxed(),
363		);
364
365		rx.await?.map_err(Into::into)
366	}
367
368	async fn propose_with(
369		self,
370		ProposeArgs {
371			inherent_data,
372			inherent_digests,
373			max_duration,
374			block_size_limit,
375			ignored_nodes_by_proof_recording,
376		}: ProposeArgs<Block>,
377	) -> Result<Proposal<Block, PR::Proof>, sp_blockchain::Error> {
378		// leave some time for evaluation and block finalization (10%)
379		let deadline = (self.now)() + max_duration - max_duration / 10;
380		let block_timer = time::Instant::now();
381		let mut block_builder = BlockBuilderBuilder::new(&*self.client)
382			.on_parent_block(self.parent_hash)
383			.with_parent_block_number(self.parent_number)
384			.with_proof_recorder(PR::ENABLED.then(|| {
385				ProofRecorder::<Block>::with_ignored_nodes(
386					ignored_nodes_by_proof_recording.unwrap_or_default(),
387				)
388			}))
389			.with_inherent_digests(inherent_digests)
390			.build()?;
391
392		self.apply_inherents(&mut block_builder, inherent_data)?;
393
394		let mode = block_builder.extrinsic_inclusion_mode();
395		let end_reason = match mode {
396			ExtrinsicInclusionMode::AllExtrinsics =>
397				self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?,
398			ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden,
399		};
400		let (block, storage_changes, proof) = block_builder.build()?.into_inner();
401		let block_took = block_timer.elapsed();
402
403		let proof =
404			PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?;
405
406		self.print_summary(&block, end_reason, block_took, block_timer.elapsed());
407		Ok(Proposal { block, proof, storage_changes })
408	}
409
410	/// Apply all inherents to the block.
411	fn apply_inherents(
412		&self,
413		block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
414		inherent_data: InherentData,
415	) -> Result<(), sp_blockchain::Error> {
416		let create_inherents_start = time::Instant::now();
417		let inherents = block_builder.create_inherents(inherent_data)?;
418		let create_inherents_end = time::Instant::now();
419
420		self.metrics.report(|metrics| {
421			metrics.create_inherents_time.observe(
422				create_inherents_end
423					.saturating_duration_since(create_inherents_start)
424					.as_secs_f64(),
425			);
426		});
427
428		for inherent in inherents {
429			match block_builder.push(inherent) {
430				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
431					warn!(
432						target: LOG_TARGET,
433						"โš ๏ธ  Dropping non-mandatory inherent from overweight block."
434					)
435				},
436				Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => {
437					error!(
438						"โŒ๏ธ Mandatory inherent extrinsic returned error. Block cannot be produced."
439					);
440					return Err(ApplyExtrinsicFailed(Validity(e)))
441				},
442				Err(e) => {
443					warn!(
444						target: LOG_TARGET,
445						"โ—๏ธ Inherent extrinsic returned unexpected error: {}. Dropping.", e
446					);
447				},
448				Ok(_) => {},
449			}
450		}
451		Ok(())
452	}
453
454	/// Apply as many extrinsics as possible to the block.
455	async fn apply_extrinsics(
456		&self,
457		block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
458		deadline: time::Instant,
459		block_size_limit: Option<usize>,
460	) -> Result<EndProposingReason, sp_blockchain::Error> {
461		// proceed with transactions
462		// We calculate soft deadline used only in case we start skipping transactions.
463		let now = (self.now)();
464		let left = deadline.saturating_duration_since(now);
465		let left_micros: u64 = left.as_micros().saturated_into();
466		let soft_deadline =
467			now + time::Duration::from_micros(self.soft_deadline_percent.mul_floor(left_micros));
468		let mut skipped = 0;
469		let mut unqueue_invalid = TxInvalidityReportMap::new();
470		let mut limit_hit_reason: Option<EndProposingReason> = None;
471
472		let delay = deadline.saturating_duration_since((self.now)()) / 8;
473		let mut pending_iterator =
474			self.transaction_pool.ready_at_with_timeout(self.parent_hash, delay).await;
475
476		let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit);
477
478		debug!(target: LOG_TARGET, "Attempting to push transactions from the pool at {:?}.", self.parent_hash);
479		let mut transaction_pushed = false;
480
481		let end_reason = loop {
482			let pending_tx = if let Some(pending_tx) = pending_iterator.next() {
483				pending_tx
484			} else {
485				debug!(
486					target: LOG_TARGET,
487					"No more transactions, proceeding with proposing."
488				);
489
490				break limit_hit_reason.unwrap_or(EndProposingReason::NoMoreTransactions)
491			};
492
493			let now = (self.now)();
494			if now > deadline {
495				debug!(
496					target: LOG_TARGET,
497					"Consensus deadline reached when pushing block transactions, \
498				proceeding with proposing."
499				);
500				break limit_hit_reason.unwrap_or(EndProposingReason::HitDeadline)
501			}
502
503			let pending_tx_data = (**pending_tx.data()).clone();
504			let pending_tx_hash = pending_tx.hash().clone();
505
506			let block_size =
507				block_builder.estimate_block_size(self.include_proof_in_block_size_estimation);
508			if block_size + pending_tx_data.encoded_size() > block_size_limit {
509				pending_iterator.report_invalid(&pending_tx);
510				limit_hit_reason = Some(EndProposingReason::HitBlockSizeLimit);
511				if skipped < MAX_SKIPPED_TRANSACTIONS {
512					skipped += 1;
513					debug!(
514						target: LOG_TARGET,
515						"Transaction would overflow the block size limit, \
516					 but will try {} more transactions before quitting.",
517						MAX_SKIPPED_TRANSACTIONS - skipped,
518					);
519					continue
520				} else if now < soft_deadline {
521					debug!(
522						target: LOG_TARGET,
523						"Transaction would overflow the block size limit, \
524					 but we still have time before the soft deadline, so \
525					 we will try a bit more."
526					);
527					continue
528				} else {
529					debug!(
530						target: LOG_TARGET,
531						"Reached block size limit, proceeding with proposing."
532					);
533					break EndProposingReason::HitBlockSizeLimit
534				}
535			}
536
537			trace!(target: LOG_TARGET, "[{:?}] Pushing to the block.", pending_tx_hash);
538			match sc_block_builder::BlockBuilder::push(block_builder, pending_tx_data) {
539				Ok(()) => {
540					transaction_pushed = true;
541					limit_hit_reason = None;
542					trace!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash);
543				},
544				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
545					pending_iterator.report_invalid(&pending_tx);
546					limit_hit_reason = Some(EndProposingReason::HitBlockWeightLimit);
547					if skipped < MAX_SKIPPED_TRANSACTIONS {
548						skipped += 1;
549						debug!(target: LOG_TARGET,
550							"Block seems full, but will try {} more transactions before quitting.",
551							MAX_SKIPPED_TRANSACTIONS - skipped,
552						);
553					} else if (self.now)() < soft_deadline {
554						debug!(target: LOG_TARGET,
555							"Block seems full, but we still have time before the soft deadline, \
556							 so we will try a bit more before quitting."
557						);
558					} else {
559						debug!(
560							target: LOG_TARGET,
561							"Reached block weight limit, proceeding with proposing."
562						);
563						break EndProposingReason::HitBlockWeightLimit
564					}
565				},
566				Err(e) => {
567					pending_iterator.report_invalid(&pending_tx);
568					debug!(
569						target: LOG_TARGET,
570						"[{:?}] Invalid transaction: {} at: {}", pending_tx_hash, e, self.parent_hash
571					);
572
573					let error_to_report = match e {
574						ApplyExtrinsicFailed(Validity(e)) => Some(e),
575						_ => None,
576					};
577
578					unqueue_invalid.insert(pending_tx_hash, error_to_report);
579				},
580			}
581		};
582
583		if matches!(end_reason, EndProposingReason::HitBlockSizeLimit) && !transaction_pushed {
584			warn!(
585				target: LOG_TARGET,
586				"Hit block size limit of `{}` without including any transaction!", block_size_limit,
587			);
588		}
589
590		self.transaction_pool
591			.report_invalid(Some(self.parent_hash), unqueue_invalid)
592			.await;
593		Ok(end_reason)
594	}
595
596	/// Prints a summary and does telemetry + metrics.
597	///
598	/// - `block`: The block that was build.
599	/// - `end_reason`: Why did we stop producing the block?
600	/// - `block_took`: How long did it took to produce the actual block?
601	/// - `propose_took`: How long did the entire proposing took?
602	fn print_summary(
603		&self,
604		block: &Block,
605		end_reason: EndProposingReason,
606		block_took: time::Duration,
607		propose_took: time::Duration,
608	) {
609		let extrinsics = block.extrinsics();
610		self.metrics.report(|metrics| {
611			metrics.number_of_transactions.set(extrinsics.len() as u64);
612			metrics.block_constructed.observe(block_took.as_secs_f64());
613			metrics.report_end_proposing_reason(end_reason);
614			metrics.create_block_proposal_time.observe(propose_took.as_secs_f64());
615		});
616
617		let extrinsics_summary = if extrinsics.is_empty() {
618			"no extrinsics".to_string()
619		} else {
620			format!(
621				"extrinsics ({}): [{}]",
622				extrinsics.len(),
623				extrinsics
624					.iter()
625					.map(|xt| BlakeTwo256::hash_of(xt).to_string())
626					.collect::<Vec<_>>()
627					.join(", ")
628			)
629		};
630
631		if log::log_enabled!(log::Level::Info) {
632			info!(
633				"๐ŸŽ Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; extrinsics_count: {}",
634				block.header().number(),
635				block_took.as_millis(),
636				<Block as BlockT>::Hash::from(block.header().hash()),
637				block.header().parent_hash(),
638				end_reason,
639				extrinsics.len()
640			)
641		} else if log::log_enabled!(log::Level::Trace) {
642			trace!(
643				"๐ŸŽ Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; {extrinsics_summary}",
644				block.header().number(),
645				block_took.as_millis(),
646				<Block as BlockT>::Hash::from(block.header().hash()),
647				block.header().parent_hash(),
648				end_reason
649			);
650		}
651
652		telemetry!(
653			self.telemetry;
654			CONSENSUS_INFO;
655			"prepared_block_for_proposing";
656			"number" => ?block.header().number(),
657			"hash" => ?<Block as BlockT>::Hash::from(block.header().hash()),
658		);
659	}
660}
661
662#[cfg(test)]
663mod tests {
664	use super::*;
665	use futures::executor::block_on;
666	use parking_lot::Mutex;
667	use sc_client_api::{Backend, TrieCacheContext};
668	use sc_transaction_pool::BasicPool;
669	use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource};
670	use sp_api::Core;
671	use sp_blockchain::HeaderBackend;
672	use sp_consensus::{BlockOrigin, Environment};
673	use sp_runtime::{generic::BlockId, traits::NumberFor, Perbill};
674	use substrate_test_runtime_client::{
675		prelude::*,
676		runtime::{Block as TestBlock, Extrinsic, ExtrinsicBuilder, Transfer},
677		TestClientBuilder, TestClientBuilderExt,
678	};
679
680	const SOURCE: TransactionSource = TransactionSource::External;
681
682	// Note:
683	// Maximum normal extrinsic size for `substrate_test_runtime` is ~65% of max_block (refer to
684	// `substrate_test_runtime::RuntimeBlockWeights` for details).
685	// This extrinsic sizing allows for:
686	// - one huge xts + a lot of tiny dust
687	// - one huge, no medium,
688	// - two medium xts
689	// This is widely exploited in following tests.
690	const HUGE: u32 = 649000000;
691	const MEDIUM: u32 = 250000000;
692	const TINY: u32 = 1000;
693
694	fn extrinsic(nonce: u64) -> Extrinsic {
695		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
696	}
697
698	fn chain_event<B: BlockT>(header: B::Header) -> ChainEvent<B>
699	where
700		NumberFor<B>: From<u64>,
701	{
702		ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }
703	}
704
705	#[test]
706	fn should_cease_building_block_when_deadline_is_reached() {
707		// given
708		let client = Arc::new(substrate_test_runtime_client::new());
709		let spawner = sp_core::testing::TaskExecutor::new();
710		let txpool = Arc::from(BasicPool::new_full(
711			Default::default(),
712			true.into(),
713			None,
714			spawner.clone(),
715			client.clone(),
716		));
717
718		let hashof0 = client.info().genesis_hash;
719		block_on(txpool.submit_at(hashof0, SOURCE, vec![extrinsic(0), extrinsic(1)])).unwrap();
720
721		block_on(
722			txpool.maintain(chain_event(
723				client.expect_header(hashof0).expect("there should be header"),
724			)),
725		);
726
727		let mut proposer_factory =
728			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
729
730		let cell = Mutex::new((false, time::Instant::now()));
731		let proposer = proposer_factory.init_with_now(
732			&client.expect_header(hashof0).unwrap(),
733			Box::new(move || {
734				let mut value = cell.lock();
735				if !value.0 {
736					value.0 = true;
737					return value.1
738				}
739				let old = value.1;
740				let new = old + time::Duration::from_secs(1);
741				*value = (true, new);
742				old
743			}),
744		);
745
746		// when
747		let deadline = time::Duration::from_secs(3);
748		let block = block_on(
749			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
750		)
751		.map(|r| r.block)
752		.unwrap();
753
754		// then
755		// block should have some extrinsics although we have some more in the pool.
756		assert_eq!(block.extrinsics().len(), 1);
757		assert_eq!(txpool.ready().count(), 2);
758	}
759
760	#[test]
761	fn should_not_panic_when_deadline_is_reached() {
762		let client = Arc::new(substrate_test_runtime_client::new());
763		let spawner = sp_core::testing::TaskExecutor::new();
764		let txpool = Arc::from(BasicPool::new_full(
765			Default::default(),
766			true.into(),
767			None,
768			spawner.clone(),
769			client.clone(),
770		));
771
772		let mut proposer_factory =
773			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
774
775		let cell = Mutex::new((false, time::Instant::now()));
776		let proposer = proposer_factory.init_with_now(
777			&client.expect_header(client.info().genesis_hash).unwrap(),
778			Box::new(move || {
779				let mut value = cell.lock();
780				if !value.0 {
781					value.0 = true;
782					return value.1
783				}
784				let new = value.1 + time::Duration::from_secs(160);
785				*value = (true, new);
786				new
787			}),
788		);
789
790		let deadline = time::Duration::from_secs(1);
791		block_on(
792			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
793		)
794		.map(|r| r.block)
795		.unwrap();
796	}
797
798	#[test]
799	fn proposed_storage_changes_should_match_execute_block_storage_changes() {
800		let (client, backend) = TestClientBuilder::new().build_with_backend();
801		let client = Arc::new(client);
802		let spawner = sp_core::testing::TaskExecutor::new();
803		let txpool = Arc::from(BasicPool::new_full(
804			Default::default(),
805			true.into(),
806			None,
807			spawner.clone(),
808			client.clone(),
809		));
810
811		let genesis_hash = client.info().best_hash;
812
813		block_on(txpool.submit_at(genesis_hash, SOURCE, vec![extrinsic(0)])).unwrap();
814
815		block_on(
816			txpool.maintain(chain_event(
817				client
818					.expect_header(client.info().genesis_hash)
819					.expect("there should be header"),
820			)),
821		);
822
823		let mut proposer_factory =
824			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
825
826		let proposer = proposer_factory.init_with_now(
827			&client.header(genesis_hash).unwrap().unwrap(),
828			Box::new(move || time::Instant::now()),
829		);
830
831		let deadline = time::Duration::from_secs(9);
832		let proposal = block_on(
833			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
834		)
835		.unwrap();
836
837		assert_eq!(proposal.block.extrinsics().len(), 1);
838
839		let api = client.runtime_api();
840		api.execute_block(genesis_hash, proposal.block).unwrap();
841
842		let state = backend.state_at(genesis_hash, TrieCacheContext::Untrusted).unwrap();
843
844		let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap();
845
846		assert_eq!(
847			proposal.storage_changes.transaction_storage_root,
848			storage_changes.transaction_storage_root,
849		);
850	}
851
852	// This test ensures that if one transaction of a user was rejected, because for example
853	// the weight limit was hit, we don't mark the other transactions of the user as invalid because
854	// the nonce is not matching.
855	#[test]
856	fn should_not_remove_invalid_transactions_from_the_same_sender_after_one_was_invalid() {
857		// given
858		let client = Arc::new(substrate_test_runtime_client::new());
859		let spawner = sp_core::testing::TaskExecutor::new();
860		let txpool = Arc::from(BasicPool::new_full(
861			Default::default(),
862			true.into(),
863			None,
864			spawner.clone(),
865			client.clone(),
866		));
867
868		let medium = |nonce| {
869			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(MEDIUM))
870				.nonce(nonce)
871				.build()
872		};
873		let huge = |nonce| {
874			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)).nonce(nonce).build()
875		};
876
877		block_on(txpool.submit_at(
878			client.info().genesis_hash,
879			SOURCE,
880			vec![medium(0), medium(1), huge(2), medium(3), huge(4), medium(5), medium(6)],
881		))
882		.unwrap();
883
884		let mut proposer_factory =
885			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
886		let mut propose_block = |client: &TestClient,
887		                         parent_number,
888		                         expected_block_extrinsics,
889		                         expected_pool_transactions| {
890			let hash = client.expect_block_hash_from_id(&BlockId::Number(parent_number)).unwrap();
891			let proposer = proposer_factory.init_with_now(
892				&client.expect_header(hash).unwrap(),
893				Box::new(move || time::Instant::now()),
894			);
895
896			// when
897			let deadline = time::Duration::from_secs(900);
898			let block = block_on(
899				proposer
900					.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
901			)
902			.map(|r| r.block)
903			.unwrap();
904
905			// then
906			// block should have some extrinsics although we have some more in the pool.
907			assert_eq!(
908				txpool.ready().count(),
909				expected_pool_transactions,
910				"at block: {}",
911				block.header.number
912			);
913			assert_eq!(
914				block.extrinsics().len(),
915				expected_block_extrinsics,
916				"at block: {}",
917				block.header.number
918			);
919
920			block
921		};
922
923		let import_and_maintain = |client: Arc<TestClient>, block: TestBlock| {
924			let hash = block.hash();
925			block_on(client.import(BlockOrigin::Own, block)).unwrap();
926			block_on(txpool.maintain(chain_event(
927				client.expect_header(hash).expect("there should be header"),
928			)));
929		};
930
931		block_on(
932			txpool.maintain(chain_event(
933				client
934					.expect_header(client.info().genesis_hash)
935					.expect("there should be header"),
936			)),
937		);
938		assert_eq!(txpool.ready().count(), 7);
939
940		// let's create one block and import it
941		let block = propose_block(&client, 0, 2, 7);
942		import_and_maintain(client.clone(), block.clone());
943		assert_eq!(txpool.ready().count(), 5);
944
945		// now let's make sure that we can still make some progress
946		let block = propose_block(&client, 1, 1, 5);
947		import_and_maintain(client.clone(), block.clone());
948		assert_eq!(txpool.ready().count(), 4);
949
950		// again let's make sure that we can still make some progress
951		let block = propose_block(&client, 2, 1, 4);
952		import_and_maintain(client.clone(), block.clone());
953		assert_eq!(txpool.ready().count(), 3);
954
955		// again let's make sure that we can still make some progress
956		let block = propose_block(&client, 3, 1, 3);
957		import_and_maintain(client.clone(), block.clone());
958		assert_eq!(txpool.ready().count(), 2);
959
960		// again let's make sure that we can still make some progress
961		let block = propose_block(&client, 4, 2, 2);
962		import_and_maintain(client.clone(), block.clone());
963		assert_eq!(txpool.ready().count(), 0);
964	}
965
966	#[test]
967	fn should_cease_building_block_when_block_limit_is_reached() {
968		let client = Arc::new(substrate_test_runtime_client::new());
969		let spawner = sp_core::testing::TaskExecutor::new();
970		let txpool = Arc::from(BasicPool::new_full(
971			Default::default(),
972			true.into(),
973			None,
974			spawner.clone(),
975			client.clone(),
976		));
977		let genesis_hash = client.info().genesis_hash;
978		let genesis_header = client.expect_header(genesis_hash).expect("there should be header");
979
980		let extrinsics_num = 5;
981		let extrinsics = std::iter::once(
982			Transfer {
983				from: Sr25519Keyring::Alice.into(),
984				to: Sr25519Keyring::Bob.into(),
985				amount: 100,
986				nonce: 0,
987			}
988			.into_unchecked_extrinsic(),
989		)
990		.chain((1..extrinsics_num as u64).map(extrinsic))
991		.collect::<Vec<_>>();
992
993		let block_limit = genesis_header.encoded_size() +
994			extrinsics
995				.iter()
996				.take(extrinsics_num - 1)
997				.map(Encode::encoded_size)
998				.sum::<usize>() +
999			Vec::<Extrinsic>::new().encoded_size();
1000
1001		block_on(txpool.submit_at(genesis_hash, SOURCE, extrinsics.clone())).unwrap();
1002
1003		block_on(txpool.maintain(chain_event(genesis_header.clone())));
1004
1005		let mut proposer_factory =
1006			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
1007
1008		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
1009
1010		// Give it enough time
1011		let deadline = time::Duration::from_secs(300);
1012		let block = block_on(proposer.propose_block(ProposeArgs {
1013			max_duration: deadline,
1014			block_size_limit: Some(block_limit),
1015			..Default::default()
1016		}))
1017		.map(|r| r.block)
1018		.unwrap();
1019
1020		// Based on the block limit, one transaction shouldn't be included.
1021		assert_eq!(block.extrinsics().len(), extrinsics_num - 1);
1022
1023		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
1024
1025		let block = block_on(
1026			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
1027		)
1028		.map(|r| r.block)
1029		.unwrap();
1030
1031		// Without a block limit we should include all of them
1032		assert_eq!(block.extrinsics().len(), extrinsics_num);
1033
1034		let mut proposer_factory = ProposerFactory::with_proof_recording(
1035			spawner.clone(),
1036			client.clone(),
1037			txpool.clone(),
1038			None,
1039			None,
1040		);
1041
1042		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
1043
1044		// Exact block_limit, which includes:
1045		// 99 (header_size) + 718 (proof@initialize_block) + 246 (one Transfer extrinsic)
1046		let block_limit = {
1047			let builder = BlockBuilderBuilder::new(&*client)
1048				.on_parent_block(genesis_header.hash())
1049				.with_parent_block_number(0)
1050				.enable_proof_recording()
1051				.build()
1052				.unwrap();
1053			builder.estimate_block_size(true) + extrinsics[0].encoded_size()
1054		};
1055		let block = block_on(proposer.propose_block(ProposeArgs {
1056			max_duration: deadline,
1057			block_size_limit: Some(block_limit),
1058			..Default::default()
1059		}))
1060		.map(|r| r.block)
1061		.unwrap();
1062
1063		// The block limit was increased, but we now include the proof in the estimation of the
1064		// block size and thus, only the `Transfer` will fit into the block. It reads more data
1065		// than we have reserved in the block limit.
1066		assert_eq!(block.extrinsics().len(), 1);
1067	}
1068
1069	#[test]
1070	fn should_keep_adding_transactions_after_exhausts_resources_before_soft_deadline() {
1071		// given
1072		let client = Arc::new(substrate_test_runtime_client::new());
1073		let spawner = sp_core::testing::TaskExecutor::new();
1074		let txpool = Arc::from(BasicPool::new_full(
1075			Default::default(),
1076			true.into(),
1077			None,
1078			spawner.clone(),
1079			client.clone(),
1080		));
1081		let genesis_hash = client.info().genesis_hash;
1082
1083		let tiny = |nonce| {
1084			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
1085		};
1086		let huge = |who| {
1087			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
1088				.signer(Sr25519Keyring::numeric(who))
1089				.build()
1090		};
1091
1092		block_on(
1093			txpool.submit_at(
1094				genesis_hash,
1095				SOURCE,
1096				// add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources
1097				(0..MAX_SKIPPED_TRANSACTIONS * 2)
1098					.into_iter()
1099					.map(huge)
1100					// and some transactions that are okay.
1101					.chain((0..MAX_SKIPPED_TRANSACTIONS as u64).into_iter().map(tiny))
1102					.collect(),
1103			),
1104		)
1105		.unwrap();
1106
1107		block_on(txpool.maintain(chain_event(
1108			client.expect_header(genesis_hash).expect("there should be header"),
1109		)));
1110		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3);
1111
1112		let mut proposer_factory =
1113			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
1114
1115		let cell = Mutex::new(time::Instant::now());
1116		let proposer = proposer_factory.init_with_now(
1117			&client.expect_header(genesis_hash).unwrap(),
1118			Box::new(move || {
1119				let mut value = cell.lock();
1120				let old = *value;
1121				*value = old + time::Duration::from_secs(1);
1122				old
1123			}),
1124		);
1125
1126		// when
1127		// give it enough time so that deadline is never triggered.
1128		let deadline = time::Duration::from_secs(900);
1129		let block = block_on(
1130			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
1131		)
1132		.map(|r| r.block)
1133		.unwrap();
1134
1135		// then block should have all non-exhaust resources extrinsics (+ the first one).
1136		assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1);
1137	}
1138
1139	#[test]
1140	fn should_only_skip_up_to_some_limit_after_soft_deadline() {
1141		// given
1142		let client = Arc::new(substrate_test_runtime_client::new());
1143		let spawner = sp_core::testing::TaskExecutor::new();
1144		let txpool = Arc::from(BasicPool::new_full(
1145			Default::default(),
1146			true.into(),
1147			None,
1148			spawner.clone(),
1149			client.clone(),
1150		));
1151		let genesis_hash = client.info().genesis_hash;
1152
1153		let tiny = |who| {
1154			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY))
1155				.signer(Sr25519Keyring::numeric(who))
1156				.nonce(1)
1157				.build()
1158		};
1159		let huge = |who| {
1160			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
1161				.signer(Sr25519Keyring::numeric(who))
1162				.build()
1163		};
1164
1165		block_on(
1166			txpool.submit_at(
1167				genesis_hash,
1168				SOURCE,
1169				(0..MAX_SKIPPED_TRANSACTIONS + 2)
1170					.into_iter()
1171					.map(huge)
1172					// and some transactions that are okay.
1173					.chain((0..MAX_SKIPPED_TRANSACTIONS + 2).into_iter().map(tiny))
1174					.collect(),
1175			),
1176		)
1177		.unwrap();
1178
1179		block_on(txpool.maintain(chain_event(
1180			client.expect_header(genesis_hash).expect("there should be header"),
1181		)));
1182		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 4);
1183
1184		let mut proposer_factory =
1185			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
1186
1187		let deadline = time::Duration::from_secs(600);
1188		let cell = Arc::new(Mutex::new((0, time::Instant::now())));
1189		let cell2 = cell.clone();
1190		let proposer = proposer_factory.init_with_now(
1191			&client.expect_header(genesis_hash).unwrap(),
1192			Box::new(move || {
1193				let mut value = cell.lock();
1194				let (called, old) = *value;
1195				// add time after deadline is calculated internally (hence 1)
1196				let increase = if called == 1 {
1197					// we start after the soft_deadline should have already been reached.
1198					deadline / 2
1199				} else {
1200					// but we make sure to never reach the actual deadline
1201					time::Duration::from_millis(0)
1202				};
1203				*value = (called + 1, old + increase);
1204				old
1205			}),
1206		);
1207
1208		let block = block_on(
1209			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
1210		)
1211		.map(|r| r.block)
1212		.unwrap();
1213
1214		// then the block should have one or two transactions. This maybe random as they are
1215		// processed in parallel. The same signer and consecutive nonces for huge and tiny
1216		// transactions guarantees that max two transactions will get to the block.
1217		assert!(
1218			(1..3).contains(&block.extrinsics().len()),
1219			"Block shall contain one or two extrinsics."
1220		);
1221		assert!(
1222			cell2.lock().0 > MAX_SKIPPED_TRANSACTIONS,
1223			"Not enough calls to current time, which indicates the test might have ended because of deadline, not soft deadline"
1224		);
1225	}
1226}