// Source: sc_basic_authorship/basic_authorship.rs
1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19//! A consensus proposer for "basic" chains which use the primitive inherent-data.
20
21// FIXME #1021 move this into sp-consensus
22
23use codec::Encode;
24use futures::{
25	channel::oneshot,
26	future,
27	future::{Future, FutureExt},
28};
29use log::{debug, error, info, log_enabled, trace, warn, Level};
30use prometheus_endpoint::Registry as PrometheusRegistry;
31use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder};
32use sc_proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics};
33use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO};
34use sc_transaction_pool_api::{InPoolTransaction, TransactionPool, TxInvalidityReportMap};
35use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi};
36use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend};
37use sp_consensus::{Proposal, ProposeArgs};
38use sp_core::traits::SpawnNamed;
39use sp_inherents::InherentData;
40use sp_runtime::{
41	traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT},
42	ExtrinsicInclusionMode, Percent, SaturatedConversion,
43};
44use std::{pin::Pin, sync::Arc, time};
45
46/// Default block size limit in bytes used by [`Proposer`].
47///
48/// Can be overwritten by [`ProposerFactory::set_default_block_size_limit`].
49///
50/// Be aware that there is also an upper packet size on what the networking code
51/// will accept. If the block doesn't fit in such a package, it can not be
52/// transferred to other nodes.
53pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512;
54
55const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50);
56
57const LOG_TARGET: &'static str = "basic-authorship";
58
/// [`Proposer`] factory.
pub struct ProposerFactory<A, C> {
	/// Handle used to spawn the blocking block-production task.
	spawn_handle: Box<dyn SpawnNamed>,
	/// The client instance.
	client: Arc<C>,
	/// The transaction pool.
	transaction_pool: Arc<A>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// The default block size limit.
	///
	/// If no `block_size_limit` is passed to [`sp_consensus::Proposer::propose`], this block size
	/// limit will be used.
	default_block_size_limit: usize,
	/// Soft deadline percentage of hard deadline.
	///
	/// The value is used to compute soft deadline during block production.
	/// The soft deadline indicates where we should stop attempting to add transactions
	/// to the block, which exhaust resources. After soft deadline is reached,
	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
	/// transactions which exhaust resources, we will conclude that the block is full.
	soft_deadline_percent: Percent,
	/// Optional telemetry handle used to report produced blocks.
	telemetry: Option<TelemetryHandle>,
}
83
84impl<A, C> Clone for ProposerFactory<A, C> {
85	fn clone(&self) -> Self {
86		Self {
87			spawn_handle: self.spawn_handle.clone(),
88			client: self.client.clone(),
89			transaction_pool: self.transaction_pool.clone(),
90			metrics: self.metrics.clone(),
91			default_block_size_limit: self.default_block_size_limit,
92			soft_deadline_percent: self.soft_deadline_percent,
93			telemetry: self.telemetry.clone(),
94		}
95	}
96}
97
98impl<A, C> ProposerFactory<A, C> {
99	/// Create a new proposer factory.
100	pub fn new(
101		spawn_handle: impl SpawnNamed + 'static,
102		client: Arc<C>,
103		transaction_pool: Arc<A>,
104		prometheus: Option<&PrometheusRegistry>,
105		telemetry: Option<TelemetryHandle>,
106	) -> Self {
107		ProposerFactory {
108			spawn_handle: Box::new(spawn_handle),
109			transaction_pool,
110			metrics: PrometheusMetrics::new(prometheus),
111			default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
112			soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
113			telemetry,
114			client,
115		}
116	}
117
118	/// Deprecated, use [`Self::new`] instead.
119	#[deprecated(note = "Proof recording is now handled differently. Use `new` instead.")]
120	pub fn with_proof_recording(
121		spawn_handle: impl SpawnNamed + 'static,
122		client: Arc<C>,
123		transaction_pool: Arc<A>,
124		prometheus: Option<&PrometheusRegistry>,
125		telemetry: Option<TelemetryHandle>,
126	) -> Self {
127		Self::new(spawn_handle, client, transaction_pool, prometheus, telemetry)
128	}
129
130	/// Set the default block size limit in bytes.
131	///
132	/// The default value for the block size limit is:
133	/// [`DEFAULT_BLOCK_SIZE_LIMIT`].
134	///
135	/// If there is no block size limit passed to [`sp_consensus::Proposer::propose`], this value
136	/// will be used.
137	pub fn set_default_block_size_limit(&mut self, limit: usize) {
138		self.default_block_size_limit = limit;
139	}
140
141	/// Set soft deadline percentage.
142	///
143	/// The value is used to compute soft deadline during block production.
144	/// The soft deadline indicates where we should stop attempting to add transactions
145	/// to the block, which exhaust resources. After soft deadline is reached,
146	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
147	/// transactions which exhaust resources, we will conclude that the block is full.
148	///
149	/// Setting the value too low will significantly limit the amount of transactions
150	/// we try in case they exhaust resources. Setting the value too high can
151	/// potentially open a DoS vector, where many "exhaust resources" transactions
152	/// are being tried with no success, hence block producer ends up creating an empty block.
153	pub fn set_soft_deadline(&mut self, percent: Percent) {
154		self.soft_deadline_percent = percent;
155	}
156}
157
158impl<Block, C, A> ProposerFactory<A, C>
159where
160	A: TransactionPool<Block = Block> + 'static,
161	Block: BlockT,
162	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + Send + Sync + 'static,
163	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
164{
165	fn init_with_now(
166		&mut self,
167		parent_header: &<Block as BlockT>::Header,
168		now: Box<dyn Fn() -> time::Instant + Send + Sync>,
169	) -> Proposer<Block, C, A> {
170		let parent_hash = parent_header.hash();
171
172		info!(
173			"๐Ÿ™Œ Starting consensus session on top of parent {:?} (#{})",
174			parent_hash,
175			parent_header.number()
176		);
177
178		let proposer = Proposer::<_, _, _> {
179			spawn_handle: self.spawn_handle.clone(),
180			client: self.client.clone(),
181			parent_hash,
182			parent_number: *parent_header.number(),
183			transaction_pool: self.transaction_pool.clone(),
184			now,
185			metrics: self.metrics.clone(),
186			default_block_size_limit: self.default_block_size_limit,
187			soft_deadline_percent: self.soft_deadline_percent,
188			telemetry: self.telemetry.clone(),
189		};
190
191		proposer
192	}
193}
194
195impl<A, Block, C> sp_consensus::Environment<Block> for ProposerFactory<A, C>
196where
197	A: TransactionPool<Block = Block> + 'static,
198	Block: BlockT,
199	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
200	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
201{
202	type CreateProposer = future::Ready<Result<Self::Proposer, Self::Error>>;
203	type Proposer = Proposer<Block, C, A>;
204	type Error = sp_blockchain::Error;
205
206	fn init(&mut self, parent_header: &<Block as BlockT>::Header) -> Self::CreateProposer {
207		future::ready(Ok(self.init_with_now(parent_header, Box::new(time::Instant::now))))
208	}
209}
210
/// The proposer logic.
pub struct Proposer<Block: BlockT, C, A: TransactionPool> {
	/// Handle used to spawn the blocking block-production task.
	spawn_handle: Box<dyn SpawnNamed>,
	/// Client used for building blocks and reading headers.
	client: Arc<C>,
	/// Hash of the block to build on top of.
	parent_hash: Block::Hash,
	/// Number of the parent block.
	parent_number: <<Block as BlockT>::Header as HeaderT>::Number,
	/// Pool providing the transactions to include.
	transaction_pool: Arc<A>,
	/// Clock used for deadline handling; injectable for tests.
	now: Box<dyn Fn() -> time::Instant + Send + Sync>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// Block size limit used when `ProposeArgs` does not specify one.
	default_block_size_limit: usize,
	/// Soft deadline as a percentage of the hard deadline (see factory docs).
	soft_deadline_percent: Percent,
	/// Optional telemetry handle used to report produced blocks.
	telemetry: Option<TelemetryHandle>,
}
224
225impl<A, Block, C> sp_consensus::Proposer<Block> for Proposer<Block, C, A>
226where
227	A: TransactionPool<Block = Block> + 'static,
228	Block: BlockT,
229	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
230	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
231{
232	type Proposal = Pin<Box<dyn Future<Output = Result<Proposal<Block>, Self::Error>> + Send>>;
233	type Error = sp_blockchain::Error;
234
235	fn propose(self, args: ProposeArgs<Block>) -> Self::Proposal {
236		Self::propose_block(self, args).boxed()
237	}
238}
239
/// If the block is full we will attempt to push at most
/// this number of transactions before quitting for real.
/// It allows us to increase block utilization.
/// (Consumed by `Proposer::apply_extrinsics` while skipping over
/// resource-exhausting transactions.)
const MAX_SKIPPED_TRANSACTIONS: usize = 8;
244
245impl<A, Block, C> Proposer<Block, C, A>
246where
247	A: TransactionPool<Block = Block> + 'static,
248	Block: BlockT,
249	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
250	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
251{
252	/// Propose a new block.
253	pub async fn propose_block(
254		self,
255		args: ProposeArgs<Block>,
256	) -> Result<Proposal<Block>, sp_blockchain::Error> {
257		let (tx, rx) = oneshot::channel();
258		let spawn_handle = self.spawn_handle.clone();
259
260		// Spawn on a new thread, because block production is a blocking operation.
261		spawn_handle.spawn_blocking(
262			"basic-authorship-proposer",
263			None,
264			async move {
265				let res = self.propose_with(args).await;
266				if tx.send(res).is_err() {
267					trace!(
268						target: LOG_TARGET,
269						"Could not send block production result to proposer!"
270					);
271				}
272			}
273			.boxed(),
274		);
275
276		rx.await?.map_err(Into::into)
277	}
278
	/// Build the actual block: construct a block builder, apply inherents, then
	/// (if permitted) fill the remaining time/space with pool transactions.
	async fn propose_with(
		self,
		args: ProposeArgs<Block>,
	) -> Result<Proposal<Block>, sp_blockchain::Error> {
		let ProposeArgs {
			inherent_data,
			inherent_digests,
			max_duration,
			block_size_limit,
			storage_proof_recorder,
			extra_extensions,
		} = args;
		// leave some time for evaluation and block finalization (10%)
		let deadline = (self.now)() + max_duration - max_duration / 10;
		let block_timer = time::Instant::now();

		let mut block_builder = BlockBuilderBuilder::new(&*self.client)
			.on_parent_block(self.parent_hash)
			.with_parent_block_number(self.parent_number)
			.with_proof_recorder(storage_proof_recorder)
			.with_inherent_digests(inherent_digests)
			.with_extra_extensions(extra_extensions)
			.build()?;

		self.apply_inherents(&mut block_builder, inherent_data)?;

		// The block builder tells us whether ordinary extrinsics may be included;
		// `OnlyInherents` means we must stop right after the inherents.
		let mode = block_builder.extrinsic_inclusion_mode();
		let end_reason = match mode {
			ExtrinsicInclusionMode::AllExtrinsics =>
				self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?,
			ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden,
		};
		let (block, storage_changes) = block_builder.build()?.into_inner();
		let block_took = block_timer.elapsed();

		self.print_summary(&block, end_reason, block_took, block_timer.elapsed());
		Ok(Proposal { block, storage_changes })
	}
317
	/// Apply all inherents to the block.
	///
	/// A non-mandatory inherent that fails is dropped with a warning; a failing
	/// *mandatory* inherent aborts block production with an error.
	fn apply_inherents(
		&self,
		block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
		inherent_data: InherentData,
	) -> Result<(), sp_blockchain::Error> {
		let create_inherents_start = time::Instant::now();

		// Only collect the identifier list when debug logging is on — it is
		// purely diagnostic and costs allocations.
		let inherent_identifiers = log_enabled!(target: LOG_TARGET, Level::Debug).then(|| {
			inherent_data
				.identifiers()
				.map(|id| String::from_utf8_lossy(id).to_string())
				.collect::<Vec<String>>()
		});

		let inherents = block_builder.create_inherents(inherent_data)?;
		let create_inherents_end = time::Instant::now();

		debug!(target: LOG_TARGET, "apply_inherents: Runtime provided {} inherents. Inherent identifiers present: {:?}", inherents.len(), inherent_identifiers);

		self.metrics.report(|metrics| {
			metrics.create_inherents_time.observe(
				create_inherents_end
					.saturating_duration_since(create_inherents_start)
					.as_secs_f64(),
			);
		});

		for inherent in inherents {
			match block_builder.push(inherent) {
				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
					warn!(
						target: LOG_TARGET,
						"โš ๏ธ  Dropping non-mandatory inherent from overweight block."
					)
				},
				Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => {
					// A failing mandatory inherent means the block cannot be valid,
					// so abort instead of producing a broken block.
					error!(
						"โŒ๏ธ Mandatory inherent extrinsic returned error. Block cannot be produced."
					);
					return Err(ApplyExtrinsicFailed(Validity(e)))
				},
				Err(e) => {
					warn!(
						target: LOG_TARGET,
						"โ—๏ธ Inherent extrinsic returned unexpected error: {}. Dropping.", e
					);
				},
				Ok(_) => {},
			}
		}
		Ok(())
	}
371
	/// Apply as many extrinsics as possible to the block.
	///
	/// Returns why proposing ended: no more transactions, deadline hit, or a
	/// block size/weight limit reached. Transactions that fail with other
	/// errors are collected and reported back to the pool as invalid.
	async fn apply_extrinsics(
		&self,
		block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
		deadline: time::Instant,
		block_size_limit: Option<usize>,
	) -> Result<EndProposingReason, sp_blockchain::Error> {
		// proceed with transactions
		// We calculate soft deadline used only in case we start skipping transactions.
		let now = (self.now)();
		let left = deadline.saturating_duration_since(now);
		let left_micros: u64 = left.as_micros().saturated_into();
		let soft_deadline =
			now + time::Duration::from_micros(self.soft_deadline_percent.mul_floor(left_micros));
		let mut skipped = 0;
		let mut unqueue_invalid = TxInvalidityReportMap::new();
		// Remembers a limit hit while we were still skipping transactions, so
		// that running out of transactions afterwards reports the limit instead
		// of `NoMoreTransactions`.
		let mut limit_hit_reason: Option<EndProposingReason> = None;

		// Give the pool up to 1/8 of the remaining time to deliver its ready set.
		let delay = deadline.saturating_duration_since((self.now)()) / 8;
		let mut pending_iterator =
			self.transaction_pool.ready_at_with_timeout(self.parent_hash, delay).await;

		let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit);

		debug!(target: LOG_TARGET, "Attempting to push transactions from the pool at {:?}.", self.parent_hash);
		let mut transaction_pushed = false;

		let end_reason = loop {
			let pending_tx = if let Some(pending_tx) = pending_iterator.next() {
				pending_tx
			} else {
				debug!(
					target: LOG_TARGET,
					"No more transactions, proceeding with proposing."
				);

				break limit_hit_reason.unwrap_or(EndProposingReason::NoMoreTransactions)
			};

			let now = (self.now)();
			if now > deadline {
				debug!(
					target: LOG_TARGET,
					"Consensus deadline reached when pushing block transactions, \
				proceeding with proposing."
				);
				break limit_hit_reason.unwrap_or(EndProposingReason::HitDeadline)
			}

			let pending_tx_data = (**pending_tx.data()).clone();
			let pending_tx_hash = pending_tx.hash().clone();

			// Pre-check the encoded size before attempting to push, so an
			// oversized transaction never reaches the runtime.
			let block_size = block_builder.estimate_block_size();
			if block_size + pending_tx_data.encoded_size() > block_size_limit {
				pending_iterator.report_invalid(&pending_tx);
				limit_hit_reason = Some(EndProposingReason::HitBlockSizeLimit);
				if skipped < MAX_SKIPPED_TRANSACTIONS {
					skipped += 1;
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
					 but will try {} more transactions before quitting.",
						MAX_SKIPPED_TRANSACTIONS - skipped,
					);
					continue
				} else if now < soft_deadline {
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
					 but we still have time before the soft deadline, so \
					 we will try a bit more."
					);
					continue
				} else {
					debug!(
						target: LOG_TARGET,
						"Reached block size limit, proceeding with proposing."
					);
					break EndProposingReason::HitBlockSizeLimit
				}
			}

			trace!(target: LOG_TARGET, "[{:?}] Pushing to the block.", pending_tx_hash);
			match sc_block_builder::BlockBuilder::push(block_builder, pending_tx_data) {
				Ok(()) => {
					transaction_pushed = true;
					// A successful push means the block was not actually full yet.
					limit_hit_reason = None;
					trace!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash);
				},
				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
					pending_iterator.report_invalid(&pending_tx);
					limit_hit_reason = Some(EndProposingReason::HitBlockWeightLimit);
					if skipped < MAX_SKIPPED_TRANSACTIONS {
						skipped += 1;
						debug!(target: LOG_TARGET,
							"Block seems full, but will try {} more transactions before quitting.",
							MAX_SKIPPED_TRANSACTIONS - skipped,
						);
					} else if (self.now)() < soft_deadline {
						debug!(target: LOG_TARGET,
							"Block seems full, but we still have time before the soft deadline, \
							 so we will try a bit more before quitting."
						);
					} else {
						debug!(
							target: LOG_TARGET,
							"Reached block weight limit, proceeding with proposing."
						);
						break EndProposingReason::HitBlockWeightLimit
					}
				},
				Err(e) => {
					pending_iterator.report_invalid(&pending_tx);
					debug!(
						target: LOG_TARGET,
						"[{:?}] Invalid transaction: {} at: {}", pending_tx_hash, e, self.parent_hash
					);

					// Only validity errors carry a reason the pool can act on.
					let error_to_report = match e {
						ApplyExtrinsicFailed(Validity(e)) => Some(e),
						_ => None,
					};

					unqueue_invalid.insert(pending_tx_hash, error_to_report);
				},
			}
		};

		if matches!(end_reason, EndProposingReason::HitBlockSizeLimit) && !transaction_pushed {
			warn!(
				target: LOG_TARGET,
				"Hit block size limit of `{}` without including any transaction!", block_size_limit,
			);
		}

		self.transaction_pool
			.report_invalid(Some(self.parent_hash), unqueue_invalid)
			.await;
		Ok(end_reason)
	}
512
513	/// Prints a summary and does telemetry + metrics.
514	///
515	/// - `block`: The block that was build.
516	/// - `end_reason`: Why did we stop producing the block?
517	/// - `block_took`: How long did it took to produce the actual block?
518	/// - `propose_took`: How long did the entire proposing took?
519	fn print_summary(
520		&self,
521		block: &Block,
522		end_reason: EndProposingReason,
523		block_took: time::Duration,
524		propose_took: time::Duration,
525	) {
526		let extrinsics = block.extrinsics();
527		self.metrics.report(|metrics| {
528			metrics.number_of_transactions.set(extrinsics.len() as u64);
529			metrics.block_constructed.observe(block_took.as_secs_f64());
530			metrics.report_end_proposing_reason(end_reason);
531			metrics.create_block_proposal_time.observe(propose_took.as_secs_f64());
532		});
533
534		let extrinsics_summary = if extrinsics.is_empty() {
535			"no extrinsics".to_string()
536		} else {
537			format!(
538				"extrinsics ({}): [{}]",
539				extrinsics.len(),
540				extrinsics
541					.iter()
542					.map(|xt| BlakeTwo256::hash_of(xt).to_string())
543					.collect::<Vec<_>>()
544					.join(", ")
545			)
546		};
547
548		if log::log_enabled!(log::Level::Info) {
549			info!(
550				"๐ŸŽ Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; extrinsics_count: {}",
551				block.header().number(),
552				block_took.as_millis(),
553				<Block as BlockT>::Hash::from(block.header().hash()),
554				block.header().parent_hash(),
555				end_reason,
556				extrinsics.len()
557			)
558		} else if log::log_enabled!(log::Level::Trace) {
559			trace!(
560				"๐ŸŽ Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; {extrinsics_summary}",
561				block.header().number(),
562				block_took.as_millis(),
563				<Block as BlockT>::Hash::from(block.header().hash()),
564				block.header().parent_hash(),
565				end_reason
566			);
567		}
568
569		telemetry!(
570			self.telemetry;
571			CONSENSUS_INFO;
572			"prepared_block_for_proposing";
573			"number" => ?block.header().number(),
574			"hash" => ?<Block as BlockT>::Hash::from(block.header().hash()),
575		);
576	}
577}
578
579#[cfg(test)]
580mod tests {
581	use super::*;
582	use futures::executor::block_on;
583	use parking_lot::Mutex;
584	use sc_client_api::{Backend, TrieCacheContext};
585	use sc_transaction_pool::BasicPool;
586	use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource};
587	use sp_api::Core;
588	use sp_blockchain::HeaderBackend;
589	use sp_consensus::{BlockOrigin, Environment};
590	use sp_runtime::{generic::BlockId, traits::NumberFor, Perbill};
591	use substrate_test_runtime_client::{
592		prelude::*,
593		runtime::{Block as TestBlock, Extrinsic, ExtrinsicBuilder, Transfer},
594		TestClientBuilder, TestClientBuilderExt,
595	};
596
	// Source tag used for every transaction submitted by these tests.
	const SOURCE: TransactionSource = TransactionSource::External;

	// Note:
	// Maximum normal extrinsic size for `substrate_test_runtime` is ~65% of max_block (refer to
	// `substrate_test_runtime::RuntimeBlockWeights` for details).
	// This extrinsic sizing allows for:
	// - one huge xts + a lot of tiny dust
	// - one huge, no medium,
	// - two medium xts
	// This is widely exploited in following tests.
	const HUGE: u32 = 649000000;
	const MEDIUM: u32 = 250000000;
	const TINY: u32 = 1000;
610
	/// Build a `TINY` `fill_block` extrinsic with the given `nonce`.
	fn extrinsic(nonce: u64) -> Extrinsic {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
	}
614
	/// Wrap `header` into the `NewBestBlock` event fed to the pool's `maintain`.
	fn chain_event<B: BlockT>(header: B::Header) -> ChainEvent<B>
	where
		NumberFor<B>: From<u64>,
	{
		ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }
	}
621
	#[test]
	fn should_cease_building_block_when_deadline_is_reached() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let hashof0 = client.info().genesis_hash;
		block_on(txpool.submit_at(hashof0, SOURCE, vec![extrinsic(0), extrinsic(1)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client.expect_header(hashof0).expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Mocked clock: the first call returns the base instant unchanged; each
		// later call advances the stored instant by one second, so the 3s
		// deadline is crossed after only one transaction was pushed.
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(hashof0).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1
				}
				let old = value.1;
				let new = old + time::Duration::from_secs(1);
				*value = (true, new);
				old
			}),
		);

		// when
		let deadline = time::Duration::from_secs(3);
		let block = block_on(
			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
		)
		.map(|r| r.block)
		.unwrap();

		// then
		// block should have some extrinsics although we have some more in the pool.
		assert_eq!(block.extrinsics().len(), 1);
		assert_eq!(txpool.ready().count(), 2);
	}
676
	#[test]
	fn should_not_panic_when_deadline_is_reached() {
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Mocked clock: after the first call it jumps 160s forward, far beyond
		// the 1s deadline — proposing must still complete without panicking
		// (e.g. no underflow when computing remaining time).
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(client.info().genesis_hash).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1
				}
				let new = value.1 + time::Duration::from_secs(160);
				*value = (true, new);
				new
			}),
		);

		let deadline = time::Duration::from_secs(1);
		block_on(
			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
		)
		.map(|r| r.block)
		.unwrap();
	}
714
	#[test]
	fn proposed_storage_changes_should_match_execute_block_storage_changes() {
		let (client, backend) = TestClientBuilder::new().build_with_backend();
		let client = Arc::new(client);
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let genesis_hash = client.info().best_hash;

		block_on(txpool.submit_at(genesis_hash, SOURCE, vec![extrinsic(0)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		let proposer = proposer_factory.init_with_now(
			&client.header(genesis_hash).unwrap().unwrap(),
			Box::new(move || time::Instant::now()),
		);

		let deadline = time::Duration::from_secs(9);
		let proposal = block_on(
			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
		)
		.unwrap();

		assert_eq!(proposal.block.extrinsics().len(), 1);

		// Re-execute the proposed block via the runtime API and compare the
		// resulting transaction storage root with what the proposer reported.
		let api = client.runtime_api();
		api.execute_block(genesis_hash, proposal.block.into()).unwrap();

		let state = backend.state_at(genesis_hash, TrieCacheContext::Untrusted).unwrap();

		let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap();

		assert_eq!(
			proposal.storage_changes.transaction_storage_root,
			storage_changes.transaction_storage_root,
		);
	}
768
	// This test ensures that if one transaction of a user was rejected, because for example
	// the weight limit was hit, we don't mark the other transactions of the user as invalid because
	// the nonce is not matching.
	#[test]
	fn should_not_remove_invalid_transactions_from_the_same_sender_after_one_was_invalid() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		// `fill_block` extrinsics sized per the `HUGE`/`MEDIUM` constants above.
		let medium = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(MEDIUM))
				.nonce(nonce)
				.build()
		};
		let huge = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)).nonce(nonce).build()
		};

		block_on(txpool.submit_at(
			client.info().genesis_hash,
			SOURCE,
			vec![medium(0), medium(1), huge(2), medium(3), huge(4), medium(5), medium(6)],
		))
		.unwrap();

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
		// Proposes one block on top of `parent_number` and checks the number of
		// extrinsics included as well as the transactions left in the pool.
		let mut propose_block = |client: &TestClient,
		                         parent_number,
		                         expected_block_extrinsics,
		                         expected_pool_transactions| {
			let hash = client.expect_block_hash_from_id(&BlockId::Number(parent_number)).unwrap();
			let proposer = proposer_factory.init_with_now(
				&client.expect_header(hash).unwrap(),
				Box::new(move || time::Instant::now()),
			);

			// when
			let deadline = time::Duration::from_secs(900);
			let block = block_on(
				proposer
					.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
			)
			.map(|r| r.block)
			.unwrap();

			// then
			// block should have some extrinsics although we have some more in the pool.
			assert_eq!(
				txpool.ready().count(),
				expected_pool_transactions,
				"at block: {}",
				block.header.number
			);
			assert_eq!(
				block.extrinsics().len(),
				expected_block_extrinsics,
				"at block: {}",
				block.header.number
			);

			block
		};

		// Imports `block` and lets the pool prune the included transactions.
		let import_and_maintain = |client: Arc<TestClient>, block: TestBlock| {
			let hash = block.hash();
			block_on(client.import(BlockOrigin::Own, block)).unwrap();
			block_on(txpool.maintain(chain_event(
				client.expect_header(hash).expect("there should be header"),
			)));
		};

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);
		assert_eq!(txpool.ready().count(), 7);

		// let's create one block and import it
		let block = propose_block(&client, 0, 2, 7);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 5);

		// now let's make sure that we can still make some progress
		let block = propose_block(&client, 1, 1, 5);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 4);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 2, 1, 4);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 3);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 3, 1, 3);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 2);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 4, 2, 2);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 0);
	}
882
883	#[test]
884	fn should_cease_building_block_when_block_limit_is_reached() {
885		let client = Arc::new(substrate_test_runtime_client::new());
886		let spawner = sp_core::testing::TaskExecutor::new();
887		let txpool = Arc::from(BasicPool::new_full(
888			Default::default(),
889			true.into(),
890			None,
891			spawner.clone(),
892			client.clone(),
893		));
894		let genesis_hash = client.info().genesis_hash;
895		let genesis_header = client.expect_header(genesis_hash).expect("there should be header");
896
897		let extrinsics_num = 5;
898		let extrinsics = std::iter::once(
899			Transfer {
900				from: Sr25519Keyring::Alice.into(),
901				to: Sr25519Keyring::Bob.into(),
902				amount: 100,
903				nonce: 0,
904			}
905			.into_unchecked_extrinsic(),
906		)
907		.chain((1..extrinsics_num as u64).map(extrinsic))
908		.collect::<Vec<_>>();
909
910		let block_limit = genesis_header.encoded_size() +
911			extrinsics
912				.iter()
913				.take(extrinsics_num - 1)
914				.map(Encode::encoded_size)
915				.sum::<usize>() +
916			Vec::<Extrinsic>::new().encoded_size();
917
918		block_on(txpool.submit_at(genesis_hash, SOURCE, extrinsics.clone())).unwrap();
919
920		block_on(txpool.maintain(chain_event(genesis_header.clone())));
921
922		let mut proposer_factory =
923			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
924
925		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
926
927		// Give it enough time
928		let deadline = time::Duration::from_secs(300);
929		let block = block_on(proposer.propose_block(ProposeArgs {
930			max_duration: deadline,
931			block_size_limit: Some(block_limit),
932			..Default::default()
933		}))
934		.map(|r| r.block)
935		.unwrap();
936
937		// Based on the block limit, one transaction shouldn't be included.
938		assert_eq!(block.extrinsics().len(), extrinsics_num - 1);
939
940		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
941
942		let block = block_on(
943			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
944		)
945		.map(|r| r.block)
946		.unwrap();
947
948		// Without a block limit we should include all of them
949		assert_eq!(block.extrinsics().len(), extrinsics_num);
950
951		let mut proposer_factory =
952			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
953
954		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
955
956		// Exact block_limit, which includes:
957		// 99 (header_size) + 718 (proof@initialize_block) + 246 (one Transfer extrinsic)
958		let block_limit = {
959			let builder = BlockBuilderBuilder::new(&*client)
960				.on_parent_block(genesis_header.hash())
961				.with_parent_block_number(0)
962				.enable_proof_recording()
963				.build()
964				.unwrap();
965			builder.estimate_block_size() + extrinsics[0].encoded_size()
966		};
967		let block = block_on(proposer.propose_block(ProposeArgs {
968			max_duration: deadline,
969			block_size_limit: Some(block_limit),
970			storage_proof_recorder: Some(Default::default()),
971			..Default::default()
972		}))
973		.map(|r| r.block)
974		.unwrap();
975
976		// The block limit was increased, but we now include the proof in the estimation of the
977		// block size and thus, only the `Transfer` will fit into the block. It reads more data
978		// than we have reserved in the block limit.
979		assert_eq!(block.extrinsics().len(), 1);
980	}
981
982	#[test]
983	fn should_keep_adding_transactions_after_exhausts_resources_before_soft_deadline() {
984		// given
985		let client = Arc::new(substrate_test_runtime_client::new());
986		let spawner = sp_core::testing::TaskExecutor::new();
987		let txpool = Arc::from(BasicPool::new_full(
988			Default::default(),
989			true.into(),
990			None,
991			spawner.clone(),
992			client.clone(),
993		));
994		let genesis_hash = client.info().genesis_hash;
995
996		let tiny = |nonce| {
997			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
998		};
999		let huge = |who| {
1000			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
1001				.signer(Sr25519Keyring::numeric(who))
1002				.build()
1003		};
1004
1005		block_on(
1006			txpool.submit_at(
1007				genesis_hash,
1008				SOURCE,
1009				// add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources
1010				(0..MAX_SKIPPED_TRANSACTIONS * 2)
1011					.into_iter()
1012					.map(huge)
1013					// and some transactions that are okay.
1014					.chain((0..MAX_SKIPPED_TRANSACTIONS as u64).into_iter().map(tiny))
1015					.collect(),
1016			),
1017		)
1018		.unwrap();
1019
1020		block_on(txpool.maintain(chain_event(
1021			client.expect_header(genesis_hash).expect("there should be header"),
1022		)));
1023		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3);
1024
1025		let mut proposer_factory =
1026			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
1027
1028		let cell = Mutex::new(time::Instant::now());
1029		let proposer = proposer_factory.init_with_now(
1030			&client.expect_header(genesis_hash).unwrap(),
1031			Box::new(move || {
1032				let mut value = cell.lock();
1033				let old = *value;
1034				*value = old + time::Duration::from_secs(1);
1035				old
1036			}),
1037		);
1038
1039		// when
1040		// give it enough time so that deadline is never triggered.
1041		let deadline = time::Duration::from_secs(900);
1042		let block = block_on(
1043			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
1044		)
1045		.map(|r| r.block)
1046		.unwrap();
1047
1048		// then block should have all non-exhaust resources extrinsics (+ the first one).
1049		assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1);
1050	}
1051
1052	#[test]
1053	fn should_only_skip_up_to_some_limit_after_soft_deadline() {
1054		// given
1055		let client = Arc::new(substrate_test_runtime_client::new());
1056		let spawner = sp_core::testing::TaskExecutor::new();
1057		let txpool = Arc::from(BasicPool::new_full(
1058			Default::default(),
1059			true.into(),
1060			None,
1061			spawner.clone(),
1062			client.clone(),
1063		));
1064		let genesis_hash = client.info().genesis_hash;
1065
1066		let tiny = |who| {
1067			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY))
1068				.signer(Sr25519Keyring::numeric(who))
1069				.nonce(1)
1070				.build()
1071		};
1072		let huge = |who| {
1073			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
1074				.signer(Sr25519Keyring::numeric(who))
1075				.build()
1076		};
1077
1078		block_on(
1079			txpool.submit_at(
1080				genesis_hash,
1081				SOURCE,
1082				(0..MAX_SKIPPED_TRANSACTIONS + 2)
1083					.into_iter()
1084					.map(huge)
1085					// and some transactions that are okay.
1086					.chain((0..MAX_SKIPPED_TRANSACTIONS + 2).into_iter().map(tiny))
1087					.collect(),
1088			),
1089		)
1090		.unwrap();
1091
1092		block_on(txpool.maintain(chain_event(
1093			client.expect_header(genesis_hash).expect("there should be header"),
1094		)));
1095		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 4);
1096
1097		let mut proposer_factory =
1098			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
1099
1100		let deadline = time::Duration::from_secs(600);
1101		let cell = Arc::new(Mutex::new((0, time::Instant::now())));
1102		let cell2 = cell.clone();
1103		let proposer = proposer_factory.init_with_now(
1104			&client.expect_header(genesis_hash).unwrap(),
1105			Box::new(move || {
1106				let mut value = cell.lock();
1107				let (called, old) = *value;
1108				// add time after deadline is calculated internally (hence 1)
1109				let increase = if called == 1 {
1110					// we start after the soft_deadline should have already been reached.
1111					deadline / 2
1112				} else {
1113					// but we make sure to never reach the actual deadline
1114					time::Duration::from_millis(0)
1115				};
1116				*value = (called + 1, old + increase);
1117				old
1118			}),
1119		);
1120
1121		let block = block_on(
1122			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
1123		)
1124		.map(|r| r.block)
1125		.unwrap();
1126
1127		// then the block should have one or two transactions. This maybe random as they are
1128		// processed in parallel. The same signer and consecutive nonces for huge and tiny
1129		// transactions guarantees that max two transactions will get to the block.
1130		assert!(
1131			(1..3).contains(&block.extrinsics().len()),
1132			"Block shall contain one or two extrinsics."
1133		);
1134		assert!(
1135			cell2.lock().0 > MAX_SKIPPED_TRANSACTIONS,
1136			"Not enough calls to current time, which indicates the test might have ended because of deadline, not soft deadline"
1137		);
1138	}
1139}