use codec::Encode;
use futures::{
    channel::oneshot,
    future,
    future::{Future, FutureExt},
};
use log::{debug, error, info, log_enabled, trace, warn, Level};
use prometheus_endpoint::Registry as PrometheusRegistry;
use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder};
use sc_proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics};
use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO};
use sc_transaction_pool_api::{InPoolTransaction, TransactionPool, TxInvalidityReportMap};
use sp_api::{ApiExt, CallApiAt, ProofRecorder, ProvideRuntimeApi};
use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend};
use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal};
use sp_core::traits::SpawnNamed;
use sp_inherents::InherentData;
use sp_runtime::{
    traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT},
    Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion,
};
use sp_trie::recorder::IgnoredNodes;
use std::{marker::PhantomData, pin::Pin, sync::Arc, time};

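/// Default block size limit in bytes used by [`Proposer`].
///
/// Can be overwritten by [`ProposerFactory::set_default_block_size_limit`] or by passing an
/// explicit `block_size_limit` when proposing a block.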
pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512;

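/// Default soft deadline, expressed as a percentage of the time left until the hard deadline.
///
/// Can be overwritten by [`ProposerFactory::set_soft_deadline`].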
const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50);

const LOG_TARGET: &'static str = "basic-authorship";

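/// [`Proposer`] factory.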
pub struct ProposerFactory<A, C, PR> {
    spawn_handle: Box<dyn SpawnNamed>,
    client: Arc<C>,
    transaction_pool: Arc<A>,
    metrics: PrometheusMetrics,
    default_block_size_limit: usize,
    soft_deadline_percent: Percent,
    telemetry: Option<TelemetryHandle>,
    include_proof_in_block_size_estimation: bool,
    _phantom: PhantomData<PR>,
}

impl<A, C, PR> Clone for ProposerFactory<A, C, PR> {
    fn clone(&self) -> Self {
        Self {
            spawn_handle: self.spawn_handle.clone(),
            client: self.client.clone(),
            transaction_pool: self.transaction_pool.clone(),
            metrics: self.metrics.clone(),
            default_block_size_limit: self.default_block_size_limit,
            soft_deadline_percent: self.soft_deadline_percent,
            telemetry: self.telemetry.clone(),
            include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
            _phantom: self._phantom,
        }
    }
}

impl<A, C> ProposerFactory<A, C, DisableProofRecording> {
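    /// Create a new proposer factory.
    ///
    /// Proof recording will be disabled when using proposers built by this instance to build
    /// blocks.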
    pub fn new(
        spawn_handle: impl SpawnNamed + 'static,
        client: Arc<C>,
        transaction_pool: Arc<A>,
        prometheus: Option<&PrometheusRegistry>,
        telemetry: Option<TelemetryHandle>,
    ) -> Self {
        ProposerFactory {
            spawn_handle: Box::new(spawn_handle),
            transaction_pool,
            metrics: PrometheusMetrics::new(prometheus),
            default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
            soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
            telemetry,
            client,
            include_proof_in_block_size_estimation: false,
            _phantom: PhantomData,
        }
    }
}

impl<A, C> ProposerFactory<A, C, EnableProofRecording> {
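    /// Create a new proposer factory with proof recording enabled.
    ///
    /// Each proposer created by this instance will record a proof while building a block.
    ///
    /// The proof is also included in the block size estimation. This can be disabled by calling
    /// [`ProposerFactory::disable_proof_in_block_size_estimation`].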
    pub fn with_proof_recording(
        spawn_handle: impl SpawnNamed + 'static,
        client: Arc<C>,
        transaction_pool: Arc<A>,
        prometheus: Option<&PrometheusRegistry>,
        telemetry: Option<TelemetryHandle>,
    ) -> Self {
        ProposerFactory {
            client,
            spawn_handle: Box::new(spawn_handle),
            transaction_pool,
            metrics: PrometheusMetrics::new(prometheus),
            default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
            soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
            telemetry,
            include_proof_in_block_size_estimation: true,
            _phantom: PhantomData,
        }
    }

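    /// Disable the proof inclusion when estimating the block size.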
    pub fn disable_proof_in_block_size_estimation(&mut self) {
        self.include_proof_in_block_size_estimation = false;
    }
}

impl<A, C, PR> ProposerFactory<A, C, PR> {
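    /// Set the default block size limit in bytes.
    ///
    /// The default value is [`DEFAULT_BLOCK_SIZE_LIMIT`]. It is used whenever no explicit
    /// `block_size_limit` is passed when proposing a block.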
    pub fn set_default_block_size_limit(&mut self, limit: usize) {
        self.default_block_size_limit = limit;
    }

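    /// Set the soft deadline percentage.
    ///
    /// The value is used to compute the soft deadline during block production. The soft deadline
    /// indicates where we should stop attempting to add transactions that exhaust resources.
    /// After the soft deadline is reached, we switch to a fixed-amount mode: after we see
    /// `MAX_SKIPPED_TRANSACTIONS` transactions which exhaust resources, we conclude that the
    /// block is full.
    ///
    /// Setting the value too low will significantly limit the number of transactions we try when
    /// they exhaust resources. Setting the value too high can potentially open a DoS vector,
    /// where many "exhaust resources" transactions are tried without success, so the block
    /// producer ends up creating an empty block.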
    pub fn set_soft_deadline(&mut self, percent: Percent) {
        self.soft_deadline_percent = percent;
    }
}

impl<Block, C, A, PR> ProposerFactory<A, C, PR>
where
    A: TransactionPool<Block = Block> + 'static,
    Block: BlockT,
    C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + Send + Sync + 'static,
    C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
{
    fn init_with_now(
        &mut self,
        parent_header: &<Block as BlockT>::Header,
        now: Box<dyn Fn() -> time::Instant + Send + Sync>,
    ) -> Proposer<Block, C, A, PR> {
        let parent_hash = parent_header.hash();

        info!(
            "🙌 Starting consensus session on top of parent {:?} (#{})",
            parent_hash,
            parent_header.number()
        );

        let proposer = Proposer::<_, _, _, PR> {
            spawn_handle: self.spawn_handle.clone(),
            client: self.client.clone(),
            parent_hash,
            parent_number: *parent_header.number(),
            transaction_pool: self.transaction_pool.clone(),
            now,
            metrics: self.metrics.clone(),
            default_block_size_limit: self.default_block_size_limit,
            soft_deadline_percent: self.soft_deadline_percent,
            telemetry: self.telemetry.clone(),
            _phantom: PhantomData,
            include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
        };

        proposer
    }
}

impl<A, Block, C, PR> sp_consensus::Environment<Block> for ProposerFactory<A, C, PR>
where
    A: TransactionPool<Block = Block> + 'static,
    Block: BlockT,
    C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
    C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
    PR: ProofRecording,
{
    type CreateProposer = future::Ready<Result<Self::Proposer, Self::Error>>;
    type Proposer = Proposer<Block, C, A, PR>;
    type Error = sp_blockchain::Error;

    fn init(&mut self, parent_header: &<Block as BlockT>::Header) -> Self::CreateProposer {
        future::ready(Ok(self.init_with_now(parent_header, Box::new(time::Instant::now))))
    }
}

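/// The proposer logic.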
pub struct Proposer<Block: BlockT, C, A: TransactionPool, PR> {
    spawn_handle: Box<dyn SpawnNamed>,
    client: Arc<C>,
    parent_hash: Block::Hash,
    parent_number: <<Block as BlockT>::Header as HeaderT>::Number,
    transaction_pool: Arc<A>,
    now: Box<dyn Fn() -> time::Instant + Send + Sync>,
    metrics: PrometheusMetrics,
    default_block_size_limit: usize,
    include_proof_in_block_size_estimation: bool,
    soft_deadline_percent: Percent,
    telemetry: Option<TelemetryHandle>,
    _phantom: PhantomData<PR>,
}

impl<A, Block, C, PR> sp_consensus::Proposer<Block> for Proposer<Block, C, A, PR>
where
    A: TransactionPool<Block = Block> + 'static,
    Block: BlockT,
    C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
    C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
    PR: ProofRecording,
{
    type Proposal =
        Pin<Box<dyn Future<Output = Result<Proposal<Block, PR::Proof>, Self::Error>> + Send>>;
    type Error = sp_blockchain::Error;
    type ProofRecording = PR;
    type Proof = PR::Proof;

    fn propose(
        self,
        inherent_data: InherentData,
        inherent_digests: Digest,
        max_duration: time::Duration,
        block_size_limit: Option<usize>,
    ) -> Self::Proposal {
        self.propose_block(ProposeArgs {
            inherent_data,
            inherent_digests,
            max_duration,
            block_size_limit,
            ignored_nodes_by_proof_recording: None,
        })
        .boxed()
    }
}

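/// Arguments for [`Proposer::propose_block`].
///
/// `ignored_nodes_by_proof_recording` lists trie nodes that the proof recorder should skip when
/// proof recording is enabled; the remaining fields mirror the parameters of
/// [`sp_consensus::Proposer::propose`].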
pub struct ProposeArgs<Block: BlockT> {
    pub inherent_data: InherentData,
    pub inherent_digests: Digest,
    pub max_duration: time::Duration,
    pub block_size_limit: Option<usize>,
    pub ignored_nodes_by_proof_recording: Option<IgnoredNodes<Block::Hash>>,
}

impl<Block: BlockT> Default for ProposeArgs<Block> {
    fn default() -> Self {
        Self {
            inherent_data: Default::default(),
            inherent_digests: Default::default(),
            max_duration: Default::default(),
            block_size_limit: None,
            ignored_nodes_by_proof_recording: None,
        }
    }
}

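/// If the block is full we will attempt to push at most this number of transactions before
/// quitting for real. It allows us to increase block utilization.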
const MAX_SKIPPED_TRANSACTIONS: usize = 8;

impl<A, Block, C, PR> Proposer<Block, C, A, PR>
where
    A: TransactionPool<Block = Block> + 'static,
    Block: BlockT,
    C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
    C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
    PR: ProofRecording,
{
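    /// Propose a new block.
    ///
    /// The proposing work is spawned onto a blocking task via the `spawn_handle`; the result is
    /// sent back over a oneshot channel and awaited here.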
    pub async fn propose_block(
        self,
        args: ProposeArgs<Block>,
    ) -> Result<Proposal<Block, PR::Proof>, sp_blockchain::Error> {
        let (tx, rx) = oneshot::channel();
        let spawn_handle = self.spawn_handle.clone();

        spawn_handle.spawn_blocking(
            "basic-authorship-proposer",
            None,
            async move {
                let res = self.propose_with(args).await;
                if tx.send(res).is_err() {
                    trace!(
                        target: LOG_TARGET,
                        "Could not send block production result to proposer!"
                    );
                }
            }
            .boxed(),
        );

        rx.await?.map_err(Into::into)
    }

    async fn propose_with(
        self,
        ProposeArgs {
            inherent_data,
            inherent_digests,
            max_duration,
            block_size_limit,
            ignored_nodes_by_proof_recording,
        }: ProposeArgs<Block>,
    ) -> Result<Proposal<Block, PR::Proof>, sp_blockchain::Error> {
        let deadline = (self.now)() + max_duration - max_duration / 10;
        let block_timer = time::Instant::now();
        let mut block_builder = BlockBuilderBuilder::new(&*self.client)
            .on_parent_block(self.parent_hash)
            .with_parent_block_number(self.parent_number)
            .with_proof_recorder(PR::ENABLED.then(|| {
                ProofRecorder::<Block>::with_ignored_nodes(
                    ignored_nodes_by_proof_recording.unwrap_or_default(),
                )
            }))
            .with_inherent_digests(inherent_digests)
            .build()?;

        self.apply_inherents(&mut block_builder, inherent_data)?;

        let mode = block_builder.extrinsic_inclusion_mode();
        let end_reason = match mode {
            ExtrinsicInclusionMode::AllExtrinsics =>
                self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?,
            ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden,
        };
        let (block, storage_changes, proof) = block_builder.build()?.into_inner();
        let block_took = block_timer.elapsed();

        let proof =
            PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?;

        self.print_summary(&block, end_reason, block_took, block_timer.elapsed());
        Ok(Proposal { block, proof, storage_changes })
    }

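    /// Calls `create_inherents` on the runtime with the given inherent data and pushes the
    /// returned inherent extrinsics into the block. Errors on mandatory inherents abort block
    /// production; other failing inherents are dropped with a warning.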
    fn apply_inherents(
        &self,
        block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
        inherent_data: InherentData,
    ) -> Result<(), sp_blockchain::Error> {
        let create_inherents_start = time::Instant::now();

        let inherent_identifiers = log_enabled!(target: LOG_TARGET, Level::Debug).then(|| {
            inherent_data
                .identifiers()
                .map(|id| String::from_utf8_lossy(id).to_string())
                .collect::<Vec<String>>()
        });

        let inherents = block_builder.create_inherents(inherent_data)?;
        let create_inherents_end = time::Instant::now();

        debug!(
            target: LOG_TARGET,
            "apply_inherents: Runtime provided {} inherents. Inherent identifiers present: {:?}",
            inherents.len(),
            inherent_identifiers
        );

        self.metrics.report(|metrics| {
            metrics.create_inherents_time.observe(
                create_inherents_end
                    .saturating_duration_since(create_inherents_start)
                    .as_secs_f64(),
            );
        });

        for inherent in inherents {
            match block_builder.push(inherent) {
                Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
                    warn!(
                        target: LOG_TARGET,
                        "⚠️ Dropping non-mandatory inherent from overweight block."
                    )
                },
                Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => {
                    error!(
                        "❌️ Mandatory inherent extrinsic returned error. Block cannot be produced."
                    );
                    return Err(ApplyExtrinsicFailed(Validity(e)))
                },
                Err(e) => {
                    warn!(
                        target: LOG_TARGET,
                        "❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e
                    );
                },
                Ok(_) => {},
            }
        }
        Ok(())
    }

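    /// Apply as many extrinsics as possible to the block.
    ///
    /// Transactions are taken from the pool in ready order until either the deadline is hit, the
    /// block size or weight limit is reached, or the pool runs out of ready transactions.
    /// Invalid transactions are collected and reported back to the pool.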
    async fn apply_extrinsics(
        &self,
        block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
        deadline: time::Instant,
        block_size_limit: Option<usize>,
    ) -> Result<EndProposingReason, sp_blockchain::Error> {
        let now = (self.now)();
        let left = deadline.saturating_duration_since(now);
        let left_micros: u64 = left.as_micros().saturated_into();
        let soft_deadline =
            now + time::Duration::from_micros(self.soft_deadline_percent.mul_floor(left_micros));
        let mut skipped = 0;
        let mut unqueue_invalid = TxInvalidityReportMap::new();
        let mut limit_hit_reason: Option<EndProposingReason> = None;

        let delay = deadline.saturating_duration_since((self.now)()) / 8;
        let mut pending_iterator =
            self.transaction_pool.ready_at_with_timeout(self.parent_hash, delay).await;

        let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit);

        debug!(
            target: LOG_TARGET,
            "Attempting to push transactions from the pool at {:?}.", self.parent_hash
        );
        let mut transaction_pushed = false;

        let end_reason = loop {
            let pending_tx = if let Some(pending_tx) = pending_iterator.next() {
                pending_tx
            } else {
                debug!(
                    target: LOG_TARGET,
                    "No more transactions, proceeding with proposing."
                );

                break limit_hit_reason.unwrap_or(EndProposingReason::NoMoreTransactions)
            };

            let now = (self.now)();
            if now > deadline {
                debug!(
                    target: LOG_TARGET,
                    "Consensus deadline reached when pushing block transactions, \
                    proceeding with proposing."
                );
                break limit_hit_reason.unwrap_or(EndProposingReason::HitDeadline)
            }

            let pending_tx_data = (**pending_tx.data()).clone();
            let pending_tx_hash = pending_tx.hash().clone();

            let block_size =
                block_builder.estimate_block_size(self.include_proof_in_block_size_estimation);
            if block_size + pending_tx_data.encoded_size() > block_size_limit {
                pending_iterator.report_invalid(&pending_tx);
                limit_hit_reason = Some(EndProposingReason::HitBlockSizeLimit);
                if skipped < MAX_SKIPPED_TRANSACTIONS {
                    skipped += 1;
                    debug!(
                        target: LOG_TARGET,
                        "Transaction would overflow the block size limit, \
                        but will try {} more transactions before quitting.",
                        MAX_SKIPPED_TRANSACTIONS - skipped,
                    );
                    continue
                } else if now < soft_deadline {
                    debug!(
                        target: LOG_TARGET,
                        "Transaction would overflow the block size limit, \
                        but we still have time before the soft deadline, so \
                        we will try a bit more."
                    );
                    continue
                } else {
                    debug!(
                        target: LOG_TARGET,
                        "Reached block size limit, proceeding with proposing."
                    );
                    break EndProposingReason::HitBlockSizeLimit
                }
            }

            trace!(target: LOG_TARGET, "[{:?}] Pushing to the block.", pending_tx_hash);
            match sc_block_builder::BlockBuilder::push(block_builder, pending_tx_data) {
                Ok(()) => {
                    transaction_pushed = true;
                    limit_hit_reason = None;
                    trace!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash);
                },
                Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
                    pending_iterator.report_invalid(&pending_tx);
                    limit_hit_reason = Some(EndProposingReason::HitBlockWeightLimit);
                    if skipped < MAX_SKIPPED_TRANSACTIONS {
                        skipped += 1;
                        debug!(target: LOG_TARGET,
                            "Block seems full, but will try {} more transactions before quitting.",
                            MAX_SKIPPED_TRANSACTIONS - skipped,
                        );
                    } else if (self.now)() < soft_deadline {
                        debug!(target: LOG_TARGET,
                            "Block seems full, but we still have time before the soft deadline, \
                            so we will try a bit more before quitting."
                        );
                    } else {
                        debug!(
                            target: LOG_TARGET,
                            "Reached block weight limit, proceeding with proposing."
                        );
                        break EndProposingReason::HitBlockWeightLimit
                    }
                },
                Err(e) => {
                    pending_iterator.report_invalid(&pending_tx);
                    debug!(
                        target: LOG_TARGET,
                        "[{:?}] Invalid transaction: {} at: {}", pending_tx_hash, e, self.parent_hash
                    );

                    let error_to_report = match e {
                        ApplyExtrinsicFailed(Validity(e)) => Some(e),
                        _ => None,
                    };

                    unqueue_invalid.insert(pending_tx_hash, error_to_report);
                },
            }
        };

        if matches!(end_reason, EndProposingReason::HitBlockSizeLimit) && !transaction_pushed {
            warn!(
                target: LOG_TARGET,
                "Hit block size limit of `{}` without including any transaction!", block_size_limit,
            );
        }

        self.transaction_pool
            .report_invalid(Some(self.parent_hash), unqueue_invalid)
            .await;
        Ok(end_reason)
    }

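    /// Report metrics and print a summary of the freshly built block to the log and telemetry.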
    fn print_summary(
        &self,
        block: &Block,
        end_reason: EndProposingReason,
        block_took: time::Duration,
        propose_took: time::Duration,
    ) {
        let extrinsics = block.extrinsics();
        self.metrics.report(|metrics| {
            metrics.number_of_transactions.set(extrinsics.len() as u64);
            metrics.block_constructed.observe(block_took.as_secs_f64());
            metrics.report_end_proposing_reason(end_reason);
            metrics.create_block_proposal_time.observe(propose_took.as_secs_f64());
        });

        let extrinsics_summary = if extrinsics.is_empty() {
            "no extrinsics".to_string()
        } else {
            format!(
                "extrinsics ({}): [{}]",
                extrinsics.len(),
                extrinsics
                    .iter()
                    .map(|xt| BlakeTwo256::hash_of(xt).to_string())
                    .collect::<Vec<_>>()
                    .join(", ")
            )
        };

        if log::log_enabled!(log::Level::Info) {
            info!(
                "🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; extrinsics_count: {}",
                block.header().number(),
                block_took.as_millis(),
                <Block as BlockT>::Hash::from(block.header().hash()),
                block.header().parent_hash(),
                end_reason,
                extrinsics.len()
            )
        } else if log::log_enabled!(log::Level::Trace) {
            trace!(
                "🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; {extrinsics_summary}",
                block.header().number(),
                block_took.as_millis(),
                <Block as BlockT>::Hash::from(block.header().hash()),
                block.header().parent_hash(),
                end_reason
            );
        }

        telemetry!(
            self.telemetry;
            CONSENSUS_INFO;
            "prepared_block_for_proposing";
            "number" => ?block.header().number(),
            "hash" => ?<Block as BlockT>::Hash::from(block.header().hash()),
        );
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use futures::executor::block_on;
    use parking_lot::Mutex;
    use sc_client_api::{Backend, TrieCacheContext};
    use sc_transaction_pool::BasicPool;
    use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource};
    use sp_api::Core;
    use sp_blockchain::HeaderBackend;
    use sp_consensus::{BlockOrigin, Environment};
    use sp_runtime::{generic::BlockId, traits::NumberFor, Perbill};
    use substrate_test_runtime_client::{
        prelude::*,
        runtime::{Block as TestBlock, Extrinsic, ExtrinsicBuilder, Transfer},
        TestClientBuilder, TestClientBuilderExt,
    };

    const SOURCE: TransactionSource = TransactionSource::External;

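    // `fill_block` ratios (in `Perbill` parts) used to build extrinsics of different weights:
    // `HUGE` nearly fills a block on its own, `MEDIUM` allows roughly two per block and `TINY`
    // is negligible. The tests below rely on these relative sizes.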
    const HUGE: u32 = 649000000;
    const MEDIUM: u32 = 250000000;
    const TINY: u32 = 1000;

    fn extrinsic(nonce: u64) -> Extrinsic {
        ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
    }

    fn chain_event<B: BlockT>(header: B::Header) -> ChainEvent<B>
    where
        NumberFor<B>: From<u64>,
    {
        ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }
    }

    #[test]
    fn should_cease_building_block_when_deadline_is_reached() {
        let client = Arc::new(substrate_test_runtime_client::new());
        let spawner = sp_core::testing::TaskExecutor::new();
        let txpool = Arc::from(BasicPool::new_full(
            Default::default(),
            true.into(),
            None,
            spawner.clone(),
            client.clone(),
        ));

        let hashof0 = client.info().genesis_hash;
        block_on(txpool.submit_at(hashof0, SOURCE, vec![extrinsic(0), extrinsic(1)])).unwrap();

        block_on(
            txpool.maintain(chain_event(
                client.expect_header(hashof0).expect("there should be header"),
            )),
        );

        let mut proposer_factory =
            ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

        let cell = Mutex::new((false, time::Instant::now()));
        let proposer = proposer_factory.init_with_now(
            &client.expect_header(hashof0).unwrap(),
            Box::new(move || {
                let mut value = cell.lock();
                if !value.0 {
                    value.0 = true;
                    return value.1
                }
                let old = value.1;
                let new = old + time::Duration::from_secs(1);
                *value = (true, new);
                old
            }),
        );

        let deadline = time::Duration::from_secs(3);
        let block = block_on(
            proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
        )
        .map(|r| r.block)
        .unwrap();

        assert_eq!(block.extrinsics().len(), 1);
        assert_eq!(txpool.ready().count(), 2);
    }

    #[test]
    fn should_not_panic_when_deadline_is_reached() {
        let client = Arc::new(substrate_test_runtime_client::new());
        let spawner = sp_core::testing::TaskExecutor::new();
        let txpool = Arc::from(BasicPool::new_full(
            Default::default(),
            true.into(),
            None,
            spawner.clone(),
            client.clone(),
        ));

        let mut proposer_factory =
            ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

        let cell = Mutex::new((false, time::Instant::now()));
        let proposer = proposer_factory.init_with_now(
            &client.expect_header(client.info().genesis_hash).unwrap(),
            Box::new(move || {
                let mut value = cell.lock();
                if !value.0 {
                    value.0 = true;
                    return value.1
                }
                let new = value.1 + time::Duration::from_secs(160);
                *value = (true, new);
                new
            }),
        );

        let deadline = time::Duration::from_secs(1);
        block_on(
            proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
        )
        .map(|r| r.block)
        .unwrap();
    }

    #[test]
    fn proposed_storage_changes_should_match_execute_block_storage_changes() {
        let (client, backend) = TestClientBuilder::new().build_with_backend();
        let client = Arc::new(client);
        let spawner = sp_core::testing::TaskExecutor::new();
        let txpool = Arc::from(BasicPool::new_full(
            Default::default(),
            true.into(),
            None,
            spawner.clone(),
            client.clone(),
        ));

        let genesis_hash = client.info().best_hash;

        block_on(txpool.submit_at(genesis_hash, SOURCE, vec![extrinsic(0)])).unwrap();

        block_on(
            txpool.maintain(chain_event(
                client
                    .expect_header(client.info().genesis_hash)
                    .expect("there should be header"),
            )),
        );

        let mut proposer_factory =
            ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

        let proposer = proposer_factory.init_with_now(
            &client.header(genesis_hash).unwrap().unwrap(),
            Box::new(move || time::Instant::now()),
        );

        let deadline = time::Duration::from_secs(9);
        let proposal = block_on(
            proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
        )
        .unwrap();

        assert_eq!(proposal.block.extrinsics().len(), 1);

        let api = client.runtime_api();
        api.execute_block(genesis_hash, proposal.block).unwrap();

        let state = backend.state_at(genesis_hash, TrieCacheContext::Untrusted).unwrap();

        let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap();

        assert_eq!(
            proposal.storage_changes.transaction_storage_root,
            storage_changes.transaction_storage_root,
        );
    }

    #[test]
    fn should_not_remove_invalid_transactions_from_the_same_sender_after_one_was_invalid() {
        let client = Arc::new(substrate_test_runtime_client::new());
        let spawner = sp_core::testing::TaskExecutor::new();
        let txpool = Arc::from(BasicPool::new_full(
            Default::default(),
            true.into(),
            None,
            spawner.clone(),
            client.clone(),
        ));

        let medium = |nonce| {
            ExtrinsicBuilder::new_fill_block(Perbill::from_parts(MEDIUM))
                .nonce(nonce)
                .build()
        };
        let huge = |nonce| {
            ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)).nonce(nonce).build()
        };

        block_on(txpool.submit_at(
            client.info().genesis_hash,
            SOURCE,
            vec![medium(0), medium(1), huge(2), medium(3), huge(4), medium(5), medium(6)],
        ))
        .unwrap();

        let mut proposer_factory =
            ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
        let mut propose_block = |client: &TestClient,
                                 parent_number,
                                 expected_block_extrinsics,
                                 expected_pool_transactions| {
            let hash = client.expect_block_hash_from_id(&BlockId::Number(parent_number)).unwrap();
            let proposer = proposer_factory.init_with_now(
                &client.expect_header(hash).unwrap(),
                Box::new(move || time::Instant::now()),
            );

            let deadline = time::Duration::from_secs(900);
            let block = block_on(
                proposer
                    .propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
            )
            .map(|r| r.block)
            .unwrap();

            assert_eq!(
                txpool.ready().count(),
                expected_pool_transactions,
                "at block: {}",
                block.header.number
            );
            assert_eq!(
                block.extrinsics().len(),
                expected_block_extrinsics,
                "at block: {}",
                block.header.number
            );

            block
        };

        let import_and_maintain = |client: Arc<TestClient>, block: TestBlock| {
            let hash = block.hash();
            block_on(client.import(BlockOrigin::Own, block)).unwrap();
            block_on(txpool.maintain(chain_event(
                client.expect_header(hash).expect("there should be header"),
            )));
        };

        block_on(
            txpool.maintain(chain_event(
                client
                    .expect_header(client.info().genesis_hash)
                    .expect("there should be header"),
            )),
        );
        assert_eq!(txpool.ready().count(), 7);

        let block = propose_block(&client, 0, 2, 7);
        import_and_maintain(client.clone(), block.clone());
        assert_eq!(txpool.ready().count(), 5);

        let block = propose_block(&client, 1, 1, 5);
        import_and_maintain(client.clone(), block.clone());
        assert_eq!(txpool.ready().count(), 4);

        let block = propose_block(&client, 2, 1, 4);
        import_and_maintain(client.clone(), block.clone());
        assert_eq!(txpool.ready().count(), 3);

        let block = propose_block(&client, 3, 1, 3);
        import_and_maintain(client.clone(), block.clone());
        assert_eq!(txpool.ready().count(), 2);

        let block = propose_block(&client, 4, 2, 2);
        import_and_maintain(client.clone(), block.clone());
        assert_eq!(txpool.ready().count(), 0);
    }

    #[test]
    fn should_cease_building_block_when_block_limit_is_reached() {
        let client = Arc::new(substrate_test_runtime_client::new());
        let spawner = sp_core::testing::TaskExecutor::new();
        let txpool = Arc::from(BasicPool::new_full(
            Default::default(),
            true.into(),
            None,
            spawner.clone(),
            client.clone(),
        ));
        let genesis_hash = client.info().genesis_hash;
        let genesis_header = client.expect_header(genesis_hash).expect("there should be header");

        let extrinsics_num = 5;
        let extrinsics = std::iter::once(
            Transfer {
                from: Sr25519Keyring::Alice.into(),
                to: Sr25519Keyring::Bob.into(),
                amount: 100,
                nonce: 0,
            }
            .into_unchecked_extrinsic(),
        )
        .chain((1..extrinsics_num as u64).map(extrinsic))
        .collect::<Vec<_>>();

        let block_limit = genesis_header.encoded_size() +
            extrinsics
                .iter()
                .take(extrinsics_num - 1)
                .map(Encode::encoded_size)
                .sum::<usize>() +
            Vec::<Extrinsic>::new().encoded_size();

        block_on(txpool.submit_at(genesis_hash, SOURCE, extrinsics.clone())).unwrap();

        block_on(txpool.maintain(chain_event(genesis_header.clone())));

        let mut proposer_factory =
            ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

        let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

        let deadline = time::Duration::from_secs(300);
        let block = block_on(proposer.propose_block(ProposeArgs {
            max_duration: deadline,
            block_size_limit: Some(block_limit),
            ..Default::default()
        }))
        .map(|r| r.block)
        .unwrap();

        assert_eq!(block.extrinsics().len(), extrinsics_num - 1);

        let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

        let block = block_on(
            proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
        )
        .map(|r| r.block)
        .unwrap();

        assert_eq!(block.extrinsics().len(), extrinsics_num);

        let mut proposer_factory = ProposerFactory::with_proof_recording(
            spawner.clone(),
            client.clone(),
            txpool.clone(),
            None,
            None,
        );

        let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

        let block_limit = {
            let builder = BlockBuilderBuilder::new(&*client)
                .on_parent_block(genesis_header.hash())
                .with_parent_block_number(0)
                .enable_proof_recording()
                .build()
                .unwrap();
            builder.estimate_block_size(true) + extrinsics[0].encoded_size()
        };
        let block = block_on(proposer.propose_block(ProposeArgs {
            max_duration: deadline,
            block_size_limit: Some(block_limit),
            ..Default::default()
        }))
        .map(|r| r.block)
        .unwrap();

        assert_eq!(block.extrinsics().len(), 1);
    }

    #[test]
    fn should_keep_adding_transactions_after_exhausts_resources_before_soft_deadline() {
        let client = Arc::new(substrate_test_runtime_client::new());
        let spawner = sp_core::testing::TaskExecutor::new();
        let txpool = Arc::from(BasicPool::new_full(
            Default::default(),
            true.into(),
            None,
            spawner.clone(),
            client.clone(),
        ));
        let genesis_hash = client.info().genesis_hash;

        let tiny = |nonce| {
            ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
        };
        let huge = |who| {
            ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
                .signer(Sr25519Keyring::numeric(who))
                .build()
        };

        block_on(
            txpool.submit_at(
                genesis_hash,
                SOURCE,
                (0..MAX_SKIPPED_TRANSACTIONS * 2)
                    .into_iter()
                    .map(huge)
                    .chain((0..MAX_SKIPPED_TRANSACTIONS as u64).into_iter().map(tiny))
                    .collect(),
            ),
        )
        .unwrap();

        block_on(txpool.maintain(chain_event(
            client.expect_header(genesis_hash).expect("there should be header"),
        )));
        assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3);

        let mut proposer_factory =
            ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

        let cell = Mutex::new(time::Instant::now());
        let proposer = proposer_factory.init_with_now(
            &client.expect_header(genesis_hash).unwrap(),
            Box::new(move || {
                let mut value = cell.lock();
                let old = *value;
                *value = old + time::Duration::from_secs(1);
                old
            }),
        );

        let deadline = time::Duration::from_secs(900);
        let block = block_on(
            proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
        )
        .map(|r| r.block)
        .unwrap();

        assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1);
    }

    #[test]
    fn should_only_skip_up_to_some_limit_after_soft_deadline() {
        let client = Arc::new(substrate_test_runtime_client::new());
        let spawner = sp_core::testing::TaskExecutor::new();
        let txpool = Arc::from(BasicPool::new_full(
            Default::default(),
            true.into(),
            None,
            spawner.clone(),
            client.clone(),
        ));
        let genesis_hash = client.info().genesis_hash;

        let tiny = |who| {
            ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY))
                .signer(Sr25519Keyring::numeric(who))
                .nonce(1)
                .build()
        };
        let huge = |who| {
            ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
                .signer(Sr25519Keyring::numeric(who))
                .build()
        };

        block_on(
            txpool.submit_at(
                genesis_hash,
                SOURCE,
                (0..MAX_SKIPPED_TRANSACTIONS + 2)
                    .into_iter()
                    .map(huge)
                    .chain((0..MAX_SKIPPED_TRANSACTIONS + 2).into_iter().map(tiny))
                    .collect(),
            ),
        )
        .unwrap();

        block_on(txpool.maintain(chain_event(
            client.expect_header(genesis_hash).expect("there should be header"),
        )));
        assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 4);

        let mut proposer_factory =
            ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

        let deadline = time::Duration::from_secs(600);
        let cell = Arc::new(Mutex::new((0, time::Instant::now())));
        let cell2 = cell.clone();
        let proposer = proposer_factory.init_with_now(
            &client.expect_header(genesis_hash).unwrap(),
            Box::new(move || {
                let mut value = cell.lock();
                let (called, old) = *value;
                let increase = if called == 1 {
                    deadline / 2
                } else {
                    time::Duration::from_millis(0)
                };
                *value = (called + 1, old + increase);
                old
            }),
        );

        let block = block_on(
            proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
        )
        .map(|r| r.block)
        .unwrap();

        assert!(
            (1..3).contains(&block.extrinsics().len()),
            "Block shall contain one or two extrinsics."
        );
        assert!(
            cell2.lock().0 > MAX_SKIPPED_TRANSACTIONS,
            "Not enough calls to current time, which indicates the test might have ended because of deadline, not soft deadline"
        );
    }
}