// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//! Substrate Client

21use super::{
22	block_rules::{BlockRules, LookupResult as BlockLookupResult},
23	CodeProvider,
24};
25use crate::client::notification_pinning::NotificationPinningWorker;
26use log::{debug, info, trace, warn};
27use parking_lot::{Mutex, RwLock};
28use prometheus_endpoint::Registry;
29use rand::Rng;
30use sc_chain_spec::{resolve_state_version_from_wasm, BuildGenesisBlock};
31use sc_client_api::{
32	backend::{
33		self, apply_aux, BlockImportOperation, ClientImportOperation, FinalizeSummary, Finalizer,
34		ImportNotificationAction, ImportSummary, LockImportRun, NewBlockState, StorageProvider,
35	},
36	client::{
37		BadBlocks, BlockBackend, BlockImportNotification, BlockOf, BlockchainEvents, ClientInfo,
38		FinalityNotification, FinalityNotifications, ForkBlocks, ImportNotifications,
39		PreCommitActions, ProvideUncles,
40	},
41	execution_extensions::ExecutionExtensions,
42	notifications::{StorageEventStream, StorageNotifications},
43	CallExecutor, ExecutorProvider, KeysIter, OnFinalityAction, OnImportAction, PairsIter,
44	ProofProvider, TrieCacheContext, UnpinWorkerMessage, UsageProvider,
45};
46use sc_consensus::{
47	BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction,
48};
49use sc_executor::RuntimeVersion;
50use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO};
51use sp_api::{
52	ApiExt, ApiRef, CallApiAt, CallApiAtParams, ConstructRuntimeApi, Core as CoreApi,
53	ProvideRuntimeApi,
54};
55use sp_blockchain::{
56	self as blockchain, Backend as ChainBackend, CachedHeaderMetadata, Error,
57	HeaderBackend as ChainHeaderBackend, HeaderMetadata, Info as BlockchainInfo,
58};
59use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError};
60
61use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender};
62use sp_core::{
63	storage::{ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, StorageKey},
64	traits::{CallContext, SpawnNamed},
65};
66use sp_runtime::{
67	generic::{BlockId, SignedBlock},
68	traits::{
69		Block as BlockT, BlockIdTo, HashingFor, Header as HeaderT, NumberFor, One,
70		SaturatedConversion, Zero,
71	},
72	Justification, Justifications, StateVersion,
73};
74use sp_state_machine::{
75	prove_child_read, prove_range_read_with_child_with_size, prove_read,
76	read_range_proof_check_with_child_on_proving_backend, Backend as StateBackend,
77	ChildStorageCollection, KeyValueStates, KeyValueStorageLevel, StorageCollection,
78	MAX_NESTED_TRIE_DEPTH,
79};
80use sp_trie::{proof_size_extension::ProofSizeExt, CompactProof, MerkleValue, StorageProof};
81use std::{
82	collections::{HashMap, HashSet},
83	marker::PhantomData,
84	path::PathBuf,
85	sync::Arc,
86};
87
88use super::call_executor::LocalCallExecutor;
89use sp_core::traits::CodeExecutor;
90
91type NotificationSinks<T> = Mutex<Vec<TracingUnboundedSender<T>>>;
92
93/// Substrate Client
94pub struct Client<B, E, Block, RA>
95where
96	Block: BlockT,
97{
98	backend: Arc<B>,
99	executor: E,
100	storage_notifications: StorageNotifications<Block>,
101	import_notification_sinks: NotificationSinks<BlockImportNotification<Block>>,
102	every_import_notification_sinks: NotificationSinks<BlockImportNotification<Block>>,
103	finality_notification_sinks: NotificationSinks<FinalityNotification<Block>>,
104	// Collects auxiliary operations to be performed atomically together with
105	// block import operations.
106	import_actions: Mutex<Vec<OnImportAction<Block>>>,
107	// Collects auxiliary operations to be performed atomically together with
108	// block finalization operations.
109	finality_actions: Mutex<Vec<OnFinalityAction<Block>>>,
110	// Holds the block hash currently being imported. TODO: replace this with block queue.
111	importing_block: RwLock<Option<Block::Hash>>,
112	block_rules: BlockRules<Block>,
113	config: ClientConfig<Block>,
114	telemetry: Option<TelemetryHandle>,
115	unpin_worker_sender: TracingUnboundedSender<UnpinWorkerMessage<Block>>,
116	code_provider: CodeProvider<Block, B, E>,
117	_phantom: PhantomData<RA>,
118}
119
/// Used when importing a block, where additional changes are made after the runtime
/// has executed.
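///
/// For example, `post()` always yields the header that includes any post-runtime digest
/// items. A minimal illustrative sketch (`pre` and `post` stand for hypothetical headers):
///
/// ```ignore
/// let same = PrePostHeader::Same(pre.clone());
/// assert!(same.post() == &pre);
///
/// let different = PrePostHeader::Different(pre, post.clone());
/// assert!(different.post() == &post);
/// ```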
122enum PrePostHeader<H> {
123	/// they are the same: no post-runtime digest items.
124	Same(H),
125	/// different headers (pre, post).
126	Different(H, H),
127}
128
129impl<H> PrePostHeader<H> {
130	/// get a reference to the "post-header" -- the header as it should be
131	/// after all changes are applied.
132	fn post(&self) -> &H {
133		match *self {
134			PrePostHeader::Same(ref h) => h,
135			PrePostHeader::Different(_, ref h) => h,
136		}
137	}
138
139	/// convert to the "post-header" -- the header as it should be after
140	/// all changes are applied.
141	fn into_post(self) -> H {
142		match self {
143			PrePostHeader::Same(h) => h,
144			PrePostHeader::Different(_, h) => h,
145		}
146	}
147}
148
149enum PrepareStorageChangesResult<Block: BlockT> {
150	Discard(ImportResult),
151	Import(Option<sc_consensus::StorageChanges<Block>>),
}

/// Client configuration items.
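///
/// # Example
///
/// A sketch of overriding a few of the fields defined below on top of the defaults:
///
/// ```ignore
/// let config = ClientConfig::<Block> {
/// 	offchain_indexing_api: true,
/// 	enable_import_proof_recording: true,
/// 	..Default::default()
/// };
/// ```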
154#[derive(Debug, Clone)]
155pub struct ClientConfig<Block: BlockT> {
156	/// Enable the offchain worker db.
157	pub offchain_worker_enabled: bool,
158	/// If true, allows access from the runtime to write into offchain worker db.
159	pub offchain_indexing_api: bool,
160	/// Path where WASM files exist to override the on-chain WASM.
161	pub wasm_runtime_overrides: Option<PathBuf>,
162	/// Skip writing genesis state on first start.
163	pub no_genesis: bool,
	/// Map of WASM runtime substitutes, each applied starting at the child of the given block
	/// and used until the runtime version no longer matches.
166	pub wasm_runtime_substitutes: HashMap<NumberFor<Block>, Vec<u8>>,
167	/// Enable recording of storage proofs during block import
168	pub enable_import_proof_recording: bool,
169}
170
171impl<Block: BlockT> Default for ClientConfig<Block> {
172	fn default() -> Self {
173		Self {
174			offchain_worker_enabled: false,
175			offchain_indexing_api: false,
176			wasm_runtime_overrides: None,
177			no_genesis: false,
178			wasm_runtime_substitutes: HashMap::new(),
179			enable_import_proof_recording: false,
180		}
181	}
182}
183
/// Create a client with the explicitly provided backend.
/// This is useful for testing backend implementations.
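///
/// # Example
///
/// A minimal sketch, assuming `backend`, `executor`, `genesis_block_builder` and
/// `spawn_handle` are provided by the surrounding test setup and `RuntimeApi` is the
/// node's runtime API type (none of these are defined here):
///
/// ```ignore
/// let client = new_with_backend::<_, _, Block, _, RuntimeApi>(
/// 	backend,
/// 	executor,
/// 	genesis_block_builder,
/// 	spawn_handle,
/// 	None,                     // no Prometheus registry
/// 	None,                     // no telemetry
/// 	ClientConfig::default(),
/// )?;
/// ```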
186pub fn new_with_backend<B, E, Block, G, RA>(
187	backend: Arc<B>,
188	executor: E,
189	genesis_block_builder: G,
190	spawn_handle: Box<dyn SpawnNamed>,
191	prometheus_registry: Option<Registry>,
192	telemetry: Option<TelemetryHandle>,
193	config: ClientConfig<Block>,
194) -> sp_blockchain::Result<Client<B, LocalCallExecutor<Block, B, E>, Block, RA>>
195where
196	E: CodeExecutor + sc_executor::RuntimeVersionOf,
197	G: BuildGenesisBlock<
198		Block,
199		BlockImportOperation = <B as backend::Backend<Block>>::BlockImportOperation,
200	>,
201	Block: BlockT,
202	B: backend::LocalBackend<Block> + 'static,
203{
204	let extensions = ExecutionExtensions::new(None, Arc::new(executor.clone()));
205
206	let call_executor =
207		LocalCallExecutor::new(backend.clone(), executor, config.clone(), extensions)?;
208
209	Client::new(
210		backend,
211		call_executor,
212		spawn_handle,
213		genesis_block_builder,
214		Default::default(),
215		Default::default(),
216		prometheus_registry,
217		telemetry,
218		config,
219	)
220}
221
222impl<B, E, Block, RA> BlockOf for Client<B, E, Block, RA>
223where
224	B: backend::Backend<Block>,
225	E: CallExecutor<Block>,
226	Block: BlockT,
227{
228	type Type = Block;
229}
230
231impl<B, E, Block, RA> LockImportRun<Block, B> for Client<B, E, Block, RA>
232where
233	B: backend::Backend<Block>,
234	E: CallExecutor<Block>,
235	Block: BlockT,
236{
237	fn lock_import_and_run<R, Err, F>(&self, f: F) -> Result<R, Err>
238	where
239		F: FnOnce(&mut ClientImportOperation<Block, B>) -> Result<R, Err>,
240		Err: From<sp_blockchain::Error>,
241	{
242		let inner = || {
243			let _import_lock = self.backend.get_import_lock().write();
244
245			let mut op = ClientImportOperation {
246				op: self.backend.begin_operation()?,
247				notify_imported: None,
248				notify_finalized: None,
249			};
250
251			let r = f(&mut op)?;
252
253			let ClientImportOperation { mut op, notify_imported, notify_finalized } = op;
254
255			let finality_notification = notify_finalized.map(|summary| {
256				FinalityNotification::from_summary(summary, self.unpin_worker_sender.clone())
257			});
258
259			let (import_notification, storage_changes, import_notification_action) =
260				match notify_imported {
261					Some(mut summary) => {
262						let import_notification_action = summary.import_notification_action;
263						let storage_changes = summary.storage_changes.take();
264						(
265							Some(BlockImportNotification::from_summary(
266								summary,
267								self.unpin_worker_sender.clone(),
268							)),
269							storage_changes,
270							import_notification_action,
271						)
272					},
273					None => (None, None, ImportNotificationAction::None),
274				};
275
276			if let Some(ref notification) = finality_notification {
277				for action in self.finality_actions.lock().iter_mut() {
278					op.insert_aux(action(notification))?;
279				}
280			}
281			if let Some(ref notification) = import_notification {
282				for action in self.import_actions.lock().iter_mut() {
283					op.insert_aux(action(notification))?;
284				}
285			}
286
287			self.backend.commit_operation(op)?;
288
289			// We need to pin the block in the backend once
290			// for each notification. Once all notifications are
291			// dropped, the block will be unpinned automatically.
292			if let Some(ref notification) = finality_notification {
293				if let Err(err) = self.backend.pin_block(notification.hash) {
294					debug!(
295						"Unable to pin block for finality notification. hash: {}, Error: {}",
296						notification.hash, err
297					);
298				} else {
299					let _ = self
300						.unpin_worker_sender
301						.unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash))
302						.map_err(|e| {
303							log::error!(
304								"Unable to send AnnouncePin worker message for finality: {e}"
305							)
306						});
307				}
308			}
309
310			if let Some(ref notification) = import_notification {
311				if let Err(err) = self.backend.pin_block(notification.hash) {
312					debug!(
313						"Unable to pin block for import notification. hash: {}, Error: {}",
314						notification.hash, err
315					);
316				} else {
317					let _ = self
318						.unpin_worker_sender
319						.unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash))
320						.map_err(|e| {
321							log::error!("Unable to send AnnouncePin worker message for import: {e}")
322						});
323				};
324			}
325
326			self.notify_finalized(finality_notification)?;
327			self.notify_imported(import_notification, import_notification_action, storage_changes)?;
328
329			Ok(r)
330		};
331
332		let result = inner();
333		*self.importing_block.write() = None;
334
335		result
336	}
337}
338
339impl<B, E, Block, RA> LockImportRun<Block, B> for &Client<B, E, Block, RA>
340where
341	Block: BlockT,
342	B: backend::Backend<Block>,
343	E: CallExecutor<Block>,
344{
345	fn lock_import_and_run<R, Err, F>(&self, f: F) -> Result<R, Err>
346	where
347		F: FnOnce(&mut ClientImportOperation<Block, B>) -> Result<R, Err>,
348		Err: From<sp_blockchain::Error>,
349	{
350		(**self).lock_import_and_run(f)
351	}
352}
353
354impl<B, E, Block, RA> Client<B, E, Block, RA>
355where
356	B: backend::Backend<Block>,
357	E: CallExecutor<Block>,
358	Block: BlockT,
359	Block::Header: Clone,
360{
361	/// Creates new Substrate Client with given blockchain and code executor.
362	pub fn new<G>(
363		backend: Arc<B>,
364		executor: E,
365		spawn_handle: Box<dyn SpawnNamed>,
366		genesis_block_builder: G,
367		fork_blocks: ForkBlocks<Block>,
368		bad_blocks: BadBlocks<Block>,
369		prometheus_registry: Option<Registry>,
370		telemetry: Option<TelemetryHandle>,
371		config: ClientConfig<Block>,
372	) -> sp_blockchain::Result<Self>
373	where
374		G: BuildGenesisBlock<
375			Block,
376			BlockImportOperation = <B as backend::Backend<Block>>::BlockImportOperation,
377		>,
378		E: Clone,
379		B: 'static,
380	{
381		let info = backend.blockchain().info();
382		if info.finalized_state.is_none() {
383			let (genesis_block, mut op) = genesis_block_builder.build_genesis_block()?;
384			info!(
385				"🔨 Initializing Genesis block/state (state: {}, header-hash: {})",
386				genesis_block.header().state_root(),
387				genesis_block.header().hash()
388			);
389			// Genesis may be written after some blocks have been imported and finalized.
390			// So we only finalize it when the database is empty.
391			let block_state = if info.best_hash == Default::default() {
392				NewBlockState::Final
393			} else {
394				NewBlockState::Normal
395			};
396			let (header, body) = genesis_block.deconstruct();
397			op.set_block_data(header, Some(body), None, None, block_state)?;
398			backend.commit_operation(op)?;
399		}
400
401		let (unpin_worker_sender, rx) = tracing_unbounded::<UnpinWorkerMessage<Block>>(
402			"notification-pinning-worker-channel",
403			10_000,
404		);
405		let unpin_worker = NotificationPinningWorker::new(rx, backend.clone());
406		spawn_handle.spawn("notification-pinning-worker", None, Box::pin(unpin_worker.run()));
407		let code_provider = CodeProvider::new(&config, executor.clone(), backend.clone())?;
408
409		Ok(Client {
410			backend,
411			executor,
412			storage_notifications: StorageNotifications::new(prometheus_registry),
413			import_notification_sinks: Default::default(),
414			every_import_notification_sinks: Default::default(),
415			finality_notification_sinks: Default::default(),
416			import_actions: Default::default(),
417			finality_actions: Default::default(),
418			importing_block: Default::default(),
419			block_rules: BlockRules::new(fork_blocks, bad_blocks),
420			config,
421			telemetry,
422			unpin_worker_sender,
423			code_provider,
424			_phantom: Default::default(),
425		})
426	}
427
	/// Returns a reference to the block import notification sinks.
	/// Useful for test environments.
430	pub fn import_notification_sinks(&self) -> &NotificationSinks<BlockImportNotification<Block>> {
431		&self.import_notification_sinks
432	}
433
	/// Returns a reference to the finality notification sinks.
	/// Useful for test environments.
436	pub fn finality_notification_sinks(&self) -> &NotificationSinks<FinalityNotification<Block>> {
437		&self.finality_notification_sinks
438	}
439
440	/// Get a reference to the state at a given block.
441	pub fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result<B::State> {
442		self.backend.state_at(hash, TrieCacheContext::Untrusted)
443	}
444
	/// Get the code at a given block.
	///
	/// This takes any potential substitutes into account, but ignores overrides.
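	///
	/// # Example
	///
	/// A sketch, assuming `client` is a `Client` instance and `hash` is a known block hash:
	///
	/// ```ignore
	/// let wasm_blob: Vec<u8> = client.code_at(hash)?;
	/// let version = client.runtime_version_at(hash)?;
	/// println!("runtime {} ships {} bytes of code", version.spec_version, wasm_blob.len());
	/// ```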
448	pub fn code_at(&self, hash: Block::Hash) -> sp_blockchain::Result<Vec<u8>> {
449		self.code_provider.code_at_ignoring_overrides(hash)
450	}
451
452	/// Get the RuntimeVersion at a given block.
453	pub fn runtime_version_at(&self, hash: Block::Hash) -> sp_blockchain::Result<RuntimeVersion> {
454		CallExecutor::runtime_version(&self.executor, hash)
455	}
456
457	/// Apply a checked and validated block to an operation.
458	fn apply_block(
459		&self,
460		operation: &mut ClientImportOperation<Block, B>,
461		import_block: BlockImportParams<Block>,
462		storage_changes: Option<sc_consensus::StorageChanges<Block>>,
463	) -> sp_blockchain::Result<ImportResult>
464	where
465		Self: ProvideRuntimeApi<Block>,
466		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
467	{
468		let BlockImportParams {
469			origin,
470			header,
471			justifications,
472			post_digests,
473			body,
474			indexed_body,
475			finalized,
476			auxiliary,
477			fork_choice,
478			intermediates,
479			import_existing,
480			create_gap,
481			..
482		} = import_block;
483
484		if !intermediates.is_empty() {
485			return Err(Error::IncompletePipeline)
486		}
487
488		let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?;
489
490		let import_headers = if post_digests.is_empty() {
491			PrePostHeader::Same(header)
492		} else {
493			let mut post_header = header.clone();
494			for item in post_digests {
495				post_header.digest_mut().push(item);
496			}
497			PrePostHeader::Different(header, post_header)
498		};
499
500		let hash = import_headers.post().hash();
501		let height = (*import_headers.post().number()).saturated_into::<u64>();
502
503		*self.importing_block.write() = Some(hash);
504
505		operation.op.set_create_gap(create_gap);
506
507		let result = self.execute_and_import_block(
508			operation,
509			origin,
510			hash,
511			import_headers,
512			justifications,
513			body,
514			indexed_body,
515			storage_changes,
516			finalized,
517			auxiliary,
518			fork_choice,
519			import_existing,
520		);
521
522		if let Ok(ImportResult::Imported(ref aux)) = result {
523			if aux.is_new_best {
				// Don't send telemetry block import events for every block during initial sync,
				// to avoid spamming the telemetry server; instead, these events are sent randomly
				// at a rate of 1/10.
527				if origin != BlockOrigin::NetworkInitialSync || rand::thread_rng().gen_bool(0.1) {
528					telemetry!(
529						self.telemetry;
530						SUBSTRATE_INFO;
531						"block.import";
532						"height" => height,
533						"best" => ?hash,
534						"origin" => ?origin
535					);
536				}
537			}
538		}
539
540		result
541	}
542
543	fn execute_and_import_block(
544		&self,
545		operation: &mut ClientImportOperation<Block, B>,
546		origin: BlockOrigin,
547		hash: Block::Hash,
548		import_headers: PrePostHeader<Block::Header>,
549		justifications: Option<Justifications>,
550		body: Option<Vec<Block::Extrinsic>>,
551		indexed_body: Option<Vec<Vec<u8>>>,
552		storage_changes: Option<sc_consensus::StorageChanges<Block>>,
553		finalized: bool,
554		aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
555		fork_choice: ForkChoiceStrategy,
556		import_existing: bool,
557	) -> sp_blockchain::Result<ImportResult>
558	where
559		Self: ProvideRuntimeApi<Block>,
560		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
561	{
562		let parent_hash = *import_headers.post().parent_hash();
563		let status = self.backend.blockchain().status(hash)?;
564		let parent_exists =
565			self.backend.blockchain().status(parent_hash)? == blockchain::BlockStatus::InChain;
566		match (import_existing, status) {
567			(false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain),
568			(false, blockchain::BlockStatus::Unknown) => {},
569			(true, blockchain::BlockStatus::InChain) => {},
570			(true, blockchain::BlockStatus::Unknown) => {},
571		}
572
573		let info = self.backend.blockchain().info();
574		let gap_block =
575			info.block_gap.map_or(false, |gap| *import_headers.post().number() == gap.start);
576
		// The block is lower than our last finalized block, so importing it would revert
		// finality; refuse the import.
579		if status == blockchain::BlockStatus::Unknown &&
580			*import_headers.post().number() <= info.finalized_number &&
581			!gap_block
582		{
583			return Err(sp_blockchain::Error::NotInFinalizedChain)
584		}
585
586		// this is a fairly arbitrary choice of where to draw the line on making notifications,
587		// but the general goal is to only make notifications when we are already fully synced
588		// and get a new chain head.
589		let make_notifications = match origin {
590			BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast =>
591				true,
592			BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false,
593		};
594
595		let storage_changes = match storage_changes {
596			Some(storage_changes) => {
597				let storage_changes = match storage_changes {
598					sc_consensus::StorageChanges::Changes(storage_changes) => {
599						self.backend.begin_state_operation(&mut operation.op, parent_hash)?;
600						let (main_sc, child_sc, offchain_sc, tx, _, tx_index) =
601							storage_changes.into_inner();
602
603						if self.config.offchain_indexing_api {
604							operation.op.update_offchain_storage(offchain_sc)?;
605						}
606
607						operation.op.update_db_storage(tx)?;
608						operation.op.update_storage(main_sc.clone(), child_sc.clone())?;
609						operation.op.update_transaction_index(tx_index)?;
610
611						Some((main_sc, child_sc))
612					},
613					sc_consensus::StorageChanges::Import(changes) => {
614						let mut storage = sp_storage::Storage::default();
615						for state in changes.state.0.into_iter() {
616							if state.parent_storage_keys.is_empty() && state.state_root.is_empty() {
617								for (key, value) in state.key_values.into_iter() {
618									storage.top.insert(key, value);
619								}
620							} else {
621								for parent_storage in state.parent_storage_keys {
622									let storage_key = PrefixedStorageKey::new_ref(&parent_storage);
623									let storage_key =
624										match ChildType::from_prefixed_key(storage_key) {
625											Some((ChildType::ParentKeyId, storage_key)) =>
626												storage_key,
627											None =>
628												return Err(Error::Backend(
629													"Invalid child storage key.".to_string(),
630												)),
631										};
632									let entry = storage
633										.children_default
634										.entry(storage_key.to_vec())
635										.or_insert_with(|| StorageChild {
636											data: Default::default(),
637											child_info: ChildInfo::new_default(storage_key),
638										});
639									for (key, value) in state.key_values.iter() {
640										entry.data.insert(key.clone(), value.clone());
641									}
642								}
643							}
						}

						// This is used by fast sync so that the runtime version can be resolved
						// from the changes.
648						let state_version = resolve_state_version_from_wasm::<_, HashingFor<Block>>(
649							&storage,
650							&self.executor,
651						)?;
652						let state_root = operation.op.reset_storage(storage, state_version)?;
653						if state_root != *import_headers.post().state_root() {
654							// State root mismatch when importing state. This should not happen in
655							// safe fast sync mode, but may happen in unsafe mode.
656							warn!("Error importing state: State root mismatch.");
657							return Err(Error::InvalidStateRoot)
658						}
659						None
660					},
661				};
662
663				storage_changes
664			},
665			None => None,
666		};
667
668		// Ensure parent chain is finalized to maintain invariant that finality is called
669		// sequentially.
670		if finalized && parent_exists && info.finalized_hash != parent_hash {
671			self.apply_finality_with_block_hash(
672				operation,
673				parent_hash,
674				None,
675				&info,
676				make_notifications,
677			)?;
678		}
679
680		let is_new_best = !gap_block &&
681			(finalized ||
682				match fork_choice {
683					ForkChoiceStrategy::LongestChain =>
684						import_headers.post().number() > &info.best_number,
685					ForkChoiceStrategy::Custom(v) => v,
686				});
687
688		let leaf_state = if finalized {
689			NewBlockState::Final
690		} else if is_new_best {
691			NewBlockState::Best
692		} else {
693			NewBlockState::Normal
694		};
695
696		let tree_route = if is_new_best && info.best_hash != parent_hash && parent_exists {
697			let route_from_best =
698				sp_blockchain::tree_route(self.backend.blockchain(), info.best_hash, parent_hash)?;
699			Some(route_from_best)
700		} else {
701			None
702		};
703
704		trace!(
705			"Imported {}, (#{}), best={}, origin={:?}",
706			hash,
707			import_headers.post().number(),
708			is_new_best,
709			origin,
710		);
711
712		operation.op.set_block_data(
713			import_headers.post().clone(),
714			body,
715			indexed_body,
716			justifications,
717			leaf_state,
718		)?;
719
720		operation.op.insert_aux(aux)?;
721
722		let should_notify_every_block = !self.every_import_notification_sinks.lock().is_empty();
723
724		// Notify when we are already synced to the tip of the chain
725		// or if this import triggers a re-org
726		let should_notify_recent_block = make_notifications || tree_route.is_some();
727
728		if should_notify_every_block || should_notify_recent_block {
729			let header = import_headers.into_post();
730			if finalized && should_notify_recent_block {
731				let mut summary = match operation.notify_finalized.take() {
732					Some(mut summary) => {
733						summary.header = header.clone();
734						summary.finalized.push(hash);
735						summary
736					},
737					None => FinalizeSummary {
738						header: header.clone(),
739						finalized: vec![hash],
740						stale_heads: Vec::new(),
741					},
742				};
743
744				if parent_exists {
					// Add to the stale list all heads that branch from the parent, other than
					// our current `head`.
747					for head in self
748						.backend
749						.blockchain()
750						.leaves()?
751						.into_iter()
752						.filter(|h| *h != parent_hash)
753					{
754						let route_from_parent = sp_blockchain::tree_route(
755							self.backend.blockchain(),
756							parent_hash,
757							head,
758						)?;
759						if route_from_parent.retracted().is_empty() {
760							summary.stale_heads.push(head);
761						}
762					}
763				}
764				operation.notify_finalized = Some(summary);
765			}
766
767			let import_notification_action = if should_notify_every_block {
768				if should_notify_recent_block {
769					ImportNotificationAction::Both
770				} else {
771					ImportNotificationAction::EveryBlock
772				}
773			} else {
774				ImportNotificationAction::RecentBlock
775			};
776
777			operation.notify_imported = Some(ImportSummary {
778				hash,
779				origin,
780				header,
781				is_new_best,
782				storage_changes,
783				tree_route,
784				import_notification_action,
785			})
786		}
787
788		Ok(ImportResult::imported(is_new_best))
789	}
790
791	/// Prepares the storage changes for a block.
792	///
	/// It checks whether the state should be enacted and whether `import_block` may already provide
	/// the required storage changes. If the state should be enacted and the storage changes are not
	/// provided, the block is re-executed to get the storage changes.
796	fn prepare_block_storage_changes(
797		&self,
798		import_block: &mut BlockImportParams<Block>,
799	) -> sp_blockchain::Result<PrepareStorageChangesResult<Block>>
800	where
801		Self: ProvideRuntimeApi<Block>,
802		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
803	{
804		let parent_hash = import_block.header.parent_hash();
805		let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip);
806		let (enact_state, storage_changes) = match (self.block_status(*parent_hash)?, state_action)
807		{
808			(BlockStatus::KnownBad, _) =>
809				return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)),
810			(
811				BlockStatus::InChainPruned,
812				StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)),
813			) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)),
814			(_, StateAction::ApplyChanges(changes)) => (true, Some(changes)),
815			(BlockStatus::Unknown, _) =>
816				return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)),
817			(_, StateAction::Skip) => (false, None),
818			(BlockStatus::InChainPruned, StateAction::Execute) =>
819				return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)),
820			(BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None),
821			(_, StateAction::Execute) => (true, None),
822			(_, StateAction::ExecuteIfPossible) => (true, None),
823		};
824
825		let storage_changes = match (enact_state, storage_changes, &import_block.body) {
826			// We have storage changes and should enact the state, so we don't need to do anything
827			// here
828			(true, changes @ Some(_), _) => changes,
829			// We should enact state, but don't have any storage changes, so we need to execute the
830			// block.
831			(true, None, Some(ref body)) => {
832				let mut runtime_api = self.runtime_api();
833				let call_context = CallContext::Onchain;
834				runtime_api.set_call_context(call_context);
835
836				if self.config.enable_import_proof_recording {
837					runtime_api.record_proof();
838					let recorder = runtime_api
839						.proof_recorder()
840						.expect("Proof recording is enabled in the line above; qed.");
841					runtime_api.register_extension(ProofSizeExt::new(recorder));
842				}
843
844				runtime_api.execute_block(
845					*parent_hash,
846					Block::new(import_block.header.clone(), body.clone()),
847				)?;
848
849				let state = self.backend.state_at(*parent_hash, call_context.into())?;
850				let gen_storage_changes = runtime_api
851					.into_storage_changes(&state, *parent_hash)
852					.map_err(sp_blockchain::Error::Storage)?;
853
854				if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root
855				{
856					return Err(Error::InvalidStateRoot)
857				}
858				Some(sc_consensus::StorageChanges::Changes(gen_storage_changes))
859			},
860			// No block body, no storage changes
861			(true, None, None) => None,
862			// We should not enact the state, so we set the storage changes to `None`.
863			(false, _, _) => None,
864		};
865
866		Ok(PrepareStorageChangesResult::Import(storage_changes))
867	}
868
869	fn apply_finality_with_block_hash(
870		&self,
871		operation: &mut ClientImportOperation<Block, B>,
872		hash: Block::Hash,
873		justification: Option<Justification>,
874		info: &BlockchainInfo<Block>,
875		notify: bool,
876	) -> sp_blockchain::Result<()> {
877		if hash == info.finalized_hash {
878			warn!(
879				"Possible safety violation: attempted to re-finalize last finalized block {:?} ",
880				hash,
881			);
882			return Ok(())
883		}
884
885		// Find tree route from last finalized to given block.
886		let route_from_finalized =
887			sp_blockchain::tree_route(self.backend.blockchain(), info.finalized_hash, hash)?;
888
889		if let Some(retracted) = route_from_finalized.retracted().get(0) {
890			warn!(
891				"Safety violation: attempted to revert finalized block {:?} which is not in the \
892				same chain as last finalized {:?}",
893				retracted, info.finalized_hash
894			);
895
896			return Err(sp_blockchain::Error::NotInFinalizedChain)
897		}
898
		// We may need to coercively update the best block if there is more than one
		// leaf, or if the finalized block number is greater than the last best number recorded
		// by the backend. The latter can happen with consensus implementations that do not
		// always check this condition.
903		let block_number = self
904			.backend
905			.blockchain()
906			.number(hash)?
907			.ok_or(Error::MissingHeader(format!("{hash:?}")))?;
908		if self.backend.blockchain().leaves()?.len() > 1 || info.best_number < block_number {
909			let route_from_best =
910				sp_blockchain::tree_route(self.backend.blockchain(), info.best_hash, hash)?;
911
912			// If the block is not a direct ancestor of the current best chain,
913			// then some other block is the common ancestor.
914			if route_from_best.common_block().hash != hash {
				// NOTE: we're setting the finalized block as the best block. This might
				// be slightly inaccurate, since we might have a "better" block further
				// along this chain, but since best-chain selection logic is pluggable we
				// cannot make a better choice here. Usages that need an accurate "best"
				// block need to go through `SelectChain` instead.
921				operation.op.mark_head(hash)?;
922			}
923		}
924
925		let enacted = route_from_finalized.enacted();
926		assert!(enacted.len() > 0);
927		for finalize_new in &enacted[..enacted.len() - 1] {
928			operation.op.mark_finalized(finalize_new.hash, None)?;
929		}
930
931		assert_eq!(enacted.last().map(|e| e.hash), Some(hash));
932		operation.op.mark_finalized(hash, justification)?;
933
934		if notify {
935			let finalized =
936				route_from_finalized.enacted().iter().map(|elem| elem.hash).collect::<Vec<_>>();
937
938			let block_number = route_from_finalized
939				.last()
940				.expect(
941					"The block to finalize is always the latest \
942						block in the route to the finalized block; qed",
943				)
944				.number;
945
946			// The stale heads are the leaves that will be displaced after the
947			// block is finalized.
948			let stale_heads = self
949				.backend
950				.blockchain()
951				.displaced_leaves_after_finalizing(hash, block_number)?
952				.hashes()
953				.collect();
954
955			let header = self
956				.backend
957				.blockchain()
958				.header(hash)?
959				.expect("Block to finalize expected to be onchain; qed");
960
961			operation.notify_finalized = Some(FinalizeSummary { header, finalized, stale_heads });
962		}
963
964		Ok(())
965	}
966
967	fn notify_finalized(
968		&self,
969		notification: Option<FinalityNotification<Block>>,
970	) -> sp_blockchain::Result<()> {
971		let mut sinks = self.finality_notification_sinks.lock();
972
973		let notification = match notification {
974			Some(notify_finalized) => notify_finalized,
975			None => {
976				// Cleanup any closed finality notification sinks
977				// since we won't be running the loop below which
978				// would also remove any closed sinks.
979				sinks.retain(|sink| !sink.is_closed());
980				return Ok(())
981			},
982		};
983
984		telemetry!(
985			self.telemetry;
986			SUBSTRATE_INFO;
987			"notify.finalized";
988			"height" => format!("{}", notification.header.number()),
989			"best" => ?notification.hash,
990		);
991
992		sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
993
994		Ok(())
995	}
996
997	fn notify_imported(
998		&self,
999		notification: Option<BlockImportNotification<Block>>,
1000		import_notification_action: ImportNotificationAction,
1001		storage_changes: Option<(StorageCollection, ChildStorageCollection)>,
1002	) -> sp_blockchain::Result<()> {
1003		let notification = match notification {
1004			Some(notify_import) => notify_import,
1005			None => {
				// Clean up any closed import notification sinks, since we won't
				// be sending any notifications below (which would also remove any
				// closed sinks). This is necessary because during initial sync we
				// won't send any import notifications, which could lead to a
				// temporary leak of closed/discarded notification sinks (e.g.
				// from consensus code).
1012				self.import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1013
1014				self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1015
1016				return Ok(())
1017			},
1018		};
1019
1020		let trigger_storage_changes_notification = || {
1021			if let Some(storage_changes) = storage_changes {
1022				// TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes?
1023				self.storage_notifications.trigger(
1024					&notification.hash,
1025					storage_changes.0.into_iter(),
1026					storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())),
1027				);
1028			}
1029		};
1030
1031		match import_notification_action {
1032			ImportNotificationAction::Both => {
1033				trigger_storage_changes_notification();
1034				self.import_notification_sinks
1035					.lock()
1036					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1037
1038				self.every_import_notification_sinks
1039					.lock()
1040					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1041			},
1042			ImportNotificationAction::RecentBlock => {
1043				trigger_storage_changes_notification();
1044				self.import_notification_sinks
1045					.lock()
1046					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1047
1048				self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1049			},
1050			ImportNotificationAction::EveryBlock => {
1051				self.every_import_notification_sinks
1052					.lock()
1053					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1054
1055				self.import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1056			},
1057			ImportNotificationAction::None => {
				// In fact this branch is unreachable, because at this point the block import
				// notification must be Some(_): the None case is already handled at the
				// beginning of this function.
1061				self.import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1062
1063				self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1064			},
1065		}
1066
1067		Ok(())
1068	}
1069
1070	/// Attempts to revert the chain by `n` blocks guaranteeing that no block is
1071	/// reverted past the last finalized block. Returns the number of blocks
1072	/// that were successfully reverted.
1073	pub fn revert(&self, n: NumberFor<Block>) -> sp_blockchain::Result<NumberFor<Block>> {
1074		let (number, _) = self.backend.revert(n, false)?;
1075		Ok(number)
1076	}
1077
	/// Attempts to revert the chain by `n` blocks, disregarding finality. This method will revert
	/// any finalized blocks as requested and can potentially leave the node in an inconsistent
	/// state. Other modules in the system that persist data and that rely on finality
	/// (e.g. consensus parts) will be unaffected by the revert. Use this method with caution and
	/// make sure that no other data needs to be reverted for consistency aside from the block
	/// data. If `blacklist` is set to true, the reverted blocks will also be blacklisted from
	/// being finalized again. The blacklist is reset upon client restart.
	///
	/// Returns the number of blocks that were successfully reverted.
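	///
	/// # Example
	///
	/// A sketch, assuming `client` is a mutable `Client` instance:
	///
	/// ```ignore
	/// // Revert up to three blocks, including finalized ones, and blacklist them
	/// // from being finalized again until the next restart.
	/// let reverted = client.unsafe_revert(3u32.into(), true)?;
	/// log::info!("Reverted {} blocks", reverted);
	/// ```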
1087	pub fn unsafe_revert(
1088		&mut self,
1089		n: NumberFor<Block>,
1090		blacklist: bool,
1091	) -> sp_blockchain::Result<NumberFor<Block>> {
1092		let (number, reverted) = self.backend.revert(n, true)?;
1093		if blacklist {
1094			for b in reverted {
1095				self.block_rules.mark_bad(b);
1096			}
1097		}
1098		Ok(number)
1099	}
1100
1101	/// Get blockchain info.
1102	pub fn chain_info(&self) -> BlockchainInfo<Block> {
1103		self.backend.blockchain().info()
1104	}
1105
	/// Get block status.
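	///
	/// # Example
	///
	/// A sketch, assuming `client` is a `Client` instance and `hash` is a block hash:
	///
	/// ```ignore
	/// match client.block_status(hash)? {
	/// 	BlockStatus::InChainWithState => { /* block and state are available */ },
	/// 	BlockStatus::InChainPruned => { /* block is known, state has been pruned */ },
	/// 	BlockStatus::Queued => { /* block is currently being imported */ },
	/// 	_ => { /* unknown or known-bad block */ },
	/// }
	/// ```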
1107	pub fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result<BlockStatus> {
1108		// this can probably be implemented more efficiently
1109		if self
1110			.importing_block
1111			.read()
1112			.as_ref()
1113			.map_or(false, |importing| &hash == importing)
1114		{
1115			return Ok(BlockStatus::Queued)
1116		}
1117
1118		let hash_and_number = self.backend.blockchain().number(hash)?.map(|n| (hash, n));
1119		match hash_and_number {
1120			Some((hash, number)) =>
1121				if self.backend.have_state_at(hash, number) {
1122					Ok(BlockStatus::InChainWithState)
1123				} else {
1124					Ok(BlockStatus::InChainPruned)
1125				},
1126			None => Ok(BlockStatus::Unknown),
1127		}
1128	}
1129
1130	/// Get block header by id.
1131	pub fn header(
1132		&self,
1133		hash: Block::Hash,
1134	) -> sp_blockchain::Result<Option<<Block as BlockT>::Header>> {
1135		self.backend.blockchain().header(hash)
1136	}
1137
1138	/// Get block body by id.
1139	pub fn body(
1140		&self,
1141		hash: Block::Hash,
1142	) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
1143		self.backend.blockchain().body(hash)
1144	}
1145
	/// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors.
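	///
	/// # Example
	///
	/// A sketch, assuming `client` is a `Client` instance and `target_hash` is a block hash:
	///
	/// ```ignore
	/// // Collect uncles among the children of the last two ancestors of `target_hash`.
	/// let uncles = client.uncles(target_hash, 2u32.into())?;
	/// ```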
1147	pub fn uncles(
1148		&self,
1149		target_hash: Block::Hash,
1150		max_generation: NumberFor<Block>,
1151	) -> sp_blockchain::Result<Vec<Block::Hash>> {
1152		let load_header = |hash: Block::Hash| -> sp_blockchain::Result<Block::Header> {
1153			self.backend
1154				.blockchain()
1155				.header(hash)?
1156				.ok_or_else(|| Error::UnknownBlock(format!("{:?}", hash)))
1157		};
1158
1159		let genesis_hash = self.backend.blockchain().info().genesis_hash;
1160		if genesis_hash == target_hash {
1161			return Ok(Vec::new())
1162		}
1163
1164		let mut current_hash = target_hash;
1165		let mut current = load_header(current_hash)?;
1166		let mut ancestor_hash = *current.parent_hash();
1167		let mut ancestor = load_header(ancestor_hash)?;
1168		let mut uncles = Vec::new();
1169
1170		let mut generation: NumberFor<Block> = Zero::zero();
1171		while generation < max_generation {
1172			let children = self.backend.blockchain().children(ancestor_hash)?;
1173			uncles.extend(children.into_iter().filter(|h| h != &current_hash));
1174			current_hash = ancestor_hash;
1175
1176			if genesis_hash == current_hash {
1177				break
1178			}
1179
1180			current = ancestor;
1181			ancestor_hash = *current.parent_hash();
1182			ancestor = load_header(ancestor_hash)?;
1183			generation += One::one();
1184		}
1185		trace!("Collected {} uncles", uncles.len());
1186		Ok(uncles)
1187	}
1188}
1189
1190impl<B, E, Block, RA> UsageProvider<Block> for Client<B, E, Block, RA>
1191where
1192	B: backend::Backend<Block>,
1193	E: CallExecutor<Block>,
1194	Block: BlockT,
1195{
1196	/// Get usage info about current client.
1197	fn usage_info(&self) -> ClientInfo<Block> {
1198		ClientInfo { chain: self.chain_info(), usage: self.backend.usage_info() }
1199	}
1200}
1201
1202impl<B, E, Block, RA> ProofProvider<Block> for Client<B, E, Block, RA>
1203where
1204	B: backend::Backend<Block>,
1205	E: CallExecutor<Block>,
1206	Block: BlockT,
1207{
1208	fn read_proof(
1209		&self,
1210		hash: Block::Hash,
1211		keys: &mut dyn Iterator<Item = &[u8]>,
1212	) -> sp_blockchain::Result<StorageProof> {
1213		self.state_at(hash)
1214			.and_then(|state| prove_read(state, keys).map_err(Into::into))
1215	}
1216
1217	fn read_child_proof(
1218		&self,
1219		hash: Block::Hash,
1220		child_info: &ChildInfo,
1221		keys: &mut dyn Iterator<Item = &[u8]>,
1222	) -> sp_blockchain::Result<StorageProof> {
1223		self.state_at(hash)
1224			.and_then(|state| prove_child_read(state, child_info, keys).map_err(Into::into))
1225	}
1226
1227	fn execution_proof(
1228		&self,
1229		hash: Block::Hash,
1230		method: &str,
1231		call_data: &[u8],
1232	) -> sp_blockchain::Result<(Vec<u8>, StorageProof)> {
1233		self.executor.prove_execution(hash, method, call_data)
1234	}
1235
1236	fn read_proof_collection(
1237		&self,
1238		hash: Block::Hash,
1239		start_key: &[Vec<u8>],
1240		size_limit: usize,
1241	) -> sp_blockchain::Result<(CompactProof, u32)> {
1242		let state = self.state_at(hash)?;
		// This is a read proof; using version V0 or V1 is equivalent.
1244		let root = state.storage_root(std::iter::empty(), StateVersion::V0).0;
1245
1246		let (proof, count) = prove_range_read_with_child_with_size::<_, HashingFor<Block>>(
1247			state, size_limit, start_key,
1248		)?;
1249		let proof = proof
1250			.into_compact_proof::<HashingFor<Block>>(root)
1251			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?;
1252		Ok((proof, count))
1253	}
1254
1255	fn storage_collection(
1256		&self,
1257		hash: Block::Hash,
1258		start_key: &[Vec<u8>],
1259		size_limit: usize,
1260	) -> sp_blockchain::Result<Vec<(KeyValueStorageLevel, bool)>> {
1261		if start_key.len() > MAX_NESTED_TRIE_DEPTH {
1262			return Err(Error::Backend("Invalid start key.".to_string()))
1263		}
1264		let state = self.state_at(hash)?;
1265		let child_info = |storage_key: &Vec<u8>| -> sp_blockchain::Result<ChildInfo> {
1266			let storage_key = PrefixedStorageKey::new_ref(storage_key);
1267			match ChildType::from_prefixed_key(storage_key) {
1268				Some((ChildType::ParentKeyId, storage_key)) =>
1269					Ok(ChildInfo::new_default(storage_key)),
1270				None => Err(Error::Backend("Invalid child storage key.".to_string())),
1271			}
1272		};
1273		let mut current_child = if start_key.len() == 2 {
1274			let start_key = start_key.get(0).expect("checked len");
1275			if let Some(child_root) = state
1276				.storage(start_key)
1277				.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1278			{
1279				Some((child_info(start_key)?, child_root))
1280			} else {
1281				return Err(Error::Backend("Invalid root start key.".to_string()))
1282			}
1283		} else {
1284			None
1285		};
1286		let mut current_key = start_key.last().map(Clone::clone).unwrap_or_default();
1287		let mut total_size = 0;
1288		let mut result = vec![(
1289			KeyValueStorageLevel {
1290				state_root: Vec::new(),
1291				key_values: Vec::new(),
1292				parent_storage_keys: Vec::new(),
1293			},
1294			false,
1295		)];
1296
1297		let mut child_roots = HashSet::new();
1298		loop {
1299			let mut entries = Vec::new();
1300			let mut complete = true;
1301			let mut switch_child_key = None;
1302			while let Some(next_key) = if let Some(child) = current_child.as_ref() {
1303				state
1304					.next_child_storage_key(&child.0, &current_key)
1305					.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1306			} else {
1307				state
1308					.next_storage_key(&current_key)
1309					.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1310			} {
1311				let value = if let Some(child) = current_child.as_ref() {
1312					state
1313						.child_storage(&child.0, next_key.as_ref())
1314						.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1315						.unwrap_or_default()
1316				} else {
1317					state
1318						.storage(next_key.as_ref())
1319						.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1320						.unwrap_or_default()
1321				};
1322				let size = value.len() + next_key.len();
1323				if total_size + size > size_limit && !entries.is_empty() {
1324					complete = false;
1325					break
1326				}
1327				total_size += size;
1328
1329				if current_child.is_none() &&
1330					sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) &&
1331					!child_roots.contains(value.as_slice())
1332				{
1333					child_roots.insert(value.clone());
1334					switch_child_key = Some((next_key.clone(), value.clone()));
1335					entries.push((next_key.clone(), value));
1336					break
1337				}
1338				entries.push((next_key.clone(), value));
1339				current_key = next_key;
1340			}
1341			if let Some((child, child_root)) = switch_child_key.take() {
1342				result[0].0.key_values.extend(entries.into_iter());
1343				current_child = Some((child_info(&child)?, child_root));
1344				current_key = Vec::new();
1345			} else if let Some((child, child_root)) = current_child.take() {
1346				current_key = child.into_prefixed_storage_key().into_inner();
1347				result.push((
1348					KeyValueStorageLevel {
1349						state_root: child_root,
1350						key_values: entries,
1351						parent_storage_keys: Vec::new(),
1352					},
1353					complete,
1354				));
1355				if !complete {
1356					break
1357				}
1358			} else {
1359				result[0].0.key_values.extend(entries.into_iter());
1360				result[0].1 = complete;
1361				break
1362			}
1363		}
1364		Ok(result)
1365	}
1366
1367	fn verify_range_proof(
1368		&self,
1369		root: Block::Hash,
1370		proof: CompactProof,
1371		start_key: &[Vec<u8>],
1372	) -> sp_blockchain::Result<(KeyValueStates, usize)> {
1373		let mut db = sp_state_machine::MemoryDB::<HashingFor<Block>>::new(&[]);
1374		// Compact encoding
1375		sp_trie::decode_compact::<sp_state_machine::LayoutV0<HashingFor<Block>>, _, _>(
1376			&mut db,
1377			proof.iter_compact_encoded_nodes(),
1378			Some(&root),
1379		)
1380		.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?;
1381		let proving_backend = sp_state_machine::TrieBackendBuilder::new(db, root).build();
1382		let state = read_range_proof_check_with_child_on_proving_backend::<HashingFor<Block>>(
1383			&proving_backend,
1384			start_key,
1385		)?;
1386
1387		Ok(state)
1388	}
1389}
1390
1391impl<B, E, Block, RA> ExecutorProvider<Block> for Client<B, E, Block, RA>
1392where
1393	B: backend::Backend<Block>,
1394	E: CallExecutor<Block>,
1395	Block: BlockT,
1396{
1397	type Executor = E;
1398
1399	fn executor(&self) -> &Self::Executor {
1400		&self.executor
1401	}
1402
1403	fn execution_extensions(&self) -> &ExecutionExtensions<Block> {
1404		self.executor.execution_extensions()
1405	}
1406}
1407
1408impl<B, E, Block, RA> StorageProvider<Block, B> for Client<B, E, Block, RA>
1409where
1410	B: backend::Backend<Block>,
1411	E: CallExecutor<Block>,
1412	Block: BlockT,
1413{
1414	fn storage_keys(
1415		&self,
1416		hash: <Block as BlockT>::Hash,
1417		prefix: Option<&StorageKey>,
1418		start_key: Option<&StorageKey>,
1419	) -> sp_blockchain::Result<KeysIter<B::State, Block>> {
1420		let state = self.state_at(hash)?;
1421		KeysIter::new(state, prefix, start_key)
1422			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1423	}
1424
1425	fn child_storage_keys(
1426		&self,
1427		hash: <Block as BlockT>::Hash,
1428		child_info: ChildInfo,
1429		prefix: Option<&StorageKey>,
1430		start_key: Option<&StorageKey>,
1431	) -> sp_blockchain::Result<KeysIter<B::State, Block>> {
1432		let state = self.state_at(hash)?;
1433		KeysIter::new_child(state, child_info, prefix, start_key)
1434			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1435	}
1436
1437	fn storage_pairs(
1438		&self,
1439		hash: <Block as BlockT>::Hash,
1440		prefix: Option<&StorageKey>,
1441		start_key: Option<&StorageKey>,
1442	) -> sp_blockchain::Result<PairsIter<B::State, Block>> {
1443		let state = self.state_at(hash)?;
1444		PairsIter::new(state, prefix, start_key)
1445			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1446	}
1447
1448	fn storage(
1449		&self,
1450		hash: Block::Hash,
1451		key: &StorageKey,
1452	) -> sp_blockchain::Result<Option<StorageData>> {
1453		Ok(self
1454			.state_at(hash)?
1455			.storage(&key.0)
1456			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1457			.map(StorageData))
1458	}
1459
1460	fn storage_hash(
1461		&self,
1462		hash: <Block as BlockT>::Hash,
1463		key: &StorageKey,
1464	) -> sp_blockchain::Result<Option<Block::Hash>> {
1465		self.state_at(hash)?
1466			.storage_hash(&key.0)
1467			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1468	}
1469
1470	fn child_storage(
1471		&self,
1472		hash: <Block as BlockT>::Hash,
1473		child_info: &ChildInfo,
1474		key: &StorageKey,
1475	) -> sp_blockchain::Result<Option<StorageData>> {
1476		Ok(self
1477			.state_at(hash)?
1478			.child_storage(child_info, &key.0)
1479			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1480			.map(StorageData))
1481	}
1482
1483	fn child_storage_hash(
1484		&self,
1485		hash: <Block as BlockT>::Hash,
1486		child_info: &ChildInfo,
1487		key: &StorageKey,
1488	) -> sp_blockchain::Result<Option<Block::Hash>> {
1489		self.state_at(hash)?
1490			.child_storage_hash(child_info, &key.0)
1491			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1492	}
1493
1494	fn closest_merkle_value(
1495		&self,
1496		hash: <Block as BlockT>::Hash,
1497		key: &StorageKey,
1498	) -> blockchain::Result<Option<MerkleValue<<Block as BlockT>::Hash>>> {
1499		self.state_at(hash)?
1500			.closest_merkle_value(&key.0)
1501			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1502	}
1503
1504	fn child_closest_merkle_value(
1505		&self,
1506		hash: <Block as BlockT>::Hash,
1507		child_info: &ChildInfo,
1508		key: &StorageKey,
1509	) -> blockchain::Result<Option<MerkleValue<<Block as BlockT>::Hash>>> {
1510		self.state_at(hash)?
1511			.child_closest_merkle_value(child_info, &key.0)
1512			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1513	}
1514}
1515
1516impl<B, E, Block, RA> HeaderMetadata<Block> for Client<B, E, Block, RA>
1517where
1518	B: backend::Backend<Block>,
1519	E: CallExecutor<Block>,
1520	Block: BlockT,
1521{
1522	type Error = sp_blockchain::Error;
1523
1524	fn header_metadata(
1525		&self,
1526		hash: Block::Hash,
1527	) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
1528		self.backend.blockchain().header_metadata(hash)
1529	}
1530
1531	fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
1532		self.backend.blockchain().insert_header_metadata(hash, metadata)
1533	}
1534
1535	fn remove_header_metadata(&self, hash: Block::Hash) {
1536		self.backend.blockchain().remove_header_metadata(hash)
1537	}
1538}
1539
1540impl<B, E, Block, RA> ProvideUncles<Block> for Client<B, E, Block, RA>
1541where
1542	B: backend::Backend<Block>,
1543	E: CallExecutor<Block>,
1544	Block: BlockT,
1545{
1546	fn uncles(
1547		&self,
1548		target_hash: Block::Hash,
1549		max_generation: NumberFor<Block>,
1550	) -> sp_blockchain::Result<Vec<Block::Header>> {
1551		Ok(Client::uncles(self, target_hash, max_generation)?
1552			.into_iter()
1553			.filter_map(|hash| Client::header(self, hash).unwrap_or(None))
1554			.collect())
1555	}
1556}
1557
1558impl<B, E, Block, RA> ChainHeaderBackend<Block> for Client<B, E, Block, RA>
1559where
1560	B: backend::Backend<Block>,
1561	E: CallExecutor<Block> + Send + Sync,
1562	Block: BlockT,
1563	RA: Send + Sync,
1564{
1565	fn header(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Block::Header>> {
1566		self.backend.blockchain().header(hash)
1567	}
1568
1569	fn info(&self) -> blockchain::Info<Block> {
1570		self.backend.blockchain().info()
1571	}
1572
1573	fn status(&self, hash: Block::Hash) -> sp_blockchain::Result<blockchain::BlockStatus> {
1574		self.backend.blockchain().status(hash)
1575	}
1576
1577	fn number(
1578		&self,
1579		hash: Block::Hash,
1580	) -> sp_blockchain::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
1581		self.backend.blockchain().number(hash)
1582	}
1583
1584	fn hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
1585		self.backend.blockchain().hash(number)
1586	}
1587}
1588
1589impl<B, E, Block, RA> BlockIdTo<Block> for Client<B, E, Block, RA>
1590where
1591	B: backend::Backend<Block>,
1592	E: CallExecutor<Block> + Send + Sync,
1593	Block: BlockT,
1594	RA: Send + Sync,
1595{
1596	type Error = Error;
1597
1598	fn to_hash(&self, block_id: &BlockId<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
1599		self.block_hash_from_id(block_id)
1600	}
1601
1602	fn to_number(
1603		&self,
1604		block_id: &BlockId<Block>,
1605	) -> sp_blockchain::Result<Option<NumberFor<Block>>> {
1606		self.block_number_from_id(block_id)
1607	}
1608}
1609
1610impl<B, E, Block, RA> ChainHeaderBackend<Block> for &Client<B, E, Block, RA>
1611where
1612	B: backend::Backend<Block>,
1613	E: CallExecutor<Block> + Send + Sync,
1614	Block: BlockT,
1615	RA: Send + Sync,
1616{
1617	fn header(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Block::Header>> {
1618		self.backend.blockchain().header(hash)
1619	}
1620
1621	fn info(&self) -> blockchain::Info<Block> {
1622		self.backend.blockchain().info()
1623	}
1624
1625	fn status(&self, hash: Block::Hash) -> sp_blockchain::Result<blockchain::BlockStatus> {
1626		(**self).status(hash)
1627	}
1628
1629	fn number(
1630		&self,
1631		hash: Block::Hash,
1632	) -> sp_blockchain::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
1633		(**self).number(hash)
1634	}
1635
1636	fn hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
1637		(**self).hash(number)
1638	}
1639}
1640
1641impl<B, E, Block, RA> ProvideRuntimeApi<Block> for Client<B, E, Block, RA>
1642where
1643	B: backend::Backend<Block>,
1644	E: CallExecutor<Block, Backend = B> + Send + Sync,
1645	Block: BlockT,
1646	RA: ConstructRuntimeApi<Block, Self> + Send + Sync,
1647{
1648	type Api = <RA as ConstructRuntimeApi<Block, Self>>::RuntimeApi;
1649
1650	fn runtime_api(&self) -> ApiRef<Self::Api> {
1651		RA::construct_runtime_api(self)
1652	}
1653}

impl<B, E, Block, RA> CallApiAt<Block> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block, Backend = B> + Send + Sync,
	Block: BlockT,
	RA: Send + Sync,
{
	type StateBackend = B::State;

	fn call_api_at(&self, params: CallApiAtParams<Block>) -> Result<Vec<u8>, sp_api::ApiError> {
		self.executor
			.contextual_call(
				params.at,
				params.function,
				&params.arguments,
				params.overlayed_changes,
				params.recorder,
				params.call_context,
				params.extensions,
			)
			.map_err(Into::into)
	}

	fn runtime_version_at(&self, hash: Block::Hash) -> Result<RuntimeVersion, sp_api::ApiError> {
		CallExecutor::runtime_version(&self.executor, hash).map_err(Into::into)
	}

	fn state_at(&self, at: Block::Hash) -> Result<Self::StateBackend, sp_api::ApiError> {
		self.state_at(at).map_err(Into::into)
	}

	fn initialize_extensions(
		&self,
		at: Block::Hash,
		extensions: &mut sp_externalities::Extensions,
	) -> Result<(), sp_api::ApiError> {
		let block_number = self.expect_block_number_from_id(&BlockId::Hash(at))?;

		extensions.merge(self.executor.execution_extensions().extensions(at, block_number));

		Ok(())
	}
}
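
// Example (editor's sketch, not part of the upstream file): reading the
// runtime version recorded at a particular block via `CallApiAt`. `client` is
// an illustrative `Arc`-wrapped client and `at_hash` a known block hash.
//
//     use sp_api::CallApiAt;
//
//     let version = CallApiAt::runtime_version_at(&*client, at_hash)?;
//     assert!(version.spec_version > 0);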

/// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport
/// objects. Otherwise, importing blocks directly into the client would bypass important
/// verification work.
#[async_trait::async_trait]
impl<B, E, Block, RA> sc_consensus::BlockImport<Block> for &Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block> + Send + Sync,
	Block: BlockT,
	Client<B, E, Block, RA>: ProvideRuntimeApi<Block>,
	<Client<B, E, Block, RA> as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
	RA: Sync + Send,
{
	type Error = ConsensusError;

	/// Import a checked and validated block.
	///
	/// NOTE: only use this implementation when there are NO consensus-level BlockImport
	/// objects. Otherwise, importing blocks directly into the client would bypass
	/// important verification work.
	///
	/// If you are not sure whether the consensus algorithm provides its own BlockImport
	/// objects, do not use this function.
	async fn import_block(
		&self,
		mut import_block: BlockImportParams<Block>,
	) -> Result<ImportResult, Self::Error> {
		let span = tracing::span!(tracing::Level::DEBUG, "import_block");
		let _enter = span.enter();

		let storage_changes =
			match self.prepare_block_storage_changes(&mut import_block).map_err(|e| {
				warn!("Block prepare storage changes error: {}", e);
				ConsensusError::ClientImport(e.to_string())
			})? {
				PrepareStorageChangesResult::Discard(res) => return Ok(res),
				PrepareStorageChangesResult::Import(storage_changes) => storage_changes,
			};

		self.lock_import_and_run(|operation| {
			self.apply_block(operation, import_block, storage_changes)
		})
		.map_err(|e| {
			warn!("Block import error: {}", e);
			ConsensusError::ClientImport(e.to_string())
		})
	}

	/// Check block preconditions.
	async fn check_block(
		&self,
		block: BlockCheckParams<Block>,
	) -> Result<ImportResult, Self::Error> {
		let BlockCheckParams {
			hash,
			number,
			parent_hash,
			allow_missing_state,
			import_existing,
			allow_missing_parent,
		} = block;

		// Check the block against white and black lists if any are defined
		// (i.e. fork blocks and bad blocks respectively)
		match self.block_rules.lookup(number, &hash) {
			BlockLookupResult::KnownBad => {
				trace!("Rejecting known bad block: #{} {:?}", number, hash);
				return Ok(ImportResult::KnownBad)
			},
			BlockLookupResult::Expected(expected_hash) => {
				trace!(
					"Rejecting block from known invalid fork. Got {:?}, expected: {:?} at height {}",
					hash,
					expected_hash,
					number
				);
				return Ok(ImportResult::KnownBad)
			},
			BlockLookupResult::NotSpecial => {},
		}

		// The block's own status must be checked first. If the block and its ancestry are
		// pruned, this function must return `AlreadyInChain` rather than `MissingState`.
		match self
			.block_status(hash)
			.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
		{
			BlockStatus::InChainWithState | BlockStatus::Queued =>
				return Ok(ImportResult::AlreadyInChain),
			BlockStatus::InChainPruned if !import_existing =>
				return Ok(ImportResult::AlreadyInChain),
			BlockStatus::InChainPruned => {},
			BlockStatus::Unknown => {},
			BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
		}

		match self
			.block_status(parent_hash)
			.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
		{
			BlockStatus::InChainWithState | BlockStatus::Queued => {},
			BlockStatus::Unknown if allow_missing_parent => {},
			BlockStatus::Unknown => return Ok(ImportResult::UnknownParent),
			BlockStatus::InChainPruned if allow_missing_state => {},
			BlockStatus::InChainPruned => return Ok(ImportResult::MissingState),
			BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
		}

		Ok(ImportResult::imported(false))
	}
}
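
// Example (editor's sketch, not part of the upstream file): importing a fully
// built block directly into the client, e.g. from a test harness. Heed the
// note above: only do this when no consensus-level `BlockImport` wrapper is in
// use. `client` and `block` are illustrative.
//
//     use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction};
//     use sp_consensus::BlockOrigin;
//     use sp_runtime::traits::Block as BlockT;
//
//     let (header, extrinsics) = block.deconstruct();
//     let mut params = BlockImportParams::new(BlockOrigin::Own, header);
//     params.body = Some(extrinsics);
//     // Re-execute the block so the client computes and stores its state.
//     params.state_action = StateAction::Execute;
//     params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
//
//     let result = client.import_block(params).await?;
//     log::info!("import result: {:?}", result);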

#[async_trait::async_trait]
impl<B, E, Block, RA> sc_consensus::BlockImport<Block> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block> + Send + Sync,
	Block: BlockT,
	Self: ProvideRuntimeApi<Block>,
	<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
	RA: Sync + Send,
{
	type Error = ConsensusError;

	async fn check_block(
		&self,
		block: BlockCheckParams<Block>,
	) -> Result<ImportResult, Self::Error> {
		(&self).check_block(block).await
	}

	async fn import_block(
		&self,
		import_block: BlockImportParams<Block>,
	) -> Result<ImportResult, Self::Error> {
		(&self).import_block(import_block).await
	}
}

impl<B, E, Block, RA> Finalizer<Block, B> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
{
	fn apply_finality(
		&self,
		operation: &mut ClientImportOperation<Block, B>,
		hash: Block::Hash,
		justification: Option<Justification>,
		notify: bool,
	) -> sp_blockchain::Result<()> {
		let info = self.backend.blockchain().info();
		self.apply_finality_with_block_hash(operation, hash, justification, &info, notify)
	}

	fn finalize_block(
		&self,
		hash: Block::Hash,
		justification: Option<Justification>,
		notify: bool,
	) -> sp_blockchain::Result<()> {
		self.lock_import_and_run(|operation| {
			self.apply_finality(operation, hash, justification, notify)
		})
	}
}
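
// Example (editor's sketch, not part of the upstream file): manually marking a
// block as finalized, e.g. from a test. `client` and `hash` are illustrative;
// `None` means no justification is stored alongside the finalized block.
//
//     use sc_client_api::backend::Finalizer;
//
//     // Finalize `hash` and notify finality subscribers.
//     client.finalize_block(hash, None, true)?;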

impl<B, E, Block, RA> Finalizer<Block, B> for &Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
{
	fn apply_finality(
		&self,
		operation: &mut ClientImportOperation<Block, B>,
		hash: Block::Hash,
		justification: Option<Justification>,
		notify: bool,
	) -> sp_blockchain::Result<()> {
		(**self).apply_finality(operation, hash, justification, notify)
	}

	fn finalize_block(
		&self,
		hash: Block::Hash,
		justification: Option<Justification>,
		notify: bool,
	) -> sp_blockchain::Result<()> {
		(**self).finalize_block(hash, justification, notify)
	}
}

impl<B, E, Block, RA> PreCommitActions<Block> for Client<B, E, Block, RA>
where
	Block: BlockT,
{
	fn register_import_action(&self, action: OnImportAction<Block>) {
		self.import_actions.lock().push(action);
	}

	fn register_finality_action(&self, action: OnFinalityAction<Block>) {
		self.finality_actions.lock().push(action);
	}
}

impl<B, E, Block, RA> BlockchainEvents<Block> for Client<B, E, Block, RA>
where
	E: CallExecutor<Block>,
	Block: BlockT,
{
	/// Get block import event stream.
	fn import_notification_stream(&self) -> ImportNotifications<Block> {
		let (sink, stream) = tracing_unbounded("mpsc_import_notification_stream", 100_000);
		self.import_notification_sinks.lock().push(sink);
		stream
	}

	fn every_import_notification_stream(&self) -> ImportNotifications<Block> {
		let (sink, stream) = tracing_unbounded("mpsc_every_import_notification_stream", 100_000);
		self.every_import_notification_sinks.lock().push(sink);
		stream
	}

	fn finality_notification_stream(&self) -> FinalityNotifications<Block> {
		let (sink, stream) = tracing_unbounded("mpsc_finality_notification_stream", 100_000);
		self.finality_notification_sinks.lock().push(sink);
		stream
	}

	/// Get storage changes event stream.
	fn storage_changes_notification_stream(
		&self,
		filter_keys: Option<&[StorageKey]>,
		child_filter_keys: Option<&[(StorageKey, Option<Vec<StorageKey>>)]>,
	) -> sp_blockchain::Result<StorageEventStream<Block::Hash>> {
		Ok(self.storage_notifications.listen(filter_keys, child_filter_keys))
	}
}
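
// Example (editor's sketch, not part of the upstream file): reacting to newly
// imported blocks. Assumes an async context with the `futures` crate in scope;
// `client` is illustrative.
//
//     use futures::StreamExt;
//     use sc_client_api::client::BlockchainEvents;
//
//     let mut imports = client.import_notification_stream();
//     while let Some(notification) = imports.next().await {
//         log::info!("imported {:?}, new best: {}", notification.hash, notification.is_new_best);
//     }
//
//     // Finality events work the same way via `finality_notification_stream()`.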

impl<B, E, Block, RA> BlockBackend<Block> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
{
	fn block_body(
		&self,
		hash: Block::Hash,
	) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
		self.body(hash)
	}

	fn block(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<SignedBlock<Block>>> {
		Ok(match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) {
			(Some(header), Some(extrinsics), justifications) =>
				Some(SignedBlock { block: Block::new(header, extrinsics), justifications }),
			_ => None,
		})
	}

	fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result<BlockStatus> {
		Client::block_status(self, hash)
	}

	fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Justifications>> {
		self.backend.blockchain().justifications(hash)
	}

	fn block_hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
		self.backend.blockchain().hash(number)
	}

	fn indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Vec<u8>>> {
		self.backend.blockchain().indexed_transaction(hash)
	}

	fn has_indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result<bool> {
		self.backend.blockchain().has_indexed_transaction(hash)
	}

	fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Vec<Vec<u8>>>> {
		self.backend.blockchain().block_indexed_body(hash)
	}

	fn requires_full_sync(&self) -> bool {
		self.backend.requires_full_sync()
	}
}
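
// Example (editor's sketch, not part of the upstream file): reading a block
// back from the database. `client` and `hash` are illustrative.
//
//     use sc_client_api::client::BlockBackend;
//
//     // `block` returns `None` unless both the header and the body are stored.
//     if let Some(signed_block) = client.block(hash)? {
//         log::info!(
//             "block {:?} found, justifications stored: {}",
//             hash,
//             signed_block.justifications.is_some(),
//         );
//     }
//
//     // The pieces are also available individually.
//     let body = client.block_body(hash)?;
//     let justifications = client.justifications(hash)?;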

impl<B, E, Block, RA> backend::AuxStore for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
	Self: ProvideRuntimeApi<Block>,
	<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block>,
{
	/// Insert auxiliary data into key-value store.
	fn insert_aux<
		'a,
		'b: 'a,
		'c: 'a,
		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
		D: IntoIterator<Item = &'a &'b [u8]>,
	>(
		&self,
		insert: I,
		delete: D,
	) -> sp_blockchain::Result<()> {
		// Import is locked here because other block import operations may try
		// to set aux data concurrently. Note that the consensus layer can
		// always use atomic operations to make sure the import lock is only
		// taken once.
		self.lock_import_and_run(|operation| apply_aux(operation, insert, delete))
	}

	/// Query auxiliary data from key-value store.
	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
		backend::AuxStore::get_aux(&*self.backend, key)
	}
}
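
// Example (editor's sketch, not part of the upstream file): storing and
// reading auxiliary (non-consensus) data through the client. The key and value
// shown are illustrative.
//
//     use sc_client_api::backend::AuxStore;
//
//     // Insert one entry and delete nothing.
//     client.insert_aux(&[(&b"my-module/key"[..], &b"value"[..])], &[])?;
//     assert_eq!(client.get_aux(b"my-module/key")?, Some(b"value".to_vec()));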

impl<B, E, Block, RA> backend::AuxStore for &Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
	Client<B, E, Block, RA>: ProvideRuntimeApi<Block>,
	<Client<B, E, Block, RA> as ProvideRuntimeApi<Block>>::Api: CoreApi<Block>,
{
	fn insert_aux<
		'a,
		'b: 'a,
		'c: 'a,
		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
		D: IntoIterator<Item = &'a &'b [u8]>,
	>(
		&self,
		insert: I,
		delete: D,
	) -> sp_blockchain::Result<()> {
		(**self).insert_aux(insert, delete)
	}

	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
		(**self).get_aux(key)
	}
}

impl<BE, E, B, RA> sp_consensus::block_validation::Chain<B> for Client<BE, E, B, RA>
where
	BE: backend::Backend<B>,
	E: CallExecutor<B>,
	B: BlockT,
{
	fn block_status(
		&self,
		hash: B::Hash,
	) -> Result<BlockStatus, Box<dyn std::error::Error + Send>> {
		Client::block_status(self, hash).map_err(|e| Box::new(e) as Box<_>)
	}
}

impl<BE, E, B, RA> sp_transaction_storage_proof::IndexedBody<B> for Client<BE, E, B, RA>
where
	BE: backend::Backend<B>,
	E: CallExecutor<B>,
	B: BlockT,
{
	fn block_indexed_body(
		&self,
		number: NumberFor<B>,
	) -> Result<Option<Vec<Vec<u8>>>, sp_transaction_storage_proof::Error> {
		let hash = match self
			.backend
			.blockchain()
			.block_hash_from_id(&BlockId::Number(number))
			.map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e)))?
		{
			Some(hash) => hash,
			None => return Ok(None),
		};

		self.backend
			.blockchain()
			.block_indexed_body(hash)
			.map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e)))
	}

	fn number(
		&self,
		hash: B::Hash,
	) -> Result<Option<NumberFor<B>>, sp_transaction_storage_proof::Error> {
		self.backend
			.blockchain()
			.number(hash)
			.map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e)))
	}
}
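
// Example (editor's sketch, not part of the upstream file): looking up the
// indexed body of a block by number, as done by the transaction storage proof
// machinery. `client` (an `Arc`-wrapped client) and `number` are illustrative.
//
//     use sp_transaction_storage_proof::IndexedBody;
//
//     if let Some(chunks) = IndexedBody::block_indexed_body(&*client, number)? {
//         log::debug!("block has {} indexed transactions", chunks.len());
//     }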