sc_service/client/client.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19//! Substrate Client
20
21use super::{
22	block_rules::{BlockRules, LookupResult as BlockLookupResult},
23	CodeProvider,
24};
25use crate::client::notification_pinning::NotificationPinningWorker;
26use log::{debug, info, trace, warn};
27use parking_lot::{Mutex, RwLock};
28use prometheus_endpoint::Registry;
29use rand::Rng;
30use sc_chain_spec::{resolve_state_version_from_wasm, BuildGenesisBlock};
31use sc_client_api::{
32	backend::{
33		self, apply_aux, BlockImportOperation, ClientImportOperation, FinalizeSummary, Finalizer,
34		ImportNotificationAction, ImportSummary, LockImportRun, NewBlockState, StorageProvider,
35	},
36	client::{
37		BadBlocks, BlockBackend, BlockImportNotification, BlockOf, BlockchainEvents, ClientInfo,
38		FinalityNotification, FinalityNotifications, ForkBlocks, ImportNotifications,
39		PreCommitActions, ProvideUncles,
40	},
41	execution_extensions::ExecutionExtensions,
42	notifications::{StorageEventStream, StorageNotifications},
43	CallExecutor, ExecutorProvider, KeysIter, OnFinalityAction, OnImportAction, PairsIter,
44	ProofProvider, UnpinWorkerMessage, UsageProvider,
45};
46use sc_consensus::{
47	BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction,
48};
49use sc_executor::RuntimeVersion;
50use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO};
51use sp_api::{
52	ApiExt, ApiRef, CallApiAt, CallApiAtParams, ConstructRuntimeApi, Core as CoreApi,
53	ProvideRuntimeApi,
54};
55use sp_blockchain::{
56	self as blockchain, Backend as ChainBackend, CachedHeaderMetadata, Error,
57	HeaderBackend as ChainHeaderBackend, HeaderMetadata, Info as BlockchainInfo,
58};
59use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError};
60
61use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender};
62use sp_core::{
63	storage::{ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, StorageKey},
64	traits::{CallContext, SpawnNamed},
65};
66use sp_runtime::{
67	generic::{BlockId, SignedBlock},
68	traits::{
69		Block as BlockT, BlockIdTo, HashingFor, Header as HeaderT, NumberFor, One,
70		SaturatedConversion, Zero,
71	},
72	Justification, Justifications, StateVersion,
73};
74use sp_state_machine::{
75	prove_child_read, prove_range_read_with_child_with_size, prove_read,
76	read_range_proof_check_with_child_on_proving_backend, Backend as StateBackend,
77	ChildStorageCollection, KeyValueStates, KeyValueStorageLevel, StorageCollection,
78	MAX_NESTED_TRIE_DEPTH,
79};
80use sp_trie::{proof_size_extension::ProofSizeExt, CompactProof, MerkleValue, StorageProof};
81use std::{
82	collections::{HashMap, HashSet},
83	marker::PhantomData,
84	path::PathBuf,
85	sync::Arc,
86};
87
88#[cfg(feature = "test-helpers")]
89use {
90	super::call_executor::LocalCallExecutor, sc_client_api::in_mem, sp_core::traits::CodeExecutor,
91};
92
93type NotificationSinks<T> = Mutex<Vec<TracingUnboundedSender<T>>>;
94
95/// Substrate Client
96pub struct Client<B, E, Block, RA>
97where
98	Block: BlockT,
99{
100	backend: Arc<B>,
101	executor: E,
102	storage_notifications: StorageNotifications<Block>,
103	import_notification_sinks: NotificationSinks<BlockImportNotification<Block>>,
104	every_import_notification_sinks: NotificationSinks<BlockImportNotification<Block>>,
105	finality_notification_sinks: NotificationSinks<FinalityNotification<Block>>,
106	// Collects auxiliary operations to be performed atomically together with
107	// block import operations.
108	import_actions: Mutex<Vec<OnImportAction<Block>>>,
109	// Collects auxiliary operations to be performed atomically together with
110	// block finalization operations.
111	finality_actions: Mutex<Vec<OnFinalityAction<Block>>>,
112	// Holds the block hash currently being imported. TODO: replace this with block queue.
113	importing_block: RwLock<Option<Block::Hash>>,
114	block_rules: BlockRules<Block>,
115	config: ClientConfig<Block>,
116	telemetry: Option<TelemetryHandle>,
117	unpin_worker_sender: TracingUnboundedSender<UnpinWorkerMessage<Block>>,
118	code_provider: CodeProvider<Block, B, E>,
119	_phantom: PhantomData<RA>,
120}
121
122/// Used in importing a block, where additional changes are made after the runtime
123/// has executed.
124enum PrePostHeader<H> {
125	/// they are the same: no post-runtime digest items.
126	Same(H),
127	/// different headers (pre, post).
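	/// For example, a consensus seal that is added after block execution appears only
	/// in the post-header.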
128	Different(H, H),
129}
130
131impl<H> PrePostHeader<H> {
132	/// get a reference to the "post-header" -- the header as it should be
133	/// after all changes are applied.
134	fn post(&self) -> &H {
135		match *self {
136			PrePostHeader::Same(ref h) => h,
137			PrePostHeader::Different(_, ref h) => h,
138		}
139	}
140
141	/// convert to the "post-header" -- the header as it should be after
142	/// all changes are applied.
143	fn into_post(self) -> H {
144		match self {
145			PrePostHeader::Same(h) => h,
146			PrePostHeader::Different(_, h) => h,
147		}
148	}
149}
150
151enum PrepareStorageChangesResult<Block: BlockT> {
152	Discard(ImportResult),
153	Import(Option<sc_consensus::StorageChanges<Block>>),
154}
155
156/// Create an instance of an in-memory client.
157#[cfg(feature = "test-helpers")]
158pub fn new_in_mem<E, Block, G, RA>(
159	backend: Arc<in_mem::Backend<Block>>,
160	executor: E,
161	genesis_block_builder: G,
162	prometheus_registry: Option<Registry>,
163	telemetry: Option<TelemetryHandle>,
164	spawn_handle: Box<dyn SpawnNamed>,
165	config: ClientConfig<Block>,
166) -> sp_blockchain::Result<
167	Client<in_mem::Backend<Block>, LocalCallExecutor<Block, in_mem::Backend<Block>, E>, Block, RA>,
168>
169where
170	E: CodeExecutor + sc_executor::RuntimeVersionOf,
171	Block: BlockT,
172	G: BuildGenesisBlock<
173			Block,
174			BlockImportOperation = <in_mem::Backend<Block> as backend::Backend<Block>>::BlockImportOperation,
175		>,
176{
177	new_with_backend(
178		backend,
179		executor,
180		genesis_block_builder,
181		spawn_handle,
182		prometheus_registry,
183		telemetry,
184		config,
185	)
186}
187
188/// Client configuration items.
189#[derive(Debug, Clone)]
190pub struct ClientConfig<Block: BlockT> {
191	/// Enable the offchain worker db.
192	pub offchain_worker_enabled: bool,
193	/// If true, allows access from the runtime to write into offchain worker db.
194	pub offchain_indexing_api: bool,
195	/// Path where WASM files exist to override the on-chain WASM.
196	pub wasm_runtime_overrides: Option<PathBuf>,
197	/// Skip writing genesis state on first start.
198	pub no_genesis: bool,
199	/// Map of WASM runtime substitutes, each applied starting at the child of the given block
200	/// number and used until the on-chain runtime version no longer matches.
201	pub wasm_runtime_substitutes: HashMap<NumberFor<Block>, Vec<u8>>,
202	/// Enable recording of storage proofs during block import
203	pub enable_import_proof_recording: bool,
204}
205
206impl<Block: BlockT> Default for ClientConfig<Block> {
207	fn default() -> Self {
208		Self {
209			offchain_worker_enabled: false,
210			offchain_indexing_api: false,
211			wasm_runtime_overrides: None,
212			no_genesis: false,
213			wasm_runtime_substitutes: HashMap::new(),
214			enable_import_proof_recording: false,
215		}
216	}
217}
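// A minimal usage sketch (illustrative only): callers typically start from the default
// configuration and override individual fields, e.g.
//
//     let config = ClientConfig::<Block> {
//         enable_import_proof_recording: true,
//         ..Default::default()
//     };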
218
219/// Create a client with the explicitly provided backend.
220/// This is useful for testing backend implementations.
221#[cfg(feature = "test-helpers")]
222pub fn new_with_backend<B, E, Block, G, RA>(
223	backend: Arc<B>,
224	executor: E,
225	genesis_block_builder: G,
226	spawn_handle: Box<dyn SpawnNamed>,
227	prometheus_registry: Option<Registry>,
228	telemetry: Option<TelemetryHandle>,
229	config: ClientConfig<Block>,
230) -> sp_blockchain::Result<Client<B, LocalCallExecutor<Block, B, E>, Block, RA>>
231where
232	E: CodeExecutor + sc_executor::RuntimeVersionOf,
233	G: BuildGenesisBlock<
234		Block,
235		BlockImportOperation = <B as backend::Backend<Block>>::BlockImportOperation,
236	>,
237	Block: BlockT,
238	B: backend::LocalBackend<Block> + 'static,
239{
240	let extensions = ExecutionExtensions::new(None, Arc::new(executor.clone()));
241
242	let call_executor =
243		LocalCallExecutor::new(backend.clone(), executor, config.clone(), extensions)?;
244
245	Client::new(
246		backend,
247		call_executor,
248		spawn_handle,
249		genesis_block_builder,
250		Default::default(),
251		Default::default(),
252		prometheus_registry,
253		telemetry,
254		config,
255	)
256}
257
258impl<B, E, Block, RA> BlockOf for Client<B, E, Block, RA>
259where
260	B: backend::Backend<Block>,
261	E: CallExecutor<Block>,
262	Block: BlockT,
263{
264	type Type = Block;
265}
266
267impl<B, E, Block, RA> LockImportRun<Block, B> for Client<B, E, Block, RA>
268where
269	B: backend::Backend<Block>,
270	E: CallExecutor<Block>,
271	Block: BlockT,
272{
273	fn lock_import_and_run<R, Err, F>(&self, f: F) -> Result<R, Err>
274	where
275		F: FnOnce(&mut ClientImportOperation<Block, B>) -> Result<R, Err>,
276		Err: From<sp_blockchain::Error>,
277	{
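		// Outline: take the backend's import lock, open a block import operation, run the
		// caller's closure against it, convert any recorded summaries into import/finality
		// notifications, append the registered aux actions to the same operation, commit it
		// atomically, pin the notified blocks in the backend, and finally dispatch the
		// notifications to the registered sinks.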
278		let inner = || {
279			let _import_lock = self.backend.get_import_lock().write();
280
281			let mut op = ClientImportOperation {
282				op: self.backend.begin_operation()?,
283				notify_imported: None,
284				notify_finalized: None,
285			};
286
287			let r = f(&mut op)?;
288
289			let ClientImportOperation { mut op, notify_imported, notify_finalized } = op;
290
291			let finality_notification = notify_finalized.map(|summary| {
292				FinalityNotification::from_summary(summary, self.unpin_worker_sender.clone())
293			});
294
295			let (import_notification, storage_changes, import_notification_action) =
296				match notify_imported {
297					Some(mut summary) => {
298						let import_notification_action = summary.import_notification_action;
299						let storage_changes = summary.storage_changes.take();
300						(
301							Some(BlockImportNotification::from_summary(
302								summary,
303								self.unpin_worker_sender.clone(),
304							)),
305							storage_changes,
306							import_notification_action,
307						)
308					},
309					None => (None, None, ImportNotificationAction::None),
310				};
311
312			if let Some(ref notification) = finality_notification {
313				for action in self.finality_actions.lock().iter_mut() {
314					op.insert_aux(action(notification))?;
315				}
316			}
317			if let Some(ref notification) = import_notification {
318				for action in self.import_actions.lock().iter_mut() {
319					op.insert_aux(action(notification))?;
320				}
321			}
322
323			self.backend.commit_operation(op)?;
324
325			// We need to pin the block in the backend once
326			// for each notification. Once all notifications are
327			// dropped, the block will be unpinned automatically.
328			if let Some(ref notification) = finality_notification {
329				if let Err(err) = self.backend.pin_block(notification.hash) {
330					debug!(
331						"Unable to pin block for finality notification. hash: {}, Error: {}",
332						notification.hash, err
333					);
334				} else {
335					let _ = self
336						.unpin_worker_sender
337						.unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash))
338						.map_err(|e| {
339							log::error!(
340								"Unable to send AnnouncePin worker message for finality: {e}"
341							)
342						});
343				}
344			}
345
346			if let Some(ref notification) = import_notification {
347				if let Err(err) = self.backend.pin_block(notification.hash) {
348					debug!(
349						"Unable to pin block for import notification. hash: {}, Error: {}",
350						notification.hash, err
351					);
352				} else {
353					let _ = self
354						.unpin_worker_sender
355						.unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash))
356						.map_err(|e| {
357							log::error!("Unable to send AnnouncePin worker message for import: {e}")
358						});
359				};
360			}
361
362			self.notify_finalized(finality_notification)?;
363			self.notify_imported(import_notification, import_notification_action, storage_changes)?;
364
365			Ok(r)
366		};
367
368		let result = inner();
369		*self.importing_block.write() = None;
370
371		result
372	}
373}
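// Illustrative only: `import_block` further below wraps `apply_block` in
// `lock_import_and_run` so that block data, aux data and notifications are committed
// atomically, roughly:
//
//     self.lock_import_and_run(|operation| {
//         self.apply_block(operation, import_block, storage_changes)
//     })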
374
375impl<B, E, Block, RA> LockImportRun<Block, B> for &Client<B, E, Block, RA>
376where
377	Block: BlockT,
378	B: backend::Backend<Block>,
379	E: CallExecutor<Block>,
380{
381	fn lock_import_and_run<R, Err, F>(&self, f: F) -> Result<R, Err>
382	where
383		F: FnOnce(&mut ClientImportOperation<Block, B>) -> Result<R, Err>,
384		Err: From<sp_blockchain::Error>,
385	{
386		(**self).lock_import_and_run(f)
387	}
388}
389
390impl<B, E, Block, RA> Client<B, E, Block, RA>
391where
392	B: backend::Backend<Block>,
393	E: CallExecutor<Block>,
394	Block: BlockT,
395	Block::Header: Clone,
396{
397	/// Creates new Substrate Client with given blockchain and code executor.
398	pub fn new<G>(
399		backend: Arc<B>,
400		executor: E,
401		spawn_handle: Box<dyn SpawnNamed>,
402		genesis_block_builder: G,
403		fork_blocks: ForkBlocks<Block>,
404		bad_blocks: BadBlocks<Block>,
405		prometheus_registry: Option<Registry>,
406		telemetry: Option<TelemetryHandle>,
407		config: ClientConfig<Block>,
408	) -> sp_blockchain::Result<Self>
409	where
410		G: BuildGenesisBlock<
411			Block,
412			BlockImportOperation = <B as backend::Backend<Block>>::BlockImportOperation,
413		>,
414		E: Clone,
415		B: 'static,
416	{
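		// Build and commit the genesis block if the backend has no finalized state yet,
		// spawn the notification-pinning worker that unpins blocks once all their
		// notifications have been dropped, and set up the code provider before assembling
		// the client.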
417		let info = backend.blockchain().info();
418		if info.finalized_state.is_none() {
419			let (genesis_block, mut op) = genesis_block_builder.build_genesis_block()?;
420			info!(
421				"🔨 Initializing Genesis block/state (state: {}, header-hash: {})",
422				genesis_block.header().state_root(),
423				genesis_block.header().hash()
424			);
425			// Genesis may be written after some blocks have been imported and finalized.
426			// So we only finalize it when the database is empty.
427			let block_state = if info.best_hash == Default::default() {
428				NewBlockState::Final
429			} else {
430				NewBlockState::Normal
431			};
432			let (header, body) = genesis_block.deconstruct();
433			op.set_block_data(header, Some(body), None, None, block_state)?;
434			backend.commit_operation(op)?;
435		}
436
437		let (unpin_worker_sender, rx) = tracing_unbounded::<UnpinWorkerMessage<Block>>(
438			"notification-pinning-worker-channel",
439			10_000,
440		);
441		let unpin_worker = NotificationPinningWorker::new(rx, backend.clone());
442		spawn_handle.spawn("notification-pinning-worker", None, Box::pin(unpin_worker.run()));
443		let code_provider = CodeProvider::new(&config, executor.clone(), backend.clone())?;
444
445		Ok(Client {
446			backend,
447			executor,
448			storage_notifications: StorageNotifications::new(prometheus_registry),
449			import_notification_sinks: Default::default(),
450			every_import_notification_sinks: Default::default(),
451			finality_notification_sinks: Default::default(),
452			import_actions: Default::default(),
453			finality_actions: Default::default(),
454			importing_block: Default::default(),
455			block_rules: BlockRules::new(fork_blocks, bad_blocks),
456			config,
457			telemetry,
458			unpin_worker_sender,
459			code_provider,
460			_phantom: Default::default(),
461		})
462	}
463
464	/// Returns a reference to the block import notification sinks;
465	/// useful for test environments.
466	pub fn import_notification_sinks(&self) -> &NotificationSinks<BlockImportNotification<Block>> {
467		&self.import_notification_sinks
468	}
469
470	/// Returns a reference to the finality notification sinks;
471	/// useful for test environments.
472	pub fn finality_notification_sinks(&self) -> &NotificationSinks<FinalityNotification<Block>> {
473		&self.finality_notification_sinks
474	}
475
476	/// Get a reference to the state at a given block.
477	pub fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result<B::State> {
478		self.backend.state_at(hash)
479	}
480
481	/// Get the code at a given block.
482	///
483	/// This takes any potential substitutes into account, but ignores overrides.
484	pub fn code_at(&self, hash: Block::Hash) -> sp_blockchain::Result<Vec<u8>> {
485		self.code_provider.code_at_ignoring_overrides(hash)
486	}
487
488	/// Get the RuntimeVersion at a given block.
489	pub fn runtime_version_at(&self, hash: Block::Hash) -> sp_blockchain::Result<RuntimeVersion> {
490		CallExecutor::runtime_version(&self.executor, hash)
491	}
492
493	/// Apply a checked and validated block to an operation.
494	fn apply_block(
495		&self,
496		operation: &mut ClientImportOperation<Block, B>,
497		import_block: BlockImportParams<Block>,
498		storage_changes: Option<sc_consensus::StorageChanges<Block>>,
499	) -> sp_blockchain::Result<ImportResult>
500	where
501		Self: ProvideRuntimeApi<Block>,
502		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
503	{
504		let BlockImportParams {
505			origin,
506			header,
507			justifications,
508			post_digests,
509			body,
510			indexed_body,
511			finalized,
512			auxiliary,
513			fork_choice,
514			intermediates,
515			import_existing,
516			create_gap,
517			..
518		} = import_block;
519
520		if !intermediates.is_empty() {
521			return Err(Error::IncompletePipeline)
522		}
523
524		let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?;
525
526		let import_headers = if post_digests.is_empty() {
527			PrePostHeader::Same(header)
528		} else {
529			let mut post_header = header.clone();
530			for item in post_digests {
531				post_header.digest_mut().push(item);
532			}
533			PrePostHeader::Different(header, post_header)
534		};
535
536		let hash = import_headers.post().hash();
537		let height = (*import_headers.post().number()).saturated_into::<u64>();
538
539		*self.importing_block.write() = Some(hash);
540
541		operation.op.set_create_gap(create_gap);
542
543		let result = self.execute_and_import_block(
544			operation,
545			origin,
546			hash,
547			import_headers,
548			justifications,
549			body,
550			indexed_body,
551			storage_changes,
552			finalized,
553			auxiliary,
554			fork_choice,
555			import_existing,
556		);
557
558		if let Ok(ImportResult::Imported(ref aux)) = result {
559			if aux.is_new_best {
560				// Don't send telemetry block import events for every block during initial
561				// sync to avoid spamming the telemetry server; instead, these events are
562				// randomly sampled at a rate of 1/10.
563				if origin != BlockOrigin::NetworkInitialSync || rand::thread_rng().gen_bool(0.1) {
564					telemetry!(
565						self.telemetry;
566						SUBSTRATE_INFO;
567						"block.import";
568						"height" => height,
569						"best" => ?hash,
570						"origin" => ?origin
571					);
572				}
573			}
574		}
575
576		result
577	}
578
579	fn execute_and_import_block(
580		&self,
581		operation: &mut ClientImportOperation<Block, B>,
582		origin: BlockOrigin,
583		hash: Block::Hash,
584		import_headers: PrePostHeader<Block::Header>,
585		justifications: Option<Justifications>,
586		body: Option<Vec<Block::Extrinsic>>,
587		indexed_body: Option<Vec<Vec<u8>>>,
588		storage_changes: Option<sc_consensus::StorageChanges<Block>>,
589		finalized: bool,
590		aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
591		fork_choice: ForkChoiceStrategy,
592		import_existing: bool,
593	) -> sp_blockchain::Result<ImportResult>
594	where
595		Self: ProvideRuntimeApi<Block>,
596		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
597	{
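		// High-level flow: check the block's status and whether it would revert finality,
		// apply or import the provided storage changes, finalize the parent chain first if
		// this block is marked as finalized, decide via the fork-choice strategy whether the
		// block becomes the new best, write the block data, and prepare import/finality
		// notification summaries for later dispatch.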
598		let parent_hash = *import_headers.post().parent_hash();
599		let status = self.backend.blockchain().status(hash)?;
600		let parent_exists =
601			self.backend.blockchain().status(parent_hash)? == blockchain::BlockStatus::InChain;
602		match (import_existing, status) {
603			(false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain),
604			(false, blockchain::BlockStatus::Unknown) => {},
605			(true, blockchain::BlockStatus::InChain) => {},
606			(true, blockchain::BlockStatus::Unknown) => {},
607		}
608
609		let info = self.backend.blockchain().info();
610		let gap_block =
611			info.block_gap.map_or(false, |gap| *import_headers.post().number() == gap.start);
612
613		// The block is below our last finalized block, so importing it would revert
614		// finality; refuse the import.
615		if status == blockchain::BlockStatus::Unknown &&
616			*import_headers.post().number() <= info.finalized_number &&
617			!gap_block
618		{
619			return Err(sp_blockchain::Error::NotInFinalizedChain)
620		}
621
622		// this is a fairly arbitrary choice of where to draw the line on making notifications,
623		// but the general goal is to only make notifications when we are already fully synced
624		// and get a new chain head.
625		let make_notifications = match origin {
626			BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast =>
627				true,
628			BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false,
629		};
630
631		let storage_changes = match storage_changes {
632			Some(storage_changes) => {
633				let storage_changes = match storage_changes {
634					sc_consensus::StorageChanges::Changes(storage_changes) => {
635						self.backend.begin_state_operation(&mut operation.op, parent_hash)?;
636						let (main_sc, child_sc, offchain_sc, tx, _, tx_index) =
637							storage_changes.into_inner();
638
639						if self.config.offchain_indexing_api {
640							operation.op.update_offchain_storage(offchain_sc)?;
641						}
642
643						operation.op.update_db_storage(tx)?;
644						operation.op.update_storage(main_sc.clone(), child_sc.clone())?;
645						operation.op.update_transaction_index(tx_index)?;
646
647						Some((main_sc, child_sc))
648					},
649					sc_consensus::StorageChanges::Import(changes) => {
650						let mut storage = sp_storage::Storage::default();
651						for state in changes.state.0.into_iter() {
652							if state.parent_storage_keys.is_empty() && state.state_root.is_empty() {
653								for (key, value) in state.key_values.into_iter() {
654									storage.top.insert(key, value);
655								}
656							} else {
657								for parent_storage in state.parent_storage_keys {
658									let storage_key = PrefixedStorageKey::new_ref(&parent_storage);
659									let storage_key =
660										match ChildType::from_prefixed_key(storage_key) {
661											Some((ChildType::ParentKeyId, storage_key)) =>
662												storage_key,
663											None =>
664												return Err(Error::Backend(
665													"Invalid child storage key.".to_string(),
666												)),
667										};
668									let entry = storage
669										.children_default
670										.entry(storage_key.to_vec())
671										.or_insert_with(|| StorageChild {
672											data: Default::default(),
673											child_info: ChildInfo::new_default(storage_key),
674										});
675									for (key, value) in state.key_values.iter() {
676										entry.data.insert(key.clone(), value.clone());
677									}
678								}
679							}
680						}
681
682						// This is used by fast sync so that the runtime version can be
683						// resolved from the imported changes.
684						let state_version = resolve_state_version_from_wasm::<_, HashingFor<Block>>(
685							&storage,
686							&self.executor,
687						)?;
688						let state_root = operation.op.reset_storage(storage, state_version)?;
689						if state_root != *import_headers.post().state_root() {
690							// State root mismatch when importing state. This should not happen in
691							// safe fast sync mode, but may happen in unsafe mode.
692							warn!("Error importing state: State root mismatch.");
693							return Err(Error::InvalidStateRoot)
694						}
695						None
696					},
697				};
698
699				storage_changes
700			},
701			None => None,
702		};
703
704		// Ensure parent chain is finalized to maintain invariant that finality is called
705		// sequentially.
706		if finalized && parent_exists && info.finalized_hash != parent_hash {
707			self.apply_finality_with_block_hash(
708				operation,
709				parent_hash,
710				None,
711				&info,
712				make_notifications,
713			)?;
714		}
715
716		let is_new_best = !gap_block &&
717			(finalized ||
718				match fork_choice {
719					ForkChoiceStrategy::LongestChain =>
720						import_headers.post().number() > &info.best_number,
721					ForkChoiceStrategy::Custom(v) => v,
722				});
723
724		let leaf_state = if finalized {
725			NewBlockState::Final
726		} else if is_new_best {
727			NewBlockState::Best
728		} else {
729			NewBlockState::Normal
730		};
731
732		let tree_route = if is_new_best && info.best_hash != parent_hash && parent_exists {
733			let route_from_best =
734				sp_blockchain::tree_route(self.backend.blockchain(), info.best_hash, parent_hash)?;
735			Some(route_from_best)
736		} else {
737			None
738		};
739
740		trace!(
741			"Imported {}, (#{}), best={}, origin={:?}",
742			hash,
743			import_headers.post().number(),
744			is_new_best,
745			origin,
746		);
747
748		operation.op.set_block_data(
749			import_headers.post().clone(),
750			body,
751			indexed_body,
752			justifications,
753			leaf_state,
754		)?;
755
756		operation.op.insert_aux(aux)?;
757
758		let should_notify_every_block = !self.every_import_notification_sinks.lock().is_empty();
759
760		// Notify when we are already synced to the tip of the chain
761		// or if this import triggers a re-org
762		let should_notify_recent_block = make_notifications || tree_route.is_some();
763
764		if should_notify_every_block || should_notify_recent_block {
765			let header = import_headers.into_post();
766			if finalized && should_notify_recent_block {
767				let mut summary = match operation.notify_finalized.take() {
768					Some(mut summary) => {
769						summary.header = header.clone();
770						summary.finalized.push(hash);
771						summary
772					},
773					None => FinalizeSummary {
774						header: header.clone(),
775						finalized: vec![hash],
776						stale_heads: Vec::new(),
777					},
778				};
779
780				if parent_exists {
781					// Add to the stale list all heads that branch off from the parent,
782					// besides our current `head`.
783					for head in self
784						.backend
785						.blockchain()
786						.leaves()?
787						.into_iter()
788						.filter(|h| *h != parent_hash)
789					{
790						let route_from_parent = sp_blockchain::tree_route(
791							self.backend.blockchain(),
792							parent_hash,
793							head,
794						)?;
795						if route_from_parent.retracted().is_empty() {
796							summary.stale_heads.push(head);
797						}
798					}
799				}
800				operation.notify_finalized = Some(summary);
801			}
802
803			let import_notification_action = if should_notify_every_block {
804				if should_notify_recent_block {
805					ImportNotificationAction::Both
806				} else {
807					ImportNotificationAction::EveryBlock
808				}
809			} else {
810				ImportNotificationAction::RecentBlock
811			};
812
813			operation.notify_imported = Some(ImportSummary {
814				hash,
815				origin,
816				header,
817				is_new_best,
818				storage_changes,
819				tree_route,
820				import_notification_action,
821			})
822		}
823
824		Ok(ImportResult::imported(is_new_best))
825	}
826
827	/// Prepares the storage changes for a block.
828	///
829	/// It checks whether the state should be enacted and whether `import_block` already provides
830	/// the required storage changes. If the state should be enacted but the storage changes are
831	/// not provided, the block is re-executed to generate the storage changes.
832	fn prepare_block_storage_changes(
833		&self,
834		import_block: &mut BlockImportParams<Block>,
835	) -> sp_blockchain::Result<PrepareStorageChangesResult<Block>>
836	where
837		Self: ProvideRuntimeApi<Block>,
838		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
839	{
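		// The match below combines the parent block's status with the requested
		// `StateAction`: provided changes are reused where possible, the block is
		// re-executed when state must be enacted but no changes were supplied, and the
		// import is discarded when the parent is unknown, known bad, or its state has been
		// pruned.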
840		let parent_hash = import_block.header.parent_hash();
841		let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip);
842		let (enact_state, storage_changes) = match (self.block_status(*parent_hash)?, state_action)
843		{
844			(BlockStatus::KnownBad, _) =>
845				return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)),
846			(
847				BlockStatus::InChainPruned,
848				StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)),
849			) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)),
850			(_, StateAction::ApplyChanges(changes)) => (true, Some(changes)),
851			(BlockStatus::Unknown, _) =>
852				return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)),
853			(_, StateAction::Skip) => (false, None),
854			(BlockStatus::InChainPruned, StateAction::Execute) =>
855				return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)),
856			(BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None),
857			(_, StateAction::Execute) => (true, None),
858			(_, StateAction::ExecuteIfPossible) => (true, None),
859		};
860
861		let storage_changes = match (enact_state, storage_changes, &import_block.body) {
862			// We have storage changes and should enact the state, so we don't need to do anything
863			// here
864			(true, changes @ Some(_), _) => changes,
865			// We should enact state, but don't have any storage changes, so we need to execute the
866			// block.
867			(true, None, Some(ref body)) => {
868				let mut runtime_api = self.runtime_api();
869
870				runtime_api.set_call_context(CallContext::Onchain);
871
872				if self.config.enable_import_proof_recording {
873					runtime_api.record_proof();
874					let recorder = runtime_api
875						.proof_recorder()
876						.expect("Proof recording is enabled in the line above; qed.");
877					runtime_api.register_extension(ProofSizeExt::new(recorder));
878				}
879
880				runtime_api.execute_block(
881					*parent_hash,
882					Block::new(import_block.header.clone(), body.clone()),
883				)?;
884
885				let state = self.backend.state_at(*parent_hash)?;
886				let gen_storage_changes = runtime_api
887					.into_storage_changes(&state, *parent_hash)
888					.map_err(sp_blockchain::Error::Storage)?;
889
890				if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root
891				{
892					return Err(Error::InvalidStateRoot)
893				}
894				Some(sc_consensus::StorageChanges::Changes(gen_storage_changes))
895			},
896			// No block body, no storage changes
897			(true, None, None) => None,
898			// We should not enact the state, so we set the storage changes to `None`.
899			(false, _, _) => None,
900		};
901
902		Ok(PrepareStorageChangesResult::Import(storage_changes))
903	}
904
905	fn apply_finality_with_block_hash(
906		&self,
907		operation: &mut ClientImportOperation<Block, B>,
908		hash: Block::Hash,
909		justification: Option<Justification>,
910		info: &BlockchainInfo<Block>,
911		notify: bool,
912	) -> sp_blockchain::Result<()> {
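		// Finalize every block on the route from the last finalized block up to `hash`,
		// refusing anything that would revert an already finalized block, and, if `notify`
		// is set, prepare a finality summary that includes the displaced stale heads.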
913		if hash == info.finalized_hash {
914			warn!(
915				"Possible safety violation: attempted to re-finalize last finalized block {:?} ",
916				hash,
917			);
918			return Ok(())
919		}
920
921		// Find tree route from last finalized to given block.
922		let route_from_finalized =
923			sp_blockchain::tree_route(self.backend.blockchain(), info.finalized_hash, hash)?;
924
925		if let Some(retracted) = route_from_finalized.retracted().get(0) {
926			warn!(
927				"Safety violation: attempted to revert finalized block {:?} which is not in the \
928				same chain as last finalized {:?}",
929				retracted, info.finalized_hash
930			);
931
932			return Err(sp_blockchain::Error::NotInFinalizedChain)
933		}
934
935		// We may need to forcibly update the best block if there is more than one
936		// leaf, or if the finalized block number is greater than the last best number
937		// recorded by the backend. The latter can happen with consensus implementations
938		// that do not always check this condition.
939		let block_number = self
940			.backend
941			.blockchain()
942			.number(hash)?
943			.ok_or(Error::MissingHeader(format!("{hash:?}")))?;
944		if self.backend.blockchain().leaves()?.len() > 1 || info.best_number < block_number {
945			let route_from_best =
946				sp_blockchain::tree_route(self.backend.blockchain(), info.best_hash, hash)?;
947
948			// If the block is not a direct ancestor of the current best chain,
949			// then some other block is the common ancestor.
950			if route_from_best.common_block().hash != hash {
951				// NOTE: we're setting the finalized block as the best block; this might
952				// be slightly inaccurate since we might have a "better" block
953				// further along this chain, but since best-chain selection logic is
954				// pluggable we cannot make a better choice here. Usages that need
955				// an accurate "best" block need to go through `SelectChain`
956				// instead.
957				operation.op.mark_head(hash)?;
958			}
959		}
960
961		let enacted = route_from_finalized.enacted();
962		assert!(enacted.len() > 0);
963		for finalize_new in &enacted[..enacted.len() - 1] {
964			operation.op.mark_finalized(finalize_new.hash, None)?;
965		}
966
967		assert_eq!(enacted.last().map(|e| e.hash), Some(hash));
968		operation.op.mark_finalized(hash, justification)?;
969
970		if notify {
971			let finalized =
972				route_from_finalized.enacted().iter().map(|elem| elem.hash).collect::<Vec<_>>();
973
974			let block_number = route_from_finalized
975				.last()
976				.expect(
977					"The block to finalize is always the latest \
978						block in the route to the finalized block; qed",
979				)
980				.number;
981
982			// The stale heads are the leaves that will be displaced after the
983			// block is finalized.
984			let stale_heads = self
985				.backend
986				.blockchain()
987				.displaced_leaves_after_finalizing(hash, block_number)?
988				.hashes()
989				.collect();
990
991			let header = self
992				.backend
993				.blockchain()
994				.header(hash)?
995				.expect("Block to finalize expected to be onchain; qed");
996
997			operation.notify_finalized = Some(FinalizeSummary { header, finalized, stale_heads });
998		}
999
1000		Ok(())
1001	}
1002
1003	fn notify_finalized(
1004		&self,
1005		notification: Option<FinalityNotification<Block>>,
1006	) -> sp_blockchain::Result<()> {
1007		let mut sinks = self.finality_notification_sinks.lock();
1008
1009		let notification = match notification {
1010			Some(notify_finalized) => notify_finalized,
1011			None => {
1012				// Cleanup any closed finality notification sinks
1013				// since we won't be running the loop below which
1014				// would also remove any closed sinks.
1015				sinks.retain(|sink| !sink.is_closed());
1016				return Ok(())
1017			},
1018		};
1019
1020		telemetry!(
1021			self.telemetry;
1022			SUBSTRATE_INFO;
1023			"notify.finalized";
1024			"height" => format!("{}", notification.header.number()),
1025			"best" => ?notification.hash,
1026		);
1027
1028		sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1029
1030		Ok(())
1031	}
1032
1033	fn notify_imported(
1034		&self,
1035		notification: Option<BlockImportNotification<Block>>,
1036		import_notification_action: ImportNotificationAction,
1037		storage_changes: Option<(StorageCollection, ChildStorageCollection)>,
1038	) -> sp_blockchain::Result<()> {
1039		let notification = match notification {
1040			Some(notify_import) => notify_import,
1041			None => {
1042				// Cleanup any closed import notification sinks since we won't
1043				// be sending any notifications below which would remove any
1044				// closed sinks. this is necessary since during initial sync we
1045				// won't send any import notifications which could lead to a
1046				// temporary leak of closed/discarded notification sinks (e.g.
1047				// from consensus code).
1048				self.import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1049
1050				self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1051
1052				return Ok(())
1053			},
1054		};
1055
1056		let trigger_storage_changes_notification = || {
1057			if let Some(storage_changes) = storage_changes {
1058				// TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes?
1059				self.storage_notifications.trigger(
1060					&notification.hash,
1061					storage_changes.0.into_iter(),
1062					storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())),
1063				);
1064			}
1065		};
1066
1067		match import_notification_action {
1068			ImportNotificationAction::Both => {
1069				trigger_storage_changes_notification();
1070				self.import_notification_sinks
1071					.lock()
1072					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1073
1074				self.every_import_notification_sinks
1075					.lock()
1076					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1077			},
1078			ImportNotificationAction::RecentBlock => {
1079				trigger_storage_changes_notification();
1080				self.import_notification_sinks
1081					.lock()
1082					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1083
1084				self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1085			},
1086			ImportNotificationAction::EveryBlock => {
1087				self.every_import_notification_sinks
1088					.lock()
1089					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1090
1091				self.import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1092			},
1093			ImportNotificationAction::None => {
1094				// In practice this branch is unreachable: at this point the block import
1095				// notification must be `Some(_)`, since the `None` case is already handled
1096				// at the beginning of this function.
1097				self.import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1098
1099				self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1100			},
1101		}
1102
1103		Ok(())
1104	}
1105
1106	/// Attempts to revert the chain by `n` blocks guaranteeing that no block is
1107	/// reverted past the last finalized block. Returns the number of blocks
1108	/// that were successfully reverted.
1109	pub fn revert(&self, n: NumberFor<Block>) -> sp_blockchain::Result<NumberFor<Block>> {
1110		let (number, _) = self.backend.revert(n, false)?;
1111		Ok(number)
1112	}
1113
1114	/// Attempts to revert the chain by `n` blocks disregarding finality. This method will revert
1115	/// any finalized blocks as requested and can potentially leave the node in an inconsistent
1116	/// state. Other modules in the system that persist data and that rely on finality
1117	/// (e.g. consensus parts) will be unaffected by the revert. Use this method with caution,
1118	/// making sure that no other data needs to be reverted for consistency aside from the block
1119	/// data. If `blacklist` is set to true, this will also blacklist reverted blocks from finalizing
1120	/// again. The blacklist is reset upon client restart.
1121	///
1122	/// Returns the number of blocks that were successfully reverted.
1123	pub fn unsafe_revert(
1124		&mut self,
1125		n: NumberFor<Block>,
1126		blacklist: bool,
1127	) -> sp_blockchain::Result<NumberFor<Block>> {
1128		let (number, reverted) = self.backend.revert(n, true)?;
1129		if blacklist {
1130			for b in reverted {
1131				self.block_rules.mark_bad(b);
1132			}
1133		}
1134		Ok(number)
1135	}
1136
1137	/// Get blockchain info.
1138	pub fn chain_info(&self) -> BlockchainInfo<Block> {
1139		self.backend.blockchain().info()
1140	}
1141
1142	/// Get block status.
1143	pub fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result<BlockStatus> {
1144		// this can probably be implemented more efficiently
1145		if self
1146			.importing_block
1147			.read()
1148			.as_ref()
1149			.map_or(false, |importing| &hash == importing)
1150		{
1151			return Ok(BlockStatus::Queued)
1152		}
1153
1154		let hash_and_number = self.backend.blockchain().number(hash)?.map(|n| (hash, n));
1155		match hash_and_number {
1156			Some((hash, number)) =>
1157				if self.backend.have_state_at(hash, number) {
1158					Ok(BlockStatus::InChainWithState)
1159				} else {
1160					Ok(BlockStatus::InChainPruned)
1161				},
1162			None => Ok(BlockStatus::Unknown),
1163		}
1164	}
1165
1166	/// Get block header by id.
1167	pub fn header(
1168		&self,
1169		hash: Block::Hash,
1170	) -> sp_blockchain::Result<Option<<Block as BlockT>::Header>> {
1171		self.backend.blockchain().header(hash)
1172	}
1173
1174	/// Get block body by id.
1175	pub fn body(
1176		&self,
1177		hash: Block::Hash,
1178	) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
1179		self.backend.blockchain().body(hash)
1180	}
1181
1182	/// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors.
1183	pub fn uncles(
1184		&self,
1185		target_hash: Block::Hash,
1186		max_generation: NumberFor<Block>,
1187	) -> sp_blockchain::Result<Vec<Block::Hash>> {
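		// Walk up to `max_generation` ancestors; at each step, every child of the ancestor
		// other than the block already on the path is collected as an uncle.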
1188		let load_header = |hash: Block::Hash| -> sp_blockchain::Result<Block::Header> {
1189			self.backend
1190				.blockchain()
1191				.header(hash)?
1192				.ok_or_else(|| Error::UnknownBlock(format!("{:?}", hash)))
1193		};
1194
1195		let genesis_hash = self.backend.blockchain().info().genesis_hash;
1196		if genesis_hash == target_hash {
1197			return Ok(Vec::new())
1198		}
1199
1200		let mut current_hash = target_hash;
1201		let mut current = load_header(current_hash)?;
1202		let mut ancestor_hash = *current.parent_hash();
1203		let mut ancestor = load_header(ancestor_hash)?;
1204		let mut uncles = Vec::new();
1205
1206		let mut generation: NumberFor<Block> = Zero::zero();
1207		while generation < max_generation {
1208			let children = self.backend.blockchain().children(ancestor_hash)?;
1209			uncles.extend(children.into_iter().filter(|h| h != &current_hash));
1210			current_hash = ancestor_hash;
1211
1212			if genesis_hash == current_hash {
1213				break
1214			}
1215
1216			current = ancestor;
1217			ancestor_hash = *current.parent_hash();
1218			ancestor = load_header(ancestor_hash)?;
1219			generation += One::one();
1220		}
1221		trace!("Collected {} uncles", uncles.len());
1222		Ok(uncles)
1223	}
1224	}
}
1225
1226impl<B, E, Block, RA> UsageProvider<Block> for Client<B, E, Block, RA>
1227where
1228	B: backend::Backend<Block>,
1229	E: CallExecutor<Block>,
1230	Block: BlockT,
1231{
1232	/// Get usage info about current client.
1233	fn usage_info(&self) -> ClientInfo<Block> {
1234		ClientInfo { chain: self.chain_info(), usage: self.backend.usage_info() }
1235	}
1236}
1237
1238impl<B, E, Block, RA> ProofProvider<Block> for Client<B, E, Block, RA>
1239where
1240	B: backend::Backend<Block>,
1241	E: CallExecutor<Block>,
1242	Block: BlockT,
1243{
1244	fn read_proof(
1245		&self,
1246		hash: Block::Hash,
1247		keys: &mut dyn Iterator<Item = &[u8]>,
1248	) -> sp_blockchain::Result<StorageProof> {
1249		self.state_at(hash)
1250			.and_then(|state| prove_read(state, keys).map_err(Into::into))
1251	}
1252
1253	fn read_child_proof(
1254		&self,
1255		hash: Block::Hash,
1256		child_info: &ChildInfo,
1257		keys: &mut dyn Iterator<Item = &[u8]>,
1258	) -> sp_blockchain::Result<StorageProof> {
1259		self.state_at(hash)
1260			.and_then(|state| prove_child_read(state, child_info, keys).map_err(Into::into))
1261	}
1262
1263	fn execution_proof(
1264		&self,
1265		hash: Block::Hash,
1266		method: &str,
1267		call_data: &[u8],
1268	) -> sp_blockchain::Result<(Vec<u8>, StorageProof)> {
1269		self.executor.prove_execution(hash, method, call_data)
1270	}
1271
1272	fn read_proof_collection(
1273		&self,
1274		hash: Block::Hash,
1275		start_key: &[Vec<u8>],
1276		size_limit: usize,
1277	) -> sp_blockchain::Result<(CompactProof, u32)> {
1278		let state = self.state_at(hash)?;
1279		// This is a read proof, so using version V0 or V1 is equivalent.
1280		let root = state.storage_root(std::iter::empty(), StateVersion::V0).0;
1281
1282		let (proof, count) = prove_range_read_with_child_with_size::<_, HashingFor<Block>>(
1283			state, size_limit, start_key,
1284		)?;
1285		let proof = proof
1286			.into_compact_proof::<HashingFor<Block>>(root)
1287			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?;
1288		Ok((proof, count))
1289	}
1290
1291	fn storage_collection(
1292		&self,
1293		hash: Block::Hash,
1294		start_key: &[Vec<u8>],
1295		size_limit: usize,
1296	) -> sp_blockchain::Result<Vec<(KeyValueStorageLevel, bool)>> {
1297		if start_key.len() > MAX_NESTED_TRIE_DEPTH {
1298			return Err(Error::Backend("Invalid start key.".to_string()))
1299		}
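		// Iterate key/value pairs starting at `start_key`, descending into a child trie
		// whenever a child root key is encountered, and stop once `size_limit` is exceeded;
		// the `bool` attached to each returned level records whether it was read completely.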
1300		let state = self.state_at(hash)?;
1301		let child_info = |storage_key: &Vec<u8>| -> sp_blockchain::Result<ChildInfo> {
1302			let storage_key = PrefixedStorageKey::new_ref(storage_key);
1303			match ChildType::from_prefixed_key(storage_key) {
1304				Some((ChildType::ParentKeyId, storage_key)) =>
1305					Ok(ChildInfo::new_default(storage_key)),
1306				None => Err(Error::Backend("Invalid child storage key.".to_string())),
1307			}
1308		};
1309		let mut current_child = if start_key.len() == 2 {
1310			let start_key = start_key.get(0).expect("checked len");
1311			if let Some(child_root) = state
1312				.storage(start_key)
1313				.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1314			{
1315				Some((child_info(start_key)?, child_root))
1316			} else {
1317				return Err(Error::Backend("Invalid root start key.".to_string()))
1318			}
1319		} else {
1320			None
1321		};
1322		let mut current_key = start_key.last().map(Clone::clone).unwrap_or_default();
1323		let mut total_size = 0;
1324		let mut result = vec![(
1325			KeyValueStorageLevel {
1326				state_root: Vec::new(),
1327				key_values: Vec::new(),
1328				parent_storage_keys: Vec::new(),
1329			},
1330			false,
1331		)];
1332
1333		let mut child_roots = HashSet::new();
1334		loop {
1335			let mut entries = Vec::new();
1336			let mut complete = true;
1337			let mut switch_child_key = None;
1338			while let Some(next_key) = if let Some(child) = current_child.as_ref() {
1339				state
1340					.next_child_storage_key(&child.0, &current_key)
1341					.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1342			} else {
1343				state
1344					.next_storage_key(&current_key)
1345					.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1346			} {
1347				let value = if let Some(child) = current_child.as_ref() {
1348					state
1349						.child_storage(&child.0, next_key.as_ref())
1350						.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1351						.unwrap_or_default()
1352				} else {
1353					state
1354						.storage(next_key.as_ref())
1355						.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1356						.unwrap_or_default()
1357				};
1358				let size = value.len() + next_key.len();
1359				if total_size + size > size_limit && !entries.is_empty() {
1360					complete = false;
1361					break
1362				}
1363				total_size += size;
1364
1365				if current_child.is_none() &&
1366					sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) &&
1367					!child_roots.contains(value.as_slice())
1368				{
1369					child_roots.insert(value.clone());
1370					switch_child_key = Some((next_key.clone(), value.clone()));
1371					entries.push((next_key.clone(), value));
1372					break
1373				}
1374				entries.push((next_key.clone(), value));
1375				current_key = next_key;
1376			}
1377			if let Some((child, child_root)) = switch_child_key.take() {
1378				result[0].0.key_values.extend(entries.into_iter());
1379				current_child = Some((child_info(&child)?, child_root));
1380				current_key = Vec::new();
1381			} else if let Some((child, child_root)) = current_child.take() {
1382				current_key = child.into_prefixed_storage_key().into_inner();
1383				result.push((
1384					KeyValueStorageLevel {
1385						state_root: child_root,
1386						key_values: entries,
1387						parent_storage_keys: Vec::new(),
1388					},
1389					complete,
1390				));
1391				if !complete {
1392					break
1393				}
1394			} else {
1395				result[0].0.key_values.extend(entries.into_iter());
1396				result[0].1 = complete;
1397				break
1398			}
1399		}
1400		Ok(result)
1401	}
1402
1403	fn verify_range_proof(
1404		&self,
1405		root: Block::Hash,
1406		proof: CompactProof,
1407		start_key: &[Vec<u8>],
1408	) -> sp_blockchain::Result<(KeyValueStates, usize)> {
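		// Rebuild an in-memory trie database from the compact proof, then replay the range
		// read on a proving backend rooted at `root`.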
1409		let mut db = sp_state_machine::MemoryDB::<HashingFor<Block>>::new(&[]);
1410		// Compact encoding
1411		let _ = sp_trie::decode_compact::<sp_state_machine::LayoutV0<HashingFor<Block>>, _, _>(
1412			&mut db,
1413			proof.iter_compact_encoded_nodes(),
1414			Some(&root),
1415		)
1416		.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?;
1417		let proving_backend = sp_state_machine::TrieBackendBuilder::new(db, root).build();
1418		let state = read_range_proof_check_with_child_on_proving_backend::<HashingFor<Block>>(
1419			&proving_backend,
1420			start_key,
1421		)?;
1422
1423		Ok(state)
1424	}
1425}
1426
1427impl<B, E, Block, RA> ExecutorProvider<Block> for Client<B, E, Block, RA>
1428where
1429	B: backend::Backend<Block>,
1430	E: CallExecutor<Block>,
1431	Block: BlockT,
1432{
1433	type Executor = E;
1434
1435	fn executor(&self) -> &Self::Executor {
1436		&self.executor
1437	}
1438
1439	fn execution_extensions(&self) -> &ExecutionExtensions<Block> {
1440		self.executor.execution_extensions()
1441	}
1442}
1443
1444impl<B, E, Block, RA> StorageProvider<Block, B> for Client<B, E, Block, RA>
1445where
1446	B: backend::Backend<Block>,
1447	E: CallExecutor<Block>,
1448	Block: BlockT,
1449{
1450	fn storage_keys(
1451		&self,
1452		hash: <Block as BlockT>::Hash,
1453		prefix: Option<&StorageKey>,
1454		start_key: Option<&StorageKey>,
1455	) -> sp_blockchain::Result<KeysIter<B::State, Block>> {
1456		let state = self.state_at(hash)?;
1457		KeysIter::new(state, prefix, start_key)
1458			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1459	}
1460
1461	fn child_storage_keys(
1462		&self,
1463		hash: <Block as BlockT>::Hash,
1464		child_info: ChildInfo,
1465		prefix: Option<&StorageKey>,
1466		start_key: Option<&StorageKey>,
1467	) -> sp_blockchain::Result<KeysIter<B::State, Block>> {
1468		let state = self.state_at(hash)?;
1469		KeysIter::new_child(state, child_info, prefix, start_key)
1470			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1471	}
1472
1473	fn storage_pairs(
1474		&self,
1475		hash: <Block as BlockT>::Hash,
1476		prefix: Option<&StorageKey>,
1477		start_key: Option<&StorageKey>,
1478	) -> sp_blockchain::Result<PairsIter<B::State, Block>> {
1479		let state = self.state_at(hash)?;
1480		PairsIter::new(state, prefix, start_key)
1481			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1482	}
1483
1484	fn storage(
1485		&self,
1486		hash: Block::Hash,
1487		key: &StorageKey,
1488	) -> sp_blockchain::Result<Option<StorageData>> {
1489		Ok(self
1490			.state_at(hash)?
1491			.storage(&key.0)
1492			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1493			.map(StorageData))
1494	}
1495
1496	fn storage_hash(
1497		&self,
1498		hash: <Block as BlockT>::Hash,
1499		key: &StorageKey,
1500	) -> sp_blockchain::Result<Option<Block::Hash>> {
1501		self.state_at(hash)?
1502			.storage_hash(&key.0)
1503			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1504	}
1505
1506	fn child_storage(
1507		&self,
1508		hash: <Block as BlockT>::Hash,
1509		child_info: &ChildInfo,
1510		key: &StorageKey,
1511	) -> sp_blockchain::Result<Option<StorageData>> {
1512		Ok(self
1513			.state_at(hash)?
1514			.child_storage(child_info, &key.0)
1515			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1516			.map(StorageData))
1517	}
1518
1519	fn child_storage_hash(
1520		&self,
1521		hash: <Block as BlockT>::Hash,
1522		child_info: &ChildInfo,
1523		key: &StorageKey,
1524	) -> sp_blockchain::Result<Option<Block::Hash>> {
1525		self.state_at(hash)?
1526			.child_storage_hash(child_info, &key.0)
1527			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1528	}
1529
1530	fn closest_merkle_value(
1531		&self,
1532		hash: <Block as BlockT>::Hash,
1533		key: &StorageKey,
1534	) -> blockchain::Result<Option<MerkleValue<<Block as BlockT>::Hash>>> {
1535		self.state_at(hash)?
1536			.closest_merkle_value(&key.0)
1537			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1538	}
1539
1540	fn child_closest_merkle_value(
1541		&self,
1542		hash: <Block as BlockT>::Hash,
1543		child_info: &ChildInfo,
1544		key: &StorageKey,
1545	) -> blockchain::Result<Option<MerkleValue<<Block as BlockT>::Hash>>> {
1546		self.state_at(hash)?
1547			.child_closest_merkle_value(child_info, &key.0)
1548			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1549	}
1550}
1551
impl<B, E, Block, RA> HeaderMetadata<Block> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
{
	type Error = sp_blockchain::Error;

	fn header_metadata(
		&self,
		hash: Block::Hash,
	) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
		self.backend.blockchain().header_metadata(hash)
	}

	fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
		self.backend.blockchain().insert_header_metadata(hash, metadata)
	}

	fn remove_header_metadata(&self, hash: Block::Hash) {
		self.backend.blockchain().remove_header_metadata(hash)
	}
}

impl<B, E, Block, RA> ProvideUncles<Block> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
{
	fn uncles(
		&self,
		target_hash: Block::Hash,
		max_generation: NumberFor<Block>,
	) -> sp_blockchain::Result<Vec<Block::Header>> {
		Ok(Client::uncles(self, target_hash, max_generation)?
			.into_iter()
			.filter_map(|hash| Client::header(self, hash).unwrap_or(None))
			.collect())
	}
}

impl<B, E, Block, RA> ChainHeaderBackend<Block> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block> + Send + Sync,
	Block: BlockT,
	RA: Send + Sync,
{
	fn header(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Block::Header>> {
		self.backend.blockchain().header(hash)
	}

	fn info(&self) -> blockchain::Info<Block> {
		self.backend.blockchain().info()
	}

	fn status(&self, hash: Block::Hash) -> sp_blockchain::Result<blockchain::BlockStatus> {
		self.backend.blockchain().status(hash)
	}

	fn number(
		&self,
		hash: Block::Hash,
	) -> sp_blockchain::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
		self.backend.blockchain().number(hash)
	}

	fn hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
		self.backend.blockchain().hash(number)
	}
}

impl<B, E, Block, RA> BlockIdTo<Block> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block> + Send + Sync,
	Block: BlockT,
	RA: Send + Sync,
{
	type Error = Error;

	fn to_hash(&self, block_id: &BlockId<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
		self.block_hash_from_id(block_id)
	}

	fn to_number(
		&self,
		block_id: &BlockId<Block>,
	) -> sp_blockchain::Result<Option<NumberFor<Block>>> {
		self.block_number_from_id(block_id)
	}
}

impl<B, E, Block, RA> ChainHeaderBackend<Block> for &Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block> + Send + Sync,
	Block: BlockT,
	RA: Send + Sync,
{
	fn header(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Block::Header>> {
		self.backend.blockchain().header(hash)
	}

	fn info(&self) -> blockchain::Info<Block> {
		self.backend.blockchain().info()
	}

	fn status(&self, hash: Block::Hash) -> sp_blockchain::Result<blockchain::BlockStatus> {
		(**self).status(hash)
	}

	fn number(
		&self,
		hash: Block::Hash,
	) -> sp_blockchain::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
		(**self).number(hash)
	}

	fn hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
		(**self).hash(number)
	}
}

impl<B, E, Block, RA> ProvideRuntimeApi<Block> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block, Backend = B> + Send + Sync,
	Block: BlockT,
	RA: ConstructRuntimeApi<Block, Self> + Send + Sync,
{
	type Api = <RA as ConstructRuntimeApi<Block, Self>>::RuntimeApi;

	fn runtime_api(&self) -> ApiRef<Self::Api> {
		RA::construct_runtime_api(self)
	}
}

impl<B, E, Block, RA> CallApiAt<Block> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block, Backend = B> + Send + Sync,
	Block: BlockT,
	RA: Send + Sync,
{
	type StateBackend = B::State;

	fn call_api_at(&self, params: CallApiAtParams<Block>) -> Result<Vec<u8>, sp_api::ApiError> {
		self.executor
			.contextual_call(
				params.at,
				params.function,
				&params.arguments,
				params.overlayed_changes,
				params.recorder,
				params.call_context,
				params.extensions,
			)
			.map_err(Into::into)
	}

	fn runtime_version_at(&self, hash: Block::Hash) -> Result<RuntimeVersion, sp_api::ApiError> {
		CallExecutor::runtime_version(&self.executor, hash).map_err(Into::into)
	}

	fn state_at(&self, at: Block::Hash) -> Result<Self::StateBackend, sp_api::ApiError> {
		self.state_at(at).map_err(Into::into)
	}

	fn initialize_extensions(
		&self,
		at: Block::Hash,
		extensions: &mut sp_externalities::Extensions,
	) -> Result<(), sp_api::ApiError> {
		let block_number = self.expect_block_number_from_id(&BlockId::Hash(at))?;

		extensions.merge(self.executor.execution_extensions().extensions(at, block_number));

		Ok(())
	}
}

/// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport
/// objects. Otherwise, importing blocks directly into the client would be bypassing
/// important verification work.
#[async_trait::async_trait]
impl<B, E, Block, RA> sc_consensus::BlockImport<Block> for &Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block> + Send + Sync,
	Block: BlockT,
	Client<B, E, Block, RA>: ProvideRuntimeApi<Block>,
	<Client<B, E, Block, RA> as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
	RA: Sync + Send,
{
	type Error = ConsensusError;

	/// Import a checked and validated block.
	///
	/// NOTE: only use this implementation when there are NO consensus-level BlockImport
	/// objects. Otherwise, importing blocks directly into the client would be bypassing
	/// important verification work.
	///
	/// If you are not sure that there are no BlockImport objects provided by the consensus
	/// algorithm, don't use this function.
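	///
	/// # Example
	///
	/// A minimal, illustrative sketch (not taken from the original source): `client`,
	/// `header` and `body` are assumed to come from an already verified block, and the
	/// fork-choice rule shown here is just an example.
	/// ```ignore
	/// let mut params = BlockImportParams::new(BlockOrigin::Own, header);
	/// params.body = Some(body);
	/// params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
	/// match (&*client).import_block(params).await? {
	/// 	ImportResult::Imported(aux) => log::info!("imported, is new best: {}", aux.is_new_best),
	/// 	other => log::warn!("unexpected import result: {:?}", other),
	/// }
	/// ```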
	async fn import_block(
		&self,
		mut import_block: BlockImportParams<Block>,
	) -> Result<ImportResult, Self::Error> {
		let span = tracing::span!(tracing::Level::DEBUG, "import_block");
		let _enter = span.enter();

		let storage_changes =
			match self.prepare_block_storage_changes(&mut import_block).map_err(|e| {
				warn!("Block prepare storage changes error: {}", e);
				ConsensusError::ClientImport(e.to_string())
			})? {
				PrepareStorageChangesResult::Discard(res) => return Ok(res),
				PrepareStorageChangesResult::Import(storage_changes) => storage_changes,
			};

		self.lock_import_and_run(|operation| {
			self.apply_block(operation, import_block, storage_changes)
		})
		.map_err(|e| {
			warn!("Block import error: {}", e);
			ConsensusError::ClientImport(e.to_string())
		})
	}

	/// Check block preconditions.
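	///
	/// A minimal, illustrative sketch (the concrete field values are assumptions, not
	/// defaults from the original source):
	/// ```ignore
	/// let params = BlockCheckParams {
	/// 	hash,
	/// 	number,
	/// 	parent_hash,
	/// 	allow_missing_state: false,
	/// 	import_existing: false,
	/// 	allow_missing_parent: false,
	/// };
	/// match (&*client).check_block(params).await? {
	/// 	ImportResult::AlreadyInChain => { /* nothing to do */ },
	/// 	ImportResult::UnknownParent => { /* fetch the parent before importing */ },
	/// 	_ => { /* proceed with the import or handle the rejection */ },
	/// }
	/// ```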
	async fn check_block(
		&self,
		block: BlockCheckParams<Block>,
	) -> Result<ImportResult, Self::Error> {
		let BlockCheckParams {
			hash,
			number,
			parent_hash,
			allow_missing_state,
			import_existing,
			allow_missing_parent,
		} = block;

		// Check the block against white and black lists if any are defined
		// (i.e. fork blocks and bad blocks respectively)
		match self.block_rules.lookup(number, &hash) {
			BlockLookupResult::KnownBad => {
				trace!("Rejecting known bad block: #{} {:?}", number, hash);
				return Ok(ImportResult::KnownBad)
			},
			BlockLookupResult::Expected(expected_hash) => {
				trace!(
					"Rejecting block from known invalid fork. Got {:?}, expected: {:?} at height {}",
					hash,
					expected_hash,
					number
				);
				return Ok(ImportResult::KnownBad)
			},
			BlockLookupResult::NotSpecial => {},
		}

		// The block's own status must be checked first. If the block and its ancestry have
		// been pruned, this function must return `AlreadyInChain` rather than `MissingState`.
		match self
			.block_status(hash)
			.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
		{
			BlockStatus::InChainWithState | BlockStatus::Queued =>
				return Ok(ImportResult::AlreadyInChain),
			BlockStatus::InChainPruned if !import_existing =>
				return Ok(ImportResult::AlreadyInChain),
			BlockStatus::InChainPruned => {},
			BlockStatus::Unknown => {},
			BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
		}

		match self
			.block_status(parent_hash)
			.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
		{
			BlockStatus::InChainWithState | BlockStatus::Queued => {},
			BlockStatus::Unknown if allow_missing_parent => {},
			BlockStatus::Unknown => return Ok(ImportResult::UnknownParent),
			BlockStatus::InChainPruned if allow_missing_state => {},
			BlockStatus::InChainPruned => return Ok(ImportResult::MissingState),
			BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
		}

		Ok(ImportResult::imported(false))
	}
}

#[async_trait::async_trait]
impl<B, E, Block, RA> sc_consensus::BlockImport<Block> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block> + Send + Sync,
	Block: BlockT,
	Self: ProvideRuntimeApi<Block>,
	<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
	RA: Sync + Send,
{
	type Error = ConsensusError;

	async fn check_block(
		&self,
		block: BlockCheckParams<Block>,
	) -> Result<ImportResult, Self::Error> {
		(&self).check_block(block).await
	}

	async fn import_block(
		&self,
		import_block: BlockImportParams<Block>,
	) -> Result<ImportResult, Self::Error> {
		(&self).import_block(import_block).await
	}
}

impl<B, E, Block, RA> Finalizer<Block, B> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
{
	fn apply_finality(
		&self,
		operation: &mut ClientImportOperation<Block, B>,
		hash: Block::Hash,
		justification: Option<Justification>,
		notify: bool,
	) -> sp_blockchain::Result<()> {
		let info = self.backend.blockchain().info();
		self.apply_finality_with_block_hash(operation, hash, justification, &info, notify)
	}

	fn finalize_block(
		&self,
		hash: Block::Hash,
		justification: Option<Justification>,
		notify: bool,
	) -> sp_blockchain::Result<()> {
		self.lock_import_and_run(|operation| {
			self.apply_finality(operation, hash, justification, notify)
		})
	}
}

impl<B, E, Block, RA> Finalizer<Block, B> for &Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
{
	fn apply_finality(
		&self,
		operation: &mut ClientImportOperation<Block, B>,
		hash: Block::Hash,
		justification: Option<Justification>,
		notify: bool,
	) -> sp_blockchain::Result<()> {
		(**self).apply_finality(operation, hash, justification, notify)
	}

	fn finalize_block(
		&self,
		hash: Block::Hash,
		justification: Option<Justification>,
		notify: bool,
	) -> sp_blockchain::Result<()> {
		(**self).finalize_block(hash, justification, notify)
	}
}

impl<B, E, Block, RA> PreCommitActions<Block> for Client<B, E, Block, RA>
where
	Block: BlockT,
{
	fn register_import_action(&self, action: OnImportAction<Block>) {
		self.import_actions.lock().push(action);
	}

	fn register_finality_action(&self, action: OnFinalityAction<Block>) {
		self.finality_actions.lock().push(action);
	}
}

impl<B, E, Block, RA> BlockchainEvents<Block> for Client<B, E, Block, RA>
where
	E: CallExecutor<Block>,
	Block: BlockT,
{
	/// Get block import event stream.
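	///
	/// A minimal consumption sketch (assumes `futures::StreamExt` is in scope for
	/// `next()`):
	/// ```ignore
	/// let mut notifications = client.import_notification_stream();
	/// while let Some(notification) = notifications.next().await {
	/// 	log::info!("imported block {:?}", notification.hash);
	/// }
	/// ```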
	fn import_notification_stream(&self) -> ImportNotifications<Block> {
		let (sink, stream) = tracing_unbounded("mpsc_import_notification_stream", 100_000);
		self.import_notification_sinks.lock().push(sink);
		stream
	}

	fn every_import_notification_stream(&self) -> ImportNotifications<Block> {
		let (sink, stream) = tracing_unbounded("mpsc_every_import_notification_stream", 100_000);
		self.every_import_notification_sinks.lock().push(sink);
		stream
	}

	fn finality_notification_stream(&self) -> FinalityNotifications<Block> {
		let (sink, stream) = tracing_unbounded("mpsc_finality_notification_stream", 100_000);
		self.finality_notification_sinks.lock().push(sink);
		stream
	}

	/// Get storage changes event stream.
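	///
	/// A minimal sketch that listens for changes to a single key; the key and the
	/// notification fields used here are assumptions for illustration:
	/// ```ignore
	/// let code_key = StorageKey(b":code".to_vec());
	/// let mut stream = client.storage_changes_notification_stream(Some(&[code_key][..]), None)?;
	/// while let Some(notification) = stream.next().await {
	/// 	log::info!("storage changed in block {:?}", notification.block);
	/// }
	/// ```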
	fn storage_changes_notification_stream(
		&self,
		filter_keys: Option<&[StorageKey]>,
		child_filter_keys: Option<&[(StorageKey, Option<Vec<StorageKey>>)]>,
	) -> sp_blockchain::Result<StorageEventStream<Block::Hash>> {
		Ok(self.storage_notifications.listen(filter_keys, child_filter_keys))
	}
}

impl<B, E, Block, RA> BlockBackend<Block> for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
{
	fn block_body(
		&self,
		hash: Block::Hash,
	) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
		self.body(hash)
	}

	fn block(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<SignedBlock<Block>>> {
		Ok(match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) {
			(Some(header), Some(extrinsics), justifications) =>
				Some(SignedBlock { block: Block::new(header, extrinsics), justifications }),
			_ => None,
		})
	}

	fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result<BlockStatus> {
		Client::block_status(self, hash)
	}

	fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Justifications>> {
		self.backend.blockchain().justifications(hash)
	}

	fn block_hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
		self.backend.blockchain().hash(number)
	}

	fn indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Vec<u8>>> {
		self.backend.blockchain().indexed_transaction(hash)
	}

	fn has_indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result<bool> {
		self.backend.blockchain().has_indexed_transaction(hash)
	}

	fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Vec<Vec<u8>>>> {
		self.backend.blockchain().block_indexed_body(hash)
	}

	fn requires_full_sync(&self) -> bool {
		self.backend.requires_full_sync()
	}
}

impl<B, E, Block, RA> backend::AuxStore for Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
	Self: ProvideRuntimeApi<Block>,
	<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block>,
{
	/// Insert auxiliary data into key-value store.
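	///
	/// A minimal sketch: write one auxiliary entry and delete another. The keys are
	/// arbitrary examples, not keys used by the client itself.
	/// ```ignore
	/// client.insert_aux(
	/// 	&[(&b"my-module/counter"[..], &b"1"[..])],
	/// 	&[&b"my-module/stale-entry"[..]],
	/// )?;
	/// ```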
	fn insert_aux<
		'a,
		'b: 'a,
		'c: 'a,
		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
		D: IntoIterator<Item = &'a &'b [u8]>,
	>(
		&self,
		insert: I,
		delete: D,
	) -> sp_blockchain::Result<()> {
		// Import is locked here because other block import operations may also try to set
		// aux data. Note that the consensus layer can always use atomic operations to make
		// sure the import lock is only taken once.
		self.lock_import_and_run(|operation| apply_aux(operation, insert, delete))
	}
	/// Query auxiliary data from key-value store.
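	///
	/// A minimal sketch, reading back the (hypothetical) key written in the
	/// `insert_aux` example above:
	/// ```ignore
	/// if let Some(value) = client.get_aux(b"my-module/counter")? {
	/// 	log::info!("counter aux entry: {:?}", value);
	/// }
	/// ```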
	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
		backend::AuxStore::get_aux(&*self.backend, key)
	}
}

impl<B, E, Block, RA> backend::AuxStore for &Client<B, E, Block, RA>
where
	B: backend::Backend<Block>,
	E: CallExecutor<Block>,
	Block: BlockT,
	Client<B, E, Block, RA>: ProvideRuntimeApi<Block>,
	<Client<B, E, Block, RA> as ProvideRuntimeApi<Block>>::Api: CoreApi<Block>,
{
	fn insert_aux<
		'a,
		'b: 'a,
		'c: 'a,
		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
		D: IntoIterator<Item = &'a &'b [u8]>,
	>(
		&self,
		insert: I,
		delete: D,
	) -> sp_blockchain::Result<()> {
		(**self).insert_aux(insert, delete)
	}

	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
		(**self).get_aux(key)
	}
}

impl<BE, E, B, RA> sp_consensus::block_validation::Chain<B> for Client<BE, E, B, RA>
where
	BE: backend::Backend<B>,
	E: CallExecutor<B>,
	B: BlockT,
{
	fn block_status(
		&self,
		hash: B::Hash,
	) -> Result<BlockStatus, Box<dyn std::error::Error + Send>> {
		Client::block_status(self, hash).map_err(|e| Box::new(e) as Box<_>)
	}
}

impl<BE, E, B, RA> sp_transaction_storage_proof::IndexedBody<B> for Client<BE, E, B, RA>
where
	BE: backend::Backend<B>,
	E: CallExecutor<B>,
	B: BlockT,
{
	fn block_indexed_body(
		&self,
		number: NumberFor<B>,
	) -> Result<Option<Vec<Vec<u8>>>, sp_transaction_storage_proof::Error> {
		let hash = match self
			.backend
			.blockchain()
			.block_hash_from_id(&BlockId::Number(number))
			.map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e)))?
		{
			Some(hash) => hash,
			None => return Ok(None),
		};

		self.backend
			.blockchain()
			.block_indexed_body(hash)
			.map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e)))
	}

	fn number(
		&self,
		hash: B::Hash,
	) -> Result<Option<NumberFor<B>>, sp_transaction_storage_proof::Error> {
		self.backend
			.blockchain()
			.number(hash)
			.map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e)))
	}
}