sc_client_db/
lib.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19//! Client backend that is backed by a database.
20//!
21//! # Canonicality vs. Finality
22//!
23//! Finality indicates that a block will not be reverted, according to the consensus algorithm,
24//! while canonicality indicates that the block may in principle still be reverted, but we have
25//! already discarded the heavy state that a chain reorganization would require, so in practice we cannot.
26//!
27//! Finality implies canonicality but not vice-versa.
28
29#![warn(missing_docs)]
30
31pub mod offchain;
32
33pub mod bench;
34
35mod children;
36mod parity_db;
37mod pinned_blocks_cache;
38mod record_stats_state;
39mod stats;
40#[cfg(any(feature = "rocksdb", test))]
41mod upgrade;
42mod utils;
43
44use linked_hash_map::LinkedHashMap;
45use log::{debug, trace, warn};
46use parking_lot::{Mutex, RwLock};
47use std::{
48	collections::{HashMap, HashSet},
49	io,
50	path::{Path, PathBuf},
51	sync::Arc,
52};
53
54use crate::{
55	pinned_blocks_cache::PinnedBlocksCache,
56	record_stats_state::RecordStatsState,
57	stats::StateUsageStats,
58	utils::{meta_keys, read_db, read_meta, DatabaseType, Meta},
59};
60use codec::{Decode, Encode};
61use hash_db::Prefix;
62use sc_client_api::{
63	backend::NewBlockState,
64	blockchain::{BlockGap, BlockGapType},
65	leaves::{FinalizationOutcome, LeafSet},
66	utils::is_descendent_of,
67	IoInfo, MemoryInfo, MemorySize, UsageInfo,
68};
69use sc_state_db::{IsPruned, LastCanonicalized, StateDb};
70use sp_arithmetic::traits::Saturating;
71use sp_blockchain::{
72	Backend as _, CachedHeaderMetadata, DisplacedLeavesAfterFinalization, Error as ClientError,
73	HeaderBackend, HeaderMetadata, HeaderMetadataCache, Result as ClientResult,
74};
75use sp_core::{
76	offchain::OffchainOverlayedChange,
77	storage::{well_known_keys, ChildInfo},
78};
79use sp_database::Transaction;
80use sp_runtime::{
81	generic::BlockId,
82	traits::{
83		Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One, SaturatedConversion,
84		Zero,
85	},
86	Justification, Justifications, StateVersion, Storage,
87};
88use sp_state_machine::{
89	backend::{AsTrieBackend, Backend as StateBackend},
90	BackendTransaction, ChildStorageCollection, DBValue, IndexOperation, IterArgs,
91	OffchainChangesCollection, StateMachineStats, StorageCollection, StorageIterator, StorageKey,
92	StorageValue, UsageInfo as StateUsageInfo,
93};
94use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB};
95use utils::BLOCK_GAP_CURRENT_VERSION;
96
97// Re-export the Database trait so that one can pass an implementation of it.
98pub use sc_state_db::PruningMode;
99pub use sp_database::Database;
100
101pub use bench::BenchmarkingState;
102
103const CACHE_HEADERS: usize = 8;
104
105/// DB-backed patricia trie state; the transaction type is an overlay of changes to commit.
106pub type DbState<H> = sp_state_machine::TrieBackend<Arc<dyn sp_state_machine::Storage<H>>, H>;
107
108/// Builder for [`DbState`].
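///
/// # Example
///
/// A minimal sketch of how such a state can be constructed, assuming a `storage` value
/// implementing `sp_state_machine::Storage<Hasher>` and a known state `root` (both placeholders
/// here); not compiled as a doc-test:
///
/// ```ignore
/// let state: DbState<Hasher> = DbStateBuilder::<Hasher>::new(storage, root).build();
/// ```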
109pub type DbStateBuilder<Hasher> =
110	sp_state_machine::TrieBackendBuilder<Arc<dyn sp_state_machine::Storage<Hasher>>, Hasher>;
111
112/// Length of a [`DbHash`].
113const DB_HASH_LEN: usize = 32;
114
115/// Hash type that this backend uses for the database.
116pub type DbHash = sp_core::H256;
117
118/// An extrinsic entry in the database.
119#[derive(Debug, Encode, Decode)]
120enum DbExtrinsic<B: BlockT> {
121	/// Extrinsic that contains indexed data.
122	Indexed {
123		/// Hash of the indexed part.
124		hash: DbHash,
125		/// Extrinsic header.
126		header: Vec<u8>,
127	},
128	/// Complete extrinsic data.
129	Full(B::Extrinsic),
130}
131
132/// A reference tracking state.
133///
134/// It makes sure that the hash we are using stays pinned in storage
135/// until this structure is dropped.
136pub struct RefTrackingState<Block: BlockT> {
137	state: DbState<HashingFor<Block>>,
138	storage: Arc<StorageDb<Block>>,
139	parent_hash: Option<Block::Hash>,
140}
141
142impl<B: BlockT> RefTrackingState<B> {
143	fn new(
144		state: DbState<HashingFor<B>>,
145		storage: Arc<StorageDb<B>>,
146		parent_hash: Option<B::Hash>,
147	) -> Self {
148		RefTrackingState { state, parent_hash, storage }
149	}
150}
151
152impl<B: BlockT> Drop for RefTrackingState<B> {
153	fn drop(&mut self) {
154		if let Some(hash) = &self.parent_hash {
155			self.storage.state_db.unpin(hash);
156		}
157	}
158}
159
160impl<Block: BlockT> std::fmt::Debug for RefTrackingState<Block> {
161	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
162		write!(f, "Block {:?}", self.parent_hash)
163	}
164}
165
166/// A raw iterator over the `RefTrackingState`.
167pub struct RawIter<B: BlockT> {
168	inner: <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::RawIter,
169}
170
171impl<B: BlockT> StorageIterator<HashingFor<B>> for RawIter<B> {
172	type Backend = RefTrackingState<B>;
173	type Error = <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::Error;
174
175	fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey, Self::Error>> {
176		self.inner.next_key(&backend.state)
177	}
178
179	fn next_pair(
180		&mut self,
181		backend: &Self::Backend,
182	) -> Option<Result<(StorageKey, StorageValue), Self::Error>> {
183		self.inner.next_pair(&backend.state)
184	}
185
186	fn was_complete(&self) -> bool {
187		self.inner.was_complete()
188	}
189}
190
191impl<B: BlockT> StateBackend<HashingFor<B>> for RefTrackingState<B> {
192	type Error = <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::Error;
193	type TrieBackendStorage =
194		<DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
195	type RawIter = RawIter<B>;
196
197	fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
198		self.state.storage(key)
199	}
200
201	fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
202		self.state.storage_hash(key)
203	}
204
205	fn child_storage(
206		&self,
207		child_info: &ChildInfo,
208		key: &[u8],
209	) -> Result<Option<Vec<u8>>, Self::Error> {
210		self.state.child_storage(child_info, key)
211	}
212
213	fn child_storage_hash(
214		&self,
215		child_info: &ChildInfo,
216		key: &[u8],
217	) -> Result<Option<B::Hash>, Self::Error> {
218		self.state.child_storage_hash(child_info, key)
219	}
220
221	fn closest_merkle_value(
222		&self,
223		key: &[u8],
224	) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
225		self.state.closest_merkle_value(key)
226	}
227
228	fn child_closest_merkle_value(
229		&self,
230		child_info: &ChildInfo,
231		key: &[u8],
232	) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
233		self.state.child_closest_merkle_value(child_info, key)
234	}
235
236	fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
237		self.state.exists_storage(key)
238	}
239
240	fn exists_child_storage(
241		&self,
242		child_info: &ChildInfo,
243		key: &[u8],
244	) -> Result<bool, Self::Error> {
245		self.state.exists_child_storage(child_info, key)
246	}
247
248	fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
249		self.state.next_storage_key(key)
250	}
251
252	fn next_child_storage_key(
253		&self,
254		child_info: &ChildInfo,
255		key: &[u8],
256	) -> Result<Option<Vec<u8>>, Self::Error> {
257		self.state.next_child_storage_key(child_info, key)
258	}
259
260	fn storage_root<'a>(
261		&self,
262		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
263		state_version: StateVersion,
264	) -> (B::Hash, BackendTransaction<HashingFor<B>>) {
265		self.state.storage_root(delta, state_version)
266	}
267
268	fn child_storage_root<'a>(
269		&self,
270		child_info: &ChildInfo,
271		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
272		state_version: StateVersion,
273	) -> (B::Hash, bool, BackendTransaction<HashingFor<B>>) {
274		self.state.child_storage_root(child_info, delta, state_version)
275	}
276
277	fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
278		self.state.raw_iter(args).map(|inner| RawIter { inner })
279	}
280
281	fn register_overlay_stats(&self, stats: &StateMachineStats) {
282		self.state.register_overlay_stats(stats);
283	}
284
285	fn usage_info(&self) -> StateUsageInfo {
286		self.state.usage_info()
287	}
288}
289
290impl<B: BlockT> AsTrieBackend<HashingFor<B>> for RefTrackingState<B> {
291	type TrieBackendStorage =
292		<DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
293
294	fn as_trie_backend(
295		&self,
296	) -> &sp_state_machine::TrieBackend<Self::TrieBackendStorage, HashingFor<B>> {
297		&self.state.as_trie_backend()
298	}
299}
300
301/// Database settings.
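///
/// # Example
///
/// A hedged sketch of a typical configuration keeping only finalized data (the path is a
/// placeholder); not compiled as a doc-test:
///
/// ```ignore
/// let settings = DatabaseSettings {
///     // 64 MiB shared trie cache; `None` disables the cache entirely.
///     trie_cache_maximum_size: Some(64 * 1024 * 1024),
///     // Keep the state of finalized blocks only.
///     state_pruning: Some(PruningMode::ArchiveCanonical),
///     source: DatabaseSource::ParityDb { path: "/tmp/chain-db".into() },
///     // Keep the bodies of all finalized blocks.
///     blocks_pruning: BlocksPruning::KeepFinalized,
/// };
/// ```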
302pub struct DatabaseSettings {
303	/// The maximum trie cache size in bytes.
304	///
305	/// If `None` is given, the cache is disabled.
306	pub trie_cache_maximum_size: Option<usize>,
307	/// Requested state pruning mode.
308	pub state_pruning: Option<PruningMode>,
309	/// Where to find the database.
310	pub source: DatabaseSource,
311	/// Block pruning mode.
312	///
313	/// NOTE: only finalized blocks are subject to removal!
314	pub blocks_pruning: BlocksPruning,
315}
316
317/// Block pruning settings.
318#[derive(Debug, Clone, Copy, PartialEq)]
319pub enum BlocksPruning {
320	/// Keep the full block history of every block that was ever imported.
321	KeepAll,
322	/// Keep full finalized block history.
323	KeepFinalized,
324	/// Keep N recent finalized blocks.
325	Some(u32),
326}
327
328impl BlocksPruning {
329	/// True if this is an archive pruning mode (either `KeepAll` or `KeepFinalized`).
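	///
	/// A small sketch of the expected behaviour (illustrative only, not compiled as a doc-test):
	///
	/// ```ignore
	/// assert!(BlocksPruning::KeepAll.is_archive());
	/// assert!(BlocksPruning::KeepFinalized.is_archive());
	/// assert!(!BlocksPruning::Some(256).is_archive());
	/// ```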
330	pub fn is_archive(&self) -> bool {
331		match *self {
332			BlocksPruning::KeepAll | BlocksPruning::KeepFinalized => true,
333			BlocksPruning::Some(_) => false,
334		}
335	}
336}
337
338/// Where to find the database.
339#[derive(Debug, Clone)]
340pub enum DatabaseSource {
341	/// Check given path, and see if there is an existing database there. If it's either `RocksDb`
342	/// or `ParityDb`, use it. If there is none, create a new instance of `ParityDb`.
343	Auto {
344		/// Path to the paritydb database.
345		paritydb_path: PathBuf,
346		/// Path to the rocksdb database.
347		rocksdb_path: PathBuf,
348		/// Cache size in MiB. Used only by `RocksDb` variant of `DatabaseSource`.
349		cache_size: usize,
350	},
351	/// Load a RocksDB database from a given path. Recommended for most uses.
352	#[cfg(feature = "rocksdb")]
353	RocksDb {
354		/// Path to the database.
355		path: PathBuf,
356		/// Cache size in MiB.
357		cache_size: usize,
358	},
359
360	/// Load a ParityDb database from a given path.
361	ParityDb {
362		/// Path to the database.
363		path: PathBuf,
364	},
365
366	/// Use a custom already-open database.
367	Custom {
368		/// The handle to the custom storage.
369		db: Arc<dyn Database<DbHash>>,
370
371		/// If set, the `create` flag will be required to open such a datasource.
372		require_create_flag: bool,
373	},
374}
375
376impl DatabaseSource {
377	/// Return path for databases that are stored on disk.
378	pub fn path(&self) -> Option<&Path> {
379		match self {
380			// as per https://github.com/paritytech/substrate/pull/9500#discussion_r684312550
381			//
382			// IIUC this is needed so that Polkadot can create its own databases: until it can use
383			// parity-db this would be rocksdb, and later parity-db.
384			DatabaseSource::Auto { paritydb_path, .. } => Some(paritydb_path),
385			#[cfg(feature = "rocksdb")]
386			DatabaseSource::RocksDb { path, .. } => Some(path),
387			DatabaseSource::ParityDb { path } => Some(path),
388			DatabaseSource::Custom { .. } => None,
389		}
390	}
391
392	/// Set path for databases that are stored on disk.
393	pub fn set_path(&mut self, p: &Path) -> bool {
394		match self {
395			DatabaseSource::Auto { ref mut paritydb_path, .. } => {
396				*paritydb_path = p.into();
397				true
398			},
399			#[cfg(feature = "rocksdb")]
400			DatabaseSource::RocksDb { ref mut path, .. } => {
401				*path = p.into();
402				true
403			},
404			DatabaseSource::ParityDb { ref mut path } => {
405				*path = p.into();
406				true
407			},
408			DatabaseSource::Custom { .. } => false,
409		}
410	}
411}
412
413impl std::fmt::Display for DatabaseSource {
414	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
415		let name = match self {
416			DatabaseSource::Auto { .. } => "Auto",
417			#[cfg(feature = "rocksdb")]
418			DatabaseSource::RocksDb { .. } => "RocksDb",
419			DatabaseSource::ParityDb { .. } => "ParityDb",
420			DatabaseSource::Custom { .. } => "Custom",
421		};
422		write!(f, "{}", name)
423	}
424}
425
426pub(crate) mod columns {
427	pub const META: u32 = crate::utils::COLUMN_META;
428	pub const STATE: u32 = 1;
429	pub const STATE_META: u32 = 2;
430	/// Maps hashes to lookup keys and numbers to canonical hashes.
431	pub const KEY_LOOKUP: u32 = 3;
432	pub const HEADER: u32 = 4;
433	pub const BODY: u32 = 5;
434	pub const JUSTIFICATIONS: u32 = 6;
435	pub const AUX: u32 = 8;
436	/// Offchain workers' local storage
437	pub const OFFCHAIN: u32 = 9;
438	/// Transactions
439	pub const TRANSACTION: u32 = 11;
440	pub const BODY_INDEX: u32 = 12;
441}
442
443struct PendingBlock<Block: BlockT> {
444	header: Block::Header,
445	justifications: Option<Justifications>,
446	body: Option<Vec<Block::Extrinsic>>,
447	indexed_body: Option<Vec<Vec<u8>>>,
448	leaf_state: NewBlockState,
449}
450
451// Wrapper that implements the trait required by `state_db`
452#[derive(Clone)]
453struct StateMetaDb(Arc<dyn Database<DbHash>>);
454
455impl sc_state_db::MetaDb for StateMetaDb {
456	type Error = sp_database::error::DatabaseError;
457
458	fn get_meta(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
459		Ok(self.0.get(columns::STATE_META, key))
460	}
461}
462
463struct MetaUpdate<Block: BlockT> {
464	pub hash: Block::Hash,
465	pub number: NumberFor<Block>,
466	pub is_best: bool,
467	pub is_finalized: bool,
468	pub with_state: bool,
469}
470
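/// Insert `header` into the bounded header cache, evicting entries from the front (the least
/// recently inserted or refreshed ones) until at most [`CACHE_HEADERS`] entries remain.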
471fn cache_header<Hash: std::cmp::Eq + std::hash::Hash, Header>(
472	cache: &mut LinkedHashMap<Hash, Option<Header>>,
473	hash: Hash,
474	header: Option<Header>,
475) {
476	cache.insert(hash, header);
477	while cache.len() > CACHE_HEADERS {
478		cache.pop_front();
479	}
480}
481
482/// Block database
483pub struct BlockchainDb<Block: BlockT> {
484	db: Arc<dyn Database<DbHash>>,
485	meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
486	leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
487	header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
488	header_cache: Mutex<LinkedHashMap<Block::Hash, Option<Block::Header>>>,
489	pinned_blocks_cache: Arc<RwLock<PinnedBlocksCache<Block>>>,
490}
491
492impl<Block: BlockT> BlockchainDb<Block> {
493	fn new(db: Arc<dyn Database<DbHash>>) -> ClientResult<Self> {
494		let meta = read_meta::<Block>(&*db, columns::HEADER)?;
495		let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?;
496		Ok(BlockchainDb {
497			db,
498			leaves: RwLock::new(leaves),
499			meta: Arc::new(RwLock::new(meta)),
500			header_metadata_cache: Arc::new(HeaderMetadataCache::default()),
501			header_cache: Default::default(),
502			pinned_blocks_cache: Arc::new(RwLock::new(PinnedBlocksCache::new())),
503		})
504	}
505
506	fn update_meta(&self, update: MetaUpdate<Block>) {
507		let MetaUpdate { hash, number, is_best, is_finalized, with_state } = update;
508		let mut meta = self.meta.write();
509		if number.is_zero() {
510			meta.genesis_hash = hash;
511		}
512
513		if is_best {
514			meta.best_number = number;
515			meta.best_hash = hash;
516		}
517
518		if is_finalized {
519			if with_state {
520				meta.finalized_state = Some((hash, number));
521			}
522			meta.finalized_number = number;
523			meta.finalized_hash = hash;
524		}
525	}
526
527	fn update_block_gap(&self, gap: Option<BlockGap<NumberFor<Block>>>) {
528		let mut meta = self.meta.write();
529		meta.block_gap = gap;
530	}
531
532	/// Empty the cache of pinned items.
533	fn clear_pinning_cache(&self) {
534		self.pinned_blocks_cache.write().clear();
535	}
536
537	/// Load a justification into the cache of pinned items.
538	/// The reference count of the item will not be increased. Use this
539	/// to load values into the cache for items that have already been pinned.
540	fn insert_justifications_if_pinned(&self, hash: Block::Hash, justification: Justification) {
541		let mut cache = self.pinned_blocks_cache.write();
542		if !cache.contains(hash) {
543			return;
544		}
545
546		let justifications = Justifications::from(justification);
547		cache.insert_justifications(hash, Some(justifications));
548	}
549
550	/// Load a justification from the db into the cache of pinned items.
551	/// The reference count of the item will not be increased. Use this
552	/// to load values into the cache for items that have already been pinned.
553	fn insert_persisted_justifications_if_pinned(&self, hash: Block::Hash) -> ClientResult<()> {
554		let mut cache = self.pinned_blocks_cache.write();
555		if !cache.contains(hash) {
556			return Ok(());
557		}
558
559		let justifications = self.justifications_uncached(hash)?;
560		cache.insert_justifications(hash, justifications);
561		Ok(())
562	}
563
564	/// Load a block body from the db into the cache of pinned items.
565	/// The reference count of the item will not be increased. Use this
566	/// to load values into the cache for items that have already been pinned.
567	fn insert_persisted_body_if_pinned(&self, hash: Block::Hash) -> ClientResult<()> {
568		let mut cache = self.pinned_blocks_cache.write();
569		if !cache.contains(hash) {
570			return Ok(());
571		}
572
573		let body = self.body_uncached(hash)?;
574		cache.insert_body(hash, body);
575		Ok(())
576	}
577
578	/// Bump reference count for pinned item.
579	fn bump_ref(&self, hash: Block::Hash) {
580		self.pinned_blocks_cache.write().pin(hash);
581	}
582
583	/// Decrease reference count for pinned item and remove if reference count is 0.
584	fn unpin(&self, hash: Block::Hash) {
585		self.pinned_blocks_cache.write().unpin(hash);
586	}
587
588	fn justifications_uncached(&self, hash: Block::Hash) -> ClientResult<Option<Justifications>> {
589		match read_db(
590			&*self.db,
591			columns::KEY_LOOKUP,
592			columns::JUSTIFICATIONS,
593			BlockId::<Block>::Hash(hash),
594		)? {
595			Some(justifications) => match Decode::decode(&mut &justifications[..]) {
596				Ok(justifications) => Ok(Some(justifications)),
597				Err(err) =>
598					return Err(sp_blockchain::Error::Backend(format!(
599						"Error decoding justifications: {err}"
600					))),
601			},
602			None => Ok(None),
603		}
604	}
605
606	fn body_uncached(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
607		if let Some(body) =
608			read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::<Block>(hash))?
609		{
610			// Plain body
611			match Decode::decode(&mut &body[..]) {
612				Ok(body) => return Ok(Some(body)),
613				Err(err) =>
614					return Err(sp_blockchain::Error::Backend(format!("Error decoding body: {err}"))),
615			}
616		}
617
618		if let Some(index) = read_db(
619			&*self.db,
620			columns::KEY_LOOKUP,
621			columns::BODY_INDEX,
622			BlockId::Hash::<Block>(hash),
623		)? {
624			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
625				Ok(index) => {
626					let mut body = Vec::new();
627					for ex in index {
628						match ex {
629							DbExtrinsic::Indexed { hash, header } => {
630								match self.db.get(columns::TRANSACTION, hash.as_ref()) {
631									Some(t) => {
632										let mut input =
633											utils::join_input(header.as_ref(), t.as_ref());
634										let ex = Block::Extrinsic::decode(&mut input).map_err(
635											|err| {
636												sp_blockchain::Error::Backend(format!(
637													"Error decoding indexed extrinsic: {err}"
638												))
639											},
640										)?;
641										body.push(ex);
642									},
643									None =>
644										return Err(sp_blockchain::Error::Backend(format!(
645											"Missing indexed transaction {hash:?}"
646										))),
647								};
648							},
649							DbExtrinsic::Full(ex) => {
650								body.push(ex);
651							},
652						}
653					}
654					return Ok(Some(body));
655				},
656				Err(err) =>
657					return Err(sp_blockchain::Error::Backend(format!(
658						"Error decoding body list: {err}",
659					))),
660			}
661		}
662		Ok(None)
663	}
664}
665
666impl<Block: BlockT> sc_client_api::blockchain::HeaderBackend<Block> for BlockchainDb<Block> {
667	fn header(&self, hash: Block::Hash) -> ClientResult<Option<Block::Header>> {
668		let mut cache = self.header_cache.lock();
669		if let Some(result) = cache.get_refresh(&hash) {
670			return Ok(result.clone());
671		}
672		let header = utils::read_header(
673			&*self.db,
674			columns::KEY_LOOKUP,
675			columns::HEADER,
676			BlockId::<Block>::Hash(hash),
677		)?;
678		cache_header(&mut cache, hash, header.clone());
679		Ok(header)
680	}
681
682	fn info(&self) -> sc_client_api::blockchain::Info<Block> {
683		let meta = self.meta.read();
684		sc_client_api::blockchain::Info {
685			best_hash: meta.best_hash,
686			best_number: meta.best_number,
687			genesis_hash: meta.genesis_hash,
688			finalized_hash: meta.finalized_hash,
689			finalized_number: meta.finalized_number,
690			finalized_state: meta.finalized_state,
691			number_leaves: self.leaves.read().count(),
692			block_gap: meta.block_gap,
693		}
694	}
695
696	fn status(&self, hash: Block::Hash) -> ClientResult<sc_client_api::blockchain::BlockStatus> {
697		match self.header(hash)?.is_some() {
698			true => Ok(sc_client_api::blockchain::BlockStatus::InChain),
699			false => Ok(sc_client_api::blockchain::BlockStatus::Unknown),
700		}
701	}
702
703	fn number(&self, hash: Block::Hash) -> ClientResult<Option<NumberFor<Block>>> {
704		Ok(self.header_metadata(hash).ok().map(|header_metadata| header_metadata.number))
705	}
706
707	fn hash(&self, number: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
708		Ok(utils::read_header::<Block>(
709			&*self.db,
710			columns::KEY_LOOKUP,
711			columns::HEADER,
712			BlockId::Number(number),
713		)?
714		.map(|header| header.hash()))
715	}
716}
717
718impl<Block: BlockT> sc_client_api::blockchain::Backend<Block> for BlockchainDb<Block> {
719	fn body(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
720		let cache = self.pinned_blocks_cache.read();
721		if let Some(result) = cache.body(&hash) {
722			return Ok(result.clone());
723		}
724
725		self.body_uncached(hash)
726	}
727
728	fn justifications(&self, hash: Block::Hash) -> ClientResult<Option<Justifications>> {
729		let cache = self.pinned_blocks_cache.read();
730		if let Some(result) = cache.justifications(&hash) {
731			return Ok(result.clone());
732		}
733
734		self.justifications_uncached(hash)
735	}
736
737	fn last_finalized(&self) -> ClientResult<Block::Hash> {
738		Ok(self.meta.read().finalized_hash)
739	}
740
741	fn leaves(&self) -> ClientResult<Vec<Block::Hash>> {
742		Ok(self.leaves.read().hashes())
743	}
744
745	fn children(&self, parent_hash: Block::Hash) -> ClientResult<Vec<Block::Hash>> {
746		children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash)
747	}
748
749	fn indexed_transaction(&self, hash: Block::Hash) -> ClientResult<Option<Vec<u8>>> {
750		Ok(self.db.get(columns::TRANSACTION, hash.as_ref()))
751	}
752
753	fn has_indexed_transaction(&self, hash: Block::Hash) -> ClientResult<bool> {
754		Ok(self.db.contains(columns::TRANSACTION, hash.as_ref()))
755	}
756
757	fn block_indexed_body(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Vec<u8>>>> {
758		let body = match read_db(
759			&*self.db,
760			columns::KEY_LOOKUP,
761			columns::BODY_INDEX,
762			BlockId::<Block>::Hash(hash),
763		)? {
764			Some(body) => body,
765			None => return Ok(None),
766		};
767		match Vec::<DbExtrinsic<Block>>::decode(&mut &body[..]) {
768			Ok(index) => {
769				let mut transactions = Vec::new();
770				for ex in index.into_iter() {
771					if let DbExtrinsic::Indexed { hash, .. } = ex {
772						match self.db.get(columns::TRANSACTION, hash.as_ref()) {
773							Some(t) => transactions.push(t),
774							None =>
775								return Err(sp_blockchain::Error::Backend(format!(
776									"Missing indexed transaction {hash:?}",
777								))),
778						}
779					}
780				}
781				Ok(Some(transactions))
782			},
783			Err(err) =>
784				Err(sp_blockchain::Error::Backend(format!("Error decoding body list: {err}"))),
785		}
786	}
787}
788
789impl<Block: BlockT> HeaderMetadata<Block> for BlockchainDb<Block> {
790	type Error = sp_blockchain::Error;
791
792	fn header_metadata(
793		&self,
794		hash: Block::Hash,
795	) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
796		self.header_metadata_cache.header_metadata(hash).map_or_else(
797			|| {
798				self.header(hash)?
799					.map(|header| {
800						let header_metadata = CachedHeaderMetadata::from(&header);
801						self.header_metadata_cache
802							.insert_header_metadata(header_metadata.hash, header_metadata.clone());
803						header_metadata
804					})
805					.ok_or_else(|| {
806						ClientError::UnknownBlock(format!(
807							"Header was not found in the database: {hash:?}",
808						))
809					})
810			},
811			Ok,
812		)
813	}
814
815	fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
816		self.header_metadata_cache.insert_header_metadata(hash, metadata)
817	}
818
819	fn remove_header_metadata(&self, hash: Block::Hash) {
820		self.header_cache.lock().remove(&hash);
821		self.header_metadata_cache.remove_header_metadata(hash);
822	}
823}
824
825/// Database transaction
826pub struct BlockImportOperation<Block: BlockT> {
827	old_state: RecordStatsState<RefTrackingState<Block>, Block>,
828	db_updates: PrefixedMemoryDB<HashingFor<Block>>,
829	storage_updates: StorageCollection,
830	child_storage_updates: ChildStorageCollection,
831	offchain_storage_updates: OffchainChangesCollection,
832	pending_block: Option<PendingBlock<Block>>,
833	aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
834	finalized_blocks: Vec<(Block::Hash, Option<Justification>)>,
835	set_head: Option<Block::Hash>,
836	commit_state: bool,
837	create_gap: bool,
838	index_ops: Vec<IndexOperation>,
839}
840
841impl<Block: BlockT> BlockImportOperation<Block> {
842	fn apply_offchain(&mut self, transaction: &mut Transaction<DbHash>) {
843		let mut count = 0;
844		for ((prefix, key), value_operation) in self.offchain_storage_updates.drain(..) {
845			count += 1;
846			let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key);
847			match value_operation {
848				OffchainOverlayedChange::SetValue(val) =>
849					transaction.set_from_vec(columns::OFFCHAIN, &key, val),
850				OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key),
851			}
852		}
853
854		if count > 0 {
855			log::debug!(target: "sc_offchain", "Applied {count} offchain indexing changes.");
856		}
857	}
858
859	fn apply_aux(&mut self, transaction: &mut Transaction<DbHash>) {
860		for (key, maybe_val) in self.aux_ops.drain(..) {
861			match maybe_val {
862				Some(val) => transaction.set_from_vec(columns::AUX, &key, val),
863				None => transaction.remove(columns::AUX, &key),
864			}
865		}
866	}
867
868	fn apply_new_state(
869		&mut self,
870		storage: Storage,
871		state_version: StateVersion,
872	) -> ClientResult<Block::Hash> {
873		if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(k)) {
874			return Err(sp_blockchain::Error::InvalidState);
875		}
876
877		let child_delta = storage.children_default.values().map(|child_content| {
878			(
879				&child_content.child_info,
880				child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
881			)
882		});
883
884		let (root, transaction) = self.old_state.full_storage_root(
885			storage.top.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
886			child_delta,
887			state_version,
888		);
889
890		self.db_updates = transaction;
891		Ok(root)
892	}
893}
894
895impl<Block: BlockT> sc_client_api::backend::BlockImportOperation<Block>
896	for BlockImportOperation<Block>
897{
898	type State = RecordStatsState<RefTrackingState<Block>, Block>;
899
900	fn state(&self) -> ClientResult<Option<&Self::State>> {
901		Ok(Some(&self.old_state))
902	}
903
904	fn set_block_data(
905		&mut self,
906		header: Block::Header,
907		body: Option<Vec<Block::Extrinsic>>,
908		indexed_body: Option<Vec<Vec<u8>>>,
909		justifications: Option<Justifications>,
910		leaf_state: NewBlockState,
911	) -> ClientResult<()> {
912		assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
913		self.pending_block =
914			Some(PendingBlock { header, body, indexed_body, justifications, leaf_state });
915		Ok(())
916	}
917
918	fn update_db_storage(
919		&mut self,
920		update: PrefixedMemoryDB<HashingFor<Block>>,
921	) -> ClientResult<()> {
922		self.db_updates = update;
923		Ok(())
924	}
925
926	fn reset_storage(
927		&mut self,
928		storage: Storage,
929		state_version: StateVersion,
930	) -> ClientResult<Block::Hash> {
931		let root = self.apply_new_state(storage, state_version)?;
932		self.commit_state = true;
933		Ok(root)
934	}
935
936	fn set_genesis_state(
937		&mut self,
938		storage: Storage,
939		commit: bool,
940		state_version: StateVersion,
941	) -> ClientResult<Block::Hash> {
942		let root = self.apply_new_state(storage, state_version)?;
943		self.commit_state = commit;
944		Ok(root)
945	}
946
947	fn insert_aux<I>(&mut self, ops: I) -> ClientResult<()>
948	where
949		I: IntoIterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
950	{
951		self.aux_ops.append(&mut ops.into_iter().collect());
952		Ok(())
953	}
954
955	fn update_storage(
956		&mut self,
957		update: StorageCollection,
958		child_update: ChildStorageCollection,
959	) -> ClientResult<()> {
960		self.storage_updates = update;
961		self.child_storage_updates = child_update;
962		Ok(())
963	}
964
965	fn update_offchain_storage(
966		&mut self,
967		offchain_update: OffchainChangesCollection,
968	) -> ClientResult<()> {
969		self.offchain_storage_updates = offchain_update;
970		Ok(())
971	}
972
973	fn mark_finalized(
974		&mut self,
975		block: Block::Hash,
976		justification: Option<Justification>,
977	) -> ClientResult<()> {
978		self.finalized_blocks.push((block, justification));
979		Ok(())
980	}
981
982	fn mark_head(&mut self, hash: Block::Hash) -> ClientResult<()> {
983		assert!(self.set_head.is_none(), "Only one set head per operation is allowed");
984		self.set_head = Some(hash);
985		Ok(())
986	}
987
988	fn update_transaction_index(&mut self, index_ops: Vec<IndexOperation>) -> ClientResult<()> {
989		self.index_ops = index_ops;
990		Ok(())
991	}
992
993	fn set_create_gap(&mut self, create_gap: bool) {
994		self.create_gap = create_gap;
995	}
996}
997
998struct StorageDb<Block: BlockT> {
999	pub db: Arc<dyn Database<DbHash>>,
1000	pub state_db: StateDb<Block::Hash, Vec<u8>, StateMetaDb>,
1001	prefix_keys: bool,
1002}
1003
1004impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for StorageDb<Block> {
1005	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
1006		if self.prefix_keys {
1007			let key = prefixed_key::<HashingFor<Block>>(key, prefix);
1008			self.state_db.get(&key, self)
1009		} else {
1010			self.state_db.get(key.as_ref(), self)
1011		}
1012		.map_err(|e| format!("Database backend error: {e:?}"))
1013	}
1014}
1015
1016impl<Block: BlockT> sc_state_db::NodeDb for StorageDb<Block> {
1017	type Error = io::Error;
1018	type Key = [u8];
1019
1020	fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
1021		Ok(self.db.get(columns::STATE, key))
1022	}
1023}
1024
1025struct DbGenesisStorage<Block: BlockT> {
1026	root: Block::Hash,
1027	storage: PrefixedMemoryDB<HashingFor<Block>>,
1028}
1029
1030impl<Block: BlockT> DbGenesisStorage<Block> {
1031	pub fn new(root: Block::Hash, storage: PrefixedMemoryDB<HashingFor<Block>>) -> Self {
1032		DbGenesisStorage { root, storage }
1033	}
1034}
1035
1036impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for DbGenesisStorage<Block> {
1037	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
1038		use hash_db::HashDB;
1039		Ok(self.storage.get(key, prefix))
1040	}
1041}
1042
1043struct EmptyStorage<Block: BlockT>(pub Block::Hash);
1044
1045impl<Block: BlockT> EmptyStorage<Block> {
1046	pub fn new() -> Self {
1047		let mut root = Block::Hash::default();
1048		let mut mdb = MemoryDB::<HashingFor<Block>>::default();
1049		// Both trie versions (V0 and V1) produce the same root for empty storage.
1050		sp_trie::trie_types::TrieDBMutBuilderV1::<HashingFor<Block>>::new(&mut mdb, &mut root)
1051			.build();
1052		EmptyStorage(root)
1053	}
1054}
1055
1056impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for EmptyStorage<Block> {
1057	fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result<Option<DBValue>, String> {
1058		Ok(None)
1059	}
1060}
1061
1062/// Frozen `value` at time `at`.
1063///
1064/// Used as inner structure under lock in `FrozenForDuration`.
1065struct Frozen<T: Clone> {
1066	at: std::time::Instant,
1067	value: Option<T>,
1068}
1069
1070/// Some value frozen for period of time.
1071///
1072/// If `duration` has not passed since the value was instantiated, the
1073/// current frozen value is returned. Otherwise, you have to provide
1074/// a new value which will be again frozen for `duration`.
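///
/// # Example
///
/// An illustrative sketch of the intended usage (`gather_io_stats` is a hypothetical, potentially
/// expensive closure); not compiled as a doc-test:
///
/// ```ignore
/// let frozen = FrozenForDuration::new(std::time::Duration::from_secs(1));
/// let a = frozen.take_or_else(gather_io_stats); // computes and freezes the value
/// let b = frozen.take_or_else(gather_io_stats); // within 1s: returns the frozen copy instead
/// ```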
1075pub(crate) struct FrozenForDuration<T: Clone> {
1076	duration: std::time::Duration,
1077	value: parking_lot::Mutex<Frozen<T>>,
1078}
1079
1080impl<T: Clone> FrozenForDuration<T> {
1081	fn new(duration: std::time::Duration) -> Self {
1082		Self { duration, value: Frozen { at: std::time::Instant::now(), value: None }.into() }
1083	}
1084
1085	fn take_or_else<F>(&self, f: F) -> T
1086	where
1087		F: FnOnce() -> T,
1088	{
1089		let mut lock = self.value.lock();
1090		let now = std::time::Instant::now();
1091		if now.saturating_duration_since(lock.at) > self.duration || lock.value.is_none() {
1092			let new_value = f();
1093			lock.at = now;
1094			lock.value = Some(new_value.clone());
1095			new_value
1096		} else {
1097			lock.value.as_ref().expect("Checked in the branch above; qed").clone()
1098		}
1099	}
1100}
1101
1102/// Disk backend.
1103///
1104/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all
1105/// blocks. Otherwise, trie nodes are kept only from some recent blocks.
1106pub struct Backend<Block: BlockT> {
1107	storage: Arc<StorageDb<Block>>,
1108	offchain_storage: offchain::LocalStorage,
1109	blockchain: BlockchainDb<Block>,
1110	canonicalization_delay: u64,
1111	import_lock: Arc<RwLock<()>>,
1112	is_archive: bool,
1113	blocks_pruning: BlocksPruning,
1114	io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>,
1115	state_usage: Arc<StateUsageStats>,
1116	genesis_state: RwLock<Option<Arc<DbGenesisStorage<Block>>>>,
1117	shared_trie_cache: Option<sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
1118}
1119
1120impl<Block: BlockT> Backend<Block> {
1121	/// Create a new instance of database backend.
1122	///
1123	/// The pruning window is how old a block must be before the state is pruned.
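	///
	/// # Example
	///
	/// A hedged sketch of opening a ParityDb-backed backend with a 256-block pruning window (the
	/// path and the numbers are placeholders); not compiled as a doc-test:
	///
	/// ```ignore
	/// let backend = Backend::<Block>::new(
	///     DatabaseSettings {
	///         trie_cache_maximum_size: Some(64 * 1024 * 1024),
	///         state_pruning: Some(PruningMode::blocks_pruning(256)),
	///         source: DatabaseSource::ParityDb { path: "/tmp/chain-db".into() },
	///         blocks_pruning: BlocksPruning::Some(256),
	///     },
	///     // Delay, in blocks, before imported non-finalized blocks are canonicalized.
	///     100,
	/// )?;
	/// ```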
1124	pub fn new(db_config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult<Self> {
1125		use utils::OpenDbError;
1126
1127		let db_source = &db_config.source;
1128
1129		let (needs_init, db) =
1130			match crate::utils::open_database::<Block>(db_source, DatabaseType::Full, false) {
1131				Ok(db) => (false, db),
1132				Err(OpenDbError::DoesNotExist) => {
1133					let db =
1134						crate::utils::open_database::<Block>(db_source, DatabaseType::Full, true)?;
1135					(true, db)
1136				},
1137				Err(as_is) => return Err(as_is.into()),
1138			};
1139
1140		Self::from_database(db as Arc<_>, canonicalization_delay, &db_config, needs_init)
1141	}
1142
1143	/// Reset the shared trie cache.
1144	pub fn reset_trie_cache(&self) {
1145		if let Some(cache) = &self.shared_trie_cache {
1146			cache.reset();
1147		}
1148	}
1149
1150	/// Create new memory-backed client backend for tests.
1151	#[cfg(any(test, feature = "test-helpers"))]
1152	pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self {
1153		Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay)
1154	}
1155
1156	/// Create new memory-backed client backend for tests.
1157	#[cfg(any(test, feature = "test-helpers"))]
1158	pub fn new_test_with_tx_storage(
1159		blocks_pruning: BlocksPruning,
1160		canonicalization_delay: u64,
1161	) -> Self {
1162		let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS);
1163		let db = sp_database::as_database(db);
1164		let state_pruning = match blocks_pruning {
1165			BlocksPruning::KeepAll => PruningMode::ArchiveAll,
1166			BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical,
1167			BlocksPruning::Some(n) => PruningMode::blocks_pruning(n),
1168		};
1169		let db_setting = DatabaseSettings {
1170			trie_cache_maximum_size: Some(16 * 1024 * 1024),
1171			state_pruning: Some(state_pruning),
1172			source: DatabaseSource::Custom { db, require_create_flag: true },
1173			blocks_pruning,
1174		};
1175
1176		Self::new(db_setting, canonicalization_delay).expect("failed to create test-db")
1177	}
1178
1179	/// Expose the Database that is used by this backend.
1180	/// The second element of the returned tuple is the column that stores the state.
1181	///
1182	/// Should only be needed for benchmarking.
1183	#[cfg(any(feature = "runtime-benchmarks"))]
1184	pub fn expose_db(&self) -> (Arc<dyn sp_database::Database<DbHash>>, sp_database::ColumnId) {
1185		(self.storage.db.clone(), columns::STATE)
1186	}
1187
1188	/// Expose the Storage that is used by this backend.
1189	///
1190	/// Should only be needed for benchmarking.
1191	#[cfg(any(feature = "runtime-benchmarks"))]
1192	pub fn expose_storage(&self) -> Arc<dyn sp_state_machine::Storage<HashingFor<Block>>> {
1193		self.storage.clone()
1194	}
1195
1196	fn from_database(
1197		db: Arc<dyn Database<DbHash>>,
1198		canonicalization_delay: u64,
1199		config: &DatabaseSettings,
1200		should_init: bool,
1201	) -> ClientResult<Self> {
1202		let mut db_init_transaction = Transaction::new();
1203
1204		let requested_state_pruning = config.state_pruning.clone();
1205		let state_meta_db = StateMetaDb(db.clone());
1206		let map_e = sp_blockchain::Error::from_state_db;
1207
1208		let (state_db_init_commit_set, state_db) = StateDb::open(
1209			state_meta_db,
1210			requested_state_pruning,
1211			!db.supports_ref_counting(),
1212			should_init,
1213		)
1214		.map_err(map_e)?;
1215
1216		apply_state_commit(&mut db_init_transaction, state_db_init_commit_set);
1217
1218		let state_pruning_used = state_db.pruning_mode();
1219		let is_archive_pruning = state_pruning_used.is_archive();
1220		let blockchain = BlockchainDb::new(db.clone())?;
1221
1222		let storage_db =
1223			StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() };
1224
1225		let offchain_storage = offchain::LocalStorage::new(db.clone());
1226
1227		let backend = Backend {
1228			storage: Arc::new(storage_db),
1229			offchain_storage,
1230			blockchain,
1231			canonicalization_delay,
1232			import_lock: Default::default(),
1233			is_archive: is_archive_pruning,
1234			io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)),
1235			state_usage: Arc::new(StateUsageStats::new()),
1236			blocks_pruning: config.blocks_pruning,
1237			genesis_state: RwLock::new(None),
1238			shared_trie_cache: config.trie_cache_maximum_size.map(|maximum_size| {
1239				SharedTrieCache::new(sp_trie::cache::CacheSize::new(maximum_size))
1240			}),
1241		};
1242
1243		// Older DB versions have no last state key. Check if the state is available and set it.
1244		let info = backend.blockchain.info();
1245		if info.finalized_state.is_none() &&
1246			info.finalized_hash != Default::default() &&
1247			sc_client_api::Backend::have_state_at(
1248				&backend,
1249				info.finalized_hash,
1250				info.finalized_number,
1251			) {
1252			backend.blockchain.update_meta(MetaUpdate {
1253				hash: info.finalized_hash,
1254				number: info.finalized_number,
1255				is_best: info.finalized_hash == info.best_hash,
1256				is_finalized: true,
1257				with_state: true,
1258			});
1259		}
1260
1261		db.commit(db_init_transaction)?;
1262
1263		Ok(backend)
1264	}
1265
1266	/// Handle setting head within a transaction. `route_to` should be the last
1267	/// block that existed in the database. `best_to` should be the best block
1268	/// to be set.
1269	///
1270	/// In the case where the new best block is a block to be imported, `route_to`
1271	/// should be the parent of `best_to`. In the case where we set an existing block
1272	/// to be best, `route_to` should equal `best_to`.
1273	fn set_head_with_transaction(
1274		&self,
1275		transaction: &mut Transaction<DbHash>,
1276		route_to: Block::Hash,
1277		best_to: (NumberFor<Block>, Block::Hash),
1278	) -> ClientResult<(Vec<Block::Hash>, Vec<Block::Hash>)> {
1279		let mut enacted = Vec::default();
1280		let mut retracted = Vec::default();
1281
1282		let (best_number, best_hash) = best_to;
1283
1284		let meta = self.blockchain.meta.read();
1285
1286		if meta.best_number.saturating_sub(best_number).saturated_into::<u64>() >
1287			self.canonicalization_delay
1288		{
1289			return Err(sp_blockchain::Error::SetHeadTooOld);
1290		}
1291
1292		let parent_exists =
1293			self.blockchain.status(route_to)? == sp_blockchain::BlockStatus::InChain;
1294
1295		// Cannot find a tree route with an empty DB or when a detached block was imported.
1296		if meta.best_hash != Default::default() && parent_exists {
1297			let tree_route = sp_blockchain::tree_route(&self.blockchain, meta.best_hash, route_to)?;
1298
1299			// uncanonicalize: check safety violations and ensure the numbers no longer
1300			// point to these block hashes in the key mapping.
1301			for r in tree_route.retracted() {
1302				if r.hash == meta.finalized_hash {
1303					warn!(
1304						"Potential safety failure: reverting finalized block {:?}",
1305						(&r.number, &r.hash)
1306					);
1307
1308					return Err(sp_blockchain::Error::NotInFinalizedChain);
1309				}
1310
1311				retracted.push(r.hash);
1312				utils::remove_number_to_key_mapping(transaction, columns::KEY_LOOKUP, r.number)?;
1313			}
1314
1315			// canonicalize: set the number lookup to map to this block's hash.
1316			for e in tree_route.enacted() {
1317				enacted.push(e.hash);
1318				utils::insert_number_to_key_mapping(
1319					transaction,
1320					columns::KEY_LOOKUP,
1321					e.number,
1322					e.hash,
1323				)?;
1324			}
1325		}
1326
1327		let lookup_key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?;
1328		transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key);
1329		utils::insert_number_to_key_mapping(
1330			transaction,
1331			columns::KEY_LOOKUP,
1332			best_number,
1333			best_hash,
1334		)?;
1335
1336		Ok((enacted, retracted))
1337	}
1338
1339	fn ensure_sequential_finalization(
1340		&self,
1341		header: &Block::Header,
1342		last_finalized: Option<Block::Hash>,
1343	) -> ClientResult<()> {
1344		let last_finalized =
1345			last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash);
1346		if last_finalized != self.blockchain.meta.read().genesis_hash &&
1347			*header.parent_hash() != last_finalized
1348		{
1349			return Err(sp_blockchain::Error::NonSequentialFinalization(format!(
1350				"Last finalized {last_finalized:?} not parent of {:?}",
1351				header.hash()
1352			)));
1353		}
1354		Ok(())
1355	}
1356
1357	/// For performance reasons, `remove_displaced` can be set to `false` if this is not the last
1358	/// of many subsequent calls.
1359	fn finalize_block_with_transaction(
1360		&self,
1361		transaction: &mut Transaction<DbHash>,
1362		hash: Block::Hash,
1363		header: &Block::Header,
1364		last_finalized: Option<Block::Hash>,
1365		justification: Option<Justification>,
1366		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
1367		remove_displaced: bool,
1368	) -> ClientResult<MetaUpdate<Block>> {
1369		// TODO: ensure best chain contains this block.
1370		let number = *header.number();
1371		self.ensure_sequential_finalization(header, last_finalized)?;
1372		let with_state = sc_client_api::Backend::have_state_at(self, hash, number);
1373
1374		self.note_finalized(
1375			transaction,
1376			header,
1377			hash,
1378			with_state,
1379			current_transaction_justifications,
1380			remove_displaced,
1381		)?;
1382
1383		if let Some(justification) = justification {
1384			transaction.set_from_vec(
1385				columns::JUSTIFICATIONS,
1386				&utils::number_and_hash_to_lookup_key(number, hash)?,
1387				Justifications::from(justification.clone()).encode(),
1388			);
1389			current_transaction_justifications.insert(hash, justification);
1390		}
1391		Ok(MetaUpdate { hash, number, is_best: false, is_finalized: true, with_state })
1392	}
1393
1394	// Performs forced canonicalization with a delay after importing a non-finalized block.
1395	fn force_delayed_canonicalize(
1396		&self,
1397		transaction: &mut Transaction<DbHash>,
1398	) -> ClientResult<()> {
1399		let best_canonical = match self.storage.state_db.last_canonicalized() {
1400			LastCanonicalized::None => 0,
1401			LastCanonicalized::Block(b) => b,
1402			// Nothing needs to be done when canonicalization is not happening.
1403			LastCanonicalized::NotCanonicalizing => return Ok(()),
1404		};
1405
1406		let info = self.blockchain.info();
1407		let best_number: u64 = self.blockchain.info().best_number.saturated_into();
1408
1409		for to_canonicalize in
1410			best_canonical + 1..=best_number.saturating_sub(self.canonicalization_delay)
1411		{
1412			let hash_to_canonicalize = sc_client_api::blockchain::HeaderBackend::hash(
1413				&self.blockchain,
1414				to_canonicalize.saturated_into(),
1415			)?
1416			.ok_or_else(|| {
1417				let best_hash = info.best_hash;
1418
1419				sp_blockchain::Error::Backend(format!(
1420				"Can't canonicalize missing block number #{to_canonicalize} for best block {best_hash:?} (#{best_number})",
1421				))
1422			})?;
1423
1424			if !sc_client_api::Backend::have_state_at(
1425				self,
1426				hash_to_canonicalize,
1427				to_canonicalize.saturated_into(),
1428			) {
1429				return Ok(());
1430			}
1431
1432			trace!(target: "db", "Canonicalize block #{to_canonicalize} ({hash_to_canonicalize:?})");
1433			let commit = self.storage.state_db.canonicalize_block(&hash_to_canonicalize).map_err(
1434				sp_blockchain::Error::from_state_db::<
1435					sc_state_db::Error<sp_database::error::DatabaseError>,
1436				>,
1437			)?;
1438			apply_state_commit(transaction, commit);
1439		}
1440
1441		Ok(())
1442	}
1443
1444	fn try_commit_operation(&self, mut operation: BlockImportOperation<Block>) -> ClientResult<()> {
1445		let mut transaction = Transaction::new();
1446
1447		operation.apply_aux(&mut transaction);
1448		operation.apply_offchain(&mut transaction);
1449
1450		let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len());
1451		let (best_num, mut last_finalized_hash, mut last_finalized_num, mut block_gap) = {
1452			let meta = self.blockchain.meta.read();
1453			(meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap)
1454		};
1455
1456		let mut block_gap_updated = false;
1457
1458		let mut current_transaction_justifications: HashMap<Block::Hash, Justification> =
1459			HashMap::new();
1460		let mut finalized_blocks = operation.finalized_blocks.into_iter().peekable();
1461		while let Some((block_hash, justification)) = finalized_blocks.next() {
1462			let block_header = self.blockchain.expect_header(block_hash)?;
1463			meta_updates.push(self.finalize_block_with_transaction(
1464				&mut transaction,
1465				block_hash,
1466				&block_header,
1467				Some(last_finalized_hash),
1468				justification,
1469				&mut current_transaction_justifications,
1470				finalized_blocks.peek().is_none(),
1471			)?);
1472			last_finalized_hash = block_hash;
1473			last_finalized_num = *block_header.number();
1474		}
1475
1476		let imported = if let Some(pending_block) = operation.pending_block {
1477			let hash = pending_block.header.hash();
1478
1479			let parent_hash = *pending_block.header.parent_hash();
1480			let number = *pending_block.header.number();
1481			let highest_leaf = self
1482				.blockchain
1483				.leaves
1484				.read()
1485				.highest_leaf()
1486				.map(|(n, _)| n)
1487				.unwrap_or(Zero::zero());
1488			let existing_header = number <= highest_leaf && self.blockchain.header(hash)?.is_some();
1489
1490			// blocks are keyed by number + hash.
1491			let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?;
1492
1493			if pending_block.leaf_state.is_best() {
1494				self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?;
1495			};
1496
1497			utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?;
1498
1499			transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode());
1500			if let Some(body) = pending_block.body {
1501				// If we have any index operations we save the block in the new format with indexed
1502				// extrinsic headers. Otherwise we save the body as a single blob.
1503				if operation.index_ops.is_empty() {
1504					transaction.set_from_vec(columns::BODY, &lookup_key, body.encode());
1505				} else {
1506					let body =
1507						apply_index_ops::<Block>(&mut transaction, body, operation.index_ops);
1508					transaction.set_from_vec(columns::BODY_INDEX, &lookup_key, body);
1509				}
1510			}
1511			if let Some(body) = pending_block.indexed_body {
1512				apply_indexed_body::<Block>(&mut transaction, body);
1513			}
1514			if let Some(justifications) = pending_block.justifications {
1515				transaction.set_from_vec(
1516					columns::JUSTIFICATIONS,
1517					&lookup_key,
1518					justifications.encode(),
1519				);
1520			}
1521
1522			if number.is_zero() {
1523				transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
1524
1525				if operation.commit_state {
1526					transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key);
1527				} else {
1528					// When we don't want to commit the genesis state, we still preserve it in
1529					// memory to bootstrap consensus. It is queried for an initial list of
1530					// authorities, etc.
1531					*self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new(
1532						*pending_block.header.state_root(),
1533						operation.db_updates.clone(),
1534					)));
1535				}
1536			}
1537
1538			let finalized = if operation.commit_state {
1539				let mut changeset: sc_state_db::ChangeSet<Vec<u8>> =
1540					sc_state_db::ChangeSet::default();
1541				let mut ops: u64 = 0;
1542				let mut bytes: u64 = 0;
1543				let mut removal: u64 = 0;
1544				let mut bytes_removal: u64 = 0;
1545				for (mut key, (val, rc)) in operation.db_updates.drain() {
1546					self.storage.db.sanitize_key(&mut key);
1547					if rc > 0 {
1548						ops += 1;
1549						bytes += key.len() as u64 + val.len() as u64;
1550						if rc == 1 {
1551							changeset.inserted.push((key, val.to_vec()));
1552						} else {
1553							changeset.inserted.push((key.clone(), val.to_vec()));
1554							for _ in 0..rc - 1 {
1555								changeset.inserted.push((key.clone(), Default::default()));
1556							}
1557						}
1558					} else if rc < 0 {
1559						removal += 1;
1560						bytes_removal += key.len() as u64;
1561						if rc == -1 {
1562							changeset.deleted.push(key);
1563						} else {
1564							for _ in 0..-rc {
1565								changeset.deleted.push(key.clone());
1566							}
1567						}
1568					}
1569				}
1570				self.state_usage.tally_writes_nodes(ops, bytes);
1571				self.state_usage.tally_removed_nodes(removal, bytes_removal);
1572
1573				let mut ops: u64 = 0;
1574				let mut bytes: u64 = 0;
1575				for (key, value) in operation
1576					.storage_updates
1577					.iter()
1578					.chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter()))
1579				{
1580					ops += 1;
1581					bytes += key.len() as u64;
1582					if let Some(v) = value.as_ref() {
1583						bytes += v.len() as u64;
1584					}
1585				}
1586				self.state_usage.tally_writes(ops, bytes);
1587				let number_u64 = number.saturated_into::<u64>();
1588				let commit = self
1589					.storage
1590					.state_db
1591					.insert_block(&hash, number_u64, pending_block.header.parent_hash(), changeset)
1592					.map_err(|e: sc_state_db::Error<sp_database::error::DatabaseError>| {
1593						sp_blockchain::Error::from_state_db(e)
1594					})?;
1595				apply_state_commit(&mut transaction, commit);
1596				if number <= last_finalized_num {
1597					// Canonicalize in the db when re-importing existing blocks with state.
1598					let commit = self.storage.state_db.canonicalize_block(&hash).map_err(
1599						sp_blockchain::Error::from_state_db::<
1600							sc_state_db::Error<sp_database::error::DatabaseError>,
1601						>,
1602					)?;
1603					apply_state_commit(&mut transaction, commit);
1604					meta_updates.push(MetaUpdate {
1605						hash,
1606						number,
1607						is_best: false,
1608						is_finalized: true,
1609						with_state: true,
1610					});
1611				}
1612
1613				// Check if we need to finalize. Genesis is always finalized instantly.
1614				let finalized = number_u64 == 0 || pending_block.leaf_state.is_final();
1615				finalized
1616			} else {
1617				(number.is_zero() && last_finalized_num.is_zero()) ||
1618					pending_block.leaf_state.is_final()
1619			};
1620
1621			let header = &pending_block.header;
1622			let is_best = pending_block.leaf_state.is_best();
1623			debug!(
1624				target: "db",
1625				"DB Commit {hash:?} ({number}), best={is_best}, state={}, existing={existing_header}, finalized={finalized}",
1626				operation.commit_state,
1627			);
1628
1629			self.state_usage.merge_sm(operation.old_state.usage_info());
1630
1631			// release state reference so that it can be finalized
1632			// VERY IMPORTANT
1633			drop(operation.old_state);
1634
1635			if finalized {
1636				// TODO: ensure best chain contains this block.
1637				self.ensure_sequential_finalization(header, Some(last_finalized_hash))?;
1638				let mut current_transaction_justifications = HashMap::new();
1639				self.note_finalized(
1640					&mut transaction,
1641					header,
1642					hash,
1643					operation.commit_state,
1644					&mut current_transaction_justifications,
1645					true,
1646				)?;
1647			} else {
1648				// canonicalize blocks which are old enough, regardless of finality.
1649				self.force_delayed_canonicalize(&mut transaction)?
1650			}
1651
1652			if !existing_header {
1653				// Add a new leaf if the block has the potential to be finalized.
1654				if number > last_finalized_num || last_finalized_num.is_zero() {
1655					let mut leaves = self.blockchain.leaves.write();
1656					leaves.import(hash, number, parent_hash);
1657					leaves.prepare_transaction(
1658						&mut transaction,
1659						columns::META,
1660						meta_keys::LEAF_PREFIX,
1661					);
1662				}
1663
1664				let mut children = children::read_children(
1665					&*self.storage.db,
1666					columns::META,
1667					meta_keys::CHILDREN_PREFIX,
1668					parent_hash,
1669				)?;
1670				if !children.contains(&hash) {
1671					children.push(hash);
1672					children::write_children(
1673						&mut transaction,
1674						columns::META,
1675						meta_keys::CHILDREN_PREFIX,
1676						parent_hash,
1677						children,
1678					);
1679				}
1680
1681				if let Some(mut gap) = block_gap {
1682					match gap.gap_type {
1683						BlockGapType::MissingHeaderAndBody =>
1684							if number == gap.start {
1685								gap.start += One::one();
1686								utils::insert_number_to_key_mapping(
1687									&mut transaction,
1688									columns::KEY_LOOKUP,
1689									number,
1690									hash,
1691								)?;
1692								if gap.start > gap.end {
1693									transaction.remove(columns::META, meta_keys::BLOCK_GAP);
1694									transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION);
1695									block_gap = None;
1696									debug!(target: "db", "Removed block gap.");
1697								} else {
1698									block_gap = Some(gap);
1699									debug!(target: "db", "Update block gap. {block_gap:?}");
1700									transaction.set(
1701										columns::META,
1702										meta_keys::BLOCK_GAP,
1703										&gap.encode(),
1704									);
1705									transaction.set(
1706										columns::META,
1707										meta_keys::BLOCK_GAP_VERSION,
1708										&BLOCK_GAP_CURRENT_VERSION.encode(),
1709									);
1710								}
1711								block_gap_updated = true;
1712							},
1713						BlockGapType::MissingBody => {
1714							unreachable!("Unsupported block gap. TODO: https://github.com/paritytech/polkadot-sdk/issues/5406")
1715						},
1716					}
1717				} else if operation.create_gap &&
1718					number > best_num + One::one() &&
1719					self.blockchain.header(parent_hash)?.is_none()
1720				{
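					// No gap is currently recorded, the block is more than one ahead of the
					// current best and its parent header is missing: record a new gap covering
					// the unimported range.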
1721					let gap = BlockGap {
1722						start: best_num + One::one(),
1723						end: number - One::one(),
1724						gap_type: BlockGapType::MissingHeaderAndBody,
1725					};
1726					transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode());
1727					transaction.set(
1728						columns::META,
1729						meta_keys::BLOCK_GAP_VERSION,
1730						&BLOCK_GAP_CURRENT_VERSION.encode(),
1731					);
1732					block_gap = Some(gap);
1733					block_gap_updated = true;
1734					debug!(target: "db", "Detected block gap {block_gap:?}");
1735				}
1736			}
1737
1738			meta_updates.push(MetaUpdate {
1739				hash,
1740				number,
1741				is_best: pending_block.leaf_state.is_best(),
1742				is_finalized: finalized,
1743				with_state: operation.commit_state,
1744			});
1745			Some((pending_block.header, hash))
1746		} else {
1747			None
1748		};
1749
1750		if let Some(set_head) = operation.set_head {
1751			if let Some(header) =
1752				sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)?
1753			{
1754				let number = header.number();
1755				let hash = header.hash();
1756
1757				self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?;
1758
1759				meta_updates.push(MetaUpdate {
1760					hash,
1761					number: *number,
1762					is_best: true,
1763					is_finalized: false,
1764					with_state: false,
1765				});
1766			} else {
1767				return Err(sp_blockchain::Error::UnknownBlock(format!(
1768					"Cannot set head {set_head:?}",
1769				)));
1770			}
1771		}
1772
1773		self.storage.db.commit(transaction)?;
1774
1775		// Apply all in-memory state changes.
1776		// Code beyond this point can't fail.
1777
1778		if let Some((header, hash)) = imported {
1779			trace!(target: "db", "DB Commit done {hash:?}");
1780			let header_metadata = CachedHeaderMetadata::from(&header);
1781			self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata);
1782			cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header));
1783		}
1784
1785		for m in meta_updates {
1786			self.blockchain.update_meta(m);
1787		}
1788		if block_gap_updated {
1789			self.blockchain.update_block_gap(block_gap);
1790		}
1791
1792		Ok(())
1793	}
1794
1795	/// Write the changes for a newly finalized block to `transaction`. This canonicalizes
1796	/// finalized blocks and fails if called with a block which was not a child of the last
1797	/// finalized block. `remove_displaced` can be set to `false` if this is not the last of
1798	/// many subsequent calls, for performance reasons.
1799	fn note_finalized(
1800		&self,
1801		transaction: &mut Transaction<DbHash>,
1802		f_header: &Block::Header,
1803		f_hash: Block::Hash,
1804		with_state: bool,
1805		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
1806		remove_displaced: bool,
1807	) -> ClientResult<()> {
1808		let f_num = *f_header.number();
1809
1810		let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash)?;
1811		if with_state {
1812			transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key.clone());
1813		}
1814		transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key);
1815
1816		let requires_canonicalization = match self.storage.state_db.last_canonicalized() {
1817			LastCanonicalized::None => true,
1818			LastCanonicalized::Block(b) => f_num.saturated_into::<u64>() > b,
1819			LastCanonicalized::NotCanonicalizing => false,
1820		};
1821
1822		if requires_canonicalization && sc_client_api::Backend::have_state_at(self, f_hash, f_num) {
1823			let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err(
1824				sp_blockchain::Error::from_state_db::<
1825					sc_state_db::Error<sp_database::error::DatabaseError>,
1826				>,
1827			)?;
1828			apply_state_commit(transaction, commit);
1829		}
1830
1831		if remove_displaced {
1832			let new_displaced = self.blockchain.displaced_leaves_after_finalizing(f_hash, f_num)?;
1833
1834			self.blockchain.leaves.write().remove_displaced_leaves(FinalizationOutcome::new(
1835				new_displaced.displaced_leaves.iter().copied(),
1836			));
1837
1838			if !matches!(self.blocks_pruning, BlocksPruning::KeepAll) {
1839				self.prune_displaced_branches(transaction, &new_displaced)?;
1840			}
1841		}
1842
1843		self.prune_blocks(transaction, f_num, current_transaction_justifications)?;
1844
1845		Ok(())
1846	}
1847
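	/// Prune block bodies and justifications that fall out of the `BlocksPruning::Some(n)`
	/// window once `finalized_number` advances, always keeping at least the last finalized
	/// block and copying pinned data into the in-memory cache before removal.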
1848	fn prune_blocks(
1849		&self,
1850		transaction: &mut Transaction<DbHash>,
1851		finalized_number: NumberFor<Block>,
1852		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
1853	) -> ClientResult<()> {
1854		if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning {
1855			// Always keep the last finalized block
1856			let keep = std::cmp::max(blocks_pruning, 1);
1857			if finalized_number >= keep.into() {
1858				let number = finalized_number.saturating_sub(keep.into());
1859
1860				// Before we prune a block, check if it is pinned
1861				if let Some(hash) = self.blockchain.hash(number)? {
1862					self.blockchain.insert_persisted_body_if_pinned(hash)?;
1863
1864					// If the block was finalized in this transaction, it will not be in the db
1865					// yet.
1866					if let Some(justification) = current_transaction_justifications.remove(&hash) {
1867						self.blockchain.insert_justifications_if_pinned(hash, justification);
1868					} else {
1869						self.blockchain.insert_persisted_justifications_if_pinned(hash)?;
1870					}
1871				};
1872
1873				self.prune_block(transaction, BlockId::<Block>::number(number))?;
1874			}
1875		}
1876		Ok(())
1877	}
1878
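	/// Remove the bodies of all blocks on branches displaced by finalization, retaining pinned
	/// bodies in the in-memory cache before they are deleted.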
1879	fn prune_displaced_branches(
1880		&self,
1881		transaction: &mut Transaction<DbHash>,
1882		displaced: &DisplacedLeavesAfterFinalization<Block>,
1883	) -> ClientResult<()> {
1884		// Discard all blocks from displaced branches
1885		for &hash in displaced.displaced_blocks.iter() {
1886			self.blockchain.insert_persisted_body_if_pinned(hash)?;
1887			self.prune_block(transaction, BlockId::<Block>::hash(hash))?;
1888		}
1889		Ok(())
1890	}
1891
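	/// Remove a single block's body, justifications and body index from the database and
	/// release any indexed transaction data the block referenced.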
1892	fn prune_block(
1893		&self,
1894		transaction: &mut Transaction<DbHash>,
1895		id: BlockId<Block>,
1896	) -> ClientResult<()> {
1897		debug!(target: "db", "Removing block #{id}");
1898		utils::remove_from_db(
1899			transaction,
1900			&*self.storage.db,
1901			columns::KEY_LOOKUP,
1902			columns::BODY,
1903			id,
1904		)?;
1905		utils::remove_from_db(
1906			transaction,
1907			&*self.storage.db,
1908			columns::KEY_LOOKUP,
1909			columns::JUSTIFICATIONS,
1910			id,
1911		)?;
1912		if let Some(index) =
1913			read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)?
1914		{
1915			utils::remove_from_db(
1916				transaction,
1917				&*self.storage.db,
1918				columns::KEY_LOOKUP,
1919				columns::BODY_INDEX,
1920				id,
1921			)?;
1922			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
1923				Ok(index) =>
1924					for ex in index {
1925						if let DbExtrinsic::Indexed { hash, .. } = ex {
1926							transaction.release(columns::TRANSACTION, hash);
1927						}
1928					},
1929				Err(err) =>
1930					return Err(sp_blockchain::Error::Backend(format!(
1931						"Error decoding body list: {err}",
1932					))),
1933			}
1934		}
1935		Ok(())
1936	}
1937
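	/// Construct a state backed by the empty trie, e.g. as the initial `old_state` of a new
	/// block import operation.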
1938	fn empty_state(&self) -> RecordStatsState<RefTrackingState<Block>, Block> {
1939		let root = EmptyStorage::<Block>::new().0; // Empty trie
1940		let db_state = DbStateBuilder::<HashingFor<Block>>::new(self.storage.clone(), root)
1941			.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache()))
1942			.build();
1943		let state = RefTrackingState::new(db_state, self.storage.clone(), None);
1944		RecordStatsState::new(state, None, self.state_usage.clone())
1945	}
1946}
1947
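/// Copy a state-db `CommitSet` into the database transaction, applying data changes to the
/// `STATE` column and meta changes to the `STATE_META` column.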
1948fn apply_state_commit(
1949	transaction: &mut Transaction<DbHash>,
1950	commit: sc_state_db::CommitSet<Vec<u8>>,
1951) {
1952	for (key, val) in commit.data.inserted.into_iter() {
1953		transaction.set_from_vec(columns::STATE, &key[..], val);
1954	}
1955	for key in commit.data.deleted.into_iter() {
1956		transaction.remove(columns::STATE, &key[..]);
1957	}
1958	for (key, val) in commit.meta.inserted.into_iter() {
1959		transaction.set_from_vec(columns::STATE_META, &key[..], val);
1960	}
1961	for key in commit.meta.deleted.into_iter() {
1962		transaction.remove(columns::STATE_META, &key[..]);
1963	}
1964}
1965
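/// Apply transaction-index operations to a block body: indexed extrinsics get their indexed
/// payload stored (or re-referenced) in the `TRANSACTION` column, everything else is kept as a
/// full extrinsic. Returns the encoded list of `DbExtrinsic` entries describing the body.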
1966fn apply_index_ops<Block: BlockT>(
1967	transaction: &mut Transaction<DbHash>,
1968	body: Vec<Block::Extrinsic>,
1969	ops: Vec<IndexOperation>,
1970) -> Vec<u8> {
1971	let mut extrinsic_index: Vec<DbExtrinsic<Block>> = Vec::with_capacity(body.len());
1972	let mut index_map = HashMap::new();
1973	let mut renewed_map = HashMap::new();
1974	for op in ops {
1975		match op {
1976			IndexOperation::Insert { extrinsic, hash, size } => {
1977				index_map.insert(extrinsic, (hash, size));
1978			},
1979			IndexOperation::Renew { extrinsic, hash } => {
1980				renewed_map.insert(extrinsic, DbHash::from_slice(hash.as_ref()));
1981			},
1982		}
1983	}
1984	for (index, extrinsic) in body.into_iter().enumerate() {
1985		let db_extrinsic = if let Some(hash) = renewed_map.get(&(index as u32)) {
1986			// Bump ref counter
1987			let extrinsic = extrinsic.encode();
1988			transaction.reference(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()));
1989			DbExtrinsic::Indexed { hash: *hash, header: extrinsic }
1990		} else {
1991			match index_map.get(&(index as u32)) {
1992				Some((hash, size)) => {
1993					let encoded = extrinsic.encode();
1994					if *size as usize <= encoded.len() {
1995						let offset = encoded.len() - *size as usize;
1996						transaction.store(
1997							columns::TRANSACTION,
1998							DbHash::from_slice(hash.as_ref()),
1999							encoded[offset..].to_vec(),
2000						);
2001						DbExtrinsic::Indexed {
2002							hash: DbHash::from_slice(hash.as_ref()),
2003							header: encoded[..offset].to_vec(),
2004						}
2005					} else {
2006						// Invalid indexed slice. Just store full data and don't index anything.
2007						DbExtrinsic::Full(extrinsic)
2008					}
2009				},
2010				_ => DbExtrinsic::Full(extrinsic),
2011			}
2012		};
2013		extrinsic_index.push(db_extrinsic);
2014	}
2015	debug!(
2016		target: "db",
2017		"DB transaction index: {} inserted, {} renewed, {} full",
2018		index_map.len(),
2019		renewed_map.len(),
2020		extrinsic_index.len() - index_map.len() - renewed_map.len(),
2021	);
2022	extrinsic_index.encode()
2023}
2024
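/// Store detached indexed transaction data in the `TRANSACTION` column, keyed by its
/// Blake2-256 hash.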
2025fn apply_indexed_body<Block: BlockT>(transaction: &mut Transaction<DbHash>, body: Vec<Vec<u8>>) {
2026	for extrinsic in body {
2027		let hash = sp_runtime::traits::BlakeTwo256::hash(&extrinsic);
2028		transaction.store(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()), extrinsic);
2029	}
2030}
2031
2032impl<Block> sc_client_api::backend::AuxStore for Backend<Block>
2033where
2034	Block: BlockT,
2035{
2036	fn insert_aux<
2037		'a,
2038		'b: 'a,
2039		'c: 'a,
2040		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
2041		D: IntoIterator<Item = &'a &'b [u8]>,
2042	>(
2043		&self,
2044		insert: I,
2045		delete: D,
2046	) -> ClientResult<()> {
2047		let mut transaction = Transaction::new();
2048		for (k, v) in insert {
2049			transaction.set(columns::AUX, k, v);
2050		}
2051		for k in delete {
2052			transaction.remove(columns::AUX, k);
2053		}
2054		self.storage.db.commit(transaction)?;
2055		Ok(())
2056	}
2057
2058	fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
2059		Ok(self.storage.db.get(columns::AUX, key))
2060	}
2061}
2062
2063impl<Block: BlockT> sc_client_api::backend::Backend<Block> for Backend<Block> {
2064	type BlockImportOperation = BlockImportOperation<Block>;
2065	type Blockchain = BlockchainDb<Block>;
2066	type State = RecordStatsState<RefTrackingState<Block>, Block>;
2067	type OffchainStorage = offchain::LocalStorage;
2068
2069	fn begin_operation(&self) -> ClientResult<Self::BlockImportOperation> {
2070		Ok(BlockImportOperation {
2071			pending_block: None,
2072			old_state: self.empty_state(),
2073			db_updates: PrefixedMemoryDB::default(),
2074			storage_updates: Default::default(),
2075			child_storage_updates: Default::default(),
2076			offchain_storage_updates: Default::default(),
2077			aux_ops: Vec::new(),
2078			finalized_blocks: Vec::new(),
2079			set_head: None,
2080			commit_state: false,
2081			create_gap: true,
2082			index_ops: Default::default(),
2083		})
2084	}
2085
2086	fn begin_state_operation(
2087		&self,
2088		operation: &mut Self::BlockImportOperation,
2089		block: Block::Hash,
2090	) -> ClientResult<()> {
2091		if block == Default::default() {
2092			operation.old_state = self.empty_state();
2093		} else {
2094			operation.old_state = self.state_at(block)?;
2095		}
2096
2097		operation.commit_state = true;
2098		Ok(())
2099	}
2100
2101	fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> {
2102		let usage = operation.old_state.usage_info();
2103		self.state_usage.merge_sm(usage);
2104
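		// If the commit fails, reset the in-memory state-db from the meta stored on disk and
		// clear the pinned-blocks cache; on success, let the state-db sync.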
2105		if let Err(e) = self.try_commit_operation(operation) {
2106			let state_meta_db = StateMetaDb(self.storage.db.clone());
2107			self.storage
2108				.state_db
2109				.reset(state_meta_db)
2110				.map_err(sp_blockchain::Error::from_state_db)?;
2111			self.blockchain.clear_pinning_cache();
2112			Err(e)
2113		} else {
2114			self.storage.state_db.sync();
2115			Ok(())
2116		}
2117	}
2118
2119	fn finalize_block(
2120		&self,
2121		hash: Block::Hash,
2122		justification: Option<Justification>,
2123	) -> ClientResult<()> {
2124		let mut transaction = Transaction::new();
2125		let header = self.blockchain.expect_header(hash)?;
2126
2127		let mut current_transaction_justifications = HashMap::new();
2128		let m = self.finalize_block_with_transaction(
2129			&mut transaction,
2130			hash,
2131			&header,
2132			None,
2133			justification,
2134			&mut current_transaction_justifications,
2135			true,
2136		)?;
2137
2138		self.storage.db.commit(transaction)?;
2139		self.blockchain.update_meta(m);
2140		Ok(())
2141	}
2142
2143	fn append_justification(
2144		&self,
2145		hash: Block::Hash,
2146		justification: Justification,
2147	) -> ClientResult<()> {
2148		let mut transaction: Transaction<DbHash> = Transaction::new();
2149		let header = self.blockchain.expect_header(hash)?;
2150		let number = *header.number();
2151
2152		// Check if the block is finalized first.
2153		let is_descendent_of = is_descendent_of(&self.blockchain, None);
2154		let last_finalized = self.blockchain.last_finalized()?;
2155
2156		// We can do a quick check first, before doing a proper but more expensive check
2157		if number > self.blockchain.info().finalized_number ||
2158			(hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?)
2159		{
2160			return Err(ClientError::NotInFinalizedChain);
2161		}
2162
2163		let justifications = if let Some(mut stored_justifications) =
2164			self.blockchain.justifications(hash)?
2165		{
2166			if !stored_justifications.append(justification) {
2167				return Err(ClientError::BadJustification("Duplicate consensus engine ID".into()));
2168			}
2169			stored_justifications
2170		} else {
2171			Justifications::from(justification)
2172		};
2173
2174		transaction.set_from_vec(
2175			columns::JUSTIFICATIONS,
2176			&utils::number_and_hash_to_lookup_key(number, hash)?,
2177			justifications.encode(),
2178		);
2179
2180		self.storage.db.commit(transaction)?;
2181
2182		Ok(())
2183	}
2184
2185	fn offchain_storage(&self) -> Option<Self::OffchainStorage> {
2186		Some(self.offchain_storage.clone())
2187	}
2188
2189	fn usage_info(&self) -> Option<UsageInfo> {
2190		let (io_stats, state_stats) = self.io_stats.take_or_else(|| {
2191			(
2192				// TODO: implement DB stats and cache size retrieval
2193				kvdb::IoStats::empty(),
2194				self.state_usage.take(),
2195			)
2196		});
2197		let database_cache = MemorySize::from_bytes(0);
2198		let state_cache = MemorySize::from_bytes(
2199			self.shared_trie_cache.as_ref().map_or(0, |c| c.used_memory_size()),
2200		);
2201
2202		Some(UsageInfo {
2203			memory: MemoryInfo { state_cache, database_cache },
2204			io: IoInfo {
2205				transactions: io_stats.transactions,
2206				bytes_read: io_stats.bytes_read,
2207				bytes_written: io_stats.bytes_written,
2208				writes: io_stats.writes,
2209				reads: io_stats.reads,
2210				average_transaction_size: io_stats.avg_transaction_size() as u64,
2211				state_reads: state_stats.reads.ops,
2212				state_writes: state_stats.writes.ops,
2213				state_writes_cache: state_stats.overlay_writes.ops,
2214				state_reads_cache: state_stats.cache_reads.ops,
2215				state_writes_nodes: state_stats.nodes_writes.ops,
2216			},
2217		})
2218	}
2219
2220	fn revert(
2221		&self,
2222		n: NumberFor<Block>,
2223		revert_finalized: bool,
2224	) -> ClientResult<(NumberFor<Block>, HashSet<Block::Hash>)> {
2225		let mut reverted_finalized = HashSet::new();
2226
2227		let info = self.blockchain.info();
2228
2229		let highest_leaf = self
2230			.blockchain
2231			.leaves
2232			.read()
2233			.highest_leaf()
2234			.and_then(|(n, h)| h.last().map(|h| (n, *h)));
2235
2236		let best_number = info.best_number;
2237		let best_hash = info.best_hash;
2238
2239		let finalized = info.finalized_number;
2240
2241		let revertible = best_number - finalized;
2242		let n = if !revert_finalized && revertible < n { revertible } else { n };
2243
2244		let (n, mut number_to_revert, mut hash_to_revert) = match highest_leaf {
2245			Some((l_n, l_h)) => (n + (l_n - best_number), l_n, l_h),
2246			None => (n, best_number, best_hash),
2247		};
2248
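		// Walk back from the highest leaf one block at a time, stopping early once the parent
		// state is missing or the state-db has nothing left to revert.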
2249		let mut revert_blocks = || -> ClientResult<NumberFor<Block>> {
2250			for c in 0..n.saturated_into::<u64>() {
2251				if number_to_revert.is_zero() {
2252					return Ok(c.saturated_into::<NumberFor<Block>>());
2253				}
2254				let mut transaction = Transaction::new();
2255				let removed = self.blockchain.header(hash_to_revert)?.ok_or_else(|| {
2256					sp_blockchain::Error::UnknownBlock(format!(
2257						"Error reverting to {hash_to_revert}. Block header not found.",
2258					))
2259				})?;
2260				let removed_hash = removed.hash();
2261
2262				let prev_number = number_to_revert.saturating_sub(One::one());
2263				let prev_hash =
2264					if prev_number == best_number { best_hash } else { *removed.parent_hash() };
2265
2266				if !self.have_state_at(prev_hash, prev_number) {
2267					return Ok(c.saturated_into::<NumberFor<Block>>());
2268				}
2269
2270				match self.storage.state_db.revert_one() {
2271					Some(commit) => {
2272						apply_state_commit(&mut transaction, commit);
2273
2274						number_to_revert = prev_number;
2275						hash_to_revert = prev_hash;
2276
2277						let update_finalized = number_to_revert < finalized;
2278
2279						let key = utils::number_and_hash_to_lookup_key(
2280							number_to_revert,
2281							&hash_to_revert,
2282						)?;
2283						if update_finalized {
2284							transaction.set_from_vec(
2285								columns::META,
2286								meta_keys::FINALIZED_BLOCK,
2287								key.clone(),
2288							);
2289
2290							reverted_finalized.insert(removed_hash);
2291							if let Some((hash, _)) = self.blockchain.info().finalized_state {
2292								if hash == hash_to_revert {
2293									if !number_to_revert.is_zero() &&
2294										self.have_state_at(
2295											prev_hash,
2296											number_to_revert - One::one(),
2297										) {
2298										let lookup_key = utils::number_and_hash_to_lookup_key(
2299											number_to_revert - One::one(),
2300											prev_hash,
2301										)?;
2302										transaction.set_from_vec(
2303											columns::META,
2304											meta_keys::FINALIZED_STATE,
2305											lookup_key,
2306										);
2307									} else {
2308										transaction
2309											.remove(columns::META, meta_keys::FINALIZED_STATE);
2310									}
2311								}
2312							}
2313						}
2314						transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key);
2315						transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref());
2316						children::remove_children(
2317							&mut transaction,
2318							columns::META,
2319							meta_keys::CHILDREN_PREFIX,
2320							hash_to_revert,
2321						);
2322						self.storage.db.commit(transaction)?;
2323
2324						let is_best = number_to_revert < best_number;
2325
2326						self.blockchain.update_meta(MetaUpdate {
2327							hash: hash_to_revert,
2328							number: number_to_revert,
2329							is_best,
2330							is_finalized: update_finalized,
2331							with_state: false,
2332						});
2333					},
2334					None => return Ok(c.saturated_into::<NumberFor<Block>>()),
2335				}
2336			}
2337
2338			Ok(n)
2339		};
2340
2341		let reverted = revert_blocks()?;
2342
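		// After reverting blocks, trim the leaf set back to the new head and persist it.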
2343		let revert_leaves = || -> ClientResult<()> {
2344			let mut transaction = Transaction::new();
2345			let mut leaves = self.blockchain.leaves.write();
2346
2347			leaves.revert(hash_to_revert, number_to_revert);
2348			leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX);
2349			self.storage.db.commit(transaction)?;
2350
2351			Ok(())
2352		};
2353
2354		revert_leaves()?;
2355
2356		Ok((reverted, reverted_finalized))
2357	}
2358
2359	fn remove_leaf_block(&self, hash: Block::Hash) -> ClientResult<()> {
2360		let best_hash = self.blockchain.info().best_hash;
2361
2362		if best_hash == hash {
2363			return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {hash:?}")));
2364		}
2365
2366		let hdr = self.blockchain.header_metadata(hash)?;
2367		if !self.have_state_at(hash, hdr.number) {
2368			return Err(sp_blockchain::Error::UnknownBlock(format!(
2369				"State already discarded for {hash:?}",
2370			)));
2371		}
2372
2373		let mut leaves = self.blockchain.leaves.write();
2374		if !leaves.contains(hdr.number, hash) {
2375			return Err(sp_blockchain::Error::Backend(format!(
2376				"Can't remove non-leaf block {hash:?}",
2377			)));
2378		}
2379
2380		let mut transaction = Transaction::new();
2381		if let Some(commit) = self.storage.state_db.remove(&hash) {
2382			apply_state_commit(&mut transaction, commit);
2383		}
2384		transaction.remove(columns::KEY_LOOKUP, hash.as_ref());
2385
2386		let children: Vec<_> = self
2387			.blockchain()
2388			.children(hdr.parent)?
2389			.into_iter()
2390			.filter(|child_hash| *child_hash != hash)
2391			.collect();
2392		let parent_leaf = if children.is_empty() {
2393			children::remove_children(
2394				&mut transaction,
2395				columns::META,
2396				meta_keys::CHILDREN_PREFIX,
2397				hdr.parent,
2398			);
2399			Some(hdr.parent)
2400		} else {
2401			children::write_children(
2402				&mut transaction,
2403				columns::META,
2404				meta_keys::CHILDREN_PREFIX,
2405				hdr.parent,
2406				children,
2407			);
2408			None
2409		};
2410
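		// Remember the leaf-set change so it can be undone if the database commit below fails.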
2411		let remove_outcome = leaves.remove(hash, hdr.number, parent_leaf);
2412		leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX);
2413		if let Err(e) = self.storage.db.commit(transaction) {
2414			if let Some(outcome) = remove_outcome {
2415				leaves.undo().undo_remove(outcome);
2416			}
2417			return Err(e.into());
2418		}
2419		self.blockchain().remove_header_metadata(hash);
2420		Ok(())
2421	}
2422
2423	fn blockchain(&self) -> &BlockchainDb<Block> {
2424		&self.blockchain
2425	}
2426
2427	fn state_at(&self, hash: Block::Hash) -> ClientResult<Self::State> {
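		// Special case: if an in-memory copy of the genesis state is still available, serve it
		// directly instead of going through the state-db.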
2428		if hash == self.blockchain.meta.read().genesis_hash {
2429			if let Some(genesis_state) = &*self.genesis_state.read() {
2430				let root = genesis_state.root;
2431				let db_state =
2432					DbStateBuilder::<HashingFor<Block>>::new(genesis_state.clone(), root)
2433						.with_optional_cache(
2434							self.shared_trie_cache.as_ref().map(|c| c.local_cache()),
2435						)
2436						.build();
2437
2438				let state = RefTrackingState::new(db_state, self.storage.clone(), None);
2439				return Ok(RecordStatsState::new(state, None, self.state_usage.clone()));
2440			}
2441		}
2442
2443		match self.blockchain.header_metadata(hash) {
2444			Ok(ref hdr) => {
2445				let hint = || {
2446					sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref())
2447						.unwrap_or(None)
2448						.is_some()
2449				};
2450
2451				if let Ok(()) =
2452					self.storage.state_db.pin(&hash, hdr.number.saturated_into::<u64>(), hint)
2453				{
2454					let root = hdr.state_root;
2455					let db_state =
2456						DbStateBuilder::<HashingFor<Block>>::new(self.storage.clone(), root)
2457							.with_optional_cache(
2458								self.shared_trie_cache.as_ref().map(|c| c.local_cache()),
2459							)
2460							.build();
2461					let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash));
2462					Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone()))
2463				} else {
2464					Err(sp_blockchain::Error::UnknownBlock(format!(
2465						"State already discarded for {hash:?}",
2466					)))
2467				}
2468			},
2469			Err(e) => Err(e),
2470		}
2471	}
2472
2473	fn have_state_at(&self, hash: Block::Hash, number: NumberFor<Block>) -> bool {
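		// In archive mode, probe for the state root directly in storage; otherwise ask the
		// state-db whether the block has been pruned, probing storage only when the answer is
		// `MaybePruned`.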
2474		if self.is_archive {
2475			match self.blockchain.header_metadata(hash) {
2476				Ok(header) => sp_state_machine::Storage::get(
2477					self.storage.as_ref(),
2478					&header.state_root,
2479					(&[], None),
2480				)
2481				.unwrap_or(None)
2482				.is_some(),
2483				_ => false,
2484			}
2485		} else {
2486			match self.storage.state_db.is_pruned(&hash, number.saturated_into::<u64>()) {
2487				IsPruned::Pruned => false,
2488				IsPruned::NotPruned => true,
2489				IsPruned::MaybePruned => match self.blockchain.header_metadata(hash) {
2490					Ok(header) => sp_state_machine::Storage::get(
2491						self.storage.as_ref(),
2492						&header.state_root,
2493						(&[], None),
2494					)
2495					.unwrap_or(None)
2496					.is_some(),
2497					_ => false,
2498				},
2499			}
2500		}
2501	}
2502
2503	fn get_import_lock(&self) -> &RwLock<()> {
2504		&self.import_lock
2505	}
2506
2507	fn requires_full_sync(&self) -> bool {
2508		matches!(
2509			self.storage.state_db.pruning_mode(),
2510			PruningMode::ArchiveAll | PruningMode::ArchiveCanonical
2511		)
2512	}
2513
2514	fn pin_block(&self, hash: <Block as BlockT>::Hash) -> sp_blockchain::Result<()> {
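		// The hint lets the state-db check whether the block's state root is actually present
		// in the database before pinning.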
2515		let hint = || {
2516			let header_metadata = self.blockchain.header_metadata(hash);
2517			header_metadata
2518				.map(|hdr| {
2519					sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref())
2520						.unwrap_or(None)
2521						.is_some()
2522				})
2523				.unwrap_or(false)
2524		};
2525
2526		if let Some(number) = self.blockchain.number(hash)? {
2527			self.storage.state_db.pin(&hash, number.saturated_into::<u64>(), hint).map_err(
2528				|_| {
2529					sp_blockchain::Error::UnknownBlock(format!(
2530						"Unable to pin: state already discarded for `{hash:?}`",
2531					))
2532				},
2533			)?;
2534		} else {
2535			return Err(ClientError::UnknownBlock(format!(
2536				"Can not pin block with hash `{hash:?}`. Block not found.",
2537			)));
2538		}
2539
2540		if self.blocks_pruning != BlocksPruning::KeepAll {
2541			// Only increase reference count for this hash. Value is loaded once we prune.
2542			self.blockchain.bump_ref(hash);
2543		}
2544		Ok(())
2545	}
2546
2547	fn unpin_block(&self, hash: <Block as BlockT>::Hash) {
2548		self.storage.state_db.unpin(&hash);
2549
2550		if self.blocks_pruning != BlocksPruning::KeepAll {
2551			self.blockchain.unpin(hash);
2552		}
2553	}
2554}
2555
2556impl<Block: BlockT> sc_client_api::backend::LocalBackend<Block> for Backend<Block> {}
2557
2558#[cfg(test)]
2559pub(crate) mod tests {
2560	use super::*;
2561	use crate::columns;
2562	use hash_db::{HashDB, EMPTY_PREFIX};
2563	use sc_client_api::{
2564		backend::{Backend as BTrait, BlockImportOperation as Op},
2565		blockchain::Backend as BLBTrait,
2566	};
2567	use sp_blockchain::{lowest_common_ancestor, tree_route};
2568	use sp_core::H256;
2569	use sp_runtime::{
2570		testing::{Block as RawBlock, ExtrinsicWrapper, Header},
2571		traits::{BlakeTwo256, Hash},
2572		ConsensusEngineId, StateVersion,
2573	};
2574
2575	const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0";
2576	const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1";
2577
2578	pub(crate) type Block = RawBlock<ExtrinsicWrapper<u64>>;
2579
2580	pub fn insert_header(
2581		backend: &Backend<Block>,
2582		number: u64,
2583		parent_hash: H256,
2584		changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
2585		extrinsics_root: H256,
2586	) -> H256 {
2587		insert_block(backend, number, parent_hash, changes, extrinsics_root, Vec::new(), None)
2588			.unwrap()
2589	}
2590
2591	pub fn insert_block(
2592		backend: &Backend<Block>,
2593		number: u64,
2594		parent_hash: H256,
2595		_changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
2596		extrinsics_root: H256,
2597		body: Vec<ExtrinsicWrapper<u64>>,
2598		transaction_index: Option<Vec<IndexOperation>>,
2599	) -> Result<H256, sp_blockchain::Error> {
2600		use sp_runtime::testing::Digest;
2601
2602		let digest = Digest::default();
2603		let mut header =
2604			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2605
2606		let block_hash = if number == 0 { Default::default() } else { parent_hash };
2607		let mut op = backend.begin_operation().unwrap();
2608		backend.begin_state_operation(&mut op, block_hash).unwrap();
2609		if let Some(index) = transaction_index {
2610			op.update_transaction_index(index).unwrap();
2611		}
2612
2613		// Insert some fake data to ensure that the block can be found in the state column.
2614		let (root, overlay) = op.old_state.storage_root(
2615			vec![(block_hash.as_ref(), Some(block_hash.as_ref()))].into_iter(),
2616			StateVersion::V1,
2617		);
2618		op.update_db_storage(overlay).unwrap();
2619		header.state_root = root.into();
2620
2621		op.set_block_data(header.clone(), Some(body), None, None, NewBlockState::Best)
2622			.unwrap();
2623
2624		backend.commit_operation(op)?;
2625
2626		Ok(header.hash())
2627	}
2628
2629	pub fn insert_disconnected_header(
2630		backend: &Backend<Block>,
2631		number: u64,
2632		parent_hash: H256,
2633		extrinsics_root: H256,
2634		best: bool,
2635	) -> H256 {
2636		use sp_runtime::testing::Digest;
2637
2638		let digest = Digest::default();
2639		let header =
2640			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2641
2642		let mut op = backend.begin_operation().unwrap();
2643
2644		op.set_block_data(
2645			header.clone(),
2646			Some(vec![]),
2647			None,
2648			None,
2649			if best { NewBlockState::Best } else { NewBlockState::Normal },
2650		)
2651		.unwrap();
2652
2653		backend.commit_operation(op).unwrap();
2654
2655		header.hash()
2656	}
2657
2658	pub fn insert_header_no_head(
2659		backend: &Backend<Block>,
2660		number: u64,
2661		parent_hash: H256,
2662		extrinsics_root: H256,
2663	) -> H256 {
2664		use sp_runtime::testing::Digest;
2665
2666		let digest = Digest::default();
2667		let mut header =
2668			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2669		let mut op = backend.begin_operation().unwrap();
2670
2671		let root = backend
2672			.state_at(parent_hash)
2673			.unwrap_or_else(|_| {
2674				if parent_hash == Default::default() {
2675					backend.empty_state()
2676				} else {
2677					panic!("Unknown block: {parent_hash:?}")
2678				}
2679			})
2680			.storage_root(
2681				vec![(parent_hash.as_ref(), Some(parent_hash.as_ref()))].into_iter(),
2682				StateVersion::V1,
2683			)
2684			.0;
2685		header.state_root = root.into();
2686
2687		op.set_block_data(header.clone(), None, None, None, NewBlockState::Normal)
2688			.unwrap();
2689		backend.commit_operation(op).unwrap();
2690
2691		header.hash()
2692	}
2693
2694	#[test]
2695	fn block_hash_inserted_correctly() {
2696		let backing = {
2697			let db = Backend::<Block>::new_test(1, 0);
2698			for i in 0..10 {
2699				assert!(db.blockchain().hash(i).unwrap().is_none());
2700
2701				{
2702					let hash = if i == 0 {
2703						Default::default()
2704					} else {
2705						db.blockchain.hash(i - 1).unwrap().unwrap()
2706					};
2707
2708					let mut op = db.begin_operation().unwrap();
2709					db.begin_state_operation(&mut op, hash).unwrap();
2710					let header = Header {
2711						number: i,
2712						parent_hash: hash,
2713						state_root: Default::default(),
2714						digest: Default::default(),
2715						extrinsics_root: Default::default(),
2716					};
2717
2718					op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best)
2719						.unwrap();
2720					db.commit_operation(op).unwrap();
2721				}
2722
2723				assert!(db.blockchain().hash(i).unwrap().is_some())
2724			}
2725			db.storage.db.clone()
2726		};
2727
2728		let backend = Backend::<Block>::new(
2729			DatabaseSettings {
2730				trie_cache_maximum_size: Some(16 * 1024 * 1024),
2731				state_pruning: Some(PruningMode::blocks_pruning(1)),
2732				source: DatabaseSource::Custom { db: backing, require_create_flag: false },
2733				blocks_pruning: BlocksPruning::KeepFinalized,
2734			},
2735			0,
2736		)
2737		.unwrap();
2738		assert_eq!(backend.blockchain().info().best_number, 9);
2739		for i in 0..10 {
2740			assert!(backend.blockchain().hash(i).unwrap().is_some())
2741		}
2742	}
2743
2744	#[test]
2745	fn set_state_data() {
2746		set_state_data_inner(StateVersion::V0);
2747		set_state_data_inner(StateVersion::V1);
2748	}
2749	fn set_state_data_inner(state_version: StateVersion) {
2750		let db = Backend::<Block>::new_test(2, 0);
2751		let hash = {
2752			let mut op = db.begin_operation().unwrap();
2753			let mut header = Header {
2754				number: 0,
2755				parent_hash: Default::default(),
2756				state_root: Default::default(),
2757				digest: Default::default(),
2758				extrinsics_root: Default::default(),
2759			};
2760
2761			let storage = vec![(vec![1, 3, 5], vec![2, 4, 6]), (vec![1, 2, 3], vec![9, 9, 9])];
2762
2763			header.state_root = op
2764				.old_state
2765				.storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version)
2766				.0
2767				.into();
2768			let hash = header.hash();
2769
2770			op.reset_storage(
2771				Storage {
2772					top: storage.into_iter().collect(),
2773					children_default: Default::default(),
2774				},
2775				state_version,
2776			)
2777			.unwrap();
2778			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best)
2779				.unwrap();
2780
2781			db.commit_operation(op).unwrap();
2782
2783			let state = db.state_at(hash).unwrap();
2784
2785			assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6]));
2786			assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
2787			assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None);
2788
2789			hash
2790		};
2791
2792		{
2793			let mut op = db.begin_operation().unwrap();
2794			db.begin_state_operation(&mut op, hash).unwrap();
2795			let mut header = Header {
2796				number: 1,
2797				parent_hash: hash,
2798				state_root: Default::default(),
2799				digest: Default::default(),
2800				extrinsics_root: Default::default(),
2801			};
2802
2803			let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];
2804
2805			let (root, overlay) = op.old_state.storage_root(
2806				storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
2807				state_version,
2808			);
2809			op.update_db_storage(overlay).unwrap();
2810			header.state_root = root.into();
2811
2812			op.update_storage(storage, Vec::new()).unwrap();
2813			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best)
2814				.unwrap();
2815
2816			db.commit_operation(op).unwrap();
2817
2818			let state = db.state_at(header.hash()).unwrap();
2819
2820			assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None);
2821			assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
2822			assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6]));
2823		}
2824	}
2825
2826	#[test]
2827	fn delete_only_when_negative_rc() {
2828		sp_tracing::try_init_simple();
2829		let state_version = StateVersion::default();
2830		let key;
2831		let backend = Backend::<Block>::new_test(1, 0);
2832
2833		let hash = {
2834			let mut op = backend.begin_operation().unwrap();
2835			backend.begin_state_operation(&mut op, Default::default()).unwrap();
2836			let mut header = Header {
2837				number: 0,
2838				parent_hash: Default::default(),
2839				state_root: Default::default(),
2840				digest: Default::default(),
2841				extrinsics_root: Default::default(),
2842			};
2843
2844			header.state_root =
2845				op.old_state.storage_root(std::iter::empty(), state_version).0.into();
2846			let hash = header.hash();
2847
2848			op.reset_storage(
2849				Storage { top: Default::default(), children_default: Default::default() },
2850				state_version,
2851			)
2852			.unwrap();
2853
2854			key = op.db_updates.insert(EMPTY_PREFIX, b"hello");
2855			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best)
2856				.unwrap();
2857
2858			backend.commit_operation(op).unwrap();
2859			assert_eq!(
2860				backend
2861					.storage
2862					.db
2863					.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
2864					.unwrap(),
2865				&b"hello"[..]
2866			);
2867			hash
2868		};
2869
2870		let hashof1 = {
2871			let mut op = backend.begin_operation().unwrap();
2872			backend.begin_state_operation(&mut op, hash).unwrap();
2873			let mut header = Header {
2874				number: 1,
2875				parent_hash: hash,
2876				state_root: Default::default(),
2877				digest: Default::default(),
2878				extrinsics_root: Default::default(),
2879			};
2880
2881			let storage: Vec<(_, _)> = vec![];
2882
2883			header.state_root = op
2884				.old_state
2885				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
2886				.0
2887				.into();
2888			let hash = header.hash();
2889
2890			op.db_updates.insert(EMPTY_PREFIX, b"hello");
2891			op.db_updates.remove(&key, EMPTY_PREFIX);
2892			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best)
2893				.unwrap();
2894
2895			backend.commit_operation(op).unwrap();
2896			assert_eq!(
2897				backend
2898					.storage
2899					.db
2900					.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
2901					.unwrap(),
2902				&b"hello"[..]
2903			);
2904			hash
2905		};
2906
2907		let hashof2 = {
2908			let mut op = backend.begin_operation().unwrap();
2909			backend.begin_state_operation(&mut op, hashof1).unwrap();
2910			let mut header = Header {
2911				number: 2,
2912				parent_hash: hashof1,
2913				state_root: Default::default(),
2914				digest: Default::default(),
2915				extrinsics_root: Default::default(),
2916			};
2917
2918			let storage: Vec<(_, _)> = vec![];
2919
2920			header.state_root = op
2921				.old_state
2922				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
2923				.0
2924				.into();
2925			let hash = header.hash();
2926
2927			op.db_updates.remove(&key, EMPTY_PREFIX);
2928			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best)
2929				.unwrap();
2930
2931			backend.commit_operation(op).unwrap();
2932
2933			assert!(backend
2934				.storage
2935				.db
2936				.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
2937				.is_some());
2938			hash
2939		};
2940
2941		let hashof3 = {
2942			let mut op = backend.begin_operation().unwrap();
2943			backend.begin_state_operation(&mut op, hashof2).unwrap();
2944			let mut header = Header {
2945				number: 3,
2946				parent_hash: hashof2,
2947				state_root: Default::default(),
2948				digest: Default::default(),
2949				extrinsics_root: Default::default(),
2950			};
2951
2952			let storage: Vec<(_, _)> = vec![];
2953
2954			header.state_root = op
2955				.old_state
2956				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
2957				.0
2958				.into();
2959			let hash = header.hash();
2960
2961			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best)
2962				.unwrap();
2963
2964			backend.commit_operation(op).unwrap();
2965			hash
2966		};
2967
2968		let hashof4 = {
2969			let mut op = backend.begin_operation().unwrap();
2970			backend.begin_state_operation(&mut op, hashof3).unwrap();
2971			let mut header = Header {
2972				number: 4,
2973				parent_hash: hashof3,
2974				state_root: Default::default(),
2975				digest: Default::default(),
2976				extrinsics_root: Default::default(),
2977			};
2978
2979			let storage: Vec<(_, _)> = vec![];
2980
2981			header.state_root = op
2982				.old_state
2983				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
2984				.0
2985				.into();
2986			let hash = header.hash();
2987
2988			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best)
2989				.unwrap();
2990
2991			backend.commit_operation(op).unwrap();
2992			assert!(backend
2993				.storage
2994				.db
2995				.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
2996				.is_none());
2997			hash
2998		};
2999
3000		backend.finalize_block(hashof1, None).unwrap();
3001		backend.finalize_block(hashof2, None).unwrap();
3002		backend.finalize_block(hashof3, None).unwrap();
3003		backend.finalize_block(hashof4, None).unwrap();
3004		assert!(backend
3005			.storage
3006			.db
3007			.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
3008			.is_none());
3009	}
3010
3011	#[test]
3012	fn tree_route_works() {
3013		let backend = Backend::<Block>::new_test(1000, 100);
3014		let blockchain = backend.blockchain();
3015		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3016
3017		// fork from genesis: 3 prong.
3018		let a1 = insert_header(&backend, 1, block0, None, Default::default());
3019		let a2 = insert_header(&backend, 2, a1, None, Default::default());
3020		let a3 = insert_header(&backend, 3, a2, None, Default::default());
3021
3022		// fork from genesis: 2 prong.
3023		let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32]));
3024		let b2 = insert_header(&backend, 2, b1, None, Default::default());
3025
3026		{
3027			let tree_route = tree_route(blockchain, a1, a1).unwrap();
3028
3029			assert_eq!(tree_route.common_block().hash, a1);
3030			assert!(tree_route.retracted().is_empty());
3031			assert!(tree_route.enacted().is_empty());
3032		}
3033
3034		{
3035			let tree_route = tree_route(blockchain, a3, b2).unwrap();
3036
3037			assert_eq!(tree_route.common_block().hash, block0);
3038			assert_eq!(
3039				tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3040				vec![a3, a2, a1]
3041			);
3042			assert_eq!(
3043				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3044				vec![b1, b2]
3045			);
3046		}
3047
3048		{
3049			let tree_route = tree_route(blockchain, a1, a3).unwrap();
3050
3051			assert_eq!(tree_route.common_block().hash, a1);
3052			assert!(tree_route.retracted().is_empty());
3053			assert_eq!(
3054				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3055				vec![a2, a3]
3056			);
3057		}
3058
3059		{
3060			let tree_route = tree_route(blockchain, a3, a1).unwrap();
3061
3062			assert_eq!(tree_route.common_block().hash, a1);
3063			assert_eq!(
3064				tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3065				vec![a3, a2]
3066			);
3067			assert!(tree_route.enacted().is_empty());
3068		}
3069
3070		{
3071			let tree_route = tree_route(blockchain, a2, a2).unwrap();
3072
3073			assert_eq!(tree_route.common_block().hash, a2);
3074			assert!(tree_route.retracted().is_empty());
3075			assert!(tree_route.enacted().is_empty());
3076		}
3077	}
3078
3079	#[test]
3080	fn tree_route_child() {
3081		let backend = Backend::<Block>::new_test(1000, 100);
3082		let blockchain = backend.blockchain();
3083
3084		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3085		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3086
3087		{
3088			let tree_route = tree_route(blockchain, block0, block1).unwrap();
3089
3090			assert_eq!(tree_route.common_block().hash, block0);
3091			assert!(tree_route.retracted().is_empty());
3092			assert_eq!(
3093				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3094				vec![block1]
3095			);
3096		}
3097	}
3098
3099	#[test]
3100	fn lowest_common_ancestor_works() {
3101		let backend = Backend::<Block>::new_test(1000, 100);
3102		let blockchain = backend.blockchain();
3103		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3104
3105		// fork from genesis: 3 prong.
3106		let a1 = insert_header(&backend, 1, block0, None, Default::default());
3107		let a2 = insert_header(&backend, 2, a1, None, Default::default());
3108		let a3 = insert_header(&backend, 3, a2, None, Default::default());
3109
3110		// fork from genesis: 2 prong.
3111		let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32]));
3112		let b2 = insert_header(&backend, 2, b1, None, Default::default());
3113
3114		{
3115			let lca = lowest_common_ancestor(blockchain, a3, b2).unwrap();
3116
3117			assert_eq!(lca.hash, block0);
3118			assert_eq!(lca.number, 0);
3119		}
3120
3121		{
3122			let lca = lowest_common_ancestor(blockchain, a1, a3).unwrap();
3123
3124			assert_eq!(lca.hash, a1);
3125			assert_eq!(lca.number, 1);
3126		}
3127
3128		{
3129			let lca = lowest_common_ancestor(blockchain, a3, a1).unwrap();
3130
3131			assert_eq!(lca.hash, a1);
3132			assert_eq!(lca.number, 1);
3133		}
3134
3135		{
3136			let lca = lowest_common_ancestor(blockchain, a2, a3).unwrap();
3137
3138			assert_eq!(lca.hash, a2);
3139			assert_eq!(lca.number, 2);
3140		}
3141
3142		{
3143			let lca = lowest_common_ancestor(blockchain, a2, a1).unwrap();
3144
3145			assert_eq!(lca.hash, a1);
3146			assert_eq!(lca.number, 1);
3147		}
3148
3149		{
3150			let lca = lowest_common_ancestor(blockchain, a2, a2).unwrap();
3151
3152			assert_eq!(lca.hash, a2);
3153			assert_eq!(lca.number, 2);
3154		}
3155	}
3156
3157	#[test]
3158	fn displaced_leaves_after_finalizing_works_with_disconnect() {
3159		// In this test we will create a situation that can typically happen after warp sync.
3160		// The situation looks like this:
3161		// g -> <unimported> -> a3 -> a4
3162		// Basically there is a gap of unimported blocks at some point in the chain.
3163		let backend = Backend::<Block>::new_test(1000, 100);
3164		let blockchain = backend.blockchain();
3165		let genesis_number = 0;
3166		let genesis_hash =
3167			insert_header(&backend, genesis_number, Default::default(), None, Default::default());
3168
3169		let a3_number = 3;
3170		let a3_hash = insert_disconnected_header(
3171			&backend,
3172			a3_number,
3173			H256::from([200; 32]),
3174			H256::from([1; 32]),
3175			true,
3176		);
3177
3178		let a4_number = 4;
3179		let a4_hash =
3180			insert_disconnected_header(&backend, a4_number, a3_hash, H256::from([2; 32]), true);
3181		{
3182			let displaced =
3183				blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
3184			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
3185			assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]);
3186			assert_eq!(displaced.displaced_blocks, vec![]);
3187		}
3188
3189		{
3190			let displaced =
3191				blockchain.displaced_leaves_after_finalizing(a4_hash, a4_number).unwrap();
3192			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
3193			assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]);
3194			assert_eq!(displaced.displaced_blocks, vec![]);
3195		}
3196
3197		// Import block a1 which has the genesis block as parent.
3198		// g -> a1 -> <unimported> -> a3(f) -> a4
3199		let a1_number = 1;
3200		let a1_hash = insert_disconnected_header(
3201			&backend,
3202			a1_number,
3203			genesis_hash,
3204			H256::from([123; 32]),
3205			false,
3206		);
3207		{
3208			let displaced =
3209				blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
3210			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash]);
3211			assert_eq!(displaced.displaced_leaves, vec![]);
3212			assert_eq!(displaced.displaced_blocks, vec![]);
3213		}
3214
3215		// Import block b1 which has the genesis block as parent.
3216		// g -> a1 -> <unimported> -> a3(f) -> a4
3217		//  \-> b1
3218		let b1_number = 1;
3219		let b1_hash = insert_disconnected_header(
3220			&backend,
3221			b1_number,
3222			genesis_hash,
3223			H256::from([124; 32]),
3224			false,
3225		);
3226		{
3227			let displaced =
3228				blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
3229			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash, b1_hash]);
3230			assert_eq!(displaced.displaced_leaves, vec![]);
3231			assert_eq!(displaced.displaced_blocks, vec![]);
3232		}
3233
3234		// If the branch of b blocks reaches a higher number than the a branch, we
3235		// should still not prune disconnected leaves.
3236		// g -> a1 -> <unimported> -> a3(f) -> a4
3237		//  \-> b1 -> b2 ----------> b3 ----> b4 -> b5
3238		let b2_number = 2;
3239		let b2_hash =
3240			insert_disconnected_header(&backend, b2_number, b1_hash, H256::from([40; 32]), false);
3241		let b3_number = 3;
3242		let b3_hash =
3243			insert_disconnected_header(&backend, b3_number, b2_hash, H256::from([41; 32]), false);
3244		let b4_number = 4;
3245		let b4_hash =
3246			insert_disconnected_header(&backend, b4_number, b3_hash, H256::from([42; 32]), false);
3247		let b5_number = 5;
3248		let b5_hash =
3249			insert_disconnected_header(&backend, b5_number, b4_hash, H256::from([43; 32]), false);
3250		{
3251			let displaced =
3252				blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
3253			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, a1_hash]);
3254			assert_eq!(displaced.displaced_leaves, vec![]);
3255			assert_eq!(displaced.displaced_blocks, vec![]);
3256		}
3257
3258		// Even though there is a disconnect, displaced-leaf detection should still find
3259		// branches above the block gap.
3260		//                              /-> c4
3261		// g -> a1 -> <unimported> -> a3 -> a4(f)
3262		//  \-> b1 -> b2 ----------> b3 -> b4 -> b5
3263		let c4_number = 4;
3264		let c4_hash =
3265			insert_disconnected_header(&backend, c4_number, a3_hash, H256::from([44; 32]), false);
3266		{
3267			let displaced =
3268				blockchain.displaced_leaves_after_finalizing(a4_hash, a4_number).unwrap();
3269			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, c4_hash, a1_hash]);
3270			assert_eq!(displaced.displaced_leaves, vec![(c4_number, c4_hash)]);
3271			assert_eq!(displaced.displaced_blocks, vec![c4_hash]);
3272		}
3273	}
3274	#[test]
3275	fn displaced_leaves_after_finalizing_works() {
3276		let backend = Backend::<Block>::new_test(1000, 100);
3277		let blockchain = backend.blockchain();
3278		let genesis_number = 0;
3279		let genesis_hash =
3280			insert_header(&backend, genesis_number, Default::default(), None, Default::default());
3281
3282		// fork from genesis: 3 prong.
3283		// block 0 -> a1 -> a2 -> a3
3284		//        \
3285		//         -> b1 -> b2 -> c1 -> c2
3286		//              \
3287		//               -> d1 -> d2
3288		let a1_number = 1;
3289		let a1_hash = insert_header(&backend, a1_number, genesis_hash, None, Default::default());
3290		let a2_number = 2;
3291		let a2_hash = insert_header(&backend, a2_number, a1_hash, None, Default::default());
3292		let a3_number = 3;
3293		let a3_hash = insert_header(&backend, a3_number, a2_hash, None, Default::default());
3294
3295		{
3296			let displaced = blockchain
3297				.displaced_leaves_after_finalizing(genesis_hash, genesis_number)
3298				.unwrap();
3299			assert_eq!(displaced.displaced_leaves, vec![]);
3300			assert_eq!(displaced.displaced_blocks, vec![]);
3301		}
3302		{
3303			let displaced_a1 =
3304				blockchain.displaced_leaves_after_finalizing(a1_hash, a1_number).unwrap();
3305			assert_eq!(displaced_a1.displaced_leaves, vec![]);
3306			assert_eq!(displaced_a1.displaced_blocks, vec![]);
3307
3308			let displaced_a2 =
3309				blockchain.displaced_leaves_after_finalizing(a2_hash, a3_number).unwrap();
3310			assert_eq!(displaced_a2.displaced_leaves, vec![]);
3311			assert_eq!(displaced_a2.displaced_blocks, vec![]);
3312
3313			let displaced_a3 =
3314				blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
3315			assert_eq!(displaced_a3.displaced_leaves, vec![]);
3316			assert_eq!(displaced_a3.displaced_blocks, vec![]);
3317		}
3318		{
3319			// The finalized block is above the leaves and not imported yet.
3320			// We will not be able to make a connection,
3321			// so nothing can be marked as displaced.
3322			let displaced =
3323				blockchain.displaced_leaves_after_finalizing(H256::from([57; 32]), 10).unwrap();
3324			assert_eq!(displaced.displaced_leaves, vec![]);
3325			assert_eq!(displaced.displaced_blocks, vec![]);
3326		}
3327
3328		// fork from genesis: 2 prong.
3329		let b1_number = 1;
3330		let b1_hash = insert_header(&backend, b1_number, genesis_hash, None, H256::from([1; 32]));
3331		let b2_number = 2;
3332		let b2_hash = insert_header(&backend, b2_number, b1_hash, None, Default::default());
3333
3334		// fork from b2.
3335		let c1_number = 3;
3336		let c1_hash = insert_header(&backend, c1_number, b2_hash, None, H256::from([2; 32]));
3337		let c2_number = 4;
3338		let c2_hash = insert_header(&backend, c2_number, c1_hash, None, Default::default());
3339
3340		// fork from b1.
3341		let d1_number = 2;
3342		let d1_hash = insert_header(&backend, d1_number, b1_hash, None, H256::from([3; 32]));
3343		let d2_number = 3;
3344		let d2_hash = insert_header(&backend, d2_number, d1_hash, None, Default::default());
3345
3346		{
3347			let displaced_a1 =
3348				blockchain.displaced_leaves_after_finalizing(a1_hash, a1_number).unwrap();
3349			assert_eq!(
3350				displaced_a1.displaced_leaves,
3351				vec![(c2_number, c2_hash), (d2_number, d2_hash)]
3352			);
3353			let mut displaced_blocks = vec![b1_hash, b2_hash, c1_hash, c2_hash, d1_hash, d2_hash];
3354			displaced_blocks.sort();
3355			assert_eq!(displaced_a1.displaced_blocks, displaced_blocks);
3356
3357			let displaced_a2 =
3358				blockchain.displaced_leaves_after_finalizing(a2_hash, a2_number).unwrap();
3359			assert_eq!(displaced_a1.displaced_leaves, displaced_a2.displaced_leaves);
3360			assert_eq!(displaced_a1.displaced_blocks, displaced_a2.displaced_blocks);
3361
3362			let displaced_a3 =
3363				blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
3364			assert_eq!(displaced_a1.displaced_leaves, displaced_a3.displaced_leaves);
3365			assert_eq!(displaced_a1.displaced_blocks, displaced_a3.displaced_blocks);
3366		}
3367		{
3368			let displaced =
3369				blockchain.displaced_leaves_after_finalizing(b1_hash, b1_number).unwrap();
3370			assert_eq!(displaced.displaced_leaves, vec![(a3_number, a3_hash)]);
3371			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash];
3372			displaced_blocks.sort();
3373			assert_eq!(displaced.displaced_blocks, displaced_blocks);
3374		}
3375		{
3376			let displaced =
3377				blockchain.displaced_leaves_after_finalizing(b2_hash, b2_number).unwrap();
3378			assert_eq!(
3379				displaced.displaced_leaves,
3380				vec![(a3_number, a3_hash), (d2_number, d2_hash)]
3381			);
3382			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash];
3383			displaced_blocks.sort();
3384			assert_eq!(displaced.displaced_blocks, displaced_blocks);
3385		}
3386		{
3387			let displaced =
3388				blockchain.displaced_leaves_after_finalizing(c2_hash, c2_number).unwrap();
3389			assert_eq!(
3390				displaced.displaced_leaves,
3391				vec![(a3_number, a3_hash), (d2_number, d2_hash)]
3392			);
3393			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash];
3394			displaced_blocks.sort();
3395			assert_eq!(displaced.displaced_blocks, displaced_blocks);
3396		}
3397	}
3398
3399	#[test]
3400	fn test_tree_route_regression() {
3401		// NOTE: this is a test for a regression introduced in #3665: the result
3402		// of tree_route would be computed erroneously, since it took the `ancestor`
3403		// in `CachedHeaderMetadata` into account for the comparison.
3404		// In this test we simulate the same behavior, with the side-effect that
3405		// triggers the issue being the eviction of a previously fetched record
3406		// from the cache. This test is therefore dependent on the LRU cache
3407		// size for header metadata, which is currently set to 5000 elements.
3408		let backend = Backend::<Block>::new_test(10000, 10000);
3409		let blockchain = backend.blockchain();
3410
3411		let genesis = insert_header(&backend, 0, Default::default(), None, Default::default());
3412
3413		let block100 = (1..=100).fold(genesis, |parent, n| {
3414			insert_header(&backend, n, parent, None, Default::default())
3415		});
3416
3417		let block7000 = (101..=7000).fold(block100, |parent, n| {
3418			insert_header(&backend, n, parent, None, Default::default())
3419		});
3420
3421		// This will cause the ancestor of `block100` to be set to `genesis` as a side-effect.
3422		lowest_common_ancestor(blockchain, genesis, block100).unwrap();
3423
3424		// While traversing the tree we will have to make 6900 calls to
3425		// `header_metadata`, which ensures we exhaust the cache, which only holds
3426		// 5000 elements. In particular, the `CachedHeaderMetadata` struct for
3427		// block #100 will be evicted and will get a new value (with ancestor set to its parent).
3428		let tree_route = tree_route(blockchain, block100, block7000).unwrap();
3429
3430		assert!(tree_route.retracted().is_empty());
3431	}
3432
3433	#[test]
3434	fn test_leaves_with_complex_block_tree() {
3435		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3436			Arc::new(Backend::new_test(20, 20));
3437		substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend);
3438	}
3439
3440	#[test]
3441	fn test_children_with_complex_block_tree() {
3442		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3443			Arc::new(Backend::new_test(20, 20));
3444		substrate_test_runtime_client::trait_tests::test_children_for_backend(backend);
3445	}
3446
3447	#[test]
3448	fn test_blockchain_query_by_number_gets_canonical() {
3449		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3450			Arc::new(Backend::new_test(20, 20));
3451		substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(
3452			backend,
3453		);
3454	}
3455
3456	#[test]
3457	fn test_leaves_pruned_on_finality() {
3458		//   / 1b - 2b - 3b
3459		// 0 - 1a - 2a
3460		//   \ 1c
3461		let backend: Backend<Block> = Backend::new_test(10, 10);
3462		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3463
3464		let block1_a = insert_header(&backend, 1, block0, None, Default::default());
3465		let block1_b = insert_header(&backend, 1, block0, None, [1; 32].into());
3466		let block1_c = insert_header(&backend, 1, block0, None, [2; 32].into());
3467
3468		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block1_a, block1_b, block1_c]);
3469
3470		let block2_a = insert_header(&backend, 2, block1_a, None, Default::default());
3471		let block2_b = insert_header(&backend, 2, block1_b, None, Default::default());
3472
3473		let block3_b = insert_header(&backend, 3, block2_b, None, [3; 32].into());
3474
3475		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block3_b, block2_a, block1_c]);
3476
3477		backend.finalize_block(block1_a, None).unwrap();
3478		backend.finalize_block(block2_a, None).unwrap();
3479
3480		// All leaves that are known not to belong to the canonical branch are pruned
3481		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
3482	}
3483
3484	#[test]
3485	fn test_aux() {
3486		let backend: Backend<substrate_test_runtime_client::runtime::Block> =
3487			Backend::new_test(0, 0);
3488		assert!(backend.get_aux(b"test").unwrap().is_none());
3489		backend.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]).unwrap();
3490		assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]);
3491		backend.insert_aux(&[], &[&b"test"[..]]).unwrap();
3492		assert!(backend.get_aux(b"test").unwrap().is_none());
3493	}
3494
3495	#[test]
3496	fn test_finalize_block_with_justification() {
3497		use sc_client_api::blockchain::Backend as BlockChainBackend;
3498
3499		let backend = Backend::<Block>::new_test(10, 10);
3500
3501		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3502		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3503
3504		let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3]));
3505		backend.finalize_block(block1, justification.clone()).unwrap();
3506
3507		assert_eq!(
3508			backend.blockchain().justifications(block1).unwrap(),
3509			justification.map(Justifications::from),
3510		);
3511	}
3512
3513	#[test]
3514	fn test_append_justification_to_finalized_block() {
3515		use sc_client_api::blockchain::Backend as BlockChainBackend;
3516
3517		let backend = Backend::<Block>::new_test(10, 10);
3518
3519		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3520		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3521
3522		let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]);
3523		backend.finalize_block(block1, Some(just0.clone().into())).unwrap();
3524
3525		let just1 = (CONS1_ENGINE_ID, vec![4, 5]);
3526		backend.append_justification(block1, just1.clone()).unwrap();
3527
3528		let just2 = (CONS1_ENGINE_ID, vec![6, 7]);
3529		assert!(matches!(
3530			backend.append_justification(block1, just2),
3531			Err(ClientError::BadJustification(_))
3532		));
3533
3534		let justifications = {
3535			let mut just = Justifications::from(just0);
3536			just.append(just1);
3537			just
3538		};
3539		assert_eq!(backend.blockchain().justifications(block1).unwrap(), Some(justifications),);
3540	}
3541
3542	#[test]
3543	fn test_finalize_multiple_blocks_in_single_op() {
3544		let backend = Backend::<Block>::new_test(10, 10);
3545
3546		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3547		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3548		let block2 = insert_header(&backend, 2, block1, None, Default::default());
3549		let block3 = insert_header(&backend, 3, block2, None, Default::default());
3550		let block4 = insert_header(&backend, 4, block3, None, Default::default());
3551		{
3552			let mut op = backend.begin_operation().unwrap();
3553			backend.begin_state_operation(&mut op, block0).unwrap();
3554			op.mark_finalized(block1, None).unwrap();
3555			op.mark_finalized(block2, None).unwrap();
3556			backend.commit_operation(op).unwrap();
3557		}
3558		{
3559			let mut op = backend.begin_operation().unwrap();
3560			backend.begin_state_operation(&mut op, block2).unwrap();
3561			op.mark_finalized(block3, None).unwrap();
3562			op.mark_finalized(block4, None).unwrap();
3563			backend.commit_operation(op).unwrap();
3564		}
3565	}
3566
3567	#[test]
3568	fn storage_hash_is_cached_correctly() {
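		// Store the same key with different values at block 0 and block 1; the cached
		// storage hashes must be tracked per block state and therefore differ.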
3569		let state_version = StateVersion::default();
3570		let backend = Backend::<Block>::new_test(10, 10);
3571
3572		let hash0 = {
3573			let mut op = backend.begin_operation().unwrap();
3574			backend.begin_state_operation(&mut op, Default::default()).unwrap();
3575			let mut header = Header {
3576				number: 0,
3577				parent_hash: Default::default(),
3578				state_root: Default::default(),
3579				digest: Default::default(),
3580				extrinsics_root: Default::default(),
3581			};
3582
3583			let storage = vec![(b"test".to_vec(), b"test".to_vec())];
3584
3585			header.state_root = op
3586				.old_state
3587				.storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version)
3588				.0
3589				.into();
3590			let hash = header.hash();
3591
3592			op.reset_storage(
3593				Storage {
3594					top: storage.into_iter().collect(),
3595					children_default: Default::default(),
3596				},
3597				state_version,
3598			)
3599			.unwrap();
3600			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best)
3601				.unwrap();
3602
3603			backend.commit_operation(op).unwrap();
3604
3605			hash
3606		};
3607
3608		let block0_hash = backend.state_at(hash0).unwrap().storage_hash(&b"test"[..]).unwrap();
3609
3610		let hash1 = {
3611			let mut op = backend.begin_operation().unwrap();
3612			backend.begin_state_operation(&mut op, hash0).unwrap();
3613			let mut header = Header {
3614				number: 1,
3615				parent_hash: hash0,
3616				state_root: Default::default(),
3617				digest: Default::default(),
3618				extrinsics_root: Default::default(),
3619			};
3620
3621			let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))];
3622
3623			let (root, overlay) = op.old_state.storage_root(
3624				storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
3625				state_version,
3626			);
3627			op.update_db_storage(overlay).unwrap();
3628			header.state_root = root.into();
3629			let hash = header.hash();
3630
3631			op.update_storage(storage, Vec::new()).unwrap();
3632			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Normal)
3633				.unwrap();
3634
3635			backend.commit_operation(op).unwrap();
3636
3637			hash
3638		};
3639
3640		{
3641			let header = backend.blockchain().header(hash1).unwrap().unwrap();
3642			let mut op = backend.begin_operation().unwrap();
3643			op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap();
3644			backend.commit_operation(op).unwrap();
3645		}
3646
3647		let block1_hash = backend.state_at(hash1).unwrap().storage_hash(&b"test"[..]).unwrap();
3648
3649		assert_ne!(block0_hash, block1_hash);
3650	}
3651
3652	#[test]
3653	fn test_finalize_non_sequential() {
3654		let backend = Backend::<Block>::new_test(10, 10);
3655
3656		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3657		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3658		let block2 = insert_header(&backend, 2, block1, None, Default::default());
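		// Finalizing block 2 while block 1 is still unfinalized must make the commit
		// fail: finalization has to proceed parent-first.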
3659		{
3660			let mut op = backend.begin_operation().unwrap();
3661			backend.begin_state_operation(&mut op, block0).unwrap();
3662			op.mark_finalized(block2, None).unwrap();
3663			backend.commit_operation(op).unwrap_err();
3664		}
3665	}
3666
3667	#[test]
3668	fn prune_blocks_on_finalize() {
3669		let pruning_modes =
3670			vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll];
3671
3672		for pruning_mode in pruning_modes {
3673			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 0);
3674			let mut blocks = Vec::new();
3675			let mut prev_hash = Default::default();
3676			for i in 0..5 {
3677				let hash = insert_block(
3678					&backend,
3679					i,
3680					prev_hash,
3681					None,
3682					Default::default(),
3683					vec![i.into()],
3684					None,
3685				)
3686				.unwrap();
3687				blocks.push(hash);
3688				prev_hash = hash;
3689			}
3690
3691			{
3692				let mut op = backend.begin_operation().unwrap();
3693				backend.begin_state_operation(&mut op, blocks[4]).unwrap();
3694				for i in 1..5 {
3695					op.mark_finalized(blocks[i], None).unwrap();
3696				}
3697				backend.commit_operation(op).unwrap();
3698			}
3699			let bc = backend.blockchain();
3700
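			// With `BlocksPruning::Some(2)` only the bodies of the two most recently
			// finalized blocks survive; the other modes keep all bodies.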
3701			if matches!(pruning_mode, BlocksPruning::Some(_)) {
3702				assert_eq!(None, bc.body(blocks[0]).unwrap());
3703				assert_eq!(None, bc.body(blocks[1]).unwrap());
3704				assert_eq!(None, bc.body(blocks[2]).unwrap());
3705				assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap());
3706				assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap());
3707			} else {
3708				for i in 0..5 {
3709					assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap());
3710				}
3711			}
3712		}
3713	}
3714
3715	#[test]
3716	fn prune_blocks_on_finalize_with_fork() {
3717		sp_tracing::try_init_simple();
3718
3719		let pruning_modes =
3720			vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll];
3721
3722		for pruning in pruning_modes {
3723			let backend = Backend::<Block>::new_test_with_tx_storage(pruning, 10);
3724			let mut blocks = Vec::new();
3725			let mut prev_hash = Default::default();
3726			for i in 0..5 {
3727				let hash = insert_block(
3728					&backend,
3729					i,
3730					prev_hash,
3731					None,
3732					Default::default(),
3733					vec![i.into()],
3734					None,
3735				)
3736				.unwrap();
3737				blocks.push(hash);
3738				prev_hash = hash;
3739			}
3740
3741			// insert a fork at block 2
3742			let fork_hash_root =
3743				insert_block(&backend, 2, blocks[1], None, H256::random(), vec![2.into()], None)
3744					.unwrap();
3745			insert_block(
3746				&backend,
3747				3,
3748				fork_hash_root,
3749				None,
3750				H256::random(),
3751				vec![3.into(), 11.into()],
3752				None,
3753			)
3754			.unwrap();
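			// Make block 4 the best block before finalizing the canonical chain.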
3755			let mut op = backend.begin_operation().unwrap();
3756			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
3757			op.mark_head(blocks[4]).unwrap();
3758			backend.commit_operation(op).unwrap();
3759
3760			let bc = backend.blockchain();
3761			assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap());
3762
3763			for i in 1..5 {
3764				let mut op = backend.begin_operation().unwrap();
3765				backend.begin_state_operation(&mut op, blocks[4]).unwrap();
3766				op.mark_finalized(blocks[i], None).unwrap();
3767				backend.commit_operation(op).unwrap();
3768			}
3769
3770			if matches!(pruning, BlocksPruning::Some(_)) {
3771				assert_eq!(None, bc.body(blocks[0]).unwrap());
3772				assert_eq!(None, bc.body(blocks[1]).unwrap());
3773				assert_eq!(None, bc.body(blocks[2]).unwrap());
3774
3775				assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap());
3776				assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap());
3777			} else {
3778				for i in 0..5 {
3779					assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap());
3780				}
3781			}
3782
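			// Fork bodies survive only under `KeepAll`; `KeepFinalized` and `Some(_)`
			// drop displaced (non-canonical) blocks once the chain is finalized past them.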
3783			if matches!(pruning, BlocksPruning::KeepAll) {
3784				assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap());
3785			} else {
3786				assert_eq!(None, bc.body(fork_hash_root).unwrap());
3787			}
3788
3789			assert_eq!(bc.info().best_number, 4);
3790			for i in 0..5 {
3791				assert!(bc.hash(i).unwrap().is_some());
3792			}
3793		}
3794	}
3795
3796	#[test]
3797	fn prune_blocks_on_finalize_and_reorg() {
3798		//	0 - 1b
3799		//	\ - 1a - 2a - 3a
3800		//	     \ - 2b
3801
3802		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(10), 10);
3803
3804		let make_block = |index, parent, val: u64| {
3805			insert_block(&backend, index, parent, None, H256::random(), vec![val.into()], None)
3806				.unwrap()
3807		};
3808
3809		let block_0 = make_block(0, Default::default(), 0x00);
3810		let block_1a = make_block(1, block_0, 0x1a);
3811		let block_1b = make_block(1, block_0, 0x1b);
3812		let block_2a = make_block(2, block_1a, 0x2a);
3813		let block_2b = make_block(2, block_1a, 0x2b);
3814		let block_3a = make_block(3, block_2a, 0x3a);
3815
3816		// Make sure 1b is head
3817		let mut op = backend.begin_operation().unwrap();
3818		backend.begin_state_operation(&mut op, block_0).unwrap();
3819		op.mark_head(block_1b).unwrap();
3820		backend.commit_operation(op).unwrap();
3821
3822		// Finalize 3a
3823		let mut op = backend.begin_operation().unwrap();
3824		backend.begin_state_operation(&mut op, block_0).unwrap();
3825		op.mark_head(block_3a).unwrap();
3826		op.mark_finalized(block_1a, None).unwrap();
3827		op.mark_finalized(block_2a, None).unwrap();
3828		op.mark_finalized(block_3a, None).unwrap();
3829		backend.commit_operation(op).unwrap();
3830
3831		let bc = backend.blockchain();
3832		assert_eq!(None, bc.body(block_1b).unwrap());
3833		assert_eq!(None, bc.body(block_2b).unwrap());
3834		assert_eq!(Some(vec![0x00.into()]), bc.body(block_0).unwrap());
3835		assert_eq!(Some(vec![0x1a.into()]), bc.body(block_1a).unwrap());
3836		assert_eq!(Some(vec![0x2a.into()]), bc.body(block_2a).unwrap());
3837		assert_eq!(Some(vec![0x3a.into()]), bc.body(block_3a).unwrap());
3838	}
3839
3840	#[test]
3841	fn indexed_data_block_body() {
3842		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
3843
3844		let x0 = ExtrinsicWrapper::from(0u64).encode();
3845		let x1 = ExtrinsicWrapper::from(1u64).encode();
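		// The indexed data covers the encoded extrinsic minus its first byte: both the
		// hash and the size below are computed over `x[1..]`.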
3846		let x0_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x0[1..]);
3847		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
3848		let index = vec![
3849			IndexOperation::Insert {
3850				extrinsic: 0,
3851				hash: x0_hash.as_ref().to_vec(),
3852				size: (x0.len() - 1) as u32,
3853			},
3854			IndexOperation::Insert {
3855				extrinsic: 1,
3856				hash: x1_hash.as_ref().to_vec(),
3857				size: (x1.len() - 1) as u32,
3858			},
3859		];
3860		let hash = insert_block(
3861			&backend,
3862			0,
3863			Default::default(),
3864			None,
3865			Default::default(),
3866			vec![0u64.into(), 1u64.into()],
3867			Some(index),
3868		)
3869		.unwrap();
3870		let bc = backend.blockchain();
3871		assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[1..]);
3872		assert_eq!(bc.indexed_transaction(x1_hash).unwrap().unwrap(), &x1[1..]);
3873
3874		let hashof0 = bc.info().genesis_hash;
3875		// Push one more block and make sure the old block is pruned and its transaction index cleared.
3876		let block1 =
3877			insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap();
3878		backend.finalize_block(block1, None).unwrap();
3879		assert_eq!(bc.body(hashof0).unwrap(), None);
3880		assert_eq!(bc.indexed_transaction(x0_hash).unwrap(), None);
3881		assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None);
3882	}
3883
3884	#[test]
3885	fn index_invalid_size() {
3886		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
3887
3888		let x0 = ExtrinsicWrapper::from(0u64).encode();
3889		let x1 = ExtrinsicWrapper::from(1u64).encode();
3890		let x0_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x0[..]);
3891		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[..]);
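		// The first entry's declared size matches the available data; the second claims
		// one byte more than the encoded extrinsic holds and should therefore not be indexed.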
3892		let index = vec![
3893			IndexOperation::Insert {
3894				extrinsic: 0,
3895				hash: x0_hash.as_ref().to_vec(),
3896				size: (x0.len()) as u32,
3897			},
3898			IndexOperation::Insert {
3899				extrinsic: 1,
3900				hash: x1_hash.as_ref().to_vec(),
3901				size: (x1.len() + 1) as u32,
3902			},
3903		];
3904		insert_block(
3905			&backend,
3906			0,
3907			Default::default(),
3908			None,
3909			Default::default(),
3910			vec![0u64.into(), 1u64.into()],
3911			Some(index),
3912		)
3913		.unwrap();
3914		let bc = backend.blockchain();
3915		assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[..]);
3916		assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None);
3917	}
3918
3919	#[test]
3920	fn renew_transaction_storage() {
3921		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
3922		let mut blocks = Vec::new();
3923		let mut prev_hash = Default::default();
3924		let x1 = ExtrinsicWrapper::from(0u64).encode();
3925		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
3926		for i in 0..10 {
3927			let mut index = Vec::new();
3928			if i == 0 {
3929				index.push(IndexOperation::Insert {
3930					extrinsic: 0,
3931					hash: x1_hash.as_ref().to_vec(),
3932					size: (x1.len() - 1) as u32,
3933				});
3934			} else if i < 5 {
3935				// keep renewing the first indexed transaction
3936				index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
3937			} // else stop renewing
3938			let hash = insert_block(
3939				&backend,
3940				i,
3941				prev_hash,
3942				None,
3943				Default::default(),
3944				vec![i.into()],
3945				Some(index),
3946			)
3947			.unwrap();
3948			blocks.push(hash);
3949			prev_hash = hash;
3950		}
3951
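		// With `BlocksPruning::Some(2)`, the indexed transaction stays available as long
		// as an unpruned block still references it (the initial insert or a renewal);
		// once the renewal in block 4 is pruned, the entry disappears.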
3952		for i in 1..10 {
3953			let mut op = backend.begin_operation().unwrap();
3954			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
3955			op.mark_finalized(blocks[i], None).unwrap();
3956			backend.commit_operation(op).unwrap();
3957			let bc = backend.blockchain();
3958			if i < 6 {
3959				assert!(bc.indexed_transaction(x1_hash).unwrap().is_some());
3960			} else {
3961				assert!(bc.indexed_transaction(x1_hash).unwrap().is_none());
3962			}
3963		}
3964	}
3965
3966	#[test]
3967	fn remove_leaf_block_works() {
3968		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
3969		let mut blocks = Vec::new();
3970		let mut prev_hash = Default::default();
3971		for i in 0..2 {
3972			let hash = insert_block(
3973				&backend,
3974				i,
3975				prev_hash,
3976				None,
3977				Default::default(),
3978				vec![i.into()],
3979				None,
3980			)
3981			.unwrap();
3982			blocks.push(hash);
3983			prev_hash = hash;
3984		}
3985
3986		for i in 0..2 {
3987			let hash = insert_block(
3988				&backend,
3989				2,
3990				blocks[1],
3991				None,
3992				sp_core::H256::random(),
3993				vec![i.into()],
3994				None,
3995			)
3996			.unwrap();
3997			blocks.push(hash);
3998		}
3999
4000		// insert a fork at block 1, which becomes the best block
4001		let best_hash = insert_block(
4002			&backend,
4003			1,
4004			blocks[0],
4005			None,
4006			sp_core::H256::random(),
4007			vec![42.into()],
4008			None,
4009		)
4010		.unwrap();
4011
4012		assert_eq!(backend.blockchain().info().best_hash, best_hash);
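		// Removing the current best block must fail.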
4013		assert!(backend.remove_leaf_block(best_hash).is_err());
4014
4015		assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], blocks[3], best_hash]);
4016		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]);
4017
4018		assert!(backend.have_state_at(blocks[3], 2));
4019		assert!(backend.blockchain().header(blocks[3]).unwrap().is_some());
4020		backend.remove_leaf_block(blocks[3]).unwrap();
4021		assert!(!backend.have_state_at(blocks[3], 2));
4022		assert!(backend.blockchain().header(blocks[3]).unwrap().is_none());
4023		assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]);
4024		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]);
4025
4026		assert!(backend.have_state_at(blocks[2], 2));
4027		assert!(backend.blockchain().header(blocks[2]).unwrap().is_some());
4028		backend.remove_leaf_block(blocks[2]).unwrap();
4029		assert!(!backend.have_state_at(blocks[2], 2));
4030		assert!(backend.blockchain().header(blocks[2]).unwrap().is_none());
4031		assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]);
4032		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]);
4033
4034		assert!(backend.have_state_at(blocks[1], 1));
4035		assert!(backend.blockchain().header(blocks[1]).unwrap().is_some());
4036		backend.remove_leaf_block(blocks[1]).unwrap();
4037		assert!(!backend.have_state_at(blocks[1], 1));
4038		assert!(backend.blockchain().header(blocks[1]).unwrap().is_none());
4039		assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]);
4040		assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]);
4041	}
4042
4043	#[test]
4044	fn test_import_existing_block_as_new_head() {
4045		let backend: Backend<Block> = Backend::new_test(10, 3);
4046		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4047		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4048		let block2 = insert_header(&backend, 2, block1, None, Default::default());
4049		let block3 = insert_header(&backend, 3, block2, None, Default::default());
4050		let block4 = insert_header(&backend, 4, block3, None, Default::default());
4051		let block5 = insert_header(&backend, 5, block4, None, Default::default());
4052		assert_eq!(backend.blockchain().info().best_hash, block5);
4053
4054		// Insert 1 as best again. This should fail because canonicalization_delay == 3
4055		// and best == 5.
4056		let header = Header {
4057			number: 1,
4058			parent_hash: block0,
4059			state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
4060			digest: Default::default(),
4061			extrinsics_root: Default::default(),
4062		};
4063		let mut op = backend.begin_operation().unwrap();
4064		op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap();
4065		assert!(matches!(backend.commit_operation(op), Err(sp_blockchain::Error::SetHeadTooOld)));
4066
4067		// Insert 2 as best again.
4068		let header = backend.blockchain().header(block2).unwrap().unwrap();
4069		let mut op = backend.begin_operation().unwrap();
4070		op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap();
4071		backend.commit_operation(op).unwrap();
4072		assert_eq!(backend.blockchain().info().best_hash, block2);
4073	}
4074
4075	#[test]
4076	fn test_import_existing_block_as_final() {
4077		let backend: Backend<Block> = Backend::new_test(10, 10);
4078		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4079		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4080		let _block2 = insert_header(&backend, 2, block1, None, Default::default());
4081		// Genesis is auto finalized, the rest are not.
4082		assert_eq!(backend.blockchain().info().finalized_hash, block0);
4083
4084		// Insert 1 as final again.
4085		let header = backend.blockchain().header(block1).unwrap().unwrap();
4086
4087		let mut op = backend.begin_operation().unwrap();
4088		op.set_block_data(header, None, None, None, NewBlockState::Final).unwrap();
4089		backend.commit_operation(op).unwrap();
4090
4091		assert_eq!(backend.blockchain().info().finalized_hash, block1);
4092	}
4093
4094	#[test]
4095	fn test_import_existing_state_fails() {
4096		let backend: Backend<Block> = Backend::new_test(10, 10);
4097		let genesis =
4098			insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
4099				.unwrap();
4100
4101		insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap();
4102		let err = insert_block(&backend, 1, genesis, None, Default::default(), vec![], None)
4103			.err()
4104			.unwrap();
4105		match err {
4106			sp_blockchain::Error::StateDatabase(m) if m == "Block already exists" => (),
4107			e @ _ => panic!("Unexpected error {:?}", e),
4108		}
4109	}
4110
4111	#[test]
4112	fn test_leaves_not_created_for_ancient_blocks() {
4113		let backend: Backend<Block> = Backend::new_test(10, 10);
4114		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4115
4116		let block1_a = insert_header(&backend, 1, block0, None, Default::default());
4117		let block2_a = insert_header(&backend, 2, block1_a, None, Default::default());
4118		backend.finalize_block(block1_a, None).unwrap();
4119		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
4120
4121		// Insert a fork prior to the finalization point. A leaf should not be created.
4122		insert_header_no_head(&backend, 1, block0, [1; 32].into());
4123		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
4124	}
4125
4126	#[test]
4127	fn revert_non_best_blocks() {
4128		let backend = Backend::<Block>::new_test(10, 10);
4129
4130		let genesis =
4131			insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
4132				.unwrap();
4133
4134		let block1 =
4135			insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap();
4136
4137		let block2 =
4138			insert_block(&backend, 2, block1, None, Default::default(), vec![], None).unwrap();
4139
4140		let block3 = {
4141			let mut op = backend.begin_operation().unwrap();
4142			backend.begin_state_operation(&mut op, block1).unwrap();
4143			let header = Header {
4144				number: 3,
4145				parent_hash: block2,
4146				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
4147				digest: Default::default(),
4148				extrinsics_root: Default::default(),
4149			};
4150
4151			op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal)
4152				.unwrap();
4153
4154			backend.commit_operation(op).unwrap();
4155
4156			header.hash()
4157		};
4158
4159		let block4 = {
4160			let mut op = backend.begin_operation().unwrap();
4161			backend.begin_state_operation(&mut op, block2).unwrap();
4162			let header = Header {
4163				number: 4,
4164				parent_hash: block3,
4165				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
4166				digest: Default::default(),
4167				extrinsics_root: Default::default(),
4168			};
4169
4170			op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal)
4171				.unwrap();
4172
4173			backend.commit_operation(op).unwrap();
4174
4175			header.hash()
4176		};
4177
4178		let block3_fork = {
4179			let mut op = backend.begin_operation().unwrap();
4180			backend.begin_state_operation(&mut op, block2).unwrap();
4181			let header = Header {
4182				number: 3,
4183				parent_hash: block2,
4184				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
4185				digest: Default::default(),
4186				extrinsics_root: H256::from_low_u64_le(42),
4187			};
4188
4189			op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal)
4190				.unwrap();
4191
4192			backend.commit_operation(op).unwrap();
4193
4194			header.hash()
4195		};
4196
4197		assert!(backend.have_state_at(block1, 1));
4198		assert!(backend.have_state_at(block2, 2));
4199		assert!(backend.have_state_at(block3, 3));
4200		assert!(backend.have_state_at(block4, 4));
4201		assert!(backend.have_state_at(block3_fork, 3));
4202
4203		assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]);
4204		assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0);
4205
4206		assert_eq!(3, backend.revert(1, false).unwrap().0);
4207
4208		assert!(backend.have_state_at(block1, 1));
4209		assert!(!backend.have_state_at(block2, 2));
4210		assert!(!backend.have_state_at(block3, 3));
4211		assert!(!backend.have_state_at(block4, 4));
4212		assert!(!backend.have_state_at(block3_fork, 3));
4213
4214		assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]);
4215		assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0);
4216	}
4217
4218	#[test]
4219	fn revert_finalized_blocks() {
4220		let pruning_modes = [BlocksPruning::Some(10), BlocksPruning::KeepAll];
4221
4222		// we will create a chain with 11 blocks, finalize block #8 and then
4223		// attempt to revert 5 blocks.
4224		for pruning_mode in pruning_modes {
4225			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);
4226
4227			let mut parent = Default::default();
4228			for i in 0..=10 {
4229				parent = insert_block(&backend, i, parent, None, Default::default(), vec![], None)
4230					.unwrap();
4231			}
4232
4233			assert_eq!(backend.blockchain().info().best_number, 10);
4234
4235			let block8 = backend.blockchain().hash(8).unwrap().unwrap();
4236			backend.finalize_block(block8, None).unwrap();
4237			backend.revert(5, true).unwrap();
4238
4239			match pruning_mode {
4240				// we can only revert to blocks for which we have state, if pruning is enabled
4241				// then the last state available will be that of the latest finalized block
4242				BlocksPruning::Some(_) => {
4243					assert_eq!(backend.blockchain().info().finalized_number, 8)
4244				},
4245				// otherwise if we're not doing state pruning we can revert past finalized blocks
4246				_ => assert_eq!(backend.blockchain().info().finalized_number, 5),
4247			}
4248		}
4249	}
4250
4251	#[test]
4252	fn test_no_duplicated_leaves_allowed() {
4253		let backend: Backend<Block> = Backend::new_test(10, 10);
4254		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4255		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4256		// Add block 2 not as the best block
4257		let block2 = insert_header_no_head(&backend, 2, block1, Default::default());
4258		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
4259		assert_eq!(backend.blockchain().info().best_hash, block1);
4260
4261		// Add block 2 as the best block
4262		let block2 = insert_header(&backend, 2, block1, None, Default::default());
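		// Re-importing block 2 as best must not create a duplicate leaf entry.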
4263		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
4264		assert_eq!(backend.blockchain().info().best_hash, block2);
4265	}
4266
4267	#[test]
4268	fn force_delayed_canonicalize_waiting_for_blocks_to_be_finalized() {
4269		let pruning_modes =
4270			[BlocksPruning::Some(10), BlocksPruning::KeepAll, BlocksPruning::KeepFinalized];
4271
4272		for pruning_mode in pruning_modes {
4273			eprintln!("Running with pruning mode: {:?}", pruning_mode);
4274
4275			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);
4276
4277			let genesis = insert_block(
4278				&backend,
4279				0,
4280				Default::default(),
4281				None,
4282				Default::default(),
4283				vec![],
4284				None,
4285			)
4286			.unwrap();
4287
4288			let block1 = {
4289				let mut op = backend.begin_operation().unwrap();
4290				backend.begin_state_operation(&mut op, genesis).unwrap();
4291				let mut header = Header {
4292					number: 1,
4293					parent_hash: genesis,
4294					state_root: Default::default(),
4295					digest: Default::default(),
4296					extrinsics_root: Default::default(),
4297				};
4298
4299				let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];
4300
4301				let (root, overlay) = op.old_state.storage_root(
4302					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4303					StateVersion::V1,
4304				);
4305				op.update_db_storage(overlay).unwrap();
4306				header.state_root = root.into();
4307
4308				op.update_storage(storage, Vec::new()).unwrap();
4309
4310				op.set_block_data(
4311					header.clone(),
4312					Some(Vec::new()),
4313					None,
4314					None,
4315					NewBlockState::Normal,
4316				)
4317				.unwrap();
4318
4319				backend.commit_operation(op).unwrap();
4320
4321				header.hash()
4322			};
4323
4324			if matches!(pruning_mode, BlocksPruning::Some(_)) {
4325				assert_eq!(
4326					LastCanonicalized::Block(0),
4327					backend.storage.state_db.last_canonicalized()
4328				);
4329			}
4330
4331			// This should not trigger any forced canonicalization, as we haven't imported any
4332			// best block so far.
4333			let block2 = {
4334				let mut op = backend.begin_operation().unwrap();
4335				backend.begin_state_operation(&mut op, block1).unwrap();
4336				let mut header = Header {
4337					number: 2,
4338					parent_hash: block1,
4339					state_root: Default::default(),
4340					digest: Default::default(),
4341					extrinsics_root: Default::default(),
4342				};
4343
4344				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 2]))];
4345
4346				let (root, overlay) = op.old_state.storage_root(
4347					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4348					StateVersion::V1,
4349				);
4350				op.update_db_storage(overlay).unwrap();
4351				header.state_root = root.into();
4352
4353				op.update_storage(storage, Vec::new()).unwrap();
4354
4355				op.set_block_data(
4356					header.clone(),
4357					Some(Vec::new()),
4358					None,
4359					None,
4360					NewBlockState::Normal,
4361				)
4362				.unwrap();
4363
4364				backend.commit_operation(op).unwrap();
4365
4366				header.hash()
4367			};
4368
4369			if matches!(pruning_mode, BlocksPruning::Some(_)) {
4370				assert_eq!(
4371					LastCanonicalized::Block(0),
4372					backend.storage.state_db.last_canonicalized()
4373				);
4374			}
4375
4376			// This should also not trigger it yet: we import a best block, but the best
4377			// block from the POV of the db is still at `0`.
4378			let block3 = {
4379				let mut op = backend.begin_operation().unwrap();
4380				backend.begin_state_operation(&mut op, block2).unwrap();
4381				let mut header = Header {
4382					number: 3,
4383					parent_hash: block2,
4384					state_root: Default::default(),
4385					digest: Default::default(),
4386					extrinsics_root: Default::default(),
4387				};
4388
4389				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 3]))];
4390
4391				let (root, overlay) = op.old_state.storage_root(
4392					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4393					StateVersion::V1,
4394				);
4395				op.update_db_storage(overlay).unwrap();
4396				header.state_root = root.into();
4397
4398				op.update_storage(storage, Vec::new()).unwrap();
4399
4400				op.set_block_data(
4401					header.clone(),
4402					Some(Vec::new()),
4403					None,
4404					None,
4405					NewBlockState::Best,
4406				)
4407				.unwrap();
4408
4409				backend.commit_operation(op).unwrap();
4410
4411				header.hash()
4412			};
4413
4414			// Now it should kick in.
4415			let block4 = {
4416				let mut op = backend.begin_operation().unwrap();
4417				backend.begin_state_operation(&mut op, block3).unwrap();
4418				let mut header = Header {
4419					number: 4,
4420					parent_hash: block3,
4421					state_root: Default::default(),
4422					digest: Default::default(),
4423					extrinsics_root: Default::default(),
4424				};
4425
4426				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 4]))];
4427
4428				let (root, overlay) = op.old_state.storage_root(
4429					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4430					StateVersion::V1,
4431				);
4432				op.update_db_storage(overlay).unwrap();
4433				header.state_root = root.into();
4434
4435				op.update_storage(storage, Vec::new()).unwrap();
4436
4437				op.set_block_data(
4438					header.clone(),
4439					Some(Vec::new()),
4440					None,
4441					None,
4442					NewBlockState::Best,
4443				)
4444				.unwrap();
4445
4446				backend.commit_operation(op).unwrap();
4447
4448				header.hash()
4449			};
4450
4451			if matches!(pruning_mode, BlocksPruning::Some(_)) {
4452				assert_eq!(
4453					LastCanonicalized::Block(2),
4454					backend.storage.state_db.last_canonicalized()
4455				);
4456			}
4457
4458			assert_eq!(block1, backend.blockchain().hash(1).unwrap().unwrap());
4459			assert_eq!(block2, backend.blockchain().hash(2).unwrap().unwrap());
4460			assert_eq!(block3, backend.blockchain().hash(3).unwrap().unwrap());
4461			assert_eq!(block4, backend.blockchain().hash(4).unwrap().unwrap());
4462		}
4463	}
4464
4465	#[test]
4466	fn test_pinned_blocks_on_finalize() {
4467		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
4468		let mut blocks = Vec::new();
4469		let mut prev_hash = Default::default();
4470
4471		let build_justification = |i: u64| ([0, 0, 0, 0], vec![i.try_into().unwrap()]);
4472		// Block tree:
4473		//   0 -> 1 -> 2 -> 3 -> 4
4474		for i in 0..5 {
4475			let hash = insert_block(
4476				&backend,
4477				i,
4478				prev_hash,
4479				None,
4480				Default::default(),
4481				vec![i.into()],
4482				None,
4483			)
4484			.unwrap();
4485			blocks.push(hash);
4486			// Avoid block pruning.
4487			backend.pin_block(blocks[i as usize]).unwrap();
4488
4489			prev_hash = hash;
4490		}
4491
4492		let bc = backend.blockchain();
4493
4494		// Check that we can properly access values when there is a reference count
4495		// but no value cached yet.
4496		assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap());
4497
4498		// Block 1 gets pinned two more times, for a total of three pins.
4499		backend.pin_block(blocks[1]).unwrap();
4500		backend.pin_block(blocks[1]).unwrap();
4501
4502		// Finalize all blocks. This will trigger pruning.
4503		let mut op = backend.begin_operation().unwrap();
4504		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
4505		for i in 1..5 {
4506			op.mark_finalized(blocks[i], Some(build_justification(i.try_into().unwrap())))
4507				.unwrap();
4508		}
4509		backend.commit_operation(op).unwrap();
4510
4511		// Block 0, 1, 2, 3 are pinned, so all values should be cached.
4512		// Block 4 is inside the pruning window, its value is in db.
4513		assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap());
4514
4515		assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap());
4516		assert_eq!(
4517			Some(Justifications::from(build_justification(1))),
4518			bc.justifications(blocks[1]).unwrap()
4519		);
4520
4521		assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap());
4522		assert_eq!(
4523			Some(Justifications::from(build_justification(2))),
4524			bc.justifications(blocks[2]).unwrap()
4525		);
4526
4527		assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap());
4528		assert_eq!(
4529			Some(Justifications::from(build_justification(3))),
4530			bc.justifications(blocks[3]).unwrap()
4531		);
4532
4533		assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap());
4534		assert_eq!(
4535			Some(Justifications::from(build_justification(4))),
4536			bc.justifications(blocks[4]).unwrap()
4537		);
4538
4539		// Unpin all blocks. Values should be removed from cache.
4540		for block in &blocks {
4541			backend.unpin_block(*block);
4542		}
4543
4544		assert!(bc.body(blocks[0]).unwrap().is_none());
4545		// Block 1 still holds two pins, so we expect it to still be cached
4546		assert!(bc.body(blocks[1]).unwrap().is_some());
4547		assert!(bc.justifications(blocks[1]).unwrap().is_some());
4548		// Headers should also be available while pinned
4549		assert!(bc.header(blocks[1]).ok().flatten().is_some());
4550		assert!(bc.body(blocks[2]).unwrap().is_none());
4551		assert!(bc.justifications(blocks[2]).unwrap().is_none());
4552		assert!(bc.body(blocks[3]).unwrap().is_none());
4553		assert!(bc.justifications(blocks[3]).unwrap().is_none());
4554
4555		// Only after both remaining pins are released should block 1 be removed
4556		backend.unpin_block(blocks[1]);
4557		assert!(bc.body(blocks[1]).unwrap().is_some());
4558		assert!(bc.justifications(blocks[1]).unwrap().is_some());
4559		backend.unpin_block(blocks[1]);
4560		assert!(bc.body(blocks[1]).unwrap().is_none());
4561		assert!(bc.justifications(blocks[1]).unwrap().is_none());
4562
4563		// Block 4 is inside the pruning window and still kept
4564		assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap());
4565		assert_eq!(
4566			Some(Justifications::from(build_justification(4))),
4567			bc.justifications(blocks[4]).unwrap()
4568		);
4569
4570		// Block tree:
4571		//   0 -> 1 -> 2 -> 3 -> 4 -> 5
4572		let hash =
4573			insert_block(&backend, 5, prev_hash, None, Default::default(), vec![5.into()], None)
4574				.unwrap();
4575		blocks.push(hash);
4576
4577		backend.pin_block(blocks[4]).unwrap();
4578		// Mark block 5 as finalized.
4579		let mut op = backend.begin_operation().unwrap();
4580		backend.begin_state_operation(&mut op, blocks[5]).unwrap();
4581		op.mark_finalized(blocks[5], Some(build_justification(5))).unwrap();
4582		backend.commit_operation(op).unwrap();
4583
4584		assert!(bc.body(blocks[0]).unwrap().is_none());
4585		assert!(bc.body(blocks[1]).unwrap().is_none());
4586		assert!(bc.body(blocks[2]).unwrap().is_none());
4587		assert!(bc.body(blocks[3]).unwrap().is_none());
4588
4589		assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap());
4590		assert_eq!(
4591			Some(Justifications::from(build_justification(4))),
4592			bc.justifications(blocks[4]).unwrap()
4593		);
4594		assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap());
4595		assert!(bc.header(blocks[5]).ok().flatten().is_some());
4596
4597		backend.unpin_block(blocks[4]);
4598		assert!(bc.body(blocks[4]).unwrap().is_none());
4599		assert!(bc.justifications(blocks[4]).unwrap().is_none());
4600
4601		// Append a justification to block 5.
4602		backend.append_justification(blocks[5], ([0, 0, 0, 1], vec![42])).unwrap();
4603
4604		let hash =
4605			insert_block(&backend, 6, blocks[5], None, Default::default(), vec![6.into()], None)
4606				.unwrap();
4607		blocks.push(hash);
4608
4609		// Pin block 5 so it gets loaded into the cache on prune
4610		backend.pin_block(blocks[5]).unwrap();
4611
4612		// Finalize block 6 so block 5 gets pruned. Since it is pinned, both justifications should be
4613		// kept in memory.
4614		let mut op = backend.begin_operation().unwrap();
4615		backend.begin_state_operation(&mut op, blocks[6]).unwrap();
4616		op.mark_finalized(blocks[6], None).unwrap();
4617		backend.commit_operation(op).unwrap();
4618
4619		assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap());
4620		assert!(bc.header(blocks[5]).ok().flatten().is_some());
4621		let mut expected = Justifications::from(build_justification(5));
4622		expected.append(([0, 0, 0, 1], vec![42]));
4623		assert_eq!(Some(expected), bc.justifications(blocks[5]).unwrap());
4624	}
4625
4626	#[test]
4627	fn test_pinned_blocks_on_finalize_with_fork() {
4628		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
4629		let mut blocks = Vec::new();
4630		let mut prev_hash = Default::default();
4631
4632		// Block tree:
4633		//   0 -> 1 -> 2 -> 3 -> 4
4634		for i in 0..5 {
4635			let hash = insert_block(
4636				&backend,
4637				i,
4638				prev_hash,
4639				None,
4640				Default::default(),
4641				vec![i.into()],
4642				None,
4643			)
4644			.unwrap();
4645			blocks.push(hash);
4646
4647			// Avoid block pruning.
4648			backend.pin_block(blocks[i as usize]).unwrap();
4649
4650			prev_hash = hash;
4651		}
4652
4653		// Insert a fork at the second block.
4654		// Block tree:
4655		//   0 -> 1 -> 2 -> 3 -> 4
4656		//        \ -> 2 -> 3
4657		let fork_hash_root =
4658			insert_block(&backend, 2, blocks[1], None, H256::random(), vec![2.into()], None)
4659				.unwrap();
4660		let fork_hash_3 = insert_block(
4661			&backend,
4662			3,
4663			fork_hash_root,
4664			None,
4665			H256::random(),
4666			vec![3.into(), 11.into()],
4667			None,
4668		)
4669		.unwrap();
4670
4671		// Do not prune the fork hash.
4672		backend.pin_block(fork_hash_3).unwrap();
4673
4674		let mut op = backend.begin_operation().unwrap();
4675		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
4676		op.mark_head(blocks[4]).unwrap();
4677		backend.commit_operation(op).unwrap();
4678
4679		for i in 1..5 {
4680			let mut op = backend.begin_operation().unwrap();
4681			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
4682			op.mark_finalized(blocks[i], None).unwrap();
4683			backend.commit_operation(op).unwrap();
4684		}
4685
4686		let bc = backend.blockchain();
4687		assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap());
4688		assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap());
4689		assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap());
4690		assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap());
4691		assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap());
4692		// Check the fork hashes.
4693		assert_eq!(None, bc.body(fork_hash_root).unwrap());
4694		assert_eq!(Some(vec![3.into(), 11.into()]), bc.body(fork_hash_3).unwrap());
4695
4696		// Unpin all blocks, except the forked one.
4697		for block in &blocks {
4698			backend.unpin_block(*block);
4699		}
4700		assert!(bc.body(blocks[0]).unwrap().is_none());
4701		assert!(bc.body(blocks[1]).unwrap().is_none());
4702		assert!(bc.body(blocks[2]).unwrap().is_none());
4703		assert!(bc.body(blocks[3]).unwrap().is_none());
4704
4705		assert!(bc.body(fork_hash_3).unwrap().is_some());
4706		backend.unpin_block(fork_hash_3);
4707		assert!(bc.body(fork_hash_3).unwrap().is_none());
4708	}
4709}