sp_state_machine/lib.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! Substrate state machine implementation.
19
20#![warn(missing_docs)]
21#![cfg_attr(not(feature = "std"), no_std)]
22
23extern crate alloc;
24
25pub mod backend;
26#[cfg(feature = "std")]
27mod basic;
28mod error;
29mod ext;
30#[cfg(feature = "fuzzing")]
31pub mod fuzzing;
32#[cfg(feature = "std")]
33mod in_memory_backend;
34pub(crate) mod overlayed_changes;
35#[cfg(feature = "std")]
36mod read_only;
37mod stats;
38#[cfg(feature = "std")]
39mod testing;
40mod trie_backend;
41mod trie_backend_essence;
42
43pub use trie_backend::TrieCacheProvider;
44
45#[cfg(feature = "std")]
46pub use std_reexport::*;
47
48#[cfg(feature = "std")]
49pub use execution::*;
50#[cfg(feature = "std")]
51pub use log::{debug, error as log_error, warn};
52#[cfg(feature = "std")]
53pub use tracing::trace;
54
55/// In `no_std` we skip logs for `state_machine`; this macro
56/// is a no-op.
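/// A hedged example of a call site; in `no_std` builds the arguments are simply
/// discarded:
///
/// ```ignore
/// warn!(target: "state", "failed to read storage key", key);
/// ```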
57#[cfg(not(feature = "std"))]
58#[macro_export]
59macro_rules! warn {
60	(target: $target:expr, $message:expr $( , $arg:ident )* $( , )?) => {
61		{
62			$(
63				let _ = &$arg;
64			)*
65		}
66	};
67	($message:expr, $( $arg:expr, )*) => {
68		{
69			$(
70				let _ = &$arg;
71			)*
72		}
73	};
74}
75
76/// In `no_std` we skip logs for `state_machine`; this macro
77/// is a no-op.
78#[cfg(not(feature = "std"))]
79#[macro_export]
80macro_rules! debug {
81	(target: $target:expr, $message:expr $( , $arg:ident )* $( , )?) => {
82		{
83			$(
84				let _ = &$arg;
85			)*
86		}
87	};
88}
89
90/// In `no_std` we skip logs for `state_machine`; this macro
91/// is a no-op.
92#[cfg(not(feature = "std"))]
93#[macro_export]
94macro_rules! trace {
95	(target: $target:expr, $($arg:tt)+) => {
96		()
97	};
98	($($arg:tt)+) => {
99		()
100	};
101}
102
103/// In `no_std` we skip logs for `state_machine`; this macro
104/// is a no-op.
105#[cfg(not(feature = "std"))]
106#[macro_export]
107macro_rules! log_error {
108	(target: $target:expr, $($arg:tt)+) => {
109		()
110	};
111	($($arg:tt)+) => {
112		()
113	};
114}
115
116/// Default error type to use with state machine trie backend.
117#[cfg(feature = "std")]
118pub type DefaultError = String;
119/// Error type to use with state machine trie backend.
120#[cfg(not(feature = "std"))]
121#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)]
122pub struct DefaultError;
123
124#[cfg(not(feature = "std"))]
125impl core::fmt::Display for DefaultError {
126	fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
127		write!(f, "DefaultError")
128	}
129}
130
131pub use crate::{
132	backend::{Backend, BackendTransaction, IterArgs, KeysIter, PairsIter, StorageIterator},
133	error::{Error, ExecutionError},
134	ext::Ext,
135	overlayed_changes::{
136		ChildStorageCollection, IndexOperation, OffchainChangesCollection,
137		OffchainOverlayedChanges, OverlayedChanges, StorageChanges, StorageCollection, StorageKey,
138		StorageValue,
139	},
140	stats::{StateMachineStats, UsageInfo, UsageUnit},
141	trie_backend::{TrieBackend, TrieBackendBuilder},
142	trie_backend_essence::{Storage, TrieBackendStorage},
143};
144
145#[cfg(feature = "std")]
146mod std_reexport {
147	pub use crate::{
148		basic::BasicExternalities,
149		in_memory_backend::new_in_mem,
150		read_only::{InspectState, ReadOnlyExternalities},
151		testing::TestExternalities,
152		trie_backend::create_proof_check_backend,
153	};
154	pub use sp_trie::{
155		trie_types::{TrieDBMutV0, TrieDBMutV1},
156		CompactProof, DBValue, LayoutV0, LayoutV1, MemoryDB, StorageProof, TrieMut,
157	};
158}
159
160#[cfg(feature = "std")]
161mod execution {
162	use crate::backend::AsTrieBackend;
163
164	use super::*;
165	use codec::Codec;
166	use hash_db::Hasher;
167	use smallvec::SmallVec;
168	use sp_core::{
169		hexdisplay::HexDisplay,
170		storage::{ChildInfo, ChildType, PrefixedStorageKey},
171		traits::{CallContext, CodeExecutor, RuntimeCode},
172	};
173	use sp_externalities::Extensions;
174	use sp_trie::PrefixedMemoryDB;
175	use std::collections::{HashMap, HashSet};
176
177	pub(crate) type CallResult<E> = Result<Vec<u8>, E>;
178
179	/// Default handler of the execution manager.
180	pub type DefaultHandler<E> = fn(CallResult<E>, CallResult<E>) -> CallResult<E>;
181
182	/// Trie backend with in-memory storage.
183	pub type InMemoryBackend<H> = TrieBackend<PrefixedMemoryDB<H>, H>;
184
185	/// Storage backend trust level.
186	#[derive(Debug, Clone)]
187	pub enum BackendTrustLevel {
188		/// Panics from trusted backends are considered justified, and never caught.
189		Trusted,
190		/// Panics from an untrusted backend are caught and interpreted as a runtime error.
191		/// An untrusted backend may be missing some parts of the trie, so panics are not
192		/// considered fatal.
193		Untrusted,
194	}
195
196	/// The substrate state machine.
197	pub struct StateMachine<'a, B, H, Exec>
198	where
199		H: Hasher,
200		B: Backend<H>,
201	{
202		backend: &'a B,
203		exec: &'a Exec,
204		method: &'a str,
205		call_data: &'a [u8],
206		overlay: &'a mut OverlayedChanges<H>,
207		extensions: &'a mut Extensions,
208		runtime_code: &'a RuntimeCode<'a>,
209		stats: StateMachineStats,
210		/// The hash of the block the state machine will be executed on.
211		///
212		/// Used for logging.
213		parent_hash: Option<H::Out>,
214		context: CallContext,
215	}
216
217	impl<'a, B, H, Exec> Drop for StateMachine<'a, B, H, Exec>
218	where
219		H: Hasher,
220		B: Backend<H>,
221	{
222		fn drop(&mut self) {
223			self.backend.register_overlay_stats(&self.stats);
224		}
225	}
226
227	impl<'a, B, H, Exec> StateMachine<'a, B, H, Exec>
228	where
229		H: Hasher,
230		H::Out: Ord + 'static + codec::Codec,
231		Exec: CodeExecutor + Clone + 'static,
232		B: Backend<H>,
233	{
234		/// Creates new substrate state machine.
235		pub fn new(
236			backend: &'a B,
237			overlay: &'a mut OverlayedChanges<H>,
238			exec: &'a Exec,
239			method: &'a str,
240			call_data: &'a [u8],
241			extensions: &'a mut Extensions,
242			runtime_code: &'a RuntimeCode,
243			context: CallContext,
244		) -> Self {
245			Self {
246				backend,
247				exec,
248				method,
249				call_data,
250				extensions,
251				overlay,
252				runtime_code,
253				stats: StateMachineStats::default(),
254				parent_hash: None,
255				context,
256			}
257		}
258
259		/// Set the given `parent_hash` as the hash of the parent block.
260		///
261		/// This will be used for improved logging.
262		pub fn set_parent_hash(mut self, parent_hash: H::Out) -> Self {
263			self.parent_hash = Some(parent_hash);
264			self
265		}
266
267		/// Execute a call using the given state backend, overlayed changes, and call executor.
268		///
269		/// On an error, no prospective changes are written to the overlay.
270		///
271		/// Note: changes to code will be in place if this call is made again. For running partial
272		/// blocks (e.g. a transaction at a time), ensure a different method is used.
273		///
274		/// Returns the SCALE encoded result of the executed function.
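		///
		/// # Example
		///
		/// A minimal sketch of driving the state machine, modeled on the tests in this crate;
		/// `backend`, `executor`, `runtime_code` and the method name are placeholders for
		/// values the caller already has.
		///
		/// ```ignore
		/// let mut overlay = OverlayedChanges::default();
		/// let mut extensions = Extensions::default();
		///
		/// let mut state_machine = StateMachine::new(
		/// 	&backend,
		/// 	&mut overlay,
		/// 	&executor,
		/// 	"test_method",
		/// 	&[],
		/// 	&mut extensions,
		/// 	&runtime_code,
		/// 	CallContext::Offchain,
		/// );
		///
		/// // SCALE encoded output of the executed runtime function.
		/// let encoded_result = state_machine.execute()?;
		/// ```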
275		pub fn execute(&mut self) -> Result<Vec<u8>, Box<dyn Error>> {
276			self.overlay
277				.enter_runtime()
278				.expect("StateMachine is never called from the runtime; qed");
279
280			let mut ext = Ext::new(self.overlay, self.backend, Some(self.extensions));
281
282			let ext_id = ext.id;
283
284			trace!(
285				target: "state",
286				ext_id = %HexDisplay::from(&ext_id.to_le_bytes()),
287				method = %self.method,
288				parent_hash = %self.parent_hash.map(|h| format!("{:?}", h)).unwrap_or_else(|| String::from("None")),
289				input = ?HexDisplay::from(&self.call_data),
290				"Call",
291			);
292
293			let result = self
294				.exec
295				.call(&mut ext, self.runtime_code, self.method, self.call_data, self.context)
296				.0;
297
298			self.overlay
299				.exit_runtime()
300				.expect("Runtime is not able to call this function in the overlay; qed");
301
302			trace!(
303				target: "state",
304				ext_id = %HexDisplay::from(&ext_id.to_le_bytes()),
305				?result,
306				"Return",
307			);
308
309			result.map_err(|e| Box::new(e) as Box<_>)
310		}
311	}
312
313	/// Prove execution using the given state backend, overlayed changes, and call executor.
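	///
	/// # Example
	///
	/// A hedged sketch of the prove/check round trip, following the tests in this crate;
	/// `remote_backend`, `executor`, `state_version` and the method name are placeholders.
	///
	/// ```ignore
	/// let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0;
	/// let (result, proof) = prove_execution(
	/// 	&mut remote_backend,
	/// 	&mut Default::default(),
	/// 	&executor,
	/// 	"test_method",
	/// 	&[],
	/// 	&RuntimeCode::empty(),
	/// )?;
	///
	/// // The proof can later be verified without access to the full state.
	/// let checked = execution_proof_check::<BlakeTwo256, _>(
	/// 	remote_root,
	/// 	proof,
	/// 	&mut Default::default(),
	/// 	&executor,
	/// 	"test_method",
	/// 	&[],
	/// 	&RuntimeCode::empty(),
	/// )?;
	/// assert_eq!(result, checked);
	/// ```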
314	pub fn prove_execution<B, H, Exec>(
315		backend: &mut B,
316		overlay: &mut OverlayedChanges<H>,
317		exec: &Exec,
318		method: &str,
319		call_data: &[u8],
320		runtime_code: &RuntimeCode,
321	) -> Result<(Vec<u8>, StorageProof), Box<dyn Error>>
322	where
323		B: AsTrieBackend<H>,
324		H: Hasher,
325		H::Out: Ord + 'static + codec::Codec,
326		Exec: CodeExecutor + Clone + 'static,
327	{
328		let trie_backend = backend.as_trie_backend();
329		prove_execution_on_trie_backend::<_, _, _>(
330			trie_backend,
331			overlay,
332			exec,
333			method,
334			call_data,
335			runtime_code,
336			&mut Default::default(),
337		)
338	}
339
340	/// Prove execution using the given trie backend, overlayed changes, and call executor.
341	/// The execution proof is the set of all storage `DBValue`s 'touched' in the backend
342	/// while running the call; it allows the call to be checked later without access to
343	/// the full state (see `execution_proof_check`).
344	///
345	/// On an error, no prospective changes are written to the overlay.
346	///
347	/// Note: changes to code will be in place if this call is made again. For running partial
348	/// blocks (e.g. a transaction at a time), ensure a different method is used.
349	pub fn prove_execution_on_trie_backend<S, H, Exec>(
350		trie_backend: &TrieBackend<S, H>,
351		overlay: &mut OverlayedChanges<H>,
352		exec: &Exec,
353		method: &str,
354		call_data: &[u8],
355		runtime_code: &RuntimeCode,
356		extensions: &mut Extensions,
357	) -> Result<(Vec<u8>, StorageProof), Box<dyn Error>>
358	where
359		S: trie_backend_essence::TrieBackendStorage<H>,
360		H: Hasher,
361		H::Out: Ord + 'static + codec::Codec,
362		Exec: CodeExecutor + 'static + Clone,
363	{
364		let proving_backend =
365			TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build();
366
367		let result = StateMachine::<_, H, Exec>::new(
368			&proving_backend,
369			overlay,
370			exec,
371			method,
372			call_data,
373			extensions,
374			runtime_code,
375			CallContext::Offchain,
376		)
377		.execute()?;
378
379		let proof = proving_backend
380			.extract_proof()
381			.expect("A recorder was set and thus, a storage proof can be extracted; qed");
382
383		Ok((result, proof))
384	}
385
386	/// Check execution proof, generated by `prove_execution` call.
387	pub fn execution_proof_check<H, Exec>(
388		root: H::Out,
389		proof: StorageProof,
390		overlay: &mut OverlayedChanges<H>,
391		exec: &Exec,
392		method: &str,
393		call_data: &[u8],
394		runtime_code: &RuntimeCode,
395	) -> Result<Vec<u8>, Box<dyn Error>>
396	where
397		H: Hasher + 'static,
398		Exec: CodeExecutor + Clone + 'static,
399		H::Out: Ord + 'static + codec::Codec,
400	{
401		let trie_backend = create_proof_check_backend::<H>(root, proof)?;
402		execution_proof_check_on_trie_backend::<_, _>(
403			&trie_backend,
404			overlay,
405			exec,
406			method,
407			call_data,
408			runtime_code,
409		)
410	}
411
412	/// Check execution proof on proving backend, generated by `prove_execution` call.
413	pub fn execution_proof_check_on_trie_backend<H, Exec>(
414		trie_backend: &TrieBackend<MemoryDB<H>, H>,
415		overlay: &mut OverlayedChanges<H>,
416		exec: &Exec,
417		method: &str,
418		call_data: &[u8],
419		runtime_code: &RuntimeCode,
420	) -> Result<Vec<u8>, Box<dyn Error>>
421	where
422		H: Hasher,
423		H::Out: Ord + 'static + codec::Codec,
424		Exec: CodeExecutor + Clone + 'static,
425	{
426		StateMachine::<_, H, Exec>::new(
427			trie_backend,
428			overlay,
429			exec,
430			method,
431			call_data,
432			&mut Extensions::default(),
433			runtime_code,
434			CallContext::Offchain,
435		)
436		.execute()
437	}
438
439	/// Generate storage read proof.
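	///
	/// # Example
	///
	/// A minimal sketch mirroring the read-proof tests in this crate; `remote_backend`,
	/// `state_version` and the queried key are placeholders.
	///
	/// ```ignore
	/// let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0;
	/// let proof = prove_read(remote_backend, &[b"value2"])?;
	///
	/// // The proof can be checked against the state root alone.
	/// let values = read_proof_check::<BlakeTwo256, _>(remote_root, proof, &[b"value2"])?;
	/// ```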
440	pub fn prove_read<B, H, I>(backend: B, keys: I) -> Result<StorageProof, Box<dyn Error>>
441	where
442		B: AsTrieBackend<H>,
443		H: Hasher,
444		H::Out: Ord + Codec,
445		I: IntoIterator,
446		I::Item: AsRef<[u8]>,
447	{
448		let trie_backend = backend.as_trie_backend();
449		prove_read_on_trie_backend(trie_backend, keys)
450	}
451
452	/// The state machine only allows a single level
453	/// of child tries.
454	pub const MAX_NESTED_TRIE_DEPTH: usize = 2;
455
456	/// Multiple key value state.
457	/// States are ordered by root storage key.
458	#[derive(PartialEq, Eq, Clone)]
459	pub struct KeyValueStates(pub Vec<KeyValueStorageLevel>);
460
461	/// A key value state at any storage level.
462	#[derive(PartialEq, Eq, Clone)]
463	pub struct KeyValueStorageLevel {
464		/// State root of the level; for the
465		/// top trie it is an empty byte array.
466		pub state_root: Vec<u8>,
467		/// Storage keys of the parents; empty for the top trie or
468		/// when exporting (building a proof).
469		pub parent_storage_keys: Vec<Vec<u8>>,
470		/// Pairs of key and value from this state.
471		pub key_values: Vec<(Vec<u8>, Vec<u8>)>,
472	}
473
474	impl<I> From<I> for KeyValueStates
475	where
476		I: IntoIterator<Item = (Vec<u8>, (Vec<(Vec<u8>, Vec<u8>)>, Vec<Vec<u8>>))>,
477	{
478		fn from(b: I) -> Self {
479			let mut result = Vec::new();
480			for (state_root, (key_values, storage_paths)) in b.into_iter() {
481				result.push(KeyValueStorageLevel {
482					state_root,
483					key_values,
484					parent_storage_keys: storage_paths,
485				})
486			}
487			KeyValueStates(result)
488		}
489	}
490
491	impl KeyValueStates {
492		/// Returns the total number of key-value pairs across all states.
493		pub fn len(&self) -> usize {
494			self.0.iter().fold(0, |nb, state| nb + state.key_values.len())
495		}
496
497		/// Update last keys accessed from this state.
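		///
		/// `stopped_at` is the depth at which iteration stopped (1 for the top trie, 2 for a
		/// child trie), and `last` holds at most two entries: the last accessed top trie key
		/// and, when present, the last accessed child trie key. Returns `true` when `last`
		/// was updated.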
498		pub fn update_last_key(
499			&self,
500			stopped_at: usize,
501			last: &mut SmallVec<[Vec<u8>; 2]>,
502		) -> bool {
503			if stopped_at == 0 || stopped_at > MAX_NESTED_TRIE_DEPTH {
504				return false
505			}
506			match stopped_at {
507				1 => {
508					let top_last =
509						self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone()));
510					if let Some(top_last) = top_last {
511						match last.len() {
512							0 => {
513								last.push(top_last);
514								return true
515							},
516							2 => {
517								last.pop();
518							},
519							_ => (),
520						}
521						// update top trie access.
522						last[0] = top_last;
523						return true
524					} else {
525						// No change in top trie accesses.
526						// Indicates end of reading of a child trie.
527						last.truncate(1);
528						return true
529					}
530				},
531				2 => {
532					let top_last =
533						self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone()));
534					let child_last =
535						self.0.last().and_then(|s| s.key_values.last().map(|kv| kv.0.clone()));
536
537					if let Some(child_last) = child_last {
538						if last.is_empty() {
539							if let Some(top_last) = top_last {
540								last.push(top_last)
541							} else {
542								return false
543							}
544						} else if let Some(top_last) = top_last {
545							last[0] = top_last;
546						}
547						if last.len() == 2 {
548							last.pop();
549						}
550						last.push(child_last);
551						return true
552					} else {
553						// Stopped at level 2, so the child's last key should have been defined.
554						return false
555					}
556				},
557				_ => (),
558			}
559			false
560		}
561	}
562
563	/// Generate a range storage read proof, including child trie
564	/// content.
565	/// A size limit is applied to the proof, with the
566	/// exception that `start_at` and the element following it
567	/// are always part of the proof.
568	/// If a key different from `start_at` is a child trie root,
569	/// the child trie content will be included in the proof.
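	///
	/// # Example
	///
	/// A hedged sketch of pairing this with `read_range_proof_check_with_child`;
	/// `remote_backend` and `remote_root` are placeholders for the prover's state.
	///
	/// ```ignore
	/// // Start at the beginning of the top trie, limiting the proof to roughly 1 KiB.
	/// let (proof, count) = prove_range_read_with_child_with_size(remote_backend, 1024, &[])?;
	///
	/// // The verifier replays the iteration against the proof; `depth` is 0 once the
	/// // whole state has been covered.
	/// let (states, depth) =
	/// 	read_range_proof_check_with_child::<BlakeTwo256>(remote_root, proof, &[])?;
	/// ```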
570	pub fn prove_range_read_with_child_with_size<B, H>(
571		backend: B,
572		size_limit: usize,
573		start_at: &[Vec<u8>],
574	) -> Result<(StorageProof, u32), Box<dyn Error>>
575	where
576		B: AsTrieBackend<H>,
577		H: Hasher,
578		H::Out: Ord + Codec,
579	{
580		let trie_backend = backend.as_trie_backend();
581		prove_range_read_with_child_with_size_on_trie_backend(trie_backend, size_limit, start_at)
582	}
583
584	/// Generate range storage read proof, with child tries
585	/// content.
586	/// See `prove_range_read_with_child_with_size`.
587	pub fn prove_range_read_with_child_with_size_on_trie_backend<S, H>(
588		trie_backend: &TrieBackend<S, H>,
589		size_limit: usize,
590		start_at: &[Vec<u8>],
591	) -> Result<(StorageProof, u32), Box<dyn Error>>
592	where
593		S: trie_backend_essence::TrieBackendStorage<H>,
594		H: Hasher,
595		H::Out: Ord + Codec,
596	{
597		if start_at.len() > MAX_NESTED_TRIE_DEPTH {
598			return Err(Box::new("Invalid start of range."))
599		}
600
601		let recorder = sp_trie::recorder::Recorder::default();
602		let proving_backend =
603			TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build();
604		let mut count = 0;
605
606		let mut child_roots = HashSet::new();
607		let (mut child_key, mut start_at) = if start_at.len() == 2 {
608			let storage_key = start_at.get(0).expect("Checked length.").clone();
609			if let Some(state_root) = proving_backend
610				.storage(&storage_key)
611				.map_err(|e| Box::new(e) as Box<dyn Error>)?
612			{
613				child_roots.insert(state_root);
614			} else {
615				return Err(Box::new("Invalid range start child trie key."))
616			}
617
618			(Some(storage_key), start_at.get(1).cloned())
619		} else {
620			(None, start_at.get(0).cloned())
621		};
622
623		loop {
624			let (child_info, depth) = if let Some(storage_key) = child_key.as_ref() {
625				let storage_key = PrefixedStorageKey::new_ref(storage_key);
626				(
627					Some(match ChildType::from_prefixed_key(storage_key) {
628						Some((ChildType::ParentKeyId, storage_key)) =>
629							ChildInfo::new_default(storage_key),
630						None => return Err(Box::new("Invalid range start child trie key.")),
631					}),
632					2,
633				)
634			} else {
635				(None, 1)
636			};
637
638			let start_at_ref = start_at.as_ref().map(AsRef::as_ref);
639			let mut switch_child_key = None;
640			let mut iter = proving_backend
641				.pairs(IterArgs {
642					child_info,
643					start_at: start_at_ref,
644					start_at_exclusive: true,
645					..IterArgs::default()
646				})
647				.map_err(|e| Box::new(e) as Box<dyn Error>)?;
648
649			while let Some(item) = iter.next() {
650				let (key, value) = item.map_err(|e| Box::new(e) as Box<dyn Error>)?;
651
652				if depth < MAX_NESTED_TRIE_DEPTH &&
653					sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice())
654				{
655					count += 1;
656					// Do not add two child tries with the same root.
657					if !child_roots.contains(value.as_slice()) {
658						child_roots.insert(value);
659						switch_child_key = Some(key);
660						break
661					}
662				} else if recorder.estimate_encoded_size() <= size_limit {
663					count += 1;
664				} else {
665					break
666				}
667			}
668
669			let completed = iter.was_complete();
670
671			if switch_child_key.is_none() {
672				if depth == 1 {
673					break
674				} else if completed {
675					start_at = child_key.take();
676				} else {
677					break
678				}
679			} else {
680				child_key = switch_child_key;
681				start_at = None;
682			}
683		}
684
685		let proof = proving_backend
686			.extract_proof()
687			.expect("A recorder was set and thus, a storage proof can be extracted; qed");
688		Ok((proof, count))
689	}
690
691	/// Generate range storage read proof.
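	///
	/// # Example
	///
	/// A minimal sketch following the range-proof tests in this crate; `remote_backend`,
	/// `remote_root` and the size limit are placeholders.
	///
	/// ```ignore
	/// let (proof, count) =
	/// 	prove_range_read_with_size(remote_backend, None, None, 800, Some(&[]))?;
	///
	/// // Check the proof and read back at most `count` key-value pairs; `completed`
	/// // signals whether the whole range was covered.
	/// let (results, completed) = read_range_proof_check::<BlakeTwo256>(
	/// 	remote_root,
	/// 	proof,
	/// 	None,
	/// 	None,
	/// 	Some(count),
	/// 	None,
	/// )?;
	/// ```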
692	pub fn prove_range_read_with_size<B, H>(
693		backend: B,
694		child_info: Option<&ChildInfo>,
695		prefix: Option<&[u8]>,
696		size_limit: usize,
697		start_at: Option<&[u8]>,
698	) -> Result<(StorageProof, u32), Box<dyn Error>>
699	where
700		B: AsTrieBackend<H>,
701		H: Hasher,
702		H::Out: Ord + Codec,
703	{
704		let trie_backend = backend.as_trie_backend();
705		prove_range_read_with_size_on_trie_backend(
706			trie_backend,
707			child_info,
708			prefix,
709			size_limit,
710			start_at,
711		)
712	}
713
714	/// Generate range storage read proof on an existing trie backend.
715	pub fn prove_range_read_with_size_on_trie_backend<S, H>(
716		trie_backend: &TrieBackend<S, H>,
717		child_info: Option<&ChildInfo>,
718		prefix: Option<&[u8]>,
719		size_limit: usize,
720		start_at: Option<&[u8]>,
721	) -> Result<(StorageProof, u32), Box<dyn Error>>
722	where
723		S: trie_backend_essence::TrieBackendStorage<H>,
724		H: Hasher,
725		H::Out: Ord + Codec,
726	{
727		let recorder = sp_trie::recorder::Recorder::default();
728		let proving_backend =
729			TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build();
730		let mut count = 0;
731		let iter = proving_backend
732			// NOTE: Even though the loop below doesn't use these values
733			//       this *must* fetch both the keys and the values so that
734			//       the proof is correct.
735			.pairs(IterArgs {
736				child_info: child_info.cloned(),
737				prefix,
738				start_at,
739				..IterArgs::default()
740			})
741			.map_err(|e| Box::new(e) as Box<dyn Error>)?;
742
743		for item in iter {
744			item.map_err(|e| Box::new(e) as Box<dyn Error>)?;
745			if count == 0 || recorder.estimate_encoded_size() <= size_limit {
746				count += 1;
747			} else {
748				break
749			}
750		}
751
752		let proof = proving_backend
753			.extract_proof()
754			.expect("A recorder was set and thus, a storage proof can be extracted; qed");
755		Ok((proof, count))
756	}
757
758	/// Generate child storage read proof.
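	///
	/// # Example
	///
	/// A hedged sketch mirroring the child-read tests in this crate; `remote_backend`,
	/// `remote_root` and the child trie name are placeholders.
	///
	/// ```ignore
	/// let child_info = ChildInfo::new_default(b"sub1");
	/// let proof = prove_child_read(remote_backend, &child_info, &[b"value3"])?;
	///
	/// let values = read_child_proof_check::<BlakeTwo256, _>(
	/// 	remote_root,
	/// 	proof,
	/// 	&child_info,
	/// 	&[b"value3"],
	/// )?;
	/// ```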
759	pub fn prove_child_read<B, H, I>(
760		backend: B,
761		child_info: &ChildInfo,
762		keys: I,
763	) -> Result<StorageProof, Box<dyn Error>>
764	where
765		B: AsTrieBackend<H>,
766		H: Hasher,
767		H::Out: Ord + Codec,
768		I: IntoIterator,
769		I::Item: AsRef<[u8]>,
770	{
771		let trie_backend = backend.as_trie_backend();
772		prove_child_read_on_trie_backend(trie_backend, child_info, keys)
773	}
774
775	/// Generate storage read proof on pre-created trie backend.
776	pub fn prove_read_on_trie_backend<S, H, I>(
777		trie_backend: &TrieBackend<S, H>,
778		keys: I,
779	) -> Result<StorageProof, Box<dyn Error>>
780	where
781		S: trie_backend_essence::TrieBackendStorage<H>,
782		H: Hasher,
783		H::Out: Ord + Codec,
784		I: IntoIterator,
785		I::Item: AsRef<[u8]>,
786	{
787		let proving_backend =
788			TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build();
789		for key in keys.into_iter() {
790			proving_backend
791				.storage(key.as_ref())
792				.map_err(|e| Box::new(e) as Box<dyn Error>)?;
793		}
794
795		Ok(proving_backend
796			.extract_proof()
797			.expect("A recorder was set and thus, a storage proof can be extracted; qed"))
798	}
799
800	/// Generate storage read proof on pre-created trie backend.
801	pub fn prove_child_read_on_trie_backend<S, H, I>(
802		trie_backend: &TrieBackend<S, H>,
803		child_info: &ChildInfo,
804		keys: I,
805	) -> Result<StorageProof, Box<dyn Error>>
806	where
807		S: trie_backend_essence::TrieBackendStorage<H>,
808		H: Hasher,
809		H::Out: Ord + Codec,
810		I: IntoIterator,
811		I::Item: AsRef<[u8]>,
812	{
813		let proving_backend =
814			TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build();
815		for key in keys.into_iter() {
816			proving_backend
817				.child_storage(child_info, key.as_ref())
818				.map_err(|e| Box::new(e) as Box<dyn Error>)?;
819		}
820
821		Ok(proving_backend
822			.extract_proof()
823			.expect("A recorder was set and thus, a storage proof can be extracted; qed"))
824	}
825
826	/// Check storage read proof, generated by `prove_read` call.
827	pub fn read_proof_check<H, I>(
828		root: H::Out,
829		proof: StorageProof,
830		keys: I,
831	) -> Result<HashMap<Vec<u8>, Option<Vec<u8>>>, Box<dyn Error>>
832	where
833		H: Hasher + 'static,
834		H::Out: Ord + Codec,
835		I: IntoIterator,
836		I::Item: AsRef<[u8]>,
837	{
838		let proving_backend = create_proof_check_backend::<H>(root, proof)?;
839		let mut result = HashMap::new();
840		for key in keys.into_iter() {
841			let value = read_proof_check_on_proving_backend(&proving_backend, key.as_ref())?;
842			result.insert(key.as_ref().to_vec(), value);
843		}
844		Ok(result)
845	}
846
847	/// Check storage range proof with child trie included, generated by
848	/// `prove_range_read_with_child_with_size` call.
849	///
850	/// Returns the key-value contents and the depth of the pending state iteration
851	/// (0 if completed).
852	pub fn read_range_proof_check_with_child<H>(
853		root: H::Out,
854		proof: StorageProof,
855		start_at: &[Vec<u8>],
856	) -> Result<(KeyValueStates, usize), Box<dyn Error>>
857	where
858		H: Hasher + 'static,
859		H::Out: Ord + Codec,
860	{
861		let proving_backend = create_proof_check_backend::<H>(root, proof)?;
862		read_range_proof_check_with_child_on_proving_backend(&proving_backend, start_at)
863	}
864
865	/// Check storage range proof (optionally for a child trie), generated by `prove_range_read_with_size` call.
866	pub fn read_range_proof_check<H>(
867		root: H::Out,
868		proof: StorageProof,
869		child_info: Option<&ChildInfo>,
870		prefix: Option<&[u8]>,
871		count: Option<u32>,
872		start_at: Option<&[u8]>,
873	) -> Result<(Vec<(Vec<u8>, Vec<u8>)>, bool), Box<dyn Error>>
874	where
875		H: Hasher + 'static,
876		H::Out: Ord + Codec,
877	{
878		let proving_backend = create_proof_check_backend::<H>(root, proof)?;
879		read_range_proof_check_on_proving_backend(
880			&proving_backend,
881			child_info,
882			prefix,
883			count,
884			start_at,
885		)
886	}
887
888	/// Check child storage read proof, generated by `prove_child_read` call.
889	pub fn read_child_proof_check<H, I>(
890		root: H::Out,
891		proof: StorageProof,
892		child_info: &ChildInfo,
893		keys: I,
894	) -> Result<HashMap<Vec<u8>, Option<Vec<u8>>>, Box<dyn Error>>
895	where
896		H: Hasher + 'static,
897		H::Out: Ord + Codec,
898		I: IntoIterator,
899		I::Item: AsRef<[u8]>,
900	{
901		let proving_backend = create_proof_check_backend::<H>(root, proof)?;
902		let mut result = HashMap::new();
903		for key in keys.into_iter() {
904			let value = read_child_proof_check_on_proving_backend(
905				&proving_backend,
906				child_info,
907				key.as_ref(),
908			)?;
909			result.insert(key.as_ref().to_vec(), value);
910		}
911		Ok(result)
912	}
913
914	/// Check storage read proof on pre-created proving backend.
915	pub fn read_proof_check_on_proving_backend<H>(
916		proving_backend: &TrieBackend<MemoryDB<H>, H>,
917		key: &[u8],
918	) -> Result<Option<Vec<u8>>, Box<dyn Error>>
919	where
920		H: Hasher,
921		H::Out: Ord + Codec,
922	{
923		proving_backend.storage(key).map_err(|e| Box::new(e) as Box<dyn Error>)
924	}
925
926	/// Check child storage read proof on pre-created proving backend.
927	pub fn read_child_proof_check_on_proving_backend<H>(
928		proving_backend: &TrieBackend<MemoryDB<H>, H>,
929		child_info: &ChildInfo,
930		key: &[u8],
931	) -> Result<Option<Vec<u8>>, Box<dyn Error>>
932	where
933		H: Hasher,
934		H::Out: Ord + Codec,
935	{
936		proving_backend
937			.child_storage(child_info, key)
938			.map_err(|e| Box::new(e) as Box<dyn Error>)
939	}
940
941	/// Check storage range proof on pre-created proving backend.
942	///
943	/// Returns a vector with the read `key => value` pairs and a `bool` that is set to `true` when
944	/// all `key => value` pairs could be read and no more are left.
945	pub fn read_range_proof_check_on_proving_backend<H>(
946		proving_backend: &TrieBackend<MemoryDB<H>, H>,
947		child_info: Option<&ChildInfo>,
948		prefix: Option<&[u8]>,
949		count: Option<u32>,
950		start_at: Option<&[u8]>,
951	) -> Result<(Vec<(Vec<u8>, Vec<u8>)>, bool), Box<dyn Error>>
952	where
953		H: Hasher,
954		H::Out: Ord + Codec,
955	{
956		let mut values = Vec::new();
957		let mut iter = proving_backend
958			.pairs(IterArgs {
959				child_info: child_info.cloned(),
960				prefix,
961				start_at,
962				stop_on_incomplete_database: true,
963				..IterArgs::default()
964			})
965			.map_err(|e| Box::new(e) as Box<dyn Error>)?;
966
967		while let Some(item) = iter.next() {
968			let (key, value) = item.map_err(|e| Box::new(e) as Box<dyn Error>)?;
969			values.push((key, value));
970			if !count.as_ref().map_or(true, |c| (values.len() as u32) < *c) {
971				break
972			}
973		}
974
975		Ok((values, iter.was_complete()))
976	}
977
978	/// Check storage range proof on pre-created proving backend.
979	///
980	/// See `read_range_proof_check_with_child`.
981	pub fn read_range_proof_check_with_child_on_proving_backend<H>(
982		proving_backend: &TrieBackend<MemoryDB<H>, H>,
983		start_at: &[Vec<u8>],
984	) -> Result<(KeyValueStates, usize), Box<dyn Error>>
985	where
986		H: Hasher,
987		H::Out: Ord + Codec,
988	{
989		let mut result = vec![KeyValueStorageLevel {
990			state_root: Default::default(),
991			key_values: Default::default(),
992			parent_storage_keys: Default::default(),
993		}];
994		if start_at.len() > MAX_NESTED_TRIE_DEPTH {
995			return Err(Box::new("Invalid start of range."))
996		}
997
998		let mut child_roots = HashSet::new();
999		let (mut child_key, mut start_at) = if start_at.len() == 2 {
1000			let storage_key = start_at.get(0).expect("Checked length.").clone();
1001			let child_key = if let Some(state_root) = proving_backend
1002				.storage(&storage_key)
1003				.map_err(|e| Box::new(e) as Box<dyn Error>)?
1004			{
1005				child_roots.insert(state_root.clone());
1006				Some((storage_key, state_root))
1007			} else {
1008				return Err(Box::new("Invalid range start child trie key."))
1009			};
1010
1011			(child_key, start_at.get(1).cloned())
1012		} else {
1013			(None, start_at.get(0).cloned())
1014		};
1015
1016		let completed = loop {
1017			let (child_info, depth) = if let Some((storage_key, state_root)) = child_key.as_ref() {
1018				result.push(KeyValueStorageLevel {
1019					state_root: state_root.clone(),
1020					key_values: Default::default(),
1021					parent_storage_keys: Default::default(),
1022				});
1023
1024				let storage_key = PrefixedStorageKey::new_ref(storage_key);
1025				(
1026					Some(match ChildType::from_prefixed_key(storage_key) {
1027						Some((ChildType::ParentKeyId, storage_key)) =>
1028							ChildInfo::new_default(storage_key),
1029						None => return Err(Box::new("Invalid range start child trie key.")),
1030					}),
1031					2,
1032				)
1033			} else {
1034				(None, 1)
1035			};
1036
1037			let values = if child_info.is_some() {
1038				&mut result.last_mut().expect("Added above").key_values
1039			} else {
1040				&mut result[0].key_values
1041			};
1042			let start_at_ref = start_at.as_ref().map(AsRef::as_ref);
1043			let mut switch_child_key = None;
1044
1045			let mut iter = proving_backend
1046				.pairs(IterArgs {
1047					child_info,
1048					start_at: start_at_ref,
1049					start_at_exclusive: true,
1050					stop_on_incomplete_database: true,
1051					..IterArgs::default()
1052				})
1053				.map_err(|e| Box::new(e) as Box<dyn Error>)?;
1054
1055			while let Some(item) = iter.next() {
1056				let (key, value) = item.map_err(|e| Box::new(e) as Box<dyn Error>)?;
1057				values.push((key.to_vec(), value.to_vec()));
1058
1059				if depth < MAX_NESTED_TRIE_DEPTH &&
1060					sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice())
1061				{
1062					// Do not add two child tries with the same root.
1063					if !child_roots.contains(value.as_slice()) {
1064						child_roots.insert(value.clone());
1065						switch_child_key = Some((key, value));
1066						break
1067					}
1068				}
1069			}
1070
1071			let completed = iter.was_complete();
1072
1073			if switch_child_key.is_none() {
1074				if !completed {
1075					break depth
1076				}
1077				if depth == 1 {
1078					break 0
1079				} else {
1080					start_at = child_key.take().map(|entry| entry.0);
1081				}
1082			} else {
1083				child_key = switch_child_key;
1084				start_at = None;
1085			}
1086		};
1087		Ok((KeyValueStates(result), completed))
1088	}
1089}
1090
1091#[cfg(test)]
1092mod tests {
1093	use super::{backend::AsTrieBackend, ext::Ext, *};
1094	use crate::{execution::CallResult, in_memory_backend::new_in_mem};
1095	use assert_matches::assert_matches;
1096	use codec::Encode;
1097	use sp_core::{
1098		map,
1099		storage::{ChildInfo, StateVersion},
1100		traits::{CallContext, CodeExecutor, Externalities, RuntimeCode},
1101		H256,
1102	};
1103	use sp_runtime::traits::BlakeTwo256;
1104	use sp_trie::{
1105		trie_types::{TrieDBMutBuilderV0, TrieDBMutBuilderV1},
1106		KeySpacedDBMut, PrefixedMemoryDB,
1107	};
1108	use std::collections::{BTreeMap, HashMap};
1109
1110	#[derive(Clone)]
1111	struct DummyCodeExecutor {
1112		native_available: bool,
1113		native_succeeds: bool,
1114		fallback_succeeds: bool,
1115	}
1116
1117	impl CodeExecutor for DummyCodeExecutor {
1118		type Error = u8;
1119
1120		fn call(
1121			&self,
1122			ext: &mut dyn Externalities,
1123			_: &RuntimeCode,
1124			_method: &str,
1125			_data: &[u8],
1126			_: CallContext,
1127		) -> (CallResult<Self::Error>, bool) {
1128			let using_native = self.native_available;
1129			match (using_native, self.native_succeeds, self.fallback_succeeds) {
1130				(true, true, _) | (false, _, true) => (
1131					Ok(vec![
1132						ext.storage(b"value1").unwrap()[0] + ext.storage(b"value2").unwrap()[0],
1133					]),
1134					using_native,
1135				),
1136				_ => (Err(0), using_native),
1137			}
1138		}
1139	}
1140
1141	impl sp_core::traits::ReadRuntimeVersion for DummyCodeExecutor {
1142		fn read_runtime_version(
1143			&self,
1144			_: &[u8],
1145			_: &mut dyn Externalities,
1146		) -> std::result::Result<Vec<u8>, String> {
1147			unimplemented!("Not required in tests.")
1148		}
1149	}
1150
1151	#[test]
1152	fn execute_works() {
1153		execute_works_inner(StateVersion::V0);
1154		execute_works_inner(StateVersion::V1);
1155	}
1156	fn execute_works_inner(state_version: StateVersion) {
1157		let backend = trie_backend::tests::test_trie(state_version, None, None);
1158		let mut overlayed_changes = Default::default();
1159		let wasm_code = RuntimeCode::empty();
1160		let mut execution_extensions = &mut Default::default();
1161
1162		let mut state_machine = StateMachine::new(
1163			&backend,
1164			&mut overlayed_changes,
1165			&DummyCodeExecutor {
1166				native_available: true,
1167				native_succeeds: true,
1168				fallback_succeeds: true,
1169			},
1170			"test",
1171			&[],
1172			&mut execution_extensions,
1173			&wasm_code,
1174			CallContext::Offchain,
1175		);
1176
1177		assert_eq!(state_machine.execute().unwrap(), vec![66]);
1178	}
1179
1180	#[test]
1181	fn execute_works_with_native_else_wasm() {
1182		execute_works_with_native_else_wasm_inner(StateVersion::V0);
1183		execute_works_with_native_else_wasm_inner(StateVersion::V1);
1184	}
1185	fn execute_works_with_native_else_wasm_inner(state_version: StateVersion) {
1186		let backend = trie_backend::tests::test_trie(state_version, None, None);
1187		let mut overlayed_changes = Default::default();
1188		let wasm_code = RuntimeCode::empty();
1189		let mut execution_extensions = &mut Default::default();
1190
1191		let mut state_machine = StateMachine::new(
1192			&backend,
1193			&mut overlayed_changes,
1194			&DummyCodeExecutor {
1195				native_available: true,
1196				native_succeeds: true,
1197				fallback_succeeds: true,
1198			},
1199			"test",
1200			&[],
1201			&mut execution_extensions,
1202			&wasm_code,
1203			CallContext::Offchain,
1204		);
1205
1206		assert_eq!(state_machine.execute().unwrap(), vec![66]);
1207	}
1208
1209	#[test]
1210	fn prove_execution_and_proof_check_works() {
1211		prove_execution_and_proof_check_works_inner(StateVersion::V0);
1212		prove_execution_and_proof_check_works_inner(StateVersion::V1);
1213	}
1214	fn prove_execution_and_proof_check_works_inner(state_version: StateVersion) {
1215		let executor = DummyCodeExecutor {
1216			native_available: true,
1217			native_succeeds: true,
1218			fallback_succeeds: true,
1219		};
1220
1221		// fetch execution proof from 'remote' full node
1222		let mut remote_backend = trie_backend::tests::test_trie(state_version, None, None);
1223		let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0;
1224		let (remote_result, remote_proof) = prove_execution(
1225			&mut remote_backend,
1226			&mut Default::default(),
1227			&executor,
1228			"test",
1229			&[],
1230			&RuntimeCode::empty(),
1231		)
1232		.unwrap();
1233
1234		// check proof locally
1235		let local_result = execution_proof_check::<BlakeTwo256, _>(
1236			remote_root,
1237			remote_proof,
1238			&mut Default::default(),
1239			&executor,
1240			"test",
1241			&[],
1242			&RuntimeCode::empty(),
1243		)
1244		.unwrap();
1245
1246		// check that both results are correct
1247		assert_eq!(remote_result, vec![66]);
1248		assert_eq!(remote_result, local_result);
1249	}
1250
1251	#[test]
1252	fn clear_prefix_in_ext_works() {
1253		let initial: BTreeMap<_, _> = map![
1254			b"aaa".to_vec() => b"0".to_vec(),
1255			b"abb".to_vec() => b"1".to_vec(),
1256			b"abc".to_vec() => b"2".to_vec(),
1257			b"bbb".to_vec() => b"3".to_vec()
1258		];
1259		let state = InMemoryBackend::<BlakeTwo256>::from((initial, StateVersion::default()));
1260		let backend = state.as_trie_backend();
1261
1262		let mut overlay = OverlayedChanges::default();
1263		overlay.set_storage(b"aba".to_vec(), Some(b"1312".to_vec()));
1264		overlay.set_storage(b"bab".to_vec(), Some(b"228".to_vec()));
1265		overlay.start_transaction();
1266		overlay.set_storage(b"abd".to_vec(), Some(b"69".to_vec()));
1267		overlay.set_storage(b"bbd".to_vec(), Some(b"42".to_vec()));
1268
1269		let overlay_limit = overlay.clone();
1270		{
1271			let mut ext = Ext::new(&mut overlay, backend, None);
1272			let _ = ext.clear_prefix(b"ab", None, None);
1273		}
1274		overlay.commit_transaction().unwrap();
1275
1276		assert_eq!(
1277			overlay
1278				.changes_mut()
1279				.map(|(k, v)| (k.clone(), v.value().cloned()))
1280				.collect::<HashMap<_, _>>(),
1281			map![
1282				b"abc".to_vec() => None,
1283				b"abb".to_vec() => None,
1284				b"aba".to_vec() => None,
1285				b"abd".to_vec() => None,
1286
1287				b"bab".to_vec() => Some(b"228".to_vec()),
1288				b"bbd".to_vec() => Some(b"42".to_vec())
1289			],
1290		);
1291
1292		let mut overlay = overlay_limit;
1293		{
1294			let mut ext = Ext::new(&mut overlay, backend, None);
1295			assert_matches!(
1296				ext.clear_prefix(b"ab", Some(1), None).deconstruct(),
1297				(Some(_), 1, 3, 1)
1298			);
1299		}
1300		overlay.commit_transaction().unwrap();
1301
1302		assert_eq!(
1303			overlay
1304				.changes_mut()
1305				.map(|(k, v)| (k.clone(), v.value().cloned()))
1306				.collect::<HashMap<_, _>>(),
1307			map![
1308				b"abb".to_vec() => None,
1309				b"aba".to_vec() => None,
1310				b"abd".to_vec() => None,
1311
1312				b"bab".to_vec() => Some(b"228".to_vec()),
1313				b"bbd".to_vec() => Some(b"42".to_vec())
1314			],
1315		);
1316	}
1317
1318	#[test]
1319	fn limited_child_kill_works() {
1320		let child_info = ChildInfo::new_default(b"sub1");
1321		let initial: HashMap<_, BTreeMap<_, _>> = map![
1322			Some(child_info.clone()) => map![
1323				b"a".to_vec() => b"0".to_vec(),
1324				b"b".to_vec() => b"1".to_vec(),
1325				b"c".to_vec() => b"2".to_vec(),
1326				b"d".to_vec() => b"3".to_vec()
1327			],
1328		];
1329		let backend = InMemoryBackend::<BlakeTwo256>::from((initial, StateVersion::default()));
1330
1331		let mut overlay = OverlayedChanges::default();
1332		overlay.set_child_storage(&child_info, b"1".to_vec(), Some(b"1312".to_vec()));
1333		overlay.set_child_storage(&child_info, b"2".to_vec(), Some(b"1312".to_vec()));
1334		overlay.set_child_storage(&child_info, b"3".to_vec(), Some(b"1312".to_vec()));
1335		overlay.set_child_storage(&child_info, b"4".to_vec(), Some(b"1312".to_vec()));
1336
1337		{
1338			let mut ext = Ext::new(&mut overlay, &backend, None);
1339			let r = ext.kill_child_storage(&child_info, Some(2), None);
1340			assert_matches!(r.deconstruct(), (Some(_), 2, 6, 2));
1341		}
1342
1343		assert_eq!(
1344			overlay
1345				.children_mut()
1346				.flat_map(|(iter, _child_info)| iter)
1347				.map(|(k, v)| (k.clone(), v.value()))
1348				.collect::<BTreeMap<_, _>>(),
1349			map![
1350				b"1".to_vec() => None,
1351				b"2".to_vec() => None,
1352				b"3".to_vec() => None,
1353				b"4".to_vec() => None,
1354				b"a".to_vec() => None,
1355				b"b".to_vec() => None,
1356			],
1357		);
1358	}
1359
1360	#[test]
1361	fn limited_child_kill_off_by_one_works() {
1362		let child_info = ChildInfo::new_default(b"sub1");
1363		let initial: HashMap<_, BTreeMap<_, _>> = map![
1364			Some(child_info.clone()) => map![
1365				b"a".to_vec() => b"0".to_vec(),
1366				b"b".to_vec() => b"1".to_vec(),
1367				b"c".to_vec() => b"2".to_vec(),
1368				b"d".to_vec() => b"3".to_vec()
1369			],
1370		];
1371		let backend = InMemoryBackend::<BlakeTwo256>::from((initial, StateVersion::default()));
1372		let mut overlay = OverlayedChanges::default();
1373		let mut ext = Ext::new(&mut overlay, &backend, None);
1374		let r = ext.kill_child_storage(&child_info, Some(0), None).deconstruct();
1375		assert_matches!(r, (Some(_), 0, 0, 0));
1376		let r = ext
1377			.kill_child_storage(&child_info, Some(1), r.0.as_ref().map(|x| &x[..]))
1378			.deconstruct();
1379		assert_matches!(r, (Some(_), 1, 1, 1));
1380		let r = ext
1381			.kill_child_storage(&child_info, Some(4), r.0.as_ref().map(|x| &x[..]))
1382			.deconstruct();
1383		// Only 3 items remaining to remove
1384		assert_matches!(r, (None, 3, 3, 3));
1385		let r = ext.kill_child_storage(&child_info, Some(1), None).deconstruct();
1386		assert_matches!(r, (Some(_), 0, 0, 1));
1387	}
1388
1389	#[test]
1390	fn limited_child_kill_off_by_one_works_without_limit() {
1391		let child_info = ChildInfo::new_default(b"sub1");
1392		let initial: HashMap<_, BTreeMap<_, _>> = map![
1393			Some(child_info.clone()) => map![
1394				b"a".to_vec() => b"0".to_vec(),
1395				b"b".to_vec() => b"1".to_vec(),
1396				b"c".to_vec() => b"2".to_vec(),
1397				b"d".to_vec() => b"3".to_vec()
1398			],
1399		];
1400		let backend = InMemoryBackend::<BlakeTwo256>::from((initial, StateVersion::default()));
1401		let mut overlay = OverlayedChanges::default();
1402		let mut ext = Ext::new(&mut overlay, &backend, None);
1403		assert_eq!(ext.kill_child_storage(&child_info, None, None).deconstruct(), (None, 4, 4, 4));
1404	}
1405
1406	#[test]
1407	fn set_child_storage_works() {
1408		let child_info = ChildInfo::new_default(b"sub1");
1409		let child_info = &child_info;
1410		let state = new_in_mem::<BlakeTwo256>();
1411		let backend = state.as_trie_backend();
1412		let mut overlay = OverlayedChanges::default();
1413		let mut ext = Ext::new(&mut overlay, backend, None);
1414
1415		ext.set_child_storage(child_info, b"abc".to_vec(), b"def".to_vec());
1416		assert_eq!(ext.child_storage(child_info, b"abc"), Some(b"def".to_vec()));
1417		let _ = ext.kill_child_storage(child_info, None, None);
1418		assert_eq!(ext.child_storage(child_info, b"abc"), None);
1419	}
1420
1421	#[test]
1422	fn append_storage_works() {
1423		let reference_data = vec![b"data1".to_vec(), b"2".to_vec(), b"D3".to_vec(), b"d4".to_vec()];
1424		let key = b"key".to_vec();
1425		let state = new_in_mem::<BlakeTwo256>();
1426		let backend = state.as_trie_backend();
1427		let mut overlay = OverlayedChanges::default();
1428		{
1429			let mut ext = Ext::new(&mut overlay, backend, None);
1430
1431			ext.storage_append(key.clone(), reference_data[0].encode());
1432			assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode()));
1433		}
1434		overlay.start_transaction();
1435		{
1436			let mut ext = Ext::new(&mut overlay, backend, None);
1437
1438			for i in reference_data.iter().skip(1) {
1439				ext.storage_append(key.clone(), i.encode());
1440			}
1441			assert_eq!(ext.storage(key.as_slice()), Some(reference_data.encode()));
1442		}
1443		overlay.rollback_transaction().unwrap();
1444		{
1445			let mut ext = Ext::new(&mut overlay, backend, None);
1446			assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode()));
1447		}
1448	}
1449
1450	// Test that we can append twice to a key, then perform a remove operation.
1451	// The test checks specifically that the append is merged with its parent transaction
1452	// on commit.
1453	#[test]
1454	fn commit_merges_append_with_parent() {
1455		#[derive(codec::Encode, codec::Decode)]
1456		enum Item {
1457			Item1,
1458			Item2,
1459		}
1460
1461		let key = b"events".to_vec();
1462		let state = new_in_mem::<BlakeTwo256>();
1463		let backend = state.as_trie_backend();
1464		let mut overlay = OverlayedChanges::default();
1465
1466		// Append first item
1467		overlay.start_transaction();
1468		{
1469			let mut ext = Ext::new(&mut overlay, backend, None);
1470			ext.clear_storage(key.as_slice());
1471			ext.storage_append(key.clone(), Item::Item1.encode());
1472		}
1473
1474		// Append second item
1475		overlay.start_transaction();
1476		{
1477			let mut ext = Ext::new(&mut overlay, backend, None);
1478
1479			assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode()));
1480
1481			ext.storage_append(key.clone(), Item::Item2.encode());
1482
1483			assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1, Item::Item2].encode()),);
1484		}
1485
1486		// Remove item
1487		overlay.start_transaction();
1488		{
1489			let mut ext = Ext::new(&mut overlay, backend, None);
1490
1491			ext.place_storage(key.clone(), None);
1492
1493			assert_eq!(ext.storage(key.as_slice()), None);
1494		}
1495
1496		// Remove gets committed and merged into the previous transaction
1497		overlay.commit_transaction().unwrap();
1498		{
1499			let mut ext = Ext::new(&mut overlay, backend, None);
1500			assert_eq!(ext.storage(key.as_slice()), None,);
1501		}
1502
1503		// Remove gets rolled back, we should see the initial append again.
1504		overlay.rollback_transaction().unwrap();
1505		{
1506			let mut ext = Ext::new(&mut overlay, backend, None);
1507			assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode()));
1508		}
1509
1510		overlay.commit_transaction().unwrap();
1511		{
1512			let mut ext = Ext::new(&mut overlay, backend, None);
1513			assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode()));
1514		}
1515	}
1516
1517	#[test]
1518	fn remove_with_append_then_rollback_appended_then_append_again() {
1519		#[derive(codec::Encode, codec::Decode)]
1520		enum Item {
1521			InitializationItem,
1522			DiscardedItem,
1523			CommittedItem,
1524		}
1525
1526		let key = b"events".to_vec();
1527		let state = new_in_mem::<BlakeTwo256>();
1528		let backend = state.as_trie_backend();
1529		let mut overlay = OverlayedChanges::default();
1530
1531		// For example, block initialization with an event.
1532		{
1533			let mut ext = Ext::new(&mut overlay, backend, None);
1534			ext.clear_storage(key.as_slice());
1535			ext.storage_append(key.clone(), Item::InitializationItem.encode());
1536		}
1537		overlay.start_transaction();
1538
1539		// For example, the first transaction resulted in a panic during block building
1540		{
1541			let mut ext = Ext::new(&mut overlay, backend, None);
1542
1543			assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode()));
1544
1545			ext.storage_append(key.clone(), Item::DiscardedItem.encode());
1546
1547			assert_eq!(
1548				ext.storage(key.as_slice()),
1549				Some(vec![Item::InitializationItem, Item::DiscardedItem].encode()),
1550			);
1551		}
1552		overlay.rollback_transaction().unwrap();
1553
1554		// Then we apply next transaction which is valid this time.
1555		{
1556			let mut ext = Ext::new(&mut overlay, backend, None);
1557
1558			assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode()));
1559
1560			ext.storage_append(key.clone(), Item::CommittedItem.encode());
1561
1562			assert_eq!(
1563				ext.storage(key.as_slice()),
1564				Some(vec![Item::InitializationItem, Item::CommittedItem].encode()),
1565			);
1566		}
1567		overlay.start_transaction();
1568
1569		// Then only initialization item and second (committed) item should persist.
1570		{
1571			let mut ext = Ext::new(&mut overlay, backend, None);
1572			assert_eq!(
1573				ext.storage(key.as_slice()),
1574				Some(vec![Item::InitializationItem, Item::CommittedItem].encode()),
1575			);
1576		}
1577	}
1578
1579	fn test_compact(remote_proof: StorageProof, remote_root: &sp_core::H256) -> StorageProof {
1580		let compact_remote_proof =
1581			remote_proof.into_compact_proof::<BlakeTwo256>(*remote_root).unwrap();
1582		compact_remote_proof
1583			.to_storage_proof::<BlakeTwo256>(Some(remote_root))
1584			.unwrap()
1585			.0
1586	}
1587
1588	#[test]
1589	fn prove_read_and_proof_check_works() {
1590		prove_read_and_proof_check_works_inner(StateVersion::V0);
1591		prove_read_and_proof_check_works_inner(StateVersion::V1);
1592	}
1593	fn prove_read_and_proof_check_works_inner(state_version: StateVersion) {
1594		let child_info = ChildInfo::new_default(b"sub1");
1595		let missing_child_info = ChildInfo::new_default(b"sub1sub2"); // key will include the other child root in the proof.
1596		let child_info = &child_info;
1597		let missing_child_info = &missing_child_info;
1598		// fetch read proof from 'remote' full node
1599		let remote_backend = trie_backend::tests::test_trie(state_version, None, None);
1600		let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0;
1601		let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap();
1602		let remote_proof = test_compact(remote_proof, &remote_root);
1603		// check proof locally
1604		let local_result1 =
1605			read_proof_check::<BlakeTwo256, _>(remote_root, remote_proof.clone(), &[b"value2"])
1606				.unwrap();
1607		let local_result2 =
1608			read_proof_check::<BlakeTwo256, _>(remote_root, remote_proof, &[&[0xff]]).is_ok();
1609		// check that results are correct
1610		assert_eq!(
1611			local_result1.into_iter().collect::<Vec<_>>(),
1612			vec![(b"value2".to_vec(), Some(vec![24]))],
1613		);
1614		assert_eq!(local_result2, false);
1615		// on child trie
1616		let remote_backend = trie_backend::tests::test_trie(state_version, None, None);
1617		let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0;
1618		let remote_proof = prove_child_read(remote_backend, child_info, &[b"value3"]).unwrap();
1619		let remote_proof = test_compact(remote_proof, &remote_root);
1620		let local_result1 = read_child_proof_check::<BlakeTwo256, _>(
1621			remote_root,
1622			remote_proof.clone(),
1623			child_info,
1624			&[b"value3"],
1625		)
1626		.unwrap();
1627		let local_result2 = read_child_proof_check::<BlakeTwo256, _>(
1628			remote_root,
1629			remote_proof.clone(),
1630			child_info,
1631			&[b"value2"],
1632		)
1633		.unwrap();
1634		let local_result3 = read_child_proof_check::<BlakeTwo256, _>(
1635			remote_root,
1636			remote_proof,
1637			missing_child_info,
1638			&[b"dummy"],
1639		)
1640		.unwrap();
1641
1642		assert_eq!(
1643			local_result1.into_iter().collect::<Vec<_>>(),
1644			vec![(b"value3".to_vec(), Some(vec![142; 33]))],
1645		);
1646		assert_eq!(local_result2.into_iter().collect::<Vec<_>>(), vec![(b"value2".to_vec(), None)]);
1647		assert_eq!(local_result3.into_iter().collect::<Vec<_>>(), vec![(b"dummy".to_vec(), None)]);
1648	}
1649
1650	#[test]
1651	fn child_read_compact_stress_test() {
1652		use rand::{rngs::SmallRng, RngCore, SeedableRng};
1653		let mut storage: HashMap<Option<ChildInfo>, BTreeMap<StorageKey, StorageValue>> =
1654			Default::default();
1655		let mut seed = [0; 32];
1656		for i in 0..50u32 {
1657			let mut child_infos = Vec::new();
1658			let seed_partial = &mut seed[0..4];
1659			seed_partial.copy_from_slice(&i.to_be_bytes()[..]);
1660			let mut rand = SmallRng::from_seed(seed);
1661
1662			let nb_child_trie = rand.next_u32() as usize % 25;
1663			for _ in 0..nb_child_trie {
1664				let key_len = 1 + (rand.next_u32() % 10);
1665				let mut key = vec![0; key_len as usize];
1666				rand.fill_bytes(&mut key[..]);
1667				let child_info = ChildInfo::new_default(key.as_slice());
1668				let nb_item = 1 + rand.next_u32() % 25;
1669				let mut items = BTreeMap::new();
1670				for item in 0..nb_item {
1671					let key_len = 1 + (rand.next_u32() % 10);
1672					let mut key = vec![0; key_len as usize];
1673					rand.fill_bytes(&mut key[..]);
1674					let value = vec![item as u8; item as usize + 28];
1675					items.insert(key, value);
1676				}
1677				child_infos.push(child_info.clone());
1678				storage.insert(Some(child_info), items);
1679			}
1680
1681			let trie: InMemoryBackend<BlakeTwo256> =
1682				(storage.clone(), StateVersion::default()).into();
1683			let trie_root = *trie.root();
1684			let backend = TrieBackendBuilder::wrap(&trie).with_recorder(Default::default()).build();
1685			let mut queries = Vec::new();
1686			for c in 0..(5 + nb_child_trie / 2) {
1687				// random existing query
1688				let child_info = if c < 5 {
1689					// 4 missing child trie
1690					let key_len = 1 + (rand.next_u32() % 10);
1691					let mut key = vec![0; key_len as usize];
1692					rand.fill_bytes(&mut key[..]);
1693					ChildInfo::new_default(key.as_slice())
1694				} else {
1695					child_infos[rand.next_u32() as usize % nb_child_trie].clone()
1696				};
1697
1698				if let Some(values) = storage.get(&Some(child_info.clone())) {
1699					for _ in 0..(1 + values.len() / 2) {
1700						let ix = rand.next_u32() as usize % values.len();
1701						for (i, (key, value)) in values.iter().enumerate() {
1702							if i == ix {
1703								assert_eq!(
1704									&backend
1705										.child_storage(&child_info, key.as_slice())
1706										.unwrap()
1707										.unwrap(),
1708									value
1709								);
1710								queries.push((
1711									child_info.clone(),
1712									key.clone(),
1713									Some(value.clone()),
1714								));
1715								break
1716							}
1717						}
1718					}
1719				}
1720				for _ in 0..4 {
1721					let key_len = 1 + (rand.next_u32() % 10);
1722					let mut key = vec![0; key_len as usize];
1723					rand.fill_bytes(&mut key[..]);
1724					let result = backend.child_storage(&child_info, key.as_slice()).unwrap();
1725					queries.push((child_info.clone(), key, result));
1726				}
1727			}
1728
1729			let storage_proof = backend.extract_proof().expect("Failed to extract proof");
1730			let remote_proof = test_compact(storage_proof, &trie_root);
1731			let proof_check =
1732				create_proof_check_backend::<BlakeTwo256>(trie_root, remote_proof).unwrap();
1733
1734			for (child_info, key, expected) in queries {
1735				assert_eq!(
1736					proof_check.child_storage(&child_info, key.as_slice()).unwrap(),
1737					expected,
1738				);
1739			}
1740		}
1741	}
1742
1743	#[test]
1744	fn prove_read_with_size_limit_works() {
1745		let state_version = StateVersion::V0;
1746		let remote_backend = trie_backend::tests::test_trie(state_version, None, None);
1747		let remote_root = remote_backend.storage_root(::std::iter::empty(), state_version).0;
1748		let (proof, count) =
1749			prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap();
1750		// Always contains at least some nodes.
1751		assert_eq!(proof.to_memory_db::<BlakeTwo256>().drain().len(), 3);
1752		assert_eq!(count, 1);
1753		assert_eq!(proof.encoded_size(), 443);
1754
1755		let remote_backend = trie_backend::tests::test_trie(state_version, None, None);
1756		let (proof, count) =
1757			prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap();
1758		assert_eq!(proof.to_memory_db::<BlakeTwo256>().drain().len(), 9);
1759		assert_eq!(count, 85);
1760		assert_eq!(proof.encoded_size(), 857);
1761		let (results, completed) = read_range_proof_check::<BlakeTwo256>(
1762			remote_root,
1763			proof.clone(),
1764			None,
1765			None,
1766			Some(count),
1767			None,
1768		)
1769		.unwrap();
1770		assert_eq!(results.len() as u32, count);
1771		assert_eq!(completed, false);
1772		// When checking without count limit, proof may actually contain extra values.
1773		let (results, completed) =
1774			read_range_proof_check::<BlakeTwo256>(remote_root, proof, None, None, None, None)
1775				.unwrap();
1776		assert_eq!(results.len() as u32, 101);
1777		assert_eq!(completed, false);
1778
1779		let remote_backend = trie_backend::tests::test_trie(state_version, None, None);
1780		let (proof, count) =
1781			prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap();
1782		assert_eq!(proof.to_memory_db::<BlakeTwo256>().drain().len(), 11);
1783		assert_eq!(count, 132);
1784		assert_eq!(proof.encoded_size(), 990);
1785
1786		let (results, completed) =
1787			read_range_proof_check::<BlakeTwo256>(remote_root, proof, None, None, None, None)
1788				.unwrap();
1789		assert_eq!(results.len() as u32, count);
1790		assert_eq!(completed, true);
1791	}
1792
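	// Checks the proof size and entry count reported by `prove_range_read_with_size` for a
	// small fixed trie.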
1793	#[test]
1794	fn prove_read_with_size_limit_proof_size() {
1795		let mut root = H256::default();
1796		let mut mdb = PrefixedMemoryDB::<BlakeTwo256>::default();
1797		{
1798			let mut mdb = KeySpacedDBMut::new(&mut mdb, b"");
1799			let mut trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build();
1800			trie.insert(b"value1", &[123; 1]).unwrap();
1801			trie.insert(b"value2", &[123; 10]).unwrap();
1802			trie.insert(b"value3", &[123; 100]).unwrap();
1803			trie.insert(b"value4", &[123; 1000]).unwrap();
1804		}
1805
1806		let remote_backend: TrieBackend<PrefixedMemoryDB<BlakeTwo256>, BlakeTwo256> =
1807			TrieBackendBuilder::new(mdb, root)
1808				.with_optional_cache(None)
1809				.with_optional_recorder(None)
1810				.build();
1811
1812		let (proof, count) =
1813			prove_range_read_with_size(remote_backend, None, None, 1000, None).unwrap();
1814
1815		assert_eq!(proof.encoded_size(), 1267);
1816		assert_eq!(count, 3);
1817	}
1818
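	// Checks that switching the state version from V0 to V1 changes the storage root and
	// shrinks read proofs, since large values are stored as hashes inside trie nodes.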
1819	#[test]
1820	fn inner_state_versioning_switch_proofs() {
1821		let mut state_version = StateVersion::V0;
1822		let (mut mdb, mut root) = trie_backend::tests::test_db(state_version);
1823		{
1824			let mut trie = TrieDBMutBuilderV0::from_existing(&mut mdb, &mut root).build();
1825			trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash
1826				.expect("insert failed");
1827			trie.insert(b"foo2", vec![3u8; 16].as_slice()) // no inner hash
1828				.expect("insert failed");
1829			trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash
1830				.expect("insert failed");
1831		}
1832
1833		let check_proof = |mdb, root, state_version| -> StorageProof {
1834			let remote_backend = TrieBackendBuilder::new(mdb, root).build();
1835			let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0;
1836			let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap();
1837			// check proof locally
1838			let local_result1 =
1839				read_proof_check::<BlakeTwo256, _>(remote_root, remote_proof.clone(), &[b"foo222"])
1840					.unwrap();
1841			// check that results are correct
1842			assert_eq!(
1843				local_result1.into_iter().collect::<Vec<_>>(),
1844				vec![(b"foo222".to_vec(), Some(vec![5u8; 100]))],
1845			);
1846			remote_proof
1847		};
1848
1849		let remote_proof = check_proof(mdb.clone(), root, state_version);
1850		// check full values in proof
1851		assert!(remote_proof.encode().len() > 1_100);
1852		assert!(remote_proof.encoded_size() > 1_100);
1853		let root1 = root;
1854
1855		// switch to state version V1
1856		state_version = StateVersion::V1;
1857		{
1858			let mut trie = TrieDBMutBuilderV1::from_existing(&mut mdb, &mut root).build();
1859			trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash
1860				.expect("insert failed");
1861			// re-inserting the same value still changes the node: under V1 the long value is stored as a hash
1862			trie.insert(b"foo", vec![1u8; 1000].as_slice()) // inner hash
1863				.expect("insert failed");
1864		}
1865		let root3 = root;
1866		assert!(root1 != root3);
1867		let remote_proof = check_proof(mdb.clone(), root, state_version);
1868		// the `foo` node's value is replaced by its hashed form.
1869		assert!(remote_proof.encode().len() < 1000);
1870		assert!(remote_proof.encoded_size() < 1000);
1871		assert_eq!(remote_proof.encode().len(), remote_proof.encoded_size());
1872	}
1873
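	// Iterates range proofs of size one over the top trie and its child tries, verifying each
	// proof until the traversal completes.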
1874	#[test]
1875	fn prove_range_with_child_works() {
1876		let state_version = StateVersion::V0;
1877		let remote_backend = trie_backend::tests::test_trie(state_version, None, None);
1878		let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0;
1879		let mut start_at = smallvec::SmallVec::<[Vec<u8>; 2]>::new();
1880		let trie_backend = remote_backend.as_trie_backend();
1881		let max_iter = 1000;
1882		let mut nb_loop = 0;
1883		loop {
1884			nb_loop += 1;
1885			if max_iter == nb_loop {
1886				panic!("Too many loops in prove range");
1887			}
1888			let (proof, count) = prove_range_read_with_child_with_size_on_trie_backend(
1889				trie_backend,
1890				1,
1891				start_at.as_slice(),
1892			)
1893			.unwrap();
1894			// Always contains at least some nodes.
1895			assert!(proof.to_memory_db::<BlakeTwo256>().drain().len() > 0);
1896			assert!(count < 3); // when a child trie is entered, the parent key and the first child key are both counted.
1897
1898			let (result, completed_depth) = read_range_proof_check_with_child::<BlakeTwo256>(
1899				remote_root,
1900				proof.clone(),
1901				start_at.as_slice(),
1902			)
1903			.unwrap();
1904
1905			if completed_depth == 0 {
1906				break
1907			}
1908			assert!(result.update_last_key(completed_depth, &mut start_at));
1909		}
1910
1911		assert_eq!(nb_loop, 10);
1912	}
1913
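	// Compares the size of a compact child-trie read proof under state versions V0 and V1;
	// V1 is expected to be smaller because the long inner value is stored as a hash.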
1914	#[test]
1915	fn compact_multiple_child_trie() {
1916		let size_no_inner_hash = compact_multiple_child_trie_inner(StateVersion::V0);
1917		let size_inner_hash = compact_multiple_child_trie_inner(StateVersion::V1);
1918		assert!(size_inner_hash < size_no_inner_hash);
1919	}
1920	fn compact_multiple_child_trie_inner(state_version: StateVersion) -> usize {
1921		// this root will be queried
1922		let child_info1 = ChildInfo::new_default(b"sub1");
1923		// this root will not be included in the proof
1924		let child_info2 = ChildInfo::new_default(b"sub2");
1925		// this root will be included in the proof
1926		let child_info3 = ChildInfo::new_default(b"sub");
1927		let remote_backend = trie_backend::tests::test_trie(state_version, None, None);
1928		let long_vec: Vec<u8> = (0..1024usize).map(|_| 8u8).collect();
1929		let (remote_root, transaction) = remote_backend.full_storage_root(
1930			std::iter::empty(),
1931			vec![
1932				(
1933					&child_info1,
1934					vec![
1935						// an inner hashable node
1936						(&b"k"[..], Some(&long_vec[..])),
1937						// need to ensure this is not an inline node,
1938						// otherwise we do not know what is accessed when
1939						// storing the proof.
1940						(&b"key1"[..], Some(&vec![5u8; 32][..])),
1941						(&b"key2"[..], Some(&b"val3"[..])),
1942					]
1943					.into_iter(),
1944				),
1945				(
1946					&child_info2,
1947					vec![(&b"key3"[..], Some(&b"val4"[..])), (&b"key4"[..], Some(&b"val5"[..]))]
1948						.into_iter(),
1949				),
1950				(
1951					&child_info3,
1952					vec![(&b"key5"[..], Some(&b"val6"[..])), (&b"key6"[..], Some(&b"val7"[..]))]
1953						.into_iter(),
1954				),
1955			]
1956			.into_iter(),
1957			state_version,
1958		);
1959		let mut remote_storage = remote_backend.backend_storage().clone();
1960		remote_storage.consolidate(transaction);
1961		let remote_backend = TrieBackendBuilder::new(remote_storage, remote_root).build();
1962		let remote_proof = prove_child_read(remote_backend, &child_info1, &[b"key1"]).unwrap();
1963		let size = remote_proof.encoded_size();
1964		let remote_proof = test_compact(remote_proof, &remote_root);
1965		let local_result1 = read_child_proof_check::<BlakeTwo256, _>(
1966			remote_root,
1967			remote_proof,
1968			&child_info1,
1969			&[b"key1"],
1970		)
1971		.unwrap();
1972		assert_eq!(local_result1.len(), 1);
1973		assert_eq!(local_result1.get(&b"key1"[..]), Some(&Some(vec![5u8; 32])));
1974		size
1975	}
1976
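	// Checks that identical key/value pairs written to two different child tries do not end up
	// as a single database key with a reference count of two in the resulting transaction.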
1977	#[test]
1978	fn child_storage_uuid() {
1979		let state_version = StateVersion::V0;
1980		let child_info_1 = ChildInfo::new_default(b"sub_test1");
1981		let child_info_2 = ChildInfo::new_default(b"sub_test2");
1982
1983		use crate::trie_backend::tests::test_trie;
1984		let mut overlay = OverlayedChanges::default();
1985
1986		let mut transaction = {
1987			let backend = test_trie(state_version, None, None);
1988			let mut ext = Ext::new(&mut overlay, &backend, None);
1989			ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec());
1990			ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec());
1991			ext.storage_root(state_version);
1992			overlay.drain_storage_changes(&backend, state_version).unwrap().transaction
1993		};
1994		let mut duplicate = false;
1995		for (k, (value, rc)) in transaction.drain().iter() {
1996			// look for a key inserted twice: transaction rc is 2
1997			if *rc == 2 {
1998				duplicate = true;
1999				println!("test duplicate for {:?} {:?}", k, value);
2000			}
2001		}
2002		assert!(!duplicate);
2003	}
2004
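	// Checks that an empty value is a valid storage value, distinct from a missing key, both in
	// the overlay and when read back through `Ext`.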
2005	#[test]
2006	fn set_storage_empty_allowed() {
2007		let initial: BTreeMap<_, _> = map![
2008			b"aaa".to_vec() => b"0".to_vec(),
2009			b"bbb".to_vec() => b"".to_vec()
2010		];
2011		let state = InMemoryBackend::<BlakeTwo256>::from((initial, StateVersion::default()));
2012		let backend = state.as_trie_backend();
2013
2014		let mut overlay = OverlayedChanges::default();
2015		overlay.start_transaction();
2016		overlay.set_storage(b"ccc".to_vec(), Some(b"".to_vec()));
2017		assert_eq!(overlay.storage(b"ccc"), Some(Some(&[][..])));
2018		overlay.commit_transaction().unwrap();
2019		overlay.start_transaction();
2020		assert_eq!(overlay.storage(b"ccc"), Some(Some(&[][..])));
2021		assert_eq!(overlay.storage(b"bbb"), None);
2022
2023		{
2024			let mut ext = Ext::new(&mut overlay, backend, None);
2025			assert_eq!(ext.storage(b"bbb"), Some(vec![]));
2026			assert_eq!(ext.storage(b"ccc"), Some(vec![]));
2027			ext.clear_storage(b"ccc");
2028			assert_eq!(ext.storage(b"ccc"), None);
2029		}
2030		overlay.commit_transaction().unwrap();
2031		assert_eq!(overlay.storage(b"ccc"), Some(None));
2032	}
2033}