// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Trie recorder
//!
//! Provides an implementation of the [`TrieRecorder`](trie_db::TrieRecorder) trait. It can be used
//! to record storage accesses to the state to generate a [`StorageProof`].
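//!
//! A minimal usage sketch, mirroring the tests at the bottom of this file. It assumes an already
//! populated trie database `db` together with its `root` (the tests build one via the
//! `create_trie` helper) and uses the crate's `LayoutV1` layout, so it is marked `ignore`:
//!
//! ```ignore
//! use trie_db::{Trie, TrieDBBuilder};
//!
//! let recorder = Recorder::<sp_core::Blake2Hasher>::default();
//! {
//! 	// The returned `TrieRecorder` keeps an internal mutex locked, so keep its scope small.
//! 	let mut trie_recorder = recorder.as_trie_recorder(root);
//! 	let trie = TrieDBBuilder::<LayoutV1<sp_core::Blake2Hasher>>::new(&db, &root)
//! 		.with_recorder(&mut trie_recorder)
//! 		.build();
//! 	let _value = trie.get(b"key1").unwrap();
//! }
//! // Every trie node accessed above ends up in the proof.
//! let proof = recorder.drain_storage_proof();
//! ```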

use crate::{GenericMemoryDB, NodeCodec, StorageProof};
use codec::Encode;
use hash_db::Hasher;
use memory_db::KeyFunction;
use parking_lot::{Mutex, MutexGuard};
use std::{
	collections::{HashMap, HashSet},
	marker::PhantomData,
	mem,
	ops::DerefMut,
	sync::{
		atomic::{AtomicUsize, Ordering},
		Arc,
	},
};
use trie_db::{RecordedForKey, TrieAccess};

const LOG_TARGET: &str = "trie-recorder";

/// A list of ignored nodes for [`Recorder`].
///
/// When passed to a recorder, these nodes will be ignored and not recorded.
#[derive(Clone)]
pub struct IgnoredNodes<H> {
	nodes: HashSet<H>,
}

impl<H> Default for IgnoredNodes<H> {
	fn default() -> Self {
		Self { nodes: HashSet::default() }
	}
}

impl<H: Eq + std::hash::Hash + Clone> IgnoredNodes<H> {
	/// Initialize from the given storage proof.
	///
	/// All nodes recorded in the proof will become the ignored nodes.
	pub fn from_storage_proof<Hasher: trie_db::Hasher<Out = H>>(proof: &StorageProof) -> Self {
		Self { nodes: proof.iter_nodes().map(|n| Hasher::hash(&n)).collect() }
	}

	/// Initialize from the given memory db.
	///
	/// All nodes that have a reference count > 0 will be used as ignored nodes.
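	///
	/// For example (sketch; `storage_proof` is assumed to be an existing [`StorageProof`]), the
	/// nodes of a proof can also be turned into ignored nodes via its memory db:
	///
	/// ```ignore
	/// let memory_db: MemoryDB<sp_core::Blake2Hasher> = storage_proof.into_memory_db();
	/// let ignored = IgnoredNodes::from_memory_db(memory_db);
	/// ```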
	pub fn from_memory_db<Hasher: trie_db::Hasher<Out = H>, KF: KeyFunction<Hasher>>(
		mut memory_db: GenericMemoryDB<Hasher, KF>,
	) -> Self {
		Self {
			nodes: memory_db
				.drain()
				.into_iter()
				// We do not want to add removed nodes.
				.filter(|(_, (_, counter))| *counter > 0)
				.map(|(_, (data, _))| Hasher::hash(&data))
				.collect(),
		}
	}

	/// Extend `self` with the other instance of ignored nodes.
	pub fn extend(&mut self, other: Self) {
		self.nodes.extend(other.nodes.into_iter());
	}

	/// Returns `true` if the node is ignored.
	pub fn is_ignored(&self, node: &H) -> bool {
		self.nodes.contains(node)
	}
}

/// Stores all the information per transaction.
#[derive(Default)]
struct Transaction<H> {
	/// Stores transaction information about [`RecorderInner::recorded_keys`].
	///
	/// For each transaction we only store the `storage_root` and the old states per key. `None`
	/// state means that the key wasn't recorded before.
	recorded_keys: HashMap<H, HashMap<Arc<[u8]>, Option<RecordedForKey>>>,
	/// Stores transaction information about [`RecorderInner::accessed_nodes`].
	///
	/// For each transaction we only store the hashes of added nodes.
	accessed_nodes: HashSet<H>,
}

/// The internals of [`Recorder`].
struct RecorderInner<H> {
	/// The keys for which we have recorded the trie nodes and whether we recorded up to the value.
	///
	/// Mapping: `StorageRoot -> (Key -> RecordedForKey)`.
	recorded_keys: HashMap<H, HashMap<Arc<[u8]>, RecordedForKey>>,

	/// Currently active transactions.
	transactions: Vec<Transaction<H>>,

	/// The encoded nodes we accessed while recording.
	///
	/// Mapping: `Hash(Node) -> Node`.
	accessed_nodes: HashMap<H, Vec<u8>>,

	/// Nodes that should be ignored and not recorded.
	ignored_nodes: IgnoredNodes<H>,
}

impl<H> Default for RecorderInner<H> {
	fn default() -> Self {
		Self {
			recorded_keys: Default::default(),
			accessed_nodes: Default::default(),
			transactions: Vec::new(),
			ignored_nodes: Default::default(),
		}
	}
}

/// The trie recorder.
///
/// It owns the recorded data and is used to transform that data into a storage
/// proof and to provide transaction support. The `as_trie_recorder` method provides a
/// [`trie_db::TrieDB`] compatible recorder that implements the actual recording logic.
pub struct Recorder<H: Hasher> {
	inner: Arc<Mutex<RecorderInner<H::Out>>>,
	/// The estimated encoded size of the storage proof this recorder will produce.
	///
	/// We store this in an atomic to be able to fetch the value while `inner` may be locked.
	encoded_size_estimation: Arc<AtomicUsize>,
}

impl<H: Hasher> Default for Recorder<H> {
	fn default() -> Self {
		Self { inner: Default::default(), encoded_size_estimation: Arc::new(0.into()) }
	}
}

impl<H: Hasher> Clone for Recorder<H> {
	fn clone(&self) -> Self {
		Self {
			inner: self.inner.clone(),
			encoded_size_estimation: self.encoded_size_estimation.clone(),
		}
	}
}

impl<H: Hasher> Recorder<H> {
	/// Create a new instance with the given `ignored_nodes`.
	///
	/// These ignored nodes are not recorded when accessed.
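	///
	/// A sketch of the intended flow, following the `recorder_ignoring_nodes_works` test below
	/// (the trie setup with `db` and `root` is assumed):
	///
	/// ```ignore
	/// // First pass: record a proof as usual.
	/// let recorder = Recorder::default();
	/// // ... access the trie through `recorder.as_trie_recorder(root)` ...
	/// let ignored = IgnoredNodes::from_storage_proof::<sp_core::Blake2Hasher>(
	/// 	&recorder.drain_storage_proof(),
	/// );
	///
	/// // Second pass: nodes already covered by the first proof are not recorded again.
	/// let recorder = Recorder::with_ignored_nodes(ignored);
	/// ```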
	pub fn with_ignored_nodes(ignored_nodes: IgnoredNodes<H::Out>) -> Self {
		Self {
			inner: Arc::new(Mutex::new(RecorderInner { ignored_nodes, ..Default::default() })),
			..Default::default()
		}
	}

	/// Returns [`RecordedForKey`] per recorded key per trie.
	///
	/// There are multiple tries when working with e.g. child tries.
	pub fn recorded_keys(&self) -> HashMap<H::Out, HashMap<Arc<[u8]>, RecordedForKey>> {
		self.inner.lock().recorded_keys.clone()
	}

	/// Returns the recorder as a [`TrieRecorder`](trie_db::TrieRecorder) compatible type.
	///
	/// - `storage_root`: The storage root of the trie for which accesses are recorded. This is
	///   important when recording access to different tries at once (like top and child tries).
	///
	/// NOTE: This locks a mutex that stays locked until the return value is dropped.
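	///
	/// Because of that, the tests in this file wrap the usage in its own scope so that the lock
	/// is released again, e.g.:
	///
	/// ```ignore
	/// {
	/// 	let mut trie_recorder = recorder.as_trie_recorder(storage_root);
	/// 	// ... build a `TrieDB` with `.with_recorder(&mut trie_recorder)` and query it ...
	/// } // The guard is dropped here and the internal mutex is unlocked again.
	/// ```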
	#[inline]
	pub fn as_trie_recorder(&self, storage_root: H::Out) -> TrieRecorder<'_, H> {
		TrieRecorder::<H> {
			inner: self.inner.lock(),
			storage_root,
			encoded_size_estimation: self.encoded_size_estimation.clone(),
			_phantom: PhantomData,
		}
	}

	/// Drain the recording into a [`StorageProof`].
	///
	/// While a recorder can be cloned, all clones share the same internal state. After calling this
	/// function, all other instances will have their internal state reset as well.
	///
	/// If you don't want to drain the recorded state, use [`Self::to_storage_proof`].
	///
	/// Returns the [`StorageProof`].
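	///
	/// A small sketch of the clone semantics described above (`recorder` is assumed to have
	/// recorded some accesses already):
	///
	/// ```ignore
	/// let other = recorder.clone();
	/// let proof = recorder.drain_storage_proof();
	/// // `other` shares the same internal state, so its recordings are gone as well.
	/// assert!(other.to_storage_proof().is_empty());
	/// ```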
	pub fn drain_storage_proof(self) -> StorageProof {
		let mut recorder = mem::take(&mut *self.inner.lock());
		StorageProof::new(recorder.accessed_nodes.drain().map(|(_, v)| v))
	}

	/// Convert the recording to a [`StorageProof`].
	///
	/// In contrast to [`Self::drain_storage_proof`], this doesn't consume the recorder and doesn't
	/// clear the recordings.
	///
	/// Returns the [`StorageProof`].
	pub fn to_storage_proof(&self) -> StorageProof {
		let recorder = self.inner.lock();
		StorageProof::new(recorder.accessed_nodes.values().cloned())
	}

	/// Returns the estimated encoded size of the proof.
	///
	/// The estimation is based on all the nodes that have been accessed in the trie so far.
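	///
	/// For example (sketch; recording against some populated `db`/`root` as in the tests below):
	///
	/// ```ignore
	/// let recorder = Recorder::<sp_core::Blake2Hasher>::default();
	/// assert_eq!(0, recorder.estimate_encoded_size());
	/// // ... access the trie through `recorder.as_trie_recorder(root)` ...
	/// assert!(recorder.estimate_encoded_size() > 0);
	/// recorder.reset();
	/// assert_eq!(0, recorder.estimate_encoded_size());
	/// ```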
	pub fn estimate_encoded_size(&self) -> usize {
		self.encoded_size_estimation.load(Ordering::Relaxed)
	}

	/// Reset the state.
	///
	/// This discards all recorded data.
	pub fn reset(&self) {
		mem::take(&mut *self.inner.lock());
		self.encoded_size_estimation.store(0, Ordering::Relaxed);
	}

	/// Start a new transaction.
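	///
	/// Transactions can be nested; [`Self::rollback_transaction`] and [`Self::commit_transaction`]
	/// always apply to the most recently started transaction. A sketch based on the transaction
	/// tests below (trie setup assumed):
	///
	/// ```ignore
	/// recorder.start_transaction();
	/// // ... record accesses for `key1` ...
	/// recorder.start_transaction();
	/// // ... record accesses for `key2` ...
	///
	/// // Undo everything recorded since the second `start_transaction`.
	/// recorder.rollback_transaction().unwrap();
	/// // Keep everything recorded since the first `start_transaction`.
	/// recorder.commit_transaction().unwrap();
	/// ```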
	pub fn start_transaction(&self) {
		let mut inner = self.inner.lock();
		inner.transactions.push(Default::default());
	}

	/// Rollback the latest transaction.
	///
	/// Returns an error if there wasn't any active transaction.
	pub fn rollback_transaction(&self) -> Result<(), ()> {
		let mut inner = self.inner.lock();

		// We locked `inner` and can just update the encoded size locally and then store it back to
		// the atomic.
		let mut new_encoded_size_estimation = self.encoded_size_estimation.load(Ordering::Relaxed);
		let transaction = inner.transactions.pop().ok_or(())?;

		transaction.accessed_nodes.into_iter().for_each(|n| {
			if let Some(old) = inner.accessed_nodes.remove(&n) {
				new_encoded_size_estimation =
					new_encoded_size_estimation.saturating_sub(old.encoded_size());
			}
		});

		transaction.recorded_keys.into_iter().for_each(|(storage_root, keys)| {
			keys.into_iter().for_each(|(k, old_state)| {
				if let Some(state) = old_state {
					inner.recorded_keys.entry(storage_root).or_default().insert(k, state);
				} else {
					inner.recorded_keys.entry(storage_root).or_default().remove(&k);
				}
			});
		});

		self.encoded_size_estimation
			.store(new_encoded_size_estimation, Ordering::Relaxed);

		Ok(())
	}

	/// Commit the latest transaction.
	///
	/// Returns an error if there wasn't any active transaction.
	pub fn commit_transaction(&self) -> Result<(), ()> {
		let mut inner = self.inner.lock();

		let transaction = inner.transactions.pop().ok_or(())?;

		if let Some(parent_transaction) = inner.transactions.last_mut() {
			parent_transaction.accessed_nodes.extend(transaction.accessed_nodes);

			transaction.recorded_keys.into_iter().for_each(|(storage_root, keys)| {
				keys.into_iter().for_each(|(k, old_state)| {
					parent_transaction
						.recorded_keys
						.entry(storage_root)
						.or_default()
						.entry(k)
						.or_insert(old_state);
				})
			});
		}

		Ok(())
	}
}

impl<H: Hasher> crate::ProofSizeProvider for Recorder<H> {
	fn estimate_encoded_size(&self) -> usize {
		Recorder::estimate_encoded_size(self)
	}
}

/// The [`TrieRecorder`](trie_db::TrieRecorder) implementation.
pub struct TrieRecorder<'a, H: Hasher> {
	inner: MutexGuard<'a, RecorderInner<H::Out>>,
	storage_root: H::Out,
	encoded_size_estimation: Arc<AtomicUsize>,
	_phantom: PhantomData<H>,
}

impl<H: Hasher> crate::TrieRecorderProvider<H> for Recorder<H> {
	type Recorder<'a>
		= TrieRecorder<'a, H>
	where
		H: 'a;

	fn drain_storage_proof(self) -> Option<StorageProof> {
		Some(Recorder::drain_storage_proof(self))
	}

	fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> {
		Recorder::as_trie_recorder(&self, storage_root)
	}
}

impl<'a, H: Hasher> TrieRecorder<'a, H> {
	/// Update the recorded keys entry for the given `full_key`.
	fn update_recorded_keys(&mut self, full_key: &[u8], access: RecordedForKey) {
		let inner = self.inner.deref_mut();

		let entry =
			inner.recorded_keys.entry(self.storage_root).or_default().entry(full_key.into());

		let key = entry.key().clone();

		// We don't need to update the record if we only accessed the `Hash` for the given
		// `full_key`. Only `Value` access can be an upgrade from `Hash`.
		let entry = if matches!(access, RecordedForKey::Value) {
			entry.and_modify(|e| {
				if let Some(tx) = inner.transactions.last_mut() {
					// Store the previous state only once per transaction.
					tx.recorded_keys
						.entry(self.storage_root)
						.or_default()
						.entry(key.clone())
						.or_insert(Some(*e));
				}

				*e = access;
			})
		} else {
			entry
		};

		entry.or_insert_with(|| {
			if let Some(tx) = inner.transactions.last_mut() {
				// The key wasn't yet recorded, so there isn't any old state.
				tx.recorded_keys
					.entry(self.storage_root)
					.or_default()
					.entry(key)
					.or_insert(None);
			}

			access
		});
	}
}

impl<'a, H: Hasher> trie_db::TrieRecorder<H::Out> for TrieRecorder<'a, H> {
	fn record(&mut self, access: TrieAccess<H::Out>) {
		let mut encoded_size_update = 0;

		match access {
			TrieAccess::NodeOwned { hash, node_owned } => {
				let inner = self.inner.deref_mut();

				if inner.ignored_nodes.is_ignored(&hash) {
					tracing::trace!(
						target: LOG_TARGET,
						?hash,
						"Ignoring node",
					);
					return
				}

				tracing::trace!(
					target: LOG_TARGET,
					?hash,
					"Recording node",
				);

				inner.accessed_nodes.entry(hash).or_insert_with(|| {
					let node = node_owned.to_encoded::<NodeCodec<H>>();

					encoded_size_update += node.encoded_size();

					if let Some(tx) = inner.transactions.last_mut() {
						tx.accessed_nodes.insert(hash);
					}

					node
				});
			},
			TrieAccess::EncodedNode { hash, encoded_node } => {
				let inner = self.inner.deref_mut();

				if inner.ignored_nodes.is_ignored(&hash) {
					tracing::trace!(
						target: LOG_TARGET,
						?hash,
						"Ignoring node",
					);
					return
				}

				tracing::trace!(
					target: LOG_TARGET,
					hash = ?hash,
					"Recording node",
				);

				inner.accessed_nodes.entry(hash).or_insert_with(|| {
					let node = encoded_node.into_owned();

					encoded_size_update += node.encoded_size();

					if let Some(tx) = inner.transactions.last_mut() {
						tx.accessed_nodes.insert(hash);
					}

					node
				});
			},
			TrieAccess::Value { hash, value, full_key } => {
				let inner = self.inner.deref_mut();

				// A value is also just a node.
				if inner.ignored_nodes.is_ignored(&hash) {
					tracing::trace!(
						target: LOG_TARGET,
						?hash,
						"Ignoring value",
					);
					return
				}

				tracing::trace!(
					target: LOG_TARGET,
					hash = ?hash,
					key = ?sp_core::hexdisplay::HexDisplay::from(&full_key),
					"Recording value",
				);

				inner.accessed_nodes.entry(hash).or_insert_with(|| {
					let value = value.into_owned();

					encoded_size_update += value.encoded_size();

					if let Some(tx) = inner.transactions.last_mut() {
						tx.accessed_nodes.insert(hash);
					}

					value
				});

				self.update_recorded_keys(full_key, RecordedForKey::Value);
			},
			TrieAccess::Hash { full_key } => {
				tracing::trace!(
					target: LOG_TARGET,
					key = ?sp_core::hexdisplay::HexDisplay::from(&full_key),
					"Recorded hash access for key",
				);

				// We don't need to update the `encoded_size_update` as the hash was already
				// accounted for by the recorded node that holds the hash.
				self.update_recorded_keys(full_key, RecordedForKey::Hash);
			},
			TrieAccess::NonExisting { full_key } => {
				tracing::trace!(
					target: LOG_TARGET,
					key = ?sp_core::hexdisplay::HexDisplay::from(&full_key),
					"Recorded non-existing value access for key",
				);

				// Non-existing access means we recorded all trie nodes up to the value.
				// Not the actual value, as it doesn't exist, but all trie nodes to know
				// that the value doesn't exist in the trie.
				self.update_recorded_keys(full_key, RecordedForKey::Value);
			},
			TrieAccess::InlineValue { full_key } => {
				tracing::trace!(
					target: LOG_TARGET,
					key = ?sp_core::hexdisplay::HexDisplay::from(&full_key),
					"Recorded inline value access for key",
				);

				// A value was accessed that is stored inline in a node and we recorded all trie
				// nodes to access this value.
				self.update_recorded_keys(full_key, RecordedForKey::Value);
			},
		};

		self.encoded_size_estimation.fetch_add(encoded_size_update, Ordering::Relaxed);
	}

	fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey {
		self.inner
			.recorded_keys
			.get(&self.storage_root)
			.and_then(|k| k.get(key).copied())
			.unwrap_or(RecordedForKey::None)
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use crate::tests::create_trie;
	use trie_db::{Trie, TrieDBBuilder, TrieRecorder};

	type MemoryDB = crate::MemoryDB<sp_core::Blake2Hasher>;
	type Layout = crate::LayoutV1<sp_core::Blake2Hasher>;
	type Recorder = super::Recorder<sp_core::Blake2Hasher>;

	const TEST_DATA: &[(&[u8], &[u8])] =
		&[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key4", &[4; 64])];

	#[test]
	fn recorder_works() {
		let (db, root) = create_trie::<Layout>(TEST_DATA);

		let recorder = Recorder::default();

		{
			let mut trie_recorder = recorder.as_trie_recorder(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root)
				.with_recorder(&mut trie_recorder)
				.build();
			assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap());
		}

		let storage_proof = recorder.drain_storage_proof();
		let memory_db: MemoryDB = storage_proof.into_memory_db();

		// Check that we recorded the required data
		let trie = TrieDBBuilder::<Layout>::new(&memory_db, &root).build();
		assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap());
	}

	#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
	struct RecorderStats {
		accessed_nodes: usize,
		recorded_keys: usize,
		estimated_size: usize,
	}

	impl RecorderStats {
		fn extract(recorder: &Recorder) -> Self {
			let inner = recorder.inner.lock();

			let recorded_keys =
				inner.recorded_keys.iter().flat_map(|(_, keys)| keys.keys()).count();

			Self {
				recorded_keys,
				accessed_nodes: inner.accessed_nodes.len(),
				estimated_size: recorder.estimate_encoded_size(),
			}
		}
	}

	#[test]
	fn recorder_transactions_rollback_work() {
		let (db, root) = create_trie::<Layout>(TEST_DATA);

		let recorder = Recorder::default();
		let mut stats = vec![RecorderStats::default()];

		for i in 0..4 {
			recorder.start_transaction();
			{
				let mut trie_recorder = recorder.as_trie_recorder(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root)
					.with_recorder(&mut trie_recorder)
					.build();

				assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap());
			}
			stats.push(RecorderStats::extract(&recorder));
		}

		assert_eq!(4, recorder.inner.lock().transactions.len());

		for i in 0..5 {
			assert_eq!(stats[4 - i], RecorderStats::extract(&recorder));

			let storage_proof = recorder.to_storage_proof();
			let memory_db: MemoryDB = storage_proof.into_memory_db();

			// Check that we recorded the required data
			let trie = TrieDBBuilder::<Layout>::new(&memory_db, &root).build();

			// Check that the required data is still present.
			for a in 0..4 {
				if a < 4 - i {
					assert_eq!(TEST_DATA[a].1.to_vec(), trie.get(TEST_DATA[a].0).unwrap().unwrap());
				} else {
					// All the data that we already rolled back should be gone!
					assert!(trie.get(TEST_DATA[a].0).is_err());
				}
			}

			if i < 4 {
				recorder.rollback_transaction().unwrap();
			}
		}

		assert_eq!(0, recorder.inner.lock().transactions.len());
	}

	#[test]
	fn recorder_transactions_commit_work() {
		let (db, root) = create_trie::<Layout>(TEST_DATA);

		let recorder = Recorder::default();

		for i in 0..4 {
			recorder.start_transaction();
			{
				let mut trie_recorder = recorder.as_trie_recorder(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root)
					.with_recorder(&mut trie_recorder)
					.build();

				assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap());
			}
		}

		let stats = RecorderStats::extract(&recorder);
		assert_eq!(4, recorder.inner.lock().transactions.len());

		for _ in 0..4 {
			recorder.commit_transaction().unwrap();
		}
		assert_eq!(0, recorder.inner.lock().transactions.len());
		assert_eq!(stats, RecorderStats::extract(&recorder));

		let storage_proof = recorder.to_storage_proof();
		let memory_db: MemoryDB = storage_proof.into_memory_db();

		// Check that we recorded the required data
		let trie = TrieDBBuilder::<Layout>::new(&memory_db, &root).build();

		// Check that the required data is still present.
		for i in 0..4 {
			assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap());
		}
	}

	#[test]
	fn recorder_transactions_commit_and_rollback_work() {
		let (db, root) = create_trie::<Layout>(TEST_DATA);

		let recorder = Recorder::default();

		for i in 0..2 {
			recorder.start_transaction();
			{
				let mut trie_recorder = recorder.as_trie_recorder(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root)
					.with_recorder(&mut trie_recorder)
					.build();

				assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap());
			}
		}

		recorder.rollback_transaction().unwrap();

		for i in 2..4 {
			recorder.start_transaction();
			{
				let mut trie_recorder = recorder.as_trie_recorder(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root)
					.with_recorder(&mut trie_recorder)
					.build();

				assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap());
			}
		}

		recorder.rollback_transaction().unwrap();

		assert_eq!(2, recorder.inner.lock().transactions.len());

		for _ in 0..2 {
			recorder.commit_transaction().unwrap();
		}

		assert_eq!(0, recorder.inner.lock().transactions.len());

		let storage_proof = recorder.to_storage_proof();
		let memory_db: MemoryDB = storage_proof.into_memory_db();

		// Check that we recorded the required data
		let trie = TrieDBBuilder::<Layout>::new(&memory_db, &root).build();

		// Check that the required data is still present.
		for i in 0..4 {
			if i % 2 == 0 {
				assert_eq!(TEST_DATA[i].1.to_vec(), trie.get(TEST_DATA[i].0).unwrap().unwrap());
			} else {
				assert!(trie.get(TEST_DATA[i].0).is_err());
			}
		}
	}

	#[test]
	fn recorder_transaction_accessed_keys_works() {
		let key = TEST_DATA[0].0;
		let (db, root) = create_trie::<Layout>(TEST_DATA);

		let recorder = Recorder::default();

		{
			let trie_recorder = recorder.as_trie_recorder(root);
			assert!(matches!(trie_recorder.trie_nodes_recorded_for_key(key), RecordedForKey::None));
		}

		recorder.start_transaction();
		{
			let mut trie_recorder = recorder.as_trie_recorder(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root)
				.with_recorder(&mut trie_recorder)
				.build();

			assert_eq!(
				sp_core::Blake2Hasher::hash(TEST_DATA[0].1),
				trie.get_hash(TEST_DATA[0].0).unwrap().unwrap()
			);
			assert!(matches!(trie_recorder.trie_nodes_recorded_for_key(key), RecordedForKey::Hash));
		}

		recorder.start_transaction();
		{
			let mut trie_recorder = recorder.as_trie_recorder(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root)
				.with_recorder(&mut trie_recorder)
				.build();

			assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap());
			assert!(matches!(
				trie_recorder.trie_nodes_recorded_for_key(key),
				RecordedForKey::Value,
			));
		}

		recorder.rollback_transaction().unwrap();
		{
			let trie_recorder = recorder.as_trie_recorder(root);
			assert!(matches!(trie_recorder.trie_nodes_recorded_for_key(key), RecordedForKey::Hash));
		}

		recorder.rollback_transaction().unwrap();
		{
			let trie_recorder = recorder.as_trie_recorder(root);
			assert!(matches!(trie_recorder.trie_nodes_recorded_for_key(key), RecordedForKey::None));
		}

		recorder.start_transaction();
		{
			let mut trie_recorder = recorder.as_trie_recorder(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root)
				.with_recorder(&mut trie_recorder)
				.build();

			assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap());
			assert!(matches!(
				trie_recorder.trie_nodes_recorded_for_key(key),
				RecordedForKey::Value,
			));
		}

		recorder.start_transaction();
		{
			let mut trie_recorder = recorder.as_trie_recorder(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root)
				.with_recorder(&mut trie_recorder)
				.build();

			assert_eq!(
				sp_core::Blake2Hasher::hash(TEST_DATA[0].1),
				trie.get_hash(TEST_DATA[0].0).unwrap().unwrap()
			);
			assert!(matches!(
				trie_recorder.trie_nodes_recorded_for_key(key),
				RecordedForKey::Value
			));
		}

		recorder.rollback_transaction().unwrap();
		{
			let trie_recorder = recorder.as_trie_recorder(root);
			assert!(matches!(
				trie_recorder.trie_nodes_recorded_for_key(key),
				RecordedForKey::Value
			));
		}

		recorder.rollback_transaction().unwrap();
		{
			let trie_recorder = recorder.as_trie_recorder(root);
			assert!(matches!(trie_recorder.trie_nodes_recorded_for_key(key), RecordedForKey::None));
		}
	}

	#[test]
	fn recorder_ignoring_nodes_works() {
		let (db, root) = create_trie::<Layout>(TEST_DATA);

		let recorder = Recorder::default();

		{
			let mut trie_recorder = recorder.as_trie_recorder(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root)
				.with_recorder(&mut trie_recorder)
				.build();

			for (key, data) in TEST_DATA.iter().take(3) {
				assert_eq!(data.to_vec(), trie.get(&key).unwrap().unwrap());
			}
		}

		assert!(recorder.estimate_encoded_size() > 10);
		let mut ignored_nodes = IgnoredNodes::from_storage_proof::<sp_core::Blake2Hasher>(
			&recorder.drain_storage_proof(),
		);

		let recorder = Recorder::with_ignored_nodes(ignored_nodes.clone());

		{
			let mut trie_recorder = recorder.as_trie_recorder(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root)
				.with_recorder(&mut trie_recorder)
				.build();

			for (key, data) in TEST_DATA {
				assert_eq!(data.to_vec(), trie.get(&key).unwrap().unwrap());
			}
		}

		assert!(recorder.estimate_encoded_size() > TEST_DATA[3].1.len());
		let ignored_nodes2 = IgnoredNodes::from_storage_proof::<sp_core::Blake2Hasher>(
			&recorder.drain_storage_proof(),
		);

		ignored_nodes.extend(ignored_nodes2);

		let recorder = Recorder::with_ignored_nodes(ignored_nodes);

		{
			let mut trie_recorder = recorder.as_trie_recorder(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root)
				.with_recorder(&mut trie_recorder)
				.build();

			for (key, data) in TEST_DATA {
				assert_eq!(data.to_vec(), trie.get(&key).unwrap().unwrap());
			}
		}
		assert_eq!(0, recorder.estimate_encoded_size());
	}
}