
pallet_mmr/mmr/storage.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! An MMR storage implementation.

use crate::{
	mmr::{Node, NodeOf},
	primitives::{mmr_lib, mmr_lib::helper, utils::NodesUtils, FullLeaf, NodeIndex},
	BlockHashProvider, Config, Nodes, NumberOfLeaves, Pallet,
};
use alloc::{vec, vec::Vec};
use codec::Encode;
use core::iter::Peekable;
use frame::{
	deps::{
		sp_core::offchain::StorageKind,
		sp_io::{offchain, offchain_index},
	},
	prelude::*,
};
use log::{debug, trace};
/// A marker type for runtime-specific storage implementation.
///
/// Allows appending new items to the MMR and proof verification.
/// MMR nodes are appended to two different storages:
/// 1. We add node (leaf) hashes to the on-chain storage (see [crate::Nodes]).
/// 2. We add full leaves (and all inner nodes as well) into the `IndexingAPI` during block
///    processing, so the values end up in the Offchain DB if indexing is enabled.
pub struct RuntimeStorage;

/// A marker type for offchain-specific storage implementation.
///
/// Allows proof generation and verification, but DOES NOT support appending
/// new items: MMR nodes are assumed to already be stored in the Off-Chain DB.
pub struct OffchainStorage;

impl OffchainStorage {
	fn get(key: &[u8]) -> Option<Vec<u8>> {
		offchain::local_storage_get(StorageKind::PERSISTENT, key)
	}

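	// The runtime has no read access to the offchain DB; `offchain_index::set`
	// below only *queues* a write that the host persists when offchain indexing
	// is enabled. The `runtime-benchmarks` variant can write directly to local
	// storage instead, so that benchmark code is able to read the values back.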
	#[cfg(not(feature = "runtime-benchmarks"))]
	fn set<T: Config<I>, I: 'static>(key: &[u8], value: &[u8]) {
		offchain_index::set(key, value);
	}

	#[cfg(feature = "runtime-benchmarks")]
	fn set<T: Config<I>, I: 'static>(key: &[u8], value: &[u8]) {
		if crate::pallet::UseLocalStorage::<T, I>::get() {
			offchain::local_storage_set(StorageKind::PERSISTENT, key, value);
		} else {
			offchain_index::set(key, value);
		}
	}
}

/// A storage layer for MMR.
///
/// There are two different implementations depending on the use case.
/// See docs for [RuntimeStorage] and [OffchainStorage].
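///
/// As a usage sketch (how the surrounding `mmr` module is expected to wire
/// this up when constructing its `mmr_lib::MMR`, not an API guarantee): block
/// import appends through `Storage::<RuntimeStorage, T, I, L>::default()`,
/// while offchain proof generation reads through
/// `Storage::<OffchainStorage, T, I, L>::default()`.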
pub struct Storage<StorageType, T, I, L>(core::marker::PhantomData<(StorageType, T, I, L)>);

impl<StorageType, T, I, L> Default for Storage<StorageType, T, I, L> {
	fn default() -> Self {
		Self(Default::default())
	}
}

impl<T, I, L> mmr_lib::MMRStoreReadOps<NodeOf<T, I, L>> for Storage<OffchainStorage, T, I, L>
where
	T: Config<I>,
	I: 'static,
	L: FullLeaf + Decode,
{
	fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result<Option<NodeOf<T, I, L>>> {
		// Find out which leaf added node `pos` to the MMR.
		let ancestor_leaf_idx = NodesUtils::leaf_index_that_added_node(pos);

		// We should only get here when generating proofs. The client requests proofs
		// for finalized blocks, which should usually be canonicalized already,
		// unless the MMR client gadget has a delay.
		let key = Pallet::<T, I>::node_canon_offchain_key(pos);
		debug!(
			target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, canon key {:?}",
			pos, ancestor_leaf_idx, key
		);
		// Try to retrieve the element from the offchain DB under its canonical key.
		if let Some(elem) = OffchainStorage::get(&key) {
			return Ok(codec::Decode::decode(&mut &*elem).ok())
		}

		// Fall back to searching for the node under its fork-specific (temporary) key.
		let ancestor_parent_block_num =
			Pallet::<T, I>::leaf_index_to_parent_block_num(ancestor_leaf_idx);
		let ancestor_parent_hash = T::BlockHashProvider::block_hash(ancestor_parent_block_num);
		let temp_key = Pallet::<T, I>::node_temp_offchain_key(pos, ancestor_parent_hash);
		debug!(
			target: "runtime::mmr::offchain",
			"offchain db get {}: leaf idx {:?}, hash {:?}, temp key {:?}",
			pos, ancestor_leaf_idx, ancestor_parent_hash, temp_key
		);
		// Retrieve the element from the offchain DB under the temporary key.
		Ok(OffchainStorage::get(&temp_key).and_then(|v| codec::Decode::decode(&mut &*v).ok()))
	}
}

impl<T, I, L> mmr_lib::MMRStoreWriteOps<NodeOf<T, I, L>> for Storage<OffchainStorage, T, I, L>
where
	T: Config<I>,
	I: 'static,
	L: FullLeaf + Decode,
{
	fn append(&mut self, _: NodeIndex, _: Vec<NodeOf<T, I, L>>) -> mmr_lib::Result<()> {
		panic!("MMR must not be altered in the off-chain context.")
	}
}

impl<T, I, L> mmr_lib::MMRStoreReadOps<NodeOf<T, I, L>> for Storage<RuntimeStorage, T, I, L>
where
	T: Config<I>,
	I: 'static,
	L: FullLeaf,
{
	fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result<Option<NodeOf<T, I, L>>> {
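		// Only peaks are kept in on-chain storage (pruned inner nodes read back
		// as `None`), which is sufficient here: appending to the MMR only ever
		// needs the current peaks.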
		Ok(Nodes::<T, I>::get(pos).map(Node::Hash))
	}
}

impl<T, I, L> mmr_lib::MMRStoreWriteOps<NodeOf<T, I, L>> for Storage<RuntimeStorage, T, I, L>
where
	T: Config<I>,
	I: 'static,
	L: FullLeaf,
{
	fn append(&mut self, pos: NodeIndex, elems: Vec<NodeOf<T, I, L>>) -> mmr_lib::Result<()> {
		if elems.is_empty() {
			return Ok(())
		}

		trace!(
			target: "runtime::mmr", "elems: {:?}",
			elems.iter().map(|elem| elem.hash()).collect::<Vec<_>>()
		);

		let leaves = NumberOfLeaves::<T, I>::get();
		let size = NodesUtils::new(leaves).size();

		if pos != size {
			return Err(mmr_lib::Error::InconsistentStore)
		}

		let new_size = size + elems.len() as NodeIndex;

		// Sorted (ascending) iterators over the peak indices to prune and to persist.
		let (peaks_to_prune, mut peaks_to_store) = peaks_to_prune_and_store(size, new_size);

		// Now we are going to iterate over the elements to insert,
		// keeping track of the current `node_index` and `leaf_index`.
		let mut leaf_index = leaves;
		let mut node_index = size;

		// Use the parent hash of the block adding the new nodes (this block) as an extra
		// identifier in the offchain DB to avoid key collisions and overwrites in case of forks.
		let parent_hash = <frame_system::Pallet<T>>::parent_hash();
		for elem in elems {
			// On-chain, we only store the new peaks.
			if peaks_to_store.next_if_eq(&node_index).is_some() {
				Nodes::<T, I>::insert(node_index, elem.hash());
			}
			// Off-chain, we store the full node (using the indexing API).
			Self::store_to_offchain(node_index, parent_hash, &elem);

			// Increase the indices.
			if let Node::Data(..) = elem {
				leaf_index += 1;
			}
			node_index += 1;
		}

		// Update the current number of leaves.
		NumberOfLeaves::<T, I>::put(leaf_index);

		// And remove all remaining items of the `peaks_before` collection from storage.
		for pos in peaks_to_prune {
			Nodes::<T, I>::remove(pos);
		}

		Ok(())
	}
}

impl<T, I, L> Storage<RuntimeStorage, T, I, L>
where
	T: Config<I>,
	I: 'static,
	L: FullLeaf,
{
	fn store_to_offchain(
		pos: NodeIndex,
		parent_hash: <T as frame_system::Config>::Hash,
		node: &NodeOf<T, I, L>,
	) {
		let encoded_node = node.encode();
		// We store this node offchain keyed by `(parent_hash, node_index)` to make it
		// fork-resistant. The MMR client gadget task will "canonicalize" it on the first
		// finality notification that follows, when we are no longer worried about forks.
		let temp_key = Pallet::<T, I>::node_temp_offchain_key(pos, parent_hash);
		debug!(
			target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}",
			pos, parent_hash, temp_key
		);
		OffchainStorage::set::<T, I>(&temp_key, &encoded_node);
	}
}
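/// Compute which peaks to prune from and which to persist in on-chain storage
/// when the MMR grows from `old_size` to `new_size` nodes.
///
/// A worked example (assuming `mmr_lib`'s 0-based node positions): growing
/// from size 4 (peaks `[2, 3]`) to size 7 (single peak `[6]`) shares no common
/// prefix, so `[2, 3]` are pruned and `[6]` is stored; growing from size 3
/// (peak `[2]`) to size 4 (peaks `[2, 3]`) shares peak `2`, so nothing is
/// pruned and only `3` is stored.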
fn peaks_to_prune_and_store(
	old_size: NodeIndex,
	new_size: NodeIndex,
) -> (impl Iterator<Item = NodeIndex>, Peekable<impl Iterator<Item = NodeIndex>>) {
	// Sorted (ascending) collections of peak indices before and after the insertion;
	// both collections may share a common prefix.
	let peaks_before = if old_size == 0 { vec![] } else { helper::get_peaks(old_size) };
	let peaks_after = helper::get_peaks(new_size);
	trace!(target: "runtime::mmr", "peaks_before: {:?}", peaks_before);
	trace!(target: "runtime::mmr", "peaks_after: {:?}", peaks_after);
	let mut peaks_before = peaks_before.into_iter().peekable();
	let mut peaks_after = peaks_after.into_iter().peekable();

	// Consume the common prefix between `peaks_before` and `peaks_after`,
	// since those peaks won't be touched anyway.
	while peaks_before.peek() == peaks_after.peek() {
		peaks_before.next();
		peaks_after.next();
	}

	// What's left in the two collections is:
	// 1. Old peaks to remove from storage.
	// 2. New peaks to persist in storage.
	(peaks_before, peaks_after)
}
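
#[cfg(test)]
mod peak_pruning_tests {
	use super::*;

	// A minimal sketch of the peak bookkeeping above, assuming `mmr_lib`'s
	// 0-based node positions and `helper::get_peaks` semantics.

	#[test]
	fn prunes_replaced_peaks_and_stores_new_ones() {
		// Appending one leaf to a size-4 MMR (peaks [2, 3]) creates nodes 4
		// (leaf), 5 and 6 (parents), growing it to size 7 with single peak [6].
		let (prune, store) = peaks_to_prune_and_store(4, 7);
		assert_eq!(prune.collect::<Vec<_>>(), vec![2, 3]);
		assert_eq!(store.collect::<Vec<_>>(), vec![6]);
	}

	#[test]
	fn keeps_the_common_peak_prefix_untouched() {
		// Appending one leaf to a size-3 MMR (peak [2]) adds only node 3 (no
		// merge happens), so peak 2 is shared: nothing is pruned and only the
		// new peak 3 is stored.
		let (prune, store) = peaks_to_prune_and_store(3, 4);
		assert_eq!(prune.collect::<Vec<_>>(), Vec::<NodeIndex>::new());
		assert_eq!(store.collect::<Vec<_>>(), vec![3]);
	}
}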