// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
//! An abstraction over storage used by the chain selection subsystem.
//!
//! This provides both a [`Backend`] trait and an [`OverlayedBackend`]
//! struct which allows in-memory changes to be applied on top of a
//! [`Backend`], maintaining consistency between queries and temporary writes,
//! before any commit to the underlying storage is made.

use polkadot_primitives::{BlockNumber, Hash};

use std::collections::HashMap;

use crate::{BlockEntry, Error, LeafEntrySet, Timestamp};
29
/// A single write operation against the chain-selection storage.
///
/// Batches of these are applied atomically via [`Backend::write`], with
/// later operations in a batch taking precedence over earlier ones.
pub(super) enum BackendWriteOp {
	/// Write a block entry, keyed by its block hash.
	WriteBlockEntry(BlockEntry),
	/// Write the list of block hashes stored at the given block number.
	WriteBlocksByNumber(BlockNumber, Vec<Hash>),
	/// Replace the stored set of viable leaves.
	WriteViableLeaves(LeafEntrySet),
	/// Write the list of block hashes stored at the given stagnant timestamp.
	WriteStagnantAt(Timestamp, Vec<Hash>),
	/// Delete the hash list stored at the given block number.
	DeleteBlocksByNumber(BlockNumber),
	/// Delete the block entry for the given hash.
	DeleteBlockEntry(Hash),
	/// Delete the stagnant list stored at the given timestamp.
	DeleteStagnantAt(Timestamp),
}
39
/// An abstraction over backend storage for the logic of this subsystem.
///
/// All `load_*` methods are read-only; [`Backend::write`] is the single
/// mutation entry point and applies a whole batch of [`BackendWriteOp`]s
/// atomically.
pub(super) trait Backend {
	/// Load a block entry from the DB. Returns `Ok(None)` if unknown.
	fn load_block_entry(&self, hash: &Hash) -> Result<Option<BlockEntry>, Error>;
	/// Load the active-leaves set.
	fn load_leaves(&self) -> Result<LeafEntrySet, Error>;
	/// Load the stagnant list at the given timestamp.
	fn load_stagnant_at(&self, timestamp: Timestamp) -> Result<Vec<Hash>, Error>;
	/// Load all stagnant lists up to and including the given Unix timestamp
	/// in ascending order. Stop fetching stagnant entries upon reaching `max_elements`.
	fn load_stagnant_at_up_to(
		&self,
		up_to: Timestamp,
		max_elements: usize,
	) -> Result<Vec<(Timestamp, Vec<Hash>)>, Error>;
	/// Load the earliest kept block number.
	fn load_first_block_number(&self) -> Result<Option<BlockNumber>, Error>;
	/// Load blocks by number.
	fn load_blocks_by_number(&self, number: BlockNumber) -> Result<Vec<Hash>, Error>;

	/// Atomically write the list of operations, with later operations taking precedence over prior.
	fn write<I>(&mut self, ops: I) -> Result<(), Error>
	where
		I: IntoIterator<Item = BackendWriteOp>;
}
65
/// An in-memory overlay over the backend.
///
/// This maintains read-only access to the underlying backend, but can be
/// converted into a set of write operations which will, when written to
/// the underlying backend, give the same view as the state of the overlay.
pub(super) struct OverlayedBackend<'a, B: 'a> {
	// The read-only backend that cache misses fall through to.
	inner: &'a B,

	// `None` means 'deleted', missing means query inner.
	block_entries: HashMap<Hash, Option<BlockEntry>>,
	// `None` means 'deleted', missing means query inner.
	blocks_by_number: HashMap<BlockNumber, Option<Vec<Hash>>>,
	// `None` means 'deleted', missing means query inner.
	stagnant_at: HashMap<Timestamp, Option<Vec<Hash>>>,
	// `None` means query inner.
	leaves: Option<LeafEntrySet>,
}
83
84impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> {
85	pub(super) fn new(backend: &'a B) -> Self {
86		OverlayedBackend {
87			inner: backend,
88			block_entries: HashMap::new(),
89			blocks_by_number: HashMap::new(),
90			stagnant_at: HashMap::new(),
91			leaves: None,
92		}
93	}
94
95	pub(super) fn load_block_entry(&self, hash: &Hash) -> Result<Option<BlockEntry>, Error> {
96		if let Some(val) = self.block_entries.get(&hash) {
97			return Ok(val.clone())
98		}
99
100		self.inner.load_block_entry(hash)
101	}
102
103	pub(super) fn load_blocks_by_number(&self, number: BlockNumber) -> Result<Vec<Hash>, Error> {
104		if let Some(val) = self.blocks_by_number.get(&number) {
105			return Ok(val.as_ref().map_or(Vec::new(), Clone::clone))
106		}
107
108		self.inner.load_blocks_by_number(number)
109	}
110
111	pub(super) fn load_leaves(&self) -> Result<LeafEntrySet, Error> {
112		if let Some(ref set) = self.leaves {
113			return Ok(set.clone())
114		}
115
116		self.inner.load_leaves()
117	}
118
119	pub(super) fn load_stagnant_at(&self, timestamp: Timestamp) -> Result<Vec<Hash>, Error> {
120		if let Some(val) = self.stagnant_at.get(&timestamp) {
121			return Ok(val.as_ref().map_or(Vec::new(), Clone::clone))
122		}
123
124		self.inner.load_stagnant_at(timestamp)
125	}
126
127	pub(super) fn write_block_entry(&mut self, entry: BlockEntry) {
128		self.block_entries.insert(entry.block_hash, Some(entry));
129	}
130
131	pub(super) fn delete_block_entry(&mut self, hash: &Hash) {
132		self.block_entries.insert(*hash, None);
133	}
134
135	pub(super) fn write_blocks_by_number(&mut self, number: BlockNumber, blocks: Vec<Hash>) {
136		if blocks.is_empty() {
137			self.blocks_by_number.insert(number, None);
138		} else {
139			self.blocks_by_number.insert(number, Some(blocks));
140		}
141	}
142
143	pub(super) fn delete_blocks_by_number(&mut self, number: BlockNumber) {
144		self.blocks_by_number.insert(number, None);
145	}
146
147	pub(super) fn write_leaves(&mut self, leaves: LeafEntrySet) {
148		self.leaves = Some(leaves);
149	}
150
151	pub(super) fn write_stagnant_at(&mut self, timestamp: Timestamp, hashes: Vec<Hash>) {
152		self.stagnant_at.insert(timestamp, Some(hashes));
153	}
154
155	pub(super) fn delete_stagnant_at(&mut self, timestamp: Timestamp) {
156		self.stagnant_at.insert(timestamp, None);
157	}
158
159	/// Transform this backend into a set of write-ops to be written to the
160	/// inner backend.
161	pub(super) fn into_write_ops(self) -> impl Iterator<Item = BackendWriteOp> {
162		let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v {
163			Some(v) => BackendWriteOp::WriteBlockEntry(v),
164			None => BackendWriteOp::DeleteBlockEntry(h),
165		});
166
167		let blocks_by_number_ops = self.blocks_by_number.into_iter().map(|(n, v)| match v {
168			Some(v) => BackendWriteOp::WriteBlocksByNumber(n, v),
169			None => BackendWriteOp::DeleteBlocksByNumber(n),
170		});
171
172		let leaf_ops = self.leaves.into_iter().map(BackendWriteOp::WriteViableLeaves);
173
174		let stagnant_at_ops = self.stagnant_at.into_iter().map(|(n, v)| match v {
175			Some(v) => BackendWriteOp::WriteStagnantAt(n, v),
176			None => BackendWriteOp::DeleteStagnantAt(n),
177		});
178
179		block_entry_ops
180			.chain(blocks_by_number_ops)
181			.chain(leaf_ops)
182			.chain(stagnant_at_ops)
183	}
184}
185
186/// Attempt to find the given ancestor in the chain with given head.
187///
188/// If the ancestor is the most recently finalized block, and the `head` is
189/// a known unfinalized block, this will return `true`.
190///
191/// If the ancestor is an unfinalized block and `head` is known, this will
192/// return true if `ancestor` is in `head`'s chain.
193///
194/// If the ancestor is an older finalized block, this will return `false`.
195fn contains_ancestor(backend: &impl Backend, head: Hash, ancestor: Hash) -> Result<bool, Error> {
196	let mut current_hash = head;
197	loop {
198		if current_hash == ancestor {
199			return Ok(true)
200		}
201		match backend.load_block_entry(&current_hash)? {
202			Some(e) => current_hash = e.parent_hash,
203			None => break,
204		}
205	}
206
207	Ok(false)
208}
209
210/// This returns the best unfinalized leaf containing the required block.
211///
212/// If the required block is finalized but not the most recent finalized block,
213/// this will return `None`.
214///
215/// If the required block is unfinalized but not an ancestor of any viable leaf,
216/// this will return `None`.
217//
218// Note: this is O(N^2) in the depth of `required` and the number of leaves.
219// We expect the number of unfinalized blocks to be small, as in, to not exceed
220// single digits in practice, and exceedingly unlikely to surpass 1000.
221//
222// However, if we need to, we could implement some type of skip-list for
223// fast ancestry checks.
224pub(super) fn find_best_leaf_containing(
225	backend: &impl Backend,
226	required: Hash,
227) -> Result<Option<Hash>, Error> {
228	let leaves = backend.load_leaves()?;
229	for leaf in leaves.into_hashes_descending() {
230		if contains_ancestor(backend, leaf, required)? {
231			return Ok(Some(leaf))
232		}
233	}
234
235	// If there are no viable leaves containing the ancestor
236	Ok(None)
237}