polkadot_node_core_chain_selection/db_backend/v1.rs

1// Copyright (C) Parity Technologies (UK) Ltd.
2// This file is part of Polkadot.
3
4// Polkadot is free software: you can redistribute it and/or modify
5// it under the terms of the GNU General Public License as published by
6// the Free Software Foundation, either version 3 of the License, or
7// (at your option) any later version.
8
9// Polkadot is distributed in the hope that it will be useful,
10// but WITHOUT ANY WARRANTY; without even the implied warranty of
11// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12// GNU General Public License for more details.
13
14// You should have received a copy of the GNU General Public License
15// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
16
17//! A database [`Backend`][crate::backend::Backend] for the chain selection subsystem.
18//!
19//! This stores the following schema:
20//!
21//! ```ignore
22//! ("CS_block_entry", Hash) -> BlockEntry;
23//! ("CS_block_height", BigEndianBlockNumber) -> Vec<Hash>;
24//! ("CS_stagnant_at", BigEndianTimestamp) -> Vec<Hash>;
25//! ("CS_leaves") -> LeafEntrySet;
26//! ```
27//!
28//! The big-endian encoding is used for creating iterators over the key-value DB which are
//! accessible by prefix, to find the earliest block number stored as well as all the stagnant
30//! blocks.
31//!
32//! The `Vec`s stored are always non-empty. Empty `Vec`s are not stored on disk so there is no
33//! semantic difference between `None` and an empty `Vec`.
34
35use crate::{
36	backend::{Backend, BackendWriteOp},
37	Error,
38};
39
40use polkadot_node_primitives::BlockWeight;
41use polkadot_primitives::{BlockNumber, Hash};
42
43use codec::{Decode, Encode};
44use polkadot_node_subsystem_util::database::{DBTransaction, Database};
45
46use std::sync::Arc;
47
// Key prefixes / keys for the chain-selection column. Full keys append a
// big-endian-encoded suffix (hash, block number, or timestamp) so that
// `iter_with_prefix` yields entries in ascending suffix order.
const BLOCK_ENTRY_PREFIX: &[u8; 14] = b"CS_block_entry";
const BLOCK_HEIGHT_PREFIX: &[u8; 15] = b"CS_block_height";
const STAGNANT_AT_PREFIX: &[u8; 14] = b"CS_stagnant_at";
const LEAVES_KEY: &[u8; 9] = b"CS_leaves";

// The timestamp type as stored in stagnant-at keys (big-endian `u64`);
// converted from `crate::Timestamp` via `Into` at the call sites.
type Timestamp = u64;
54
/// Disk representation of a block's approval state.
///
/// Mirrors [`crate::Approval`] (see the `From` impls below); kept as a
/// separate type so the on-disk codec indices stay stable independently of
/// the in-memory type.
#[derive(Debug, Encode, Decode, Clone, PartialEq)]
enum Approval {
	#[codec(index = 0)]
	Approved,
	#[codec(index = 1)]
	Unapproved,
	#[codec(index = 2)]
	Stagnant,
}
64
65impl From<crate::Approval> for Approval {
66	fn from(x: crate::Approval) -> Self {
67		match x {
68			crate::Approval::Approved => Approval::Approved,
69			crate::Approval::Unapproved => Approval::Unapproved,
70			crate::Approval::Stagnant => Approval::Stagnant,
71		}
72	}
73}
74
75impl From<Approval> for crate::Approval {
76	fn from(x: Approval) -> crate::Approval {
77		match x {
78			Approval::Approved => crate::Approval::Approved,
79			Approval::Unapproved => crate::Approval::Unapproved,
80			Approval::Stagnant => crate::Approval::Stagnant,
81		}
82	}
83}
84
/// Disk representation of [`crate::ViabilityCriteria`].
#[derive(Debug, Encode, Decode, Clone, PartialEq)]
struct ViabilityCriteria {
	// Whether this block was explicitly marked reverted.
	explicitly_reverted: bool,
	// Approval state of the block.
	approval: Approval,
	// Earliest unviable ancestor of this block, if any.
	earliest_unviable_ancestor: Option<Hash>,
}
91
92impl From<crate::ViabilityCriteria> for ViabilityCriteria {
93	fn from(x: crate::ViabilityCriteria) -> Self {
94		ViabilityCriteria {
95			explicitly_reverted: x.explicitly_reverted,
96			approval: x.approval.into(),
97			earliest_unviable_ancestor: x.earliest_unviable_ancestor,
98		}
99	}
100}
101
102impl From<ViabilityCriteria> for crate::ViabilityCriteria {
103	fn from(x: ViabilityCriteria) -> crate::ViabilityCriteria {
104		crate::ViabilityCriteria {
105			explicitly_reverted: x.explicitly_reverted,
106			approval: x.approval.into(),
107			earliest_unviable_ancestor: x.earliest_unviable_ancestor,
108		}
109	}
110}
111
/// Disk representation of [`crate::LeafEntry`]: a leaf block together with
/// its weight and position in the chain.
#[derive(Encode, Decode)]
struct LeafEntry {
	weight: BlockWeight,
	block_number: BlockNumber,
	block_hash: Hash,
}
118
119impl From<crate::LeafEntry> for LeafEntry {
120	fn from(x: crate::LeafEntry) -> Self {
121		LeafEntry { weight: x.weight, block_number: x.block_number, block_hash: x.block_hash }
122	}
123}
124
125impl From<LeafEntry> for crate::LeafEntry {
126	fn from(x: LeafEntry) -> crate::LeafEntry {
127		crate::LeafEntry {
128			weight: x.weight,
129			block_number: x.block_number,
130			block_hash: x.block_hash,
131		}
132	}
133}
134
/// Disk representation of [`crate::LeafEntrySet`].
///
/// Stored under [`LEAVES_KEY`]; an empty set is deleted from disk rather
/// than written (see `write` below), so a missing key means "empty".
#[derive(Encode, Decode)]
struct LeafEntrySet {
	inner: Vec<LeafEntry>,
}
139
140impl From<crate::LeafEntrySet> for LeafEntrySet {
141	fn from(x: crate::LeafEntrySet) -> Self {
142		LeafEntrySet { inner: x.inner.into_iter().map(Into::into).collect() }
143	}
144}
145
146impl From<LeafEntrySet> for crate::LeafEntrySet {
147	fn from(x: LeafEntrySet) -> crate::LeafEntrySet {
148		crate::LeafEntrySet { inner: x.inner.into_iter().map(Into::into).collect() }
149	}
150}
151
/// Disk representation of [`crate::BlockEntry`]: a block's position in the
/// block tree (parent and children), its viability, and its weight.
#[derive(Debug, Encode, Decode, Clone, PartialEq)]
struct BlockEntry {
	block_hash: Hash,
	block_number: BlockNumber,
	parent_hash: Hash,
	children: Vec<Hash>,
	viability: ViabilityCriteria,
	weight: BlockWeight,
}
161
162impl From<crate::BlockEntry> for BlockEntry {
163	fn from(x: crate::BlockEntry) -> Self {
164		BlockEntry {
165			block_hash: x.block_hash,
166			block_number: x.block_number,
167			parent_hash: x.parent_hash,
168			children: x.children,
169			viability: x.viability.into(),
170			weight: x.weight,
171		}
172	}
173}
174
175impl From<BlockEntry> for crate::BlockEntry {
176	fn from(x: BlockEntry) -> crate::BlockEntry {
177		crate::BlockEntry {
178			block_hash: x.block_hash,
179			block_number: x.block_number,
180			parent_hash: x.parent_hash,
181			children: x.children,
182			viability: x.viability.into(),
183			weight: x.weight,
184		}
185	}
186}
187
/// Configuration for the database backend.
#[derive(Debug, Clone, Copy)]
pub struct Config {
	/// The column where block metadata is stored. All chain-selection keys
	/// (block entries, heights, stagnant-at, leaves) live in this one column.
	pub col_data: u32,
}
194
/// The database backend.
///
/// Implements [`Backend`] on top of a shared [`Database`] handle, reading
/// and writing the schema described in the module docs.
pub struct DbBackend {
	inner: Arc<dyn Database>,
	config: Config,
}
200
201impl DbBackend {
202	/// Create a new [`DbBackend`] with the supplied key-value store and
203	/// config.
204	pub fn new(db: Arc<dyn Database>, config: Config) -> Self {
205		DbBackend { inner: db, config }
206	}
207}
208
impl Backend for DbBackend {
	// Read and decode a single block entry by block hash.
	fn load_block_entry(&self, hash: &Hash) -> Result<Option<crate::BlockEntry>, Error> {
		load_decode::<BlockEntry>(&*self.inner, self.config.col_data, &block_entry_key(hash))
			.map(|o| o.map(Into::into))
	}

	// Load the set of viable leaves. A missing key means the empty set,
	// since empty sets are never written to disk (see `write` below).
	fn load_leaves(&self) -> Result<crate::LeafEntrySet, Error> {
		load_decode::<LeafEntrySet>(&*self.inner, self.config.col_data, LEAVES_KEY)
			.map(|o| o.map(Into::into).unwrap_or_default())
	}

	// Load the hashes recorded as stagnant at exactly `timestamp`.
	fn load_stagnant_at(&self, timestamp: crate::Timestamp) -> Result<Vec<Hash>, Error> {
		load_decode::<Vec<Hash>>(
			&*self.inner,
			self.config.col_data,
			&stagnant_at_key(timestamp.into()),
		)
		.map(|o| o.unwrap_or_default())
	}

	// Collect `(timestamp, hashes)` pairs for stagnant-at entries with
	// timestamp <= `up_to`, in ascending timestamp order (big-endian keys
	// iterate in ascending order), taking at most `max_elements` pairs.
	fn load_stagnant_at_up_to(
		&self,
		up_to: crate::Timestamp,
		max_elements: usize,
	) -> Result<Vec<(crate::Timestamp, Vec<Hash>)>, Error> {
		let stagnant_at_iter =
			self.inner.iter_with_prefix(self.config.col_data, &STAGNANT_AT_PREFIX[..]);

		let val = stagnant_at_iter
			// Silently skip entries whose key or value fails to decode, but
			// keep DB iteration errors so they surface below.
			.filter_map(|r| match r {
				Ok((k, v)) =>
					match (decode_stagnant_at_key(&mut &k[..]), <Vec<_>>::decode(&mut &v[..]).ok())
					{
						(Some(at), Some(stagnant_at)) => Some(Ok((at, stagnant_at))),
						_ => None,
					},
				Err(e) => Some(Err(e)),
			})
			.enumerate()
			// `map_or(true, ..)` lets an `Err` pass through so `collect` can
			// report it; otherwise stop at the first timestamp past `up_to`
			// or once `max_elements` entries have been taken.
			.take_while(|(idx, r)| {
				r.as_ref().map_or(true, |(at, _)| *at <= up_to.into() && *idx < max_elements)
			})
			.map(|(_, v)| v)
			.collect::<Result<Vec<_>, _>>()?;

		Ok(val)
	}

	// Find the lowest block number with blocks stored: the first decodable
	// key under the big-endian block-height prefix.
	fn load_first_block_number(&self) -> Result<Option<BlockNumber>, Error> {
		let blocks_at_height_iter =
			self.inner.iter_with_prefix(self.config.col_data, &BLOCK_HEIGHT_PREFIX[..]);

		let val = blocks_at_height_iter
			.filter_map(|r| match r {
				Ok((k, _)) => decode_block_height_key(&k[..]).map(Ok),
				Err(e) => Some(Err(e)),
			})
			.next();

		val.transpose().map_err(Error::from)
	}

	// Load all block hashes stored at the given height; empty if none.
	fn load_blocks_by_number(&self, number: BlockNumber) -> Result<Vec<Hash>, Error> {
		load_decode::<Vec<Hash>>(&*self.inner, self.config.col_data, &block_height_key(number))
			.map(|o| o.unwrap_or_default())
	}

	/// Atomically write the list of operations, with later operations taking precedence over prior.
	fn write<I>(&mut self, ops: I) -> Result<(), Error>
	where
		I: IntoIterator<Item = BackendWriteOp>,
	{
		// Batch every op into a single transaction so the write is atomic.
		let mut tx = DBTransaction::new();
		for op in ops {
			match op {
				BackendWriteOp::WriteBlockEntry(block_entry) => {
					let block_entry: BlockEntry = block_entry.into();
					tx.put_vec(
						self.config.col_data,
						&block_entry_key(&block_entry.block_hash),
						block_entry.encode(),
					);
				},
				// Per the module docs, empty `Vec`s are deleted rather than
				// stored, so `None` and "empty" are indistinguishable on read.
				BackendWriteOp::WriteBlocksByNumber(block_number, v) =>
					if v.is_empty() {
						tx.delete(self.config.col_data, &block_height_key(block_number));
					} else {
						tx.put_vec(
							self.config.col_data,
							&block_height_key(block_number),
							v.encode(),
						);
					},
				// Same convention: an empty leaf set clears the key.
				BackendWriteOp::WriteViableLeaves(leaves) => {
					let leaves: LeafEntrySet = leaves.into();
					if leaves.inner.is_empty() {
						tx.delete(self.config.col_data, &LEAVES_KEY[..]);
					} else {
						tx.put_vec(self.config.col_data, &LEAVES_KEY[..], leaves.encode());
					}
				},
				// Same convention: an empty stagnant list clears the key.
				BackendWriteOp::WriteStagnantAt(timestamp, stagnant_at) => {
					let timestamp: Timestamp = timestamp.into();
					if stagnant_at.is_empty() {
						tx.delete(self.config.col_data, &stagnant_at_key(timestamp));
					} else {
						tx.put_vec(
							self.config.col_data,
							&stagnant_at_key(timestamp),
							stagnant_at.encode(),
						);
					}
				},
				BackendWriteOp::DeleteBlocksByNumber(block_number) => {
					tx.delete(self.config.col_data, &block_height_key(block_number));
				},
				BackendWriteOp::DeleteBlockEntry(hash) => {
					tx.delete(self.config.col_data, &block_entry_key(&hash));
				},
				BackendWriteOp::DeleteStagnantAt(timestamp) => {
					let timestamp: Timestamp = timestamp.into();
					tx.delete(self.config.col_data, &stagnant_at_key(timestamp));
				},
			}
		}

		self.inner.write(tx).map_err(Into::into)
	}
}
338
339fn load_decode<D: Decode>(
340	db: &dyn Database,
341	col_data: u32,
342	key: &[u8],
343) -> Result<Option<D>, Error> {
344	match db.get(col_data, key)? {
345		None => Ok(None),
346		Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into),
347	}
348}
349
350fn block_entry_key(hash: &Hash) -> [u8; 14 + 32] {
351	let mut key = [0; 14 + 32];
352	key[..14].copy_from_slice(BLOCK_ENTRY_PREFIX);
353	hash.using_encoded(|s| key[14..].copy_from_slice(s));
354	key
355}
356
357fn block_height_key(number: BlockNumber) -> [u8; 15 + 4] {
358	let mut key = [0; 15 + 4];
359	key[..15].copy_from_slice(BLOCK_HEIGHT_PREFIX);
360	key[15..].copy_from_slice(&number.to_be_bytes());
361	key
362}
363
364fn stagnant_at_key(timestamp: Timestamp) -> [u8; 14 + 8] {
365	let mut key = [0; 14 + 8];
366	key[..14].copy_from_slice(STAGNANT_AT_PREFIX);
367	key[14..].copy_from_slice(&timestamp.to_be_bytes());
368	key
369}
370
371fn decode_block_height_key(key: &[u8]) -> Option<BlockNumber> {
372	if key.len() != 15 + 4 {
373		return None
374	}
375	if !key.starts_with(BLOCK_HEIGHT_PREFIX) {
376		return None
377	}
378
379	let mut bytes = [0; 4];
380	bytes.copy_from_slice(&key[15..]);
381	Some(BlockNumber::from_be_bytes(bytes))
382}
383
384fn decode_stagnant_at_key(key: &[u8]) -> Option<Timestamp> {
385	if key.len() != 14 + 8 {
386		return None
387	}
388	if !key.starts_with(STAGNANT_AT_PREFIX) {
389		return None
390	}
391
392	let mut bytes = [0; 8];
393	bytes.copy_from_slice(&key[14..]);
394	Some(Timestamp::from_be_bytes(bytes))
395}
396
#[cfg(test)]
mod tests {
	use super::*;

	// Note: the redundant inner `#[cfg(test)]` on this helper was removed —
	// the enclosing module is already gated on `cfg(test)`.
	/// Construct a fresh single-column in-memory database for a test.
	fn test_db() -> Arc<dyn Database> {
		let db = kvdb_memorydb::create(1);
		let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[0]);
		Arc::new(db)
	}

	#[test]
	fn block_height_key_decodes() {
		let key = block_height_key(5);
		assert_eq!(decode_block_height_key(&key), Some(5));
	}

	#[test]
	fn stagnant_at_key_decodes() {
		let key = stagnant_at_key(5);
		assert_eq!(decode_stagnant_at_key(&key), Some(5));
	}

	// Byte-wise key ordering must match numeric ordering so that prefix
	// iteration yields ascending block numbers.
	#[test]
	fn lower_block_height_key_lesser() {
		for i in 0..256 {
			for j in 1..=256 {
				let key_a = block_height_key(i);
				let key_b = block_height_key(i + j);

				assert!(key_a < key_b);
			}
		}
	}

	// Same ordering property for stagnant-at timestamps.
	#[test]
	fn lower_stagnant_at_key_lesser() {
		for i in 0..256 {
			for j in 1..=256 {
				let key_a = stagnant_at_key(i);
				let key_b = stagnant_at_key(i + j);

				assert!(key_a < key_b);
			}
		}
	}

	// A written block entry round-trips through the backend unchanged.
	#[test]
	fn write_read_block_entry() {
		let db = test_db();
		let config = Config { col_data: 0 };

		let mut backend = DbBackend::new(db, config);

		let block_entry = BlockEntry {
			block_hash: Hash::repeat_byte(1),
			block_number: 1,
			parent_hash: Hash::repeat_byte(0),
			children: vec![],
			viability: ViabilityCriteria {
				earliest_unviable_ancestor: None,
				explicitly_reverted: false,
				approval: Approval::Unapproved,
			},
			weight: 100,
		};

		backend
			.write(vec![BackendWriteOp::WriteBlockEntry(block_entry.clone().into())])
			.unwrap();

		assert_eq!(
			backend.load_block_entry(&block_entry.block_hash).unwrap().map(BlockEntry::from),
			Some(block_entry),
		);
	}

	// Deleting a block entry removes it from subsequent loads.
	#[test]
	fn delete_block_entry() {
		let db = test_db();
		let config = Config { col_data: 0 };

		let mut backend = DbBackend::new(db, config);

		let block_entry = BlockEntry {
			block_hash: Hash::repeat_byte(1),
			block_number: 1,
			parent_hash: Hash::repeat_byte(0),
			children: vec![],
			viability: ViabilityCriteria {
				earliest_unviable_ancestor: None,
				explicitly_reverted: false,
				approval: Approval::Unapproved,
			},
			weight: 100,
		};

		backend
			.write(vec![BackendWriteOp::WriteBlockEntry(block_entry.clone().into())])
			.unwrap();

		backend
			.write(vec![BackendWriteOp::DeleteBlockEntry(block_entry.block_hash)])
			.unwrap();

		assert!(backend.load_block_entry(&block_entry.block_hash).unwrap().is_none());
	}

	// The first block number tracks deletions, whether expressed as an
	// empty write or an explicit delete.
	#[test]
	fn earliest_block_number() {
		let db = test_db();
		let config = Config { col_data: 0 };

		let mut backend = DbBackend::new(db, config);

		assert!(backend.load_first_block_number().unwrap().is_none());

		backend
			.write(vec![
				BackendWriteOp::WriteBlocksByNumber(2, vec![Hash::repeat_byte(0)]),
				BackendWriteOp::WriteBlocksByNumber(5, vec![Hash::repeat_byte(0)]),
				BackendWriteOp::WriteBlocksByNumber(10, vec![Hash::repeat_byte(0)]),
			])
			.unwrap();

		assert_eq!(backend.load_first_block_number().unwrap(), Some(2));

		backend
			.write(vec![
				BackendWriteOp::WriteBlocksByNumber(2, vec![]),
				BackendWriteOp::DeleteBlocksByNumber(5),
			])
			.unwrap();

		assert_eq!(backend.load_first_block_number().unwrap(), Some(10));
	}

	#[test]
	fn stagnant_at_up_to() {
		let db = test_db();
		let config = Config { col_data: 0 };

		let mut backend = DbBackend::new(db, config);

		// An empty database yields no stagnant entries.
		assert!(backend
			.load_stagnant_at_up_to(Timestamp::MAX, usize::MAX)
			.unwrap()
			.is_empty());

		backend
			.write(vec![
				BackendWriteOp::WriteStagnantAt(2, vec![Hash::repeat_byte(1)]),
				BackendWriteOp::WriteStagnantAt(5, vec![Hash::repeat_byte(2)]),
				BackendWriteOp::WriteStagnantAt(10, vec![Hash::repeat_byte(3)]),
			])
			.unwrap();

		assert_eq!(
			backend.load_stagnant_at_up_to(Timestamp::MAX, usize::MAX).unwrap(),
			vec![
				(2, vec![Hash::repeat_byte(1)]),
				(5, vec![Hash::repeat_byte(2)]),
				(10, vec![Hash::repeat_byte(3)]),
			]
		);

		// `up_to` is inclusive.
		assert_eq!(
			backend.load_stagnant_at_up_to(10, usize::MAX).unwrap(),
			vec![
				(2, vec![Hash::repeat_byte(1)]),
				(5, vec![Hash::repeat_byte(2)]),
				(10, vec![Hash::repeat_byte(3)]),
			]
		);

		assert_eq!(
			backend.load_stagnant_at_up_to(9, usize::MAX).unwrap(),
			vec![(2, vec![Hash::repeat_byte(1)]), (5, vec![Hash::repeat_byte(2)]),]
		);

		// `max_elements` caps the number of returned pairs.
		assert_eq!(
			backend.load_stagnant_at_up_to(9, 1).unwrap(),
			vec![(2, vec![Hash::repeat_byte(1)]),]
		);

		backend.write(vec![BackendWriteOp::DeleteStagnantAt(2)]).unwrap();

		assert_eq!(
			backend.load_stagnant_at_up_to(5, usize::MAX).unwrap(),
			vec![(5, vec![Hash::repeat_byte(2)]),]
		);

		// Writing an empty list behaves like a delete.
		backend.write(vec![BackendWriteOp::WriteStagnantAt(5, vec![])]).unwrap();

		assert_eq!(
			backend.load_stagnant_at_up_to(10, usize::MAX).unwrap(),
			vec![(10, vec![Hash::repeat_byte(3)]),]
		);
	}

	#[test]
	fn write_read_blocks_at_height() {
		let db = test_db();
		let config = Config { col_data: 0 };

		let mut backend = DbBackend::new(db, config);

		backend
			.write(vec![
				BackendWriteOp::WriteBlocksByNumber(2, vec![Hash::repeat_byte(1)]),
				BackendWriteOp::WriteBlocksByNumber(5, vec![Hash::repeat_byte(2)]),
				BackendWriteOp::WriteBlocksByNumber(10, vec![Hash::repeat_byte(3)]),
			])
			.unwrap();

		assert_eq!(backend.load_blocks_by_number(2).unwrap(), vec![Hash::repeat_byte(1)]);

		assert_eq!(backend.load_blocks_by_number(3).unwrap(), vec![]);

		backend
			.write(vec![
				BackendWriteOp::WriteBlocksByNumber(2, vec![]),
				BackendWriteOp::DeleteBlocksByNumber(5),
			])
			.unwrap();

		assert_eq!(backend.load_blocks_by_number(2).unwrap(), vec![]);

		assert_eq!(backend.load_blocks_by_number(5).unwrap(), vec![]);

		assert_eq!(backend.load_blocks_by_number(10).unwrap(), vec![Hash::repeat_byte(3)]);
	}
}
630}