pallet_paged_list/paged_list.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Paged storage list.

// links are better than no links - even when they refer to private stuff.
#![allow(rustdoc::private_intra_doc_links)]
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_docs)]
#![deny(unsafe_code)]

use alloc::vec::Vec;
use codec::{Decode, Encode, EncodeLike, FullCodec};
use core::marker::PhantomData;
use frame::{
	deps::sp_io,
	prelude::*,
	runtime::prelude::storage::{StorageAppender, StorageList, StoragePrefixedContainer},
	traits::{Get, StorageInstance},
};

/// The index of a [`Page`] in a [`StoragePagedList`].
pub type PageIndex = u32;
/// The index of a value inside a [`Page`].
pub type ValueIndex = u32;

/// A paginated storage list.
///
/// # Motivation
///
/// This type replaces `StorageValue<Vec<V>>` in situations where only iteration and appending are
/// needed. There are a few places where this is the case. A paginated structure reduces the memory
/// usage when a storage transaction needs to be rolled back. The main motivation is therefore a
/// reduction of runtime memory on storage transaction rollback. It should be configured such that
/// the size of a page is about 64KiB. This can only be ensured when `V` implements `MaxEncodedLen`.
///
/// # Implementation
///
/// The metadata of this struct is stored in [`StoragePagedListMeta`]. The data is stored in
/// [`Page`]s.
///
/// Each [`Page`] holds at most `ValuesPerNewPage` values in its `values` vector. The last page is
/// the only one that could have fewer than `ValuesPerNewPage` values.
/// **Iteration** happens by starting
/// at [`first_page`][StoragePagedListMeta::first_page]/
/// [`first_value_offset`][StoragePagedListMeta::first_value_offset] and incrementing these indices
/// as long as there are elements in the page and there are pages in storage. All elements of a page
/// are loaded once a page is read from storage. Iteration then happens on the cached elements. This
/// reduces the number of storage `read` calls on the overlay. **Appending** to the list happens by
/// appending to the last page by utilizing
/// [`storage::append`](frame::deps::sp_io::storage::append). This directly extends the
/// `values` vector of the page without loading the whole vector from storage. A new
/// page is instantiated once [`Page::next`] overflows `ValuesPerNewPage`. Its vector will also be
/// created through [`storage::append`](frame::deps::sp_io::storage::append). **Draining** advances
/// the internal indices identically to iteration. It additionally persists the increments to
/// storage and thereby 'drains' elements. Completely drained pages are deleted from storage.
///
/// # Further Observations
///
/// - The encoded layout of a page is exactly its [`Page::values`]. The [`Page::next`] offset is
///   stored in the [`StoragePagedListMeta`] instead. There is no particular reason for this,
///   besides having all management state handy in one location.
/// - The PoV complexity of iterating compared to a `StorageValue<Vec<V>>` is improved for
///   "shortish" iterations and worse for total iteration. The append complexity is identical in the
///   asymptotic case when using an `Appender`, and worse in all other cases, for example when
///   appending just one value.
/// - It does incur a read overhead on the host side as compared to a `StorageValue<Vec<V>>`.
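///
/// # Example
///
/// A minimal usage sketch, assuming the `mock` prelude at the bottom of this file and a
/// `TestExternalities` environment as used by the tests:
///
/// ```ignore
/// use frame::testing_prelude::*;
///
/// TestExternalities::default().execute_with(|| {
/// 	// Append, iterate, and finally drain the list.
/// 	List::append_many(0..10u32);
/// 	assert_eq!(List::iter().collect::<Vec<_>>(), (0..10).collect::<Vec<_>>());
/// 	assert_eq!(List::drain().collect::<Vec<_>>(), (0..10).collect::<Vec<_>>());
/// 	// Draining removed everything.
/// 	assert_eq!(List::iter().count(), 0);
/// });
/// ```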
pub struct StoragePagedList<Prefix, Value, ValuesPerNewPage> {
	_phantom: PhantomData<(Prefix, Value, ValuesPerNewPage)>,
}

/// The state of a [`StoragePagedList`].
///
/// This struct doubles as [`frame::deps::frame_support::storage::StorageList::Appender`].
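///
/// # Example
///
/// A sketch of appending through the `Appender`, assuming the `mock` prelude at the bottom of
/// this file (where `ValuesPerNewPage` is 5) and a `TestExternalities` environment:
///
/// ```ignore
/// let mut appender = List::appender();
/// for i in 0..9u32 {
/// 	appender.append(i);
/// }
/// // Nine values at five per page: page 0 is full and page 1 holds the remaining four.
/// let meta = List::read_meta();
/// assert_eq!((meta.last_page, meta.last_page_len), (1, 4));
/// ```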
#[derive(
	Encode, Decode, CloneNoBound, PartialEqNoBound, EqNoBound, DebugNoBound, DefaultNoBound,
)]
// todo ignore scale bounds
pub struct StoragePagedListMeta<Prefix, Value, ValuesPerNewPage> {
	/// The first page that could contain a value.
	///
	/// Can be >0 when pages were deleted.
	pub first_page: PageIndex,
	/// The first index inside `first_page` that could contain a value.
	///
	/// Can be >0 when values were deleted.
	pub first_value_offset: ValueIndex,

	/// The last page that could contain data.
	///
	/// Appending starts at this page index.
	pub last_page: PageIndex,
	/// The last value inside `last_page` that could contain a value.
	///
	/// Appending starts at this index. If the page does not hold a value at this index, then the
	/// whole list is empty. The only case where this can happen is when both are `0`.
	pub last_page_len: ValueIndex,

	_phantom: PhantomData<(Prefix, Value, ValuesPerNewPage)>,
}

impl<Prefix, Value, ValuesPerNewPage> StorageAppender<Value>
	for StoragePagedListMeta<Prefix, Value, ValuesPerNewPage>
where
	Prefix: StorageInstance,
	Value: FullCodec,
	ValuesPerNewPage: Get<u32>,
{
	fn append<EncodeLikeValue>(&mut self, item: EncodeLikeValue)
	where
		EncodeLikeValue: EncodeLike<Value>,
	{
		self.append_one(item);
	}
}

impl<Prefix, Value, ValuesPerNewPage> StoragePagedListMeta<Prefix, Value, ValuesPerNewPage>
where
	Prefix: StorageInstance,
	Value: FullCodec,
	ValuesPerNewPage: Get<u32>,
{
	/// Read the meta state of the list from storage, if it exists.
	pub fn from_storage() -> Option<Self> {
		let key = Self::key();

		sp_io::storage::get(&key).and_then(|raw| Self::decode(&mut &raw[..]).ok())
	}

	/// The storage key under which this meta state is stored.
	pub fn key() -> Vec<u8> {
		meta_key::<Prefix>()
	}

	/// Append a single `item` to the list, rolling over to a new page once the current last page
	/// is full.
	pub fn append_one<EncodeLikeValue>(&mut self, item: EncodeLikeValue)
	where
		EncodeLikeValue: EncodeLike<Value>,
	{
		// Note: we use >= here in case someone decreased it in a runtime upgrade.
		if self.last_page_len >= ValuesPerNewPage::get() {
			self.last_page.saturating_inc();
			self.last_page_len = 0;
		}
		let key = page_key::<Prefix>(self.last_page);
		self.last_page_len.saturating_inc();
		sp_io::storage::append(&key, item.encode());
		self.store();
	}

	/// Persist this meta state to storage.
	pub fn store(&self) {
		let key = Self::key();
		self.using_encoded(|enc| sp_io::storage::set(&key, enc));
	}

	/// Reset this meta state to its default and delete it from storage.
	pub fn reset(&mut self) {
		*self = Default::default();
		Self::delete();
	}

	/// Delete the meta state from storage.
	pub fn delete() {
		sp_io::storage::clear(&Self::key());
	}
}

/// A page that was decoded from storage and caches its values.
pub struct Page<V> {
	/// The index of the page.
	index: PageIndex,
	/// The remaining values of the page, to be drained by [`Page::next`].
	values: core::iter::Skip<alloc::vec::IntoIter<V>>,
}

impl<V: FullCodec> Page<V> {
	/// Read the page with `index` from storage and assume the first value at `value_index`.
	pub fn from_storage<Prefix: StorageInstance>(
		index: PageIndex,
		value_index: ValueIndex,
	) -> Option<Self> {
		let key = page_key::<Prefix>(index);
		let values = sp_io::storage::get(&key)
			.and_then(|raw| alloc::vec::Vec::<V>::decode(&mut &raw[..]).ok())?;
		if values.is_empty() {
			// Don't create empty pages.
			return None
		}
		let values = values.into_iter().skip(value_index as usize);

		Some(Self { index, values })
	}

	/// Whether no more values can be read from this page.
	pub fn is_eof(&self) -> bool {
		self.values.len() == 0
	}

	/// Delete this page from storage.
	pub fn delete<Prefix: StorageInstance>(&self) {
		delete_page::<Prefix>(self.index);
	}
}

/// Delete a page with `index` from storage.
// Does not live under `Page` since it does not require the `Value` generic.
pub(crate) fn delete_page<Prefix: StorageInstance>(index: PageIndex) {
	let key = page_key::<Prefix>(index);
	sp_io::storage::clear(&key);
}

/// Storage key of a page with `index`.
// Does not live under `Page` since it does not require the `Value` generic.
pub(crate) fn page_key<Prefix: StorageInstance>(index: PageIndex) -> Vec<u8> {
	(StoragePagedListPrefix::<Prefix>::final_prefix(), b"page", index).encode()
}

/// Storage key of the list metadata.
pub(crate) fn meta_key<Prefix: StorageInstance>() -> Vec<u8> {
	(StoragePagedListPrefix::<Prefix>::final_prefix(), b"meta").encode()
}
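
// Key layout sketch (cf. the `page_key_correct` and `meta_key_correct` tests below), where
// `StoragePagedListPrefix::final_prefix()` is `twox128(pallet_prefix) ++ twox128(STORAGE_PREFIX)`:
//
//   page key: final_prefix() ++ b"page"  ++ index.encode()
//             (32 bytes)       (4 bytes)   (4 bytes, u32 little-endian)
//   meta key: final_prefix() ++ b"meta"
//             (32 bytes)       (4 bytes)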

impl<V> Iterator for Page<V> {
	type Item = V;

	fn next(&mut self) -> Option<Self::Item> {
		self.values.next()
	}
}

/// Iterates over values of a [`StoragePagedList`].
///
/// Can optionally drain the iterated values.
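///
/// A draining iterator removes only the values it actually yields; a partial drain leaves the
/// rest of the list in storage. A sketch, assuming the `mock` prelude at the bottom of this file
/// and a `TestExternalities` environment:
///
/// ```ignore
/// List::append_many(0..100u32);
/// assert_eq!(List::drain().take(50).collect::<Vec<_>>(), (0..50).collect::<Vec<_>>());
/// // The second half is still in storage.
/// assert_eq!(List::iter().collect::<Vec<_>>(), (50..100).collect::<Vec<_>>());
/// ```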
pub struct StoragePagedListIterator<Prefix, Value, ValuesPerNewPage> {
	// Design: we put the Page into the iterator to have fewer storage look-ups. Yes, these
	// look-ups would be cached anyway, but bugging the overlay on each `.next` call still seems
	// like a poorer trade-off than caching it in the iterator directly. Iterating and modifying
	// are not allowed at the same time anyway, just like with maps. Note: if Page is empty then
	// the iterator did not find any data upon setup or ran out of pages.
	page: Option<Page<Value>>,
	drain: bool,
	meta: StoragePagedListMeta<Prefix, Value, ValuesPerNewPage>,
}

impl<Prefix, Value, ValuesPerNewPage> StoragePagedListIterator<Prefix, Value, ValuesPerNewPage>
where
	Prefix: StorageInstance,
	Value: FullCodec,
	ValuesPerNewPage: Get<u32>,
{
	/// Read self from the storage.
	pub fn from_meta(
		meta: StoragePagedListMeta<Prefix, Value, ValuesPerNewPage>,
		drain: bool,
	) -> Self {
		let page = Page::<Value>::from_storage::<Prefix>(meta.first_page, meta.first_value_offset);
		Self { page, drain, meta }
	}
}

impl<Prefix, Value, ValuesPerNewPage> Iterator
	for StoragePagedListIterator<Prefix, Value, ValuesPerNewPage>
where
	Prefix: StorageInstance,
	Value: FullCodec,
	ValuesPerNewPage: Get<u32>,
{
	type Item = Value;

	fn next(&mut self) -> Option<Self::Item> {
		let page = self.page.as_mut()?;
		let value = match page.next() {
			Some(value) => value,
			None => {
				defensive!("There are no empty pages in storage; nuking the list");
				self.meta.reset();
				self.page = None;
				return None
			},
		};

		if page.is_eof() {
			if self.drain {
				page.delete::<Prefix>();
				self.meta.first_value_offset = 0;
				self.meta.first_page.saturating_inc();
			}

			debug_assert!(!self.drain || self.meta.first_page == page.index + 1);
			self.page = Page::from_storage::<Prefix>(page.index.saturating_add(1), 0);
			if self.drain {
				if self.page.is_none() {
					self.meta.reset();
				} else {
					self.meta.store();
				}
			}
		} else {
			if self.drain {
				self.meta.first_value_offset.saturating_inc();
				self.meta.store();
			}
		}
		Some(value)
	}
}

impl<Prefix, Value, ValuesPerNewPage> StorageList<Value>
	for StoragePagedList<Prefix, Value, ValuesPerNewPage>
where
	Prefix: StorageInstance,
	Value: FullCodec,
	ValuesPerNewPage: Get<u32>,
{
	type Iterator = StoragePagedListIterator<Prefix, Value, ValuesPerNewPage>;
	type Appender = StoragePagedListMeta<Prefix, Value, ValuesPerNewPage>;

	fn iter() -> Self::Iterator {
		StoragePagedListIterator::from_meta(Self::read_meta(), false)
	}

	fn drain() -> Self::Iterator {
		StoragePagedListIterator::from_meta(Self::read_meta(), true)
	}

	fn appender() -> Self::Appender {
		// Resolves to the inherent `appender` below (inherent associated functions take
		// precedence over trait methods), so this is not a recursive call.
		Self::appender()
	}
}

impl<Prefix, Value, ValuesPerNewPage> StoragePagedList<Prefix, Value, ValuesPerNewPage>
where
	Prefix: StorageInstance,
	Value: FullCodec,
	ValuesPerNewPage: Get<u32>,
{
	fn read_meta() -> StoragePagedListMeta<Prefix, Value, ValuesPerNewPage> {
		// Use default here to not require a setup migration.
		StoragePagedListMeta::from_storage().unwrap_or_default()
	}

	/// Provides a fast appender.
	///
	/// The list should not be modified while appending. Also don't call it recursively.
	fn appender() -> StoragePagedListMeta<Prefix, Value, ValuesPerNewPage> {
		Self::read_meta()
	}

	/// Return the elements of the list.
	#[cfg(test)]
	fn as_vec() -> Vec<Value> {
		<Self as StorageList<_>>::iter().collect()
	}

	/// Return and remove the elements of the list.
	#[cfg(test)]
	fn as_drained_vec() -> Vec<Value> {
		<Self as StorageList<_>>::drain().collect()
	}
}

/// Provides the final prefix for a [`StoragePagedList`].
///
/// It solely exists so that the iterator and meta struct can re-use it without any of the unused
/// generics bleeding through. Otherwise, with only the `StoragePrefixedContainer` implementation
/// on the list itself, the iterator and metadata would need to carry *all* generics, even the
/// ones that are completely useless for prefix calculation.
struct StoragePagedListPrefix<Prefix>(PhantomData<Prefix>);

impl<Prefix> StoragePrefixedContainer for StoragePagedListPrefix<Prefix>
where
	Prefix: StorageInstance,
{
	fn pallet_prefix() -> &'static [u8] {
		Prefix::pallet_prefix().as_bytes()
	}

	fn storage_prefix() -> &'static [u8] {
		Prefix::STORAGE_PREFIX.as_bytes()
	}
}

impl<Prefix, Value, ValuesPerNewPage> StoragePrefixedContainer
	for StoragePagedList<Prefix, Value, ValuesPerNewPage>
where
	Prefix: StorageInstance,
	Value: FullCodec,
	ValuesPerNewPage: Get<u32>,
{
	fn pallet_prefix() -> &'static [u8] {
		StoragePagedListPrefix::<Prefix>::pallet_prefix()
	}

	fn storage_prefix() -> &'static [u8] {
		StoragePagedListPrefix::<Prefix>::storage_prefix()
	}
}

/// Prelude for (doc)tests.
#[cfg(feature = "std")]
#[allow(dead_code)]
pub(crate) mod mock {
	pub use super::*;
	use frame::testing_prelude::*;

	parameter_types! {
		pub const ValuesPerNewPage: u32 = 5;
		pub const MaxPages: Option<u32> = Some(20);
	}

	pub struct Prefix;
	impl StorageInstance for Prefix {
		fn pallet_prefix() -> &'static str {
			"test"
		}
		const STORAGE_PREFIX: &'static str = "foo";
	}

	pub type List = StoragePagedList<Prefix, u32, ValuesPerNewPage>;
}

#[cfg(test)]
mod tests {
	use super::mock::*;
	use frame::testing_prelude::*;

	#[test]
	fn append_works() {
		TestExternalities::default().execute_with(|| {
			List::append_many(0..1000);
			assert_eq!(List::as_vec(), (0..1000).collect::<Vec<_>>());
		});
	}

	/// Draining all works.
	#[test]
	fn simple_drain_works() {
		TestExternalities::default().execute_with(|| {
			let _g = StorageNoopGuard::default(); // All in all a No-Op
			List::append_many(0..1000);

			assert_eq!(List::as_drained_vec(), (0..1000).collect::<Vec<_>>());

			assert_eq!(List::read_meta(), Default::default());

			// all gone
			assert_eq!(List::as_vec(), Vec::<u32>::new());
			// Need to delete the metadata manually.
			StoragePagedListMeta::<Prefix, u32, ValuesPerNewPage>::delete();
		});
	}

	/// Drain half of the elements and iterate the rest.
	#[test]
	fn partial_drain_works() {
		TestExternalities::default().execute_with(|| {
			List::append_many(0..100);

			let vals = List::drain().take(50).collect::<Vec<_>>();
			assert_eq!(vals, (0..50).collect::<Vec<_>>());

			let meta = List::read_meta();
			// Exactly 10 full pages (5 values each) were drained, so the cursor sits at `10/0`.
			assert_eq!((meta.first_page, meta.first_value_offset), (10, 0));

			// 50 gone, 50 to go
			assert_eq!(List::as_vec(), (50..100).collect::<Vec<_>>());
		});
	}

	/// Draining, appending and iterating work together.
	#[test]
	fn drain_append_iter_works() {
		TestExternalities::default().execute_with(|| {
			for r in 1..=100 {
				List::append_many(0..12);
				List::append_many(0..12);

				let dropped = List::drain().take(12).collect::<Vec<_>>();
				assert_eq!(dropped, (0..12).collect::<Vec<_>>());

				assert_eq!(List::as_vec(), (0..12).cycle().take(r * 12).collect::<Vec<_>>());
			}
		});
	}

	/// Pages are removed ASAP.
	#[test]
	fn drain_eager_page_removal() {
		TestExternalities::default().execute_with(|| {
			List::append_many(0..9);

			assert!(sp_io::storage::exists(&page_key::<Prefix>(0)));
			assert!(sp_io::storage::exists(&page_key::<Prefix>(1)));

			assert_eq!(List::drain().take(5).count(), 5);
			// Page 0 is eagerly removed.
			assert!(!sp_io::storage::exists(&page_key::<Prefix>(0)));
			assert!(sp_io::storage::exists(&page_key::<Prefix>(1)));
		});
	}

	/// Appending encodes pages as `Vec`.
	#[test]
	fn append_storage_layout() {
		TestExternalities::default().execute_with(|| {
			List::append_many(0..9);

			let key = page_key::<Prefix>(0);
			let raw = sp_io::storage::get(&key).expect("Page should be present");
			let as_vec = Vec::<u32>::decode(&mut &raw[..]).unwrap();
			assert_eq!(as_vec.len(), 5, "First page contains 5");

			let key = page_key::<Prefix>(1);
			let raw = sp_io::storage::get(&key).expect("Page should be present");
			let as_vec = Vec::<u32>::decode(&mut &raw[..]).unwrap();
			assert_eq!(as_vec.len(), 4, "Second page contains 4");

			let meta = sp_io::storage::get(&meta_key::<Prefix>()).expect("Meta should be present");
			let meta: StoragePagedListMeta<Prefix, u32, ValuesPerNewPage> =
				Decode::decode(&mut &meta[..]).unwrap();
			assert_eq!(meta.first_page, 0);
			assert_eq!(meta.first_value_offset, 0);
			assert_eq!(meta.last_page, 1);
			assert_eq!(meta.last_page_len, 4);

			let page = Page::<u32>::from_storage::<Prefix>(0, 0).unwrap();
			assert_eq!(page.index, 0);
			assert_eq!(page.values.count(), 5);

			let page = Page::<u32>::from_storage::<Prefix>(1, 0).unwrap();
			assert_eq!(page.index, 1);
			assert_eq!(page.values.count(), 4);
		});
	}

	#[test]
	fn page_key_correct() {
		let got = page_key::<Prefix>(0);
		let pallet_prefix = StoragePagedListPrefix::<Prefix>::final_prefix();
		let want = (pallet_prefix, b"page", 0).encode();

		assert_eq!(want.len(), 32 + 4 + 4);
		assert!(want.starts_with(&pallet_prefix[..]));
		assert_eq!(got, want);
	}

	#[test]
	fn meta_key_correct() {
		let got = meta_key::<Prefix>();
		let pallet_prefix = StoragePagedListPrefix::<Prefix>::final_prefix();
		let want = (pallet_prefix, b"meta").encode();

		assert_eq!(want.len(), 32 + 4);
		assert!(want.starts_with(&pallet_prefix[..]));
		assert_eq!(got, want);
	}

	#[test]
	fn peekable_drain_also_deletes() {
		TestExternalities::default().execute_with(|| {
			List::append_many(0..10);

			let mut iter = List::drain().peekable();
			assert_eq!(iter.peek(), Some(&0));
			// `peek` does remove one element...
			assert_eq!(List::iter().count(), 9);
		});
	}
}