// frame_benchmarking_cli/storage/read.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

18use codec::Encode;
19use frame_storage_access_test_runtime::StorageAccessParams;
20use log::{debug, info};
21use rand::prelude::*;
22use sc_cli::{Error, Result};
23use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
24use sp_api::CallApiAt;
25use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT};
26use sp_state_machine::{backend::AsTrieBackend, Backend};
27use sp_storage::ChildInfo;
28use sp_trie::StorageProof;
29use std::{fmt::Debug, sync::Arc, time::Instant};
30
31use super::{cmd::StorageCmd, get_wasm_module, MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION};
32use crate::shared::{new_rng, BenchRecord};
33
34impl StorageCmd {
35	/// Benchmarks the time it takes to read a single Storage item.
36	/// Uses the latest state that is available for the given client.
37	pub(crate) fn bench_read<B, BA, C>(
38		&self,
39		client: Arc<C>,
40		_shared_trie_cache: Option<sp_trie::cache::SharedTrieCache<HashingFor<B>>>,
41	) -> Result<BenchRecord>
42	where
43		C: UsageProvider<B> + StorageProvider<B, BA> + CallApiAt<B>,
44		B: BlockT + Debug,
45		BA: ClientBackend<B>,
46		<<B as BlockT>::Header as HeaderT>::Number: From<u32>,
47	{
48		if self.params.is_validate_block_mode() && self.params.disable_pov_recorder {
49			return Err("PoV recorder must be activated to provide a storage proof for block validation at runtime. Remove `--disable-pov-recorder` from the command line.".into())
50		}
51		if self.params.is_validate_block_mode() &&
52			self.params.batch_size > MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION
53		{
54			return Err(format!("Batch size is too large. This may cause problems with runtime memory allocation. Better set `--batch-size {}` or less.", MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION).into())
55		}
56
57		let mut record = BenchRecord::default();
58		let best_hash = client.usage_info().chain.best_hash;
59
60		info!("Preparing keys from block {}", best_hash);
61		// Load all keys and randomly shuffle them.
62		let mut keys: Vec<_> = client.storage_keys(best_hash, None, None)?.collect();
63		let (mut rng, _) = new_rng(None);
64		keys.shuffle(&mut rng);
65		if keys.is_empty() {
66			return Err("Can't process benchmarking with empty storage".into())
67		}
68
69		let mut child_nodes = Vec::new();
70		// Interesting part here:
71		// Read all the keys in the database and measure the time it takes to access each.
72		info!("Reading {} keys", keys.len());
73
74		// Read using the same TrieBackend and recorder for up to `batch_size` keys.
75		// This would allow us to measure the amortized cost of reading a key.
76		let state = client
77			.state_at(best_hash)
78			.map_err(|_err| Error::Input("State not found".into()))?;
79		// We reassign the backend and recorder for every batch size.
80		// Using a new recorder for every read vs using the same for the entire batch
81		// produces significant different results. Since in the real use case we use a
82		// single recorder per block, simulate the same behavior by creating a new
83		// recorder every batch size, so that the amortized cost of reading a key is
84		// measured in conditions closer to the real world.
85		let (mut backend, mut recorder) = self.create_backend::<B, C>(&state);
86
87		let mut read_in_batch = 0;
88		let mut on_validation_batch = vec![];
89		let mut on_validation_size = 0;
90
91		let last_key = keys.last().expect("Checked above to be non-empty");
92		for key in keys.as_slice() {
93			match (self.params.include_child_trees, self.is_child_key(key.clone().0)) {
94				(true, Some(info)) => {
95					// child tree key
96					for ck in client.child_storage_keys(best_hash, info.clone(), None, None)? {
97						child_nodes.push((ck, info.clone()));
98					}
99				},
100				_ => {
101					// regular key
102					on_validation_batch.push((key.0.clone(), None));
103					let start = Instant::now();
104					let v = backend
105						.storage(key.0.as_ref())
106						.expect("Checked above to exist")
107						.ok_or("Value unexpectedly empty")?;
108					on_validation_size += v.len();
109					if self.params.is_import_block_mode() {
110						record.append(v.len(), start.elapsed())?;
111					}
112				},
113			}
114			read_in_batch += 1;
115			let is_batch_full = read_in_batch >= self.params.batch_size || key == last_key;
116
117			// Read keys on block validation
118			if is_batch_full && self.params.is_validate_block_mode() {
119				let root = backend.root();
120				let storage_proof = recorder
121					.clone()
122					.map(|r| r.drain_storage_proof())
123					.expect("Storage proof must exist for block validation");
124				let elapsed = measure_block_validation::<B>(
125					*root,
126					storage_proof,
127					on_validation_batch.clone(),
128					self.params.validate_block_rounds,
129				);
130				record.append(on_validation_size / on_validation_batch.len(), elapsed)?;
131
132				on_validation_batch = vec![];
133				on_validation_size = 0;
134			}
135
136			// Reload recorder
137			if is_batch_full {
138				(backend, recorder) = self.create_backend::<B, C>(&state);
139				read_in_batch = 0;
140			}
141		}
142
143		if self.params.include_child_trees && !child_nodes.is_empty() {
144			child_nodes.shuffle(&mut rng);
145
146			info!("Reading {} child keys", child_nodes.len());
147			let (last_child_key, last_child_info) =
148				child_nodes.last().expect("Checked above to be non-empty");
149			for (key, info) in child_nodes.as_slice() {
150				on_validation_batch.push((key.0.clone(), Some(info.clone())));
151				let start = Instant::now();
152				let v = backend
153					.child_storage(info, key.0.as_ref())
154					.expect("Checked above to exist")
155					.ok_or("Value unexpectedly empty")?;
156				on_validation_size += v.len();
157				if self.params.is_import_block_mode() {
158					record.append(v.len(), start.elapsed())?;
159				}
160				read_in_batch += 1;
161				let is_batch_full = read_in_batch >= self.params.batch_size ||
162					(last_child_key == key && last_child_info == info);
163
164				// Read child keys on block validation
165				if is_batch_full && self.params.is_validate_block_mode() {
166					let root = backend.root();
167					let storage_proof = recorder
168						.clone()
169						.map(|r| r.drain_storage_proof())
170						.expect("Storage proof must exist for block validation");
171					let elapsed = measure_block_validation::<B>(
172						*root,
173						storage_proof,
174						on_validation_batch.clone(),
175						self.params.validate_block_rounds,
176					);
177					record.append(on_validation_size / on_validation_batch.len(), elapsed)?;
178
179					on_validation_batch = vec![];
180					on_validation_size = 0;
181				}
182
183				// Reload recorder
184				if is_batch_full {
185					(backend, recorder) = self.create_backend::<B, C>(&state);
186					read_in_batch = 0;
187				}
188			}
189		}
190
191		Ok(record)
192	}
193
194	fn create_backend<'a, B, C>(
195		&self,
196		state: &'a C::StateBackend,
197	) -> (
198		sp_state_machine::TrieBackend<
199			&'a <C::StateBackend as AsTrieBackend<HashingFor<B>>>::TrieBackendStorage,
200			HashingFor<B>,
201			&'a sp_trie::cache::LocalTrieCache<HashingFor<B>>,
202		>,
203		Option<sp_trie::recorder::Recorder<HashingFor<B>>>,
204	)
205	where
206		C: CallApiAt<B>,
207		B: BlockT + Debug,
208	{
209		let recorder = (!self.params.disable_pov_recorder).then(|| Default::default());
210		let backend = sp_state_machine::TrieBackendBuilder::wrap(state.as_trie_backend())
211			.with_optional_recorder(recorder.clone())
212			.build();
213
214		(backend, recorder)
215	}
216}
217
218fn measure_block_validation<B: BlockT + Debug>(
219	root: B::Hash,
220	storage_proof: StorageProof,
221	on_validation_batch: Vec<(Vec<u8>, Option<ChildInfo>)>,
222	rounds: u32,
223) -> std::time::Duration {
224	debug!(
225		"POV: len {:?} {:?}",
226		storage_proof.len(),
227		storage_proof.clone().encoded_compact_size::<HashingFor<B>>(root)
228	);
229	let batch_size = on_validation_batch.len();
230	let wasm_module = get_wasm_module();
231	let mut instance = wasm_module.new_instance().expect("Failed to create wasm instance");
232	let params = StorageAccessParams::<B>::new_read(root, storage_proof, on_validation_batch);
233	let dry_run_encoded = params.as_dry_run().encode();
234	let encoded = params.encode();
235
236	let mut durations_in_nanos = Vec::new();
237
238	for i in 1..=rounds {
239		info!("validate_block with {} keys, round {}/{}", batch_size, i, rounds);
240
241		// Dry run to get the time it takes without storage access
242		let dry_run_start = Instant::now();
243		instance
244			.call_export("validate_block", &dry_run_encoded)
245			.expect("Failed to call validate_block");
246		let dry_run_elapsed = dry_run_start.elapsed();
247		debug!("validate_block dry-run time {:?}", dry_run_elapsed);
248
249		let start = Instant::now();
250		instance
251			.call_export("validate_block", &encoded)
252			.expect("Failed to call validate_block");
253		let elapsed = start.elapsed();
254		debug!("validate_block time {:?}", elapsed);
255
256		durations_in_nanos
257			.push(elapsed.saturating_sub(dry_run_elapsed).as_nanos() as u64 / batch_size as u64);
258	}
259
260	std::time::Duration::from_nanos(
261		durations_in_nanos.iter().sum::<u64>() / durations_in_nanos.len() as u64,
262	)
263}