frame_benchmarking_cli/storage/read.rs

use codec::Encode;
use frame_storage_access_test_runtime::StorageAccessParams;
use log::{debug, info};
use rand::prelude::*;
use sc_cli::{Error, Result};
use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
use sp_api::CallApiAt;
use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT};
use sp_state_machine::{backend::AsTrieBackend, Backend};
use sp_storage::ChildInfo;
use sp_trie::StorageProof;
use std::{fmt::Debug, sync::Arc, time::Instant};

use super::{cmd::StorageCmd, get_wasm_module, MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION};
use crate::shared::{new_rng, BenchRecord};
impl StorageCmd {
    /// Benchmark the time it takes to read a single storage value.
    ///
    /// Uses the latest state available from the given client.
    pub(crate) fn bench_read<B, BA, C>(
        &self,
        client: Arc<C>,
        _shared_trie_cache: Option<sp_trie::cache::SharedTrieCache<HashingFor<B>>>,
    ) -> Result<BenchRecord>
    where
        C: UsageProvider<B> + StorageProvider<B, BA> + CallApiAt<B>,
        B: BlockT + Debug,
        BA: ClientBackend<B>,
        <<B as BlockT>::Header as HeaderT>::Number: From<u32>,
    {
        if self.params.is_validate_block_mode() && self.params.disable_pov_recorder {
            return Err("PoV recorder must be activated to provide a storage proof for block validation at runtime. Remove `--disable-pov-recorder` from the command line.".into())
        }
        if self.params.is_validate_block_mode() &&
            self.params.batch_size > MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION
        {
            return Err(format!("Batch size is too large. This may cause problems with runtime memory allocation. Better set `--batch-size {}` or less.", MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION).into())
        }

        let mut record = BenchRecord::default();
        let best_hash = client.usage_info().chain.best_hash;

        info!("Preparing keys from block {}", best_hash);
        // Load all top-level keys and shuffle them so they are read in random order.
        let mut keys: Vec<_> = client.storage_keys(best_hash, None, None)?.collect();
        let (mut rng, _) = new_rng(None);
        keys.shuffle(&mut rng);
        if keys.is_empty() {
            return Err("Can't process benchmarking with empty storage".into())
        }

        let mut child_nodes = Vec::new();
        // Read each key from the database and measure how long a single read takes.
        info!("Reading {} keys", keys.len());

        // The trie backend and optional proof recorder are recreated for every batch
        // of up to `batch_size` keys (see the batching logic below).
        let state = client
            .state_at(best_hash)
            .map_err(|_err| Error::Input("State not found".into()))?;
        let (mut backend, mut recorder) = self.create_backend::<B, C>(&state);

        let mut read_in_batch = 0;
        let mut on_validation_batch = vec![];
        let mut on_validation_size = 0;

        let last_key = keys.last().expect("Checked above to be non-empty");
        for key in keys.as_slice() {
            match (self.params.include_child_trees, self.is_child_key(key.clone().0)) {
                (true, Some(info)) => {
                    // Child trie keys are collected here and benchmarked separately below.
                    for ck in client.child_storage_keys(best_hash, info.clone(), None, None)? {
                        child_nodes.push((ck, info.clone()));
                    }
                },
                _ => {
                    // Top-level key: time a single read from the trie backend.
                    on_validation_batch.push((key.0.clone(), None));
                    let start = Instant::now();
                    let v = backend
                        .storage(key.0.as_ref())
                        .expect("Checked above to exist")
                        .ok_or("Value unexpectedly empty")?;
                    on_validation_size += v.len();
                    if self.params.is_import_block_mode() {
                        record.append(v.len(), start.elapsed())?;
                    }
                },
            }
            read_in_batch += 1;
            let is_batch_full = read_in_batch >= self.params.batch_size || key == last_key;

            // In `validate-block` mode, replay the whole batch inside the runtime against
            // the recorded storage proof and record the average time per key.
            if is_batch_full && self.params.is_validate_block_mode() {
                let root = backend.root();
                let storage_proof = recorder
                    .clone()
                    .map(|r| r.drain_storage_proof())
                    .expect("Storage proof must exist for block validation");
                let elapsed = measure_block_validation::<B>(
                    *root,
                    storage_proof,
                    on_validation_batch.clone(),
                    self.params.validate_block_rounds,
                );
                record.append(on_validation_size / on_validation_batch.len(), elapsed)?;

                on_validation_batch = vec![];
                on_validation_size = 0;
            }

            // Start the next batch with a fresh backend and recorder.
            if is_batch_full {
                (backend, recorder) = self.create_backend::<B, C>(&state);
                read_in_batch = 0;
            }
        }

        if self.params.include_child_trees && !child_nodes.is_empty() {
            child_nodes.shuffle(&mut rng);

            info!("Reading {} child keys", child_nodes.len());
            let (last_child_key, last_child_info) =
                child_nodes.last().expect("Checked above to be non-empty");
            for (key, info) in child_nodes.as_slice() {
                on_validation_batch.push((key.0.clone(), Some(info.clone())));
                let start = Instant::now();
                let v = backend
                    .child_storage(info, key.0.as_ref())
                    .expect("Checked above to exist")
                    .ok_or("Value unexpectedly empty")?;
                on_validation_size += v.len();
                if self.params.is_import_block_mode() {
                    record.append(v.len(), start.elapsed())?;
                }
                read_in_batch += 1;
                let is_batch_full = read_in_batch >= self.params.batch_size ||
                    (last_child_key == key && last_child_info == info);

                // Same as for top-level keys: replay the batch inside the runtime when
                // benchmarking block validation.
                if is_batch_full && self.params.is_validate_block_mode() {
                    let root = backend.root();
                    let storage_proof = recorder
                        .clone()
                        .map(|r| r.drain_storage_proof())
                        .expect("Storage proof must exist for block validation");
                    let elapsed = measure_block_validation::<B>(
                        *root,
                        storage_proof,
                        on_validation_batch.clone(),
                        self.params.validate_block_rounds,
                    );
                    record.append(on_validation_size / on_validation_batch.len(), elapsed)?;

                    on_validation_batch = vec![];
                    on_validation_size = 0;
                }

                // Start the next batch with a fresh backend and recorder.
                if is_batch_full {
                    (backend, recorder) = self.create_backend::<B, C>(&state);
                    read_in_batch = 0;
                }
            }
        }

        Ok(record)
    }

    /// Build a trie backend over the given state, optionally wrapping it with a proof
    /// recorder (disabled via `--disable-pov-recorder`).
    fn create_backend<'a, B, C>(
        &self,
        state: &'a C::StateBackend,
    ) -> (
        sp_state_machine::TrieBackend<
            &'a <C::StateBackend as AsTrieBackend<HashingFor<B>>>::TrieBackendStorage,
            HashingFor<B>,
            &'a sp_trie::cache::LocalTrieCache<HashingFor<B>>,
        >,
        Option<sp_trie::recorder::Recorder<HashingFor<B>>>,
    )
    where
        C: CallApiAt<B>,
        B: BlockT + Debug,
    {
        let recorder = (!self.params.disable_pov_recorder).then(|| Default::default());
        let backend = sp_state_machine::TrieBackendBuilder::wrap(state.as_trie_backend())
            .with_optional_recorder(recorder.clone())
            .build();

        (backend, recorder)
    }
}
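
// A minimal, self-contained sketch of the batching pattern used in `bench_read` above:
// one backend is reused for up to `batch_size` reads and reset when the batch is full
// or the last item has been processed. Illustrative only; the function name and
// parameters below are placeholders, not part of the upstream API.
#[allow(dead_code)]
fn for_each_batch<T>(
    items: &[T],
    batch_size: u32,
    mut on_item: impl FnMut(&T),
    mut reset_backend: impl FnMut(),
) {
    let mut read_in_batch = 0;
    for (i, item) in items.iter().enumerate() {
        on_item(item);
        read_in_batch += 1;
        // A batch ends when it is full or when the last item has been processed.
        if read_in_batch >= batch_size || i + 1 == items.len() {
            reset_backend();
            read_in_batch = 0;
        }
    }
}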

/// Measure how long the runtime's `validate_block` export needs to read the given batch
/// of keys from the provided storage proof, averaged per key over `rounds` rounds.
fn measure_block_validation<B: BlockT + Debug>(
    root: B::Hash,
    storage_proof: StorageProof,
    on_validation_batch: Vec<(Vec<u8>, Option<ChildInfo>)>,
    rounds: u32,
) -> std::time::Duration {
    debug!(
        "POV: len {:?} {:?}",
        storage_proof.len(),
        storage_proof.clone().encoded_compact_size::<HashingFor<B>>(root)
    );
    let batch_size = on_validation_batch.len();
    let wasm_module = get_wasm_module();
    let mut instance = wasm_module.new_instance().expect("Failed to create wasm instance");
    let params = StorageAccessParams::<B>::new_read(root, storage_proof, on_validation_batch);
    // The dry-run variant measures the fixed per-call overhead, which is subtracted below.
    let dry_run_encoded = params.as_dry_run().encode();
    let encoded = params.encode();

    let mut durations_in_nanos = Vec::new();
    for i in 1..=rounds {
        info!("validate_block with {} keys, round {}/{}", batch_size, i, rounds);

        // First time the dry run to capture the fixed per-call overhead.
        let dry_run_start = Instant::now();
        instance
            .call_export("validate_block", &dry_run_encoded)
            .expect("Failed to call validate_block");
        let dry_run_elapsed = dry_run_start.elapsed();
        debug!("validate_block dry-run time {:?}", dry_run_elapsed);

        // Then time the real run with the full parameters.
        let start = Instant::now();
        instance
            .call_export("validate_block", &encoded)
            .expect("Failed to call validate_block");
        let elapsed = start.elapsed();
        debug!("validate_block time {:?}", elapsed);

        // Per-key time: subtract the dry-run overhead and divide by the batch size.
        durations_in_nanos
            .push(elapsed.saturating_sub(dry_run_elapsed).as_nanos() as u64 / batch_size as u64);
    }

    // Return the mean per-key duration over all rounds.
    std::time::Duration::from_nanos(
        durations_in_nanos.iter().sum::<u64>() / durations_in_nanos.len() as u64,
    )
}
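
// A minimal, self-contained sketch of the timing arithmetic in `measure_block_validation`:
// subtract the dry-run overhead from each round, divide by the batch size, then average
// over all rounds. Illustrative only; the module and function names are placeholders.
#[cfg(test)]
mod per_key_timing_sketch {
    use std::time::Duration;

    /// Average per-key duration over `(total, dry_run)` measurements of one batch.
    fn average_per_key(rounds: &[(Duration, Duration)], batch_size: u64) -> Duration {
        let per_round: Vec<u64> = rounds
            .iter()
            .map(|(total, dry_run)| total.saturating_sub(*dry_run).as_nanos() as u64 / batch_size)
            .collect();
        Duration::from_nanos(per_round.iter().sum::<u64>() / per_round.len() as u64)
    }

    #[test]
    fn averages_per_key_time() {
        // Two rounds over a batch of 10 keys: both net 1 ms after subtracting the dry run,
        // so the expected average is 100 µs per key.
        let rounds = [
            (Duration::from_micros(1200), Duration::from_micros(200)),
            (Duration::from_micros(1400), Duration::from_micros(400)),
        ];
        assert_eq!(average_per_key(&rounds, 10), Duration::from_micros(100));
    }
}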