frame_benchmarking_cli/pallet/writer.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Outputs benchmark results to Rust files that can be ingested by the runtime.

use std::{
	collections::{HashMap, HashSet},
	fs,
	path::PathBuf,
};

use inflector::Inflector;
use itertools::Itertools;
use serde::Serialize;

use crate::{
	pallet::{
		command::{PovEstimationMode, PovModesMap},
		types::{ComponentRange, ComponentRangeMap},
	},
	shared::UnderscoreHelper,
	PalletCmd,
};
use frame_benchmarking::{
	Analysis, AnalysisChoice, BenchmarkBatchSplitResults, BenchmarkResult, BenchmarkSelector,
};
use frame_support::traits::StorageInfo;
use sp_core::hexdisplay::HexDisplay;
use sp_runtime::traits::Zero;

const VERSION: &str = env!("CARGO_PKG_VERSION");
const TEMPLATE: &str = include_str!("./template.hbs");

// This is the final structure we will pass to the Handlebars template.
#[derive(Serialize, Default, Debug, Clone)]
struct TemplateData {
	args: Vec<String>,
	date: String,
	hostname: String,
	cpuname: String,
	version: String,
	pallet: String,
	instance: String,
	header: String,
	cmd: CmdData,
	benchmarks: Vec<BenchmarkData>,
}

// This is the final data we have about each benchmark.
#[derive(Serialize, Default, Debug, Clone, PartialEq)]
struct BenchmarkData {
	name: String,
	components: Vec<Component>,
	#[serde(serialize_with = "string_serialize")]
	base_weight: u128,
	#[serde(serialize_with = "string_serialize")]
	base_reads: u128,
	#[serde(serialize_with = "string_serialize")]
	base_writes: u128,
	#[serde(serialize_with = "string_serialize")]
	base_calculated_proof_size: u128,
	#[serde(serialize_with = "string_serialize")]
	base_recorded_proof_size: u128,
	component_weight: Vec<ComponentSlope>,
	component_reads: Vec<ComponentSlope>,
	component_writes: Vec<ComponentSlope>,
	component_calculated_proof_size: Vec<ComponentSlope>,
	component_recorded_proof_size: Vec<ComponentSlope>,
	component_ranges: Vec<ComponentRange>,
	comments: Vec<String>,
	#[serde(serialize_with = "string_serialize")]
	min_execution_time: u128,
}

// This forwards some specific metadata from the `PalletCmd`
#[derive(Serialize, Default, Debug, Clone)]
struct CmdData {
	steps: u32,
	repeat: u32,
	lowest_range_values: Vec<u32>,
	highest_range_values: Vec<u32>,
	wasm_execution: String,
	chain: String,
	db_cache: u32,
	analysis_choice: String,
	worst_case_map_values: u32,
	additional_trie_layers: u8,
}

// This encodes the component name and whether that component is used.
#[derive(Serialize, Debug, Clone, Eq, PartialEq)]
struct Component {
	name: String,
	is_used: bool,
}

// This encodes the slope of some benchmark related to a component.
#[derive(Serialize, Debug, Clone, Eq, PartialEq)]
struct ComponentSlope {
	name: String,
	#[serde(serialize_with = "string_serialize")]
	slope: u128,
	#[serde(serialize_with = "string_serialize")]
	error: u128,
}
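
// A `base` term together with a list of `ComponentSlope`s describes a linear formula:
// the resulting weight is roughly `base + sum(slope_i * c_i)` over all used components
// `c_i`, which is (roughly) how the Handlebars template turns this data into the
// generated weight functions.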

// Small helper to create an `io::Error` from a string.
fn io_error(s: &str) -> std::io::Error {
	use std::io::{Error, ErrorKind};
	Error::new(ErrorKind::Other, s)
}

// This function takes a list of `BenchmarkBatchSplitResults` and organizes them by pallet (and instance) into a `HashMap`.
// So this: `[(p1, b1), (p1, b2), (p2, b1), (p1, b3), (p2, b2)]`
// Becomes:
//
// ```
// p1 -> [b1, b2, b3]
// p2 -> [b1, b2]
// ```
fn map_results(
	batches: &[BenchmarkBatchSplitResults],
	storage_info: &[StorageInfo],
	component_ranges: &ComponentRangeMap,
	pov_modes: PovModesMap,
	default_pov_mode: PovEstimationMode,
	analysis_choice: &AnalysisChoice,
	pov_analysis_choice: &AnalysisChoice,
	worst_case_map_values: u32,
	additional_trie_layers: u8,
) -> Result<HashMap<(String, String), Vec<BenchmarkData>>, std::io::Error> {
	// Error out if `batches` is empty.
	if batches.is_empty() {
		return Err(io_error("empty batches"))
	}

	let mut all_benchmarks = HashMap::<_, Vec<BenchmarkData>>::new();

	for batch in batches {
		// Skip if there are no results
		if batch.time_results.is_empty() {
			continue
		}

		let pallet_name = String::from_utf8(batch.pallet.clone()).unwrap();
		let instance_name = String::from_utf8(batch.instance.clone()).unwrap();
		let benchmark_data = get_benchmark_data(
			batch,
			storage_info,
			component_ranges,
			pov_modes.clone(),
			default_pov_mode,
			analysis_choice,
			pov_analysis_choice,
			worst_case_map_values,
			additional_trie_layers,
		);
		let pallet_benchmarks = all_benchmarks.entry((pallet_name, instance_name)).or_default();
		pallet_benchmarks.push(benchmark_data);
	}
	Ok(all_benchmarks)
}

// Get an iterator over the errors, padded with zeros so it can be zipped against the slopes.
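// For example, with `errors == Some(vec![5, 7])` and four slopes, zipping against this
// iterator yields the error values `5, 7, 0, 0`: a missing error reads as zero instead
// of truncating the zip.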
fn extract_errors(errors: &Option<Vec<u128>>) -> impl Iterator<Item = u128> + '_ {
	errors
		.as_ref()
		.map(|e| e.as_slice())
		.unwrap_or(&[])
		.iter()
		.copied()
		.chain(std::iter::repeat(0))
}

// Analyze and return the relevant results for a given benchmark.
fn get_benchmark_data(
	batch: &BenchmarkBatchSplitResults,
	storage_info: &[StorageInfo],
	// Per extrinsic component ranges.
	component_ranges: &ComponentRangeMap,
	pov_modes: PovModesMap,
	default_pov_mode: PovEstimationMode,
	analysis_choice: &AnalysisChoice,
	pov_analysis_choice: &AnalysisChoice,
	worst_case_map_values: u32,
	additional_trie_layers: u8,
) -> BenchmarkData {
	// Analyze benchmarks to get the linear regression.
	let analysis_function = match analysis_choice {
		AnalysisChoice::MinSquares => Analysis::min_squares_iqr,
		AnalysisChoice::MedianSlopes => Analysis::median_slopes,
		AnalysisChoice::Max => Analysis::max,
	};
	let pov_analysis_function = match pov_analysis_choice {
		AnalysisChoice::MinSquares => Analysis::min_squares_iqr,
		AnalysisChoice::MedianSlopes => Analysis::median_slopes,
		AnalysisChoice::Max => Analysis::max,
	};
	let pallet = String::from_utf8(batch.pallet.clone()).unwrap();
	let benchmark = String::from_utf8(batch.benchmark.clone()).unwrap();

	let extrinsic_time = analysis_function(&batch.time_results, BenchmarkSelector::ExtrinsicTime)
		.expect("analysis function should return an extrinsic time for valid inputs");
	let reads = analysis_function(&batch.db_results, BenchmarkSelector::Reads)
		.expect("analysis function should return the number of reads for valid inputs");
	let writes = analysis_function(&batch.db_results, BenchmarkSelector::Writes)
		.expect("analysis function should return the number of writes for valid inputs");
	let recorded_proof_size =
		pov_analysis_function(&batch.db_results, BenchmarkSelector::ProofSize)
			.expect("analysis function should return proof sizes for valid inputs");

	// Analysis data may include components that are not used; this filters out anything whose
	// slope is zero.
	let mut used_components = Vec::new();
	let mut used_extrinsic_time = Vec::new();
	let mut used_reads = Vec::new();
	let mut used_writes = Vec::new();
	let mut used_calculated_proof_size = Vec::<ComponentSlope>::new();
	let mut used_recorded_proof_size = Vec::<ComponentSlope>::new();

	extrinsic_time
		.slopes
		.into_iter()
		.zip(extrinsic_time.names.iter())
		.zip(extract_errors(&extrinsic_time.errors))
		.for_each(|((slope, name), error)| {
			if !slope.is_zero() {
				if !used_components.contains(&name) {
					used_components.push(name);
				}
				used_extrinsic_time.push(ComponentSlope { name: name.clone(), slope, error });
			}
		});
	reads
		.slopes
		.into_iter()
		.zip(reads.names.iter())
		.zip(extract_errors(&reads.errors))
		.for_each(|((slope, name), error)| {
			if !slope.is_zero() {
				if !used_components.contains(&name) {
					used_components.push(name);
				}
				used_reads.push(ComponentSlope { name: name.clone(), slope, error });
			}
		});
	writes
		.slopes
		.into_iter()
		.zip(writes.names.iter())
		.zip(extract_errors(&writes.errors))
		.for_each(|((slope, name), error)| {
			if !slope.is_zero() {
				if !used_components.contains(&name) {
					used_components.push(name);
				}
				used_writes.push(ComponentSlope { name: name.clone(), slope, error });
			}
		});
	recorded_proof_size
		.slopes
		.into_iter()
		.zip(recorded_proof_size.names.iter())
		.zip(extract_errors(&recorded_proof_size.errors))
		.for_each(|((slope, name), error)| {
			if !slope.is_zero() {
				// These are only for comments, so don't touch the `used_components`.
				used_recorded_proof_size.push(ComponentSlope { name: name.clone(), slope, error });
			}
		});
	used_recorded_proof_size.sort_by(|a, b| a.name.cmp(&b.name));

	// We add additional comments showing which storage items were touched.
	// We find the worst case proof size, and use that as the final proof size result.
	let mut storage_per_prefix = HashMap::<Vec<u8>, Vec<BenchmarkResult>>::new();
	let pov_mode = pov_modes.get(&(pallet.clone(), benchmark.clone())).cloned().unwrap_or_default();
	let comments = process_storage_results(
		&mut storage_per_prefix,
		&batch.db_results,
		storage_info,
		&pov_mode,
		default_pov_mode,
		worst_case_map_values,
		additional_trie_layers,
	);

	let proof_size_per_components = storage_per_prefix
		.iter()
		.map(|(prefix, results)| {
			let proof_size = analysis_function(results, BenchmarkSelector::ProofSize)
				.expect("analysis function should return proof sizes for valid inputs");
			let slope = proof_size
				.slopes
				.into_iter()
				.zip(proof_size.names.iter())
				.zip(extract_errors(&proof_size.errors))
				.map(|((slope, name), error)| ComponentSlope { name: name.clone(), slope, error })
				.collect::<Vec<_>>();
			(prefix.clone(), slope, proof_size.base)
		})
		.collect::<Vec<_>>();

	let mut base_calculated_proof_size = 0;
	// Take the worst case proof size per component.
	for (_, slope, base) in proof_size_per_components.iter() {
		base_calculated_proof_size = base_calculated_proof_size.max(*base);
		for component in slope.iter() {
			let mut found = false;
			for used_component in used_calculated_proof_size.iter_mut() {
				if used_component.name == component.name {
					used_component.slope = used_component.slope.max(component.slope);
					found = true;
					break
				}
			}
			if !found && !component.slope.is_zero() {
				if !used_components.contains(&&component.name) {
					used_components.push(&component.name);
				}
				used_calculated_proof_size.push(ComponentSlope {
					name: component.name.clone(),
					slope: component.slope,
					error: component.error,
				});
			}
		}
	}
	used_calculated_proof_size.sort_by(|a, b| a.name.cmp(&b.name));

	// This puts a marker on any component which is entirely unused in the weight formula.
	let components = batch.time_results[0]
		.components
		.iter()
		.map(|(name, _)| -> Component {
			let name_string = name.to_string();
			let is_used = used_components.contains(&&name_string);
			Component { name: name_string, is_used }
		})
		.collect::<Vec<_>>();

	let component_ranges = component_ranges
		.get(&(pallet.clone(), benchmark.clone()))
		.cloned()
		.unwrap_or_default();

	BenchmarkData {
		name: benchmark,
		components,
		base_weight: extrinsic_time.base,
		base_reads: reads.base,
		base_writes: writes.base,
		base_calculated_proof_size,
		base_recorded_proof_size: recorded_proof_size.base,
		component_weight: used_extrinsic_time,
		component_reads: used_reads,
		component_writes: used_writes,
		component_calculated_proof_size: used_calculated_proof_size,
		component_recorded_proof_size: used_recorded_proof_size,
		component_ranges,
		comments,
		min_execution_time: extrinsic_time.minimum,
	}
}

/// Create weight file from benchmark data and Handlebars template.
pub(crate) fn write_results(
	batches: &[BenchmarkBatchSplitResults],
	storage_info: &[StorageInfo],
	component_ranges: &HashMap<(String, String), Vec<ComponentRange>>,
	pov_modes: PovModesMap,
	default_pov_mode: PovEstimationMode,
	path: &PathBuf,
	cmd: &PalletCmd,
) -> Result<(), sc_cli::Error> {
	// Use custom template if provided.
	let template: String = match &cmd.template {
		Some(template_file) => fs::read_to_string(template_file)?,
		None => TEMPLATE.to_string(),
	};

	// Use header if provided
	let header_text = match &cmd.header {
		Some(header_file) => fs::read_to_string(header_file)?,
		None => String::new(),
	};

	// Date string metadata
	let date = chrono::Utc::now().format("%Y-%m-%d").to_string();

	// Full CLI args passed to trigger the benchmark.
	let args = std::env::args().collect::<Vec<String>>();

	// Which analysis function should be used when outputting benchmarks
	let analysis_choice: AnalysisChoice =
		cmd.output_analysis.clone().try_into().map_err(io_error)?;
	let pov_analysis_choice: AnalysisChoice =
		cmd.output_pov_analysis.clone().try_into().map_err(io_error)?;

	if cmd.additional_trie_layers > 4 {
		println!(
			"WARNING: `additional_trie_layers` is unexpectedly large. It assumes {} storage items.",
			16f64.powi(cmd.additional_trie_layers as i32)
		)
	}

	// Capture individual args
	let cmd_data = CmdData {
		steps: cmd.steps,
		repeat: cmd.repeat,
		lowest_range_values: cmd.lowest_range_values.clone(),
		highest_range_values: cmd.highest_range_values.clone(),
		wasm_execution: cmd.wasm_method.to_string(),
		chain: format!("{:?}", cmd.shared_params.chain),
		db_cache: cmd.database_cache_size,
		analysis_choice: format!("{:?}", analysis_choice),
		worst_case_map_values: cmd.worst_case_map_values,
		additional_trie_layers: cmd.additional_trie_layers,
	};

	// New Handlebars instance with helpers.
	let mut handlebars = handlebars::Handlebars::new();
	handlebars.register_helper("underscore", Box::new(UnderscoreHelper));
	handlebars.register_helper("join", Box::new(JoinHelper));
	// Don't HTML escape any characters.
	handlebars.register_escape_fn(|s| -> String { s.to_string() });

	// Organize results by pallet into a JSON map
	let all_results = map_results(
		batches,
		storage_info,
		component_ranges,
		pov_modes,
		default_pov_mode,
		&analysis_choice,
		&pov_analysis_choice,
		cmd.worst_case_map_values,
		cmd.additional_trie_layers,
	)?;
	let mut created_files = Vec::new();

	for ((pallet, instance), results) in all_results.iter() {
		let mut file_path = path.clone();
		// If a user only specified a directory...
		if file_path.is_dir() {
			// Start with "path/to/pallet_name".
			let mut file_name = pallet.clone();
			// Check if there might be multiple instances benchmarked.
			if all_results.keys().any(|(p, i)| p == pallet && i != instance) {
				// Append "_instance_name".
				file_name = format!("{}_{}", file_name, instance.to_snake_case());
			}
			// "mod::pallet_name.rs" becomes "mod_pallet_name.rs".
			file_name = file_name.replace("::", "_");
			// Some old runtimes have a bug where the pallet and instance name contain a space
			file_name = file_name.replace(" ", "");
			file_path.push(file_name);
			file_path.set_extension("rs");
		}

		let hbs_data = TemplateData {
			args: args.clone(),
			date: date.clone(),
			hostname: cmd.hostinfo_params.hostname(),
			cpuname: cmd.hostinfo_params.cpuname(),
			version: VERSION.to_string(),
			pallet: pallet.to_string(),
			instance: instance.to_string(),
			header: header_text.clone(),
			cmd: cmd_data.clone(),
			benchmarks: results.clone(),
		};

		let mut output_file = fs::File::create(&file_path).map_err(|e| {
			format!("Could not write weight file to: {:?}. Error: {:?}", &file_path, e)
		})?;
		handlebars
			.render_template_to_write(&template, &hbs_data, &mut output_file)
			.map_err(|e| io_error(&e.to_string()))?;
		println!("Created file: {:?}", &file_path);
		created_files.push(file_path);
	}

	let overwritten_files = created_files.iter().duplicates().collect::<Vec<_>>();
	if !overwritten_files.is_empty() {
		let msg = format!(
			"Multiple results were written to the same file. This can happen when \
		there are multiple instances of a pallet deployed and `--output` forces the output of all \
		instances into the same file. Use `--unsafe-overwrite-results` to ignore this error. The \
		affected files are: {:?}",
			overwritten_files
		);

		if cmd.unsafe_overwrite_results {
			println!("{msg}");
		} else {
			return Err(msg.into())
		}
	}
	Ok(())
}

/// This function looks at the keys touched during the benchmark, and the storage info we collected
/// from the pallets, and creates comments with information about the storage keys touched during
/// each benchmark.
///
/// It returns informational comments for human consumption.
pub(crate) fn process_storage_results(
	storage_per_prefix: &mut HashMap<Vec<u8>, Vec<BenchmarkResult>>,
	results: &[BenchmarkResult],
	storage_info: &[StorageInfo],
	pov_modes: &HashMap<(String, String), PovEstimationMode>,
	default_pov_mode: PovEstimationMode,
	worst_case_map_values: u32,
	additional_trie_layers: u8,
) -> Vec<String> {
	let mut comments = Vec::new();
	let mut storage_info_map = storage_info
		.iter()
		.map(|info| (info.prefix.clone(), info))
		.collect::<HashMap<_, _>>();

	// Special hack to show `Skipped Metadata`
	let skip_storage_info = StorageInfo {
		pallet_name: b"Skipped".to_vec(),
		storage_name: b"Metadata".to_vec(),
		prefix: b"Skipped Metadata".to_vec(),
		max_values: None,
		max_size: None,
	};
	storage_info_map.insert(skip_storage_info.prefix.clone(), &skip_storage_info);

	// Special hack to show `Benchmark Override`
	let benchmark_override = StorageInfo {
		pallet_name: b"Benchmark".to_vec(),
		storage_name: b"Override".to_vec(),
		prefix: b"Benchmark Override".to_vec(),
		max_values: None,
		max_size: None,
	};
	storage_info_map.insert(benchmark_override.prefix.clone(), &benchmark_override);

	// This tracks the keys we already identified, so we only generate a single comment.
	let mut identified_prefix = HashSet::<Vec<u8>>::new();
	let mut identified_key = HashSet::<Vec<u8>>::new();

	// TODO Emit a warning for unused `pov_mode` attributes.

	// We have to iterate in reverse order to catch the largest values for read/write, since the
	// components start low and then increase, and only the first value is used.
	for result in results.iter().rev() {
		for (key, reads, writes, whitelisted) in &result.keys {
			// skip keys which are whitelisted
			if *whitelisted {
				continue
			}

			let prefix_length = key.len().min(32);
			let prefix = key[0..prefix_length].to_vec();
			let is_key_identified = identified_key.contains(key);
			let is_prefix_identified = identified_prefix.contains(&prefix);

			let mut prefix_result = result.clone();
			let key_info = storage_info_map.get(&prefix);
			let pallet_name = match key_info {
				Some(k) => String::from_utf8(k.pallet_name.clone()).expect("encoded from string"),
				None => "".to_string(),
			};
			let storage_name = match key_info {
				Some(k) => String::from_utf8(k.storage_name.clone()).expect("encoded from string"),
				None => "".to_string(),
			};
			let max_size = key_info.and_then(|k| k.max_size);

			let override_pov_mode = match key_info {
				Some(_) => {
					// Is there an override for the storage key?
					pov_modes.get(&(pallet_name.clone(), storage_name.clone())).or(
						// .. or for the storage prefix?
						pov_modes.get(&(pallet_name.clone(), "ALL".to_string())).or(
							// .. or for the benchmark?
							pov_modes.get(&("ALL".to_string(), "ALL".to_string())),
						),
					)
				},
				None => None,
			};
			let is_all_ignored = pov_modes.get(&("ALL".to_string(), "ALL".to_string())) ==
				Some(&PovEstimationMode::Ignored);
			if is_all_ignored && override_pov_mode != Some(&PovEstimationMode::Ignored) {
				panic!("The syntax currently does not allow excluding single keys from a top-level `Ignored` pov-mode.");
			}

			let pov_overhead = single_read_pov_overhead(
				key_info.and_then(|i| i.max_values),
				worst_case_map_values,
			);

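			// Resolve the effective PoV estimation mode for this key: an explicit override
			// wins over the command-line default. `Measured` keeps the recorded proof size
			// and adds the single-read overhead per read, `MaxEncodedLen` replaces the
			// measurement with `(overhead + max_size) * reads`, and `Ignored` contributes
			// nothing (or zeroes the whole estimate when applied globally).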
			let used_pov_mode = match (override_pov_mode, max_size, default_pov_mode) {
				// All is ignored by default and no override:
				(None, _, PovEstimationMode::Ignored) => {
					prefix_result.proof_size = 0;
					PovEstimationMode::Ignored
				},
				// Some is ignored by override, maybe all:
				(Some(PovEstimationMode::Ignored), _, _) => {
					// If this is applied to All keys, then we also remove the base weight and just set all to zero.
					if is_all_ignored {
						prefix_result.proof_size = 0;
					} else {
						// Otherwise we just don't *increase* `proof_size` for this key.
					}
					PovEstimationMode::Ignored
				},
				(Some(PovEstimationMode::Measured), _, _) |
				(None, _, PovEstimationMode::Measured) |
				// Use best effort in this case since failing would be really annoying.
				(None, None, PovEstimationMode::MaxEncodedLen) => {
					// We add the overhead for a single read each time. In a more advanced version
					// we could take node re-using into account and over-estimate a bit less.
					prefix_result.proof_size += pov_overhead * *reads;
					PovEstimationMode::Measured
				},
				(Some(PovEstimationMode::MaxEncodedLen), Some(max_size), _) |
				(None, Some(max_size), PovEstimationMode::MaxEncodedLen) => {
					prefix_result.proof_size = (pov_overhead + max_size) * *reads;
					PovEstimationMode::MaxEncodedLen
				},
				(Some(PovEstimationMode::MaxEncodedLen), None, _) => {
					panic!("Key does not have a MEL bound but MEL PoV estimation mode was specified {:?}", &key);
				},
			};
			// Add the additional trie layer overhead for every new prefix.
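			// E.g. with `additional_trie_layers == 2` this adds `2 * 15 * 33 = 990` bytes,
			// as checked by the `additional_trie_layers_work` test below.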
			if *reads > 0 && !is_all_ignored {
				prefix_result.proof_size += 15 * 33 * additional_trie_layers as u32;
			}
			storage_per_prefix.entry(prefix.clone()).or_default().push(prefix_result);

			match (is_key_identified, is_prefix_identified) {
				// We already did everything, move on...
				(true, true) => continue,
				(false, true) => {
					// track newly identified key
					identified_key.insert(key.clone());
				},
				(false, false) => {
					// track newly identified key and prefix
					identified_key.insert(key.clone());
					identified_prefix.insert(prefix.clone());
				},
				// Not possible. If the key is known, the prefix is too.
				(true, false) => unreachable!(),
			}

			// For any new prefix, we should write some comment about the number of reads and
			// writes.
			if !is_prefix_identified {
				match key_info {
					Some(_) => {
						let comment = format!(
							"Storage: `{}::{}` (r:{} w:{})",
							pallet_name, storage_name, reads, writes,
						);
						comments.push(comment)
					},
					None => {
						let comment = format!(
							"Storage: UNKNOWN KEY `0x{}` (r:{} w:{})",
							HexDisplay::from(key),
							reads,
							writes,
						);
						comments.push(comment)
					},
				}
			}

			// For any new key, we should add the PoV impact.
			if !is_key_identified {
				match key_info {
					Some(key_info) => {
						match worst_case_pov(
							key_info.max_values,
							key_info.max_size,
							!is_prefix_identified,
							worst_case_map_values,
						) {
							Some(new_pov) => {
								let comment = format!(
									"Proof: `{pallet_name}::{storage_name}` (`max_values`: {:?}, `max_size`: {:?}, added: {}, mode: `{:?}`)",
									key_info.max_values,
									key_info.max_size,
									new_pov,
									used_pov_mode,
								);
								comments.push(comment)
							},
							None => {
								let comment = format!(
									"Proof: `{}::{}` (`max_values`: {:?}, `max_size`: {:?}, mode: `{:?}`)",
									pallet_name, storage_name, key_info.max_values, key_info.max_size,
									used_pov_mode,
								);
								comments.push(comment);
							},
						}
					},
					None => {
						let comment = format!(
							"Proof: UNKNOWN KEY `0x{}` (r:{} w:{})",
							HexDisplay::from(key),
							reads,
							writes,
						);
						comments.push(comment)
					},
				}
			}
		}
	}

	comments
}

/// The PoV overhead when reading a key the first time out of a map with `max_values` entries.
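///
/// For example, with `worst_case_map_values == 1_000_000` (the value used in the tests
/// below), an unbounded map gets `easy_log_16(1_000_000) == 5` trie layers, so the first
/// read is charged `5 * 15 * 33 = 2475` bytes of proof overhead.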
fn single_read_pov_overhead(max_values: Option<u32>, worst_case_map_values: u32) -> u32 {
	let max_values = max_values.unwrap_or(worst_case_map_values);
	let depth: u32 = easy_log_16(max_values);
	// Normally we have 16 entries of 32 byte hashes per tree layer. In the new trie
	// layout the hashes are prefixed by their compact length, hence 33 instead. The proof
	// compaction can compress one node per layer since we send the value itself;
	// therefore we end up with a size of `15 * 33` per layer.
	depth * 15 * 33
}

/// Given the max values and max size of some storage item, calculate the worst
/// case PoV.
///
/// # Arguments
/// * `max_values`: The maximum number of values in the storage item. `None` for unbounded items.
/// * `max_size`: The maximum size of the value in the storage. `None` for unbounded items.
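///
/// For example, the `bounded` item from the tests below (`max_values: Some(1 << 20)`,
/// `max_size: Some(32)`) yields `5 * 15 * 33 + 32 = 2507` bytes for a key under a new
/// prefix, and just the 32 byte value for further keys under the same prefix.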
fn worst_case_pov(
	max_values: Option<u32>,
	max_size: Option<u32>,
	is_new_prefix: bool,
	worst_case_map_values: u32,
) -> Option<u32> {
	if let Some(max_size) = max_size {
		let trie_size: u32 = if is_new_prefix {
			single_read_pov_overhead(max_values, worst_case_map_values)
		} else {
			0
		};

		Some(trie_size + max_size)
	} else {
		None
	}
}

/// A simple match statement which outputs the log 16 of some value.
fn easy_log_16(i: u32) -> u32 {
	#[allow(clippy::redundant_guards)]
	match i {
		i if i == 0 => 0,
		i if i <= 16 => 1,
		i if i <= 256 => 2,
		i if i <= 4_096 => 3,
		i if i <= 65_536 => 4,
		i if i <= 1_048_576 => 5,
		i if i <= 16_777_216 => 6,
		i if i <= 268_435_456 => 7,
		_ => 8,
	}
}

// A helper to join a vector of strings.
#[derive(Clone, Copy)]
struct JoinHelper;
impl handlebars::HelperDef for JoinHelper {
	fn call<'reg: 'rc, 'rc>(
		&self,
		h: &handlebars::Helper,
		_: &handlebars::Handlebars,
		_: &handlebars::Context,
		_rc: &mut handlebars::RenderContext,
		out: &mut dyn handlebars::Output,
	) -> handlebars::HelperResult {
		use handlebars::JsonRender;
		let param = h.param(0).unwrap();
		let value = param.value();
		let joined = if value.is_array() {
			value
				.as_array()
				.unwrap()
				.iter()
				.map(|v| v.render())
				.collect::<Vec<String>>()
				.join(" ")
		} else {
			value.render()
		};
		out.write(&joined)?;
		Ok(())
	}
}

// u128 does not serialize well into JSON for `handlebars`, so we represent it as a string.
fn string_serialize<S>(x: &u128, s: S) -> Result<S::Ok, S::Error>
where
	S: serde::Serializer,
{
	s.serialize_str(&x.to_string())
}

#[cfg(test)]
mod test {
	use super::*;
	use frame_benchmarking::{BenchmarkBatchSplitResults, BenchmarkParameter, BenchmarkResult};

	fn test_data(
		pallet: &[u8],
		benchmark: &[u8],
		param: BenchmarkParameter,
		base: u32,
		slope: u32,
	) -> BenchmarkBatchSplitResults {
		let mut results = Vec::new();
		for i in 0..5 {
			results.push(BenchmarkResult {
				components: vec![(param, i)],
				extrinsic_time: (base + slope * i).into(),
				storage_root_time: (base + slope * i).into(),
				reads: (base + slope * i).into(),
				repeat_reads: 0,
				writes: (base + slope * i).into(),
				repeat_writes: 0,
				proof_size: (i + 1) * 1024,
				// All R/W come from this key:
				keys: vec![(b"bounded".to_vec(), (base + slope * i), (base + slope * i), false)],
			})
		}

		BenchmarkBatchSplitResults {
			pallet: [pallet.to_vec(), b"_pallet".to_vec()].concat(),
			instance: b"instance".to_vec(),
			benchmark: [benchmark.to_vec(), b"_benchmark".to_vec()].concat(),
			time_results: results.clone(),
			db_results: results,
		}
	}

	fn test_storage_info() -> Vec<StorageInfo> {
		vec![StorageInfo {
			pallet_name: b"bounded".to_vec(),
			storage_name: b"bounded".to_vec(),
			prefix: b"bounded".to_vec(),
			max_values: Some(1 << 20),
			max_size: Some(32),
		}]
	}

	fn test_pov_mode() -> PovModesMap {
		let mut map = PovModesMap::new();
		map.entry(("scheduler".into(), "first_benchmark".into()))
			.or_default()
			.insert(("scheduler".into(), "mel".into()), PovEstimationMode::MaxEncodedLen);
		map.entry(("scheduler".into(), "first_benchmark".into()))
			.or_default()
			.insert(("scheduler".into(), "measured".into()), PovEstimationMode::Measured);
		map
	}

	fn check_data(benchmark: &BenchmarkData, component: &str, base: u128, slope: u128) {
		assert_eq!(
			benchmark.components,
			vec![Component { name: component.to_string(), is_used: true },],
		);
		// Weights multiplied by 1,000
		assert_eq!(benchmark.base_weight, base * 1_000);
		assert_eq!(
			benchmark.component_weight,
			vec![ComponentSlope { name: component.to_string(), slope: slope * 1_000, error: 0 }]
		);
		// DB Reads/Writes are untouched
		assert_eq!(benchmark.base_reads, base);
		assert_eq!(
			benchmark.component_reads,
			vec![ComponentSlope { name: component.to_string(), slope, error: 0 }]
		);
		assert_eq!(benchmark.base_writes, base);
		assert_eq!(
			benchmark.component_writes,
			vec![ComponentSlope { name: component.to_string(), slope, error: 0 }]
		);
		// Measured PoV is correct
		assert_eq!(benchmark.base_recorded_proof_size, 1024);
		assert_eq!(
			benchmark.component_recorded_proof_size,
			vec![ComponentSlope { name: component.to_string(), slope: 1024, error: 0 }]
		);
	}

	/// We measure a linear proof size but select `pov_mode = MEL` with a present MEL bound for the
	/// type. This should result in the measured PoV being ignored and the MEL used instead.
	#[test]
	fn pov_mode_mel_constant_works() {
		let mut results = Vec::new();
		for i in 0..5 {
			results.push(BenchmarkResult {
				components: vec![(BenchmarkParameter::s, i)],
				extrinsic_time: 0,
				storage_root_time: 0,
				reads: 1,
				repeat_reads: 777,
				writes: 888,
				repeat_writes: 999,
				proof_size: i * 1024,
				keys: vec![(b"mel".to_vec(), 1, 1, false)],
			})
		}

		let data = BenchmarkBatchSplitResults {
			pallet: b"scheduler".to_vec(),
			instance: b"instance".to_vec(),
			benchmark: b"first_benchmark".to_vec(),
			time_results: results.clone(),
			db_results: results,
		};

		let storage_info = vec![StorageInfo {
			pallet_name: b"scheduler".to_vec(),
			storage_name: b"mel".to_vec(),
			prefix: b"mel".to_vec(),
			max_values: None,
			max_size: Some(1 << 22), // MEL of 4 MiB
		}];

		let mapped_results = map_results(
			&[data],
			&storage_info,
			&Default::default(),
			test_pov_mode(),
			PovEstimationMode::MaxEncodedLen,
			&AnalysisChoice::default(),
			&AnalysisChoice::MedianSlopes,
			1_000_000,
			0,
		)
		.unwrap();
		let result =
			mapped_results.get(&("scheduler".to_string(), "instance".to_string())).unwrap()[0]
				.clone();

		let base = result.base_calculated_proof_size;
		assert!(result.component_calculated_proof_size.is_empty(), "There is no slope");
		// It's a map with 5 layers overhead:
		assert_eq!(base, (1 << 22) + 15 * 33 * 5);
	}

	/// Record a small linear proof size but since MEL is selected and available it should be used
	/// instead.
	#[test]
	fn pov_mode_mel_linear_works() {
		let mut results = Vec::new();
		for i in 0..5 {
			results.push(BenchmarkResult {
				components: vec![(BenchmarkParameter::s, i)],
				extrinsic_time: 0,
				storage_root_time: 0,
				reads: 123,
				repeat_reads: 777,
				writes: 888,
				repeat_writes: 999,
				proof_size: i * 1024,
				keys: vec![("mel".as_bytes().to_vec(), i, 1, false)],
			})
		}

		let data = BenchmarkBatchSplitResults {
			pallet: b"scheduler".to_vec(),
			instance: b"instance".to_vec(),
			benchmark: b"first_benchmark".to_vec(),
			time_results: results.clone(),
			db_results: results,
		};

		let storage_info = vec![StorageInfo {
			pallet_name: b"scheduler".to_vec(),
			storage_name: b"mel".to_vec(),
			prefix: b"mel".to_vec(),
			max_values: None,
			max_size: Some(1 << 22), // MEL of 4 MiB
		}];

		let mapped_results = map_results(
			&[data],
			&storage_info,
			&Default::default(),
			test_pov_mode(),
			PovEstimationMode::MaxEncodedLen,
			&AnalysisChoice::default(),
			&AnalysisChoice::MedianSlopes,
			1_000_000,
			0,
		)
		.unwrap();
		let result =
			mapped_results.get(&("scheduler".to_string(), "instance".to_string())).unwrap()[0]
				.clone();

		let base = result.base_calculated_proof_size;
		assert_eq!(result.component_calculated_proof_size.len(), 1, "There is a slope");
		let slope = result.component_calculated_proof_size[0].clone().slope;
		assert_eq!(base, 0);
		// It's a map with 5 layers overhead:
		assert_eq!(slope, (1 << 22) + 15 * 33 * 5);
	}

	#[test]
	fn pov_mode_measured_const_works() {
		let mut results = Vec::new();
		for i in 0..5 {
			results.push(BenchmarkResult {
				components: vec![(BenchmarkParameter::s, i)],
				extrinsic_time: 0,
				storage_root_time: 0,
				reads: 123,
				repeat_reads: 777,
				writes: 888,
				repeat_writes: 999,
				proof_size: 1024,
				keys: vec![("measured".as_bytes().to_vec(), 1, 1, false)],
			})
		}

		let data = BenchmarkBatchSplitResults {
			pallet: b"scheduler".to_vec(),
			instance: b"instance".to_vec(),
			benchmark: b"first_benchmark".to_vec(),
			time_results: results.clone(),
			db_results: results,
		};

		let storage_info = vec![StorageInfo {
			pallet_name: b"scheduler".to_vec(),
			storage_name: b"measured".to_vec(),
			prefix: b"measured".to_vec(),
			max_values: None,
			max_size: Some(1 << 22), // MEL of 4 MiB
		}];

		let mapped_results = map_results(
			&[data],
			&storage_info,
			&Default::default(),
			test_pov_mode(),
			PovEstimationMode::MaxEncodedLen,
			&AnalysisChoice::default(),
			&AnalysisChoice::MedianSlopes,
			1_000_000,
			0,
		)
		.unwrap();
		let result =
			mapped_results.get(&("scheduler".to_string(), "instance".to_string())).unwrap()[0]
				.clone();

		let base = result.base_calculated_proof_size;
		assert!(result.component_calculated_proof_size.is_empty(), "There is no slope");
		// 5 Trie layers overhead because of the 1M max elements in that map:
		assert_eq!(base, 1024 + 15 * 33 * 5);
	}

	#[test]
	fn pov_mode_measured_linear_works() {
		let mut results = Vec::new();
		for i in 0..5 {
			results.push(BenchmarkResult {
				components: vec![(BenchmarkParameter::s, i)],
				extrinsic_time: 0,
				storage_root_time: 0,
				reads: 123,
				repeat_reads: 777,
				writes: 888,
				repeat_writes: 999,
				proof_size: i * 1024,
				keys: vec![("measured".as_bytes().to_vec(), i, 1, false)],
			})
		}

		let data = BenchmarkBatchSplitResults {
			pallet: b"scheduler".to_vec(),
			instance: b"instance".to_vec(),
			benchmark: b"first_benchmark".to_vec(),
			time_results: results.clone(),
			db_results: results,
		};

		let storage_info = vec![StorageInfo {
			pallet_name: b"scheduler".to_vec(),
			storage_name: b"measured".to_vec(),
			prefix: b"measured".to_vec(),
			max_values: None,
			max_size: Some(1 << 22), // MEL of 4 MiB
		}];

		let mapped_results = map_results(
			&[data],
			&storage_info,
			&Default::default(),
			test_pov_mode(),
			PovEstimationMode::MaxEncodedLen,
			&AnalysisChoice::default(),
			&AnalysisChoice::MedianSlopes,
			1_000_000,
			0,
		)
		.unwrap();
		let result =
			mapped_results.get(&("scheduler".to_string(), "instance".to_string())).unwrap()[0]
				.clone();

		let base = result.base_calculated_proof_size;
		assert_eq!(result.component_calculated_proof_size.len(), 1, "There is a slope");
		let slope = result.component_calculated_proof_size[0].clone().slope;
		assert_eq!(base, 0);
		// It's a map with 5 layers overhead:
		assert_eq!(slope, 1024 + 15 * 33 * 5);
	}

	#[test]
	fn pov_mode_ignored_linear_works() {
		let mut results = Vec::new();
		for i in 0..5 {
			results.push(BenchmarkResult {
				components: vec![(BenchmarkParameter::s, i)],
				extrinsic_time: 0,
				storage_root_time: 0,
				reads: 123,
				repeat_reads: 777,
				writes: 888,
				repeat_writes: 999,
				proof_size: i * 1024,
				keys: vec![("ignored".as_bytes().to_vec(), i, 1, false)],
			})
		}

		let data = BenchmarkBatchSplitResults {
			pallet: b"scheduler".to_vec(),
			instance: b"instance".to_vec(),
			benchmark: b"first_benchmark".to_vec(),
			time_results: results.clone(),
			db_results: results,
		};

		let storage_info = vec![StorageInfo {
			pallet_name: b"scheduler".to_vec(),
			storage_name: b"ignored".to_vec(),
			prefix: b"ignored".to_vec(),
			max_values: None,
			max_size: Some(1 << 22), // MEL of 4 MiB
		}];

		let mapped_results = map_results(
			&[data],
			&storage_info,
			&Default::default(),
			test_pov_mode(),
			PovEstimationMode::Ignored,
			&AnalysisChoice::default(),
			&AnalysisChoice::MedianSlopes,
			1_000_000,
			0,
		)
		.unwrap();
		let result =
			mapped_results.get(&("scheduler".to_string(), "instance".to_string())).unwrap()[0]
				.clone();

		let base = result.base_calculated_proof_size;
		assert!(result.component_calculated_proof_size.is_empty(), "There is no slope");
		assert_eq!(base, 0);
	}

	#[test]
	fn map_results_works() {
		let mapped_results = map_results(
			&[
				test_data(b"first", b"first", BenchmarkParameter::a, 10, 3),
				test_data(b"first", b"second", BenchmarkParameter::b, 9, 2),
				test_data(b"second", b"first", BenchmarkParameter::c, 3, 4),
				test_data(b"bounded", b"bounded", BenchmarkParameter::d, 4, 6),
			],
			&test_storage_info(),
			&Default::default(),
			Default::default(),
			PovEstimationMode::MaxEncodedLen,
			&AnalysisChoice::default(),
			&AnalysisChoice::MedianSlopes,
			1_000_000,
			0,
		)
		.unwrap();

		let first_benchmark = &mapped_results
			.get(&("first_pallet".to_string(), "instance".to_string()))
			.unwrap()[0];
		assert_eq!(first_benchmark.name, "first_benchmark");
		check_data(first_benchmark, "a", 10, 3);

		let second_benchmark = &mapped_results
			.get(&("first_pallet".to_string(), "instance".to_string()))
			.unwrap()[1];
		assert_eq!(second_benchmark.name, "second_benchmark");
		check_data(second_benchmark, "b", 9, 2);

		let second_pallet_benchmark = &mapped_results
			.get(&("second_pallet".to_string(), "instance".to_string()))
			.unwrap()[0];
		assert_eq!(second_pallet_benchmark.name, "first_benchmark");
		check_data(second_pallet_benchmark, "c", 3, 4);

		let bounded_pallet_benchmark = &mapped_results
			.get(&("bounded_pallet".to_string(), "instance".to_string()))
			.unwrap()[0];
		assert_eq!(bounded_pallet_benchmark.name, "bounded_benchmark");
		check_data(bounded_pallet_benchmark, "d", 4, 6);
		// (5 * 15 * 33 + 32) * 4 = 10028
		assert_eq!(bounded_pallet_benchmark.base_calculated_proof_size, 10028);
		// (5 * 15 * 33 + 32) * 6 = 15042
		assert_eq!(
			bounded_pallet_benchmark.component_calculated_proof_size,
			vec![ComponentSlope { name: "d".into(), slope: 15042, error: 0 }]
		);
	}

	#[test]
	fn additional_trie_layers_work() {
		let mapped_results = map_results(
			&[test_data(b"first", b"first", BenchmarkParameter::a, 10, 3)],
			&test_storage_info(),
			&Default::default(),
			Default::default(),
			PovEstimationMode::MaxEncodedLen,
			&AnalysisChoice::default(),
			&AnalysisChoice::MedianSlopes,
			1_000_000,
			2,
		)
		.unwrap();
		let with_layer = &mapped_results
			.get(&("first_pallet".to_string(), "instance".to_string()))
			.unwrap()[0];
		let mapped_results = map_results(
			&[test_data(b"first", b"first", BenchmarkParameter::a, 10, 3)],
			&test_storage_info(),
			&Default::default(),
			Default::default(),
			PovEstimationMode::MaxEncodedLen,
			&AnalysisChoice::default(),
			&AnalysisChoice::MedianSlopes,
			1_000_000,
			0,
		)
		.unwrap();
		let without_layer = &mapped_results
			.get(&("first_pallet".to_string(), "instance".to_string()))
			.unwrap()[0];

		assert_eq!(
			without_layer.base_calculated_proof_size + 2 * 15 * 33,
			with_layer.base_calculated_proof_size
		);
		// The additional trie layers ONLY affect the base weight, not the components.
		assert_eq!(
			without_layer.component_calculated_proof_size,
			with_layer.component_calculated_proof_size
		);
	}

	#[test]
	fn template_works() {
		let all_results = map_results(
			&[
				test_data(b"first", b"first", BenchmarkParameter::a, 10, 3),
				test_data(b"first", b"second", BenchmarkParameter::b, 9, 2),
				test_data(b"second", b"first", BenchmarkParameter::c, 3, 4),
			],
			&test_storage_info(),
			&Default::default(),
			Default::default(),
			PovEstimationMode::MaxEncodedLen,
			&AnalysisChoice::default(),
			&AnalysisChoice::MedianSlopes,
			1_000_000,
			0,
		)
		.unwrap();

		// New Handlebars instance with helpers.
		let mut handlebars = handlebars::Handlebars::new();
		handlebars.register_helper("underscore", Box::new(UnderscoreHelper));
		handlebars.register_helper("join", Box::new(JoinHelper));
		// Don't HTML escape any characters.
		handlebars.register_escape_fn(|s| -> String { s.to_string() });

		for ((_pallet, _instance), results) in all_results.iter() {
			let hbs_data = TemplateData { benchmarks: results.clone(), ..Default::default() };

			let output = handlebars.render_template(&TEMPLATE, &hbs_data);
			assert!(output.is_ok());
			println!("{:?}", output);
		}
	}

	#[test]
	fn easy_log_16_works() {
		assert_eq!(easy_log_16(0), 0);
		assert_eq!(easy_log_16(1), 1);
		assert_eq!(easy_log_16(16), 1);
		assert_eq!(easy_log_16(17), 2);
		assert_eq!(easy_log_16(16u32.pow(2)), 2);
		assert_eq!(easy_log_16(16u32.pow(2) + 1), 3);
		assert_eq!(easy_log_16(16u32.pow(3)), 3);
		assert_eq!(easy_log_16(16u32.pow(3) + 1), 4);
		assert_eq!(easy_log_16(16u32.pow(4)), 4);
		assert_eq!(easy_log_16(16u32.pow(4) + 1), 5);
		assert_eq!(easy_log_16(16u32.pow(5)), 5);
		assert_eq!(easy_log_16(16u32.pow(5) + 1), 6);
		assert_eq!(easy_log_16(16u32.pow(6)), 6);
		assert_eq!(easy_log_16(16u32.pow(6) + 1), 7);
		assert_eq!(easy_log_16(16u32.pow(7)), 7);
		assert_eq!(easy_log_16(16u32.pow(7) + 1), 8);
		assert_eq!(easy_log_16(u32::MAX), 8);
	}
}