
sc_sysinfo/sysinfo.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

use crate::{ExecutionLimit, HwBench};

use sc_telemetry::SysInfo;
use sp_core::{sr25519, Pair};
use sp_io::crypto::sr25519_verify;

use core::f64;
use derive_more::From;
use rand::{seq::SliceRandom, Rng, RngCore};
use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
use std::{
	borrow::Cow,
	fmt::{self, Display, Formatter},
	fs::File,
	io::{Seek, SeekFrom, Write},
	ops::{Deref, DerefMut},
	path::{Path, PathBuf},
	sync::{Arc, Barrier},
	time::{Duration, Instant},
};

/// A single hardware metric.
#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)]
pub enum Metric {
	/// SR25519 signature verification.
	Sr25519Verify,
	/// Blake2-256 hashing algorithm.
	Blake2256,
	/// Blake2-256 hashing algorithm executed in parallel across multiple cores.
	Blake2256Parallel {
		/// The number of cores used for the parallel benchmark.
		num_cores: usize,
	},
	/// Copying data in RAM.
	MemCopy,
	/// Disk sequential write.
	DiskSeqWrite,
	/// Disk random write.
	DiskRndWrite,
}

/// Describes a checking failure for the hardware requirements.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct CheckFailure {
	/// The metric that failed the check.
	pub metric: Metric,
	/// The expected minimum value.
	pub expected: Throughput,
	/// The measured value.
	pub found: Throughput,
}

/// A list of metrics that failed to meet the minimum hardware requirements.
#[derive(Debug, Clone, PartialEq, From)]
pub struct CheckFailures(pub Vec<CheckFailure>);

impl Display for CheckFailures {
	fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
		write!(formatter, "Failed checks: ")?;
		for failure in &self.0 {
			write!(
				formatter,
				"{}(expected: {}, found: {}), ",
				failure.metric.name(),
				failure.expected,
				failure.found
			)?
		}
		Ok(())
	}
}

impl Metric {
	/// The category of the metric.
	pub fn category(&self) -> &'static str {
		match self {
			Self::Sr25519Verify | Self::Blake2256 | Self::Blake2256Parallel { .. } => "CPU",
			Self::MemCopy => "Memory",
			Self::DiskSeqWrite | Self::DiskRndWrite => "Disk",
		}
	}

	/// The name of the metric. It is always prefixed by the [`Self::category()`].
	pub fn name(&self) -> Cow<'static, str> {
		match self {
			Self::Sr25519Verify => Cow::Borrowed("SR25519-Verify"),
			Self::Blake2256 => Cow::Borrowed("BLAKE2-256"),
			Self::Blake2256Parallel { num_cores } =>
				Cow::Owned(format!("BLAKE2-256-Parallel-{}", num_cores)),
			Self::MemCopy => Cow::Borrowed("Copy"),
			Self::DiskSeqWrite => Cow::Borrowed("Seq Write"),
			Self::DiskRndWrite => Cow::Borrowed("Rnd Write"),
		}
	}
}

/// The unit in which the [`Throughput`] (bytes per second) is denoted.
pub enum Unit {
	GiBs,
	MiBs,
	KiBs,
}

impl fmt::Display for Unit {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.write_str(match self {
			Unit::GiBs => "GiBs",
			Unit::MiBs => "MiBs",
			Unit::KiBs => "KiBs",
		})
	}
}

/// Throughput as measured in bytes per second.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
pub struct Throughput(f64);

const KIBIBYTE: f64 = (1 << 10) as f64;
const MEBIBYTE: f64 = (1 << 20) as f64;
const GIBIBYTE: f64 = (1 << 30) as f64;

impl Throughput {
	/// Construct [`Self`] from kibibyte/s.
	pub fn from_kibs(kibs: f64) -> Throughput {
		Throughput(kibs * KIBIBYTE)
	}

	/// Construct [`Self`] from mebibyte/s.
	pub fn from_mibs(mibs: f64) -> Throughput {
		Throughput(mibs * MEBIBYTE)
	}

	/// Construct [`Self`] from gibibyte/s.
	pub fn from_gibs(gibs: f64) -> Throughput {
		Throughput(gibs * GIBIBYTE)
	}

	/// [`Self`] as number of byte/s.
	pub fn as_bytes(&self) -> f64 {
		self.0
	}

	/// [`Self`] as number of kibibyte/s.
	pub fn as_kibs(&self) -> f64 {
		self.0 / KIBIBYTE
	}

	/// [`Self`] as number of mebibyte/s.
	pub fn as_mibs(&self) -> f64 {
		self.0 / MEBIBYTE
	}

	/// [`Self`] as number of gibibyte/s.
	pub fn as_gibs(&self) -> f64 {
		self.0 / GIBIBYTE
	}

	/// Normalizes [`Self`] to use the largest unit possible.
	pub fn normalize(&self) -> (f64, Unit) {
		let bs = self.0;

		if bs >= GIBIBYTE {
			(self.as_gibs(), Unit::GiBs)
		} else if bs >= MEBIBYTE {
			(self.as_mibs(), Unit::MiBs)
		} else {
			(self.as_kibs(), Unit::KiBs)
		}
	}
}

impl fmt::Display for Throughput {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		let (value, unit) = self.normalize();
		write!(f, "{:.2?} {}", value, unit)
	}
}
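
// For illustration only (a sketch, not part of the original source): `Display`
// goes through `normalize()`, so a throughput is always printed in the largest
// unit that fits, with two decimal places. E.g.
//
//     Throughput::from_mibs(1029.0).to_string() == "1.00 GiBs"
//     Throughput::from_kibs(512.0).to_string()  == "512.00 KiBs"
//
// (the first case is also asserted by `throughput_works` in the tests module below).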

/// Serializes `Throughput` and uses MiBs as the unit.
pub fn serialize_throughput<S>(throughput: &Throughput, serializer: S) -> Result<S::Ok, S::Error>
where
	S: Serializer,
{
	serializer.serialize_u64(throughput.as_mibs() as u64)
}

/// Serializes `Option<Throughput>` and uses MiBs as the unit.
pub fn serialize_throughput_option<S>(
	maybe_throughput: &Option<Throughput>,
	serializer: S,
) -> Result<S::Ok, S::Error>
where
	S: Serializer,
{
	if let Some(throughput) = maybe_throughput {
		return serializer.serialize_some(&(throughput.as_mibs() as u64))
	}
	serializer.serialize_none()
}

/// Serializes throughput into MiBs and represents it as `f64`.
fn serialize_throughput_as_f64<S>(throughput: &Throughput, serializer: S) -> Result<S::Ok, S::Error>
where
	S: Serializer,
{
	serializer.serialize_f64(throughput.as_mibs())
}

struct ThroughputVisitor;
impl<'de> Visitor<'de> for ThroughputVisitor {
	type Value = Throughput;

	fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
		formatter.write_str("a value that is an f64")
	}

	fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
	where
		E: serde::de::Error,
	{
		Ok(Throughput::from_mibs(value))
	}
}

fn deserialize_throughput<'de, D>(deserializer: D) -> Result<Throughput, D::Error>
where
	D: Deserializer<'de>,
{
	deserializer.deserialize_f64(ThroughputVisitor)
}

/// Multiple requirements for the hardware.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Requirements(pub Vec<Requirement>);

/// A single requirement for the hardware.
#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)]
pub struct Requirement {
	/// The metric to measure.
	pub metric: Metric,
	/// The minimal throughput that needs to be achieved for this requirement.
	#[serde(
		serialize_with = "serialize_throughput_as_f64",
		deserialize_with = "deserialize_throughput"
	)]
	pub minimum: Throughput,
	/// Check this requirement only for relay chain validator nodes.
	#[serde(default)]
	#[serde(skip_serializing_if = "core::ops::Not::not")]
	pub validator_only: bool,
}
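
// For illustration only (a sketch, not part of the original source), assuming
// serde's default externally tagged representation for `Metric`: a `Requirement`
// round-trips through JSON with the minimum expressed in MiB/s as a float, e.g.
//
//     { "metric": "Blake2256", "minimum": 1000.0 }
//     { "metric": { "Blake2256Parallel": { "num_cores": 8 } }, "minimum": 1000.0, "validator_only": true }
//
// `validator_only` defaults to `false` when absent and is omitted on
// serialization whenever it is `false`.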

#[inline(always)]
pub(crate) fn benchmark<E>(
	name: &str,
	size: usize,
	max_iterations: usize,
	max_duration: Duration,
	mut run: impl FnMut() -> Result<(), E>,
) -> Result<Throughput, E> {
	// Run the benchmark once as a warmup to get the code into the L1 cache.
	run()?;

	// Then run it multiple times and average the result.
	let timestamp = Instant::now();
	let mut elapsed = Duration::default();
	let mut count = 0;
	for _ in 0..max_iterations {
		run()?;

		count += 1;
		elapsed = timestamp.elapsed();

		if elapsed >= max_duration {
			break
		}
	}

	let score = Throughput::from_kibs((size * count) as f64 / (elapsed.as_secs_f64() * 1024.0));
	log::trace!(
		"Calculated {} of {} in {} iterations in {}ms",
		name,
		score,
		count,
		elapsed.as_millis()
	);
	Ok(score)
}
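
// For illustration only (a sketch, not part of the original source): the score
// is simply `size * count` bytes divided by the elapsed time. E.g. hashing a
// 32 KiB buffer 1000 times in 0.1 s gives
//
//     (32768 * 1000) / (0.1 * 1024) = 320000 KiB/s ≈ 312.5 MiB/s
//
// which `Throughput::from_kibs` then stores as bytes per second.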

/// Gathers information about node's hardware and software.
pub fn gather_sysinfo() -> SysInfo {
	#[allow(unused_mut)]
	let mut sysinfo = SysInfo {
		cpu: None,
		memory: None,
		core_count: None,
		linux_kernel: None,
		linux_distro: None,
		is_virtual_machine: None,
	};

	#[cfg(target_os = "linux")]
	crate::sysinfo_linux::gather_linux_sysinfo(&mut sysinfo);

	#[cfg(target_os = "freebsd")]
	crate::sysinfo_freebsd::gather_freebsd_sysinfo(&mut sysinfo);

	sysinfo
}

#[inline(never)]
fn clobber_slice<T>(slice: &mut [T]) {
	assert!(!slice.is_empty());

	// Discourage the compiler from optimizing out our benchmarks.
	//
	// Volatile reads and writes are guaranteed to not be elided nor reordered,
	// so we can use them to effectively clobber a piece of memory and prevent
	// the compiler from optimizing out our technically unnecessary code.
	//
	// This is not totally bulletproof in theory, but should work in practice.
	//
	// SAFETY: We've checked that the slice is not empty, so reading and writing
	//         its first element is always safe.
	unsafe {
		let value = std::ptr::read_volatile(slice.as_ptr());
		std::ptr::write_volatile(slice.as_mut_ptr(), value);
	}
}

#[inline(never)]
fn clobber_value<T>(input: &mut T) {
	// Look into `clobber_slice` for a comment.
	unsafe {
		let value = std::ptr::read_volatile(input);
		std::ptr::write_volatile(input, value);
	}
}

/// A default [`ExecutionLimit`] that can be used to call [`benchmark_cpu`].
pub const DEFAULT_CPU_EXECUTION_LIMIT: ExecutionLimit =
	ExecutionLimit::Both { max_iterations: 4 * 1024, max_duration: Duration::from_millis(100) };

/// This benchmarks the single core CPU speed as measured by calculating BLAKE2b-256 hashes, in
/// bytes per second.
pub fn benchmark_cpu(limit: ExecutionLimit) -> Throughput {
	benchmark_cpu_parallelism(limit, 1)
}

/// This benchmarks the entire CPU speed as measured by calculating BLAKE2b-256 hashes, in bytes per
/// second. It spawns multiple threads to measure the throughput of the entire CPU and averages the
/// score obtained by each thread. If we have at least `refhw_num_cores` available then the
/// average throughput should be relatively close to the single core performance as measured by
/// calling this function with `refhw_num_cores` equal to 1.
pub fn benchmark_cpu_parallelism(limit: ExecutionLimit, refhw_num_cores: usize) -> Throughput {
	// In general the results of this benchmark are somewhat sensitive to how much
	// data we hash at the time. The smaller this is the *less* B/s we can hash,
	// the bigger this is the *more* B/s we can hash, up until a certain point
	// where we can achieve roughly ~100% of what the hasher can do. If we'd plot
	// this on a graph with the number of bytes we want to hash on the X axis
	// and the speed in B/s on the Y axis then we'd essentially see it grow
	// logarithmically.
	//
	// In practice however we might not always have enough data to hit the maximum
	// possible speed that the hasher can achieve, so the size set here should be
	// picked in such a way as to still measure how fast the hasher is at hashing,
	// but without hitting its theoretical maximum speed.
	const SIZE: usize = 32 * 1024;

	let ready_to_run_benchmark = Arc::new(Barrier::new(refhw_num_cores));
	let mut benchmark_threads = Vec::new();

	// Spawn a thread for each expected core and average the throughput for each of them.
	for _ in 0..refhw_num_cores {
		let ready_to_run_benchmark = ready_to_run_benchmark.clone();

		let handle = std::thread::spawn(move || {
			let mut buffer = Vec::new();
			buffer.resize(SIZE, 0x66);
			let mut hash = Default::default();

			let run = || -> Result<(), ()> {
				clobber_slice(&mut buffer);
				hash = sp_crypto_hashing::blake2_256(&buffer);
				clobber_slice(&mut hash);

				Ok(())
			};
			ready_to_run_benchmark.wait();
			benchmark("CPU score", SIZE, limit.max_iterations(), limit.max_duration(), run)
				.expect("benchmark cannot fail; qed")
		});
		benchmark_threads.push(handle);
	}

	let average_score = benchmark_threads
		.into_iter()
		.map(|thread| thread.join().map(|throughput| throughput.as_kibs()).unwrap_or(0.0))
		.sum::<f64>() /
		refhw_num_cores as f64;
	Throughput::from_kibs(average_score)
}

/// A default [`ExecutionLimit`] that can be used to call [`benchmark_memory`].
pub const DEFAULT_MEMORY_EXECUTION_LIMIT: ExecutionLimit =
	ExecutionLimit::Both { max_iterations: 32, max_duration: Duration::from_millis(100) };

/// This benchmarks the effective `memcpy` memory bandwidth available in bytes per second.
///
/// It doesn't technically measure the absolute maximum memory bandwidth available,
/// but that's fine, because real code most of the time isn't optimized to take
/// advantage of the full memory bandwidth either.
pub fn benchmark_memory(limit: ExecutionLimit) -> Throughput {
	// Ideally this should be at least as big as the CPU's L3 cache,
	// and it should be big enough so that the `memcpy` takes enough
	// time to be actually measurable.
	//
	// As long as it's big enough increasing it further won't change
	// the benchmark's results.
	const SIZE: usize = 64 * 1024 * 1024;

	let mut src = Vec::new();
	let mut dst = Vec::new();

	// Prefault the pages; we want to measure the memory bandwidth,
	// not how fast the kernel can supply us with fresh memory pages.
	src.resize(SIZE, 0x66);
	dst.resize(SIZE, 0x77);

	let run = || -> Result<(), ()> {
		clobber_slice(&mut src);
		clobber_slice(&mut dst);

		// SAFETY: Both vectors are of the same type and of the same size,
		//         so copying data between them is safe.
		unsafe {
			// We use `memcpy` directly here since `copy_from_slice` isn't actually
			// guaranteed to be turned into a `memcpy`.
			libc::memcpy(dst.as_mut_ptr().cast(), src.as_ptr().cast(), SIZE);
		}

		clobber_slice(&mut dst);
		clobber_slice(&mut src);

		Ok(())
	};

	benchmark("memory score", SIZE, limit.max_iterations(), limit.max_duration(), run)
		.expect("benchmark cannot fail; qed")
}

struct TemporaryFile {
	fp: Option<File>,
	path: PathBuf,
}

impl Drop for TemporaryFile {
	fn drop(&mut self) {
		let _ = self.fp.take();

		// Remove the file.
		//
		// This has to be done *after* the benchmark,
		// otherwise it changes the results as the data
		// doesn't actually get properly flushed to the disk,
		// since the file's not there anymore.
		if let Err(error) = std::fs::remove_file(&self.path) {
			log::warn!("Failed to remove the file used for the disk benchmark: {}", error);
		}
	}
}

impl Deref for TemporaryFile {
	type Target = File;
	fn deref(&self) -> &Self::Target {
		self.fp.as_ref().expect("`fp` is None only during `drop`")
	}
}

impl DerefMut for TemporaryFile {
	fn deref_mut(&mut self) -> &mut Self::Target {
		self.fp.as_mut().expect("`fp` is None only during `drop`")
	}
}

fn rng() -> rand_pcg::Pcg64 {
	rand_pcg::Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96)
}

fn random_data(size: usize) -> Vec<u8> {
	let mut buffer = Vec::new();
	buffer.resize(size, 0);
	rng().fill(&mut buffer[..]);
	buffer
}

/// A default [`ExecutionLimit`] that can be used to call [`benchmark_disk_sequential_writes`]
/// and [`benchmark_disk_random_writes`].
pub const DEFAULT_DISK_EXECUTION_LIMIT: ExecutionLimit =
	ExecutionLimit::Both { max_iterations: 32, max_duration: Duration::from_millis(300) };

/// Benchmarks the disk sequential write speed inside the given `directory`, in bytes per second.
pub fn benchmark_disk_sequential_writes(
	limit: ExecutionLimit,
	directory: &Path,
) -> Result<Throughput, String> {
	const SIZE: usize = 64 * 1024 * 1024;

	let buffer = random_data(SIZE);
	let path = directory.join(".disk_bench_seq_wr.tmp");

	let fp =
		File::create(&path).map_err(|error| format!("failed to create a test file: {}", error))?;

	let mut fp = TemporaryFile { fp: Some(fp), path };

	fp.sync_all()
		.map_err(|error| format!("failed to fsync the test file: {}", error))?;

	let run = || {
		// Just dump everything to the disk in one go.
		fp.write_all(&buffer)
			.map_err(|error| format!("failed to write to the test file: {}", error))?;

		// And then make sure it was actually written to disk.
		fp.sync_all()
			.map_err(|error| format!("failed to fsync the test file: {}", error))?;

		// Rewind to the beginning for the next iteration of the benchmark.
		fp.seek(SeekFrom::Start(0))
			.map_err(|error| format!("failed to seek to the start of the test file: {}", error))?;

		Ok(())
	};

	benchmark(
		"disk sequential write score",
		SIZE,
		limit.max_iterations(),
		limit.max_duration(),
		run,
	)
}

/// Benchmarks the disk random write speed inside the given `directory`, in bytes per second.
pub fn benchmark_disk_random_writes(
	limit: ExecutionLimit,
	directory: &Path,
) -> Result<Throughput, String> {
	const SIZE: usize = 64 * 1024 * 1024;

	let buffer = random_data(SIZE);
	let path = directory.join(".disk_bench_rand_wr.tmp");

	let fp =
		File::create(&path).map_err(|error| format!("failed to create a test file: {}", error))?;

	let mut fp = TemporaryFile { fp: Some(fp), path };

	// Since we want to test random writes we need an existing file
	// through which we can seek, so here we just populate it with some data.
	fp.write_all(&buffer)
		.map_err(|error| format!("failed to write to the test file: {}", error))?;

	fp.sync_all()
		.map_err(|error| format!("failed to fsync the test file: {}", error))?;

	// Generate a list of random positions at which we'll issue writes.
	let mut positions = Vec::with_capacity(SIZE / 4096);
	{
		let mut position = 0;
		while position < SIZE {
			positions.push(position);
			position += 4096;
		}
	}

	positions.shuffle(&mut rng());

	let run = || {
		for &position in &positions {
			fp.seek(SeekFrom::Start(position as u64))
				.map_err(|error| format!("failed to seek in the test file: {}", error))?;

			// Here we deliberately only write half of the chunk since we don't
			// want the OS' disk scheduler to coalesce our writes into one single
			// sequential write.
			//
			// Also the chunk's size is deliberately exactly half of a modern disk's
			// sector size to trigger an RMW cycle.
			let chunk = &buffer[position..position + 2048];
			fp.write_all(&chunk)
				.map_err(|error| format!("failed to write to the test file: {}", error))?;
		}

		fp.sync_all()
			.map_err(|error| format!("failed to fsync the test file: {}", error))?;

		Ok(())
	};

	// We only wrote half of the bytes hence `SIZE / 2`.
	benchmark(
		"disk random write score",
		SIZE / 2,
		limit.max_iterations(),
		limit.max_duration(),
		run,
	)
}

/// Benchmarks the verification speed of sr25519 signatures.
///
/// Returns the throughput in B/s by convention.
/// The values are rather small (0.4-0.8) so it is advised to convert them into KB/s.
pub fn benchmark_sr25519_verify(limit: ExecutionLimit) -> Throughput {
	const INPUT_SIZE: usize = 32;
	const ITERATION_SIZE: usize = 2048;
	let pair = sr25519::Pair::from_string("//Alice", None).unwrap();

	let mut rng = rng();
	let mut msgs = Vec::new();
	let mut sigs = Vec::new();

	for _ in 0..ITERATION_SIZE {
		let mut msg = vec![0u8; INPUT_SIZE];
		rng.fill_bytes(&mut msg[..]);

		sigs.push(pair.sign(&msg));
		msgs.push(msg);
	}

	let run = || -> Result<(), String> {
		for (sig, msg) in sigs.iter().zip(msgs.iter()) {
			let mut ok = sr25519_verify(&sig, &msg[..], &pair.public());
			clobber_value(&mut ok);
		}
		Ok(())
	};
	benchmark(
		"sr25519 verification score",
		INPUT_SIZE * ITERATION_SIZE,
		limit.max_iterations(),
		limit.max_duration(),
		run,
	)
	.expect("sr25519 verification cannot fail; qed")
}

/// Benchmarks the hardware and returns the results of those benchmarks.
///
/// Optionally accepts a path to a `scratch_directory` to use to benchmark the
/// disk. Also accepts the `requirements`, which are used to determine how many
/// cores the parallel CPU benchmark should run on.
pub fn gather_hwbench(scratch_directory: Option<&Path>, requirements: &Requirements) -> HwBench {
	let cpu_hashrate_score = benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT);
	let (parallel_cpu_hashrate_score, parallel_cpu_cores) = requirements
		.0
		.iter()
		.filter_map(|req| {
			if let Metric::Blake2256Parallel { num_cores } = req.metric {
				Some((benchmark_cpu_parallelism(DEFAULT_CPU_EXECUTION_LIMIT, num_cores), num_cores))
			} else {
				None
			}
		})
		.next()
		.unwrap_or((cpu_hashrate_score, 1));
	#[allow(unused_mut)]
	let mut hwbench = HwBench {
		cpu_hashrate_score,
		parallel_cpu_hashrate_score,
		parallel_cpu_cores,
		memory_memcpy_score: benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT),
		disk_sequential_write_score: None,
		disk_random_write_score: None,
	};

	if let Some(scratch_directory) = scratch_directory {
		hwbench.disk_sequential_write_score =
			match benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory)
			{
				Ok(score) => Some(score),
				Err(error) => {
					log::warn!("Failed to run the sequential write disk benchmark: {}", error);
					None
				},
			};

		hwbench.disk_random_write_score =
			match benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory) {
				Ok(score) => Some(score),
				Err(error) => {
					log::warn!("Failed to run the random write disk benchmark: {}", error);
					None
				},
			};
	}

	hwbench
}

impl Requirements {
	/// Whether the hardware requirements are met by the provided benchmark results.
	pub fn check_hardware(
		&self,
		hwbench: &HwBench,
		is_rc_authority: bool,
	) -> Result<(), CheckFailures> {
		let mut failures = Vec::new();
		for requirement in self.0.iter() {
			if requirement.validator_only && !is_rc_authority {
				continue
			}

			match requirement.metric {
				Metric::Blake2256 =>
					if requirement.minimum > hwbench.cpu_hashrate_score {
						failures.push(CheckFailure {
							metric: requirement.metric,
							expected: requirement.minimum,
							found: hwbench.cpu_hashrate_score,
						});
					},
				Metric::Blake2256Parallel { .. } =>
					if requirement.minimum > hwbench.parallel_cpu_hashrate_score {
						failures.push(CheckFailure {
							metric: requirement.metric,
							expected: requirement.minimum,
							found: hwbench.parallel_cpu_hashrate_score,
						});
					},
				Metric::MemCopy =>
					if requirement.minimum > hwbench.memory_memcpy_score {
						failures.push(CheckFailure {
							metric: requirement.metric,
							expected: requirement.minimum,
							found: hwbench.memory_memcpy_score,
						});
					},
				Metric::DiskSeqWrite =>
					if let Some(score) = hwbench.disk_sequential_write_score {
						if requirement.minimum > score {
							failures.push(CheckFailure {
								metric: requirement.metric,
								expected: requirement.minimum,
								found: score,
							});
						}
					},
				Metric::DiskRndWrite =>
					if let Some(score) = hwbench.disk_random_write_score {
						if requirement.minimum > score {
							failures.push(CheckFailure {
								metric: requirement.metric,
								expected: requirement.minimum,
								found: score,
							});
						}
					},
				Metric::Sr25519Verify => {},
			}
		}
		if failures.is_empty() {
			Ok(())
		} else {
			Err(failures.into())
		}
	}
}
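
// A minimal usage sketch (not part of the original source): `reqs` is a
// hypothetical `Requirements` value, e.g. parsed from a JSON file.
//
//     let hwbench = gather_hwbench(Some(Path::new("/tmp")), &reqs);
//     if let Err(failures) = reqs.check_hardware(&hwbench, /* is_rc_authority */ true) {
//         log::warn!("Hardware does not meet the requirements: {}", failures);
//     }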

#[cfg(test)]
mod tests {
	use super::*;
	use sp_runtime::assert_eq_error_rate_float;

	#[cfg(target_os = "linux")]
	#[test]
	fn test_gather_sysinfo_linux() {
		let sysinfo = gather_sysinfo();
		assert!(sysinfo.cpu.unwrap().len() > 0);
		assert!(sysinfo.core_count.unwrap() > 0);
		assert!(sysinfo.memory.unwrap() > 0);
		assert_ne!(sysinfo.is_virtual_machine, None);
		assert_ne!(sysinfo.linux_kernel, None);
		assert_ne!(sysinfo.linux_distro, None);
	}

	#[test]
	fn test_benchmark_cpu() {
		assert!(benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT) > Throughput::from_mibs(0.0));
	}

	#[test]
	fn test_benchmark_parallel_cpu() {
		assert!(
			benchmark_cpu_parallelism(DEFAULT_CPU_EXECUTION_LIMIT, 8) > Throughput::from_mibs(0.0)
		);
	}

	#[test]
	fn test_benchmark_memory() {
		assert!(benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT) > Throughput::from_mibs(0.0));
	}

	#[test]
	fn test_benchmark_disk_sequential_writes() {
		assert!(
			benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() >
				Throughput::from_mibs(0.0)
		);
	}

	#[test]
	fn test_benchmark_disk_random_writes() {
		assert!(
			benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() >
				Throughput::from_mibs(0.0)
		);
	}

	#[test]
	fn test_benchmark_sr25519_verify() {
		assert!(
			benchmark_sr25519_verify(ExecutionLimit::MaxIterations(1)) > Throughput::from_mibs(0.0)
		);
	}

	/// Test the [`Throughput`].
	#[test]
	fn throughput_works() {
		/// Float precision.
		const EPS: f64 = 0.1;
		let gib = Throughput::from_gibs(14.324);

		assert_eq_error_rate_float!(14.324, gib.as_gibs(), EPS);
		assert_eq_error_rate_float!(14667.776, gib.as_mibs(), EPS);
		assert_eq_error_rate_float!(14667.776 * 1024.0, gib.as_kibs(), EPS);
		assert_eq!("14.32 GiBs", gib.to_string());

		let mib = Throughput::from_mibs(1029.0);
		assert_eq!("1.00 GiBs", mib.to_string());
	}
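
	/// Illustrative sketch (not exhaustive): exercise `check_hardware` against a
	/// hand-built [`HwBench`] with a single `Blake2256` requirement, once below
	/// and once above the measured score.
	#[test]
	fn check_hardware_sketch_works() {
		let hwbench = HwBench {
			cpu_hashrate_score: Throughput::from_mibs(1000.0),
			parallel_cpu_hashrate_score: Throughput::from_mibs(1000.0),
			parallel_cpu_cores: 1,
			memory_memcpy_score: Throughput::from_gibs(10.0),
			disk_sequential_write_score: None,
			disk_random_write_score: None,
		};

		// A minimum below the measured score passes.
		let ok = Requirements(vec![Requirement {
			metric: Metric::Blake2256,
			minimum: Throughput::from_mibs(500.0),
			validator_only: false,
		}]);
		assert!(ok.check_hardware(&hwbench, false).is_ok());

		// A minimum above the measured score is reported as a `CheckFailure`.
		let too_high = Requirements(vec![Requirement {
			metric: Metric::Blake2256,
			minimum: Throughput::from_mibs(2000.0),
			validator_only: false,
		}]);
		let failures = too_high.check_hardware(&hwbench, false).unwrap_err();
		assert_eq!(failures.0.len(), 1);
		assert_eq!(failures.0[0].metric, Metric::Blake2256);
	}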

	/// Test the [`HwBench`] serialization.
	#[test]
	fn hwbench_serialize_works() {
		let hwbench = HwBench {
			cpu_hashrate_score: Throughput::from_gibs(1.32),
			parallel_cpu_hashrate_score: Throughput::from_gibs(1.32),
			parallel_cpu_cores: 4,
			memory_memcpy_score: Throughput::from_kibs(9342.432),
			disk_sequential_write_score: Some(Throughput::from_kibs(4332.12)),
			disk_random_write_score: None,
		};

		let serialized = serde_json::to_string(&hwbench).unwrap();
		// Throughput from all of the benchmarks should be converted to MiBs.
		assert_eq!(serialized, "{\"cpu_hashrate_score\":1351,\"parallel_cpu_hashrate_score\":1351,\"parallel_cpu_cores\":4,\"memory_memcpy_score\":9,\"disk_sequential_write_score\":4}");
	}
874}