sc_sysinfo/sysinfo.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

use crate::{ExecutionLimit, HwBench};

use sc_telemetry::SysInfo;
use sp_core::{sr25519, Pair};
use sp_io::crypto::sr25519_verify;

use core::f64;
use derive_more::From;
use rand::{seq::SliceRandom, Rng, RngCore};
use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
use std::{
	borrow::Cow,
	fmt::{self, Display, Formatter},
	fs::File,
	io::{Seek, SeekFrom, Write},
	ops::{Deref, DerefMut},
	path::{Path, PathBuf},
	sync::{Arc, Barrier},
	time::{Duration, Instant},
};

/// A single hardware metric.
#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)]
pub enum Metric {
	/// SR25519 signature verification.
	Sr25519Verify,
	/// Blake2-256 hashing algorithm.
	Blake2256,
	/// Blake2-256 hashing algorithm executed in parallel.
	Blake2256Parallel {
		/// The number of cores to hash on in parallel.
		num_cores: usize,
	},
	/// Copying data in RAM.
	MemCopy,
	/// Disk sequential write.
	DiskSeqWrite,
	/// Disk random write.
	DiskRndWrite,
}

/// Describes a checking failure for the hardware requirements.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct CheckFailure {
	/// The metric that failed the check.
	pub metric: Metric,
	/// The expected minimum value.
	pub expected: Throughput,
	/// The measured value.
	pub found: Throughput,
}

/// A list of metrics that failed to meet the minimum hardware requirements.
#[derive(Debug, Clone, PartialEq, From)]
pub struct CheckFailures(pub Vec<CheckFailure>);

impl Display for CheckFailures {
	fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
		write!(formatter, "Failed checks: ")?;
		for failure in &self.0 {
			write!(
				formatter,
				"{}(expected: {}, found: {}), ",
				failure.metric.name(),
				failure.expected,
				failure.found
			)?
		}
		Ok(())
	}
}

impl Metric {
	/// The category of the metric.
	pub fn category(&self) -> &'static str {
		match self {
			Self::Sr25519Verify | Self::Blake2256 | Self::Blake2256Parallel { .. } => "CPU",
			Self::MemCopy => "Memory",
			Self::DiskSeqWrite | Self::DiskRndWrite => "Disk",
		}
	}

	/// The name of the metric. It is always prefixed by its [`Self::category`].
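	///
	/// A minimal usage sketch (illustrative only; assumes `Metric` is re-exported at the
	/// crate root):
	///
	/// ```ignore
	/// use sc_sysinfo::Metric;
	///
	/// let metric = Metric::Blake2256Parallel { num_cores: 8 };
	/// assert_eq!(metric.category(), "CPU");
	/// assert_eq!(metric.name(), "BLAKE2-256-Parallel-8");
	/// ```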
	pub fn name(&self) -> Cow<'static, str> {
		match self {
			Self::Sr25519Verify => Cow::Borrowed("SR25519-Verify"),
			Self::Blake2256 => Cow::Borrowed("BLAKE2-256"),
			Self::Blake2256Parallel { num_cores } =>
				Cow::Owned(format!("BLAKE2-256-Parallel-{}", num_cores)),
			Self::MemCopy => Cow::Borrowed("Copy"),
			Self::DiskSeqWrite => Cow::Borrowed("Seq Write"),
			Self::DiskRndWrite => Cow::Borrowed("Rnd Write"),
		}
	}
}

/// The unit in which the [`Throughput`] (bytes per second) is denoted.
pub enum Unit {
	GiBs,
	MiBs,
	KiBs,
}

impl fmt::Display for Unit {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.write_str(match self {
			Unit::GiBs => "GiBs",
			Unit::MiBs => "MiBs",
			Unit::KiBs => "KiBs",
		})
	}
}

/// Throughput as measured in bytes per second.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
pub struct Throughput(f64);

const KIBIBYTE: f64 = (1 << 10) as f64;
const MEBIBYTE: f64 = (1 << 20) as f64;
const GIBIBYTE: f64 = (1 << 30) as f64;

impl Throughput {
	/// Construct [`Self`] from kibibyte/s.
	pub fn from_kibs(kibs: f64) -> Throughput {
		Throughput(kibs * KIBIBYTE)
	}

	/// Construct [`Self`] from mebibyte/s.
	pub fn from_mibs(mibs: f64) -> Throughput {
		Throughput(mibs * MEBIBYTE)
	}

	/// Construct [`Self`] from gibibyte/s.
	pub fn from_gibs(gibs: f64) -> Throughput {
		Throughput(gibs * GIBIBYTE)
	}

	/// [`Self`] as number of byte/s.
	pub fn as_bytes(&self) -> f64 {
		self.0
	}

	/// [`Self`] as number of kibibyte/s.
	pub fn as_kibs(&self) -> f64 {
		self.0 / KIBIBYTE
	}

	/// [`Self`] as number of mebibyte/s.
	pub fn as_mibs(&self) -> f64 {
		self.0 / MEBIBYTE
	}

	/// [`Self`] as number of gibibyte/s.
	pub fn as_gibs(&self) -> f64 {
		self.0 / GIBIBYTE
	}

	/// Normalizes [`Self`] to use the largest unit possible.
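	///
	/// A small illustrative sketch of the unit selection (not part of the original docs):
	///
	/// ```ignore
	/// use sc_sysinfo::{Throughput, Unit};
	///
	/// // 2048 MiB/s is exactly 2 GiB/s, so the largest fitting unit is GiB/s.
	/// let (value, unit) = Throughput::from_mibs(2048.0).normalize();
	/// assert_eq!(value, 2.0);
	/// assert!(matches!(unit, Unit::GiBs));
	/// ```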
	pub fn normalize(&self) -> (f64, Unit) {
		let bs = self.0;

		if bs >= GIBIBYTE {
			(self.as_gibs(), Unit::GiBs)
		} else if bs >= MEBIBYTE {
			(self.as_mibs(), Unit::MiBs)
		} else {
			(self.as_kibs(), Unit::KiBs)
		}
	}
}

impl fmt::Display for Throughput {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		let (value, unit) = self.normalize();
		write!(f, "{:.2?} {}", value, unit)
	}
}

/// Serializes `Throughput` and uses MiBs as the unit.
pub fn serialize_throughput<S>(throughput: &Throughput, serializer: S) -> Result<S::Ok, S::Error>
where
	S: Serializer,
{
	serializer.serialize_u64(throughput.as_mibs() as u64)
}

/// Serializes `Option<Throughput>` and uses MiBs as the unit.
pub fn serialize_throughput_option<S>(
	maybe_throughput: &Option<Throughput>,
	serializer: S,
) -> Result<S::Ok, S::Error>
where
	S: Serializer,
{
	if let Some(throughput) = maybe_throughput {
		return serializer.serialize_some(&(throughput.as_mibs() as u64))
	}
	serializer.serialize_none()
}

/// Serializes throughput into MiBs and represents it as `f64`.
fn serialize_throughput_as_f64<S>(throughput: &Throughput, serializer: S) -> Result<S::Ok, S::Error>
where
	S: Serializer,
{
	serializer.serialize_f64(throughput.as_mibs())
}

struct ThroughputVisitor;

impl<'de> Visitor<'de> for ThroughputVisitor {
	type Value = Throughput;

	fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
		formatter.write_str("a value that is an f64")
	}

	fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
	where
		E: serde::de::Error,
	{
		Ok(Throughput::from_mibs(value))
	}
}

fn deserialize_throughput<'de, D>(deserializer: D) -> Result<Throughput, D::Error>
where
	D: Deserializer<'de>,
{
	deserializer.deserialize_f64(ThroughputVisitor)
}

/// Multiple requirements for the hardware.
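///
/// A minimal deserialization sketch (illustrative; assumes `serde_json` is available and
/// that the types are re-exported at the crate root):
///
/// ```ignore
/// use sc_sysinfo::Requirements;
///
/// // `minimum` is expressed in MiB/s, matching the (de)serializers above.
/// let json = r#"[
///     { "metric": "Blake2256", "minimum": 1000.0 },
///     { "metric": { "Blake2256Parallel": { "num_cores": 8 } }, "minimum": 1000.0, "validator_only": true }
/// ]"#;
///
/// let requirements: Requirements = serde_json::from_str(json).expect("valid requirements JSON");
/// assert_eq!(requirements.0.len(), 2);
/// ```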
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Requirements(pub Vec<Requirement>);

/// A single requirement for the hardware.
#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)]
pub struct Requirement {
	/// The metric to measure.
	pub metric: Metric,
	/// The minimal throughput that needs to be achieved for this requirement.
	#[serde(
		serialize_with = "serialize_throughput_as_f64",
		deserialize_with = "deserialize_throughput"
	)]
	pub minimum: Throughput,
	/// Check this requirement only for relay chain validator nodes.
	#[serde(default)]
	#[serde(skip_serializing_if = "core::ops::Not::not")]
	pub validator_only: bool,
}

/// Runs `run` once as a warm-up and then repeatedly until either `max_iterations` or
/// `max_duration` is reached, returning the measured throughput assuming `size` bytes
/// are processed per call.
#[inline(always)]
pub(crate) fn benchmark<E>(
	name: &str,
	size: usize,
	max_iterations: usize,
	max_duration: Duration,
	mut run: impl FnMut() -> Result<(), E>,
) -> Result<Throughput, E> {
	// Run the benchmark once as a warmup to get the code into the L1 cache.
	run()?;

	// Then run it multiple times and average the result.
	let timestamp = Instant::now();
	let mut elapsed = Duration::default();
	let mut count = 0;
	for _ in 0..max_iterations {
		run()?;

		count += 1;
		elapsed = timestamp.elapsed();

		if elapsed >= max_duration {
			break
		}
	}

	let score = Throughput::from_kibs((size * count) as f64 / (elapsed.as_secs_f64() * 1024.0));
	log::trace!(
		"Calculated {} of {} in {} iterations in {}ms",
		name,
		score,
		count,
		elapsed.as_millis()
	);
	Ok(score)
}

/// Gathers information about the node's hardware and software.
pub fn gather_sysinfo() -> SysInfo {
	#[allow(unused_mut)]
	let mut sysinfo = SysInfo {
		cpu: None,
		memory: None,
		core_count: None,
		linux_kernel: None,
		linux_distro: None,
		is_virtual_machine: None,
	};

	#[cfg(target_os = "linux")]
	crate::sysinfo_linux::gather_linux_sysinfo(&mut sysinfo);

	sysinfo
}

#[inline(never)]
fn clobber_slice<T>(slice: &mut [T]) {
	assert!(!slice.is_empty());

	// Discourage the compiler from optimizing out our benchmarks.
	//
	// Volatile reads and writes are guaranteed to not be elided nor reordered,
	// so we can use them to effectively clobber a piece of memory and prevent
	// the compiler from optimizing out our technically unnecessary code.
	//
	// This is not totally bulletproof in theory, but should work in practice.
	//
	// SAFETY: We've checked that the slice is not empty, so reading and writing
	//         its first element is always safe.
	unsafe {
		let value = std::ptr::read_volatile(slice.as_ptr());
		std::ptr::write_volatile(slice.as_mut_ptr(), value);
	}
}

#[inline(never)]
fn clobber_value<T>(input: &mut T) {
	// See `clobber_slice` for an explanation of why this is done.
	unsafe {
		let value = std::ptr::read_volatile(input);
		std::ptr::write_volatile(input, value);
	}
}

/// A default [`ExecutionLimit`] that can be used to call [`benchmark_cpu`].
pub const DEFAULT_CPU_EXECUTION_LIMIT: ExecutionLimit =
	ExecutionLimit::Both { max_iterations: 4 * 1024, max_duration: Duration::from_millis(100) };

/// This benchmarks the single core CPU speed as measured by calculating BLAKE2b-256 hashes, in
/// bytes per second.
pub fn benchmark_cpu(limit: ExecutionLimit) -> Throughput {
	benchmark_cpu_parallelism(limit, 1)
}

/// This benchmarks the entire CPU speed as measured by calculating BLAKE2b-256 hashes, in bytes
/// per second. It spawns multiple threads to measure the throughput of the entire CPU and
/// averages the score obtained by each thread. If at least `refhw_num_cores` physical cores are
/// available, the average throughput should be relatively close to the single-core performance
/// as measured by calling this function with `refhw_num_cores` equal to 1.
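///
/// A minimal usage sketch (illustrative; `8` is an arbitrary core count, not a recommendation):
///
/// ```ignore
/// use sc_sysinfo::{benchmark_cpu_parallelism, DEFAULT_CPU_EXECUTION_LIMIT};
///
/// // Average per-core BLAKE2-256 hashing throughput across 8 threads.
/// let score = benchmark_cpu_parallelism(DEFAULT_CPU_EXECUTION_LIMIT, 8);
/// println!("Parallel CPU score: {}", score);
/// ```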
pub fn benchmark_cpu_parallelism(limit: ExecutionLimit, refhw_num_cores: usize) -> Throughput {
	// In general the results of this benchmark are somewhat sensitive to how much
	// data we hash at the time. The smaller this is the *less* B/s we can hash,
	// the bigger this is the *more* B/s we can hash, up until a certain point
	// where we can achieve roughly ~100% of what the hasher can do. If we'd plot
	// this on a graph with the number of bytes we want to hash on the X axis
	// and the speed in B/s on the Y axis then we'd essentially see it grow
	// logarithmically.
	//
	// In practice however we might not always have enough data to hit the maximum
	// possible speed that the hasher can achieve, so the size set here should be
	// picked in such a way as to still measure how fast the hasher is at hashing,
	// but without hitting its theoretical maximum speed.
	const SIZE: usize = 32 * 1024;

	let ready_to_run_benchmark = Arc::new(Barrier::new(refhw_num_cores));
	let mut benchmark_threads = Vec::new();

	// Spawn a thread for each expected core and average the throughput for each of them.
	for _ in 0..refhw_num_cores {
		let ready_to_run_benchmark = ready_to_run_benchmark.clone();

		let handle = std::thread::spawn(move || {
			let mut buffer = Vec::new();
			buffer.resize(SIZE, 0x66);
			let mut hash = Default::default();

			let run = || -> Result<(), ()> {
				clobber_slice(&mut buffer);
				hash = sp_crypto_hashing::blake2_256(&buffer);
				clobber_slice(&mut hash);

				Ok(())
			};

			// Wait until all threads have been spawned, so they all start hashing at
			// roughly the same time.
			ready_to_run_benchmark.wait();
			benchmark("CPU score", SIZE, limit.max_iterations(), limit.max_duration(), run)
				.expect("benchmark cannot fail; qed")
		});
		benchmark_threads.push(handle);
	}

	let average_score = benchmark_threads
		.into_iter()
		.map(|thread| thread.join().map(|throughput| throughput.as_kibs()).unwrap_or(0.0))
		.sum::<f64>() /
		refhw_num_cores as f64;
	Throughput::from_kibs(average_score)
}

/// A default [`ExecutionLimit`] that can be used to call [`benchmark_memory`].
pub const DEFAULT_MEMORY_EXECUTION_LIMIT: ExecutionLimit =
	ExecutionLimit::Both { max_iterations: 32, max_duration: Duration::from_millis(100) };

/// This benchmarks the effective `memcpy` memory bandwidth available in bytes per second.
///
/// It doesn't technically measure the absolute maximum memory bandwidth available,
/// but that's fine, because real code most of the time isn't optimized to take
/// advantage of the full memory bandwidth either.
pub fn benchmark_memory(limit: ExecutionLimit) -> Throughput {
	// Ideally this should be at least as big as the CPU's L3 cache,
	// and it should be big enough so that the `memcpy` takes enough
	// time to be actually measurable.
	//
	// As long as it's big enough, increasing it further won't change
	// the benchmark's results.
	const SIZE: usize = 64 * 1024 * 1024;

	let mut src = Vec::new();
	let mut dst = Vec::new();

	// Prefault the pages; we want to measure the memory bandwidth,
	// not how fast the kernel can supply us with fresh memory pages.
	src.resize(SIZE, 0x66);
	dst.resize(SIZE, 0x77);

	let run = || -> Result<(), ()> {
		clobber_slice(&mut src);
		clobber_slice(&mut dst);

		// SAFETY: Both vectors are of the same type and of the same size,
		//         so copying data between them is safe.
		unsafe {
			// We use `memcpy` directly here since `copy_from_slice` isn't actually
			// guaranteed to be turned into a `memcpy`.
			libc::memcpy(dst.as_mut_ptr().cast(), src.as_ptr().cast(), SIZE);
		}

		clobber_slice(&mut dst);
		clobber_slice(&mut src);

		Ok(())
	};

	benchmark("memory score", SIZE, limit.max_iterations(), limit.max_duration(), run)
		.expect("benchmark cannot fail; qed")
}

struct TemporaryFile {
	fp: Option<File>,
	path: PathBuf,
}

impl Drop for TemporaryFile {
	fn drop(&mut self) {
		let _ = self.fp.take();

		// Remove the file.
		//
		// This has to be done *after* the benchmark,
		// otherwise it changes the results as the data
		// doesn't actually get properly flushed to the disk,
		// since the file's not there anymore.
		if let Err(error) = std::fs::remove_file(&self.path) {
			log::warn!("Failed to remove the file used for the disk benchmark: {}", error);
		}
	}
}

impl Deref for TemporaryFile {
	type Target = File;
	fn deref(&self) -> &Self::Target {
		self.fp.as_ref().expect("`fp` is None only during `drop`")
	}
}

impl DerefMut for TemporaryFile {
	fn deref_mut(&mut self) -> &mut Self::Target {
		self.fp.as_mut().expect("`fp` is None only during `drop`")
	}
}

/// Returns a PRNG seeded with fixed constants, so that every benchmark run
/// operates on identical pseudo-random data.
fn rng() -> rand_pcg::Pcg64 {
	rand_pcg::Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96)
}

/// Generates `size` bytes of deterministic pseudo-random data.
fn random_data(size: usize) -> Vec<u8> {
	let mut buffer = Vec::new();
	buffer.resize(size, 0);
	rng().fill(&mut buffer[..]);
	buffer
}

/// A default [`ExecutionLimit`] that can be used to call [`benchmark_disk_sequential_writes`]
/// and [`benchmark_disk_random_writes`].
pub const DEFAULT_DISK_EXECUTION_LIMIT: ExecutionLimit =
	ExecutionLimit::Both { max_iterations: 32, max_duration: Duration::from_millis(300) };

/// Benchmarks disk write speed by repeatedly writing a large buffer sequentially to a
/// test file in `directory` and fsync-ing it, in bytes per second.
pub fn benchmark_disk_sequential_writes(
	limit: ExecutionLimit,
	directory: &Path,
) -> Result<Throughput, String> {
	const SIZE: usize = 64 * 1024 * 1024;

	let buffer = random_data(SIZE);
	let path = directory.join(".disk_bench_seq_wr.tmp");

	let fp =
		File::create(&path).map_err(|error| format!("failed to create a test file: {}", error))?;

	let mut fp = TemporaryFile { fp: Some(fp), path };

	fp.sync_all()
		.map_err(|error| format!("failed to fsync the test file: {}", error))?;

	let run = || {
		// Just dump everything to the disk in one go.
		fp.write_all(&buffer)
			.map_err(|error| format!("failed to write to the test file: {}", error))?;

		// And then make sure it was actually written to disk.
		fp.sync_all()
			.map_err(|error| format!("failed to fsync the test file: {}", error))?;

		// Rewind to the beginning for the next iteration of the benchmark.
		fp.seek(SeekFrom::Start(0))
			.map_err(|error| format!("failed to seek to the start of the test file: {}", error))?;

		Ok(())
	};

	benchmark(
		"disk sequential write score",
		SIZE,
		limit.max_iterations(),
		limit.max_duration(),
		run,
	)
}

/// Benchmarks disk write speed by writing small chunks at random 4 KiB-aligned positions
/// of a test file in `directory` and fsync-ing it, in bytes per second.
pub fn benchmark_disk_random_writes(
	limit: ExecutionLimit,
	directory: &Path,
) -> Result<Throughput, String> {
	const SIZE: usize = 64 * 1024 * 1024;

	let buffer = random_data(SIZE);
	let path = directory.join(".disk_bench_rand_wr.tmp");

	let fp =
		File::create(&path).map_err(|error| format!("failed to create a test file: {}", error))?;

	let mut fp = TemporaryFile { fp: Some(fp), path };

	// Since we want to test random writes we need an existing file
	// through which we can seek, so here we just populate it with some data.
	fp.write_all(&buffer)
		.map_err(|error| format!("failed to write to the test file: {}", error))?;

	fp.sync_all()
		.map_err(|error| format!("failed to fsync the test file: {}", error))?;

	// Generate a list of random positions at which we'll issue writes.
	let mut positions = Vec::with_capacity(SIZE / 4096);
	{
		let mut position = 0;
		while position < SIZE {
			positions.push(position);
			position += 4096;
		}
	}

	positions.shuffle(&mut rng());

	let run = || {
		for &position in &positions {
			fp.seek(SeekFrom::Start(position as u64))
				.map_err(|error| format!("failed to seek in the test file: {}", error))?;

			// Here we deliberately only write half of each chunk since we don't
			// want the OS' disk scheduler to coalesce our writes into one single
			// sequential write.
			//
			// Also the chunk's size is deliberately exactly half of a modern disk's
			// sector size to trigger an RMW cycle.
			let chunk = &buffer[position..position + 2048];
			fp.write_all(chunk)
				.map_err(|error| format!("failed to write to the test file: {}", error))?;
		}

		fp.sync_all()
			.map_err(|error| format!("failed to fsync the test file: {}", error))?;

		Ok(())
	};

	// We only wrote half of the bytes hence `SIZE / 2`.
	benchmark(
		"disk random write score",
		SIZE / 2,
		limit.max_iterations(),
		limit.max_duration(),
		run,
	)
}

/// Benchmarks the verification speed of sr25519 signatures.
///
/// Returns the throughput in B/s by convention.
/// The values are rather small (0.4-0.8) so it is advised to convert them into KB/s.
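///
/// A minimal usage sketch (illustrative):
///
/// ```ignore
/// use sc_sysinfo::{benchmark_sr25519_verify, ExecutionLimit};
///
/// let score = benchmark_sr25519_verify(ExecutionLimit::MaxIterations(1));
/// println!("sr25519 verification throughput: {} KiB/s", score.as_kibs());
/// ```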
pub fn benchmark_sr25519_verify(limit: ExecutionLimit) -> Throughput {
	const INPUT_SIZE: usize = 32;
	const ITERATION_SIZE: usize = 2048;
	let pair = sr25519::Pair::from_string("//Alice", None).unwrap();

	let mut rng = rng();
	let mut msgs = Vec::new();
	let mut sigs = Vec::new();

	for _ in 0..ITERATION_SIZE {
		let mut msg = vec![0u8; INPUT_SIZE];
		rng.fill_bytes(&mut msg[..]);

		sigs.push(pair.sign(&msg));
		msgs.push(msg);
	}

	let run = || -> Result<(), String> {
		for (sig, msg) in sigs.iter().zip(msgs.iter()) {
			let mut ok = sr25519_verify(&sig, &msg[..], &pair.public());
			clobber_value(&mut ok);
		}
		Ok(())
	};
	benchmark(
		"sr25519 verification score",
		INPUT_SIZE * ITERATION_SIZE,
		limit.max_iterations(),
		limit.max_duration(),
		run,
	)
	.expect("sr25519 verification cannot fail; qed")
}

/// Benchmarks the hardware and returns the results of those benchmarks.
///
/// Optionally accepts a path to a `scratch_directory` to use to benchmark the
/// disk. Also accepts the `requirements` for the hardware benchmark, which are
/// scanned to determine how many cores the parallel CPU benchmark should use.
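///
/// A minimal usage sketch (illustrative; an empty `Requirements` means the parallel CPU
/// benchmark falls back to a single core, and `/tmp` is an arbitrary scratch path):
///
/// ```ignore
/// use sc_sysinfo::{gather_hwbench, Requirements};
/// use std::path::Path;
///
/// let requirements = Requirements(Vec::new());
/// let hwbench = gather_hwbench(Some(Path::new("/tmp")), &requirements);
/// println!("CPU score: {}", hwbench.cpu_hashrate_score);
/// ```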
pub fn gather_hwbench(scratch_directory: Option<&Path>, requirements: &Requirements) -> HwBench {
	let cpu_hashrate_score = benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT);
	let (parallel_cpu_hashrate_score, parallel_cpu_cores) = requirements
		.0
		.iter()
		.filter_map(|req| {
			if let Metric::Blake2256Parallel { num_cores } = req.metric {
				Some((benchmark_cpu_parallelism(DEFAULT_CPU_EXECUTION_LIMIT, num_cores), num_cores))
			} else {
				None
			}
		})
		.next()
		.unwrap_or((cpu_hashrate_score, 1));
	#[allow(unused_mut)]
	let mut hwbench = HwBench {
		cpu_hashrate_score,
		parallel_cpu_hashrate_score,
		parallel_cpu_cores,
		memory_memcpy_score: benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT),
		disk_sequential_write_score: None,
		disk_random_write_score: None,
	};

	if let Some(scratch_directory) = scratch_directory {
		hwbench.disk_sequential_write_score =
			match benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory)
			{
				Ok(score) => Some(score),
				Err(error) => {
					log::warn!("Failed to run the sequential write disk benchmark: {}", error);
					None
				},
			};

		hwbench.disk_random_write_score =
			match benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory) {
				Ok(score) => Some(score),
				Err(error) => {
					log::warn!("Failed to run the random write disk benchmark: {}", error);
					None
				},
			};
	}

	hwbench
}

impl Requirements {
	/// Whether the hardware requirements are met by the provided benchmark results.
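	///
	/// A minimal usage sketch (illustrative; assumes `requirements` was loaded elsewhere):
	///
	/// ```ignore
	/// let hwbench = gather_hwbench(None, &requirements);
	/// if let Err(failures) = requirements.check_hardware(&hwbench, false) {
	///     log::warn!("Hardware requirements not met: {}", failures);
	/// }
	/// ```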
	pub fn check_hardware(
		&self,
		hwbench: &HwBench,
		is_rc_authority: bool,
	) -> Result<(), CheckFailures> {
		let mut failures = Vec::new();
		for requirement in self.0.iter() {
			if requirement.validator_only && !is_rc_authority {
				continue
			}

			match requirement.metric {
				Metric::Blake2256 =>
					if requirement.minimum > hwbench.cpu_hashrate_score {
						failures.push(CheckFailure {
							metric: requirement.metric,
							expected: requirement.minimum,
							found: hwbench.cpu_hashrate_score,
						});
					},
				Metric::Blake2256Parallel { .. } =>
					if requirement.minimum > hwbench.parallel_cpu_hashrate_score {
						failures.push(CheckFailure {
							metric: requirement.metric,
							expected: requirement.minimum,
							found: hwbench.parallel_cpu_hashrate_score,
						});
					},
				Metric::MemCopy =>
					if requirement.minimum > hwbench.memory_memcpy_score {
						failures.push(CheckFailure {
							metric: requirement.metric,
							expected: requirement.minimum,
							found: hwbench.memory_memcpy_score,
						});
					},
				Metric::DiskSeqWrite =>
					if let Some(score) = hwbench.disk_sequential_write_score {
						if requirement.minimum > score {
							failures.push(CheckFailure {
								metric: requirement.metric,
								expected: requirement.minimum,
								found: score,
							});
						}
					},
				Metric::DiskRndWrite =>
					if let Some(score) = hwbench.disk_random_write_score {
						if requirement.minimum > score {
							failures.push(CheckFailure {
								metric: requirement.metric,
								expected: requirement.minimum,
								found: score,
							});
						}
					},
				Metric::Sr25519Verify => {},
			}
		}
		if failures.is_empty() {
			Ok(())
		} else {
			Err(failures.into())
		}
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use sp_runtime::assert_eq_error_rate_float;

	#[cfg(target_os = "linux")]
	#[test]
	fn test_gather_sysinfo_linux() {
		let sysinfo = gather_sysinfo();
		assert!(sysinfo.cpu.unwrap().len() > 0);
		assert!(sysinfo.core_count.unwrap() > 0);
		assert!(sysinfo.memory.unwrap() > 0);
		assert_ne!(sysinfo.is_virtual_machine, None);
		assert_ne!(sysinfo.linux_kernel, None);
		assert_ne!(sysinfo.linux_distro, None);
	}

	#[test]
	fn test_benchmark_cpu() {
		assert!(benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT) > Throughput::from_mibs(0.0));
	}

	#[test]
	fn test_benchmark_parallel_cpu() {
		assert!(
			benchmark_cpu_parallelism(DEFAULT_CPU_EXECUTION_LIMIT, 8) > Throughput::from_mibs(0.0)
		);
	}

	#[test]
	fn test_benchmark_memory() {
		assert!(benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT) > Throughput::from_mibs(0.0));
	}

	#[test]
	fn test_benchmark_disk_sequential_writes() {
		assert!(
			benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() >
				Throughput::from_mibs(0.0)
		);
	}

	#[test]
	fn test_benchmark_disk_random_writes() {
		assert!(
			benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() >
				Throughput::from_mibs(0.0)
		);
	}

	#[test]
	fn test_benchmark_sr25519_verify() {
		assert!(
			benchmark_sr25519_verify(ExecutionLimit::MaxIterations(1)) > Throughput::from_mibs(0.0)
		);
	}

	/// Test the [`Throughput`].
	#[test]
	fn throughput_works() {
		/// Float precision.
		const EPS: f64 = 0.1;
		let gib = Throughput::from_gibs(14.324);

		assert_eq_error_rate_float!(14.324, gib.as_gibs(), EPS);
		assert_eq_error_rate_float!(14667.776, gib.as_mibs(), EPS);
		assert_eq_error_rate_float!(14667.776 * 1024.0, gib.as_kibs(), EPS);
		assert_eq!("14.32 GiBs", gib.to_string());

		let mib = Throughput::from_mibs(1029.0);
		assert_eq!("1.00 GiBs", mib.to_string());
	}

	/// Test the [`HwBench`] serialization.
	#[test]
	fn hwbench_serialize_works() {
		let hwbench = HwBench {
			cpu_hashrate_score: Throughput::from_gibs(1.32),
			parallel_cpu_hashrate_score: Throughput::from_gibs(1.32),
			parallel_cpu_cores: 4,
			memory_memcpy_score: Throughput::from_kibs(9342.432),
			disk_sequential_write_score: Some(Throughput::from_kibs(4332.12)),
			disk_random_write_score: None,
		};

		let serialized = serde_json::to_string(&hwbench).unwrap();
		// Throughput from all of the benchmarks should be converted to MiBs.
		assert_eq!(serialized, "{\"cpu_hashrate_score\":1351,\"parallel_cpu_hashrate_score\":1351,\"parallel_cpu_cores\":4,\"memory_memcpy_score\":9,\"disk_sequential_write_score\":4}");
	}
}