// polkadot-node-primitives — crate root (`lib.rs`).
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.

//! Primitive types used on the node-side.
//!
//! Unlike the `polkadot-primitives` crate, these primitives are only used on the node-side,
//! not shared between the node and the runtime. This crate builds on top of the primitives defined
//! there.
23#![deny(missing_docs)]
24
25use std::pin::Pin;
26
27use bounded_vec::BoundedVec;
28use codec::{Decode, Encode, Error as CodecError, Input};
29use futures::Future;
30use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
31
32use polkadot_primitives::{
33	BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, ChunkIndex, CollatorPair,
34	CommittedCandidateReceiptError, CommittedCandidateReceiptV2 as CommittedCandidateReceipt,
35	CompactStatement, CoreIndex, EncodeAs, Hash, HashT, HeadData, Id as ParaId,
36	PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode,
37	ValidationCodeHash, MAX_CODE_SIZE, MAX_POV_SIZE,
38};
39pub use sp_consensus_babe::{
40	AllowedSlots as BabeAllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch,
41	Randomness as BabeRandomness,
42};
43
44pub use polkadot_parachain_primitives::primitives::{
45	BlockData, HorizontalMessages, UpwardMessages,
46};
47
48pub mod approval;
49
50/// Disputes related types.
51pub mod disputes;
52pub use disputes::{
53	dispute_is_inactive, CandidateVotes, DisputeMessage, DisputeMessageCheckError, DisputeStatus,
54	InvalidDisputeVote, SignedDisputeStatement, Timestamp, UncheckedDisputeMessage,
55	ValidDisputeVote, ACTIVE_DURATION_SECS,
56};
57
/// The current node version, which takes the basic SemVer form `<major>.<minor>.<patch>`.
/// In general, minor should be bumped on every release while major or patch releases are
/// relatively rare.
///
/// The associated worker binaries should use the same version as the node that spawns them.
// NOTE: a `&str` in a `const` item is implicitly `'static`; spelling out the lifetime is
// redundant (clippy::redundant_static_lifetimes).
pub const NODE_VERSION: &str = "1.19.2";
64
// A node of the 16-ary Merkle Prefix Trie holds at most 16 32-byte child hashes plus
// bookkeeping:
// header 1 + bitmap 2 + max partial_key 8 + children 16 * (32 + len 1) + value 32 + value len 1
const MERKLE_NODE_MAX_SIZE: usize = 512 + 100;
// A 16-ary prefix trie keyed by a 32-bit ValidatorIndex is at most 8 levels deep.
const MERKLE_PROOF_MAX_DEPTH: usize = 8;
71
72/// The bomb limit for decompressing code blobs.
73#[deprecated(
74	note = "`VALIDATION_CODE_BOMB_LIMIT` will be removed. Use `validation_code_bomb_limit`
75	runtime API to retrieve the value from the runtime"
76)]
77pub const VALIDATION_CODE_BOMB_LIMIT: usize = (MAX_CODE_SIZE * 4u32) as usize;
78
79/// The bomb limit for decompressing PoV blobs.
80pub const POV_BOMB_LIMIT: usize = (MAX_POV_SIZE * 4u32) as usize;
81
82/// How many blocks after finalization an information about backed/included candidate should be
83/// pre-loaded (when scraping onchain votes) and kept locally (when pruning).
84///
85/// We don't want to remove scraped candidates on finalization because we want to
86/// be sure that disputes will conclude on abandoned forks.
87/// Removing the candidate on finalization creates a possibility for an attacker to
88/// avoid slashing. If a bad fork is abandoned too quickly because another
89/// better one gets finalized the entries for the bad fork will be pruned and we
90/// might never participate in a dispute for it.
91///
92/// Why pre-load finalized blocks? I dispute might be raised against finalized candidate. In most
93/// of the cases it will conclude valid (otherwise we are in big trouble) but never the less the
94/// node must participate. It's possible to see a vote for such dispute onchain before we have it
95/// imported by `dispute-distribution`. In this case we won't have `CandidateReceipt` and the import
96/// will fail unless we keep them preloaded.
97///
98/// This value should consider the timeout we allow for participation in approval-voting. In
99/// particular, the following condition should hold:
100///
101/// slot time * `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` > `APPROVAL_EXECUTION_TIMEOUT`
102/// + slot time
103pub const DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION: BlockNumber = 10;
104
/// Maximum tolerated finality lag, in blocks.
///
/// Linked to `MAX_FINALITY_LAG` in relay chain selection,
/// `MAX_HEADS_LOOK_BACK` in `approval-voting` and
/// `MAX_BATCH_SCRAPE_ANCESTORS` in `dispute-coordinator`.
pub const MAX_FINALITY_LAG: u32 = 500;
109
110/// Type of a session window size.
111///
112/// We are not using `NonZeroU32` here because `expect` and `unwrap` are not yet const, so global
113/// constants of `SessionWindowSize` would require `LazyLock` in that case.
114///
115/// See: <https://github.com/rust-lang/rust/issues/67441>
116#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
117pub struct SessionWindowSize(SessionIndex);
118
#[macro_export]
/// Create a new checked `SessionWindowSize` which cannot be 0.
// Every spelling of the zero literal must be rejected explicitly, because macro
// matchers compare literal *tokens*: `0`, `0u32`, `0_u32`, `0 as u32` and `0 as _`
// are all distinct tokens that would otherwise fall through to the `$l:literal` arm
// and silently produce a zero-sized window.
macro_rules! new_session_window_size {
	(0) => {
		compile_error!("Must be non zero");
	};
	(0u32) => {
		compile_error!("Must be non zero");
	};
	(0_u32) => {
		compile_error!("Must be non zero");
	};
	(0 as u32) => {
		compile_error!("Must be non zero");
	};
	(0 as _) => {
		compile_error!("Must be non zero");
	};
	($l:literal) => {
		SessionWindowSize::unchecked_new($l as _)
	};
}
138
139/// It would be nice to draw this from the chain state, but we have no tools for it right now.
140/// On Polkadot this is 1 day, and on Kusama it's 6 hours.
141///
142/// Number of sessions we want to consider in disputes.
143pub const DISPUTE_WINDOW: SessionWindowSize = new_session_window_size!(6);
144
145impl SessionWindowSize {
146	/// Get the value as `SessionIndex` for doing comparisons with those.
147	pub fn get(self) -> SessionIndex {
148		self.0
149	}
150
151	/// Helper function for `new_session_window_size`.
152	///
153	/// Don't use it. The only reason it is public, is because otherwise the
154	/// `new_session_window_size` macro would not work outside of this module.
155	#[doc(hidden)]
156	pub const fn unchecked_new(size: SessionIndex) -> Self {
157		Self(size)
158	}
159}
160
/// The cumulative weight of a block in a fork-choice rule.
pub type BlockWeight = u32;
163
164/// A statement, where the candidate receipt is included in the `Seconded` variant.
165///
166/// This is the committed candidate receipt instead of the bare candidate receipt. As such,
167/// it gives access to the commitments to validators who have not executed the candidate. This
168/// is necessary to allow a block-producing validator to include candidates from outside the para
169/// it is assigned to.
170#[derive(Clone, PartialEq, Eq, Encode, Decode)]
171pub enum Statement {
172	/// A statement that a validator seconds a candidate.
173	#[codec(index = 1)]
174	Seconded(CommittedCandidateReceipt),
175	/// A statement that a validator has deemed a candidate valid.
176	#[codec(index = 2)]
177	Valid(CandidateHash),
178}
179
180impl std::fmt::Debug for Statement {
181	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
182		match self {
183			Statement::Seconded(seconded) => write!(f, "Seconded: {:?}", seconded.descriptor),
184			Statement::Valid(hash) => write!(f, "Valid: {:?}", hash),
185		}
186	}
187}
188
189impl Statement {
190	/// Get the candidate hash referenced by this statement.
191	///
192	/// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be
193	/// expensive for large candidates.
194	pub fn candidate_hash(&self) -> CandidateHash {
195		match *self {
196			Statement::Valid(ref h) => *h,
197			Statement::Seconded(ref c) => c.hash(),
198		}
199	}
200
201	/// Transform this statement into its compact version, which references only the hash
202	/// of the candidate.
203	pub fn to_compact(&self) -> CompactStatement {
204		match *self {
205			Statement::Seconded(ref c) => CompactStatement::Seconded(c.hash()),
206			Statement::Valid(hash) => CompactStatement::Valid(hash),
207		}
208	}
209
210	/// Add the [`PersistedValidationData`] to the statement, if seconded.
211	pub fn supply_pvd(self, pvd: PersistedValidationData) -> StatementWithPVD {
212		match self {
213			Statement::Seconded(c) => StatementWithPVD::Seconded(c, pvd),
214			Statement::Valid(hash) => StatementWithPVD::Valid(hash),
215		}
216	}
217}
218
219impl From<&'_ Statement> for CompactStatement {
220	fn from(stmt: &Statement) -> Self {
221		stmt.to_compact()
222	}
223}
224
225impl EncodeAs<CompactStatement> for Statement {
226	fn encode_as(&self) -> Vec<u8> {
227		self.to_compact().encode()
228	}
229}
230
231/// A statement, exactly the same as [`Statement`] but where seconded messages carry
232/// the [`PersistedValidationData`].
233#[derive(Clone, PartialEq, Eq)]
234pub enum StatementWithPVD {
235	/// A statement that a validator seconds a candidate.
236	Seconded(CommittedCandidateReceipt, PersistedValidationData),
237	/// A statement that a validator has deemed a candidate valid.
238	Valid(CandidateHash),
239}
240
241impl std::fmt::Debug for StatementWithPVD {
242	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
243		match self {
244			StatementWithPVD::Seconded(seconded, _) =>
245				write!(f, "Seconded: {:?}", seconded.descriptor),
246			StatementWithPVD::Valid(hash) => write!(f, "Valid: {:?}", hash),
247		}
248	}
249}
250
251impl StatementWithPVD {
252	/// Get the candidate hash referenced by this statement.
253	///
254	/// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be
255	/// expensive for large candidates.
256	pub fn candidate_hash(&self) -> CandidateHash {
257		match *self {
258			StatementWithPVD::Valid(ref h) => *h,
259			StatementWithPVD::Seconded(ref c, _) => c.hash(),
260		}
261	}
262
263	/// Transform this statement into its compact version, which references only the hash
264	/// of the candidate.
265	pub fn to_compact(&self) -> CompactStatement {
266		match *self {
267			StatementWithPVD::Seconded(ref c, _) => CompactStatement::Seconded(c.hash()),
268			StatementWithPVD::Valid(hash) => CompactStatement::Valid(hash),
269		}
270	}
271
272	/// Drop the [`PersistedValidationData`] from the statement.
273	pub fn drop_pvd(self) -> Statement {
274		match self {
275			StatementWithPVD::Seconded(c, _) => Statement::Seconded(c),
276			StatementWithPVD::Valid(c_h) => Statement::Valid(c_h),
277		}
278	}
279
280	/// Drop the [`PersistedValidationData`] from the statement in a signed
281	/// variant.
282	pub fn drop_pvd_from_signed(signed: SignedFullStatementWithPVD) -> SignedFullStatement {
283		signed
284			.convert_to_superpayload_with(|s| s.drop_pvd())
285			.expect("persisted_validation_data doesn't affect encode_as; qed")
286	}
287
288	/// Converts the statement to a compact signed statement by dropping the
289	/// [`CommittedCandidateReceipt`] and the [`PersistedValidationData`].
290	pub fn signed_to_compact(signed: SignedFullStatementWithPVD) -> Signed<CompactStatement> {
291		signed
292			.convert_to_superpayload_with(|s| s.to_compact())
293			.expect("doesn't affect encode_as; qed")
294	}
295}
296
297impl From<&'_ StatementWithPVD> for CompactStatement {
298	fn from(stmt: &StatementWithPVD) -> Self {
299		stmt.to_compact()
300	}
301}
302
303impl EncodeAs<CompactStatement> for StatementWithPVD {
304	fn encode_as(&self) -> Vec<u8> {
305		self.to_compact().encode()
306	}
307}
308
309/// A statement, the corresponding signature, and the index of the sender.
310///
311/// Signing context and validator set should be apparent from context.
312///
313/// This statement is "full" in the sense that the `Seconded` variant includes the candidate
314/// receipt. Only the compact `SignedStatement` is suitable for submission to the chain.
315pub type SignedFullStatement = Signed<Statement, CompactStatement>;
316
317/// Variant of `SignedFullStatement` where the signature has not yet been verified.
318pub type UncheckedSignedFullStatement = UncheckedSigned<Statement, CompactStatement>;
319
320/// A statement, the corresponding signature, and the index of the sender.
321///
322/// Seconded statements are accompanied by the [`PersistedValidationData`]
323///
324/// Signing context and validator set should be apparent from context.
325pub type SignedFullStatementWithPVD = Signed<StatementWithPVD, CompactStatement>;
326
327/// Candidate invalidity details
328#[derive(Debug)]
329pub enum InvalidCandidate {
330	/// Failed to execute `validate_block`. This includes function panicking.
331	ExecutionError(String),
332	/// Validation outputs check doesn't pass.
333	InvalidOutputs,
334	/// Execution timeout.
335	Timeout,
336	/// Validation input is over the limit.
337	ParamsTooLarge(u64),
338	/// Code size is over the limit.
339	CodeTooLarge(u64),
340	/// PoV does not decompress correctly.
341	PoVDecompressionFailure,
342	/// Validation function returned invalid data.
343	BadReturn,
344	/// Invalid relay chain parent.
345	BadParent,
346	/// POV hash does not match.
347	PoVHashMismatch,
348	/// Bad collator signature.
349	BadSignature,
350	/// Para head hash does not match.
351	ParaHeadHashMismatch,
352	/// Validation code hash does not match.
353	CodeHashMismatch,
354	/// Validation has generated different candidate commitments.
355	CommitmentsHashMismatch,
356	/// The candidate receipt contains an invalid session index.
357	InvalidSessionIndex,
358	/// The candidate receipt invalid UMP signals.
359	InvalidUMPSignals(CommittedCandidateReceiptError),
360}
361
362/// Result of the validation of the candidate.
363#[derive(Debug)]
364pub enum ValidationResult {
365	/// Candidate is valid. The validation process yields these outputs and the persisted
366	/// validation data used to form inputs.
367	Valid(CandidateCommitments, PersistedValidationData),
368	/// Candidate is invalid.
369	Invalid(InvalidCandidate),
370}
371
372/// A Proof-of-Validity
373#[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)]
374pub struct PoV {
375	/// The block witness data.
376	pub block_data: BlockData,
377}
378
379impl PoV {
380	/// Get the blake2-256 hash of the PoV.
381	pub fn hash(&self) -> Hash {
382		BlakeTwo256::hash_of(self)
383	}
384}
385
386/// A type that represents a maybe compressed [`PoV`].
387#[derive(Clone, Encode, Decode)]
388#[cfg(not(target_os = "unknown"))]
389pub enum MaybeCompressedPoV {
390	/// A raw [`PoV`], aka not compressed.
391	Raw(PoV),
392	/// The given [`PoV`] is already compressed.
393	Compressed(PoV),
394}
395
396#[cfg(not(target_os = "unknown"))]
397impl std::fmt::Debug for MaybeCompressedPoV {
398	fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
399		let (variant, size) = match self {
400			MaybeCompressedPoV::Raw(pov) => ("Raw", pov.block_data.0.len()),
401			MaybeCompressedPoV::Compressed(pov) => ("Compressed", pov.block_data.0.len()),
402		};
403
404		write!(f, "{} PoV ({} bytes)", variant, size)
405	}
406}
407
408#[cfg(not(target_os = "unknown"))]
409impl MaybeCompressedPoV {
410	/// Convert into a compressed [`PoV`].
411	///
412	/// If `self == Raw` it is compressed using [`maybe_compress_pov`].
413	pub fn into_compressed(self) -> PoV {
414		match self {
415			Self::Raw(raw) => maybe_compress_pov(raw),
416			Self::Compressed(compressed) => compressed,
417		}
418	}
419}
420
421/// The output of a collator.
422///
423/// This differs from `CandidateCommitments` in two ways:
424///
425/// - does not contain the erasure root; that's computed at the Polkadot level, not at Cumulus
426/// - contains a proof of validity.
427#[derive(Debug, Clone, Encode, Decode)]
428#[cfg(not(target_os = "unknown"))]
429pub struct Collation<BlockNumber = polkadot_primitives::BlockNumber> {
430	/// Messages destined to be interpreted by the Relay chain itself.
431	pub upward_messages: UpwardMessages,
432	/// The horizontal messages sent by the parachain.
433	pub horizontal_messages: HorizontalMessages,
434	/// New validation code.
435	pub new_validation_code: Option<ValidationCode>,
436	/// The head-data produced as a result of execution.
437	pub head_data: HeadData,
438	/// Proof to verify the state transition of the parachain.
439	pub proof_of_validity: MaybeCompressedPoV,
440	/// The number of messages processed from the DMQ.
441	pub processed_downward_messages: u32,
442	/// The mark which specifies the block number up to which all inbound HRMP messages are
443	/// processed.
444	pub hrmp_watermark: BlockNumber,
445}
446
447/// Signal that is being returned when a collation was seconded by a validator.
448#[derive(Debug)]
449#[cfg(not(target_os = "unknown"))]
450pub struct CollationSecondedSignal {
451	/// The hash of the relay chain block that was used as context to sign [`Self::statement`].
452	pub relay_parent: Hash,
453	/// The statement about seconding the collation.
454	///
455	/// Anything else than [`Statement::Seconded`] is forbidden here.
456	pub statement: SignedFullStatement,
457}
458
459/// Result of the [`CollatorFn`] invocation.
460#[cfg(not(target_os = "unknown"))]
461pub struct CollationResult {
462	/// The collation that was build.
463	pub collation: Collation,
464	/// An optional result sender that should be informed about a successfully seconded collation.
465	///
466	/// There is no guarantee that this sender is informed ever about any result, it is completely
467	/// okay to just drop it. However, if it is called, it should be called with the signed
468	/// statement of a parachain validator seconding the collation.
469	pub result_sender: Option<futures::channel::oneshot::Sender<CollationSecondedSignal>>,
470}
471
472#[cfg(not(target_os = "unknown"))]
473impl CollationResult {
474	/// Convert into the inner values.
475	pub fn into_inner(
476		self,
477	) -> (Collation, Option<futures::channel::oneshot::Sender<CollationSecondedSignal>>) {
478		(self.collation, self.result_sender)
479	}
480}
481
482/// Collation function.
483///
484/// Will be called with the hash of the relay chain block the parachain block should be build on and
485/// the [`PersistedValidationData`] that provides information about the state of the parachain on
486/// the relay chain.
487///
488/// Returns an optional [`CollationResult`].
489#[cfg(not(target_os = "unknown"))]
490pub type CollatorFn = Box<
491	dyn Fn(
492			Hash,
493			&PersistedValidationData,
494		) -> Pin<Box<dyn Future<Output = Option<CollationResult>> + Send>>
495		+ Send
496		+ Sync,
497>;
498
499/// Configuration for the collation generator
500#[cfg(not(target_os = "unknown"))]
501pub struct CollationGenerationConfig {
502	/// Collator's authentication key, so it can sign things.
503	pub key: CollatorPair,
504	/// Collation function. See [`CollatorFn`] for more details.
505	///
506	/// If this is `None`, it implies that collations are intended to be submitted
507	/// out-of-band and not pulled out of the function.
508	pub collator: Option<CollatorFn>,
509	/// The parachain that this collator collates for
510	pub para_id: ParaId,
511}
512
513#[cfg(not(target_os = "unknown"))]
514impl std::fmt::Debug for CollationGenerationConfig {
515	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
516		write!(f, "CollationGenerationConfig {{ ... }}")
517	}
518}
519
520/// Parameters for `CollationGenerationMessage::SubmitCollation`.
521#[derive(Debug)]
522pub struct SubmitCollationParams {
523	/// The relay-parent the collation is built against.
524	pub relay_parent: Hash,
525	/// The collation itself (PoV and commitments)
526	pub collation: Collation,
527	/// The parent block's head-data.
528	pub parent_head: HeadData,
529	/// The hash of the validation code the collation was created against.
530	pub validation_code_hash: ValidationCodeHash,
531	/// An optional result sender that should be informed about a successfully seconded collation.
532	///
533	/// There is no guarantee that this sender is informed ever about any result, it is completely
534	/// okay to just drop it. However, if it is called, it should be called with the signed
535	/// statement of a parachain validator seconding the collation.
536	pub result_sender: Option<futures::channel::oneshot::Sender<CollationSecondedSignal>>,
537	/// The core index on which the resulting candidate should be backed
538	pub core_index: CoreIndex,
539}
540
541/// This is the data we keep available for each candidate included in the relay chain.
542#[derive(Clone, Encode, Decode, PartialEq, Eq, Debug)]
543pub struct AvailableData {
544	/// The Proof-of-Validation of the candidate.
545	pub pov: std::sync::Arc<PoV>,
546	/// The persisted validation data needed for approval checks.
547	pub validation_data: PersistedValidationData,
548}
549
550/// This is a convenience type to allow the Erasure chunk proof to Decode into a nested BoundedVec
551#[derive(PartialEq, Eq, Clone, Debug, Hash)]
552pub struct Proof(BoundedVec<BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE>, 1, MERKLE_PROOF_MAX_DEPTH>);
553
554impl Proof {
555	/// This function allows to convert back to the standard nested Vec format
556	pub fn iter(&self) -> impl Iterator<Item = &[u8]> {
557		self.0.iter().map(|v| v.as_slice())
558	}
559
560	/// Construct an invalid dummy proof
561	///
562	/// Useful for testing, should absolutely not be used in production.
563	pub fn dummy_proof() -> Proof {
564		Proof(BoundedVec::from_vec(vec![BoundedVec::from_vec(vec![0]).unwrap()]).unwrap())
565	}
566}
567
568/// Possible errors when converting from `Vec<Vec<u8>>` into [`Proof`].
569#[derive(thiserror::Error, Debug)]
570pub enum MerkleProofError {
571	#[error("Merkle max proof depth exceeded {0} > {} .", MERKLE_PROOF_MAX_DEPTH)]
572	/// This error signifies that the Proof length exceeds the trie's max depth
573	MerkleProofDepthExceeded(usize),
574
575	#[error("Merkle node max size exceeded {0} > {} .", MERKLE_NODE_MAX_SIZE)]
576	/// This error signifies that a Proof node exceeds the 16-ary max node size
577	MerkleProofNodeSizeExceeded(usize),
578}
579
580impl TryFrom<Vec<Vec<u8>>> for Proof {
581	type Error = MerkleProofError;
582
583	fn try_from(input: Vec<Vec<u8>>) -> Result<Self, Self::Error> {
584		if input.len() > MERKLE_PROOF_MAX_DEPTH {
585			return Err(Self::Error::MerkleProofDepthExceeded(input.len()))
586		}
587		let mut out = Vec::new();
588		for element in input.into_iter() {
589			let length = element.len();
590			let data: BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE> = BoundedVec::from_vec(element)
591				.map_err(|_| Self::Error::MerkleProofNodeSizeExceeded(length))?;
592			out.push(data);
593		}
594		Ok(Proof(BoundedVec::from_vec(out).expect("Buffer size is deterined above. qed")))
595	}
596}
597
598impl Decode for Proof {
599	fn decode<I: Input>(value: &mut I) -> Result<Self, CodecError> {
600		let temp: Vec<Vec<u8>> = Decode::decode(value)?;
601		let mut out = Vec::new();
602		for element in temp.into_iter() {
603			let bounded_temp: Result<BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE>, CodecError> =
604				BoundedVec::from_vec(element)
605					.map_err(|_| "Inner node exceeds maximum node size.".into());
606			out.push(bounded_temp?);
607		}
608		BoundedVec::from_vec(out)
609			.map(Self)
610			.map_err(|_| "Merkle proof depth exceeds maximum trie depth".into())
611	}
612}
613
614impl Encode for Proof {
615	fn size_hint(&self) -> usize {
616		MERKLE_NODE_MAX_SIZE * MERKLE_PROOF_MAX_DEPTH
617	}
618
619	fn using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
620		let temp = self.0.iter().map(|v| v.as_vec()).collect::<Vec<_>>();
621		temp.using_encoded(f)
622	}
623}
624
625impl Serialize for Proof {
626	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
627	where
628		S: Serializer,
629	{
630		serializer.serialize_bytes(&self.encode())
631	}
632}
633
634impl<'de> Deserialize<'de> for Proof {
635	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
636	where
637		D: Deserializer<'de>,
638	{
639		// Deserialize the string and get individual components
640		let s = Vec::<u8>::deserialize(deserializer)?;
641		let mut slice = s.as_slice();
642		Decode::decode(&mut slice).map_err(de::Error::custom)
643	}
644}
645
646/// A chunk of erasure-encoded block data.
647#[derive(PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Debug, Hash)]
648pub struct ErasureChunk {
649	/// The erasure-encoded chunk of data belonging to the candidate block.
650	pub chunk: Vec<u8>,
651	/// The index of this erasure-encoded chunk of data.
652	pub index: ChunkIndex,
653	/// Proof for this chunk's branch in the Merkle tree.
654	pub proof: Proof,
655}
656
657impl ErasureChunk {
658	/// Convert bounded Vec Proof to regular `Vec<Vec<u8>>`
659	pub fn proof(&self) -> &Proof {
660		&self.proof
661	}
662}
663
664/// Compress a PoV, unless it exceeds the [`POV_BOMB_LIMIT`].
665#[cfg(not(target_os = "unknown"))]
666pub fn maybe_compress_pov(pov: PoV) -> PoV {
667	let PoV { block_data: BlockData(raw) } = pov;
668	let raw = sp_maybe_compressed_blob::compress(&raw, POV_BOMB_LIMIT).unwrap_or(raw);
669
670	let pov = PoV { block_data: BlockData(raw) };
671	pov
672}