1#[cfg(test)]
118mod tests;
119
120use std::{
121 cmp::{min, Ordering},
122 collections::{
123 hash_map::{Entry, HashMap},
124 BTreeMap, HashSet, VecDeque,
125 },
126 sync::Arc,
127};
128
129use super::LOG_TARGET;
130use polkadot_node_subsystem::messages::Ancestors;
131use polkadot_node_subsystem_util::inclusion_emulator::{
132 self, validate_commitments, ConstraintModifications, Constraints, Fragment,
133 HypotheticalOrConcreteCandidate, ProspectiveCandidate, RelayChainBlockInfo,
134};
135use polkadot_primitives::{
136 BlockNumber, CandidateCommitments, CandidateHash,
137 CommittedCandidateReceiptV2 as CommittedCandidateReceipt, Hash, HeadData,
138 PersistedValidationData, ValidationCodeHash,
139};
140use thiserror::Error;
141
/// Fragment chain related errors.
///
/// Each variant corresponds to a reason for which a candidate cannot be (or can no longer be)
/// added to a fragment chain. The `#[error]` messages are the user-facing descriptions.
#[derive(Debug, Clone, PartialEq, Error)]
pub(crate) enum Error {
	#[error("Candidate already known")]
	CandidateAlreadyKnown,
	#[error("Candidate's parent head is equal to its output head. Would introduce a cycle.")]
	ZeroLengthCycle,
	#[error("Candidate would introduce a cycle")]
	Cycle,
	#[error("Candidate would introduce two paths to the same output state")]
	MultiplePaths,
	#[error("Attempting to directly introduce a Backed candidate. It should first be introduced as Seconded")]
	IntroduceBackedCandidate,
	#[error("Relay parent {0:?} of the candidate precedes the relay parent {1:?} of a pending availability candidate")]
	RelayParentPrecedesCandidatePendingAvailability(Hash, Hash),
	#[error("Candidate would introduce a fork with a pending availability candidate: {0:?}")]
	ForkWithCandidatePendingAvailability(CandidateHash),
	#[error("Fork selection rule favours another candidate: {0:?}")]
	ForkChoiceRule(CandidateHash),
	#[error("Could not find parent of the candidate")]
	ParentCandidateNotFound,
	#[error("Could not compute candidate constraints: {0:?}")]
	ComputeConstraints(inclusion_emulator::ModificationError),
	#[error("Candidate violates constraints: {0:?}")]
	CheckAgainstConstraints(inclusion_emulator::FragmentValidityError),
	#[error("Relay parent would move backwards from the latest candidate in the chain")]
	RelayParentMovedBackwards,
	#[error(transparent)]
	CandidateEntry(#[from] CandidateEntryError),
	#[error("Relay parent {0:?} not in scope. Earliest relay parent allowed {1:?}")]
	RelayParentNotInScope(Hash, Hash),
}
174
175fn fork_selection_rule(hash1: &CandidateHash, hash2: &CandidateHash) -> Ordering {
179 hash1.cmp(hash2)
180}
181
/// Stores candidate entries and auxiliary indices for fast lookup by parent/output head data.
///
/// Does not by itself assume the candidates form a chain; it is a flat store.
#[derive(Clone, Default)]
pub(crate) struct CandidateStorage {
	// Index from parent head data hash to the set of candidates building on top of it.
	by_parent_head: HashMap<Hash, HashSet<CandidateHash>>,

	// Index from output head data hash to the set of candidates outputting it.
	by_output_head: HashMap<Hash, HashSet<CandidateHash>>,

	// Index from candidate hash to the full candidate entry.
	by_candidate_hash: HashMap<CandidateHash, CandidateEntry>,
}
199
200impl CandidateStorage {
201 pub fn add_pending_availability_candidate(
203 &mut self,
204 candidate_hash: CandidateHash,
205 candidate: CommittedCandidateReceipt,
206 persisted_validation_data: PersistedValidationData,
207 ) -> Result<(), Error> {
208 let entry = CandidateEntry::new(
209 candidate_hash,
210 candidate,
211 persisted_validation_data,
212 CandidateState::Backed,
213 )?;
214
215 self.add_candidate_entry(entry)
216 }
217
218 pub fn len(&self) -> usize {
220 self.by_candidate_hash.len()
221 }
222
223 fn add_candidate_entry(&mut self, candidate: CandidateEntry) -> Result<(), Error> {
225 let candidate_hash = candidate.candidate_hash;
226 if self.by_candidate_hash.contains_key(&candidate_hash) {
227 return Err(Error::CandidateAlreadyKnown)
228 }
229
230 self.by_parent_head
231 .entry(candidate.parent_head_data_hash)
232 .or_default()
233 .insert(candidate_hash);
234 self.by_output_head
235 .entry(candidate.output_head_data_hash)
236 .or_default()
237 .insert(candidate_hash);
238 self.by_candidate_hash.insert(candidate_hash, candidate);
239
240 Ok(())
241 }
242
243 fn remove_candidate(&mut self, candidate_hash: &CandidateHash) {
245 if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) {
246 if let Entry::Occupied(mut e) = self.by_parent_head.entry(entry.parent_head_data_hash) {
247 e.get_mut().remove(&candidate_hash);
248 if e.get().is_empty() {
249 e.remove();
250 }
251 }
252
253 if let Entry::Occupied(mut e) = self.by_output_head.entry(entry.output_head_data_hash) {
254 e.get_mut().remove(&candidate_hash);
255 if e.get().is_empty() {
256 e.remove();
257 }
258 }
259 }
260 }
261
262 fn mark_backed(&mut self, candidate_hash: &CandidateHash) {
264 if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) {
265 gum::trace!(target: LOG_TARGET, ?candidate_hash, "Candidate marked as backed");
266 entry.state = CandidateState::Backed;
267 } else {
268 gum::trace!(target: LOG_TARGET, ?candidate_hash, "Candidate not found while marking as backed");
269 }
270 }
271
272 fn contains(&self, candidate_hash: &CandidateHash) -> bool {
274 self.by_candidate_hash.contains_key(candidate_hash)
275 }
276
277 fn candidates(&self) -> impl Iterator<Item = &CandidateEntry> {
279 self.by_candidate_hash.values()
280 }
281
282 fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> {
284 self.by_output_head
290 .get(hash)
291 .and_then(|m| m.iter().next())
292 .and_then(|a_candidate| self.by_candidate_hash.get(a_candidate))
293 .map(|e| &e.candidate.commitments.head_data)
294 .or_else(|| {
295 self.by_parent_head
296 .get(hash)
297 .and_then(|m| m.iter().next())
298 .and_then(|a_candidate| self.by_candidate_hash.get(a_candidate))
299 .map(|e| &e.candidate.persisted_validation_data.parent_head)
300 })
301 }
302
303 fn possible_backed_para_children<'a>(
305 &'a self,
306 parent_head_hash: &'a Hash,
307 ) -> impl Iterator<Item = &'a CandidateEntry> + 'a {
308 let by_candidate_hash = &self.by_candidate_hash;
309 self.by_parent_head
310 .get(parent_head_hash)
311 .into_iter()
312 .flat_map(|hashes| hashes.iter())
313 .filter_map(move |h| {
314 by_candidate_hash.get(h).and_then(|candidate| {
315 (candidate.state == CandidateState::Backed).then_some(candidate)
316 })
317 })
318 }
319}
320
/// The state of a candidate within the storage.
#[derive(Debug, PartialEq, Clone)]
enum CandidateState {
	/// The candidate has been seconded but has not reached the backing quorum.
	Seconded,
	/// The candidate has been fully backed.
	Backed,
}
331
/// Errors that can occur when constructing a [`CandidateEntry`].
#[derive(Debug, Clone, PartialEq, Error)]
pub enum CandidateEntryError {
	#[error("Candidate does not match the persisted validation data provided alongside it")]
	PersistedValidationDataMismatch,
	#[error("Candidate's parent head is equal to its output head. Would introduce a cycle")]
	ZeroLengthCycle,
}
340
/// Representation of a candidate inside the [`CandidateStorage`].
#[derive(Debug, Clone)]
pub(crate) struct CandidateEntry {
	// The candidate's hash.
	candidate_hash: CandidateHash,
	// Hash of the parent head data (from the persisted validation data).
	parent_head_data_hash: Hash,
	// Hash of the head data output by this candidate (from the commitments).
	output_head_data_hash: Hash,
	// The candidate's relay parent, taken from the descriptor.
	relay_parent: Hash,
	// The full prospective candidate. Behind an `Arc`, so cloning the entry is cheap.
	candidate: Arc<ProspectiveCandidate>,
	// Whether the candidate is seconded or backed.
	state: CandidateState,
}
351
352impl CandidateEntry {
353 pub fn new_seconded(
355 candidate_hash: CandidateHash,
356 candidate: CommittedCandidateReceipt,
357 persisted_validation_data: PersistedValidationData,
358 ) -> Result<Self, CandidateEntryError> {
359 Self::new(candidate_hash, candidate, persisted_validation_data, CandidateState::Seconded)
360 }
361
362 pub fn hash(&self) -> CandidateHash {
363 self.candidate_hash
364 }
365
366 fn new(
367 candidate_hash: CandidateHash,
368 candidate: CommittedCandidateReceipt,
369 persisted_validation_data: PersistedValidationData,
370 state: CandidateState,
371 ) -> Result<Self, CandidateEntryError> {
372 if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash()
373 {
374 return Err(CandidateEntryError::PersistedValidationDataMismatch)
375 }
376
377 let parent_head_data_hash = persisted_validation_data.parent_head.hash();
378 let output_head_data_hash = candidate.commitments.head_data.hash();
379
380 if parent_head_data_hash == output_head_data_hash {
381 return Err(CandidateEntryError::ZeroLengthCycle)
382 }
383
384 Ok(Self {
385 candidate_hash,
386 parent_head_data_hash,
387 output_head_data_hash,
388 relay_parent: candidate.descriptor.relay_parent(),
389 state,
390 candidate: Arc::new(ProspectiveCandidate {
391 commitments: candidate.commitments,
392 persisted_validation_data,
393 pov_hash: candidate.descriptor.pov_hash(),
394 validation_code_hash: candidate.descriptor.validation_code_hash(),
395 }),
396 })
397 }
398}
399
/// A stored [`CandidateEntry`] is always a concrete candidate: all of the optional accessors
/// return `Some`, backed by the full prospective candidate held in the entry.
impl HypotheticalOrConcreteCandidate for CandidateEntry {
	fn commitments(&self) -> Option<&CandidateCommitments> {
		Some(&self.candidate.commitments)
	}

	fn persisted_validation_data(&self) -> Option<&PersistedValidationData> {
		Some(&self.candidate.persisted_validation_data)
	}

	fn validation_code_hash(&self) -> Option<ValidationCodeHash> {
		Some(self.candidate.validation_code_hash)
	}

	fn parent_head_data_hash(&self) -> Hash {
		self.parent_head_data_hash
	}

	fn output_head_data_hash(&self) -> Option<Hash> {
		Some(self.output_head_data_hash)
	}

	fn relay_parent(&self) -> Hash {
		self.relay_parent
	}

	fn candidate_hash(&self) -> CandidateHash {
		self.candidate_hash
	}
}
429
/// A candidate pending availability, together with the relay-chain block info of its relay
/// parent (which may be outside of the scope's ancestry).
#[derive(Debug, Clone)]
pub(crate) struct PendingAvailability {
	/// The candidate hash.
	pub candidate_hash: CandidateHash,
	/// The relay-chain block info of the candidate's relay parent.
	pub relay_parent: RelayChainBlockInfo,
}
439
/// The scope of a [`FragmentChain`]: the relay-chain context all candidates must fit into.
#[derive(Debug, Clone)]
pub(crate) struct Scope {
	// The relay parent we are currently building on top of.
	relay_parent: RelayChainBlockInfo,
	// Allowed ancestor relay parents, indexed by block number.
	ancestors: BTreeMap<BlockNumber, RelayChainBlockInfo>,
	// The same ancestors, indexed by block hash.
	ancestors_by_hash: HashMap<Hash, RelayChainBlockInfo>,
	// Candidates pending availability at this block.
	pending_availability: Vec<PendingAvailability>,
	// Base constraints derived from the latest included candidate.
	base_constraints: Constraints,
	// Maximum length of the best backable chain. Includes the candidates pending availability
	// (they are added to the caller-supplied limit in `with_ancestors`).
	max_backable_len: usize,
}
456
/// Error produced when the ancestors supplied to [`Scope::with_ancestors`] do not form a
/// contiguous descending chain from the relay parent.
#[derive(Debug)]
pub(crate) struct UnexpectedAncestor {
	/// The offending ancestor's block number.
	/// Only read via `Debug` output, hence the `dead_code` allowance.
	#[allow(dead_code)]
	pub number: BlockNumber,
	/// The previously seen block number which `number` failed to follow.
	#[allow(dead_code)]
	pub prev: BlockNumber,
}
470
471impl Scope {
472 pub fn with_ancestors(
490 relay_parent: RelayChainBlockInfo,
491 base_constraints: Constraints,
492 pending_availability: Vec<PendingAvailability>,
493 max_backable_len: usize,
494 ancestors: impl IntoIterator<Item = RelayChainBlockInfo>,
495 ) -> Result<Self, UnexpectedAncestor> {
496 let mut ancestors_map = BTreeMap::new();
497 let mut ancestors_by_hash = HashMap::new();
498 {
499 let mut prev = relay_parent.number;
500 for ancestor in ancestors {
501 if prev == 0 {
502 return Err(UnexpectedAncestor { number: ancestor.number, prev })
503 } else if ancestor.number != prev - 1 {
504 return Err(UnexpectedAncestor { number: ancestor.number, prev })
505 } else if prev == base_constraints.min_relay_parent_number {
506 break
507 } else {
508 prev = ancestor.number;
509 ancestors_by_hash.insert(ancestor.hash, ancestor.clone());
510 ancestors_map.insert(ancestor.number, ancestor);
511 }
512 }
513 }
514
515 Ok(Scope {
516 relay_parent,
517 base_constraints,
518 max_backable_len: max_backable_len + pending_availability.len(),
519 pending_availability,
520 ancestors: ancestors_map,
521 ancestors_by_hash,
522 })
523 }
524
525 pub fn earliest_relay_parent(&self) -> RelayChainBlockInfo {
527 self.ancestors
528 .iter()
529 .next()
530 .map(|(_, v)| v.clone())
531 .unwrap_or_else(|| self.relay_parent.clone())
532 }
533
534 pub fn ancestor(&self, hash: &Hash) -> Option<RelayChainBlockInfo> {
536 if hash == &self.relay_parent.hash {
537 return Some(self.relay_parent.clone())
538 }
539
540 self.ancestors_by_hash.get(hash).map(|info| info.clone())
541 }
542
543 pub fn base_constraints(&self) -> &Constraints {
545 &self.base_constraints
546 }
547
548 fn get_pending_availability(
550 &self,
551 candidate_hash: &CandidateHash,
552 ) -> Option<&PendingAvailability> {
553 self.pending_availability.iter().find(|c| &c.candidate_hash == candidate_hash)
554 }
555}
556
/// A node of the [`BackedChain`]: a fragment together with bookkeeping data needed for
/// constraint computation and fast head-data lookups.
#[cfg_attr(test, derive(Clone))]
struct FragmentNode {
	// The fragment (candidate + relay parent + constraints it was checked against).
	fragment: Fragment,
	// The candidate's hash.
	candidate_hash: CandidateHash,
	// Constraint modifications accumulated along the chain up to and including this node.
	cumulative_modifications: ConstraintModifications,
	// Hash of the parent head data.
	parent_head_data_hash: Hash,
	// Hash of the output head data.
	output_head_data_hash: Hash,
}
567
impl FragmentNode {
	/// Hash of this node's relay parent.
	fn relay_parent(&self) -> Hash {
		self.fragment.relay_parent().hash
	}
}
573
impl From<&FragmentNode> for CandidateEntry {
	fn from(node: &FragmentNode) -> Self {
		Self {
			candidate_hash: node.candidate_hash,
			parent_head_data_hash: node.parent_head_data_hash,
			output_head_data_hash: node.output_head_data_hash,
			candidate: node.fragment.candidate_clone(),
			relay_parent: node.relay_parent(),
			// A fragment node is always backed: only backed candidates are appended to the
			// chain (see `CandidateStorage::possible_backed_para_children`).
			state: CandidateState::Backed,
		}
	}
}
589
/// An ordered collection of backed fragments which build on top of one another, with indices
/// for fast membership and head-data lookups.
#[derive(Default)]
#[cfg_attr(test, derive(Clone))]
struct BackedChain {
	// The actual chain, in order from the base upwards.
	chain: Vec<FragmentNode>,
	// Index from parent head data hash to the candidate building on it.
	// Only contains candidates present in `chain`.
	by_parent_head: HashMap<Hash, CandidateHash>,
	// Index from output head data hash to the candidate outputting it.
	// Only contains candidates present in `chain`.
	by_output_head: HashMap<Hash, CandidateHash>,
	// Set of all candidate hashes present in `chain`.
	candidates: HashSet<CandidateHash>,
}
606
607impl BackedChain {
608 fn push(&mut self, candidate: FragmentNode) {
609 self.candidates.insert(candidate.candidate_hash);
610 self.by_parent_head
611 .insert(candidate.parent_head_data_hash, candidate.candidate_hash);
612 self.by_output_head
613 .insert(candidate.output_head_data_hash, candidate.candidate_hash);
614 self.chain.push(candidate);
615 }
616
617 fn clear(&mut self) -> Vec<FragmentNode> {
618 self.by_parent_head.clear();
619 self.by_output_head.clear();
620 self.candidates.clear();
621
622 std::mem::take(&mut self.chain)
623 }
624
625 fn revert_to_parent_hash<'a>(
626 &'a mut self,
627 parent_head_data_hash: &Hash,
628 ) -> impl Iterator<Item = FragmentNode> + 'a {
629 let mut found_index = None;
630 for index in 0..self.chain.len() {
631 let node = &self.chain[index];
632
633 if found_index.is_some() {
634 self.by_parent_head.remove(&node.parent_head_data_hash);
635 self.by_output_head.remove(&node.output_head_data_hash);
636 self.candidates.remove(&node.candidate_hash);
637 } else if &node.output_head_data_hash == parent_head_data_hash {
638 found_index = Some(index);
639 }
640 }
641
642 if let Some(index) = found_index {
643 self.chain.drain(min(index + 1, self.chain.len())..)
644 } else {
645 self.chain.drain(0..0)
647 }
648 }
649
650 fn contains(&self, hash: &CandidateHash) -> bool {
651 self.candidates.contains(hash)
652 }
653}
654
/// The fragment chain for a relay-chain context described by a [`Scope`].
///
/// Holds the current best backable candidate chain, as well as potential candidates which may
/// become connected to the chain in the future or even replace parts of it.
#[cfg_attr(test, derive(Clone))]
pub(crate) struct FragmentChain {
	// The scope, which dictates the constraints all candidates must adhere to.
	scope: Scope,

	// The current best chain of backed candidates, each building on the previous one.
	best_chain: BackedChain,

	// Potential candidate storage: candidates not (yet) part of `best_chain`, including
	// unconnected ones and ones that are merely seconded.
	unconnected: CandidateStorage,
}
676
impl FragmentChain {
	/// Create a new [`FragmentChain`] with the given scope and populate it with the candidates
	/// pending availability.
	pub fn init(scope: Scope, mut candidates_pending_availability: CandidateStorage) -> Self {
		let mut fragment_chain = Self {
			scope,
			best_chain: BackedChain::default(),
			unconnected: CandidateStorage::default(),
		};

		// Only the best chain needs populating here; the supplied storage holds the
		// candidates pending availability, which are expected to chain off the required
		// parent of the base constraints.
		fragment_chain.populate_chain(&mut candidates_pending_availability);

		fragment_chain
	}

	/// Populate this [`FragmentChain`] from the previous fragment chain (built for the
	/// previous relay parent), carrying over whichever candidates still have potential.
	pub fn populate_from_previous(&mut self, prev_fragment_chain: &FragmentChain) {
		let mut prev_storage = prev_fragment_chain.unconnected.clone();

		for candidate in prev_fragment_chain.best_chain.chain.iter() {
			// Candidates that were pending availability in the previous scope are not
			// carried over: if they're still pending availability they are part of this
			// chain's scope already, and if they were included there's no point in keeping
			// them.
			if prev_fragment_chain
				.scope
				.get_pending_availability(&candidate.candidate_hash)
				.is_none()
			{
				let _ = prev_storage.add_candidate_entry(candidate.into());
			}
		}

		// First, pick the new best backable chain out of the carried-over candidates.
		self.populate_chain(&mut prev_storage);

		// Then drop the forks that can no longer be built upon.
		self.trim_uneligible_forks(&mut prev_storage, None);

		// Finally, keep whatever survived trimming and still has potential as unconnected
		// candidates.
		self.populate_unconnected_potential_candidates(prev_storage);
	}

	/// Get the scope of this [`FragmentChain`].
	pub fn scope(&self) -> &Scope {
		&self.scope
	}

	/// Number of candidates in the best backable chain.
	pub fn best_chain_len(&self) -> usize {
		self.best_chain.chain.len()
	}

	/// Number of candidates in the unconnected potential storage.
	pub fn unconnected_len(&self) -> usize {
		self.unconnected.len()
	}

	/// Whether the given candidate is one of the unconnected potential candidates.
	pub fn contains_unconnected_candidate(&self, candidate: &CandidateHash) -> bool {
		self.unconnected.contains(candidate)
	}

	/// The hashes of the best backable chain, in order.
	pub fn best_chain_vec(&self) -> Vec<CandidateHash> {
		self.best_chain.chain.iter().map(|candidate| candidate.candidate_hash).collect()
	}

	/// Iterator over the unconnected potential candidates.
	pub fn unconnected(&self) -> impl Iterator<Item = &CandidateEntry> {
		self.unconnected.candidates()
	}

	/// Whether the candidate is backed: either part of the best chain (which only holds
	/// backed candidates) or stored as backed in the unconnected storage.
	pub fn is_candidate_backed(&self, hash: &CandidateHash) -> bool {
		self.best_chain.candidates.contains(hash) ||
			matches!(
				self.unconnected.by_candidate_hash.get(hash),
				Some(candidate) if candidate.state == CandidateState::Backed
			)
	}

	/// Mark a candidate as backed. This may trigger a rebuild of the best backable chain.
	pub fn candidate_backed(&mut self, newly_backed_candidate: &CandidateHash) {
		// Already part of the best chain, nothing to do.
		if self.best_chain.candidates.contains(newly_backed_candidate) {
			return
		}
		let Some(parent_head_hash) = self
			.unconnected
			.by_candidate_hash
			.get(newly_backed_candidate)
			.map(|entry| entry.parent_head_data_hash)
		else {
			// Not in the unconnected storage either; nothing to do.
			return
		};

		self.unconnected.mark_backed(newly_backed_candidate);

		// Revert the best chain to the candidate's parent head. If that head is not part of
		// the chain (or its root), there's nothing to rebuild right now.
		if !self.revert_to(&parent_head_hash) {
			return
		}

		let mut prev_storage = std::mem::take(&mut self.unconnected);

		// Rebuild the best chain, which may now pick up the newly backed candidate.
		self.populate_chain(&mut prev_storage);

		// Trim forks that lost their potential, starting the search from the reverted
		// parent head.
		self.trim_uneligible_forks(&mut prev_storage, Some(parent_head_hash));

		// Keep the rest as unconnected potential candidates.
		self.populate_unconnected_potential_candidates(prev_storage);
	}

	/// Check whether this candidate could at some point be added to the chain.
	///
	/// Rejects candidates which are already known, otherwise defers to `check_potential`.
	pub fn can_add_candidate_as_potential(
		&self,
		candidate: &impl HypotheticalOrConcreteCandidate,
	) -> Result<(), Error> {
		let candidate_hash = candidate.candidate_hash();

		if self.best_chain.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) {
			return Err(Error::CandidateAlreadyKnown)
		}

		self.check_potential(candidate)
	}

	/// Try adding a seconded candidate to the unconnected storage, if it has potential.
	///
	/// Candidates already in the `Backed` state are rejected — they must be introduced as
	/// seconded first.
	pub fn try_adding_seconded_candidate(
		&mut self,
		candidate: &CandidateEntry,
	) -> Result<(), Error> {
		if candidate.state == CandidateState::Backed {
			return Err(Error::IntroduceBackedCandidate);
		}

		self.can_add_candidate_as_potential(candidate)?;

		// Cheap clone: the heavy candidate data is behind an `Arc` in the entry.
		self.unconnected.add_candidate_entry(candidate.clone())?;

		Ok(())
	}

	/// Try resolving the full `HeadData` associated with this hash, looking at the required
	/// parent, the best chain and the unconnected storage in turn.
	pub fn get_head_data_by_hash(&self, head_data_hash: &Hash) -> Option<HeadData> {
		// It may be the head data of the latest included candidate.
		let required_parent = &self.scope.base_constraints().required_parent;
		if &required_parent.hash() == head_data_hash {
			return Some(required_parent.clone())
		}

		// Cheap index check before walking the chain.
		let has_head_data_in_chain = self
			.best_chain
			.by_parent_head
			.get(head_data_hash)
			.or_else(|| self.best_chain.by_output_head.get(head_data_hash))
			.is_some();

		if has_head_data_in_chain {
			return self.best_chain.chain.iter().find_map(|candidate| {
				if &candidate.parent_head_data_hash == head_data_hash {
					// The head data is this candidate's parent head, stored in its PVD.
					Some(
						candidate
							.fragment
							.candidate()
							.persisted_validation_data
							.parent_head
							.clone(),
					)
				} else if &candidate.output_head_data_hash == head_data_hash {
					// The head data is this candidate's output, stored in its commitments.
					Some(candidate.fragment.candidate().commitments.head_data.clone())
				} else {
					None
				}
			});
		}

		// Lastly, try the unconnected candidate storage.
		self.unconnected.head_data_by_hash(head_data_hash).cloned()
	}

	/// Select up to `count` candidates following the given `ancestors` which could be backed
	/// on-chain next. Returns `(candidate_hash, relay_parent_hash)` pairs, in order.
	pub fn find_backable_chain(
		&self,
		ancestors: Ancestors,
		count: u32,
	) -> Vec<(CandidateHash, Hash)> {
		if count == 0 {
			return vec![]
		}
		let base_pos = self.find_ancestor_path(ancestors);

		let actual_end_index =
			std::cmp::min(base_pos + (count as usize), self.best_chain.chain.len());
		let mut res = Vec::with_capacity(actual_end_index - base_pos);

		for elem in &self.best_chain.chain[base_pos..actual_end_index] {
			// Stop at the first candidate pending availability — those are already backed
			// on-chain and must not be supplied again.
			if self.scope.get_pending_availability(&elem.candidate_hash).is_none() {
				res.push((elem.candidate_hash, elem.relay_parent()));
			} else {
				break
			}
		}

		res
	}

	// Walk the best chain from its base, consuming entries from the `ancestors` set as long
	// as they match. Returns the index in the chain where the search stopped (the first
	// position not covered by the ancestor set).
	fn find_ancestor_path(&self, mut ancestors: Ancestors) -> usize {
		if self.best_chain.chain.is_empty() {
			return 0;
		}

		for (index, candidate) in self.best_chain.chain.iter().enumerate() {
			if !ancestors.remove(&candidate.candidate_hash) {
				return index
			}
		}

		// The entire chain is covered by the ancestor set; nothing left to back.
		self.best_chain.chain.len()
	}

	// The earliest relay parent a new candidate appended to the chain right now could have:
	// the relay parent of the last candidate in the chain, or the scope's earliest relay
	// parent for an empty chain. Returns `None` if the last candidate's relay parent info
	// cannot be found (neither in scope nor pending availability).
	fn earliest_relay_parent(&self) -> Option<RelayChainBlockInfo> {
		if let Some(last_candidate) = self.best_chain.chain.last() {
			self.scope.ancestor(&last_candidate.relay_parent()).or_else(|| {
				// If the relay parent is out of scope AND the candidate is in the chain, it
				// must be pending availability (those may have out-of-scope relay parents).
				self.scope
					.get_pending_availability(&last_candidate.candidate_hash)
					.map(|c| c.relay_parent.clone())
			})
		} else {
			Some(self.scope.earliest_relay_parent())
		}
	}

	// The earliest relay parent a potential candidate may ever have: the relay parent of the
	// latest candidate pending availability in the chain, or the scope's earliest relay
	// parent if there is none.
	fn earliest_relay_parent_pending_availability(&self) -> RelayChainBlockInfo {
		self.best_chain
			.chain
			.iter()
			.rev()
			.find_map(|candidate| {
				self.scope
					.get_pending_availability(&candidate.candidate_hash)
					.map(|c| c.relay_parent.clone())
			})
			.unwrap_or_else(|| self.scope.earliest_relay_parent())
	}

	// Refill the unconnected potential candidate storage from an old storage, keeping only
	// candidates which still have potential.
	fn populate_unconnected_potential_candidates(&mut self, old_storage: CandidateStorage) {
		for candidate in old_storage.by_candidate_hash.into_values() {
			// Candidates pending availability should already be part of the chain; skip
			// them here.
			if self.scope.get_pending_availability(&candidate.candidate_hash).is_some() {
				continue
			}

			match self.can_add_candidate_as_potential(&candidate) {
				Ok(()) => {
					let _ = self.unconnected.add_candidate_entry(candidate);
				},
				// Errors are swallowed deliberately: losing potential (e.g. being kicked
				// out by a fork choice) is a legitimate outcome here.
				Err(_) => {},
			};
		}
	}

	// Check whether appending a candidate with this output head would introduce a cycle back
	// into the chain or a second path to an already-reached output state. Trivial zero-length
	// cycles are rejected earlier (`CandidateEntry::new` / `check_potential`).
	fn check_cycles_or_invalid_tree(&self, output_head_hash: &Hash) -> Result<(), Error> {
		// Outputting the parent head of some candidate in the chain would create a cycle.
		if self.best_chain.by_parent_head.contains_key(output_head_hash) {
			return Err(Error::Cycle)
		}

		// Outputting a head some chain candidate already outputs would create two paths to
		// the same state.
		if self.best_chain.by_output_head.contains_key(output_head_hash) {
			return Err(Error::MultiplePaths)
		}

		Ok(())
	}

	// Check a candidate's potential to be added to the chain now or in the future.
	//
	// Works both for concrete candidates (full commitments + PVD available) and for
	// hypothetical ones, where only the cheaper structural checks can be performed.
	fn check_potential(
		&self,
		candidate: &impl HypotheticalOrConcreteCandidate,
	) -> Result<(), Error> {
		let relay_parent = candidate.relay_parent();
		let parent_head_hash = candidate.parent_head_data_hash();

		// Trivial zero-length cycle.
		if let Some(output_head_hash) = candidate.output_head_data_hash() {
			if parent_head_hash == output_head_hash {
				return Err(Error::ZeroLengthCycle)
			}
		}

		// The relay parent must be in scope.
		let Some(relay_parent) = self.scope.ancestor(&relay_parent) else {
			return Err(Error::RelayParentNotInScope(
				relay_parent,
				self.scope.earliest_relay_parent().hash,
			))
		};

		// The relay parent must not precede that of the latest pending availability
		// candidate.
		let earliest_rp_of_pending_availability = self.earliest_relay_parent_pending_availability();
		if relay_parent.number < earliest_rp_of_pending_availability.number {
			return Err(Error::RelayParentPrecedesCandidatePendingAvailability(
				relay_parent.hash,
				earliest_rp_of_pending_availability.hash,
			))
		}

		// If the candidate would fork off an existing chain candidate:
		if let Some(other_candidate) = self.best_chain.by_parent_head.get(&parent_head_hash) {
			// Forks with candidates pending availability are never allowed.
			if self.scope().get_pending_availability(other_candidate).is_some() {
				return Err(Error::ForkWithCandidatePendingAvailability(*other_candidate))
			}

			// Otherwise the fork selection rule decides; if the existing candidate wins
			// (compares `Less`), reject this one.
			if fork_selection_rule(other_candidate, &candidate.candidate_hash()) == Ordering::Less {
				return Err(Error::ForkChoiceRule(*other_candidate))
			}
		}

		// Find the constraints the candidate must satisfy: either derived from its parent
		// candidate in the chain, or the base constraints if it builds on the required
		// parent. Otherwise the candidate is unconnected for now.
		let (is_unconnected, constraints, maybe_min_relay_parent_number) =
			if let Some(parent_candidate) = self.best_chain.by_output_head.get(&parent_head_hash) {
				let Some(parent_candidate) =
					self.best_chain.chain.iter().find(|c| &c.candidate_hash == parent_candidate)
				else {
					// Index and chain are kept in sync, so this should never happen.
					return Err(Error::ParentCandidateNotFound)
				};

				(
					false,
					self.scope
						.base_constraints
						.apply_modifications(&parent_candidate.cumulative_modifications)
						.map_err(Error::ComputeConstraints)?,
					self.scope.ancestor(&parent_candidate.relay_parent()).map(|rp| rp.number),
				)
			} else if self.scope.base_constraints.required_parent.hash() == parent_head_hash {
				// Builds directly on the latest included head.
				(false, self.scope.base_constraints.clone(), None)
			} else {
				// Unconnected: only the base constraints can be checked against.
				(true, self.scope.base_constraints.clone(), None)
			};

		// Structural checks on the output head, if known.
		if let Some(ref output_head_hash) = candidate.output_head_data_hash() {
			self.check_cycles_or_invalid_tree(output_head_hash)?;
		}

		// Full constraint checks, only possible for concrete candidates.
		if let (Some(commitments), Some(pvd), Some(validation_code_hash)) = (
			candidate.commitments(),
			candidate.persisted_validation_data(),
			candidate.validation_code_hash(),
		) {
			if is_unconnected {
				// For unconnected candidates only the commitments can be validated, against
				// the base constraints.
				return validate_commitments(
					&self.scope.base_constraints,
					&relay_parent,
					commitments,
					&validation_code_hash,
				)
				.map_err(Error::CheckAgainstConstraints)
			}
			Fragment::check_against_constraints(
				&relay_parent,
				&constraints,
				commitments,
				&validation_code_hash,
				pvd,
			)
			.map_err(Error::CheckAgainstConstraints)?;
		}

		// The relay parent may not move backwards relative to the constraints...
		if relay_parent.number < constraints.min_relay_parent_number {
			return Err(Error::RelayParentMovedBackwards)
		}

		// ...nor relative to the parent candidate's relay parent.
		if let Some(earliest_rp) = maybe_min_relay_parent_number {
			if relay_parent.number < earliest_rp {
				return Err(Error::RelayParentMovedBackwards)
			}
		}

		Ok(())
	}

	// Once the best chain has been populated, trim the forks generated by candidates which
	// are not rooted in it. Performs a breadth-first search over the stored candidates; if
	// `starting_point` is `Some`, the search begins at candidates with that parent head hash,
	// otherwise at the chain itself (or the required parent if the chain is empty).
	fn trim_uneligible_forks(&self, storage: &mut CandidateStorage, starting_point: Option<Hash>) {
		// Seed the queue with parent heads known to be viable (`true` = parent still has
		// potential).
		let mut queue: VecDeque<_> = if let Some(starting_point) = starting_point {
			[(starting_point, true)].into_iter().collect()
		} else {
			if self.best_chain.chain.is_empty() {
				[(self.scope.base_constraints.required_parent.hash(), true)]
					.into_iter()
					.collect()
			} else {
				self.best_chain.chain.iter().map(|c| (c.parent_head_data_hash, true)).collect()
			}
		};
		// Track visited parent heads so that cycles in the stored candidates cannot make
		// this loop forever.
		let mut visited = HashSet::new();

		while let Some((parent, parent_has_potential)) = queue.pop_front() {
			visited.insert(parent);

			let Some(children) = storage.by_parent_head.get(&parent) else { continue };
			// Cannot remove while iterating, so buffer the removals.
			let mut to_remove = vec![];

			for child_hash in children.iter() {
				let Some(child) = storage.by_candidate_hash.get(child_hash) else { continue };

				// Output head already visited: either a cycle or multiple paths to the
				// same state. Stop this branch to avoid looping forever.
				if visited.contains(&child.output_head_data_hash) {
					continue
				}

				// Keep a candidate only if its entire ancestry was kept and it has
				// potential itself.
				if parent_has_potential && self.check_potential(child).is_ok() {
					queue.push_back((child.output_head_data_hash, true));
				} else {
					// Remove it, but keep walking its descendants (marked as having no
					// potential) so they get removed too.
					to_remove.push(*child_hash);
					queue.push_back((child.output_head_data_hash, false));
				}
			}

			for hash in to_remove {
				storage.remove_candidate(&hash);
			}
		}
	}

	// Extend the best backable chain with candidates from `storage`, repeatedly picking the
	// best eligible child of the current chain head until no more can be added. Candidates
	// that get added are removed from `storage`.
	fn populate_chain(&mut self, storage: &mut CandidateStorage) {
		// Resume from the modifications accumulated so far, or start from identity for an
		// empty chain.
		let mut cumulative_modifications =
			if let Some(last_candidate) = self.best_chain.chain.last() {
				last_candidate.cumulative_modifications.clone()
			} else {
				ConstraintModifications::identity()
			};
		let Some(mut earliest_rp) = self.earliest_relay_parent() else { return };

		loop {
			// Respect the scope's maximum backable length.
			if self.best_chain.chain.len() >= self.scope.max_backable_len {
				break;
			}

			// Constraints the next child must satisfy.
			let child_constraints =
				match self.scope.base_constraints.apply_modifications(&cumulative_modifications) {
					Err(e) => {
						gum::debug!(
							target: LOG_TARGET,
							new_parent_head = ?cumulative_modifications.required_parent,
							err = ?e,
							"Failed to apply modifications",
						);

						break
					},
					Ok(c) => c,
				};

			let required_head_hash = child_constraints.required_parent.hash();

			// Candidates eligible to be appended next: backed children of the current chain
			// head which pass all structural and constraint checks.
			let possible_children = storage
				.possible_backed_para_children(&required_head_hash)
				.filter_map(|candidate| {
					// The relay parent comes from the pending availability record if there
					// is one (it may be out of scope), otherwise it must be in scope.
					let pending = self.scope.get_pending_availability(&candidate.candidate_hash);
					let Some(relay_parent) = pending
						.map(|p| p.relay_parent.clone())
						.or_else(|| self.scope.ancestor(&candidate.relay_parent))
					else {
						return None
					};

					// No cycles or multiple paths to the same state.
					if self.check_cycles_or_invalid_tree(&candidate.output_head_data_hash).is_err()
					{
						return None
					}

					// Relay parents must not move backwards along the chain. For the first
					// chain element, a pending availability candidate is allowed its own
					// (possibly earlier) relay parent.
					let min_relay_parent_number = pending
						.map(|p| match self.best_chain.chain.len() {
							0 => p.relay_parent.number,
							_ => earliest_rp.number,
						})
						.unwrap_or_else(|| earliest_rp.number);

					if relay_parent.number < min_relay_parent_number {
						return None
					}

					// Don't add duplicates. Shouldn't be possible (it would imply a cycle),
					// but checked defensively.
					if self.best_chain.contains(&candidate.candidate_hash) {
						return None
					}

					let fragment = {
						let mut constraints = child_constraints.clone();
						if let Some(ref p) = pending {
							// Special case for pending availability candidates: their relay
							// parent sets the minimum.
							constraints.min_relay_parent_number = p.relay_parent.number;
						}

						let f = Fragment::new(
							relay_parent.clone(),
							constraints,
							// Cheap clone: the candidate is behind an `Arc`.
							candidate.candidate.clone(),
						);

						match f {
							Ok(f) => f,
							Err(e) => {
								gum::debug!(
									target: LOG_TARGET,
									err = ?e,
									?relay_parent,
									candidate_hash = ?candidate.candidate_hash,
									"Failed to instantiate fragment",
								);

								return None
							},
						}
					};

					Some((
						fragment,
						candidate.candidate_hash,
						candidate.output_head_data_hash,
						candidate.parent_head_data_hash,
					))
				});

			// Pick the best child: pending availability candidates always win, otherwise
			// the fork selection rule decides.
			let best_candidate =
				possible_children.min_by(|(_, ref child1, _, _), (_, ref child2, _, _)| {
					if self.scope.get_pending_availability(child1).is_some() {
						Ordering::Less
					} else if self.scope.get_pending_availability(child2).is_some() {
						Ordering::Greater
					} else {
						fork_selection_rule(child1, child2)
					}
				});

			if let Some((fragment, candidate_hash, output_head_data_hash, parent_head_data_hash)) =
				best_candidate
			{
				// The candidate moves from storage into the chain.
				storage.remove_candidate(&candidate_hash);

				// Accumulate this fragment's constraint modifications and advance the
				// earliest allowed relay parent.
				cumulative_modifications.stack(fragment.constraint_modifications());
				earliest_rp = fragment.relay_parent().clone();

				let node = FragmentNode {
					fragment,
					candidate_hash,
					parent_head_data_hash,
					output_head_data_hash,
					cumulative_modifications: cumulative_modifications.clone(),
				};

				self.best_chain.push(node);
			} else {
				break
			}
		}
	}

	// Revert the best chain so that its last candidate is the one outputting
	// `parent_head_hash` (or clear it entirely if that hash is the required parent of the
	// base constraints). Removed candidates are moved back into the unconnected storage.
	// Returns false if the head hash was not found in the chain at all.
	fn revert_to(&mut self, parent_head_hash: &Hash) -> bool {
		let mut removed_items = None;
		if &self.scope.base_constraints.required_parent.hash() == parent_head_hash {
			removed_items = Some(self.best_chain.clear());
		}

		if removed_items.is_none() && self.best_chain.by_output_head.contains_key(parent_head_hash)
		{
			removed_items = Some(self.best_chain.revert_to_parent_hash(parent_head_hash).collect());
		}

		let Some(removed_items) = removed_items else { return false };

		// Even if no nodes were actually removed we return true: a new candidate can now be
		// added at this point in the chain.
		for node in &removed_items {
			let _ = self.unconnected.add_candidate_entry(node.into());
		}
		true
	}
}