
pallet_election_provider_multi_block/unsigned/miner.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! The miner code for the EPMB pallet.
19//!
20//! It broadly consists of two main types:
21//!
22//! * [`crate::unsigned::miner::BaseMiner`], which is more generic, needs parameterization via
23//!   [`crate::unsigned::miner::MinerConfig`], and can be used by an external implementation.
24//! * [`crate::unsigned::miner::OffchainWorkerMiner`], which is more opinionated, and is used by
25//!   this pallet via the `offchain_worker` hook to also mine solutions during the
26//!   `Phase::Unsigned`.
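//!
//! As a rough usage sketch of the offchain flavour (a sketch only; the real hook also checks the
//! current phase), the `offchain_worker` hook drives it roughly like so:
//!
//! ```ignore
//! // inside `offchain_worker(now)`, during `Phase::Unsigned`:
//! if OffchainWorkerMiner::<T>::ensure_offchain_repeat_frequency(now).is_ok() {
//! 	// restore a cached solution if possible, otherwise mine a fresh one, then submit it
//! 	// back to the chain as an unsigned transaction.
//! 	let _ = OffchainWorkerMiner::<T>::restore_or_compute_then_maybe_submit();
//! }
//! ```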
27
28use super::{Call, Config, Pallet};
29use crate::{
30	helpers,
31	types::{PadSolutionPages, *},
32	verifier::{self},
33	CommonError,
34};
35use codec::Encode;
36use frame_election_provider_support::{ExtendedBalance, NposSolver, Support, VoteWeight};
37use frame_support::{traits::Get, BoundedVec};
38use frame_system::pallet_prelude::*;
39use scale_info::TypeInfo;
40use sp_npos_elections::EvaluateSupport;
41use sp_runtime::{
42	offchain::storage::{MutateStorageError, StorageValueRef},
43	traits::{SaturatedConversion, Saturating, Zero},
44};
45use sp_std::{collections::btree_map::BTreeMap, prelude::*};
46
47// TODO: we should have a fuzzer for miner that ensures no matter the parameters, it generates a
48// valid solution. Esp. for the trimming.
49
50/// The type of the snapshot.
51///
52/// Used to express errors.
53#[derive(Debug, Eq, PartialEq)]
54pub enum SnapshotType {
55	/// Voters at the given page missing.
56	Voters(PageIndex),
57	/// Targets missing.
58	Targets,
59	/// Metadata missing.
60	Metadata,
61	/// Desired targets missing.
62	DesiredTargets,
63}
64
65pub(crate) type MinerSolverErrorOf<T> = <<T as MinerConfig>::Solver as NposSolver>::Error;
66
67/// The errors related to the [`BaseMiner`].
68#[derive(
69	frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound,
70)]
71pub enum MinerError<T: MinerConfig> {
72	/// An internal error in the NPoS elections crate.
73	NposElections(sp_npos_elections::Error),
74	/// An internal error in the generic solver.
75	Solver(MinerSolverErrorOf<T>),
76	/// Snapshot data was unavailable unexpectedly.
77	SnapshotUnAvailable(SnapshotType),
78	/// The base, common errors from the pallet.
79	Common(CommonError),
80	/// The solution generated from the miner is not feasible.
81	Feasibility(verifier::FeasibilityError),
82	/// Some page index has been invalid.
83	InvalidPage,
84	/// Too many winners were removed during trimming.
85	TooManyWinnersRemoved,
86	/// A defensive error has occurred.
87	Defensive(&'static str),
88}
89
90impl<T: MinerConfig> From<sp_npos_elections::Error> for MinerError<T> {
91	fn from(e: sp_npos_elections::Error) -> Self {
92		MinerError::NposElections(e)
93	}
94}
95
96impl<T: MinerConfig> From<verifier::FeasibilityError> for MinerError<T> {
97	fn from(e: verifier::FeasibilityError) -> Self {
98		MinerError::Feasibility(e)
99	}
100}
101
102impl<T: MinerConfig> From<CommonError> for MinerError<T> {
103	fn from(e: CommonError) -> Self {
104		MinerError::Common(e)
105	}
106}
107
108/// The errors related to the `OffchainWorkerMiner`.
109#[derive(
110	frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound,
111)]
112pub enum OffchainMinerError<T: Config> {
113	/// An error in the base miner.
114	BaseMiner(MinerError<T::MinerConfig>),
115	/// The base, common errors from the pallet.
116	Common(CommonError),
117	/// Something went wrong fetching the lock.
118	Lock(&'static str),
119	/// Submitting a transaction to the pool failed.
120	PoolSubmissionFailed,
121	/// Cannot restore a solution that was not stored.
122	NoStoredSolution,
123	/// Cached solution is not a `submit_unsigned` call.
124	SolutionCallInvalid,
125	/// Failed to store a solution.
126	FailedToStoreSolution,
127	/// Cannot mine a solution with zero pages.
128	ZeroPages,
129}
130
131impl<T: Config> From<MinerError<T::MinerConfig>> for OffchainMinerError<T> {
132	fn from(e: MinerError<T::MinerConfig>) -> Self {
133		OffchainMinerError::BaseMiner(e)
134	}
135}
136
137impl<T: Config> From<CommonError> for OffchainMinerError<T> {
138	fn from(e: CommonError) -> Self {
139		OffchainMinerError::Common(e)
140	}
141}
142
143/// Configurations for the miner.
144///
145/// This is extracted from the main crate's config so that an offchain miner can readily use the
146/// [`BaseMiner`] without needing to deal with the rest of the pallet's configuration.
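///
/// As a minimal sketch, an external implementation might look like the following. Everything on
/// the right-hand side (`MySolution`, the various `Get<u32>` parameter types, etc.) is an
/// assumption of the embedding miner, not something provided by this pallet:
///
/// ```ignore
/// pub struct ExternalMinerConfig;
/// impl MinerConfig for ExternalMinerConfig {
/// 	type AccountId = sp_runtime::AccountId32;
/// 	// an `NposSolution` type, typically generated via `generate_solution_type!`.
/// 	type Solution = MySolution;
/// 	// any `NposSolver`; sequential phragmen is the common choice.
/// 	type Solver = frame_election_provider_support::SequentialPhragmen<Self::AccountId, sp_runtime::Perbill>;
/// 	type MaxLength = MinerMaxLength;
/// 	type MaxVotesPerVoter = MaxVotesPerVoter;
/// 	type MaxWinnersPerPage = MaxWinnersPerPage;
/// 	type MaxBackersPerWinner = MaxBackersPerWinner;
/// 	type MaxBackersPerWinnerFinal = MaxBackersPerWinnerFinal;
/// 	type Pages = Pages;
/// 	type VoterSnapshotPerBlock = VoterSnapshotPerBlock;
/// 	type TargetSnapshotPerBlock = TargetSnapshotPerBlock;
/// 	type Hash = sp_core::H256;
/// }
/// ```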
147pub trait MinerConfig {
148	/// The account id type.
149	type AccountId: Ord + Clone + codec::Codec + core::fmt::Debug;
150	/// The solution type that the miner is mining.
152	type Solution: codec::FullCodec
153		+ Default
154		+ PartialEq
155		+ Eq
156		+ Clone
157		+ sp_std::fmt::Debug
158		+ Ord
159		+ NposSolution
160		+ TypeInfo
161		+ codec::MaxEncodedLen;
162	/// The solver type.
163	type Solver: NposSolver<AccountId = Self::AccountId>;
164	/// The maximum length that the miner should use for a solution, per page.
165	///
166	/// This value is not set in stone, and it is up to an individual miner to configure. A good
167	/// value is something like 75% of the total block length, which can be fetched from the system
168	/// pallet.
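	///
	/// For example, a sketch of such a configuration (assuming a FRAME runtime where `Runtime`
	/// implements `frame_system::Config`):
	///
	/// ```ignore
	/// use frame_support::{dispatch::DispatchClass, parameter_types};
	/// use sp_runtime::Perbill;
	///
	/// parameter_types! {
	/// 	pub MinerMaxLength: u32 = Perbill::from_percent(75) *
	/// 		*<Runtime as frame_system::Config>::BlockLength::get().max.get(DispatchClass::Normal);
	/// }
	/// ```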
169	type MaxLength: Get<u32>;
170	/// Maximum number of votes per voter.
171	///
172	/// Must be the same as configured in the [`crate::Config::DataProvider`].
173	///
174	/// For simplicity, this is 16 in Polkadot and 24 in Kusama.
175	type MaxVotesPerVoter: Get<u32>;
176	/// Maximum number of winners to select per page.
177	///
178	/// The miner should respect this; it is used for trimming and for bounded data types.
179	///
180	/// Should be equal to the onchain value set in `Verifier::Config`.
181	type MaxWinnersPerPage: Get<u32>;
182	/// Maximum number of backers per winner, per page.
183	///
184	/// The miner should respect this; it is used for trimming and for bounded data types.
185	///
186	/// Should be equal to the onchain value set in `Verifier::Config`.
187	type MaxBackersPerWinner: Get<u32>;
188	/// Maximum number of backers, per winner, across all pages.
189	///
190	/// The miner should respect this; it is used for trimming and for bounded data types.
191	///
192	/// Should be equal to the onchain value set in `Verifier::Config`.
193	type MaxBackersPerWinnerFinal: Get<u32>;
194	/// **Maximum** number of pages that we may compute.
195	///
196	/// Must be the same as configured in the [`crate::Config`].
197	type Pages: Get<u32>;
198	/// Maximum number of voters per snapshot page.
199	///
200	/// Must be the same as configured in the [`crate::Config`].
201	type VoterSnapshotPerBlock: Get<u32>;
202	/// Maximum number of targets per snapshot page.
203	///
204	/// Must be the same as configured in the [`crate::Config`].
205	type TargetSnapshotPerBlock: Get<u32>;
206	/// The hash type of the runtime.
207	type Hash: Eq + PartialEq;
208}
209
210/// A base miner that is only capable of mining a new solution, checking it against the state of
211/// this pallet for feasibility, and trimming its length/weight.
212pub struct BaseMiner<T: MinerConfig>(sp_std::marker::PhantomData<T>);
213
214/// Parameterized `BoundedSupports` for the miner.
215///
216/// The bounds of this are set such that they only encapsulate a single page of a snapshot. The other
217/// counterpart is [`FullSupportsOfMiner`].
218pub type PageSupportsOfMiner<T> = frame_election_provider_support::BoundedSupports<
219	<T as MinerConfig>::AccountId,
220	<T as MinerConfig>::MaxWinnersPerPage,
221	<T as MinerConfig>::MaxBackersPerWinner,
222>;
223
224/// Helper type that computes the maximum total winners across all pages.
225pub struct MaxWinnersFinal<T: MinerConfig>(core::marker::PhantomData<T>);
226
227impl<T: MinerConfig> frame_support::traits::Get<u32> for MaxWinnersFinal<T> {
228	fn get() -> u32 {
229		T::Pages::get().saturating_mul(T::MaxWinnersPerPage::get())
230	}
231}
232
233/// The full version of [`PageSupportsOfMiner`].
234///
235/// This should be used on a support instance that is encapsulating the full solution.
236///
237/// Another way to look at it: this is never wrapped in a `Vec<_>`.
238pub type FullSupportsOfMiner<T> = frame_election_provider_support::BoundedSupports<
239	<T as MinerConfig>::AccountId,
240	MaxWinnersFinal<T>,
241	<T as MinerConfig>::MaxBackersPerWinnerFinal,
242>;
243
244/// Aggregator for inputs to [`BaseMiner`].
245pub struct MineInput<T: MinerConfig> {
246	/// Number of winners to pick.
247	pub desired_targets: u32,
248	/// All of the targets.
249	pub all_targets: BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>,
250	/// Paginated list of voters.
251	///
252	/// Note for staking-miners: How this is calculated is rather delicate, and the order of the
253	/// nested vectors matters. See carefully how `OffchainWorkerMiner::mine_solution` is doing
254	/// this.
255	pub voter_pages: AllVoterPagesOf<T>,
256	/// Number of pages to mine.
257	///
258	/// Note for staking-miner: Always use [`MinerConfig::Pages`] unless you explicitly want
259	/// otherwise.
260	pub pages: PageIndex,
261	/// Whether to reduce the solution. Almost always `true`.
262	pub do_reduce: bool,
263	/// The current round for which the solution is being calculated.
264	pub round: u32,
265}
266
267impl<T: MinerConfig> BaseMiner<T> {
268	/// Mine a new npos solution, with the given number of pages.
269	///
270	/// This miner is only capable of mining a solution that either uses all of the pages of the
271	/// snapshot, or the top `pages` thereof.
272	///
273	/// This always trims the solution to match a few parameters:
274	///
275	/// [`MinerConfig::MaxWinnersPerPage`], [`MinerConfig::MaxBackersPerWinner`],
276	/// [`MinerConfig::MaxBackersPerWinnerFinal`] and [`MinerConfig::MaxLength`].
277	///
278	/// The order of pages returned is aligned with the snapshot. For example, the index 0 of the
279	/// returning solution pages corresponds to the page 0 of the snapshot.
280	///
281	/// The only difference is, if the solution is partial, then [`Pagify`] must be used to properly
282	/// pad the results.
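	///
	/// A minimal call sketch, assuming the caller has already gathered `voter_pages`,
	/// `all_targets` and `desired_targets` from its own snapshot source:
	///
	/// ```ignore
	/// let paged_solution = BaseMiner::<T>::mine_solution(MineInput {
	/// 	desired_targets,
	/// 	all_targets,
	/// 	voter_pages,
	/// 	pages: T::Pages::get(),
	/// 	do_reduce: true,
	/// 	round,
	/// })?;
	/// ```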
283	pub fn mine_solution(
284		MineInput { desired_targets, all_targets, voter_pages, mut pages, do_reduce, round }: MineInput<
285			T,
286		>,
287	) -> Result<PagedRawSolution<T>, MinerError<T>> {
288		pages = pages.min(T::Pages::get());
289
290		// we also build this closure early, so we can let `targets` be consumed.
291		let voter_page_fn = helpers::generate_voter_page_fn::<T>(&voter_pages);
292		let target_index_fn = helpers::target_index_fn::<T>(&all_targets);
293
294		// now flatten the voters, ready to be used as if pagination did not exist.
295		let all_voters: AllVoterPagesFlattenedOf<T> = voter_pages
296			.iter()
297			.cloned()
298			.flatten()
299			.collect::<Vec<_>>()
300			.try_into()
301			.expect("Flattening the voters into `AllVoterPagesFlattenedOf` cannot fail; qed");
302
303		let ElectionResult { winners: _, assignments } = T::Solver::solve(
304			desired_targets as usize,
305			all_targets.clone().to_vec(),
306			all_voters.clone().into_inner(),
307		)
308		.map_err(|e| MinerError::Solver(e))?;
309
310		// reduce and trim supports. We don't trim length and weight here, since those are dependent
311		// on the final form of the solution ([`PagedRawSolution`]), thus we do it later.
312		let trimmed_assignments = {
313			// Implementation note: the overall code path is as follows: election_results ->
314			// assignments -> staked assignments -> reduce -> supports -> trim supports -> staked
315			// assignments -> final assignments
316			// This is by no means the most performant, but it is clear and correct.
317			use sp_npos_elections::{
318				assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized,
319				reduce, supports_to_staked_assignment, to_supports, EvaluateSupport,
320			};
321
322			// These closures are of no use in the rest of this code, since they only deal with the
323			// overall list of voters.
324			let cache = helpers::generate_voter_cache::<T, _>(&all_voters);
325			let stake_of = helpers::stake_of_fn::<T, _>(&all_voters, &cache);
326
327			// 1. convert to staked and reduce
328			let (reduced_count, staked) = {
329				let mut staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)
330					.map_err::<MinerError<T>, _>(Into::into)?;
331
332				// first, reduce the solution if requested. This will already remove a lot of
333				// "redundant" edges and reduce the chance of needing any further trimming.
334				let count = if do_reduce { reduce(&mut staked) } else { 0 };
335				(count, staked)
336			};
337
338			// 2. trim the supports by FINAL backing.
339			let (_pre_score, final_trimmed_assignments, winners_removed, backers_removed) = {
340				// these supports could very well be invalid for SCORE purposes. The reason is that
341				// you might trim out half of an account's stake, but we don't look for this
342				// account's other votes to fix it.
343				let supports_invalid_score = to_supports(&staked);
344
345				let pre_score = (&supports_invalid_score).evaluate();
346				let (bounded_invalid_score, winners_removed, backers_removed) =
347					FullSupportsOfMiner::<T>::sorted_truncate_from(supports_invalid_score);
348
349				// now recreate the staked assignments
350				let staked = supports_to_staked_assignment(bounded_invalid_score.into());
351				let assignments = assignment_staked_to_ratio_normalized(staked)
352					.map_err::<MinerError<T>, _>(Into::into)?;
353				(pre_score, assignments, winners_removed, backers_removed)
354			};
355
356			miner_log!(
357				debug,
358				"initial score = {:?}, reduced {} edges, trimmed {} winners and {} backers due to global support limits",
359				_pre_score,
360				reduced_count,
361				winners_removed,
362				backers_removed,
363			);
364
365			final_trimmed_assignments
366		};
367
368		// split the assignments into different pages.
369		let mut paged_assignments: BoundedVec<Vec<AssignmentOf<T>>, T::Pages> =
370			BoundedVec::with_bounded_capacity(pages as usize);
371		paged_assignments.bounded_resize(pages as usize, Default::default());
372
373		for assignment in trimmed_assignments {
374			// NOTE: this `page` index is LOCAL. It does not correspond to the actual page index of
375			// the snapshot map, but rather the index in the `voter_pages`.
376			let page = voter_page_fn(&assignment.who).ok_or(MinerError::InvalidPage)?;
377			let assignment_page =
378				paged_assignments.get_mut(page as usize).ok_or(MinerError::InvalidPage)?;
379			assignment_page.push(assignment);
380		}
381
382		// convert each page to a compact struct -- no more change allowed.
383		let mut solution_pages: Vec<SolutionOf<T>> = paged_assignments
384			.into_iter()
385			.enumerate()
386			.map(|(page_index, assignment_page)| {
387				// get the page of the snapshot that corresponds to this page of the assignments.
388				let page: PageIndex = page_index.saturated_into();
389				let voter_snapshot_page = voter_pages
390					.get(page as usize)
391					.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(page)))?;
392
393				// one last trimming -- `MaxBackersPerWinner`, the per-page variant.
394				let trimmed_assignment_page = Self::trim_supports_max_backers_per_winner_per_page(
395					assignment_page,
396					voter_snapshot_page,
397					page_index as u32,
398				)?;
399
400				let voter_index_fn = {
401					let cache = helpers::generate_voter_cache::<T, _>(&voter_snapshot_page);
402					helpers::voter_index_fn_owned::<T>(cache)
403				};
404
405				<SolutionOf<T>>::from_assignment(
406					&trimmed_assignment_page,
407					&voter_index_fn,
408					&target_index_fn,
409				)
410				.map_err::<MinerError<T>, _>(Into::into)
411			})
412			.collect::<Result<Vec<_>, _>>()?;
413
414		// now do the length trim.
415		let _trim_length_weight =
416			Self::maybe_trim_weight_and_len(&mut solution_pages, &voter_pages)?;
417		miner_log!(debug, "trimmed {} voters due to length restriction.", _trim_length_weight);
418
419		// finally, wrap everything up. Assign a fake score here, since we might need to re-compute
420		// it.
421		let mut paged = PagedRawSolution { round, solution_pages, score: Default::default() };
422
423		// OPTIMIZATION: we do feasibility_check inside `compute_score`, and once later
424		// pre_dispatch. I think it is fine, but maybe we can improve it.
425		let score = Self::compute_score(&paged, &voter_pages, &all_targets, desired_targets)
426			.map_err::<MinerError<T>, _>(Into::into)?;
427		paged.score = score;
428
429		miner_log!(
430			debug,
431			"mined a solution with {} pages, score {:?}, {} winners, {} voters, {} edges, and {} bytes",
432			pages,
433			score,
434			paged.winner_count_single_page_target_snapshot(),
435			paged.voter_count(),
436			paged.edge_count(),
437			paged.using_encoded(|b| b.len())
438		);
439
440		Ok(paged)
441	}
442
443	/// Perform the feasibility check on all pages of a solution, returning the supports of each
444	/// page if all is good and the corresponding error otherwise.
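	///
	/// A usage sketch, assuming the same snapshot data that the solution was mined from is still
	/// at hand:
	///
	/// ```ignore
	/// let _page_supports = BaseMiner::<T>::check_feasibility(
	/// 	&paged_solution,
	/// 	&voter_pages,
	/// 	&all_targets,
	/// 	desired_targets,
	/// )?;
	/// ```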
445	pub fn check_feasibility(
446		paged_solution: &PagedRawSolution<T>,
447		paged_voters: &AllVoterPagesOf<T>,
448		snapshot_targets: &BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>,
449		desired_targets: u32,
450	) -> Result<Vec<PageSupportsOfMiner<T>>, MinerError<T>> {
451		// check every solution page for feasibility.
452		let padded_voters = paged_voters.clone().pad_solution_pages(T::Pages::get());
453		paged_solution
454			.solution_pages
455			.pagify(T::Pages::get())
456			.map(|(page_index, page_solution)| {
457				match verifier::feasibility_check_page_inner_with_snapshot::<T>(
458					page_solution.clone(),
459					&padded_voters[page_index as usize],
460					snapshot_targets,
461					desired_targets,
462				) {
463					Ok(x) => {
464						miner_log!(debug, "feasibility check of page {:?} was okay", page_index,);
465						Ok(x)
466					},
467					Err(e) => {
468						miner_log!(
469							warn,
470							"feasibility check of page {:?} {:?} failed for solution because: {:?}",
471							page_index,
472							page_solution,
473							e,
474						);
475						Err(e)
476					},
477				}
478			})
479			.collect::<Result<Vec<_>, _>>()
480			.map_err(|err| MinerError::from(err))
481			.and_then(|supports| {
482				// If we someday want to check `MaxBackersPerWinnerFinal`, it would be here.
483				Ok(supports)
484			})
485	}
486
487	/// Take the given raw paged solution and compute its score. This will replicate what the chain
488	/// would do as closely as possible, and expects all the corresponding snapshot data to be
489	/// available.
490	fn compute_score(
491		paged_solution: &PagedRawSolution<T>,
492		paged_voters: &AllVoterPagesOf<T>,
493		all_targets: &BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>,
494		desired_targets: u32,
495	) -> Result<ElectionScore, MinerError<T>> {
496		let all_supports =
497			Self::check_feasibility(paged_solution, paged_voters, all_targets, desired_targets)?;
498		let mut total_backings: BTreeMap<T::AccountId, ExtendedBalance> = BTreeMap::new();
499		all_supports.into_iter().flat_map(|x| x.0).for_each(|(who, support)| {
500			let backing = total_backings.entry(who).or_default();
501			*backing = backing.saturating_add(support.total);
502		});
503
504		let all_supports = total_backings
505			.into_iter()
506			.map(|(who, total)| (who, Support { total, ..Default::default() }))
507			.collect::<Vec<_>>();
508
509		Ok((&all_supports).evaluate())
510	}
511
512	fn trim_supports_max_backers_per_winner_per_page(
513		untrimmed_assignments: Vec<AssignmentOf<T>>,
514		page_voters: &VoterPageOf<T>,
515		page: PageIndex,
516	) -> Result<Vec<AssignmentOf<T>>, MinerError<T>> {
517		use sp_npos_elections::{
518			assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized,
519			supports_to_staked_assignment, to_supports,
520		};
521		// convert to staked
522		let cache = helpers::generate_voter_cache::<T, _>(page_voters);
523		let stake_of = helpers::stake_of_fn::<T, _>(&page_voters, &cache);
524		let untrimmed_staked_assignments =
525			assignment_ratio_to_staked_normalized(untrimmed_assignments, &stake_of)?;
526
527		// convert to supports
528		let supports = to_supports(&untrimmed_staked_assignments);
529		drop(untrimmed_staked_assignments);
530
531		// Convert it to our desired bounds, which will truncate the smallest backers if need
532		// be.
533		let (bounded, winners_removed, backers_removed) =
534			PageSupportsOfMiner::<T>::sorted_truncate_from(supports);
535
536		miner_log!(
537			debug,
538			"trimmed {} winners and {} backers from page {} due to per-page limits",
539			winners_removed,
540			backers_removed,
541			page
542		);
543
544		// convert back to staked
545		let trimmed_staked_assignments = supports_to_staked_assignment(bounded.into());
546		// and then ratio assignments
547		let trimmed_assignments =
548			assignment_staked_to_ratio_normalized(trimmed_staked_assignments)?;
549
550		Ok(trimmed_assignments)
551	}
552
553	/// Maybe trim the weight and length of the given multi-page solution.
554	///
555	/// Returns the number of voters removed.
556	///
557	/// If either of the bounds are not met, the trimming strategy is as follows:
558	///
559	/// Start from the least significant page. Assume only this page is going to be trimmed. Call
560	/// `page.sort()` on this page. This will make sure that in each field (`votes1`, `votes2`, etc.)
561	/// of that page, the voters are sorted by descending stake. Then, we compare the last item of
562	/// each field and remove the single least-staked voter.
563	///
564	/// We repeat this until satisfied, for both weight and length. If a full page is removed, but
565	/// the bound is not satisfied, we need to make sure that we sort the next least valuable page,
566	/// and repeat the same process.
567	///
568	/// NOTE: this is a public function to be used by the `OffchainWorkerMiner` or any similar one,
569	/// based on the submission strategy. The length and weight bounds of a call are dependent on
570	/// the number of pages being submitted, the number of blocks over which we submit, and the type
571	/// of the transaction and its weight (e.g. signed or unsigned).
572	///
573	/// NOTE: It could be that this function removes too many voters, and the solution becomes
574	/// invalid. This is not yet handled and only a warning is emitted.
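	///
	/// A usage sketch for an external submission strategy (assuming `solution_pages` and the
	/// matching `paged_voters` are already at hand):
	///
	/// ```ignore
	/// let _removed_voters =
	/// 	BaseMiner::<T>::maybe_trim_weight_and_len(&mut solution_pages, &paged_voters)?;
	/// ```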
575	pub fn maybe_trim_weight_and_len(
576		solution_pages: &mut Vec<SolutionOf<T>>,
577		paged_voters: &AllVoterPagesOf<T>,
578	) -> Result<u32, MinerError<T>> {
579		debug_assert_eq!(solution_pages.len(), paged_voters.len());
580		let size_limit = T::MaxLength::get();
581
582		let needs_any_trim = |solution_pages: &mut Vec<SolutionOf<T>>| {
583			let size = solution_pages.encoded_size() as u32;
584			let needs_len_trim = size > size_limit;
585			// a reminder that we used to have weight trimming here, but no more!
586			let needs_weight_trim = false;
587			needs_weight_trim || needs_len_trim
588		};
589
590		// Note the solution might be partial. In either case, this is its least significant page.
591		let mut current_trimming_page = 0;
592		let current_trimming_page_stake_of = |current_trimming_page: usize| {
593			Box::new(move |voter_index: &SolutionVoterIndexOf<T>| -> VoteWeight {
594				paged_voters
595					.get(current_trimming_page)
596					.and_then(|page_voters| {
597						page_voters
598							.get((*voter_index).saturated_into::<usize>())
599							.map(|(_, s, _)| *s)
600					})
601					.unwrap_or_default()
602			})
603		};
604
605		let sort_current_trimming_page =
606			|current_trimming_page: usize, solution_pages: &mut Vec<SolutionOf<T>>| {
607				solution_pages.get_mut(current_trimming_page).map(|solution_page| {
608					let stake_of_fn = current_trimming_page_stake_of(current_trimming_page);
609					solution_page.sort(stake_of_fn)
610				});
611			};
612
613		let is_empty = |solution_pages: &Vec<SolutionOf<T>>| {
614			solution_pages.iter().all(|page| page.voter_count().is_zero())
615		};
616
617		if needs_any_trim(solution_pages) {
618			sort_current_trimming_page(current_trimming_page, solution_pages)
619		}
620
621		// Implementation note: we want `solution_pages` and `paged_voters` to remain in sync, so
622		// while one of the pages of `solution_pages` might become "empty" we prefer not removing
623		// it. This has a slight downside that even an empty page consumes a few dozen bytes,
624		// which we accept for code simplicity.
625
626		let mut removed = 0;
627		while needs_any_trim(solution_pages) && !is_empty(solution_pages) {
628			if let Some(removed_idx) =
629				solution_pages.get_mut(current_trimming_page).and_then(|page| {
630					let stake_of_fn = current_trimming_page_stake_of(current_trimming_page);
631					page.remove_weakest_sorted(&stake_of_fn)
632				}) {
633				miner_log!(
634					trace,
635					"removed voter at index {:?} of (un-pagified) page {} as the weakest due to weight/length limits.",
636					removed_idx,
637					current_trimming_page
638				);
639				// we removed one person, continue.
640				removed.saturating_inc();
641			} else {
642				// this page cannot support any more removals. Try to go to the next page.
643				miner_log!(
644					debug,
645					"page {} seems to be fully empty now, moving to the next one",
646					current_trimming_page
647				);
648				let next_page = current_trimming_page.saturating_add(1);
649				if paged_voters.len() > next_page {
650					current_trimming_page = next_page;
651					sort_current_trimming_page(current_trimming_page, solution_pages);
652				} else {
653					miner_log!(
654						warn,
655						"no more pages to trim from at page {}, already trimmed",
656						current_trimming_page
657					);
658					break
659				}
660			}
661		}
662
663		Ok(removed)
664	}
665}
666
667/// A miner that is suited to work inside offchain worker environment.
668///
669/// This is parameterized by [`Config`], rather than [`MinerConfig`].
670pub struct OffchainWorkerMiner<T: Config>(sp_std::marker::PhantomData<T>);
671
672impl<T: Config> OffchainWorkerMiner<T> {
673	/// Storage key used to store the offchain worker running status.
674	pub(crate) const OFFCHAIN_LOCK: &'static [u8] = b"parity/multi-block-unsigned-election/lock";
675	/// Storage key used to store the last block number at which offchain worker ran.
676	const OFFCHAIN_LAST_BLOCK: &'static [u8] = b"parity/multi-block-unsigned-election";
677	/// Storage key used to cache the solution `call` and its snapshot fingerprint.
678	const OFFCHAIN_CACHED_CALL: &'static [u8] = b"parity/multi-block-unsigned-election/call";
679
680	pub(crate) fn fetch_snapshot(
681		pages: PageIndex,
682	) -> Result<
683		(AllVoterPagesOf<T::MinerConfig>, BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>, u32),
684		OffchainMinerError<T>,
685	> {
686		// read the appropriate snapshot pages.
687		let desired_targets = crate::Snapshot::<T>::desired_targets()
688			.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::DesiredTargets))?;
689		let all_targets = crate::Snapshot::<T>::targets()
690			.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Targets))?;
691
692		// This is the range of voters that we are interested in.
693		let voter_pages_range = crate::Pallet::<T>::msp_range_for(pages as usize);
694
695		sublog!(
696			debug,
697			"unsigned::base-miner",
698			"mining a solution with {} pages, voter snapshot range will be: {:?}",
699			pages,
700			voter_pages_range
701		);
702
703		// NOTE: if `pages (2) < T::Pages (3)`, at this point this vector will have length 2,
704		// with a layout of `[snapshot(1), snapshot(2)]`, namely the two most significant pages
705		//  of the snapshot.
706		let voter_pages: BoundedVec<_, T::Pages> = voter_pages_range
707			.into_iter()
708			.map(|p| {
709				crate::Snapshot::<T>::voters(p)
710					.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(p)))
711			})
712			.collect::<Result<Vec<_>, _>>()?
713			.try_into()
714			.expect(
715				"`voter_pages_range` has `.take(pages)`; it must have length less than pages; it
716				must convert to `BoundedVec`; qed",
717			);
718
719		Ok((voter_pages, all_targets, desired_targets))
720	}
721
722	pub fn mine_solution(
723		pages: PageIndex,
724		do_reduce: bool,
725	) -> Result<PagedRawSolution<T::MinerConfig>, OffchainMinerError<T>> {
726		if pages.is_zero() {
727			return Err(OffchainMinerError::<T>::ZeroPages);
728		}
729		let (voter_pages, all_targets, desired_targets) = Self::fetch_snapshot(pages)?;
730		let round = crate::Pallet::<T>::round();
731		BaseMiner::<T::MinerConfig>::mine_solution(MineInput {
732			desired_targets,
733			all_targets,
734			voter_pages,
735			pages,
736			do_reduce,
737			round,
738		})
739		.map_err(Into::into)
740	}
741
742	/// Get a checked solution from the base miner, ensure unsigned-specific checks also pass, then
743	/// return a submittable call.
744	fn mine_checked_call() -> Result<Call<T>, OffchainMinerError<T>> {
745		// we always do reduce in the offchain worker miner.
746		let reduce = true;
747
748		// NOTE: we don't run any checks in the base miner, and run all of them via
749		// `Self::full_checks`.
750		let paged_solution = Self::mine_solution(T::MinerPages::get(), reduce)
751			.map_err::<OffchainMinerError<T>, _>(Into::into)?;
752		// check the call fully, no fingerprinting.
753		let _ = Self::check_solution(&paged_solution, None, true)?;
754
755		let call: Call<T> =
756			Call::<T>::submit_unsigned { paged_solution: Box::new(paged_solution) }.into();
757
758		Ok(call)
759	}
760
761	/// Mine a new checked solution, maybe cache it, and submit it back to the chain as an unsigned
762	/// transaction.
763	pub(crate) fn mine_check_maybe_save_submit(save: bool) -> Result<(), OffchainMinerError<T>> {
764		sublog!(debug, "unsigned::ocw-miner", "miner attempting to compute an unsigned solution.");
765		let call = Self::mine_checked_call()?;
766		if save {
767			Self::save_solution(&call, crate::Snapshot::<T>::fingerprint())?;
768		}
769		Self::submit_call(call)
770	}
771
772	/// Check the solution, from the perspective of the offchain-worker miner:
773	///
774	/// 1. unsigned-specific checks.
775	/// 2. full-checks of the base miner
776	/// 	1. optionally feasibility check.
777	/// 	2. snapshot-independent checks.
778	/// 		1. optionally, snapshot fingerprint.
779	pub(crate) fn check_solution(
780		paged_solution: &PagedRawSolution<T::MinerConfig>,
781		maybe_snapshot_fingerprint: Option<T::Hash>,
782		do_feasibility: bool,
783	) -> Result<(), OffchainMinerError<T>> {
784		// NOTE: we prefer cheap checks first, so first run unsigned checks.
785		Pallet::<T>::unsigned_specific_checks(paged_solution)?;
786		Self::base_check_solution(paged_solution, maybe_snapshot_fingerprint, do_feasibility)
787	}
788
789	fn submit_call(call: Call<T>) -> Result<(), OffchainMinerError<T>> {
790		let xt = T::create_bare(call.into());
791		frame_system::offchain::SubmitTransaction::<T, Call<T>>::submit_transaction(xt)
792			.map(|_| {
793				sublog!(
794					debug,
795					"unsigned::ocw-miner",
796					"miner submitted a solution as an unsigned transaction",
797				);
798			})
799			.map_err(|_| OffchainMinerError::PoolSubmissionFailed)
800	}
801
802	/// Check the solution, from the perspective of the base miner:
803	///
804	/// 1. snapshot-independent checks.
805	/// 	- with the fingerprint check being an optional step of that.
806	/// 2. optionally, feasibility check.
807	///
808	/// In most cases, you should always use this either with `do_feasibility = true` or
809	/// `maybe_snapshot_fingerprint.is_some()`. Doing both could be overkill. The snapshot
810	/// staying constant (which can be checked via the hash) is a strong guarantee that the
811	/// feasibility still holds.
812	///
813	/// The difference between this and [`Self::check_solution`] is that this does not run unsigned
814	/// specific checks.
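	///
	/// A usage sketch, mirroring how the tests drive this (here `paged_solution` is assumed to be
	/// a freshly mined solution, fully re-verified against the current snapshot):
	///
	/// ```ignore
	/// OffchainWorkerMiner::<T>::base_check_solution(&paged_solution, None, true)?;
	/// ```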
815	pub(crate) fn base_check_solution(
816		paged_solution: &PagedRawSolution<T::MinerConfig>,
817		maybe_snapshot_fingerprint: Option<T::Hash>,
818		do_feasibility: bool,
819	) -> Result<(), OffchainMinerError<T>> {
820		let _ = crate::Pallet::<T>::snapshot_independent_checks(
821			paged_solution,
822			maybe_snapshot_fingerprint,
823		)?;
824
825		if do_feasibility {
826			let (voter_pages, all_targets, desired_targets) =
827				Self::fetch_snapshot(paged_solution.solution_pages.len() as PageIndex)?;
828			let _ = BaseMiner::<T::MinerConfig>::check_feasibility(
829				&paged_solution,
830				&voter_pages,
831				&all_targets,
832				desired_targets,
833			)?;
834		}
835
836		Ok(())
837	}
838
839	/// Attempt to restore a solution from cache. Otherwise, compute it fresh. Either way,
840	/// submit if our call's score is greater than that of the cached solution.
841	pub(crate) fn restore_or_compute_then_maybe_submit() -> Result<(), OffchainMinerError<T>> {
842		sublog!(
843			debug,
844			"unsigned::ocw-miner",
845			"miner attempting to restore or compute an unsigned solution."
846		);
847
848		let call = Self::restore_solution()
849			.and_then(|(call, snapshot_fingerprint)| {
850				// ensure the cached call is still current before submitting
851				if let Call::submit_unsigned { paged_solution, .. } = &call {
852					// we check the snapshot fingerprint instead of doing a full feasibility.
853					OffchainWorkerMiner::<T>::check_solution(
854						paged_solution,
855						Some(snapshot_fingerprint),
856						false,
857					).map_err::<OffchainMinerError<T>, _>(Into::into)?;
858					Ok(call)
859				} else {
860					Err(OffchainMinerError::SolutionCallInvalid)
861				}
862			})
863			.or_else::<OffchainMinerError<T>, _>(|error| {
864				use OffchainMinerError as OE;
865				use MinerError as ME;
866				use CommonError as CE;
867				match error {
868					OE::NoStoredSolution => {
869						// If not present, regenerate.
870						let call = Self::mine_checked_call()?;
871						Self::save_solution(&call, crate::Snapshot::<T>::fingerprint())?;
872						Ok(call)
873					},
874					OE::Common(ref e) => {
875						sublog!(
876							error,
877							"unsigned::ocw-miner",
878							"unsigned specific checks failed ({:?}) while restoring solution. This should never happen. clearing cache.",
879							e,
880						);
881						Self::clear_offchain_solution_cache();
882						Err(error)
883					},
884					OE::BaseMiner(ME::Feasibility(_))
885						| OE::BaseMiner(ME::Common(CE::WrongRound))
886						| OE::BaseMiner(ME::Common(CE::WrongFingerprint))
887					=> {
888						// note that failing `Feasibility` can only mean that the solution was
889						// computed over a snapshot that has changed due to a fork.
890						sublog!(warn, "unsigned::ocw-miner", "wiping infeasible solution ({:?}).", error);
891						// kill the "bad" solution.
892						Self::clear_offchain_solution_cache();
893
894						// .. then return the error as-is.
895						Err(error)
896					},
897					_ => {
898						sublog!(debug, "unsigned::ocw-miner", "unhandled error in restoring offchain solution {:?}", error);
899						// nothing to do. Return the error as-is.
900						Err(error)
901					},
902				}
903			})?;
904
905		Self::submit_call(call)
906	}
907
908	/// Checks if an execution of the offchain worker is permitted at the given block number, or
909	/// not.
910	///
911	/// This makes sure that
912	/// 1. we don't run on previous blocks in case of a re-org
913	/// 2. we don't run twice within a window of length `T::OffchainRepeat`.
914	///
915	/// Returns `Ok(())` if offchain worker limit is respected, `Err(reason)` otherwise. If
916	/// `Ok()` is returned, `now` is written in storage and will be used in further calls as the
917	/// baseline.
918	pub fn ensure_offchain_repeat_frequency(
919		now: BlockNumberFor<T>,
920	) -> Result<(), OffchainMinerError<T>> {
921		let threshold = T::OffchainRepeat::get();
922		let last_block = StorageValueRef::persistent(&Self::OFFCHAIN_LAST_BLOCK);
923
924		let mutate_stat = last_block.mutate::<_, &'static str, _>(
925			|maybe_head: Result<Option<BlockNumberFor<T>>, _>| {
926				match maybe_head {
927					Ok(Some(head)) if now < head => Err("fork."),
928					Ok(Some(head)) if now >= head && now <= head + threshold =>
929						Err("recently executed."),
930					Ok(Some(head)) if now > head + threshold => {
931						// we can run again now. Write the new head.
932						Ok(now)
933					},
934					_ => {
935					// value doesn't exist. Probably this node just booted up. Write, and
936						// run
937						Ok(now)
938					},
939				}
940			},
941		);
942
943		match mutate_stat {
944			// all good
945			Ok(_) => Ok(()),
946			// failed to write.
947			Err(MutateStorageError::ConcurrentModification(_)) => Err(OffchainMinerError::Lock(
948				"failed to write to offchain db (concurrent modification).",
949			)),
950			// fork etc.
951			Err(MutateStorageError::ValueFunctionFailed(why)) => Err(OffchainMinerError::Lock(why)),
952		}
953	}
954
955	/// Save a given call into OCW storage.
956	fn save_solution(
957		call: &Call<T>,
958		snapshot_fingerprint: T::Hash,
959	) -> Result<(), OffchainMinerError<T>> {
960		sublog!(debug, "unsigned::ocw-miner", "saving a call to the offchain storage.");
961		let storage = StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL);
962		match storage.mutate::<_, (), _>(|_| Ok((call.clone(), snapshot_fingerprint))) {
963			Ok(_) => Ok(()),
964			Err(MutateStorageError::ConcurrentModification(_)) =>
965				Err(OffchainMinerError::FailedToStoreSolution),
966			Err(MutateStorageError::ValueFunctionFailed(_)) => {
967				// this branch should be unreachable according to the definition of
968				// `StorageValueRef::mutate`: that function should only ever `Err` if the closure we
969				// pass it returns an error. However, for safety in case the definition changes, we
970				// do not optimize the branch away or panic.
971				Err(OffchainMinerError::FailedToStoreSolution)
972			},
973		}
974	}
975
976	/// Get a saved solution from OCW storage if it exists.
977	fn restore_solution() -> Result<(Call<T>, T::Hash), OffchainMinerError<T>> {
978		StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL)
979			.get()
980			.ok()
981			.flatten()
982			.ok_or(OffchainMinerError::NoStoredSolution)
983	}
984
985	/// Clear a saved solution from OCW storage.
986	fn clear_offchain_solution_cache() {
987		sublog!(debug, "unsigned::ocw-miner", "clearing offchain call cache storage.");
988		let mut storage = StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL);
989		storage.clear();
990	}
991
992	#[cfg(test)]
993	fn cached_solution() -> Option<Call<T>> {
994		StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL)
995			.get::<Call<T>>()
996			.unwrap()
997	}
998}
999
1000// This will only focus on testing the internals of `maybe_trim_weight_and_len`.
1001#[cfg(test)]
1002mod trimming {
1003	use super::*;
1004	use crate::{mock::*, verifier::Verifier};
1005	use frame_election_provider_support::TryFromUnboundedPagedSupports;
1006	use sp_npos_elections::Support;
1007
1008	#[test]
1009	fn solution_without_any_trimming() {
1010		ExtBuilder::mock_signed().build_and_execute(|| {
1011			// adjust the voters a bit, such that they all have different backings
1012			let mut current_voters = Voters::get();
1013			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1014			Voters::set(current_voters);
1015
1016			roll_to_snapshot_created();
1017
1018			// now we let the miner mine something for us..
1019			let solution = mine_full_solution().unwrap();
1020			assert_eq!(
1021				solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(),
1022				8
1023			);
1024
1025			assert_eq!(solution.solution_pages.encoded_size(), 105);
1026			load_mock_signed_and_start(solution);
1027			let supports = roll_to_full_verification();
1028
1029			// a solution is queued.
1030			assert!(VerifierPallet::queued_score().is_some());
1031
1032			assert_eq!(
1033				supports,
1034				vec![
1035					vec![
1036						(30, Support { total: 30, voters: vec![(30, 30)] }),
1037						(40, Support { total: 40, voters: vec![(40, 40)] })
1038					],
1039					vec![
1040						(30, Support { total: 11, voters: vec![(5, 2), (6, 2), (7, 7)] }),
1041						(40, Support { total: 7, voters: vec![(5, 3), (6, 4)] })
1042					],
1043					vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })]
1044				]
1045				.try_from_unbounded_paged()
1046				.unwrap()
1047			);
1048		})
1049	}
1050
1051	#[test]
1052	fn trim_length() {
1053		ExtBuilder::mock_signed().miner_max_length(104).build_and_execute(|| {
1054			// adjust the voters a bit, such that they all have different backings
1055			let mut current_voters = Voters::get();
1056			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1057			Voters::set(current_voters);
1058
1059			roll_to_snapshot_created();
1060			ensure_voters(3, 12);
1061
1062			let solution = mine_full_solution().unwrap();
1063
1064			assert_eq!(
1065				solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(),
1066				7
1067			);
1068
1069			assert_eq!(solution.solution_pages.encoded_size(), 99);
1070
1071			load_mock_signed_and_start(solution);
1072			let supports = roll_to_full_verification();
1073
1074			// a solution is queued.
1075			assert!(VerifierPallet::queued_score().is_some());
1076
1077			assert_eq!(
1078				supports,
1079				vec![
1080					// 30 is gone! Note that length trimming starts from lsp, so we trim from this
1081					// page only.
1082					vec![(40, Support { total: 40, voters: vec![(40, 40)] })],
1083					vec![
1084						(30, Support { total: 11, voters: vec![(5, 2), (6, 2), (7, 7)] }),
1085						(40, Support { total: 7, voters: vec![(5, 3), (6, 4)] })
1086					],
1087					vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })]
1088				]
1089				.try_from_unbounded_paged()
1090				.unwrap()
1091			);
1092		});
1093	}
1094
1095	#[test]
1096	fn trim_length_2() {
1097		ExtBuilder::mock_signed().miner_max_length(98).build_and_execute(|| {
1098			// adjust the voters a bit, such that they all have different backings
1099			let mut current_voters = Voters::get();
1100			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1101			Voters::set(current_voters);
1102
1103			roll_to_snapshot_created();
1104			ensure_voters(3, 12);
1105
1106			let solution = mine_full_solution().unwrap();
1107
1108			assert_eq!(
1109				solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(),
1110				6
1111			);
1112
1113			assert_eq!(solution.solution_pages.encoded_size(), 93);
1114
1115			load_mock_signed_and_start(solution);
1116			let supports = roll_to_full_verification();
1117
1118			// a solution is queued.
1119			assert!(VerifierPallet::queued_score().is_some());
1120
1121			assert_eq!(
1122				supports,
1123				vec![
1124					vec![],
1125					vec![
1126						(30, Support { total: 11, voters: vec![(5, 2), (6, 2), (7, 7)] }),
1127						(40, Support { total: 7, voters: vec![(5, 3), (6, 4)] })
1128					],
1129					vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })]
1130				]
1131				.try_from_unbounded_paged()
1132				.unwrap()
1133			);
1134		});
1135	}
1136
1137	#[test]
1138	fn trim_length_3() {
1139		ExtBuilder::mock_signed().miner_max_length(92).build_and_execute(|| {
1140			// adjust the voters a bit, such that they all have different backings
1141			let mut current_voters = Voters::get();
1142			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1143			Voters::set(current_voters);
1144
1145			roll_to_snapshot_created();
1146			ensure_voters(3, 12);
1147
1148			let solution = mine_full_solution().unwrap();
1149
1150			assert_eq!(
1151				solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(),
1152				5
1153			);
1154
1155			assert_eq!(solution.solution_pages.encoded_size(), 83);
1156
1157			load_mock_signed_and_start(solution);
1158			let supports = roll_to_full_verification();
1159
1160			// a solution is queued.
1161			assert!(VerifierPallet::queued_score().is_some());
1162
1163			assert_eq!(
1164				supports,
1165				vec![
1166					vec![],
1167					vec![
1168						(30, Support { total: 9, voters: vec![(6, 2), (7, 7)] }),
1169						(40, Support { total: 4, voters: vec![(6, 4)] })
1170					],
1171					vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })]
1172				]
1173				.try_from_unbounded_paged()
1174				.unwrap()
1175			);
1176		});
1177	}
1178
1179	#[test]
1180	fn trim_backers_per_page_works() {
1181		ExtBuilder::mock_signed().max_backers_per_winner(2).build_and_execute(|| {
1182			// adjust the voters a bit, such that they all have different backings
1183			let mut current_voters = Voters::get();
1184			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1185			Voters::set(current_voters);
1186
1187			roll_to_snapshot_created();
1188			ensure_voters(3, 12);
1189
1190			let solution = mine_full_solution().unwrap();
1191
1192			load_mock_signed_and_start(solution);
1193			let supports = roll_to_full_verification();
1194
1195			// a solution is queued.
1196			assert!(VerifierPallet::queued_score().is_some());
1197
1198			// each page is trimmed individually, based on `solution_without_any_trimming`.
1199			assert_eq!(
1200				supports,
1201				vec![
1202					vec![
1203						(30, Support { total: 30, voters: vec![(30, 30)] }),
1204						(40, Support { total: 40, voters: vec![(40, 40)] })
1205					],
1206					vec![
1207						(30, Support { total: 9, voters: vec![(6, 2), (7, 7)] }),
1208						(40, Support { total: 9, voters: vec![(5, 5), (6, 4)] }) /* notice how
1209						                                                          * 5's stake is
1210						                                                          * re-distributed
1211						                                                          * all here ^^ */
1212					],
1213					vec![(40, Support { total: 7, voters: vec![(3, 3), (4, 4)] })]
1214				]
1215				.try_from_unbounded_paged()
1216				.unwrap()
1217			);
1218		})
1219	}
1220
1221	#[test]
1222	fn trim_backers_per_page_works_2() {
1223		// This one is more interesting, as it also shows that as we trim backers, we re-distribute
1224		// their weight elsewhere.
1225		ExtBuilder::mock_signed().max_backers_per_winner(1).build_and_execute(|| {
1226			// adjust the voters a bit, such that they all have different backings
1227			let mut current_voters = Voters::get();
1228			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1229			Voters::set(current_voters);
1230
1231			roll_to_snapshot_created();
1232			ensure_voters(3, 12);
1233
1234			let solution = mine_full_solution().unwrap();
1235
1236			load_mock_signed_and_start(solution);
1237			let supports = roll_to_full_verification();
1238
1239			// a solution is queued.
1240			assert!(VerifierPallet::queued_score().is_some());
1241
1242			// each page is trimmed individually, based on `solution_without_any_trimming`.
1243			assert_eq!(
1244				supports,
1245				vec![
1246					vec![
1247						(30, Support { total: 30, voters: vec![(30, 30)] }),
1248						(40, Support { total: 40, voters: vec![(40, 40)] })
1249					],
1250					vec![
1251						(30, Support { total: 7, voters: vec![(7, 7)] }),
1252						(40, Support { total: 6, voters: vec![(6, 6)] })
1253					],
1254					vec![(40, Support { total: 4, voters: vec![(4, 4)] })]
1255				]
1256				.try_from_unbounded_paged()
1257				.unwrap()
1258			);
1259		})
1260	}
1261
1262	#[test]
1263	fn trim_backers_final_works() {
1264		ExtBuilder::mock_signed()
1265			.max_backers_per_winner(4)
1266			.max_backers_per_winner_final(4)
1267			.build_and_execute(|| {
1268				// adjust the voters a bit, such that they all have different backings
1269				let mut current_voters = Voters::get();
1270				current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1271				Voters::set(current_voters);
1272
1273				roll_to_snapshot_created();
1274				ensure_voters(3, 12);
1275
1276				let solution = mine_full_solution().unwrap();
1277
1278				load_mock_signed_and_start(solution);
1279				let supports = roll_to_full_verification();
1280
1281				// a solution is queued.
1282				assert!(VerifierPallet::queued_score().is_some());
1283
1284				// 30 has 1 + 3 = 4 backers -- all good
1285				// 40 has 1 + 2 + 3 = 6 backers -- needs to lose 2
1286				assert_eq!(
1287					supports,
1288					vec![
1289						vec![
1290							(30, Support { total: 30, voters: vec![(30, 30)] }),
1291							(40, Support { total: 40, voters: vec![(40, 40)] })
1292						],
1293						vec![
1294							(30, Support { total: 14, voters: vec![(5, 5), (6, 2), (7, 7)] }),
1295							(40, Support { total: 4, voters: vec![(6, 4)] })
1296						],
1297						vec![(40, Support { total: 7, voters: vec![(3, 3), (4, 4)] })]
1298					]
1299					.try_from_unbounded_paged()
1300					.unwrap()
1301				);
1302			})
1303	}
1304
1305	#[test]
1306	fn trim_backers_per_page_and_final_works() {
1307		ExtBuilder::mock_signed()
1308			.max_backers_per_winner_final(4)
1309			.max_backers_per_winner(2)
1310			.build_and_execute(|| {
1311				// adjust the voters a bit, such that they all have different backings
1312				let mut current_voters = Voters::get();
1313				current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1314				Voters::set(current_voters);
1315
1316				roll_to_snapshot_created();
1317				ensure_voters(3, 12);
1318
1319				let solution = mine_full_solution().unwrap();
1320
1321				load_mock_signed_and_start(solution);
1322				let supports = roll_to_full_verification();
1323
1324				// a solution is queued.
1325				assert!(VerifierPallet::queued_score().is_some());
1326
1327				// each page is trimmed individually, based on `solution_without_any_trimming`.
1328				assert_eq!(
1329					supports,
1330					vec![
1331						vec![
1332							(30, Support { total: 30, voters: vec![(30, 30)] }),
1333							(40, Support { total: 40, voters: vec![(40, 40)] })
1334						],
1335						vec![
1336							(30, Support { total: 12, voters: vec![(5, 5), (7, 7)] }),
1337							(40, Support { total: 6, voters: vec![(6, 6)] })
1338						],
1339						vec![(40, Support { total: 7, voters: vec![(3, 3), (4, 4)] })]
1340					]
1341					.try_from_unbounded_paged()
1342					.unwrap()
1343				);
1344			})
1345	}
1346
1347	#[test]
1348	fn aggressive_backer_trimming_maintains_winner_count() {
1349		// Test the scenario where aggressive backer trimming is applied but the solution
1350		// should still maintain the correct winner count to avoid WrongWinnerCount errors.
1351		ExtBuilder::mock_signed()
1352			.desired_targets(3)
1353			.max_winners_per_page(2)
1354			.pages(2)
1355			.max_backers_per_winner_final(1) // aggressive final trimming
1356			.max_backers_per_winner(1) // aggressive per-page trimming
1357			.build_and_execute(|| {
1358				// Use default 4 targets to stay within TargetSnapshotPerBlock limit
1359
1360				// Adjust the voters a bit, such that they all have different backings
1361				let mut current_voters = Voters::get();
1362				current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1363				Voters::set(current_voters);
1364
1365				roll_to_snapshot_created();
1366
1367				let solution = mine_full_solution().unwrap();
1368
1369				// The solution should still be valid despite aggressive trimming
1370				assert!(solution.solution_pages.len() > 0);
1371
1372				let winner_count = solution
1373					.solution_pages
1374					.iter()
1375					.flat_map(|page| page.unique_targets())
1376					.collect::<std::collections::HashSet<_>>()
1377					.len();
1378
1379				// We should get 3 winners.
1380				// This demonstrates that FullSupportsOfMiner can accommodate winners from multiple
1381				// pages and can hold more winners than MaxWinnersPerPage.
1382				assert_eq!(winner_count, 3);
1383
1384				// Load and verify the solution passes all checks without WrongWinnerCount error
1385				load_mock_signed_and_start(solution);
1386				let _supports = roll_to_full_verification();
1387
1388				// A solution should be successfully queued
1389				assert!(VerifierPallet::queued_score().is_some());
1390			})
1391	}
1392}
1393
1394#[cfg(test)]
1395mod base_miner {
1396	use std::vec;
1397
1398	use super::*;
1399	use crate::{mock::*, Snapshot};
1400	use frame_election_provider_support::TryFromUnboundedPagedSupports;
1401	use sp_npos_elections::Support;
1402	use sp_runtime::PerU16;
1403
1404	#[test]
1405	fn pagination_does_not_affect_score() {
1406		let score_1 = ExtBuilder::mock_signed()
1407			.pages(1)
1408			.voter_per_page(12)
1409			.build_unchecked()
1410			.execute_with(|| {
1411				roll_to_snapshot_created();
1412				mine_full_solution().unwrap().score
1413			});
1414		let score_2 = ExtBuilder::mock_signed()
1415			.pages(2)
1416			.voter_per_page(6)
1417			.build_unchecked()
1418			.execute_with(|| {
1419				roll_to_snapshot_created();
1420				mine_full_solution().unwrap().score
1421			});
1422		let score_3 = ExtBuilder::mock_signed()
1423			.pages(3)
1424			.voter_per_page(4)
1425			.build_unchecked()
1426			.execute_with(|| {
1427				roll_to_snapshot_created();
1428				mine_full_solution().unwrap().score
1429			});
1430
1431		assert_eq!(score_1, score_2);
1432		assert_eq!(score_2, score_3);
1433	}
1434
1435	#[test]
1436	fn mine_solution_single_page_works() {
1437		ExtBuilder::mock_signed().pages(1).voter_per_page(8).build_and_execute(|| {
1438			roll_to_snapshot_created();
1439
1440			ensure_voters(1, 8);
1441			ensure_targets(1, 4);
1442
1443			assert_eq!(
1444				Snapshot::<Runtime>::voters(0)
1445					.unwrap()
1446					.into_iter()
1447					.map(|(x, _, _)| x)
1448					.collect::<Vec<_>>(),
1449				vec![1, 2, 3, 4, 5, 6, 7, 8]
1450			);
1451
1452			let paged = mine_full_solution().unwrap();
1453			assert_eq!(paged.solution_pages.len(), 1);
1454
1455			// this solution must be feasible and submittable.
1456			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true).unwrap();
1457
1458			// now do a realistic full verification
1459			load_mock_signed_and_start(paged.clone());
1460			let supports = roll_to_full_verification();
1461
1462			assert_eq!(
1463				supports,
1464				vec![vec![
1465					(10, Support { total: 30, voters: vec![(1, 10), (4, 5), (5, 5), (8, 10)] }),
1466					(
1467						40,
1468						Support {
1469							total: 40,
1470							voters: vec![(2, 10), (3, 10), (4, 5), (5, 5), (6, 10)]
1471						}
1472					)
1473				]]
1474				.try_from_unbounded_paged()
1475				.unwrap()
1476			);
1477
1478			// NOTE: this is the same as the score of any other test that contains the first 8
1479			// voters, we already test for this in `pagination_does_not_affect_score`.
1480			assert_eq!(
1481				paged.score,
1482				ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 }
1483			);
1484		})
1485	}
1486
1487	#[test]
1488	fn mine_solution_double_page_works() {
1489		ExtBuilder::mock_signed().pages(2).voter_per_page(4).build_and_execute(|| {
1490			roll_to_snapshot_created();
1491
1492			// 2 pages of 8 voters
1493			ensure_voters(2, 8);
1494			// 1 page of 4 targets
1495			ensure_targets(1, 4);
1496
1497			// voters in pages. note the reverse page index.
1498			assert_eq!(
1499				Snapshot::<Runtime>::voters(0)
1500					.unwrap()
1501					.into_iter()
1502					.map(|(x, _, _)| x)
1503					.collect::<Vec<_>>(),
1504				vec![5, 6, 7, 8]
1505			);
1506			assert_eq!(
1507				Snapshot::<Runtime>::voters(1)
1508					.unwrap()
1509					.into_iter()
1510					.map(|(x, _, _)| x)
1511					.collect::<Vec<_>>(),
1512				vec![1, 2, 3, 4]
1513			);
1514			// targets in pages.
1515			assert_eq!(Snapshot::<Runtime>::targets().unwrap(), vec![10, 20, 30, 40]);
1516			let paged = mine_full_solution().unwrap();
1517
1518			assert_eq!(
1519				paged.solution_pages,
1520				vec![
1521					TestNposSolution {
1522						// voter 6 (index 1) is backing 40 (index 3).
1523						// voter 8 (index 3) is backing 10 (index 0)
1524						votes1: vec![(1, 3), (3, 0)],
1525						// voter 5 (index 0) is backing 40 (index 3) and 10 (index 0)
1526						votes2: vec![(0, [(0, PerU16::from_parts(32768))], 3)],
1527						..Default::default()
1528					},
1529					TestNposSolution {
1530						// voter 1 (index 0) is backing 10 (index 0)
1531						// voter 2 (index 1) is backing 40 (index 3)
1532						// voter 3 (index 2) is backing 40 (index 3)
1533						votes1: vec![(0, 0), (1, 3), (2, 3)],
1534						// voter 4 (index 3) is backing 40 (index 3) and 10 (index 0)
1535						votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)],
1536						..Default::default()
1537					},
1538				]
1539			);
1540
1541			// this solution must be feasible and submittable.
1542			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, false).unwrap();
1543
1544			// it must also be verified in the verifier
1545			load_mock_signed_and_start(paged.clone());
1546			let supports = roll_to_full_verification();
1547
1548			assert_eq!(
1549				supports,
1550				vec![
1551					// page 0: supports from voters 5, 6, 7, 8
1552					vec![
1553						(10, Support { total: 15, voters: vec![(5, 5), (8, 10)] }),
1554						(40, Support { total: 15, voters: vec![(5, 5), (6, 10)] })
1555					],
1556					// page 1: supports from voters 1, 2, 3, 4
1557					vec![
1558						(10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }),
1559						(40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] })
1560					]
1561				]
1562				.try_from_unbounded_paged()
1563				.unwrap()
1564			);
1565
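			// aggregated over both pages: 10 is backed by 15 + 15 = 30 and 40 by 15 + 25 = 40,
			// giving minimal_stake = 30, sum_stake = 70, sum_stake_squared = 900 + 1600 = 2500.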
1566			assert_eq!(
1567				paged.score,
1568				ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 }
1569			);
1570		})
1571	}
1572
1573	#[test]
1574	fn mine_solution_triple_page_works() {
1575		ExtBuilder::mock_signed().pages(3).voter_per_page(4).build_and_execute(|| {
1576			roll_to_snapshot_created();
1577
1578			ensure_voters(3, 12);
1579			ensure_targets(1, 4);
1580
1581			// voters in pages. note the reverse page index.
1582			assert_eq!(
1583				Snapshot::<Runtime>::voters(2)
1584					.unwrap()
1585					.into_iter()
1586					.map(|(x, _, _)| x)
1587					.collect::<Vec<_>>(),
1588				vec![1, 2, 3, 4]
1589			);
1590			assert_eq!(
1591				Snapshot::<Runtime>::voters(1)
1592					.unwrap()
1593					.into_iter()
1594					.map(|(x, _, _)| x)
1595					.collect::<Vec<_>>(),
1596				vec![5, 6, 7, 8]
1597			);
1598			assert_eq!(
1599				Snapshot::<Runtime>::voters(0)
1600					.unwrap()
1601					.into_iter()
1602					.map(|(x, _, _)| x)
1603					.collect::<Vec<_>>(),
1604				vec![10, 20, 30, 40]
1605			);
1606
1607			let paged = mine_full_solution().unwrap();
1608			assert_eq!(
1609				paged.solution_pages,
1610				vec![
1611					TestNposSolution { votes1: vec![(2, 2), (3, 3)], ..Default::default() },
1612					TestNposSolution {
1613						votes1: vec![(2, 2)],
1614						votes2: vec![
1615							(0, [(2, PerU16::from_parts(32768))], 3),
1616							(1, [(2, PerU16::from_parts(32768))], 3)
1617						],
1618						..Default::default()
1619					},
1620					TestNposSolution {
1621						votes1: vec![(2, 3), (3, 3)],
1622						votes2: vec![(1, [(2, PerU16::from_parts(32768))], 3)],
1623						..Default::default()
1624					},
1625				]
1626			);
1627
1628			// this solution must be feasible and submittable.
1629			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true).unwrap();
1630			// now do a realistic full verification
1631			load_mock_signed_and_start(paged.clone());
1632			let supports = roll_to_full_verification();
1633
1634			assert_eq!(
1635				supports,
1636				vec![
1637					// page 0: self-votes.
1638					vec![
1639						(30, Support { total: 30, voters: vec![(30, 30)] }),
1640						(40, Support { total: 40, voters: vec![(40, 40)] })
1641					],
1642					// page 1: 5, 6, 7, 8
1643					vec![
1644						(30, Support { total: 20, voters: vec![(5, 5), (6, 5), (7, 10)] }),
1645						(40, Support { total: 10, voters: vec![(5, 5), (6, 5)] })
1646					],
1647					// page 2: 1, 2, 3, 4
1648					vec![
1649						(30, Support { total: 5, voters: vec![(2, 5)] }),
1650						(40, Support { total: 25, voters: vec![(2, 5), (3, 10), (4, 10)] })
1651					]
1652				]
1653				.try_from_unbounded_paged()
1654				.unwrap()
1655			);
1656
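			// aggregated over all three pages: 30 is backed by 30 + 20 + 5 = 55 and 40 by
			// 40 + 10 + 25 = 75, giving minimal_stake = 55, sum_stake = 130, sum_stake_squared = 8650.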
1657			assert_eq!(
1658				paged.score,
1659				ElectionScore { minimal_stake: 55, sum_stake: 130, sum_stake_squared: 8650 }
1660			);
1661		})
1662	}
1663
1664	#[test]
1665	fn mine_solution_chooses_most_significant_pages() {
1666		ExtBuilder::mock_signed().pages(2).voter_per_page(4).build_and_execute(|| {
1667			roll_to_snapshot_created();
1668
1669			ensure_voters(2, 8);
1670			ensure_targets(1, 4);
1671
1672			// these voters (in page 0) should be safely ignored.
1673			assert_eq!(
1674				Snapshot::<Runtime>::voters(0)
1675					.unwrap()
1676					.into_iter()
1677					.map(|(x, _, _)| x)
1678					.collect::<Vec<_>>(),
1679				vec![5, 6, 7, 8]
1680			);
1681			// voters in page 1; this is the most significant page.
1682			assert_eq!(
1683				Snapshot::<Runtime>::voters(1)
1684					.unwrap()
1685					.into_iter()
1686					.map(|(x, _, _)| x)
1687					.collect::<Vec<_>>(),
1688				vec![1, 2, 3, 4]
1689			);
1690
1691			// now we ask for just 1 page of solution.
1692			let paged = mine_solution(1).unwrap();
1693
1694			assert_eq!(
1695				paged.solution_pages,
1696				vec![TestNposSolution {
1697					// voter 1 (index 0) is backing 10 (index 0)
1698					// voter 2 (index 1) is backing 40 (index 3)
1699					// voter 3 (index 2) is backing 40 (index 3)
1700					votes1: vec![(0, 0), (1, 3), (2, 3)],
1701					// voter 4 (index 3) is backing 40 (index 3) and 10 (index 0)
1702					votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)],
1703					..Default::default()
1704				}]
1705			);
1706
1707			// this solution must be feasible and submittable.
1708			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true).unwrap();
1709			// now do a realistic full verification.
1710			load_mock_signed_and_start(paged.clone());
1711			let supports = roll_to_full_verification();
1712
1713			assert_eq!(
1714				supports,
1715				vec![
1716					// page 0: non-existent.
1717					vec![],
1718					// page1 supports from voters 1, 2, 3, 4
1719					vec![
1720						(10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }),
1721						(40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] })
1722					]
1723				]
1724				.try_from_unbounded_paged()
1725				.unwrap()
1726			);
1727
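			// only the most significant page contributes: 10 is backed by 15 and 40 by 25, hence
			// minimal_stake = 15, sum_stake = 40, sum_stake_squared = 225 + 625 = 850.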
1728			assert_eq!(
1729				paged.score,
1730				ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 }
1731			);
1732		})
1733	}
1734
1735	#[test]
1736	fn mine_solution_2_out_of_3_pages() {
1737		ExtBuilder::mock_signed().pages(3).voter_per_page(4).build_and_execute(|| {
1738			roll_to_snapshot_created();
1739
1740			ensure_voters(3, 12);
1741			ensure_targets(1, 4);
1742
1743			assert_eq!(
1744				Snapshot::<Runtime>::voters(0)
1745					.unwrap()
1746					.into_iter()
1747					.map(|(x, _, _)| x)
1748					.collect::<Vec<_>>(),
1749				vec![10, 20, 30, 40]
1750			);
1751			assert_eq!(
1752				Snapshot::<Runtime>::voters(1)
1753					.unwrap()
1754					.into_iter()
1755					.map(|(x, _, _)| x)
1756					.collect::<Vec<_>>(),
1757				vec![5, 6, 7, 8]
1758			);
1759			assert_eq!(
1760				Snapshot::<Runtime>::voters(2)
1761					.unwrap()
1762					.into_iter()
1763					.map(|(x, _, _)| x)
1764					.collect::<Vec<_>>(),
1765				vec![1, 2, 3, 4]
1766			);
1767
1768			// now we ask for just 2 pages of solution.
1769			let paged = mine_solution(2).unwrap();
1770
1771			// this solution must be feasible and submittable.
1772			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true).unwrap();
1773
1774			assert_eq!(
1775				paged.solution_pages,
1776				vec![
1777					// this can be "pagified" to snapshot at index 1, which contains 5, 6, 7, 8
1778					// in which:
1779					// 6 (index:1) votes for 40 (index:3)
1780					// 8 (index:3) votes for 10 (index:0)
1781					// 5 votes for both 10 and 40
1782					TestNposSolution {
1783						votes1: vec![(1, 3), (3, 0)],
1784						votes2: vec![(0, [(0, PerU16::from_parts(32768))], 3)],
1785						..Default::default()
1786					},
1787					// this can be "pagified" to snapshot at index 2, which contains 1, 2, 3, 4
1788					// in which:
1789					// 1 (index:0) votes for 10 (index:0)
1790					// 2 (index:1) votes for 40 (index:3)
1791					// 3 (index:2) votes for 40 (index:3)
1792					// 4 votes for both 10 and 40
1793					TestNposSolution {
1794						votes1: vec![(0, 0), (1, 3), (2, 3)],
1795						votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)],
1796						..Default::default()
1797					}
1798				]
1799			);
1800
1801			// this solution must be feasible and submittable.
1802			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true).unwrap();
1803			// now do a realistic full verification.
1804			load_mock_signed_and_start(paged.clone());
1805			let supports = roll_to_full_verification();
1806
1807			assert_eq!(
1808				supports,
1809				vec![
1810					// empty page 0.
1811					vec![],
1812					// supports from voters 5, 6, 7, 8
1813					vec![
1814						(10, Support { total: 15, voters: vec![(5, 5), (8, 10)] }),
1815						(40, Support { total: 15, voters: vec![(5, 5), (6, 10)] })
1816					],
1817					// supports from voters 1, 2, 3, 4
1818					vec![
1819						(10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }),
1820						(40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] })
1821					]
1822				]
1823				.try_from_unbounded_paged()
1824				.unwrap()
1825			);
1826
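			// same totals as the two-page case: 10 is backed by 30 and 40 by 40, hence the
			// identical score below.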
1827			assert_eq!(
1828				paged.score,
1829				ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 }
1830			);
1831		})
1832	}
1833
1834	#[test]
1835	fn can_reduce_solution() {
1836		ExtBuilder::mock_signed().build_and_execute(|| {
1837			roll_to_snapshot_created();
1838			let full_edges = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false)
1839				.unwrap()
1840				.solution_pages
1841				.iter()
1842				.fold(0, |acc, x| acc + x.edge_count());
1843			let reduced_edges = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), true)
1844				.unwrap()
1845				.solution_pages
1846				.iter()
1847				.fold(0, |acc, x| acc + x.edge_count());
1848
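			// reducing is expected to prune redundant edges while keeping the same backings (and
			// thus the same score), so the reduced solution should have strictly fewer edges.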
1849			assert!(reduced_edges < full_edges, "{} < {} not fulfilled", reduced_edges, full_edges);
1850		})
1851	}
1852}
1853
1854#[cfg(test)]
1855mod offchain_worker_miner {
1856	use crate::{verifier::Verifier, CommonError};
1857	use frame_support::traits::Hooks;
1858	use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock};
1859
1860	use super::*;
1861	use crate::mock::*;
1862
1863	#[test]
1864	fn lock_prevents_frequent_execution() {
1865		let (mut ext, _) = ExtBuilder::mock_signed().build_offchainify();
1866		ext.execute_with_sanity_checks(|| {
1867			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
1868
1869			// first execution -- okay.
1870			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(25).is_ok());
1871
1872			// next block: rejected.
1873			assert_noop!(
1874				OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(26),
1875				OffchainMinerError::Lock("recently executed.")
1876			);
1877
1878			// allowed after `OffchainRepeat`
1879			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
1880				(26 + offchain_repeat).into()
1881			)
1882			.is_ok());
1883
1884			// a fork-like situation: re-execute the last 3 blocks.
1885			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
1886				(26 + offchain_repeat - 3).into()
1887			)
1888			.is_err());
1889			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
1890				(26 + offchain_repeat - 2).into()
1891			)
1892			.is_err());
1893			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
1894				(26 + offchain_repeat - 1).into()
1895			)
1896			.is_err());
1897		})
1898	}
1899
1900	#[test]
1901	fn lock_released_after_successful_execution() {
1902		// first, ensure that a successful execution releases the lock
1903		let (mut ext, pool) = ExtBuilder::mock_signed().build_offchainify();
1904		ext.execute_with_sanity_checks(|| {
1905			let guard = StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_LOCK);
1906			let last_block =
1907				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_LAST_BLOCK);
1908
1909			roll_to_unsigned_open();
1910
1911			// initially, the lock is not set.
1912			assert!(guard.get::<bool>().unwrap().is_none());
1913
1914			// a successful end-to-end execution.
1915			UnsignedPallet::offchain_worker(25);
1916			assert_eq!(pool.read().transactions.len(), 1);
1917
1918			// afterwards, the lock is not set either..
1919			assert!(guard.get::<bool>().unwrap().is_none());
1920			assert_eq!(last_block.get::<BlockNumber>().unwrap(), Some(25));
1921		});
1922	}
1923
1924	#[test]
1925	fn lock_prevents_overlapping_execution() {
1926		// ensure that if the guard is held, a new execution is not allowed.
1927		let (mut ext, pool) = ExtBuilder::mock_signed().build_offchainify();
1928		ext.execute_with_sanity_checks(|| {
1929			roll_to_unsigned_open();
1930
1931			// artificially set the value, as if another thread is mid-way.
1932			let mut lock = StorageLock::<BlockAndTime<System>>::with_block_deadline(
1933				OffchainWorkerMiner::<Runtime>::OFFCHAIN_LOCK,
1934				UnsignedPhase::get().saturated_into(),
1935			);
1936			let guard = lock.lock();
1937
1938			// nothing submitted.
1939			UnsignedPallet::offchain_worker(25);
1940			assert_eq!(pool.read().transactions.len(), 0);
1941			UnsignedPallet::offchain_worker(26);
1942			assert_eq!(pool.read().transactions.len(), 0);
1943
1944			drop(guard);
1945
1946			// 🎉 the lock is released; execution succeeds again.
1947			UnsignedPallet::offchain_worker(25);
1948			assert_eq!(pool.read().transactions.len(), 1);
1949		});
1950	}
1951
1952	#[test]
1953	fn initial_ocw_runs_and_saves_new_cache() {
1954		let (mut ext, pool) = ExtBuilder::mock_signed().build_offchainify();
1955		ext.execute_with_sanity_checks(|| {
1956			roll_to_unsigned_open();
1957
1958			let last_block =
1959				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_LAST_BLOCK);
1960			let cache =
1961				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
1962
1963			assert_eq!(last_block.get::<BlockNumber>(), Ok(None));
1964			assert_eq!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
1965
1966			// creates, caches, submits without expecting previous cache value
1967			UnsignedPallet::offchain_worker(25);
1968			assert_eq!(pool.read().transactions.len(), 1);
1969
1970			assert_eq!(last_block.get::<BlockNumber>(), Ok(Some(25)));
1971			assert!(matches!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
1972		})
1973	}
1974
1975	#[test]
1976	fn ocw_pool_submission_works() {
1977		let (mut ext, pool) = ExtBuilder::mock_signed().build_offchainify();
1978		ext.execute_with_sanity_checks(|| {
1979			roll_to_unsigned_open();
1980
1981			roll_next_with_ocw(Some(pool.clone()));
1982			// OCW must have submitted now
1983
1984			let encoded = pool.read().transactions[0].clone();
1985			let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap();
1986			let call = extrinsic.function;
1987			assert!(matches!(
1988				call,
1989				crate::mock::RuntimeCall::UnsignedPallet(
1990					crate::unsigned::Call::submit_unsigned { .. }
1991				)
1992			));
1993		})
1994	}
1995
1996	#[test]
1997	fn resubmits_after_offchain_repeat() {
1998		let (mut ext, pool) = ExtBuilder::mock_signed().build_offchainify();
1999		ext.execute_with_sanity_checks(|| {
2000			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
2001			roll_to_unsigned_open();
2002
2003			assert!(OffchainWorkerMiner::<Runtime>::cached_solution().is_none());
2004			// creates, caches, submits without expecting previous cache value
2005			UnsignedPallet::offchain_worker(25);
2006			assert_eq!(pool.read().transactions.len(), 1);
2007			let tx_cache = pool.read().transactions[0].clone();
2008			// assume that the tx has been processed
2009			pool.try_write().unwrap().transactions.clear();
2010
2011			// attempts to resubmit the tx after the threshold has expired.
2012			UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat);
2013			assert_eq!(pool.read().transactions.len(), 1);
2014
2015			// resubmitted tx is identical to first submission
2016			let tx = &pool.read().transactions[0];
2017			assert_eq!(&tx_cache, tx);
2018		})
2019	}
2020
2021	#[test]
2022	fn regenerates_and_resubmits_after_offchain_repeat_if_no_cache() {
2023		let (mut ext, pool) = ExtBuilder::mock_signed().build_offchainify();
2024		ext.execute_with_sanity_checks(|| {
2025			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
2026			roll_to_unsigned_open();
2027
2028			assert!(OffchainWorkerMiner::<Runtime>::cached_solution().is_none());
2029			// creates, caches, submits without expecting previous cache value.
2030			UnsignedPallet::offchain_worker(25);
2031			assert_eq!(pool.read().transactions.len(), 1);
2032			let tx_cache = pool.read().transactions[0].clone();
2033			// assume that the tx has been processed
2034			pool.try_write().unwrap().transactions.clear();
2035
2036			// remove the cached submitted tx.
2037			// this ensures that when the resubmit window rolls around, we're ready to regenerate
2038			// from scratch if necessary
2039			let mut call_cache =
2040				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
2041			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2042			call_cache.clear();
2043
2044			// attempts to resubmit the tx after the threshold has expired
2045			UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat);
2046			assert_eq!(pool.read().transactions.len(), 1);
2047
2048			// resubmitted tx is identical to first submission
2049			let tx = &pool.read().transactions[0];
2050			assert_eq!(&tx_cache, tx);
2051		})
2052	}
2053
2054	#[test]
2055	fn altering_snapshot_invalidates_solution_cache() {
2056		// by 'invalidates' we mean that the snapshot fingerprint has changed.
2057		let (mut ext, pool) = ExtBuilder::mock_signed().unsigned_phase(999).build_offchainify();
2058		ext.execute_with_sanity_checks(|| {
2059			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
2060			roll_to_unsigned_open();
2061			roll_next_with_ocw(None);
2062
2063			// something is submitted..
2064			assert_eq!(pool.read().transactions.len(), 1);
2065			pool.try_write().unwrap().transactions.clear();
2066
2067			// ..and cached
2068			let call_cache =
2069				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
2070			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2071
2072			// now change the snapshot; of course, this is rare in reality. This makes the cached call
2073			// infeasible.
2074			assert_eq!(crate::Snapshot::<Runtime>::targets().unwrap(), vec![10, 20, 30, 40]);
2075			let pre_fingerprint = crate::Snapshot::<Runtime>::fingerprint();
2076			crate::Snapshot::<Runtime>::remove_target(0);
2077			let post_fingerprint = crate::Snapshot::<Runtime>::fingerprint();
2078			assert_eq!(crate::Snapshot::<Runtime>::targets().unwrap(), vec![20, 30, 40]);
2079			assert_ne!(pre_fingerprint, post_fingerprint);
2080
2081			// now run ocw again
2082			let now = System::block_number();
2083			roll_to_with_ocw(now + offchain_repeat + 1, None);
2084			// nothing is submitted this time..
2085			assert_eq!(pool.read().transactions.len(), 0);
2086			// .. and the cache is gone.
2087			assert_eq!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
2088
2089			// upon the next run, we re-generate and submit something fresh again.
2090			roll_to_with_ocw(now + offchain_repeat + offchain_repeat + 2, None);
2091			assert_eq!(pool.read().transactions.len(), 1);
2092			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2093		})
2094	}
2095
2096	#[test]
2097	fn wont_resubmit_if_weak_score() {
2098		// common case: if the score is weak, don't bother with anything. Ideally, check from the
2099		// logs that we don't run feasibility in this call path; the score check must come first.
2100		let (mut ext, pool) = ExtBuilder::mock_signed().unsigned_phase(999).build_offchainify();
2101		ext.execute_with_sanity_checks(|| {
2102			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
2103			// unfortunately there's no pretty way to run the ocw code such that it generates a
2104			// weak, but correct solution. We just write it to cache directly.
2105			roll_to_unsigned_open();
2106			roll_next_with_ocw(None);
2107
2108			// something is submitted..
2109			assert_eq!(pool.read().transactions.len(), 1);
2110
2111			// ..and cached
2112			let call_cache =
2113				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
2114			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2115
2116			// and replace it with something weak.
2117			let weak_solution = raw_paged_from_supports(
2118				vec![vec![(40, Support { total: 10, voters: vec![(3, 10)] })]],
2119				0,
2120			);
2121			let weak_call = crate::unsigned::Call::<Runtime>::submit_unsigned {
2122				paged_solution: Box::new(weak_solution),
2123			};
2124			call_cache.set(&weak_call);
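			// this weak solution scores { minimal_stake: 10, sum_stake: 10, sum_stake_squared: 100 },
			// so the score check below should reject it without re-running feasibility.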
2125
2126			// run again
2127			roll_to_with_ocw(System::block_number() + offchain_repeat + 1, Some(pool.clone()));
2128			// nothing is submitted this time..
2129			assert_eq!(pool.read().transactions.len(), 0);
2130			// .. and the cache IS STILL THERE!
2131			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2132		})
2133	}
2134
2135	#[test]
2136	fn ocw_submission_e2e_works() {
2137		let (mut ext, pool) = ExtBuilder::mock_signed().build_offchainify();
2138		ext.execute_with_sanity_checks(|| {
2139			assert!(VerifierPallet::queued_score().is_none());
2140			roll_to_with_ocw(25 + 1, Some(pool.clone()));
2141			assert!(VerifierPallet::queued_score().is_some());
2142
2143			// call is cached.
2144			let call_cache =
2145				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
2146			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2147
2148			// pool is empty
2149			assert_eq!(pool.read().transactions.len(), 0);
2150		})
2151	}
2152
2153	#[test]
2154	fn ocw_e2e_submits_and_queued_msp_only() {
2155		let (mut ext, pool) = ExtBuilder::mock_signed().build_offchainify();
2156		ext.execute_with_sanity_checks(|| {
2157			// roll to mine
2158			roll_to_unsigned_open_with_ocw(None);
2159			// one block to verify and submit.
2160			roll_next_with_ocw(Some(pool.clone()));
2161
2162			assert_eq!(
2163				multi_block_events(),
2164				vec![
2165					crate::Event::PhaseTransitioned {
2166						from: Phase::Off,
2167						to: Phase::Snapshot(Pages::get())
2168					},
2169					crate::Event::PhaseTransitioned {
2170						from: Phase::Snapshot(0),
2171						to: Phase::Unsigned(UnsignedPhase::get() - 1)
2172					}
2173				]
2174			);
2175			assert_eq!(
2176				verifier_events(),
2177				vec![
2178					crate::verifier::Event::Verified(2, 2),
2179					crate::verifier::Event::Queued(
2180						ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 },
2181						None
2182					)
2183				]
2184			);
2185			assert!(VerifierPallet::queued_score().is_some());
2186
2187			// pool is empty
2188			assert_eq!(pool.read().transactions.len(), 0);
2189		})
2190	}
2191
2192	#[test]
2193	fn multi_page_ocw_e2e_submits_and_queued_msp_only() {
2194		let (mut ext, pool) = ExtBuilder::mock_signed().miner_pages(2).build_offchainify();
2195		ext.execute_with_sanity_checks(|| {
2196			// roll to mine
2197			roll_to_unsigned_open_with_ocw(None);
2198			// one block to verify and submit.
2199			roll_next_with_ocw(Some(pool.clone()));
2200
2201			assert_eq!(
2202				multi_block_events(),
2203				vec![
2204					crate::Event::PhaseTransitioned {
2205						from: Phase::Off,
2206						to: Phase::Snapshot(Pages::get())
2207					},
2208					crate::Event::PhaseTransitioned {
2209						from: Phase::Snapshot(0),
2210						to: Phase::Unsigned(UnsignedPhase::get() - 1)
2211					}
2212				]
2213			);
2214			assert_eq!(
2215				verifier_events(),
2216				vec![
2217					crate::verifier::Event::Verified(1, 2),
2218					crate::verifier::Event::Verified(2, 2),
2219					crate::verifier::Event::Queued(
2220						ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 },
2221						None
2222					)
2223				]
2224			);
2225			assert!(VerifierPallet::queued_score().is_some());
2226
2227			// pool is empty
2228			assert_eq!(pool.read().transactions.len(), 0);
2229		})
2230	}
2231
2232	#[test]
2233	fn full_multi_page_ocw_e2e_submits_and_queued_msp_only() {
2234		let (mut ext, pool) = ExtBuilder::mock_signed().miner_pages(3).build_offchainify();
2235		ext.execute_with_sanity_checks(|| {
2236			// roll to mine
2237			roll_to_unsigned_open_with_ocw(None);
2238			// one block to verify and submit.
2239			roll_next_with_ocw(Some(pool.clone()));
2240
2241			assert_eq!(
2242				multi_block_events(),
2243				vec![
2244					crate::Event::PhaseTransitioned {
2245						from: Phase::Off,
2246						to: Phase::Snapshot(Pages::get())
2247					},
2248					crate::Event::PhaseTransitioned {
2249						from: Phase::Snapshot(0),
2250						to: Phase::Unsigned(UnsignedPhase::get() - 1)
2251					}
2252				]
2253			);
2254			assert_eq!(
2255				verifier_events(),
2256				vec![
2257					crate::verifier::Event::Verified(0, 2),
2258					crate::verifier::Event::Verified(1, 2),
2259					crate::verifier::Event::Verified(2, 2),
2260					crate::verifier::Event::Queued(
2261						ElectionScore {
2262							minimal_stake: 55,
2263							sum_stake: 130,
2264							sum_stake_squared: 8650
2265						},
2266						None
2267					)
2268				]
2269			);
2270			assert!(VerifierPallet::queued_score().is_some());
2271
2272			// pool is empty
2273			assert_eq!(pool.read().transactions.len(), 0);
2274		})
2275	}
2276
2277	#[test]
2278	fn will_not_mine_if_not_enough_winners() {
2279		// also see `trim_weight_too_much_makes_solution_invalid`.
2280		let (mut ext, _) = ExtBuilder::mock_signed().desired_targets(77).build_offchainify();
2281		ext.execute_with_sanity_checks(|| {
2282			roll_to_unsigned_open();
2283			ensure_voters(3, 12);
2284
2285			// beautiful errors, aren't they?
2286			assert_eq!(
2287				OffchainWorkerMiner::<Runtime>::mine_checked_call().unwrap_err(),
2288				OffchainMinerError::Common(CommonError::WrongWinnerCount)
2289			);
2290		});
2291	}
2292
2293	mod no_storage {
2294		use super::*;
2295		#[test]
2296		fn ocw_never_uses_cache_on_initial_run_or_resubmission() {
2297			// When `T::OffchainStorage` is false, the offchain worker should never use cache:
2298			// - Initial run: mines and submits without caching
2299			// - Resubmission: re-mines fresh solution instead of restoring from cache
2300			let (mut ext, pool) =
2301				ExtBuilder::mock_signed().offchain_storage(false).build_offchainify();
2302			ext.execute_with_sanity_checks(|| {
2303				let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
2304				roll_to_unsigned_open();
2305
2306				let last_block = StorageValueRef::persistent(
2307					&OffchainWorkerMiner::<Runtime>::OFFCHAIN_LAST_BLOCK,
2308				);
2309				let cache = StorageValueRef::persistent(
2310					&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL,
2311				);
2312
2313				// Initial state: no previous runs
2314				assert_eq!(last_block.get::<BlockNumber>(), Ok(None));
2315				assert_eq!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
2316
2317				// First run: mines and submits without caching
2318				UnsignedPallet::offchain_worker(25);
2319				assert_eq!(pool.read().transactions.len(), 1);
2320				let first_tx = pool.read().transactions[0].clone();
2321
2322				// Verify no cache is created or used
2323				assert_eq!(last_block.get::<BlockNumber>(), Ok(Some(25)));
2324				assert_eq!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
2325
2326				// Clear the pool to simulate transaction processing
2327				pool.try_write().unwrap().transactions.clear();
2328
2329				// Second run after repeat threshold: should re-mine instead of using cache
2330				UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat);
2331				assert_eq!(pool.read().transactions.len(), 1);
2332				let second_tx = pool.read().transactions[0].clone();
2333
2334				// Verify still no cache is used throughout the process
2335				assert_eq!(last_block.get::<BlockNumber>(), Ok(Some(25 + 1 + offchain_repeat)));
2336				assert_eq!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
2337
2338				// Both transactions should be identical since the snapshot hasn't changed,
2339				// but they were generated independently (no cache reuse)
2340				assert_eq!(first_tx, second_tx);
2341			})
2342		}
2343	}
2344}