
pallet_election_provider_multi_block/unsigned/miner.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! The miner code for the EPMB pallet.
19//!
20//! It broadly consists of two main types:
21//!
22//! * [`crate::unsigned::miner::BaseMiner`], which is more generic, needs parameterization via
23//!   [`crate::unsigned::miner::MinerConfig`], and can be used by an external implementation.
24//! * [`crate::unsigned::miner::OffchainWorkerMiner`], which is more opinionated, and is used by
25//!   this pallet via the `offchain_worker` hook to also mine solutions during the
26//!   `Phase::Unsigned`.
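//!
//! At a high level, the offchain flow implemented in this file is: fetch the paged snapshot,
//! mine a solution over it, trim it to the configured bounds, check it, and submit it as an
//! unsigned transaction. The following is a rough, non-compiled sketch of that flow, mirroring
//! what the `offchain_worker` hook ends up doing through
//! [`OffchainWorkerMiner::mine_check_maybe_save_submit`]:
//!
//! ```ignore
//! // Mine a solution over the `T::MinerPages` most significant snapshot pages, reducing edges.
//! let paged_solution = OffchainWorkerMiner::<T>::mine_solution(T::MinerPages::get(), true)?;
//! // Run the unsigned-specific and feasibility checks before touching the transaction pool.
//! OffchainWorkerMiner::<T>::check_solution(&paged_solution, None, true)?;
//! // Wrap it in a `submit_unsigned` call and submit it as an unsigned transaction.
//! let call = Call::<T>::submit_unsigned { paged_solution: Box::new(paged_solution) };
//! ```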
27
28use super::{Call, Config, Pallet};
29use crate::{
30	helpers,
31	types::{PadSolutionPages, *},
32	verifier::{self},
33	CommonError,
34};
35use codec::Encode;
36use frame_election_provider_support::{ExtendedBalance, NposSolver, Support, VoteWeight};
37use frame_support::{traits::Get, BoundedVec};
38use frame_system::pallet_prelude::*;
39use scale_info::TypeInfo;
40use sp_npos_elections::EvaluateSupport;
41use sp_runtime::{
42	offchain::storage::{MutateStorageError, StorageValueRef},
43	traits::{SaturatedConversion, Saturating, Zero},
44};
45use sp_std::{collections::btree_map::BTreeMap, prelude::*};
46
47// TODO: we should have a fuzzer for miner that ensures no matter the parameters, it generates a
48// valid solution. Esp. for the trimming.
49
50/// The type of the snapshot.
51///
52/// Used to express errors.
53#[derive(Debug, Eq, PartialEq)]
54pub enum SnapshotType {
55	/// Voters at the given page missing.
56	Voters(PageIndex),
57	/// Targets missing.
58	Targets,
59	/// Metadata missing.
60	Metadata,
61	/// Desired targets missing.
62	DesiredTargets,
63}
64
65pub(crate) type MinerSolverErrorOf<T> = <<T as MinerConfig>::Solver as NposSolver>::Error;
66
67/// The errors related to the [`BaseMiner`].
68#[derive(
69	frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound,
70)]
71pub enum MinerError<T: MinerConfig> {
72	/// An internal error in the NPoS elections crate.
73	NposElections(sp_npos_elections::Error),
74	/// An internal error in the generic solver.
75	Solver(MinerSolverErrorOf<T>),
76	/// Snapshot data was unavailable unexpectedly.
77	SnapshotUnAvailable(SnapshotType),
78	/// The base, common errors from the pallet.
79	Common(CommonError),
80	/// The solution generated from the miner is not feasible.
81	Feasibility(verifier::FeasibilityError),
82	/// Some page index has been invalid.
83	InvalidPage,
84	/// Too many winners were removed during trimming.
85	TooManyWinnersRemoved,
86	/// A defensive error has occurred.
87	Defensive(&'static str),
88}
89
90impl<T: MinerConfig> From<sp_npos_elections::Error> for MinerError<T> {
91	fn from(e: sp_npos_elections::Error) -> Self {
92		MinerError::NposElections(e)
93	}
94}
95
96impl<T: MinerConfig> From<verifier::FeasibilityError> for MinerError<T> {
97	fn from(e: verifier::FeasibilityError) -> Self {
98		MinerError::Feasibility(e)
99	}
100}
101
102impl<T: MinerConfig> From<CommonError> for MinerError<T> {
103	fn from(e: CommonError) -> Self {
104		MinerError::Common(e)
105	}
106}
107
108/// The errors related to the `OffchainWorkerMiner`.
109#[derive(
110	frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound,
111)]
112pub enum OffchainMinerError<T: Config> {
113	/// An error in the base miner.
114	BaseMiner(MinerError<T::MinerConfig>),
115	/// The base, common errors from the pallet.
116	Common(CommonError),
117	/// Something went wrong fetching the lock.
118	Lock(&'static str),
119	/// Submitting a transaction to the pool failed.
120	PoolSubmissionFailed,
121	/// Cannot restore a solution that was not stored.
122	NoStoredSolution,
123	/// Cached solution is not a `submit_unsigned` call.
124	SolutionCallInvalid,
125	/// Failed to store a solution.
126	FailedToStoreSolution,
127	/// Cannot mine a solution with zero pages.
128	ZeroPages,
129}
130
131impl<T: Config> From<MinerError<T::MinerConfig>> for OffchainMinerError<T> {
132	fn from(e: MinerError<T::MinerConfig>) -> Self {
133		OffchainMinerError::BaseMiner(e)
134	}
135}
136
137impl<T: Config> From<CommonError> for OffchainMinerError<T> {
138	fn from(e: CommonError) -> Self {
139		OffchainMinerError::Common(e)
140	}
141}
142
143/// Configurations for the miner.
144///
145/// This is extracted from the main crate's config so that an offchain miner can readily use the
146/// [`BaseMiner`] without needing to deal with the rest of the pallet's configuration.
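///
/// # Example
///
/// A minimal, illustrative sketch of implementing this trait in an external miner binary. All
/// concrete types and bound values below (e.g. `ExampleNposSolution`, the `ConstU32` values)
/// are assumptions for demonstration only, not values mandated by this pallet:
///
/// ```ignore
/// use frame_election_provider_support::SequentialPhragmen;
/// use frame_support::traits::ConstU32;
///
/// pub struct ExampleMinerConfig;
/// impl MinerConfig for ExampleMinerConfig {
/// 	type AccountId = u64;
/// 	// A solution type generated elsewhere via `generate_solution_type!`.
/// 	type Solution = ExampleNposSolution;
/// 	type Solver = SequentialPhragmen<u64, sp_runtime::Perbill>;
/// 	// e.g. roughly 75% of the target block length.
/// 	type MaxLength = ConstU32<{ 3 * 1024 * 1024 }>;
/// 	type MaxVotesPerVoter = ConstU32<16>;
/// 	type MaxWinnersPerPage = ConstU32<100>;
/// 	type MaxBackersPerWinner = ConstU32<512>;
/// 	type MaxBackersPerWinnerFinal = ConstU32<1024>;
/// 	type Pages = ConstU32<4>;
/// 	type VoterSnapshotPerBlock = ConstU32<1000>;
/// 	type TargetSnapshotPerBlock = ConstU32<1000>;
/// 	type Hash = sp_core::H256;
/// }
/// ```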
147pub trait MinerConfig {
148	/// The account id type.
149	type AccountId: Ord + Clone + codec::Codec + core::fmt::Debug;
150/// The solution type that the miner is mining.
152	type Solution: codec::FullCodec
153		+ Default
154		+ PartialEq
155		+ Eq
156		+ Clone
157		+ sp_std::fmt::Debug
158		+ Ord
159		+ NposSolution
160		+ TypeInfo
161		+ codec::MaxEncodedLen;
162	/// The solver type.
163	type Solver: NposSolver<AccountId = Self::AccountId>;
164	/// The maximum length that the miner should use for a solution, per page.
165	///
166	/// This value is not set in stone, and it is up to an individual miner to configure. A good
167	/// value is something like 75% of the total block length, which can be fetched from the system
168	/// pallet.
169	type MaxLength: Get<u32>;
170	/// Maximum number of votes per voter.
171	///
172	/// Must be the same as configured in the [`crate::Config::DataProvider`].
173	///
174	/// For simplicity, this is 16 in Polkadot and 24 in Kusama.
175	type MaxVotesPerVoter: Get<u32>;
176	/// Maximum number of winners to select per page.
177	///
178	/// The miner should respect this; it is used for trimming and for bounded data types.
179	///
180	/// Should be equal to the onchain value set in `Verifier::Config`.
181	type MaxWinnersPerPage: Get<u32>;
182	/// Maximum number of backers per winner, per page.
183	///
184	/// The miner should respect this; it is used for trimming and for bounded data types.
185	///
186	/// Should be equal to the onchain value set in `Verifier::Config`.
187	type MaxBackersPerWinner: Get<u32>;
188	/// Maximum number of backers, per winner, across all pages.
189	///
190	/// The miner should respect this; it is used for trimming and for bounded data types.
191	///
192	/// Should be equal to the onchain value set in `Verifier::Config`.
193	type MaxBackersPerWinnerFinal: Get<u32>;
194	/// **Maximum** number of pages that we may compute.
195	///
196	/// Must be the same as configured in the [`crate::Config`].
197	type Pages: Get<u32>;
198	/// Maximum number of voters per snapshot page.
199	///
200	/// Must be the same as configured in the [`crate::Config`].
201	type VoterSnapshotPerBlock: Get<u32>;
202	/// Maximum number of targets per snapshot page.
203	///
204	/// Must be the same as configured in the [`crate::Config`].
205	type TargetSnapshotPerBlock: Get<u32>;
206	/// The hash type of the runtime.
207	type Hash: Eq + PartialEq;
208}
209
210/// A base miner that is only capable of mining a new solution and checking it against the state of
211/// this pallet for feasibility, and trimming its length/weight.
212pub struct BaseMiner<T: MinerConfig>(sp_std::marker::PhantomData<T>);
213
214/// Parameterized `BoundedSupports` for the miner.
215///
216/// The bounds of this are set such that they only encapsulate a single page of a snapshot. The other
217/// counterpart is [`FullSupportsOfMiner`].
218pub type PageSupportsOfMiner<T> = frame_election_provider_support::BoundedSupports<
219	<T as MinerConfig>::AccountId,
220	<T as MinerConfig>::MaxWinnersPerPage,
221	<T as MinerConfig>::MaxBackersPerWinner,
222>;
223
224/// Helper type that computes the maximum total winners across all pages.
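///
/// Concretely, this is `Pages * MaxWinnersPerPage`. For example, with `Pages = 4` and
/// `MaxWinnersPerPage = 25` (illustrative numbers), this returns `100`.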
225pub struct MaxWinnersFinal<T: MinerConfig>(core::marker::PhantomData<T>);
226
227impl<T: MinerConfig> frame_support::traits::Get<u32> for MaxWinnersFinal<T> {
228	fn get() -> u32 {
229		T::Pages::get().saturating_mul(T::MaxWinnersPerPage::get())
230	}
231}
232
233/// The full version of [`PageSupportsOfMiner`].
234///
235/// This should be used on a support instance that is encapsulating the full solution.
236///
237/// Another way to look at it: this is never wrapped in a `Vec<_>`.
238pub type FullSupportsOfMiner<T> = frame_election_provider_support::BoundedSupports<
239	<T as MinerConfig>::AccountId,
240	MaxWinnersFinal<T>,
241	<T as MinerConfig>::MaxBackersPerWinnerFinal,
242>;
243
244/// Aggregator for inputs to [`BaseMiner`].
245pub struct MineInput<T: MinerConfig> {
246	/// Number of winners to pick.
247	pub desired_targets: u32,
248	/// All of the targets.
249	pub all_targets: BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>,
250	/// Paginated list of voters.
251	///
252	/// Note for staking-miners: How this is calculated is rather delicate, and the order of the
253	/// nested vectors matter. See carefully how `OffchainWorkerMiner::mine_solution` is doing
254	/// this.
255	pub voter_pages: AllVoterPagesOf<T>,
256	/// Number of pages to mine.
257	///
258	/// Note for staking-miner: Always use [`MinerConfig::Pages`] unless explicitly wanted
259	/// otherwise.
260	pub pages: PageIndex,
261	/// Whether to reduce the solution. Almost always `true`.
262	pub do_reduce: bool,
263	/// The current round for which the solution is being calculated.
264	pub round: u32,
265}
266
267impl<T: MinerConfig> BaseMiner<T> {
268	/// Mine a new npos solution, with the given number of pages.
269	///
270	/// This miner is only capable of mining a solution that either uses all of the pages of the
271	/// snapshot, or the top `pages` thereof.
272	///
273	/// This always trims the solution to match a few parameters:
274	///
275	/// [`MinerConfig::MaxWinnersPerPage`], [`MinerConfig::MaxBackersPerWinner`],
276	/// [`MinerConfig::MaxBackersPerWinnerFinal`] and [`MinerConfig::MaxLength`].
277	///
278	/// The order of pages returned is aligned with the snapshot. For example, the index 0 of the
279	/// returned solution pages corresponds to page 0 of the snapshot.
280	///
281	/// The only difference is that, if the solution is partial, [`Pagify`] must be used to properly
282	/// pad the results.
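	///
	/// # Example
	///
	/// A non-compiled sketch of calling the miner directly, assuming a `MinerConfig`
	/// implementation `T` and that the snapshot data (`voter_pages`, `all_targets`,
	/// `desired_targets`, `round`) has already been fetched, e.g. the way
	/// [`OffchainWorkerMiner`] does via its snapshot fetching:
	///
	/// ```ignore
	/// let paged = BaseMiner::<T>::mine_solution(MineInput {
	/// 	desired_targets,
	/// 	all_targets,
	/// 	voter_pages,
	/// 	pages: T::Pages::get(),
	/// 	do_reduce: true,
	/// 	round,
	/// })?;
	/// // `paged.solution_pages` is aligned with the snapshot: index 0 is snapshot page 0.
	/// ```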
283	pub fn mine_solution(
284		MineInput { desired_targets, all_targets, voter_pages, mut pages, do_reduce, round }: MineInput<
285			T,
286		>,
287	) -> Result<PagedRawSolution<T>, MinerError<T>> {
288		pages = pages.min(T::Pages::get());
289
290		// we also build this closure early, so we can let `targets` be consumed.
291		let voter_page_fn = helpers::generate_voter_page_fn::<T>(&voter_pages);
292		let target_index_fn = helpers::target_index_fn::<T>(&all_targets);
293
294		// now flatten the voters, ready to be used as if pagination did not exist.
295		let all_voters: AllVoterPagesFlattenedOf<T> = voter_pages
296			.iter()
297			.cloned()
298			.flatten()
299			.collect::<Vec<_>>()
300			.try_into()
301			.expect("Flattening the voters into `AllVoterPagesFlattenedOf` cannot fail; qed");
302
303		let ElectionResult { winners: _, assignments } = T::Solver::solve(
304			desired_targets as usize,
305			all_targets.clone().to_vec(),
306			all_voters.clone().into_inner(),
307		)
308		.map_err(|e| MinerError::Solver(e))?;
309
310		// reduce and trim supports. We don't trim length and weight here, since those are dependent
311		// on the final form of the solution ([`PagedRawSolution`]), thus we do it later.
312		let trimmed_assignments = {
313			// Implementation note: the overall code path is as follows: election_results ->
314			// assignments -> staked assignments -> reduce -> supports -> trim supports -> staked
315			// assignments -> final assignments
316			// This is by no means the most performant, but it is clear and correct.
317			use sp_npos_elections::{
318				assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized,
319				reduce, supports_to_staked_assignment, to_supports, EvaluateSupport,
320			};
321
322			// These closures are of no use in the rest of this code, since they only deal with the
323			// overall list of voters.
324			let cache = helpers::generate_voter_cache::<T, _>(&all_voters);
325			let stake_of = helpers::stake_of_fn::<T, _>(&all_voters, &cache);
326
327			// 1. convert to staked and reduce
328			let (reduced_count, staked) = {
329				let mut staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)
330					.map_err::<MinerError<T>, _>(Into::into)?;
331
332				// first, reduce the solution if requested. This will already remove a lot of
333				// "redundant" edges and reduce the chance of needing any further trimming.
334				let count = if do_reduce { reduce(&mut staked) } else { 0 };
335				(count, staked)
336			};
337
338			// 2. trim the supports by FINAL backing.
339			let (_pre_score, final_trimmed_assignments, winners_removed, backers_removed) = {
340				// these supports could very well be invalid for SCORE purposes. The reason is that
341				// you might trim out half of an account's stake, but we don't look for this
342				// account's other votes to fix it.
343				let supports_invalid_score = to_supports(&staked);
344
345				let pre_score = (&supports_invalid_score).evaluate();
346				let (bounded_invalid_score, winners_removed, backers_removed) =
347					FullSupportsOfMiner::<T>::sorted_truncate_from(supports_invalid_score);
348
349				// now recreate the staked assignments
350				let staked = supports_to_staked_assignment(bounded_invalid_score.into());
351				let assignments = assignment_staked_to_ratio_normalized(staked)
352					.map_err::<MinerError<T>, _>(Into::into)?;
353				(pre_score, assignments, winners_removed, backers_removed)
354			};
355
356			miner_log!(
357				debug,
358				"initial score = {:?}, reduced {} edges, trimmed {} winners and {} backers due to global support limits",
359				_pre_score,
360				reduced_count,
361				winners_removed,
362				backers_removed,
363			);
364
365			final_trimmed_assignments
366		};
367
368		// split the assignments into different pages.
369		let mut paged_assignments: BoundedVec<Vec<AssignmentOf<T>>, T::Pages> =
370			BoundedVec::with_bounded_capacity(pages as usize);
371		paged_assignments.bounded_resize(pages as usize, Default::default());
372
373		for assignment in trimmed_assignments {
374			// NOTE: this `page` index is LOCAL. It does not correspond to the actual page index of
375			// the snapshot map, but rather the index in the `voter_pages`.
376			let page = voter_page_fn(&assignment.who).ok_or(MinerError::InvalidPage)?;
377			let assignment_page =
378				paged_assignments.get_mut(page as usize).ok_or(MinerError::InvalidPage)?;
379			assignment_page.push(assignment);
380		}
381
382		// convert each page to a compact struct -- no more change allowed.
383		let mut solution_pages: Vec<SolutionOf<T>> = paged_assignments
384			.into_iter()
385			.enumerate()
386			.map(|(page_index, assignment_page)| {
387				// get the page of the snapshot that corresponds to this page of the assignments.
388				let page: PageIndex = page_index.saturated_into();
389				let voter_snapshot_page = voter_pages
390					.get(page as usize)
391					.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(page)))?;
392
393				// one last trimming -- `MaxBackersPerWinner`, the per-page variant.
394				let trimmed_assignment_page = Self::trim_supports_max_backers_per_winner_per_page(
395					assignment_page,
396					voter_snapshot_page,
397					page_index as u32,
398				)?;
399
400				let voter_index_fn = {
401					let cache = helpers::generate_voter_cache::<T, _>(&voter_snapshot_page);
402					helpers::voter_index_fn_owned::<T>(cache)
403				};
404
405				<SolutionOf<T>>::from_assignment(
406					&trimmed_assignment_page,
407					&voter_index_fn,
408					&target_index_fn,
409				)
410				.map_err::<MinerError<T>, _>(Into::into)
411			})
412			.collect::<Result<Vec<_>, _>>()?;
413
414		// now do the length trim.
415		let _trim_length_weight =
416			Self::maybe_trim_weight_and_len(&mut solution_pages, &voter_pages)?;
417		miner_log!(debug, "trimmed {} voters due to length restriction.", _trim_length_weight);
418
419		// finally, wrap everything up. Assign a fake score here, since we might need to re-compute
420		// it.
421		let mut paged = PagedRawSolution { round, solution_pages, score: Default::default() };
422
423		// OPTIMIZATION: we do feasibility_check inside `compute_score`, and once more later in
424		// `pre_dispatch`. I think it is fine, but maybe we can improve it.
425		let score = Self::compute_score(&paged, &voter_pages, &all_targets, desired_targets)
426			.map_err::<MinerError<T>, _>(Into::into)?;
427		paged.score = score;
428
429		miner_log!(
430			debug,
431			"mined a solution with {} pages, score {:?}, {} winners, {} voters, {} edges, and {} bytes",
432			pages,
433			score,
434			paged.winner_count_single_page_target_snapshot(),
435			paged.voter_count(),
436			paged.edge_count(),
437			paged.using_encoded(|b| b.len())
438		);
439
440		Ok(paged)
441	}
442
443	/// Perform the feasibility check on all pages of a solution, returning the per-page supports
444	/// if all good and the corresponding error otherwise.
445	pub fn check_feasibility(
446		paged_solution: &PagedRawSolution<T>,
447		paged_voters: &AllVoterPagesOf<T>,
448		snapshot_targets: &BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>,
449		desired_targets: u32,
450	) -> Result<Vec<PageSupportsOfMiner<T>>, MinerError<T>> {
451		// check every solution page for feasibility.
452		let padded_voters = paged_voters.clone().pad_solution_pages(T::Pages::get());
453		paged_solution
454			.solution_pages
455			.pagify(T::Pages::get())
456			.map(|(page_index, page_solution)| {
457				match verifier::feasibility_check_page_inner_with_snapshot::<T>(
458					page_solution.clone(),
459					&padded_voters[page_index as usize],
460					snapshot_targets,
461					desired_targets,
462				) {
463					Ok(x) => {
464						miner_log!(debug, "feasibility check of page {:?} was okay", page_index,);
465						Ok(x)
466					},
467					Err(e) => {
468						miner_log!(
469							warn,
470							"feasibility check of page {:?} {:?} failed for solution because: {:?}",
471							page_index,
472							page_solution,
473							e,
474						);
475						Err(e)
476					},
477				}
478			})
479			.collect::<Result<Vec<_>, _>>()
480			.map_err(|err| MinerError::from(err))
481			.and_then(|supports| {
482				// If we someday want to check `MaxBackersPerWinnerFinal`, it would be here.
483				Ok(supports)
484			})
485	}
486
487	/// Take the given raw paged solution and compute its score. This will replicate what the chain
488	/// would do as closely as possible, and expects all the corresponding snapshot data to be
489	/// available.
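	///
	/// For example (illustrative numbers only): if winner `A` is backed with a total of `10` on
	/// page 0 and `15` on page 1, and winner `B` with `20` on page 0, the aggregated backings are
	/// `A = 25` and `B = 20`, yielding an [`ElectionScore`] of
	/// `{ minimal_stake: 20, sum_stake: 45, sum_stake_squared: 1025 }`.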
490	fn compute_score(
491		paged_solution: &PagedRawSolution<T>,
492		paged_voters: &AllVoterPagesOf<T>,
493		all_targets: &BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>,
494		desired_targets: u32,
495	) -> Result<ElectionScore, MinerError<T>> {
496		let all_supports =
497			Self::check_feasibility(paged_solution, paged_voters, all_targets, desired_targets)?;
498		let mut total_backings: BTreeMap<T::AccountId, ExtendedBalance> = BTreeMap::new();
499		all_supports.into_iter().flat_map(|x| x.0).for_each(|(who, support)| {
500			let backing = total_backings.entry(who).or_default();
501			*backing = backing.saturating_add(support.total);
502		});
503
504		let all_supports = total_backings
505			.into_iter()
506			.map(|(who, total)| (who, Support { total, ..Default::default() }))
507			.collect::<Vec<_>>();
508
509		Ok((&all_supports).evaluate())
510	}
511
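	/// Trim a single page of assignments so that the per-page bounds
	/// ([`MinerConfig::MaxWinnersPerPage`] and [`MinerConfig::MaxBackersPerWinner`]) are
	/// respected, truncating the weakest backers/winners where needed.
	///
	/// This converts the page's assignments to supports, applies the bounded truncation, and
	/// converts the result back into ratio assignments.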
512	fn trim_supports_max_backers_per_winner_per_page(
513		untrimmed_assignments: Vec<AssignmentOf<T>>,
514		page_voters: &VoterPageOf<T>,
515		page: PageIndex,
516	) -> Result<Vec<AssignmentOf<T>>, MinerError<T>> {
517		use sp_npos_elections::{
518			assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized,
519			supports_to_staked_assignment, to_supports,
520		};
521		// convert to staked
522		let cache = helpers::generate_voter_cache::<T, _>(page_voters);
523		let stake_of = helpers::stake_of_fn::<T, _>(&page_voters, &cache);
524		let untrimmed_staked_assignments =
525			assignment_ratio_to_staked_normalized(untrimmed_assignments, &stake_of)?;
526
527		// convert to supports
528		let supports = to_supports(&untrimmed_staked_assignments);
529		drop(untrimmed_staked_assignments);
530
531		// Convert it to our desired bounds, which will truncate the smallest backers if need
532		// be.
533		let (bounded, winners_removed, backers_removed) =
534			PageSupportsOfMiner::<T>::sorted_truncate_from(supports);
535
536		miner_log!(
537			debug,
538			"trimmed {} winners and {} backers from page {} due to per-page limits",
539			winners_removed,
540			backers_removed,
541			page
542		);
543
544		// convert back to staked
545		let trimmed_staked_assignments = supports_to_staked_assignment(bounded.into());
546		// and then ratio assignments
547		let trimmed_assignments =
548			assignment_staked_to_ratio_normalized(trimmed_staked_assignments)?;
549
550		Ok(trimmed_assignments)
551	}
552
553	/// Maybe trim the weight and length of the given multi-page solution.
554	///
555	/// Returns the number of voters removed.
556	///
557	/// If either of the bounds are not met, the trimming strategy is as follows:
558	///
559	/// Start from the least significant page. Assume only this page is going to be trimmed. Call
560	/// `page.sort()` on this page. This will make sure in each field (`votes1`, `votes2`, etc.) of
561	/// that page, the voters are sorted by descending stake. Then, we compare the last item of each
562	/// field. This is the process of removing the single least staked voter.
563	///
564	/// We repeat this until satisfied, for both weight and length. If a full page is removed, but
565	/// the bound is not satisfied, we need to make sure that we sort the next least valuable page,
566	/// and repeat the same process.
567	///
568	/// NOTE: this is a public function to be used by the `OffchainWorkerMiner` or any similar one,
569	/// based on the submission strategy. The length and weight bounds of a call are dependent on
570	/// the number of pages being submitted, the number of blocks over which we submit, and the type
571	/// of the transaction and its weight (e.g. signed or unsigned).
572	///
573	/// NOTE: It could be that this function removes too many voters, and the solution becomes
574	/// invalid. This is not yet handled and only a warning is emitted.
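	///
	/// For example (illustrative): with `MaxLength = 100` bytes and a 3-page solution that
	/// encodes to `120` bytes, page 0 (the least significant page) is sorted by stake and its
	/// weakest voter is removed repeatedly, re-checking the encoded size after each removal. If
	/// page 0 runs out of voters before the bound is met, the process moves on to page 1, and so
	/// on.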
575	pub fn maybe_trim_weight_and_len(
576		solution_pages: &mut Vec<SolutionOf<T>>,
577		paged_voters: &AllVoterPagesOf<T>,
578	) -> Result<u32, MinerError<T>> {
579		debug_assert_eq!(solution_pages.len(), paged_voters.len());
580		let size_limit = T::MaxLength::get();
581
582		let needs_any_trim = |solution_pages: &mut Vec<SolutionOf<T>>| {
583			let size = solution_pages.encoded_size() as u32;
584			let needs_len_trim = size > size_limit;
585			// a reminder that we used to have weight trimming here, but no more!
586			let needs_weight_trim = false;
587			needs_weight_trim || needs_len_trim
588		};
589
590		// Note the solution might be partial. In either case, this is its least significant page.
591		let mut current_trimming_page = 0;
592		let current_trimming_page_stake_of = |current_trimming_page: usize| {
593			Box::new(move |voter_index: &SolutionVoterIndexOf<T>| -> VoteWeight {
594				paged_voters
595					.get(current_trimming_page)
596					.and_then(|page_voters| {
597						page_voters
598							.get((*voter_index).saturated_into::<usize>())
599							.map(|(_, s, _)| *s)
600					})
601					.unwrap_or_default()
602			})
603		};
604
605		let sort_current_trimming_page =
606			|current_trimming_page: usize, solution_pages: &mut Vec<SolutionOf<T>>| {
607				solution_pages.get_mut(current_trimming_page).map(|solution_page| {
608					let stake_of_fn = current_trimming_page_stake_of(current_trimming_page);
609					solution_page.sort(stake_of_fn)
610				});
611			};
612
613		let is_empty = |solution_pages: &Vec<SolutionOf<T>>| {
614			solution_pages.iter().all(|page| page.voter_count().is_zero())
615		};
616
617		if needs_any_trim(solution_pages) {
618			sort_current_trimming_page(current_trimming_page, solution_pages)
619		}
620
621		// Implementation note: we want `solution_pages` and `paged_voters` to remain in sync, so
622		// while one of the pages of `solution_pages` might become "empty" we prefer not removing
623		// it. This has a slight downside that even an empty page consumes a few dozen bytes,
624		// which we accept for code simplicity.
625
626		let mut removed = 0;
627		while needs_any_trim(solution_pages) && !is_empty(solution_pages) {
628			if let Some(removed_idx) =
629				solution_pages.get_mut(current_trimming_page).and_then(|page| {
630					let stake_of_fn = current_trimming_page_stake_of(current_trimming_page);
631					page.remove_weakest_sorted(&stake_of_fn)
632				}) {
633				miner_log!(
634					trace,
635					"removed voter at index {:?} of (un-pagified) page {} as the weakest due to weight/length limits.",
636					removed_idx,
637					current_trimming_page
638				);
639				// we removed one person, continue.
640				removed.saturating_inc();
641			} else {
642				// this page cannot support any more removals. Try the next page.
643				miner_log!(
644					debug,
645					"page {} seems to be fully empty now, moving to the next one",
646					current_trimming_page
647				);
648				let next_page = current_trimming_page.saturating_add(1);
649				if paged_voters.len() > next_page {
650					current_trimming_page = next_page;
651					sort_current_trimming_page(current_trimming_page, solution_pages);
652				} else {
653					miner_log!(
654						warn,
655						"no more pages to trim from at page {}, already trimmed",
656						current_trimming_page
657					);
658					break
659				}
660			}
661		}
662
663		Ok(removed)
664	}
665}
666
667/// A miner that is suited to work inside offchain worker environment.
668///
669/// This is parameterized by [`Config`], rather than [`MinerConfig`].
670pub struct OffchainWorkerMiner<T: Config>(sp_std::marker::PhantomData<T>);
671
672impl<T: Config> OffchainWorkerMiner<T> {
673	/// Storage key used to store the offchain worker running status.
674	pub(crate) const OFFCHAIN_LOCK: &'static [u8] = b"parity/multi-block-unsigned-election/lock";
675	/// Storage key used to store the last block number at which offchain worker ran.
676	const OFFCHAIN_LAST_BLOCK: &'static [u8] = b"parity/multi-block-unsigned-election";
677	/// Storage key used to cache the solution `call` and its snapshot fingerprint.
678	const OFFCHAIN_CACHED_CALL: &'static [u8] = b"parity/multi-block-unsigned-election/call";
679
680	pub(crate) fn fetch_snapshot(
681		pages: PageIndex,
682	) -> Result<
683		(AllVoterPagesOf<T::MinerConfig>, BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>, u32),
684		OffchainMinerError<T>,
685	> {
686		// read the appropriate snapshot pages.
687		let desired_targets = crate::Snapshot::<T>::desired_targets()
688			.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::DesiredTargets))?;
689		let all_targets = crate::Snapshot::<T>::targets()
690			.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Targets))?;
691
692		// This is the range of voters that we are interested in.
693		let voter_pages_range = crate::Pallet::<T>::msp_range_for(pages as usize);
694
695		sublog!(
696			debug,
697			"unsigned::base-miner",
698			"mining a solution with {} pages, voter snapshot range will be: {:?}",
699			pages,
700			voter_pages_range
701		);
702
703		// NOTE: if `pages (2) < T::Pages (3)`, at this point this vector will have length 2,
704		// with a layout of `[snapshot(1), snapshot(2)]`, namely the two most significant pages
705		// of the snapshot.
706		let voter_pages: BoundedVec<_, T::Pages> = voter_pages_range
707			.into_iter()
708			.map(|p| {
709				crate::Snapshot::<T>::voters(p)
710					.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(p)))
711			})
712			.collect::<Result<Vec<_>, _>>()?
713			.try_into()
714			.expect(
715				"`voter_pages_range` has `.take(pages)`; it must have length less than pages; it
716				must convert to `BoundedVec`; qed",
717			);
718
719		Ok((voter_pages, all_targets, desired_targets))
720	}
721
722	pub fn mine_solution(
723		pages: PageIndex,
724		do_reduce: bool,
725	) -> Result<PagedRawSolution<T::MinerConfig>, OffchainMinerError<T>> {
726		if pages.is_zero() {
727			return Err(OffchainMinerError::<T>::ZeroPages);
728		}
729		let (voter_pages, all_targets, desired_targets) = Self::fetch_snapshot(pages)?;
730		let round = crate::Pallet::<T>::round();
731		BaseMiner::<T::MinerConfig>::mine_solution(MineInput {
732			desired_targets,
733			all_targets,
734			voter_pages,
735			pages,
736			do_reduce,
737			round,
738		})
739		.map_err(Into::into)
740	}
741
742	/// Get a checked solution from the base miner, ensure unsigned-specific checks also pass, then
743	/// return a submittable call.
744	fn mine_checked_call() -> Result<Call<T>, OffchainMinerError<T>> {
745		// we always do reduce in the offchain worker miner.
746		let reduce = true;
747
748		// NOTE: we don't run any checks in the base miner, and run all of them via
749		// `Self::full_checks`.
750		let paged_solution = Self::mine_solution(T::MinerPages::get(), reduce)
751			.map_err::<OffchainMinerError<T>, _>(Into::into)?;
752		// check the call fully, no fingerprinting.
753		let _ = Self::check_solution(&paged_solution, None, true)?;
754
755		let call: Call<T> =
756			Call::<T>::submit_unsigned { paged_solution: Box::new(paged_solution) }.into();
757
758		Ok(call)
759	}
760
761	/// Mine a new checked solution, maybe cache it, and submit it back to the chain as an unsigned
762	/// transaction.
763	pub(crate) fn mine_check_maybe_save_submit(save: bool) -> Result<(), OffchainMinerError<T>> {
764		sublog!(debug, "unsigned::ocw-miner", "miner attempting to compute an unsigned solution.");
765		let call = Self::mine_checked_call()?;
766		if save {
767			Self::save_solution(&call, crate::Snapshot::<T>::fingerprint())?;
768		}
769		Self::submit_call(call)
770	}
771
772	/// Check the solution, from the perspective of the offchain-worker miner:
773	///
774	/// 1. unsigned-specific checks.
775	/// 2. full-checks of the base miner
776	/// 	1. optionally feasibility check.
777	/// 	2. snapshot-independent checks.
778	/// 		1. optionally, snapshot fingerprint.
779	pub(crate) fn check_solution(
780		paged_solution: &PagedRawSolution<T::MinerConfig>,
781		maybe_snapshot_fingerprint: Option<T::Hash>,
782		do_feasibility: bool,
783	) -> Result<(), OffchainMinerError<T>> {
784		// NOTE: we prefer cheap checks first, so first run unsigned checks.
785		Pallet::<T>::unsigned_specific_checks(paged_solution)?;
786		Self::base_check_solution(paged_solution, maybe_snapshot_fingerprint, do_feasibility)
787	}
788
789	fn submit_call(call: Call<T>) -> Result<(), OffchainMinerError<T>> {
790		let xt = T::create_bare(call.into());
791		frame_system::offchain::SubmitTransaction::<T, Call<T>>::submit_transaction(xt)
792			.map(|_| {
793				sublog!(
794					debug,
795					"unsigned::ocw-miner",
796					"miner submitted a solution as an unsigned transaction",
797				);
798			})
799			.map_err(|_| OffchainMinerError::PoolSubmissionFailed)
800	}
801
802	/// Check the solution, from the perspective of the base miner:
803	///
804	/// 1. snapshot-independent checks.
805	/// 	- with the fingerprint check being an optional step of that.
806	/// 2. optionally, feasibility check.
807	///
808	/// In most cases, you should always use this either with `do_feasibility = true` or
809	/// `maybe_snapshot_fingerprint.is_some()`. Doing both could be overkill. The snapshot
810	/// staying constant (which can be checked via the hash) is a strong guarantee that the
811	/// feasibility still holds.
812	///
813	/// The difference between this and [`Self::check_solution`] is that this does not run unsigned
814	/// specific checks.
815	pub(crate) fn base_check_solution(
816		paged_solution: &PagedRawSolution<T::MinerConfig>,
817		maybe_snapshot_fingerprint: Option<T::Hash>,
818		do_feasibility: bool,
819	) -> Result<(), OffchainMinerError<T>> {
820		let _ = crate::Pallet::<T>::snapshot_independent_checks(
821			paged_solution,
822			maybe_snapshot_fingerprint,
823		)?;
824
825		if do_feasibility {
826			let (voter_pages, all_targets, desired_targets) =
827				Self::fetch_snapshot(paged_solution.solution_pages.len() as PageIndex)?;
828			let _ = BaseMiner::<T::MinerConfig>::check_feasibility(
829				&paged_solution,
830				&voter_pages,
831				&all_targets,
832				desired_targets,
833			)?;
834		}
835
836		Ok(())
837	}
838
839	/// Attempt to restore a solution from cache. Otherwise, compute it fresh. Either way,
840	/// submit if our call's score is greater than that of the cached solution.
841	pub(crate) fn restore_or_compute_then_maybe_submit() -> Result<(), OffchainMinerError<T>> {
842		sublog!(
843			debug,
844			"unsigned::ocw-miner",
845			"miner attempting to restore or compute an unsigned solution."
846		);
847
848		let call = Self::restore_solution()
849			.and_then(|(call, snapshot_fingerprint)| {
850				// ensure the cached call is still current before submitting
851				if let Call::submit_unsigned { paged_solution, .. } = &call {
852					// we check the snapshot fingerprint instead of doing a full feasibility.
853					OffchainWorkerMiner::<T>::check_solution(
854						paged_solution,
855						Some(snapshot_fingerprint),
856						false,
857					).map_err::<OffchainMinerError<T>, _>(Into::into)?;
858					Ok(call)
859				} else {
860					Err(OffchainMinerError::SolutionCallInvalid)
861				}
862			})
863			.or_else::<OffchainMinerError<T>, _>(|error| {
864				use OffchainMinerError as OE;
865				use MinerError as ME;
866				use CommonError as CE;
867				match error {
868					OE::NoStoredSolution => {
869						// If not present, regenerate.
870						let call = Self::mine_checked_call()?;
871						Self::save_solution(&call, crate::Snapshot::<T>::fingerprint())?;
872						Ok(call)
873					},
874					OE::Common(ref e) => {
875						sublog!(
876							error,
877							"unsigned::ocw-miner",
878							"unsigned specific checks failed ({:?}) while restoring solution. This should never happen. clearing cache.",
879							e,
880						);
881						Self::clear_offchain_solution_cache();
882						Err(error)
883					},
884					OE::BaseMiner(ME::Feasibility(_))
885						| OE::BaseMiner(ME::Common(CE::WrongRound))
886						| OE::BaseMiner(ME::Common(CE::WrongFingerprint))
887					=> {
888						// note that failing `Feasibility` can only mean that the solution was
889						// computed over a snapshot that has changed due to a fork.
890						sublog!(warn, "unsigned::ocw-miner", "wiping infeasible solution ({:?}).", error);
891						// kill the "bad" solution.
892						Self::clear_offchain_solution_cache();
893
894						// .. then return the error as-is.
895						Err(error)
896					},
897					_ => {
898						sublog!(debug, "unsigned::ocw-miner", "unhandled error in restoring offchain solution {:?}", error);
899						// nothing to do. Return the error as-is.
900						Err(error)
901					},
902				}
903			})?;
904
905		Self::submit_call(call)
906	}
907
908	/// Checks if an execution of the offchain worker is permitted at the given block number, or
909	/// not.
910	///
911	/// This makes sure that
912	/// 1. we don't run on previous blocks in case of a re-org
913	/// 2. we don't run twice within a window of length `T::OffchainRepeat`.
914	///
915	/// Returns `Ok(())` if offchain worker limit is respected, `Err(reason)` otherwise. If
916	/// `Ok()` is returned, `now` is written in storage and will be used in further calls as the
917	/// baseline.
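	///
	/// For example (illustrative numbers): with `OffchainRepeat = 5` and a stored head of `100`,
	/// a call with `now = 99` fails with "fork.", `now = 103` fails with "recently executed.",
	/// and `now = 106` succeeds, writing `106` as the new head.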
918	pub fn ensure_offchain_repeat_frequency(
919		now: BlockNumberFor<T>,
920	) -> Result<(), OffchainMinerError<T>> {
921		let threshold = T::OffchainRepeat::get();
922		let last_block = StorageValueRef::persistent(&Self::OFFCHAIN_LAST_BLOCK);
923
924		let mutate_stat = last_block.mutate::<_, &'static str, _>(
925			|maybe_head: Result<Option<BlockNumberFor<T>>, _>| {
926				match maybe_head {
927					Ok(Some(head)) if now < head => Err("fork."),
928					Ok(Some(head)) if now >= head && now <= head + threshold =>
929						Err("recently executed."),
930					Ok(Some(head)) if now > head + threshold => {
931						// we can run again now. Write the new head.
932						Ok(now)
933					},
934					_ => {
935						// value doesn't exist. Probably this node just booted up. Write, and
936						// run
937						Ok(now)
938					},
939				}
940			},
941		);
942
943		match mutate_stat {
944			// all good
945			Ok(_) => Ok(()),
946			// failed to write.
947			Err(MutateStorageError::ConcurrentModification(_)) => Err(OffchainMinerError::Lock(
948				"failed to write to offchain db (concurrent modification).",
949			)),
950			// fork etc.
951			Err(MutateStorageError::ValueFunctionFailed(why)) => Err(OffchainMinerError::Lock(why)),
952		}
953	}
954
955	/// Save a given call into OCW storage.
956	fn save_solution(
957		call: &Call<T>,
958		snapshot_fingerprint: T::Hash,
959	) -> Result<(), OffchainMinerError<T>> {
960		sublog!(debug, "unsigned::ocw-miner", "saving a call to the offchain storage.");
961		let storage = StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL);
962		match storage.mutate::<_, (), _>(|_| Ok((call.clone(), snapshot_fingerprint))) {
963			Ok(_) => Ok(()),
964			Err(MutateStorageError::ConcurrentModification(_)) =>
965				Err(OffchainMinerError::FailedToStoreSolution),
966			Err(MutateStorageError::ValueFunctionFailed(_)) => {
967				// this branch should be unreachable according to the definition of
968				// `StorageValueRef::mutate`: that function should only ever `Err` if the closure we
969				// pass it returns an error. However, for safety in case the definition changes, we
970				// do not optimize the branch away or panic.
971				Err(OffchainMinerError::FailedToStoreSolution)
972			},
973		}
974	}
975
976	/// Get a saved solution from OCW storage if it exists.
977	fn restore_solution() -> Result<(Call<T>, T::Hash), OffchainMinerError<T>> {
978		StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL)
979			.get()
980			.ok()
981			.flatten()
982			.ok_or(OffchainMinerError::NoStoredSolution)
983	}
984
985	/// Clear a saved solution from OCW storage.
986	fn clear_offchain_solution_cache() {
987		sublog!(debug, "unsigned::ocw-miner", "clearing offchain call cache storage.");
988		let mut storage = StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL);
989		storage.clear();
990	}
991
992	#[cfg(test)]
993	fn cached_solution() -> Option<Call<T>> {
994		StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL)
995			.get::<Call<T>>()
996			.unwrap()
997	}
998}
999
1000// This will only focus on testing the internals of `maybe_trim_weight_and_len`.
1001#[cfg(test)]
1002mod trimming {
1003	use super::*;
1004	use crate::{mock::*, verifier::Verifier};
1005	use frame_election_provider_support::TryFromUnboundedPagedSupports;
1006	use sp_npos_elections::Support;
1007
1008	#[test]
1009	fn solution_without_any_trimming() {
1010		ExtBuilder::unsigned().build_and_execute(|| {
1011			// adjust the voters a bit, such that they are all different backings
1012			let mut current_voters = Voters::get();
1013			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1014			Voters::set(current_voters);
1015
1016			roll_to_snapshot_created();
1017
1018			// now we let the miner mine something for us..
1019			let solution = mine_full_solution().unwrap();
1020			assert_eq!(
1021				solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(),
1022				8
1023			);
1024
1025			assert_eq!(solution.solution_pages.encoded_size(), 105);
1026
1027			load_mock_signed_and_start(solution);
1028			let supports = roll_to_full_verification();
1029
1030			// a solution is queued.
1031			assert!(VerifierPallet::queued_score().is_some());
1032
1033			assert_eq!(
1034				supports,
1035				vec![
1036					vec![
1037						(30, Support { total: 30, voters: vec![(30, 30)] }),
1038						(40, Support { total: 40, voters: vec![(40, 40)] })
1039					],
1040					vec![
1041						(30, Support { total: 11, voters: vec![(5, 2), (6, 2), (7, 7)] }),
1042						(40, Support { total: 7, voters: vec![(5, 3), (6, 4)] })
1043					],
1044					vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })]
1045				]
1046				.try_from_unbounded_paged()
1047				.unwrap()
1048			);
1049		})
1050	}
1051
1052	#[test]
1053	fn trim_length() {
1054		ExtBuilder::unsigned().miner_max_length(104).build_and_execute(|| {
1055			// adjust the voters a bit, such that they are all different backings
1056			let mut current_voters = Voters::get();
1057			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1058			Voters::set(current_voters);
1059
1060			roll_to_snapshot_created();
1061			ensure_voters(3, 12);
1062
1063			let solution = mine_full_solution().unwrap();
1064
1065			assert_eq!(
1066				solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(),
1067				7
1068			);
1069
1070			assert_eq!(solution.solution_pages.encoded_size(), 99);
1071
1072			load_mock_signed_and_start(solution);
1073			let supports = roll_to_full_verification();
1074
1075			// a solution is queued.
1076			assert!(VerifierPallet::queued_score().is_some());
1077
1078			assert_eq!(
1079				supports,
1080				vec![
1081					// 30 is gone! Note that length trimming starts from the lsp (least significant
1082					// page), so we trim from this page only.
1083					vec![(40, Support { total: 40, voters: vec![(40, 40)] })],
1084					vec![
1085						(30, Support { total: 11, voters: vec![(5, 2), (6, 2), (7, 7)] }),
1086						(40, Support { total: 7, voters: vec![(5, 3), (6, 4)] })
1087					],
1088					vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })]
1089				]
1090				.try_from_unbounded_paged()
1091				.unwrap()
1092			);
1093		});
1094	}
1095
1096	#[test]
1097	fn trim_length_2() {
1098		ExtBuilder::unsigned().miner_max_length(98).build_and_execute(|| {
1099			// adjust the voters a bit, such that they are all different backings
1100			let mut current_voters = Voters::get();
1101			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1102			Voters::set(current_voters);
1103
1104			roll_to_snapshot_created();
1105			ensure_voters(3, 12);
1106
1107			let solution = mine_full_solution().unwrap();
1108
1109			assert_eq!(
1110				solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(),
1111				6
1112			);
1113
1114			assert_eq!(solution.solution_pages.encoded_size(), 93);
1115
1116			load_mock_signed_and_start(solution);
1117			let supports = roll_to_full_verification();
1118
1119			// a solution is queued.
1120			assert!(VerifierPallet::queued_score().is_some());
1121
1122			assert_eq!(
1123				supports,
1124				vec![
1125					vec![],
1126					vec![
1127						(30, Support { total: 11, voters: vec![(5, 2), (6, 2), (7, 7)] }),
1128						(40, Support { total: 7, voters: vec![(5, 3), (6, 4)] })
1129					],
1130					vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })]
1131				]
1132				.try_from_unbounded_paged()
1133				.unwrap()
1134			);
1135		});
1136	}
1137
1138	#[test]
1139	fn trim_length_3() {
1140		ExtBuilder::unsigned().miner_max_length(92).build_and_execute(|| {
1141			// adjust the voters a bit, such that they are all different backings
1142			let mut current_voters = Voters::get();
1143			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1144			Voters::set(current_voters);
1145
1146			roll_to_snapshot_created();
1147			ensure_voters(3, 12);
1148
1149			let solution = mine_full_solution().unwrap();
1150
1151			assert_eq!(
1152				solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(),
1153				5
1154			);
1155
1156			assert_eq!(solution.solution_pages.encoded_size(), 83);
1157
1158			load_mock_signed_and_start(solution);
1159			let supports = roll_to_full_verification();
1160
1161			// a solution is queued.
1162			assert!(VerifierPallet::queued_score().is_some());
1163
1164			assert_eq!(
1165				supports,
1166				vec![
1167					vec![],
1168					vec![
1169						(30, Support { total: 9, voters: vec![(6, 2), (7, 7)] }),
1170						(40, Support { total: 4, voters: vec![(6, 4)] })
1171					],
1172					vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })]
1173				]
1174				.try_from_unbounded_paged()
1175				.unwrap()
1176			);
1177		});
1178	}
1179
1180	#[test]
1181	fn trim_backers_per_page_works() {
1182		ExtBuilder::unsigned().max_backers_per_winner(2).build_and_execute(|| {
1183			// adjust the voters a bit, such that they are all different backings
1184			let mut current_voters = Voters::get();
1185			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1186			Voters::set(current_voters);
1187
1188			roll_to_snapshot_created();
1189			ensure_voters(3, 12);
1190
1191			let solution = mine_full_solution().unwrap();
1192
1193			load_mock_signed_and_start(solution);
1194			let supports = roll_to_full_verification();
1195
1196			// a solution is queued.
1197			assert!(VerifierPallet::queued_score().is_some());
1198
1199			// each page is trimmed individually, based on `solution_without_any_trimming`.
1200			assert_eq!(
1201				supports,
1202				vec![
1203					vec![
1204						(30, Support { total: 30, voters: vec![(30, 30)] }),
1205						(40, Support { total: 40, voters: vec![(40, 40)] })
1206					],
1207					vec![
1208						(30, Support { total: 9, voters: vec![(6, 2), (7, 7)] }),
1209						(40, Support { total: 9, voters: vec![(5, 5), (6, 4)] }) /* notice how
1210						                                                          * 5's stake is
1211						                                                          * re-distributed
1212						                                                          * all here ^^ */
1213					],
1214					vec![(40, Support { total: 7, voters: vec![(3, 3), (4, 4)] })]
1215				]
1216				.try_from_unbounded_paged()
1217				.unwrap()
1218			);
1219		})
1220	}
1221
1222	#[test]
1223	fn trim_backers_per_page_works_2() {
1224		// This one is more interesting, as it also shows that as we trim backers, we re-distribute
1225		// their weight elsewhere.
1226		ExtBuilder::unsigned().max_backers_per_winner(1).build_and_execute(|| {
1227			// adjust the voters a bit, such that they are all different backings
1228			let mut current_voters = Voters::get();
1229			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1230			Voters::set(current_voters);
1231
1232			roll_to_snapshot_created();
1233			ensure_voters(3, 12);
1234
1235			let solution = mine_full_solution().unwrap();
1236
1237			load_mock_signed_and_start(solution);
1238			let supports = roll_to_full_verification();
1239
1240			// a solution is queued.
1241			assert!(VerifierPallet::queued_score().is_some());
1242
1243			// each page is trimmed individually, based on `solution_without_any_trimming`.
1244			assert_eq!(
1245				supports,
1246				vec![
1247					vec![
1248						(30, Support { total: 30, voters: vec![(30, 30)] }),
1249						(40, Support { total: 40, voters: vec![(40, 40)] })
1250					],
1251					vec![
1252						(30, Support { total: 7, voters: vec![(7, 7)] }),
1253						(40, Support { total: 6, voters: vec![(6, 6)] })
1254					],
1255					vec![(40, Support { total: 4, voters: vec![(4, 4)] })]
1256				]
1257				.try_from_unbounded_paged()
1258				.unwrap()
1259			);
1260		})
1261	}
1262
1263	#[test]
1264	fn trim_backers_final_works() {
1265		ExtBuilder::unsigned()
1266			.max_backers_per_winner(4)
1267			.max_backers_per_winner_final(4)
1268			.build_and_execute(|| {
1269				// adjust the voters a bit, such that they are all different backings
1270				let mut current_voters = Voters::get();
1271				current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1272				Voters::set(current_voters);
1273
1274				roll_to_snapshot_created();
1275				ensure_voters(3, 12);
1276
1277				let solution = mine_full_solution().unwrap();
1278
1279				load_mock_signed_and_start(solution);
1280				let supports = roll_to_full_verification();
1281
1282				// a solution is queued.
1283				assert!(VerifierPallet::queued_score().is_some());
1284
1285				// 30 has 1 + 3 = 4 backers -- all good
1286				// 40 has 1 + 2 + 3 = 6 backers -- needs to lose 2
1287				assert_eq!(
1288					supports,
1289					vec![
1290						vec![
1291							(30, Support { total: 30, voters: vec![(30, 30)] }),
1292							(40, Support { total: 40, voters: vec![(40, 40)] })
1293						],
1294						vec![
1295							(30, Support { total: 14, voters: vec![(5, 5), (6, 2), (7, 7)] }),
1296							(40, Support { total: 4, voters: vec![(6, 4)] })
1297						],
1298						vec![(40, Support { total: 7, voters: vec![(3, 3), (4, 4)] })]
1299					]
1300					.try_from_unbounded_paged()
1301					.unwrap()
1302				);
1303			})
1304	}
1305
1306	#[test]
1307	fn trim_backers_per_page_and_final_works() {
1308		ExtBuilder::unsigned()
1309			.max_backers_per_winner_final(4)
1310			.max_backers_per_winner(2)
1311			.build_and_execute(|| {
1312				// adjust the voters a bit, such that they are all different backings
1313				let mut current_voters = Voters::get();
1314				current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1315				Voters::set(current_voters);
1316
1317				roll_to_snapshot_created();
1318				ensure_voters(3, 12);
1319
1320				let solution = mine_full_solution().unwrap();
1321
1322				load_mock_signed_and_start(solution);
1323				let supports = roll_to_full_verification();
1324
1325				// a solution is queued.
1326				assert!(VerifierPallet::queued_score().is_some());
1327
1328				// each page is trimmed individually, based on `solution_without_any_trimming`.
1329				assert_eq!(
1330					supports,
1331					vec![
1332						vec![
1333							(30, Support { total: 30, voters: vec![(30, 30)] }),
1334							(40, Support { total: 40, voters: vec![(40, 40)] })
1335						],
1336						vec![
1337							(30, Support { total: 12, voters: vec![(5, 5), (7, 7)] }),
1338							(40, Support { total: 6, voters: vec![(6, 6)] })
1339						],
1340						vec![(40, Support { total: 7, voters: vec![(3, 3), (4, 4)] })]
1341					]
1342					.try_from_unbounded_paged()
1343					.unwrap()
1344				);
1345			})
1346	}
1347
1348	#[test]
1349	fn aggressive_backer_trimming_maintains_winner_count() {
1350		// Test the scenario where aggressive backer trimming is applied but the solution
1351		// should still maintain the correct winner count to avoid WrongWinnerCount errors.
1352		ExtBuilder::unsigned()
1353			.desired_targets(3)
1354			.max_winners_per_page(2)
1355			.pages(2)
1356			.max_backers_per_winner_final(1) // aggressive final trimming
1357			.max_backers_per_winner(1) // aggressive per-page trimming
1358			.build_and_execute(|| {
1359				// Use default 4 targets to stay within TargetSnapshotPerBlock limit
1360
1361				// Adjust the voters a bit, such that they are all different backings
1362				let mut current_voters = Voters::get();
1363				current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
1364				Voters::set(current_voters);
1365
1366				roll_to_snapshot_created();
1367
1368				let solution = mine_full_solution().unwrap();
1369
1370				// The solution should still be valid despite aggressive trimming
1371				assert!(solution.solution_pages.len() > 0);
1372
1373				let winner_count = solution
1374					.solution_pages
1375					.iter()
1376					.flat_map(|page| page.unique_targets())
1377					.collect::<std::collections::HashSet<_>>()
1378					.len();
1379
1380				// We should get 3 winners.
1381				// This demonstrates that FullSupportsOfMiner can accommodate winners from multiple
1382				// pages and can hold more winners than MaxWinnersPerPage.
1383				assert_eq!(winner_count, 3);
1384
1385				// Load and verify the solution passes all checks without WrongWinnerCount error
1386				load_mock_signed_and_start(solution);
1387				let _supports = roll_to_full_verification();
1388
1389				// A solution should be successfully queued
1390				assert!(VerifierPallet::queued_score().is_some());
1391			})
1392	}
1393}
1394
1395#[cfg(test)]
1396mod base_miner {
1397	use std::vec;
1398
1399	use super::*;
1400	use crate::{mock::*, Snapshot};
1401	use frame_election_provider_support::TryFromUnboundedPagedSupports;
1402	use sp_npos_elections::Support;
1403	use sp_runtime::PerU16;
1404
1405	#[test]
1406	fn pagination_does_not_affect_score() {
1407		let score_1 = ExtBuilder::unsigned()
1408			.pages(1)
1409			.voter_per_page(12)
1410			.build_unchecked()
1411			.execute_with(|| {
1412				roll_to_snapshot_created();
1413				mine_full_solution().unwrap().score
1414			});
1415		let score_2 = ExtBuilder::unsigned()
1416			.pages(2)
1417			.voter_per_page(6)
1418			.build_unchecked()
1419			.execute_with(|| {
1420				roll_to_snapshot_created();
1421				mine_full_solution().unwrap().score
1422			});
1423		let score_3 = ExtBuilder::unsigned()
1424			.pages(3)
1425			.voter_per_page(4)
1426			.build_unchecked()
1427			.execute_with(|| {
1428				roll_to_snapshot_created();
1429				mine_full_solution().unwrap().score
1430			});
1431
1432		assert_eq!(score_1, score_2);
1433		assert_eq!(score_2, score_3);
1434	}
1435
1436	#[test]
1437	fn mine_solution_single_page_works() {
1438		ExtBuilder::unsigned().pages(1).voter_per_page(8).build_and_execute(|| {
1439			roll_to_snapshot_created();
1440
1441			ensure_voters(1, 8);
1442			ensure_targets(1, 4);
1443
1444			assert_eq!(
1445				Snapshot::<Runtime>::voters(0)
1446					.unwrap()
1447					.into_iter()
1448					.map(|(x, _, _)| x)
1449					.collect::<Vec<_>>(),
1450				vec![1, 2, 3, 4, 5, 6, 7, 8]
1451			);
1452
1453			let paged = mine_full_solution().unwrap();
1454			assert_eq!(paged.solution_pages.len(), 1);
1455
1456			// this solution must be feasible and submittable.
1457			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true).unwrap();
1458
1459			// now do a realistic full verification
1460			load_mock_signed_and_start(paged.clone());
1461			let supports = roll_to_full_verification();
1462
1463			assert_eq!(
1464				supports,
1465				vec![vec![
1466					(10, Support { total: 30, voters: vec![(1, 10), (4, 5), (5, 5), (8, 10)] }),
1467					(
1468						40,
1469						Support {
1470							total: 40,
1471							voters: vec![(2, 10), (3, 10), (4, 5), (5, 5), (6, 10)]
1472						}
1473					)
1474				]]
1475				.try_from_unbounded_paged()
1476				.unwrap()
1477			);
1478
1479			// NOTE: this is the same as the score of any other test that contains the first 8
1480			// voters, we already test for this in `pagination_does_not_affect_score`.
1481			assert_eq!(
1482				paged.score,
1483				ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 }
1484			);
1485		})
1486	}
1487
1488	#[test]
1489	fn mine_solution_double_page_works() {
1490		ExtBuilder::unsigned().pages(2).voter_per_page(4).build_and_execute(|| {
1491			roll_to_snapshot_created();
1492
1493			// 2 pages of 8 voters
1494			ensure_voters(2, 8);
1495			// 1 page of 4 targets
1496			ensure_targets(1, 4);
1497
1498			// voters in pages. note the reverse page index.
1499			assert_eq!(
1500				Snapshot::<Runtime>::voters(0)
1501					.unwrap()
1502					.into_iter()
1503					.map(|(x, _, _)| x)
1504					.collect::<Vec<_>>(),
1505				vec![5, 6, 7, 8]
1506			);
1507			assert_eq!(
1508				Snapshot::<Runtime>::voters(1)
1509					.unwrap()
1510					.into_iter()
1511					.map(|(x, _, _)| x)
1512					.collect::<Vec<_>>(),
1513				vec![1, 2, 3, 4]
1514			);
1515			// targets in pages.
1516			assert_eq!(Snapshot::<Runtime>::targets().unwrap(), vec![10, 20, 30, 40]);
1517			let paged = mine_full_solution().unwrap();
1518
1519			assert_eq!(
1520				paged.solution_pages,
1521				vec![
1522					TestNposSolution {
1523						// voter 6 (index 1) is backing 40 (index 3).
1524						// voter 8 (index 3) is backing 10 (index 0)
1525						votes1: vec![(1, 3), (3, 0)],
1526						// voter 5 (index 0) is backing 40 (index 3) and 10 (index 0)
1527						votes2: vec![(0, [(0, PerU16::from_parts(32768))], 3)],
1528						..Default::default()
1529					},
1530					TestNposSolution {
1531						// voter 1 (index 0) is backing 10 (index 0)
1532						// voter 2 (index 1) is backing 40 (index 3)
1533						// voter 3 (index 2) is backing 40 (index 3)
1534						votes1: vec![(0, 0), (1, 3), (2, 3)],
1535						// voter 4 (index 3) is backing 40 (index 3) and 10 (index 0)
1536						votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)],
1537						..Default::default()
1538					},
1539				]
1540			);
1541
1542			// this solution must be feasible and submittable.
1543			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, false).unwrap();
1544
1545			// it must also be verified in the verifier
1546			load_mock_signed_and_start(paged.clone());
1547			let supports = roll_to_full_verification();
1548
1549			assert_eq!(
1550				supports,
1551				vec![
1552					// page0, supports from voters 5, 6, 7, 8
1553					vec![
1554						(10, Support { total: 15, voters: vec![(5, 5), (8, 10)] }),
1555						(40, Support { total: 15, voters: vec![(5, 5), (6, 10)] })
1556					],
1557					// page1 supports from voters 1, 2, 3, 4
1558					vec![
1559						(10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }),
1560						(40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] })
1561					]
1562				]
1563				.try_from_unbounded_paged()
1564				.unwrap()
1565			);
1566
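			// Aggregated across both pages, 10 is backed by 15 + 15 = 30 and 40 by
			// 15 + 25 = 40, so the overall score equals the single-page case above.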
1567			assert_eq!(
1568				paged.score,
1569				ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 }
1570			);
1571		})
1572	}
1573
1574	#[test]
1575	fn mine_solution_triple_page_works() {
1576		ExtBuilder::unsigned().pages(3).voter_per_page(4).build_and_execute(|| {
1577			roll_to_snapshot_created();
1578
1579			ensure_voters(3, 12);
1580			ensure_targets(1, 4);
1581
1582			// voters in pages. note the reverse page index.
1583			assert_eq!(
1584				Snapshot::<Runtime>::voters(2)
1585					.unwrap()
1586					.into_iter()
1587					.map(|(x, _, _)| x)
1588					.collect::<Vec<_>>(),
1589				vec![1, 2, 3, 4]
1590			);
1591			assert_eq!(
1592				Snapshot::<Runtime>::voters(1)
1593					.unwrap()
1594					.into_iter()
1595					.map(|(x, _, _)| x)
1596					.collect::<Vec<_>>(),
1597				vec![5, 6, 7, 8]
1598			);
1599			assert_eq!(
1600				Snapshot::<Runtime>::voters(0)
1601					.unwrap()
1602					.into_iter()
1603					.map(|(x, _, _)| x)
1604					.collect::<Vec<_>>(),
1605				vec![10, 20, 30, 40]
1606			);
1607
1608			let paged = mine_full_solution().unwrap();
1609			assert_eq!(
1610				paged.solution_pages,
1611				vec![
1612					TestNposSolution { votes1: vec![(2, 2), (3, 3)], ..Default::default() },
1613					TestNposSolution {
1614						votes1: vec![(2, 2)],
1615						votes2: vec![
1616							(0, [(2, PerU16::from_parts(32768))], 3),
1617							(1, [(2, PerU16::from_parts(32768))], 3)
1618						],
1619						..Default::default()
1620					},
1621					TestNposSolution {
1622						votes1: vec![(2, 3), (3, 3)],
1623						votes2: vec![(1, [(2, PerU16::from_parts(32768))], 3)],
1624						..Default::default()
1625					},
1626				]
1627			);
1628
1629			// this solution must be feasible and submittable.
1630			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true).unwrap();
1631			// now do a realistic full verification
1632			load_mock_signed_and_start(paged.clone());
1633			let supports = roll_to_full_verification();
1634
1635			assert_eq!(
1636				supports,
1637				vec![
1638					// page 0: self-votes.
1639					vec![
1640						(30, Support { total: 30, voters: vec![(30, 30)] }),
1641						(40, Support { total: 40, voters: vec![(40, 40)] })
1642					],
1643					// page 1: 5, 6, 7, 8
1644					vec![
1645						(30, Support { total: 20, voters: vec![(5, 5), (6, 5), (7, 10)] }),
1646						(40, Support { total: 10, voters: vec![(5, 5), (6, 5)] })
1647					],
1648					// page 2: 1, 2, 3, 4
1649					vec![
1650						(30, Support { total: 5, voters: vec![(2, 5)] }),
1651						(40, Support { total: 25, voters: vec![(2, 5), (3, 10), (4, 10)] })
1652					]
1653				]
1654				.try_from_unbounded_paged()
1655				.unwrap()
1656			);
1657
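			// Across the three pages, 30 is backed by 30 + 20 + 5 = 55 and 40 by
			// 40 + 10 + 25 = 75, hence minimal_stake = 55, sum_stake = 130 and
			// sum_stake_squared = 55^2 + 75^2 = 3025 + 5625 = 8650.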
1658			assert_eq!(
1659				paged.score,
1660				ElectionScore { minimal_stake: 55, sum_stake: 130, sum_stake_squared: 8650 }
1661			);
1662		})
1663	}
1664
1665	#[test]
1666	fn mine_solution_chooses_most_significant_pages() {
1667		ExtBuilder::unsigned().pages(2).voter_per_page(4).build_and_execute(|| {
1668			roll_to_snapshot_created();
1669
1670			ensure_voters(2, 8);
1671			ensure_targets(1, 4);
1672
1673			// these voters, sitting in the least significant page, are safely ignored.
1674			assert_eq!(
1675				Snapshot::<Runtime>::voters(0)
1676					.unwrap()
1677					.into_iter()
1678					.map(|(x, _, _)| x)
1679					.collect::<Vec<_>>(),
1680				vec![5, 6, 7, 8]
1681			);
1682			// voters in page 1; this is the most significant page.
1683			assert_eq!(
1684				Snapshot::<Runtime>::voters(1)
1685					.unwrap()
1686					.into_iter()
1687					.map(|(x, _, _)| x)
1688					.collect::<Vec<_>>(),
1689				vec![1, 2, 3, 4]
1690			);
1691
1692			// now we ask for just 1 page of solution.
1693			let paged = mine_solution(1).unwrap();
1694
1695			assert_eq!(
1696				paged.solution_pages,
1697				vec![TestNposSolution {
1698					// voter 1 (index 0) is backing 10 (index 0)
1699					// voter 2 (index 1) is backing 40 (index 3)
1700					// voter 3 (index 2) is backing 40 (index 3)
1701					votes1: vec![(0, 0), (1, 3), (2, 3)],
1702				// voter 4 (index 3) is backing 40 (index 3) and 10 (index 0)
1703					votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)],
1704					..Default::default()
1705				}]
1706			);
1707
1708			// this solution must be feasible and submittable.
1709			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true).unwrap();
1710			// now do a realistic full verification.
1711			load_mock_signed_and_start(paged.clone());
1712			let supports = roll_to_full_verification();
1713
1714			assert_eq!(
1715				supports,
1716				vec![
1717					// page0: non-existent.
1718					vec![],
1719					// page1 supports from voters 1, 2, 3, 4
1720					vec![
1721						(10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }),
1722						(40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] })
1723					]
1724				]
1725				.try_from_unbounded_paged()
1726				.unwrap()
1727			);
1728
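			// Only the most significant page contributes: 10 is backed by 15 and 40 by 25,
			// so minimal_stake = 15, sum_stake = 40 and
			// sum_stake_squared = 15^2 + 25^2 = 225 + 625 = 850.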
1729			assert_eq!(
1730				paged.score,
1731				ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 }
1732			);
1733		})
1734	}
1735
1736	#[test]
1737	fn mine_solution_2_out_of_3_pages() {
1738		ExtBuilder::unsigned().pages(3).voter_per_page(4).build_and_execute(|| {
1739			roll_to_snapshot_created();
1740
1741			ensure_voters(3, 12);
1742			ensure_targets(1, 4);
1743
1744			assert_eq!(
1745				Snapshot::<Runtime>::voters(0)
1746					.unwrap()
1747					.into_iter()
1748					.map(|(x, _, _)| x)
1749					.collect::<Vec<_>>(),
1750				vec![10, 20, 30, 40]
1751			);
1752			assert_eq!(
1753				Snapshot::<Runtime>::voters(1)
1754					.unwrap()
1755					.into_iter()
1756					.map(|(x, _, _)| x)
1757					.collect::<Vec<_>>(),
1758				vec![5, 6, 7, 8]
1759			);
1760			assert_eq!(
1761				Snapshot::<Runtime>::voters(2)
1762					.unwrap()
1763					.into_iter()
1764					.map(|(x, _, _)| x)
1765					.collect::<Vec<_>>(),
1766				vec![1, 2, 3, 4]
1767			);
1768
1769			// now we ask for just 2 pages of solution.
1770			let paged = mine_solution(2).unwrap();
1771
1772			// this solution must be feasible and submittable.
1773			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true).unwrap();
1774
1775			assert_eq!(
1776				paged.solution_pages,
1777				vec![
1778					// this can be "pagified" to snapshot at index 1, which contains 5, 6, 7, 8
1779					// in which:
1780					// 6 (index:1) votes for 40 (index:3)
1781					// 8 (index:3) votes for 10 (index:0)
1782					// 5 votes for both 10 and 40
1783					TestNposSolution {
1784						votes1: vec![(1, 3), (3, 0)],
1785						votes2: vec![(0, [(0, PerU16::from_parts(32768))], 3)],
1786						..Default::default()
1787					},
1788					// this can be "pagified" to snapshot at index 2, which contains 1, 2, 3, 4
1789					// in which:
1790					// 1 (index:0) votes for 10 (index:0)
1791					// 2 (index:1) votes for 40 (index:3)
1792					// 3 (index:2) votes for 40 (index:3)
1793					// 4 votes for both 10 and 40
1794					TestNposSolution {
1795						votes1: vec![(0, 0), (1, 3), (2, 3)],
1796						votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)],
1797						..Default::default()
1798					}
1799				]
1800			);
1801
1802			// this solution must be feasible and submittable.
1803			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true).unwrap();
1804			// now do a realistic full verification.
1805			load_mock_signed_and_start(paged.clone());
1806			let supports = roll_to_full_verification();
1807
1808			assert_eq!(
1809				supports,
1810				vec![
1811					// empty page 0.
1812					vec![],
1813					// supports from voters 5, 6, 7, 8
1814					vec![
1815						(10, Support { total: 15, voters: vec![(5, 5), (8, 10)] }),
1816						(40, Support { total: 15, voters: vec![(5, 5), (6, 10)] })
1817					],
1818					// supports from voters 1, 2, 3, 4
1819					vec![
1820						(10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }),
1821						(40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] })
1822					]
1823				]
1824				.try_from_unbounded_paged()
1825				.unwrap()
1826			);
1827
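			// Same aggregated backings as the double-page solution above (10: 30, 40: 40),
			// since the same 8 voters are covered; only the empty page 0 differs.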
1828			assert_eq!(
1829				paged.score,
1830				ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 }
1831			);
1832		})
1833	}
1834
1835	#[test]
1836	fn can_reduce_solution() {
1837		ExtBuilder::unsigned().build_and_execute(|| {
1838			roll_to_snapshot_created();
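			// The second argument to `mine_solution` (presumably the `reduce` flag, given this
			// test's name) toggles a post-processing pass that strips redundant edges from the
			// assignment graph while leaving each target's total backing unchanged; the
			// assertion below only compares edge counts.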
1839			let full_edges = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false)
1840				.unwrap()
1841				.solution_pages
1842				.iter()
1843				.fold(0, |acc, x| acc + x.edge_count());
1844			let reduced_edges = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), true)
1845				.unwrap()
1846				.solution_pages
1847				.iter()
1848				.fold(0, |acc, x| acc + x.edge_count());
1849
1850			assert!(reduced_edges < full_edges, "{} < {} not fulfilled", reduced_edges, full_edges);
1851		})
1852	}
1853}
1854
1855#[cfg(test)]
1856mod offchain_worker_miner {
1857	use crate::{verifier::Verifier, CommonError};
1858	use frame_support::traits::Hooks;
1859	use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock};
1860
1861	use super::*;
1862	use crate::mock::*;
1863
1864	#[test]
1865	fn lock_prevents_frequent_execution() {
1866		let (mut ext, _) = ExtBuilder::unsigned().build_offchainify();
1867		ext.execute_with_sanity_checks(|| {
1868			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
1869
1870			// first execution -- okay.
1871			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(25).is_ok());
1872
1873			// next block: rejected.
1874			assert_noop!(
1875				OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(26),
1876				OffchainMinerError::Lock("recently executed.")
1877			);
1878
1879			// allowed again after `OffchainRepeat` blocks have elapsed
1880			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
1881				(26 + offchain_repeat).into()
1882			)
1883			.is_ok());
1884
1885			// a fork-like situation: re-executing any of the last 3 blocks is still rejected.
1886			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
1887				(26 + offchain_repeat - 3).into()
1888			)
1889			.is_err());
1890			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
1891				(26 + offchain_repeat - 2).into()
1892			)
1893			.is_err());
1894			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
1895				(26 + offchain_repeat - 1).into()
1896			)
1897			.is_err());
1898		})
1899	}
1900
1901	#[test]
1902	fn lock_released_after_successful_execution() {
1903		// first, ensure that a successful execution releases the lock
1904		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
1905		ext.execute_with_sanity_checks(|| {
1906			let guard = StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_LOCK);
1907			let last_block =
1908				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_LAST_BLOCK);
1909
1910			roll_to_unsigned_open();
1911
1912			// initially, the lock is not set.
1913			assert!(guard.get::<bool>().unwrap().is_none());
1914
1915			// a successful end-to-end execution.
1916			UnsignedPallet::offchain_worker(25);
1917			assert_eq!(pool.read().transactions.len(), 1);
1918
1919			// afterwards, the lock is not set either..
1920			assert!(guard.get::<bool>().unwrap().is_none());
1921			assert_eq!(last_block.get::<BlockNumber>().unwrap(), Some(25));
1922		});
1923	}
1924
1925	#[test]
1926	fn lock_prevents_overlapping_execution() {
1927		// ensure that while the guard is held, a new execution is not allowed.
1928		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
1929		ext.execute_with_sanity_checks(|| {
1930			roll_to_unsigned_open();
1931
1932			// artificially set the value, as if another thread is mid-way.
1933			let mut lock = StorageLock::<BlockAndTime<System>>::with_block_deadline(
1934				OffchainWorkerMiner::<Runtime>::OFFCHAIN_LOCK,
1935				UnsignedPhase::get().saturated_into(),
1936			);
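			// Note: the lock's block deadline is set to the unsigned phase length, so holding
			// `guard` below simulates another worker that stays mid-execution for the
			// remainder of the phase.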
1937			let guard = lock.lock();
1938
1939			// nothing submitted.
1940			UnsignedPallet::offchain_worker(25);
1941			assert_eq!(pool.read().transactions.len(), 0);
1942			UnsignedPallet::offchain_worker(26);
1943			assert_eq!(pool.read().transactions.len(), 0);
1944
1945			drop(guard);
1946
1947			// 🎉 the guard has been dropped, so submission goes through!
1948			UnsignedPallet::offchain_worker(25);
1949			assert_eq!(pool.read().transactions.len(), 1);
1950		});
1951	}
1952
1953	#[test]
1954	fn initial_ocw_runs_and_saves_new_cache() {
1955		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
1956		ext.execute_with_sanity_checks(|| {
1957			roll_to_unsigned_open();
1958
1959			let last_block =
1960				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_LAST_BLOCK);
1961			let cache =
1962				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
1963
1964			assert_eq!(last_block.get::<BlockNumber>(), Ok(None));
1965			assert_eq!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
1966
1967			// creates, caches, submits without expecting previous cache value
1968			UnsignedPallet::offchain_worker(25);
1969			assert_eq!(pool.read().transactions.len(), 1);
1970
1971			assert_eq!(last_block.get::<BlockNumber>(), Ok(Some(25)));
1972			assert!(matches!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
1973		})
1974	}
1975
1976	#[test]
1977	fn ocw_pool_submission_works() {
1978		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
1979		ext.execute_with_sanity_checks(|| {
1980			roll_to_unsigned_open();
1981
1982			roll_next_with_ocw(Some(pool.clone()));
1983			// OCW must have submitted now
1984
1985			let encoded = pool.read().transactions[0].clone();
1986			let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap();
1987			let call = extrinsic.function;
1988			assert!(matches!(
1989				call,
1990				crate::mock::RuntimeCall::UnsignedPallet(
1991					crate::unsigned::Call::submit_unsigned { .. }
1992				)
1993			));
1994		})
1995	}
1996
1997	#[test]
1998	fn resubmits_after_offchain_repeat() {
1999		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
2000		ext.execute_with_sanity_checks(|| {
2001			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
2002			roll_to_unsigned_open();
2003
2004			assert!(OffchainWorkerMiner::<Runtime>::cached_solution().is_none());
2005			// creates, caches, submits without expecting previous cache value
2006			UnsignedPallet::offchain_worker(25);
2007			assert_eq!(pool.read().transactions.len(), 1);
2008			let tx_cache = pool.read().transactions[0].clone();
2009			// assume that the tx has been processed
2010			pool.try_write().unwrap().transactions.clear();
2011
2012			// attempts to resubmit the tx after the threshold has expired.
2013			UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat);
2014			assert_eq!(pool.read().transactions.len(), 1);
2015
2016			// resubmitted tx is identical to first submission
2017			let tx = &pool.read().transactions[0];
2018			assert_eq!(&tx_cache, tx);
2019		})
2020	}
2021
2022	#[test]
2023	fn regenerates_and_resubmits_after_offchain_repeat_if_no_cache() {
2024		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
2025		ext.execute_with_sanity_checks(|| {
2026			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
2027			roll_to_unsigned_open();
2028
2029			assert!(OffchainWorkerMiner::<Runtime>::cached_solution().is_none());
2030			// creates, caches, submits without expecting previous cache value.
2031			UnsignedPallet::offchain_worker(25);
2032			assert_eq!(pool.read().transactions.len(), 1);
2033			let tx_cache = pool.read().transactions[0].clone();
2034			// assume that the tx has been processed
2035			pool.try_write().unwrap().transactions.clear();
2036
2037			// remove the cached submitted tx.
2038			// this ensures that when the resubmit window rolls around, we're ready to regenerate
2039			// from scratch if necessary
2040			let mut call_cache =
2041				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
2042			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2043			call_cache.clear();
2044
2045			// attempts to resubmit the tx after the threshold has expired
2046			UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat);
2047			assert_eq!(pool.read().transactions.len(), 1);
2048
2049			// resubmitted tx is identical to first submission
2050			let tx = &pool.read().transactions[0];
2051			assert_eq!(&tx_cache, tx);
2052		})
2053	}
2054
2055	#[test]
2056	fn altering_snapshot_invalidates_solution_cache() {
2057		// by 'invalidates' we mean that the cached call becomes infeasible once the snapshot
2057		// fingerprint has changed.
2058		let (mut ext, pool) = ExtBuilder::unsigned().unsigned_phase(999).build_offchainify();
2059		ext.execute_with_sanity_checks(|| {
2060			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
2061			roll_to_unsigned_open();
2062			roll_next_with_ocw(None);
2063
2064			// something is submitted..
2065			assert_eq!(pool.read().transactions.len(), 1);
2066			pool.try_write().unwrap().transactions.clear();
2067
2068			// ..and cached
2069			let call_cache =
2070				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
2071			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2072
2073			// now change the snapshot; of course, this is rare in reality. This makes the
2074			// cached call infeasible.
2075			assert_eq!(crate::Snapshot::<Runtime>::targets().unwrap(), vec![10, 20, 30, 40]);
2076			let pre_fingerprint = crate::Snapshot::<Runtime>::fingerprint();
2077			crate::Snapshot::<Runtime>::remove_target(0);
2078			let post_fingerprint = crate::Snapshot::<Runtime>::fingerprint();
2079			assert_eq!(crate::Snapshot::<Runtime>::targets().unwrap(), vec![20, 30, 40]);
2080			assert_ne!(pre_fingerprint, post_fingerprint);
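			// With a different fingerprint, the cached call no longer matches the current
			// snapshot; the next offchain run is expected to discard it (nothing submitted,
			// cache cleared), and only a later run mines a fresh solution.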
2081
2082			// now run ocw again
2083			let now = System::block_number();
2084			roll_to_with_ocw(now + offchain_repeat + 1, None);
2085			// nothing is submitted this time..
2086			assert_eq!(pool.read().transactions.len(), 0);
2087			// .. and the cache is gone.
2088			assert_eq!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
2089
2090			// upon the next run, we re-generate and submit something fresh again.
2091			roll_to_with_ocw(now + offchain_repeat + offchain_repeat + 2, None);
2092			assert_eq!(pool.read().transactions.len(), 1);
2093			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2094		})
2095	}
2096
2097	#[test]
2098	fn wont_resubmit_if_weak_score() {
2099		// common case: if the score is weak, don't bother with anything; ideally, check from the
2100		// logs that we don't run feasibility in this call path. The score check must come first.
2101		let (mut ext, pool) = ExtBuilder::unsigned().unsigned_phase(999).build_offchainify();
2102		ext.execute_with_sanity_checks(|| {
2103			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
2104			// unfortunately there's no pretty way to run the ocw code such that it generates a
2105			// weak, but correct solution. We just write it to cache directly.
2106			roll_to_unsigned_open();
2107			roll_next_with_ocw(None);
2108
2109			// something is submitted..
2110			assert_eq!(pool.read().transactions.len(), 1);
2111
2112			// ..and cached
2113			let call_cache =
2114				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
2115			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2116
2117			// and replace it with something weak.
2118			let weak_solution = raw_paged_from_supports(
2119				vec![vec![(40, Support { total: 10, voters: vec![(3, 10)] })]],
2120				0,
2121			);
2122			let weak_call = crate::unsigned::Call::<Runtime>::submit_unsigned {
2123				paged_solution: Box::new(weak_solution),
2124			};
2125			call_cache.set(&weak_call);
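			// The weak solution backs 40 with only 10, which should fail the miner's score
			// check (per the comment at the top of this test, that check runs before any
			// feasibility work): nothing is resubmitted and the cache is left untouched.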
2126
2127			// run again
2128			roll_to_with_ocw(System::block_number() + offchain_repeat + 1, Some(pool.clone()));
2129			// nothing is submitted this time..
2130			assert_eq!(pool.read().transactions.len(), 0);
2131			// .. and the cache IS STILL THERE!
2132			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2133		})
2134	}
2135
2136	#[test]
2137	fn ocw_submission_e2e_works() {
2138		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
2139		ext.execute_with_sanity_checks(|| {
2140			assert!(VerifierPallet::queued_score().is_none());
2141			roll_to_with_ocw(25 + 1, Some(pool.clone()));
2142			assert!(VerifierPallet::queued_score().is_some());
2143
2144			// call is cached.
2145			let call_cache =
2146				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
2147			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
2148
2149			// pool is empty
2150			assert_eq!(pool.read().transactions.len(), 0);
2151		})
2152	}
2153
2154	#[test]
2155	fn ocw_e2e_submits_and_queued_msp_only() {
2156		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
2157		ext.execute_with_sanity_checks(|| {
2158			// roll to mine
2159			roll_to_unsigned_open_with_ocw(None);
2160			// one block to verify and submit.
2161			roll_next_with_ocw(Some(pool.clone()));
2162
2163			assert_eq!(
2164				multi_block_events(),
2165				vec![
2166					crate::Event::PhaseTransitioned {
2167						from: Phase::Off,
2168						to: Phase::Snapshot(Pages::get())
2169					},
2170					crate::Event::PhaseTransitioned {
2171						from: Phase::Snapshot(0),
2172						to: Phase::Unsigned(UnsignedPhase::get() - 1)
2173					}
2174				]
2175			);
2176			assert_eq!(
2177				verifier_events(),
2178				vec![
2179					crate::verifier::Event::Verified(2, 2),
2180					crate::verifier::Event::Queued(
2181						ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 },
2182						None
2183					)
2184				]
2185			);
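			// Per the test name, only the most significant page (index 2) ends up verified,
			// and the queued score matches the one obtained above when mining a single most
			// significant page (minimal 15, sum 40, sum squared 850).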
2186			assert!(VerifierPallet::queued_score().is_some());
2187
2188			// pool is empty
2189			assert_eq!(pool.read().transactions.len(), 0);
2190		})
2191	}
2192
2193	#[test]
2194	fn multi_page_ocw_e2e_submits_and_queued_msp_only() {
2195		let (mut ext, pool) = ExtBuilder::unsigned().miner_pages(2).build_offchainify();
2196		ext.execute_with_sanity_checks(|| {
2197			// roll to mine
2198			roll_to_unsigned_open_with_ocw(None);
2199			// one block to verify and submit.
2200			roll_next_with_ocw(Some(pool.clone()));
2201
2202			assert_eq!(
2203				multi_block_events(),
2204				vec![
2205					crate::Event::PhaseTransitioned {
2206						from: Phase::Off,
2207						to: Phase::Snapshot(Pages::get())
2208					},
2209					crate::Event::PhaseTransitioned {
2210						from: Phase::Snapshot(0),
2211						to: Phase::Unsigned(UnsignedPhase::get() - 1)
2212					}
2213				]
2214			);
2215			assert_eq!(
2216				verifier_events(),
2217				vec![
2218					crate::verifier::Event::Verified(1, 2),
2219					crate::verifier::Event::Verified(2, 2),
2220					crate::verifier::Event::Queued(
2221						ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 },
2222						None
2223					)
2224				]
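			// With `miner_pages(2)`, the two most significant pages (1 and 2) are verified,
			// and the queued score matches `mine_solution_double_page_works` (30 / 70 / 2500).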
2225			);
2226			assert!(VerifierPallet::queued_score().is_some());
2227
2228			// pool is empty
2229			assert_eq!(pool.read().transactions.len(), 0);
2230		})
2231	}
2232
2233	#[test]
2234	fn full_multi_page_ocw_e2e_submits_and_queued_msp_only() {
2235		let (mut ext, pool) = ExtBuilder::unsigned().miner_pages(3).build_offchainify();
2236		ext.execute_with_sanity_checks(|| {
2237			// roll to mine
2238			roll_to_unsigned_open_with_ocw(None);
2239			// one block to verify and submit.
2240			roll_next_with_ocw(Some(pool.clone()));
2241
2242			assert_eq!(
2243				multi_block_events(),
2244				vec![
2245					crate::Event::PhaseTransitioned {
2246						from: Phase::Off,
2247						to: Phase::Snapshot(Pages::get())
2248					},
2249					crate::Event::PhaseTransitioned {
2250						from: Phase::Snapshot(0),
2251						to: Phase::Unsigned(UnsignedPhase::get() - 1)
2252					}
2253				]
2254			);
2255			assert_eq!(
2256				verifier_events(),
2257				vec![
2258					crate::verifier::Event::Verified(0, 2),
2259					crate::verifier::Event::Verified(1, 2),
2260					crate::verifier::Event::Verified(2, 2),
2261					crate::verifier::Event::Queued(
2262						ElectionScore {
2263							minimal_stake: 55,
2264							sum_stake: 130,
2265							sum_stake_squared: 8650
2266						},
2267						None
2268					)
2269				]
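			// With `miner_pages(3)`, every page is verified and the full three-page score
			// from `mine_solution_triple_page_works` (55 / 130 / 8650) is queued.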
2270			);
2271			assert!(VerifierPallet::queued_score().is_some());
2272
2273			// pool is empty
2274			assert_eq!(pool.read().transactions.len(), 0);
2275		})
2276	}
2277
2278	#[test]
2279	fn will_not_mine_if_not_enough_winners() {
2280		// also see `trim_weight_too_much_makes_solution_invalid`.
2281		let (mut ext, _) = ExtBuilder::unsigned().desired_targets(77).build_offchainify();
2282		ext.execute_with_sanity_checks(|| {
2283			roll_to_unsigned_open();
2284			ensure_voters(3, 12);
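			// the mock snapshot only ever has 4 targets, far fewer than the 77 desired
			// winners configured above, so mining cannot produce enough winners.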
2285
2286			// a beautiful error, isn't it?
2287			assert_eq!(
2288				OffchainWorkerMiner::<Runtime>::mine_checked_call().unwrap_err(),
2289				OffchainMinerError::Common(CommonError::WrongWinnerCount)
2290			);
2291		});
2292	}
2293
2294	mod no_storage {
2295		use super::*;
2296		#[test]
2297		fn ocw_never_uses_cache_on_initial_run_or_resubmission() {
2298			// When `T::OffchainStorage` is false, the offchain worker should never use cache:
2299			// - Initial run: mines and submits without caching
2300			// - Resubmission: re-mines fresh solution instead of restoring from cache
2301			let (mut ext, pool) =
2302				ExtBuilder::unsigned().offchain_storage(false).build_offchainify();
2303			ext.execute_with_sanity_checks(|| {
2304				let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
2305				roll_to_unsigned_open();
2306
2307				let last_block = StorageValueRef::persistent(
2308					&OffchainWorkerMiner::<Runtime>::OFFCHAIN_LAST_BLOCK,
2309				);
2310				let cache = StorageValueRef::persistent(
2311					&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL,
2312				);
2313
2314				// Initial state: no previous runs
2315				assert_eq!(last_block.get::<BlockNumber>(), Ok(None));
2316				assert_eq!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
2317
2318				// First run: mines and submits without caching
2319				UnsignedPallet::offchain_worker(25);
2320				assert_eq!(pool.read().transactions.len(), 1);
2321				let first_tx = pool.read().transactions[0].clone();
2322
2323				// Verify no cache is created or used
2324				assert_eq!(last_block.get::<BlockNumber>(), Ok(Some(25)));
2325				assert_eq!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
2326
2327				// Clear the pool to simulate transaction processing
2328				pool.try_write().unwrap().transactions.clear();
2329
2330				// Second run after repeat threshold: should re-mine instead of using cache
2331				UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat);
2332				assert_eq!(pool.read().transactions.len(), 1);
2333				let second_tx = pool.read().transactions[0].clone();
2334
2335				// Verify still no cache is used throughout the process
2336				assert_eq!(last_block.get::<BlockNumber>(), Ok(Some(25 + 1 + offchain_repeat)));
2337				assert_eq!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
2338
2339				// Both transactions should be identical since the snapshot hasn't changed,
2340				// but they were generated independently (no cache reuse)
2341				assert_eq!(first_tx, second_tx);
2342			})
2343		}
2344	}
2345}