// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! # WARNING
//!
//! **DO NOT USE ON VALUE-BEARING CHAINS. THIS PALLET IS ONLY INTENDED FOR TESTING USAGE.**
//!
//! # Glutton Pallet
//!
//! Pallet that consumes the `ref_time` and `proof_size` of a block. Based on the `Compute`
//! and `Storage` parameters, the pallet consumes the corresponding proportion of the block's
//! remaining weight.
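//!
//! ## Example
//!
//! A minimal sketch of wiring the pallet into a test runtime. The `Runtime`, `RuntimeEvent`
//! and `AccountId` names are assumptions about the embedding runtime, not part of this crate:
//!
//! ```ignore
//! impl pallet_glutton::Config for Runtime {
//! 	type RuntimeEvent = RuntimeEvent;
//! 	// Restrict the limit-setting extrinsics to the root origin.
//! 	type AdminOrigin = frame_system::EnsureRoot<AccountId>;
//! 	type WeightInfo = pallet_glutton::weights::SubstrateWeight<Runtime>;
//! }
//! ```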

#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
pub mod weights;

extern crate alloc;

use alloc::{vec, vec::Vec};
use blake2::{Blake2b512, Digest};
use frame_support::{pallet_prelude::*, weights::WeightMeter, DefaultNoBound};
use frame_system::pallet_prelude::*;
use sp_io::hashing::twox_256;
use sp_runtime::{traits::Zero, FixedPointNumber, FixedU64};

pub use pallet::*;
pub use weights::WeightInfo;

/// The size of each value in the `TrashData` storage in bytes.
pub const VALUE_SIZE: usize = 1024;
/// Max number of entries for the `TrashData` map.
pub const MAX_TRASH_DATA_ENTRIES: u32 = 65_000;
/// Hard upper bound for the `Compute`, `Storage` and `Length` limits (a unit-less
/// multiplier; `10` corresponds to at most 1000%).
pub const RESOURCE_HARD_LIMIT: FixedU64 = FixedU64::from_u32(10);

#[frame_support::pallet]
pub mod pallet {
	use super::*;

	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// The overarching event type.
		type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// The admin origin that can set computational limits and initialize the pallet.
		type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;

		/// Weight information for this pallet.
		type WeightInfo: WeightInfo;
	}

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event {
		/// The pallet has been (re)initialized.
		PalletInitialized {
			/// Whether the pallet has been re-initialized.
			reinit: bool,
		},
		/// The computation limit has been updated.
		ComputationLimitSet {
			/// The computation limit.
			compute: FixedU64,
		},
		/// The storage limit has been updated.
		StorageLimitSet {
			/// The storage limit.
			storage: FixedU64,
		},
		/// The block length limit has been updated.
		BlockLengthLimitSet {
			/// The block length limit.
			block_length: FixedU64,
		},
	}

	#[pallet::error]
	pub enum Error<T> {
		/// The pallet was already initialized.
		///
		/// Set `witness_count` to `Some` to bypass this error.
		AlreadyInitialized,

		/// The limit was over [`crate::RESOURCE_HARD_LIMIT`].
		InsaneLimit,
	}

	/// The proportion of the remaining `ref_time` to consume during `on_idle`.
	///
	/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
	/// over `1.0` could stall the chain.
	#[pallet::storage]
	pub(crate) type Compute<T: Config> = StorageValue<_, FixedU64, ValueQuery>;

	/// The proportion of the remaining `proof_size` to consume during `on_idle`.
	///
	/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
	/// over `1.0` could stall the chain.
	#[pallet::storage]
	pub(crate) type Storage<T: Config> = StorageValue<_, FixedU64, ValueQuery>;

	/// The proportion of the `block length` to consume on each block.
	///
	/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
	/// over `1.0` could stall the chain.
	#[pallet::storage]
	pub(crate) type Length<T: Config> = StorageValue<_, FixedU64, ValueQuery>;

	/// Storage map used for wasting proof size.
	///
	/// It contains no meaningful data - hence the name "Trash". The maximal number of entries is
	/// set to 65k, which is just below the next jump at 16^4. This is important to reduce the proof
	/// size benchmarking overestimate. The assumption here is that we won't have more than 65k *
	/// 1KiB = 65MiB of proof size wasting in practice. However, this limit is not enforced, so the
	/// pallet would also work out of the box with more entries, but its benchmarked proof weight
	/// would possibly be underestimated in that case.
	#[pallet::storage]
	pub(super) type TrashData<T: Config> = StorageMap<
		Hasher = Twox64Concat,
		Key = u32,
		Value = [u8; VALUE_SIZE],
		QueryKind = OptionQuery,
		MaxValues = ConstU32<MAX_TRASH_DATA_ENTRIES>,
	>;

	/// The current number of entries in `TrashData`.
	#[pallet::storage]
	pub(crate) type TrashDataCount<T: Config> = StorageValue<_, u32, ValueQuery>;

	#[pallet::genesis_config]
	#[derive(DefaultNoBound)]
	pub struct GenesisConfig<T: Config> {
		/// The compute limit.
		pub compute: FixedU64,
		/// The storage limit.
		pub storage: FixedU64,
		/// The amount of trash data for wasting proof size.
		pub trash_data_count: u32,
		/// The block length limit.
		pub block_length: FixedU64,
		#[serde(skip)]
		/// Phantom data; required by the genesis config machinery.
		pub _config: core::marker::PhantomData<T>,
	}

	#[pallet::genesis_build]
	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
		fn build(&self) {
			assert!(
				self.trash_data_count <= MAX_TRASH_DATA_ENTRIES,
				"number of TrashData entries cannot be bigger than {:?}",
				MAX_TRASH_DATA_ENTRIES
			);

			(0..self.trash_data_count)
				.for_each(|i| TrashData::<T>::insert(i, Pallet::<T>::gen_value(i)));

			TrashDataCount::<T>::set(self.trash_data_count);

			assert!(self.compute <= RESOURCE_HARD_LIMIT, "Compute limit is insane");
			<Compute<T>>::put(self.compute);

			assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane");
			<Storage<T>>::put(self.storage);

			assert!(self.block_length <= RESOURCE_HARD_LIMIT, "Block length limit is insane");
			<Length<T>>::put(self.block_length);
		}
	}

	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		fn integrity_test() {
			assert!(
				!T::WeightInfo::waste_ref_time_iter(1).ref_time().is_zero(),
				"Weight zero; would get stuck in an infinite loop"
			);
			assert!(
				!T::WeightInfo::waste_proof_size_some(1).proof_size().is_zero(),
				"Weight zero; would get stuck in an infinite loop"
			);
		}

		fn on_idle(_: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
			let mut meter = WeightMeter::with_limit(remaining_weight);
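			// Charge the unavoidable base cost of this hook up front; if even that does not
			// fit, report it and do nothing else.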
			if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() {
				return T::WeightInfo::empty_on_idle()
			}

			let proof_size_limit =
				Storage::<T>::get().saturating_mul_int(meter.remaining().proof_size());
			let computation_weight_limit =
				Compute::<T>::get().saturating_mul_int(meter.remaining().ref_time());
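			// Scale the leftover weight by the configured proportions and meter all wasting
			// against that reduced limit. E.g. with `Compute == 0.5` and 100 ms of `ref_time`
			// remaining, the fresh meter below is capped at 50 ms.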
			let mut meter = WeightMeter::with_limit(Weight::from_parts(
				computation_weight_limit,
				proof_size_limit,
			));

			Self::waste_at_most_proof_size(&mut meter);
			Self::waste_at_most_ref_time(&mut meter);

			meter.consumed()
		}
	}

	#[pallet::inherent]
	impl<T: Config> ProvideInherent for Pallet<T> {
		type Call = Call<T>;
		type Error = sp_inherents::MakeFatalError<()>;

		const INHERENT_IDENTIFIER: InherentIdentifier = *b"bloated0";

		fn create_inherent(_data: &InherentData) -> Option<Self::Call> {
			let max_block_length = *T::BlockLength::get().max.get(DispatchClass::Mandatory);
			let bloat_size = Length::<T>::get().saturating_mul_int(max_block_length) as usize;
			let amount_trash = bloat_size / VALUE_SIZE;
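			// Recycle the existing `TrashData` values until the target byte size is reached.
			// `cycle` over an empty map yields nothing, so an uninitialized pallet simply
			// produces an empty inherent.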
			let garbage = TrashData::<T>::iter()
				.map(|(_k, v)| v)
				.collect::<Vec<_>>()
				.into_iter()
				.cycle()
				.take(amount_trash)
				.collect::<Vec<_>>();

			Some(Call::bloat { garbage })
		}

		fn is_inherent(call: &Self::Call) -> bool {
			matches!(call, Call::bloat { .. })
		}

		fn check_inherent(call: &Self::Call, _: &InherentData) -> Result<(), Self::Error> {
			match call {
				Call::bloat { .. } => Ok(()),
				_ => unreachable!("other calls are not inherents"),
			}
		}
	}

	#[pallet::call(weight = T::WeightInfo)]
	impl<T: Config> Pallet<T> {
		/// Initialize the pallet. Should be called once, if no genesis state was provided.
		///
		/// `witness_count` is the current number of elements in `TrashData`. This can be set to
		/// `None` when the pallet is first initialized.
		///
		/// Only callable by Root or `AdminOrigin`. A good default for `new_count` is `5_000`.
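		///
		/// A hypothetical re-initialization sketch (the `Glutton` pallet name and the counts
		/// here are assumptions about the embedding runtime):
		///
		/// ```ignore
		/// // Shrink the map from 5_000 to 1_000 entries, witnessing the current count.
		/// Glutton::initialize_pallet(RuntimeOrigin::root(), 1_000, Some(5_000))?;
		/// ```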
		#[pallet::call_index(0)]
		#[pallet::weight(
			T::WeightInfo::initialize_pallet_grow(witness_count.unwrap_or_default())
				.max(T::WeightInfo::initialize_pallet_shrink(witness_count.unwrap_or_default()))
		)]
		pub fn initialize_pallet(
			origin: OriginFor<T>,
			new_count: u32,
			witness_count: Option<u32>,
		) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;

			let current_count = TrashDataCount::<T>::get();
			ensure!(
				current_count == witness_count.unwrap_or_default(),
				Error::<T>::AlreadyInitialized
			);

			if new_count > current_count {
				(current_count..new_count)
					.for_each(|i| TrashData::<T>::insert(i, Self::gen_value(i)));
			} else {
				(new_count..current_count).for_each(TrashData::<T>::remove);
			}

			Self::deposit_event(Event::PalletInitialized { reinit: witness_count.is_some() });
			TrashDataCount::<T>::set(new_count);
			Ok(())
		}

		/// Set how much of the remaining `ref_time` weight should be consumed by `on_idle`.
		///
		/// Only callable by Root or `AdminOrigin`.
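		///
		/// A sketch of a plausible setting (the `Glutton` pallet name and the origin are
		/// assumptions about the embedding runtime):
		///
		/// ```ignore
		/// // Burn roughly half of the leftover `ref_time` in every `on_idle`.
		/// Glutton::set_compute(RuntimeOrigin::root(), FixedU64::saturating_from_rational(1, 2))?;
		/// ```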
		#[pallet::call_index(1)]
		pub fn set_compute(origin: OriginFor<T>, compute: FixedU64) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;

			ensure!(compute <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
			Compute::<T>::set(compute);

			Self::deposit_event(Event::ComputationLimitSet { compute });
			Ok(())
		}

		/// Set how much of the remaining `proof_size` weight should be consumed by `on_idle`.
		///
		/// `1.0` means that all remaining `proof_size` will be consumed. The PoV benchmarking
		/// results that are used here are likely an over-estimation. 100% intended consumption will
		/// therefore translate to less than 100% actual consumption.
		///
		/// Only callable by Root or `AdminOrigin`.
		#[pallet::call_index(2)]
		pub fn set_storage(origin: OriginFor<T>, storage: FixedU64) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;

			ensure!(storage <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
			Storage::<T>::set(storage);

			Self::deposit_event(Event::StorageLimitSet { storage });
			Ok(())
		}

		/// Increase the block size by including the specified garbage bytes.
		#[pallet::call_index(3)]
		#[pallet::weight((0, DispatchClass::Mandatory))]
		pub fn bloat(_origin: OriginFor<T>, _garbage: Vec<[u8; VALUE_SIZE]>) -> DispatchResult {
			Ok(())
		}

		/// Set how much of the block length should be filled with trash data on each block.
		///
		/// `1.0` means that the whole block should be filled. If set to `1.0`, the storage proof
		/// size will be close to zero.
		///
		/// Only callable by Root or `AdminOrigin`.
		#[pallet::call_index(4)]
		#[pallet::weight({1})]
		pub fn set_block_length(origin: OriginFor<T>, block_length: FixedU64) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;

			ensure!(block_length <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
			Length::<T>::set(block_length);

			Self::deposit_event(Event::BlockLengthLimitSet { block_length });
			Ok(())
		}
	}

	impl<T: Config> Pallet<T> {
		/// Waste at most the remaining proof size of `meter`.
		///
		/// Tries to come as close to the limit as possible.
		pub(crate) fn waste_at_most_proof_size(meter: &mut WeightMeter) {
			let Ok(n) = Self::calculate_proof_size_iters(&meter) else { return };

			meter.consume(T::WeightInfo::waste_proof_size_some(n));

			(0..n).for_each(|i| {
				TrashData::<T>::get(i);
			});
		}

		/// Calculate how many times `waste_proof_size_some` should be called to fill up `meter`.
		fn calculate_proof_size_iters(meter: &WeightMeter) -> Result<u32, ()> {
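			// The weight of `waste_proof_size_some(n)` is modeled as `base + n * slope`; solve
			// for the largest `n` that still fits into the remaining weight.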
			let base = T::WeightInfo::waste_proof_size_some(0);
			let slope = T::WeightInfo::waste_proof_size_some(1).saturating_sub(base);

			let remaining = meter.remaining().saturating_sub(base);
			let iter_by_proof_size =
				remaining.proof_size().checked_div(slope.proof_size()).ok_or(())?;
			let iter_by_ref_time = remaining.ref_time().checked_div(slope.ref_time()).ok_or(())?;

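			// Wasting proof size also consumes `ref_time`, so only proceed if the proof-size
			// target also fits into the remaining `ref_time` budget.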
			if iter_by_proof_size > 0 && iter_by_proof_size <= iter_by_ref_time {
				Ok(iter_by_proof_size as u32)
			} else {
				Err(())
			}
		}

		/// Waste at most the remaining ref time weight of `meter`.
		///
		/// Tries to come as close to the limit as possible.
		pub(crate) fn waste_at_most_ref_time(meter: &mut WeightMeter) {
			let Ok(n) = Self::calculate_ref_time_iters(&meter) else { return };
			meter.consume(T::WeightInfo::waste_ref_time_iter(n));

			let clobber = Self::waste_ref_time_iter(vec![0u8; 64], n);

			// Since the result is returned as a `Vec`, the compiler should hopefully be unable
			// to optimize the hashing away. Note that `Blake2b512` always produces 64 bytes, so
			// the branch below can never be taken - but the compiler does not know that
			// (hopefully).
			debug_assert!(clobber.len() == 64);
			if clobber.len() == 65 {
				TrashData::<T>::insert(0, [clobber[0]; VALUE_SIZE]);
			}
		}

		/// Wastes some `ref_time`. Receives the previous result as an argument.
		///
		/// The ref_time of one iteration should be on the order of 1-10 ms.
		pub(crate) fn waste_ref_time_iter(clobber: Vec<u8>, i: u32) -> Vec<u8> {
			let mut hasher = Blake2b512::new();

			// Blake2 has a very high speed of hashing so we make multiple hashes with it to
			// waste more `ref_time` at once.
			(0..i).for_each(|_| {
				hasher.update(clobber.as_slice());
			});

			hasher.finalize().to_vec()
		}

		/// Calculate how many times `waste_ref_time_iter` should be called to fill up `meter`.
		fn calculate_ref_time_iters(meter: &WeightMeter) -> Result<u32, ()> {
			let base = T::WeightInfo::waste_ref_time_iter(0);
			let slope = T::WeightInfo::waste_ref_time_iter(1).saturating_sub(base);
			if !slope.proof_size().is_zero() || !base.proof_size().is_zero() {
				return Err(())
			}

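			// Linear model again: solve `base + n * slope <= remaining_ref_time` for the
			// largest integer `n`. Zero iterations or a failed division means there is nothing
			// useful to do.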
			match meter
				.remaining()
				.ref_time()
				.saturating_sub(base.ref_time())
				.checked_div(slope.ref_time())
			{
				Some(0) | None => Err(()),
				Some(i) => Ok(i as u32),
			}
		}

		/// Generate a pseudo-random deterministic value from a `seed`.
		pub(crate) fn gen_value(seed: u32) -> [u8; VALUE_SIZE] {
			let mut ret = [0u8; VALUE_SIZE];

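			// `twox_256` yields exactly 32 bytes and `VALUE_SIZE` (1024) is a multiple of 32,
			// so the loop fills the buffer completely with no remainder.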
			for i in 0u32..(VALUE_SIZE as u32 / 32) {
				let hash = (seed, i).using_encoded(twox_256);
				ret[i as usize * 32..(i + 1) as usize * 32].copy_from_slice(&hash);
			}

			ret
		}
	}
}