sc_network_sync/state_request_handler.rs

// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <https://www.gnu.org/licenses/>.

//! Helper for handling (i.e. answering) state requests from a remote peer via the
//! `crate::request_responses::RequestResponsesBehaviour`.

use crate::{
	schema::v1::{KeyValueStateEntry, StateEntry, StateRequest, StateResponse},
	LOG_TARGET,
};

use codec::{Decode, Encode};
use futures::{channel::oneshot, stream::StreamExt};
use log::{debug, trace};
use prost::Message;
use sc_network_types::PeerId;
use schnellru::{ByLength, LruMap};

use sc_client_api::{BlockBackend, ProofProvider};
use sc_network::{
	config::ProtocolId,
	request_responses::{IncomingRequest, OutgoingResponse},
	NetworkBackend, MAX_RESPONSE_SIZE,
};
use sp_runtime::traits::Block as BlockT;

use std::{
	hash::{Hash, Hasher},
	sync::Arc,
	time::Duration,
};

const MAX_RESPONSE_BYTES: usize = 2 * 1024 * 1024; // Actual response may be bigger.
const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2;

mod rep {
	use sc_network::ReputationChange as Rep;

	/// Reputation change when a peer sent us the same request multiple times.
	pub const SAME_REQUEST: Rep = Rep::new(i32::MIN, "Same state request multiple times");
}

/// Generates a `RequestResponseProtocolConfig` for the state request protocol, forwarding
/// incoming requests to the given `inbound_queue`.
pub fn generate_protocol_config<
	Hash: AsRef<[u8]>,
	B: BlockT,
	N: NetworkBackend<B, <B as BlockT>::Hash>,
>(
	protocol_id: &ProtocolId,
	genesis_hash: Hash,
	fork_id: Option<&str>,
	inbound_queue: async_channel::Sender<IncomingRequest>,
) -> N::RequestResponseProtocolConfig {
	N::request_response_config(
		generate_protocol_name(genesis_hash, fork_id).into(),
		std::iter::once(generate_legacy_protocol_name(protocol_id).into()).collect(),
		1024 * 1024,
		MAX_RESPONSE_SIZE,
		Duration::from_secs(40),
		Some(inbound_queue),
	)
}

/// Generate the state protocol name from the genesis hash and fork id.
fn generate_protocol_name<Hash: AsRef<[u8]>>(genesis_hash: Hash, fork_id: Option<&str>) -> String {
	let genesis_hash = genesis_hash.as_ref();
	if let Some(fork_id) = fork_id {
		format!("/{}/{}/state/2", array_bytes::bytes2hex("", genesis_hash), fork_id)
	} else {
		format!("/{}/state/2", array_bytes::bytes2hex("", genesis_hash))
	}
}

/// Generate the legacy state protocol name from the chain-specific protocol identifier.
fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String {
	format!("/{}/state/2", protocol_id.as_ref())
}
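
// A small illustrative test of the protocol name formats produced above; the genesis
// hash and protocol id used here are arbitrary example values.
#[cfg(test)]
mod protocol_name_tests {
	use super::*;

	#[test]
	fn state_protocol_names_have_expected_format() {
		// Hypothetical 4-byte "genesis hash", which hex-encodes to "deadbeef".
		let genesis: [u8; 4] = [0xde, 0xad, 0xbe, 0xef];

		assert_eq!(generate_protocol_name(genesis, Some("my-fork")), "/deadbeef/my-fork/state/2");
		assert_eq!(generate_protocol_name(genesis, None), "/deadbeef/state/2");
		assert_eq!(generate_legacy_protocol_name(&ProtocolId::from("dot")), "/dot/state/2");
	}
}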

/// The key of [`StateRequestHandler::seen_requests`].
#[derive(Eq, PartialEq, Clone)]
struct SeenRequestsKey<B: BlockT> {
	peer: PeerId,
	block: B::Hash,
	start: Vec<Vec<u8>>,
}

#[allow(clippy::derived_hash_with_manual_eq)]
impl<B: BlockT> Hash for SeenRequestsKey<B> {
	fn hash<H: Hasher>(&self, state: &mut H) {
		self.peer.hash(state);
		self.block.hash(state);
		self.start.hash(state);
	}
}

/// The value of [`StateRequestHandler::seen_requests`].
enum SeenRequestsValue {
	/// First time we have seen the request.
	First,
	/// We have fulfilled the request `n` times.
	Fulfilled(usize),
}

/// Handler for incoming state requests from a remote peer.
pub struct StateRequestHandler<B: BlockT, Client> {
	client: Arc<Client>,
	request_receiver: async_channel::Receiver<IncomingRequest>,
	/// Maps from request to number of times we have seen this request.
	///
	/// This is used to check if a peer is spamming us with the same request.
	seen_requests: LruMap<SeenRequestsKey<B>, SeenRequestsValue>,
}

impl<B, Client> StateRequestHandler<B, Client>
where
	B: BlockT,
	Client: BlockBackend<B> + ProofProvider<B> + Send + Sync + 'static,
{
	/// Create a new [`StateRequestHandler`].
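	///
	/// A minimal wiring sketch (the `NetworkWorker` backend choice and the `client`,
	/// `spawn_handle`, and `num_full_peers` names are assumptions for illustration, not
	/// APIs defined in this file):
	///
	/// ```ignore
	/// let (handler, protocol_config) = StateRequestHandler::new::<NetworkWorker<B, B::Hash>>(
	/// 	&protocol_id,
	/// 	fork_id,
	/// 	client.clone(),
	/// 	num_full_peers,
	/// );
	/// // Register `protocol_config` with the network configuration, then drive the
	/// // handler as a background task so it can answer incoming requests.
	/// spawn_handle.spawn("state-request-handler", Some("networking"), handler.run());
	/// ```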
	pub fn new<N: NetworkBackend<B, <B as BlockT>::Hash>>(
		protocol_id: &ProtocolId,
		fork_id: Option<&str>,
		client: Arc<Client>,
		num_peer_hint: usize,
	) -> (Self, N::RequestResponseProtocolConfig) {
		// Reserve enough request slots for one request per peer when we are at the maximum
		// number of peers.
		let capacity = std::cmp::max(num_peer_hint, 1);
		let (tx, request_receiver) = async_channel::bounded(capacity);

		let protocol_config = generate_protocol_config::<_, B, N>(
			protocol_id,
			client
				.block_hash(0u32.into())
				.ok()
				.flatten()
				.expect("Genesis block exists; qed"),
			fork_id,
			tx,
		);

		let capacity = ByLength::new(num_peer_hint.max(1) as u32 * 2);
		let seen_requests = LruMap::new(capacity);

		(Self { client, request_receiver, seen_requests }, protocol_config)
	}

	/// Run [`StateRequestHandler`].
	pub async fn run(mut self) {
		while let Some(request) = self.request_receiver.next().await {
			let IncomingRequest { peer, payload, pending_response } = request;

			match self.handle_request(payload, pending_response, &peer) {
				Ok(()) => debug!(target: LOG_TARGET, "Handled state request from {}.", peer),
				Err(e) => debug!(
					target: LOG_TARGET,
					"Failed to handle state request from {}: {}", peer, e,
				),
			}
		}
	}

	fn handle_request(
		&mut self,
		payload: Vec<u8>,
		pending_response: oneshot::Sender<OutgoingResponse>,
		peer: &PeerId,
	) -> Result<(), HandleRequestError> {
		let request = StateRequest::decode(&payload[..])?;
		let block: B::Hash = Decode::decode(&mut request.block.as_ref())?;

		let key = SeenRequestsKey { peer: *peer, block, start: request.start.clone() };

		let mut reputation_changes = Vec::new();

		match self.seen_requests.get(&key) {
			Some(SeenRequestsValue::First) => {},
			Some(SeenRequestsValue::Fulfilled(ref mut requests)) => {
				*requests = requests.saturating_add(1);

				if *requests > MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER {
					reputation_changes.push(rep::SAME_REQUEST);
				}
			},
			None => {
				self.seen_requests.insert(key.clone(), SeenRequestsValue::First);
			},
		}

		trace!(
			target: LOG_TARGET,
			"Handling state request from {}: Block {:?}, Starting at {:x?}, no_proof={}",
			peer,
			request.block,
			&request.start,
			request.no_proof,
		);

		let result = if reputation_changes.is_empty() {
			let mut response = StateResponse::default();

			if !request.no_proof {
				let (proof, _count) = self.client.read_proof_collection(
					block,
					request.start.as_slice(),
					MAX_RESPONSE_BYTES,
				)?;
				response.proof = proof.encode();
			} else {
				let entries = self.client.storage_collection(
					block,
					request.start.as_slice(),
					MAX_RESPONSE_BYTES,
				)?;
				response.entries = entries
					.into_iter()
					.map(|(state, complete)| KeyValueStateEntry {
						state_root: state.state_root,
						entries: state
							.key_values
							.into_iter()
							.map(|(key, value)| StateEntry { key, value })
							.collect(),
						complete,
					})
					.collect();
			}

			trace!(
				target: LOG_TARGET,
				"StateResponse contains {} keys, {} proof nodes, from {:?} to {:?}",
				response.entries.len(),
				response.proof.len(),
				response.entries.get(0).and_then(|top| top
					.entries
					.first()
					.map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))),
				response.entries.get(0).and_then(|top| top
					.entries
					.last()
					.map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))),
			);
			if let Some(value) = self.seen_requests.get(&key) {
				// If this is the first time we have processed this request, we need to change
				// it to `Fulfilled`.
				if let SeenRequestsValue::First = value {
					*value = SeenRequestsValue::Fulfilled(1);
				}
			}

			let mut data = Vec::with_capacity(response.encoded_len());
			response.encode(&mut data)?;
			Ok(data)
		} else {
			Err(())
		};

		pending_response
			.send(OutgoingResponse { result, reputation_changes, sent_feedback: None })
			.map_err(|_| HandleRequestError::SendResponse)
	}
}

#[derive(Debug, thiserror::Error)]
enum HandleRequestError {
	#[error("Failed to decode request: {0}.")]
	DecodeProto(#[from] prost::DecodeError),

	#[error("Failed to encode response: {0}.")]
	EncodeProto(#[from] prost::EncodeError),

	#[error("Failed to decode block hash: {0}.")]
	InvalidHash(#[from] codec::Error),

	#[error(transparent)]
	Client(#[from] sp_blockchain::Error),

	#[error("Failed to send response.")]
	SendResponse,
}
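
// An illustrative sketch of the request wire format that `handle_request` decodes: a
// requesting peer encodes a `StateRequest` protobuf message with `prost`. The field
// values below are arbitrary placeholders chosen for the example.
#[cfg(test)]
mod state_request_encoding_tests {
	use super::*;

	#[test]
	fn state_request_round_trips_through_prost() {
		let request = StateRequest {
			// SCALE-encoded hash of the block whose state is being requested.
			block: vec![0u8; 32],
			// Resume point: the last key(s) received in a previous response; empty on
			// the first request.
			start: vec![b"last_received_key".to_vec()],
			// `true` asks for plain key/value entries instead of a compact proof.
			no_proof: true,
			..Default::default()
		};

		// Encode the payload as `handle_request` expects to receive it ...
		let mut payload = Vec::with_capacity(request.encoded_len());
		request.encode(&mut payload).expect("`Vec` grows as needed; qed");

		// ... and decode it back, mirroring `StateRequest::decode(&payload[..])` above.
		let decoded = StateRequest::decode(&payload[..]).expect("valid encoding; qed");
		assert_eq!(decoded.block, request.block);
		assert_eq!(decoded.start, request.start);
		assert!(decoded.no_proof);
	}
}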