// polkadot_service/builder/mod.rs
1// Copyright (C) Parity Technologies (UK) Ltd.
2// This file is part of Polkadot.
3
4// Polkadot is free software: you can redistribute it and/or modify
5// it under the terms of the GNU General Public License as published by
6// the Free Software Foundation, either version 3 of the License, or
7// (at your option) any later version.
8
9// Polkadot is distributed in the hope that it will be useful,
10// but WITHOUT ANY WARRANTY; without even the implied warranty of
11// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12// GNU General Public License for more details.
13
14// You should have received a copy of the GNU General Public License
15// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
16
17//! Polkadot service builder.
18
19#![cfg(feature = "full-node")]
20
21mod partial;
22use partial::PolkadotPartialComponents;
23pub(crate) use partial::{new_partial, new_partial_basics};
24
25use crate::{
26	grandpa_support, open_database,
27	overseer::{ExtendedOverseerGenArgs, OverseerGen, OverseerGenArgs},
28	parachains_db,
29	relay_chain_selection::SelectRelayChain,
30	workers, Chain, Error, FullBackend, FullClient, IdentifyVariant, IsParachainNode,
31	GRANDPA_JUSTIFICATION_PERIOD, KEEP_FINALIZED_FOR_LIVE_NETWORKS,
32};
33use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE;
34use gum::info;
35use mmr_gadget::MmrGadget;
36use polkadot_availability_recovery::FETCH_CHUNKS_THRESHOLD;
37use polkadot_node_core_approval_voting::Config as ApprovalVotingConfig;
38use polkadot_node_core_av_store::Config as AvailabilityConfig;
39use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig;
40use polkadot_node_core_chain_selection::{
41	self as chain_selection_subsystem, Config as ChainSelectionConfig,
42};
43use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig;
44use polkadot_node_network_protocol::{
45	peer_set::{PeerSet, PeerSetProtocolNames},
46	request_response::{IncomingRequest, ReqProtocolNames},
47};
48use polkadot_node_subsystem_types::DefaultSubsystemClient;
49use polkadot_overseer::{Handle, OverseerConnector};
50use polkadot_primitives::Block;
51use sc_client_api::Backend;
52use sc_network::config::FullNetworkConfiguration;
53use sc_network_sync::WarpSyncConfig;
54use sc_service::{Configuration, RpcHandlers, TaskManager};
55use sc_sysinfo::Metric;
56use sc_telemetry::TelemetryWorkerHandle;
57use sc_transaction_pool_api::OffchainTransactionPoolFactory;
58use sp_consensus_beefy::ecdsa_crypto;
59use sp_runtime::traits::Block as BlockT;
60use std::{collections::HashMap, sync::Arc, time::Duration};
61
/// Polkadot node service initialization parameters.
pub struct NewFullParams<OverseerGenerator: OverseerGen> {
	/// Whether this node runs as/alongside a parachain node (collator or full
	/// node); used to decide which networking protocols and subsystems to enable.
	pub is_parachain_node: IsParachainNode,
	/// When `true`, the BEEFY notification protocol is registered and the BEEFY
	/// gadget is started.
	pub enable_beefy: bool,
	/// Whether to enable the block authoring backoff on production networks
	/// where it isn't enabled by default.
	pub force_authoring_backoff: bool,
	/// Handle to an already-running telemetry worker, if any.
	pub telemetry_worker_handle: Option<TelemetryWorkerHandle>,
	/// The version of the node. TESTING ONLY: `None` can be passed to skip the node/worker version
	/// check, both on startup and in the workers.
	pub node_version: Option<String>,
	/// Whether the node is attempting to run as a secure validator.
	pub secure_validator_mode: bool,
	/// An optional path to a directory containing the workers.
	pub workers_path: Option<std::path::PathBuf>,
	/// Optional custom names for the prepare and execute workers.
	pub workers_names: Option<(String, String)>,
	/// An optional number of the maximum number of pvf execute workers.
	pub execute_workers_max_num: Option<usize>,
	/// An optional maximum number of pvf workers that can be spawned in the pvf prepare pool for
	/// tasks with the priority below critical.
	pub prepare_workers_soft_max_num: Option<usize>,
	/// An optional absolute number of pvf workers that can be spawned in the pvf prepare pool.
	pub prepare_workers_hard_max_num: Option<usize>,
	/// How long finalized data should be kept in the availability store (in hours)
	pub keep_finalized_for: Option<u32>,
	/// Generator used to construct the overseer and its subsystems.
	pub overseer_gen: OverseerGenerator,
	/// Override for the overseer's message channel capacity; forwarded to the
	/// overseer generator.
	pub overseer_message_channel_capacity_override: Option<usize>,
	// Only read when the `malus` feature is enabled: delays finality by the
	// given number of blocks via a GRANDPA voting rule.
	#[allow(dead_code)]
	pub malus_finality_delay: Option<u32>,
	/// Hardware benchmark results; printed at startup and checked against the
	/// reference hardware requirements for authorities.
	pub hwbench: Option<sc_sysinfo::HwBench>,
}
94
/// Completely built polkadot node service.
pub struct NewFull {
	/// Manages all spawned tasks; keep alive for the lifetime of the node.
	pub task_manager: TaskManager,
	/// The full client.
	pub client: Arc<FullClient>,
	/// Handle to the overseer; `Some` only when the overseer was started
	/// (i.e. for authorities and nodes running alongside a parachain node).
	pub overseer_handle: Option<Handle>,
	/// The underlying network service.
	pub network: Arc<dyn sc_network::service::traits::NetworkService>,
	/// The syncing service.
	pub sync_service: Arc<sc_network_sync::SyncingService<Block>>,
	/// Handlers for the node's RPC endpoints.
	pub rpc_handlers: RpcHandlers,
	/// The client backend.
	pub backend: Arc<FullBackend>,
}
105
/// Builder for the full polkadot node service.
///
/// Created via [`PolkadotServiceBuilder::new`], which performs the partial
/// (shared) setup; the node is completed with [`PolkadotServiceBuilder::build`].
pub struct PolkadotServiceBuilder<OverseerGenerator, Network>
where
	OverseerGenerator: OverseerGen,
	Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
{
	// Node configuration (may have been adjusted during partial setup).
	config: Configuration,
	// Initialization parameters supplied by the caller.
	params: NewFullParams<OverseerGenerator>,
	// Connector used later to wire the overseer to its handle.
	overseer_connector: OverseerConnector,
	// Client, backend, task manager, import queue, etc. from `new_partial`.
	partial_components: PolkadotPartialComponents<SelectRelayChain<FullBackend>>,
	// Network configuration; extra protocols can be added before `build`.
	net_config: FullNetworkConfiguration<Block, <Block as BlockT>::Hash, Network>,
}
117
118impl<OverseerGenerator, Network> PolkadotServiceBuilder<OverseerGenerator, Network>
119where
120	OverseerGenerator: OverseerGen,
121	Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
122{
123	/// Create new polkadot service builder.
124	pub fn new(
125		mut config: Configuration,
126		params: NewFullParams<OverseerGenerator>,
127	) -> Result<PolkadotServiceBuilder<OverseerGenerator, Network>, Error> {
128		let basics = new_partial_basics(&mut config, params.telemetry_worker_handle.clone())?;
129
130		let prometheus_registry = config.prometheus_registry().cloned();
131		let overseer_connector = OverseerConnector::default();
132		let overseer_handle = Handle::new(overseer_connector.handle());
133		let auth_or_collator = config.role.is_authority() || params.is_parachain_node.is_collator();
134
135		let select_chain = if auth_or_collator {
136			let metrics = polkadot_node_subsystem_util::metrics::Metrics::register(
137				prometheus_registry.as_ref(),
138			)?;
139
140			SelectRelayChain::new_with_overseer(
141				basics.backend.clone(),
142				overseer_handle.clone(),
143				metrics,
144				Some(basics.task_manager.spawn_handle()),
145			)
146		} else {
147			SelectRelayChain::new_longest_chain(basics.backend.clone())
148		};
149
150		let partial_components =
151			new_partial::<SelectRelayChain<_>>(&mut config, basics, select_chain)?;
152
153		let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(
154			&config.network,
155			config.prometheus_config.as_ref().map(|cfg| cfg.registry.clone()),
156		);
157
158		Ok(PolkadotServiceBuilder {
159			config,
160			params,
161			overseer_connector,
162			partial_components,
163			net_config,
164		})
165	}
166
167	/// Get the genesis hash of the polkadot service being built.
168	pub fn genesis_hash(&self) -> <Block as BlockT>::Hash {
169		self.partial_components.client.chain_info().genesis_hash
170	}
171
172	/// Add extra request-response protocol to the polkadot service.
173	pub fn add_extra_request_response_protocol(
174		&mut self,
175		config: Network::RequestResponseProtocolConfig,
176	) {
177		self.net_config.add_request_response_protocol(config);
178	}
179
	/// Build the polkadot service.
	///
	/// Consumes the builder and wires up, in order: networking protocols
	/// (GRANDPA, BEEFY, parachain peer sets, request-response), warp sync, the
	/// overseer (for authorities / nodes alongside a parachain node), offchain
	/// workers, RPC, authority discovery, BABE block authoring, the BEEFY and
	/// MMR gadgets, and the GRANDPA voter — then returns the assembled
	/// [`NewFull`] node.
	pub fn build(self) -> Result<NewFull, Error> {
		// Destructure everything up front so the parts can be moved into the
		// tasks spawned below.
		let Self {
			config,
			params:
				NewFullParams {
					is_parachain_node,
					enable_beefy,
					force_authoring_backoff,
					telemetry_worker_handle: _,
					node_version,
					secure_validator_mode,
					workers_path,
					workers_names,
					overseer_gen,
					overseer_message_channel_capacity_override,
					malus_finality_delay: _malus_finality_delay,
					hwbench,
					execute_workers_max_num,
					prepare_workers_soft_max_num,
					prepare_workers_hard_max_num,
					keep_finalized_for,
				},
			overseer_connector,
			partial_components:
				sc_service::PartialComponents::<_, _, SelectRelayChain<_>, _, _, _> {
					client,
					backend,
					mut task_manager,
					keystore_container,
					select_chain,
					import_queue,
					transaction_pool,
					other:
						(rpc_extensions_builder, import_setup, rpc_setup, slot_duration, mut telemetry),
				},
			mut net_config,
		} = self;

		// Pull out the configuration values needed later, before `config` is
		// moved into `spawn_tasks`.
		let role = config.role;
		let auth_or_collator = config.role.is_authority() || is_parachain_node.is_collator();
		let is_offchain_indexing_enabled = config.offchain_worker.indexing_enabled;
		let force_authoring = config.force_authoring;
		let disable_grandpa = config.disable_grandpa;
		let name = config.network.node_name.clone();
		let backoff_authoring_blocks = if !force_authoring_backoff &&
			(config.chain_spec.is_polkadot() || config.chain_spec.is_kusama())
		{
			// the block authoring backoff is disabled by default on production networks
			None
		} else {
			let mut backoff = sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default();

			if config.chain_spec.is_rococo() ||
				config.chain_spec.is_versi() ||
				config.chain_spec.is_dev()
			{
				// on testnets that are in flux (like rococo or versi), finality has stalled
				// sometimes due to operational issues and it's annoying to slow down block
				// production to 1 block per hour.
				backoff.max_interval = 10;
			}

			Some(backoff)
		};
		let shared_voter_state = rpc_setup;
		let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;
		let auth_disc_public_addresses = config.network.public_addresses.clone();

		let genesis_hash = client.chain_info().genesis_hash;
		let peer_store_handle = net_config.peer_store_handle();

		let prometheus_registry = config.prometheus_registry().cloned();
		let metrics = Network::register_notification_metrics(
			config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
		);

		// Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change
		// anything in terms of behaviour, but makes the logs more consistent with the other
		// Substrate nodes.
		let grandpa_protocol_name =
			sc_consensus_grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec);
		let (grandpa_protocol_config, grandpa_notification_service) =
			sc_consensus_grandpa::grandpa_peers_set_config::<_, Network>(
				grandpa_protocol_name.clone(),
				metrics.clone(),
				Arc::clone(&peer_store_handle),
			);
		net_config.add_notification_protocol(grandpa_protocol_config);

		let beefy_gossip_proto_name =
			sc_consensus_beefy::gossip_protocol_name(&genesis_hash, config.chain_spec.fork_id());
		// `beefy_on_demand_justifications_handler` is given to `beefy-gadget` task to be run,
		// while `beefy_req_resp_cfg` is added to `config.network.request_response_protocols`.
		let (beefy_on_demand_justifications_handler, beefy_req_resp_cfg) =
			sc_consensus_beefy::communication::request_response::BeefyJustifsRequestHandler::new::<
				_,
				Network,
			>(
				&genesis_hash,
				config.chain_spec.fork_id(),
				client.clone(),
				prometheus_registry.clone(),
			);
		// The notification service doubles as the "is BEEFY enabled" flag for
		// the gadget startup further below.
		let beefy_notification_service = match enable_beefy {
			false => None,
			true => {
				let (beefy_notification_config, beefy_notification_service) =
					sc_consensus_beefy::communication::beefy_peers_set_config::<_, Network>(
						beefy_gossip_proto_name.clone(),
						metrics.clone(),
						Arc::clone(&peer_store_handle),
					);

				net_config.add_notification_protocol(beefy_notification_config);
				net_config.add_request_response_protocol(beefy_req_resp_cfg);
				Some(beefy_notification_service)
			},
		};

		// validation/collation protocols are enabled only if `Overseer` is enabled
		let peerset_protocol_names =
			PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id());

		// If this is a validator or running alongside a parachain node, we need to enable the
		// networking protocols.
		//
		// Collators and parachain full nodes require the collator and validator networking to send
		// collations and to be able to recover PoVs.
		let notification_services = if role.is_authority() ||
			is_parachain_node.is_running_alongside_parachain_node()
		{
			use polkadot_network_bridge::{peer_sets_info, IsAuthority};
			let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No };

			peer_sets_info::<_, Network>(
				is_authority,
				&peerset_protocol_names,
				metrics.clone(),
				Arc::clone(&peer_store_handle),
			)
			.into_iter()
			.map(|(config, (peerset, service))| {
				net_config.add_notification_protocol(config);
				(peerset, service)
			})
			.collect::<HashMap<PeerSet, Box<dyn sc_network::NotificationService>>>()
		} else {
			std::collections::HashMap::new()
		};

		let req_protocol_names = ReqProtocolNames::new(&genesis_hash, config.chain_spec.fork_id());

		// Register the parachain request-response protocols unconditionally and
		// keep the receiving ends; they are handed to the overseer generator.
		let (collation_req_v1_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);
		let (collation_req_v2_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);
		let (available_data_req_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);
		let (pov_req_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);
		let (chunk_req_v1_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);
		let (chunk_req_v2_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);

		// Kusama has a set of known GRANDPA forks that warp sync must respect.
		let grandpa_hard_forks = if config.chain_spec.is_kusama() {
			grandpa_support::kusama_hard_forks()
		} else {
			Vec::new()
		};

		// `import_setup.1` is the GRANDPA link half produced by `new_partial`.
		let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
			backend.clone(),
			import_setup.1.shared_authority_set().clone(),
			grandpa_hard_forks,
		));

		// Nodes running alongside a parachain node only need the minimal
		// overseer; everyone else gets the extended subsystem configuration.
		let ext_overseer_args = if is_parachain_node.is_running_alongside_parachain_node() {
			None
		} else {
			let parachains_db = open_database(&config.database)?;
			// PVF validation (and thus the worker binaries) is only needed by
			// authorities.
			let candidate_validation_config = if role.is_authority() {
				let (prep_worker_path, exec_worker_path) = workers::determine_workers_paths(
					workers_path,
					workers_names,
					node_version.clone(),
				)?;
				log::info!("🚀 Using prepare-worker binary at: {:?}", prep_worker_path);
				log::info!("🚀 Using execute-worker binary at: {:?}", exec_worker_path);

				Some(CandidateValidationConfig {
					artifacts_cache_path: config
						.database
						.path()
						.ok_or(Error::DatabasePathRequired)?
						.join("pvf-artifacts"),
					node_version,
					secure_validator_mode,
					prep_worker_path,
					exec_worker_path,
					// Default execution workers is 4 because we have 8 cores on the reference
					// hardware, and this accounts for 50% of that cpu capacity.
					pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or(4),
					pvf_prepare_workers_soft_max_num: prepare_workers_soft_max_num.unwrap_or(1),
					pvf_prepare_workers_hard_max_num: prepare_workers_hard_max_num.unwrap_or(2),
				})
			} else {
				None
			};
			// These two protocols are only needed with the extended overseer.
			let (candidate_req_v2_receiver, cfg) =
				IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
			net_config.add_request_response_protocol(cfg);
			let (dispute_req_receiver, cfg) =
				IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
			net_config.add_request_response_protocol(cfg);
			let approval_voting_config = ApprovalVotingConfig {
				col_approval_data: parachains_db::REAL_COLUMNS.col_approval_data,
				slot_duration_millis: slot_duration.as_millis() as u64,
			};
			let dispute_coordinator_config = DisputeCoordinatorConfig {
				col_dispute_data: parachains_db::REAL_COLUMNS.col_dispute_coordinator_data,
			};
			let chain_selection_config = ChainSelectionConfig {
				col_data: parachains_db::REAL_COLUMNS.col_chain_selection_data,
				stagnant_check_interval: Default::default(),
				stagnant_check_mode: chain_selection_subsystem::StagnantCheckMode::PruneOnly,
			};

			// Kusama + testnets get a higher threshold, we are conservative on Polkadot for now.
			let fetch_chunks_threshold =
				if config.chain_spec.is_polkadot() { None } else { Some(FETCH_CHUNKS_THRESHOLD) };

			let availability_config = AvailabilityConfig {
				col_data: parachains_db::REAL_COLUMNS.col_availability_data,
				col_meta: parachains_db::REAL_COLUMNS.col_availability_meta,
				// Only Rococo honours the custom `keep_finalized_for` override.
				keep_finalized_for: if matches!(config.chain_spec.identify_chain(), Chain::Rococo) {
					keep_finalized_for.unwrap_or(1)
				} else {
					KEEP_FINALIZED_FOR_LIVE_NETWORKS
				},
			};

			Some(ExtendedOverseerGenArgs {
				keystore: keystore_container.local_keystore(),
				parachains_db,
				candidate_validation_config,
				availability_config,
				pov_req_receiver,
				chunk_req_v1_receiver,
				chunk_req_v2_receiver,
				candidate_req_v2_receiver,
				approval_voting_config,
				dispute_req_receiver,
				dispute_coordinator_config,
				chain_selection_config,
				fetch_chunks_threshold,
			})
		};

		// Start the network; all protocols must have been registered on
		// `net_config` by this point.
		let (network, system_rpc_tx, tx_handler_controller, sync_service) =
			sc_service::build_network(sc_service::BuildNetworkParams {
				config: &config,
				net_config,
				client: client.clone(),
				transaction_pool: transaction_pool.clone(),
				spawn_handle: task_manager.spawn_handle(),
				import_queue,
				block_announce_validator_builder: None,
				warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)),
				block_relay: None,
				metrics,
			})?;

		if config.offchain_worker.enabled {
			use futures::FutureExt;

			task_manager.spawn_handle().spawn(
				"offchain-workers-runner",
				"offchain-work",
				sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
					runtime_api_provider: client.clone(),
					keystore: Some(keystore_container.keystore()),
					offchain_db: backend.offchain_storage(),
					transaction_pool: Some(OffchainTransactionPoolFactory::new(
						transaction_pool.clone(),
					)),
					network_provider: Arc::new(network.clone()),
					is_validator: role.is_authority(),
					enable_http_requests: false,
					custom_extensions: move |_| vec![],
				})?
				.run(client.clone(), task_manager.spawn_handle())
				.boxed(),
			);
		}

		// `config` is consumed by `spawn_tasks`; keep a copy of the network
		// config for authority discovery below.
		let network_config = config.network.clone();
		let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
			config,
			backend: backend.clone(),
			client: client.clone(),
			keystore: keystore_container.keystore(),
			network: network.clone(),
			sync_service: sync_service.clone(),
			rpc_builder: Box::new(rpc_extensions_builder),
			transaction_pool: transaction_pool.clone(),
			task_manager: &mut task_manager,
			system_rpc_tx,
			tx_handler_controller,
			telemetry: telemetry.as_mut(),
		})?;

		// Print the benchmark results and warn authorities whose hardware does
		// not meet the reference requirements.
		if let Some(hwbench) = hwbench {
			sc_sysinfo::print_hwbench(&hwbench);
			match SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench, role.is_authority()) {
				Err(err) if role.is_authority() => {
					// Parallel-Blake2 failures get a dedicated message since
					// that check relates to the physical-core requirement.
					if err
						.0
						.iter()
						.any(|failure| matches!(failure.metric, Metric::Blake2256Parallel { .. }))
					{
						log::warn!(
						"⚠️  Starting January 2025 the hardware will fail the minimal physical CPU cores requirements {} for role 'Authority',\n\
						    find out more when this will become mandatory at:\n\
						    https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware",
						err
					);
					}
					if err
						.0
						.iter()
						.any(|failure| !matches!(failure.metric, Metric::Blake2256Parallel { .. }))
					{
						log::warn!(
						"⚠️  The hardware does not meet the minimal requirements {} for role 'Authority' find out more at:\n\
						https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware",
						err
					);
					}
				},
				_ => {},
			}

			if let Some(ref mut telemetry) = telemetry {
				let telemetry_handle = telemetry.handle();
				task_manager.spawn_handle().spawn(
					"telemetry_hwbench",
					None,
					sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
				);
			}
		}

		// Split the import setup into its consensus-specific parts.
		let (block_import, link_half, babe_link, beefy_links) = import_setup;

		let overseer_client = client.clone();
		let spawner = task_manager.spawn_handle();

		let authority_discovery_service =
		// We need the authority discovery if this node is either a validator or running alongside a parachain node.
		// Parachains node require the authority discovery for finding relay chain validators for sending
		// their PoVs or recovering PoVs.
		if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() {
			use futures::StreamExt;
			use sc_network::{Event, NetworkEventStream};

			let authority_discovery_role = if role.is_authority() {
				sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore())
			} else {
				// don't publish our addresses when we're not an authority (collator, cumulus, ..)
				sc_authority_discovery::Role::Discover
			};
			// Only DHT events are relevant to the authority-discovery worker.
			let dht_event_stream =
				network.event_stream("authority-discovery").filter_map(|e| async move {
					match e {
						Event::Dht(e) => Some(e),
						_ => None,
					}
				});
			let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config(
				sc_authority_discovery::WorkerConfig {
					publish_non_global_ips: auth_disc_publish_non_global_ips,
					public_addresses: auth_disc_public_addresses,
					// Require that authority discovery records are signed.
					strict_record_validation: true,
					persisted_cache_directory: network_config.net_config_path,
					..Default::default()
				},
				client.clone(),
				Arc::new(network.clone()),
				Box::pin(dht_event_stream),
				authority_discovery_role,
				prometheus_registry.clone(),
				task_manager.spawn_handle(),
			);

			task_manager.spawn_handle().spawn(
				"authority-discovery-worker",
				Some("authority-discovery"),
				Box::pin(worker.run()),
			);
			Some(service)
		} else {
			None
		};

		let runtime_client = Arc::new(DefaultSubsystemClient::new(
			overseer_client.clone(),
			OffchainTransactionPoolFactory::new(transaction_pool.clone()),
		));

		// The overseer is only started when authority discovery is available,
		// i.e. for authorities and nodes running alongside a parachain node.
		let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service
		{
			let (overseer, overseer_handle) = overseer_gen
				.generate::<sc_service::SpawnTaskHandle, DefaultSubsystemClient<FullClient>>(
					overseer_connector,
					OverseerGenArgs {
						runtime_client,
						network_service: network.clone(),
						sync_service: sync_service.clone(),
						authority_discovery_service,
						collation_req_v1_receiver,
						collation_req_v2_receiver,
						available_data_req_receiver,
						registry: prometheus_registry.as_ref(),
						spawner,
						is_parachain_node,
						overseer_message_channel_capacity_override,
						req_protocol_names,
						peerset_protocol_names,
						notification_services,
					},
					ext_overseer_args,
				)
				.map_err(|e| {
					gum::error!("Failed to init overseer: {}", e);
					e
				})?;
			let handle = Handle::new(overseer_handle.clone());

			{
				let handle = handle.clone();
				task_manager.spawn_essential_handle().spawn_blocking(
					"overseer",
					None,
					Box::pin(async move {
						use futures::{pin_mut, select, FutureExt};

						// Forward client import/finality events into the
						// overseer alongside running the overseer itself;
						// either one finishing ends the task.
						let forward = polkadot_overseer::forward_events(overseer_client, handle);

						let forward = forward.fuse();
						let overseer_fut = overseer.run().fuse();

						pin_mut!(overseer_fut);
						pin_mut!(forward);

						select! {
							() = forward => (),
							() = overseer_fut => (),
							complete => (),
						}
					}),
				);
			}
			Some(handle)
		} else {
			assert!(
				!auth_or_collator,
				"Precondition congruence (false) is guaranteed by manual checking. qed"
			);
			None
		};

		// BABE block authoring — authorities only.
		if role.is_authority() {
			let proposer = sc_basic_authorship::ProposerFactory::new(
				task_manager.spawn_handle(),
				client.clone(),
				transaction_pool.clone(),
				prometheus_registry.as_ref(),
				telemetry.as_ref().map(|x| x.handle()),
			);

			let client_clone = client.clone();
			// Authoring requires the overseer for the parachains inherent.
			let overseer_handle =
				overseer_handle.as_ref().ok_or(Error::AuthoritiesRequireRealOverseer)?.clone();
			let slot_duration = babe_link.config().slot_duration();
			let babe_config = sc_consensus_babe::BabeParams {
				keystore: keystore_container.keystore(),
				client: client.clone(),
				select_chain,
				block_import,
				env: proposer,
				sync_oracle: sync_service.clone(),
				justification_sync_link: sync_service.clone(),
				create_inherent_data_providers: move |parent, ()| {
					let client_clone = client_clone.clone();
					let overseer_handle = overseer_handle.clone();

					async move {
						let parachain =
						polkadot_node_core_parachains_inherent::ParachainsInherentDataProvider::new(
							client_clone,
							overseer_handle,
							parent,
						);

						let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

						let slot =
						sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
							*timestamp,
							slot_duration,
						);

						Ok((slot, timestamp, parachain))
					}
				},
				force_authoring,
				backoff_authoring_blocks,
				babe_link,
				block_proposal_slot_portion: sc_consensus_babe::SlotProportion::new(2f32 / 3f32),
				max_block_proposal_slot_portion: None,
				telemetry: telemetry.as_ref().map(|x| x.handle()),
			};

			let babe = sc_consensus_babe::start_babe(babe_config)?;
			task_manager.spawn_essential_handle().spawn_blocking("babe", None, babe);
		}

		// if the node isn't actively participating in consensus then it doesn't
		// need a keystore, regardless of which protocol we use below.
		let keystore_opt =
			if role.is_authority() { Some(keystore_container.keystore()) } else { None };

		// beefy is enabled if its notification service exists
		if let Some(notification_service) = beefy_notification_service {
			let justifications_protocol_name =
				beefy_on_demand_justifications_handler.protocol_name();
			let network_params = sc_consensus_beefy::BeefyNetworkParams {
				network: Arc::new(network.clone()),
				sync: sync_service.clone(),
				gossip_protocol_name: beefy_gossip_proto_name,
				justifications_protocol_name,
				notification_service,
				_phantom: core::marker::PhantomData::<Block>,
			};
			let payload_provider = sp_consensus_beefy::mmr::MmrRootProvider::new(client.clone());
			let beefy_params = sc_consensus_beefy::BeefyParams {
				client: client.clone(),
				backend: backend.clone(),
				payload_provider,
				runtime: client.clone(),
				key_store: keystore_opt.clone(),
				network_params,
				min_block_delta: 8,
				prometheus_registry: prometheus_registry.clone(),
				links: beefy_links,
				on_demand_justifications_handler: beefy_on_demand_justifications_handler,
				is_authority: role.is_authority(),
			};

			let gadget = sc_consensus_beefy::start_beefy_gadget::<
				_,
				_,
				_,
				_,
				_,
				_,
				_,
				ecdsa_crypto::AuthorityId,
			>(beefy_params);

			// BEEFY is part of consensus, if it fails we'll bring the node down with it to make
			// sure it is noticed.
			task_manager
				.spawn_essential_handle()
				.spawn_blocking("beefy-gadget", None, gadget);
		}
		// When offchain indexing is enabled, MMR gadget should also run.
		if is_offchain_indexing_enabled {
			task_manager.spawn_essential_handle().spawn_blocking(
				"mmr-gadget",
				None,
				MmrGadget::start(
					client.clone(),
					backend.clone(),
					sp_mmr_primitives::INDEXING_PREFIX.to_vec(),
				),
			);
		}

		let config = sc_consensus_grandpa::Config {
			// FIXME substrate#1578 make this available through chainspec
			// Grandpa performance can be improved a bit by tuning this parameter, see:
			// https://github.com/paritytech/polkadot/issues/5464
			gossip_duration: Duration::from_millis(1000),
			justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD,
			name: Some(name),
			observer_enabled: false,
			keystore: keystore_opt,
			local_role: role,
			telemetry: telemetry.as_ref().map(|x| x.handle()),
			protocol_name: grandpa_protocol_name,
		};

		let enable_grandpa = !disable_grandpa;
		if enable_grandpa {
			// start the full GRANDPA voter
			// NOTE: unlike in substrate we are currently running the full
			// GRANDPA voter protocol for all full nodes (regardless of whether
			// they're validators or not). at this point the full voter should
			// provide better guarantees of block and vote data availability than
			// the observer.

			let mut voting_rules_builder = sc_consensus_grandpa::VotingRulesBuilder::default();

			// Without the `malus` feature the finality delay is always
			// disabled (this shadows the destructured parameter).
			#[cfg(not(feature = "malus"))]
			let _malus_finality_delay = None;

			if let Some(delay) = _malus_finality_delay {
				info!(?delay, "Enabling malus finality delay",);
				voting_rules_builder =
					voting_rules_builder.add(sc_consensus_grandpa::BeforeBestBlockBy(delay));
			};

			let grandpa_config = sc_consensus_grandpa::GrandpaParams {
				config,
				link: link_half,
				network: network.clone(),
				sync: sync_service.clone(),
				voting_rule: voting_rules_builder.build(),
				prometheus_registry: prometheus_registry.clone(),
				shared_voter_state,
				telemetry: telemetry.as_ref().map(|x| x.handle()),
				notification_service: grandpa_notification_service,
				offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
					transaction_pool.clone(),
				),
			};

			task_manager.spawn_essential_handle().spawn_blocking(
				"grandpa-voter",
				None,
				sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?,
			);
		}

		Ok(NewFull {
			task_manager,
			client,
			overseer_handle,
			network,
			sync_service,
			rpc_handlers,
			backend,
		})
	}
844}
845
846/// Create a new full node of arbitrary runtime and executor.
847///
848/// This is an advanced feature and not recommended for general use. Generally, `build_full` is
849/// a better choice.
850///
851/// `workers_path` is used to get the path to the directory where auxiliary worker binaries reside.
852/// If not specified, the main binary's directory is searched first, then `/usr/lib/polkadot` is
853/// searched. If the path points to an executable rather then directory, that executable is used
854/// both as preparation and execution worker (supposed to be used for tests only).
855pub fn new_full<
856	OverseerGenerator: OverseerGen,
857	Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
858>(
859	config: Configuration,
860	params: NewFullParams<OverseerGenerator>,
861) -> Result<NewFull, Error> {
862	PolkadotServiceBuilder::<OverseerGenerator, Network>::new(config, params)?.build()
863}