polkadot_service/builder/mod.rs

// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.

//! Polkadot service builder.

#![cfg(feature = "full-node")]

mod partial;
use partial::PolkadotPartialComponents;
pub(crate) use partial::{new_partial, new_partial_basics};

use crate::{
	grandpa_support, open_database,
	overseer::{ExtendedOverseerGenArgs, OverseerGen, OverseerGenArgs},
	parachains_db,
	relay_chain_selection::SelectRelayChain,
	workers, Chain, Error, FullBackend, FullClient, IdentifyVariant, IsParachainNode,
	GRANDPA_JUSTIFICATION_PERIOD, KEEP_FINALIZED_FOR_LIVE_NETWORKS,
};
use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE;
use gum::info;
use mmr_gadget::MmrGadget;
use polkadot_availability_recovery::FETCH_CHUNKS_THRESHOLD;
use polkadot_node_core_approval_voting::Config as ApprovalVotingConfig;
use polkadot_node_core_av_store::Config as AvailabilityConfig;
use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig;
use polkadot_node_core_chain_selection::{
	self as chain_selection_subsystem, Config as ChainSelectionConfig,
};
use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig;
use polkadot_node_network_protocol::{
	peer_set::{PeerSet, PeerSetProtocolNames},
	request_response::{IncomingRequest, ReqProtocolNames},
};
use polkadot_node_subsystem_types::DefaultSubsystemClient;
use polkadot_overseer::{Handle, OverseerConnector};
use polkadot_primitives::Block;
use sc_client_api::Backend;
use sc_network::config::FullNetworkConfiguration;
use sc_network_sync::WarpSyncConfig;
use sc_service::{Configuration, RpcHandlers, TaskManager};
use sc_sysinfo::Metric;
use sc_telemetry::TelemetryWorkerHandle;
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use sp_consensus_beefy::ecdsa_crypto;
use sp_runtime::traits::Block as BlockT;
use std::{
	collections::{HashMap, HashSet},
	sync::Arc,
	time::Duration,
};

/// Polkadot node service initialization parameters.
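///
/// A minimal construction sketch (illustrative only, not from the upstream source;
/// the `IsParachainNode::No` variant and the `overseer_gen` value are assumptions,
/// and the remaining optional fields fall back to their built-in defaults):
///
/// ```rust,ignore
/// let params = NewFullParams {
/// 	is_parachain_node: IsParachainNode::No, // plain relay chain node (assumed variant)
/// 	enable_beefy: true,
/// 	force_authoring_backoff: false,
/// 	telemetry_worker_handle: None,
/// 	node_version: None, // TESTING ONLY: skips the node/worker version check
/// 	secure_validator_mode: true,
/// 	workers_path: None, // search next to the binary, then in /usr/lib/polkadot
/// 	workers_names: None,
/// 	execute_workers_max_num: None,
/// 	prepare_workers_soft_max_num: None,
/// 	prepare_workers_hard_max_num: None,
/// 	keep_finalized_for: None,
/// 	overseer_gen, // some `OverseerGen` implementation, assumed in scope
/// 	overseer_message_channel_capacity_override: None,
/// 	malus_finality_delay: None,
/// 	hwbench: None,
/// 	invulnerable_ah_collators: Default::default(),
/// 	collator_protocol_hold_off: None,
/// };
/// ```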
pub struct NewFullParams<OverseerGenerator: OverseerGen> {
	pub is_parachain_node: IsParachainNode,
	pub enable_beefy: bool,
	/// Whether to enable the block authoring backoff on production networks
	/// where it isn't enabled by default.
	pub force_authoring_backoff: bool,
	pub telemetry_worker_handle: Option<TelemetryWorkerHandle>,
	/// The version of the node. TESTING ONLY: `None` can be passed to skip the node/worker version
	/// check, both on startup and in the workers.
	pub node_version: Option<String>,
	/// Whether the node is attempting to run as a secure validator.
	pub secure_validator_mode: bool,
	/// An optional path to a directory containing the workers.
	pub workers_path: Option<std::path::PathBuf>,
	/// Optional custom names for the prepare and execute workers.
	pub workers_names: Option<(String, String)>,
	/// An optional maximum number of PVF execute workers.
	pub execute_workers_max_num: Option<usize>,
	/// An optional soft maximum number of PVF workers that can be spawned in the PVF prepare pool
	/// for tasks with priority below critical.
	pub prepare_workers_soft_max_num: Option<usize>,
	/// An optional hard maximum number of PVF workers that can be spawned in the PVF prepare pool.
	pub prepare_workers_hard_max_num: Option<usize>,
	/// How long finalized data should be kept in the availability store (in hours).
	pub keep_finalized_for: Option<u32>,
	pub overseer_gen: OverseerGenerator,
	pub overseer_message_channel_capacity_override: Option<usize>,
	#[allow(dead_code)]
	pub malus_finality_delay: Option<u32>,
	pub hwbench: Option<sc_sysinfo::HwBench>,
	/// Set of invulnerable AH collator `PeerId`s.
	pub invulnerable_ah_collators: HashSet<polkadot_node_network_protocol::PeerId>,
	/// Override for the `HOLD_OFF_DURATION` constant.
	pub collator_protocol_hold_off: Option<Duration>,
}

/// Completely built polkadot node service.
pub struct NewFull {
	pub task_manager: TaskManager,
	pub client: Arc<FullClient>,
	pub overseer_handle: Option<Handle>,
	pub network: Arc<dyn sc_network::service::traits::NetworkService>,
	pub sync_service: Arc<sc_network_sync::SyncingService<Block>>,
	pub rpc_handlers: RpcHandlers,
	pub backend: Arc<FullBackend>,
}

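/// Polkadot service builder.
///
/// A minimal usage sketch (illustrative only; the `config`, `params`, `extra_cfg` and
/// `Network` values/types are assumed to be prepared by the caller):
///
/// ```rust,ignore
/// let mut builder = PolkadotServiceBuilder::<_, Network>::new(config, params)?;
/// // Optionally register additional request-response protocols before building,
/// // e.g. for a node embedding a parachain:
/// builder.add_extra_request_response_protocol(extra_cfg);
/// let NewFull { task_manager, client, .. } = builder.build()?;
/// ```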
pub struct PolkadotServiceBuilder<OverseerGenerator, Network>
where
	OverseerGenerator: OverseerGen,
	Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
{
	config: Configuration,
	params: NewFullParams<OverseerGenerator>,
	overseer_connector: OverseerConnector,
	partial_components: PolkadotPartialComponents<SelectRelayChain<FullBackend>>,
	net_config: FullNetworkConfiguration<Block, <Block as BlockT>::Hash, Network>,
}

impl<OverseerGenerator, Network> PolkadotServiceBuilder<OverseerGenerator, Network>
where
	OverseerGenerator: OverseerGen,
	Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
{
	/// Create new polkadot service builder.
	pub fn new(
		mut config: Configuration,
		params: NewFullParams<OverseerGenerator>,
	) -> Result<PolkadotServiceBuilder<OverseerGenerator, Network>, Error> {
		let basics = new_partial_basics(&mut config, params.telemetry_worker_handle.clone())?;

		let prometheus_registry = config.prometheus_registry().cloned();
		let overseer_connector = OverseerConnector::default();
		let overseer_handle = Handle::new(overseer_connector.handle());
		let auth_or_collator = config.role.is_authority() || params.is_parachain_node.is_collator();

		let select_chain = if auth_or_collator {
			let metrics = polkadot_node_subsystem_util::metrics::Metrics::register(
				prometheus_registry.as_ref(),
			)?;

			SelectRelayChain::new_with_overseer(
				basics.backend.clone(),
				overseer_handle.clone(),
				metrics,
				Some(basics.task_manager.spawn_handle()),
			)
		} else {
			SelectRelayChain::new_longest_chain(basics.backend.clone())
		};

		let partial_components =
			new_partial::<SelectRelayChain<_>>(&mut config, basics, select_chain)?;

		let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(
			&config.network,
			config.prometheus_config.as_ref().map(|cfg| cfg.registry.clone()),
		);

		Ok(PolkadotServiceBuilder {
			config,
			params,
			overseer_connector,
			partial_components,
			net_config,
		})
	}

	/// Get the genesis hash of the polkadot service being built.
	pub fn genesis_hash(&self) -> <Block as BlockT>::Hash {
		self.partial_components.client.chain_info().genesis_hash
	}

	/// Add an extra request-response protocol to the polkadot service.
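	///
	/// A hedged sketch of registering one more protocol before `build`, mirroring how
	/// this module wires its own receivers (`fork_id` and the request type behind the
	/// receiver are assumptions of the caller):
	///
	/// ```rust,ignore
	/// let req_protocol_names = ReqProtocolNames::new(&builder.genesis_hash(), fork_id);
	/// let (receiver, cfg) =
	/// 	IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
	/// builder.add_extra_request_response_protocol(cfg);
	/// // `receiver` is later handed to whatever subsystem serves the requests.
	/// ```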
	pub fn add_extra_request_response_protocol(
		&mut self,
		config: Network::RequestResponseProtocolConfig,
	) {
		self.net_config.add_request_response_protocol(config);
	}

	/// Build polkadot service.
	pub fn build(self) -> Result<NewFull, Error> {
		let Self {
			config,
			params:
				NewFullParams {
					is_parachain_node,
					enable_beefy,
					force_authoring_backoff,
					telemetry_worker_handle: _,
					node_version,
					secure_validator_mode,
					workers_path,
					workers_names,
					overseer_gen,
					overseer_message_channel_capacity_override,
					malus_finality_delay: _malus_finality_delay,
					hwbench,
					execute_workers_max_num,
					prepare_workers_soft_max_num,
					prepare_workers_hard_max_num,
					keep_finalized_for,
					invulnerable_ah_collators,
					collator_protocol_hold_off,
				},
			overseer_connector,
			partial_components:
				sc_service::PartialComponents::<_, _, SelectRelayChain<_>, _, _, _> {
					client,
					backend,
					mut task_manager,
					keystore_container,
					select_chain,
					import_queue,
					transaction_pool,
					other:
						(rpc_extensions_builder, import_setup, rpc_setup, slot_duration, mut telemetry),
				},
			mut net_config,
		} = self;

		let role = config.role;
		let auth_or_collator = config.role.is_authority() || is_parachain_node.is_collator();
		let is_offchain_indexing_enabled = config.offchain_worker.indexing_enabled;
		let force_authoring = config.force_authoring;
		let disable_grandpa = config.disable_grandpa;
		let name = config.network.node_name.clone();
		let backoff_authoring_blocks = if !force_authoring_backoff &&
			(config.chain_spec.is_polkadot() || config.chain_spec.is_kusama())
		{
			// the block authoring backoff is disabled by default on production networks
			None
		} else {
			let mut backoff = sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default();

			if config.chain_spec.is_rococo() ||
				config.chain_spec.is_versi() ||
				config.chain_spec.is_dev()
			{
				// On testnets that are in flux (like Rococo or Versi), finality has sometimes
				// stalled due to operational issues, and it's annoying to slow block production
				// down to one block per hour.
				backoff.max_interval = 10;
			}

			Some(backoff)
		};
		let shared_voter_state = rpc_setup;
		let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;
		let auth_disc_public_addresses = config.network.public_addresses.clone();

		let genesis_hash = client.chain_info().genesis_hash;
		let peer_store_handle = net_config.peer_store_handle();

		let prometheus_registry = config.prometheus_registry().cloned();
		let metrics = Network::register_notification_metrics(
			config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
		);

		// Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change
		// anything in terms of behaviour, but makes the logs more consistent with the other
		// Substrate nodes.
		let grandpa_protocol_name =
			sc_consensus_grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec);
		let (grandpa_protocol_config, grandpa_notification_service) =
			sc_consensus_grandpa::grandpa_peers_set_config::<_, Network>(
				grandpa_protocol_name.clone(),
				metrics.clone(),
				Arc::clone(&peer_store_handle),
			);
		net_config.add_notification_protocol(grandpa_protocol_config);

		let beefy_gossip_proto_name =
			sc_consensus_beefy::gossip_protocol_name(&genesis_hash, config.chain_spec.fork_id());
		// `beefy_on_demand_justifications_handler` is given to `beefy-gadget` task to be run,
		// while `beefy_req_resp_cfg` is added to `config.network.request_response_protocols`.
		let (beefy_on_demand_justifications_handler, beefy_req_resp_cfg) =
			sc_consensus_beefy::communication::request_response::BeefyJustifsRequestHandler::new::<
				_,
				Network,
			>(
				&genesis_hash,
				config.chain_spec.fork_id(),
				client.clone(),
				prometheus_registry.clone(),
			);
		let beefy_notification_service = match enable_beefy {
			false => None,
			true => {
				let (beefy_notification_config, beefy_notification_service) =
					sc_consensus_beefy::communication::beefy_peers_set_config::<_, Network>(
						beefy_gossip_proto_name.clone(),
						metrics.clone(),
						Arc::clone(&peer_store_handle),
					);

				net_config.add_notification_protocol(beefy_notification_config);
				net_config.add_request_response_protocol(beefy_req_resp_cfg);
				Some(beefy_notification_service)
			},
		};

		// validation/collation protocols are enabled only if `Overseer` is enabled
		let peerset_protocol_names =
			PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id());

		// If this is a validator or running alongside a parachain node, we need to enable the
		// networking protocols.
		//
		// Collators and parachain full nodes require the collator and validator networking to send
		// collations and to be able to recover PoVs.
		let notification_services = if role.is_authority() ||
			is_parachain_node.is_running_alongside_parachain_node()
		{
			use polkadot_network_bridge::{peer_sets_info, IsAuthority};
			let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No };

			peer_sets_info::<_, Network>(
				is_authority,
				&peerset_protocol_names,
				metrics.clone(),
				Arc::clone(&peer_store_handle),
			)
			.into_iter()
			.map(|(config, (peerset, service))| {
				net_config.add_notification_protocol(config);
				(peerset, service)
			})
			.collect::<HashMap<PeerSet, Box<dyn sc_network::NotificationService>>>()
		} else {
			std::collections::HashMap::new()
		};

		let req_protocol_names = ReqProtocolNames::new(&genesis_hash, config.chain_spec.fork_id());

		let (collation_req_v1_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);
		let (collation_req_v2_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);
		let (available_data_req_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);
		let (pov_req_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);
		let (chunk_req_v1_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);
		let (chunk_req_v2_receiver, cfg) =
			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
		net_config.add_request_response_protocol(cfg);

		let grandpa_hard_forks = if config.chain_spec.is_kusama() {
			grandpa_support::kusama_hard_forks()
		} else {
			Vec::new()
		};

		let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
			backend.clone(),
			import_setup.1.shared_authority_set().clone(),
			grandpa_hard_forks,
		));

		let ext_overseer_args = if is_parachain_node.is_running_alongside_parachain_node() {
			None
		} else {
			let parachains_db = open_database(&config.database)?;
			let candidate_validation_config = if role.is_authority() {
				let (prep_worker_path, exec_worker_path) = workers::determine_workers_paths(
					workers_path,
					workers_names,
					node_version.clone(),
				)?;
				log::info!("🚀 Using prepare-worker binary at: {:?}", prep_worker_path);
				log::info!("🚀 Using execute-worker binary at: {:?}", exec_worker_path);

				Some(CandidateValidationConfig {
					artifacts_cache_path: config
						.database
						.path()
						.ok_or(Error::DatabasePathRequired)?
						.join("pvf-artifacts"),
					node_version,
					secure_validator_mode,
					prep_worker_path,
					exec_worker_path,
					// The default number of execute workers is 4 because the reference hardware
					// has 8 cores, and this accounts for 50% of that CPU capacity.
					pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or(4),
					pvf_prepare_workers_soft_max_num: prepare_workers_soft_max_num.unwrap_or(1),
					pvf_prepare_workers_hard_max_num: prepare_workers_hard_max_num.unwrap_or(2),
				})
			} else {
				None
			};
			let (candidate_req_v2_receiver, cfg) =
				IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
			net_config.add_request_response_protocol(cfg);
			let (dispute_req_receiver, cfg) =
				IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
			net_config.add_request_response_protocol(cfg);
			let approval_voting_config = ApprovalVotingConfig {
				col_approval_data: parachains_db::REAL_COLUMNS.col_approval_data,
				slot_duration_millis: slot_duration.as_millis() as u64,
			};
			let dispute_coordinator_config = DisputeCoordinatorConfig {
				col_dispute_data: parachains_db::REAL_COLUMNS.col_dispute_coordinator_data,
			};
			let chain_selection_config = ChainSelectionConfig {
				col_data: parachains_db::REAL_COLUMNS.col_chain_selection_data,
				stagnant_check_interval: Default::default(),
				stagnant_check_mode: chain_selection_subsystem::StagnantCheckMode::PruneOnly,
			};

			// Kusama + testnets get a higher threshold, we are conservative on Polkadot for now.
			let fetch_chunks_threshold =
				if config.chain_spec.is_polkadot() { None } else { Some(FETCH_CHUNKS_THRESHOLD) };

			let availability_config = AvailabilityConfig {
				col_data: parachains_db::REAL_COLUMNS.col_availability_data,
				col_meta: parachains_db::REAL_COLUMNS.col_availability_meta,
				keep_finalized_for: if matches!(config.chain_spec.identify_chain(), Chain::Rococo) {
					keep_finalized_for.unwrap_or(1)
				} else {
					KEEP_FINALIZED_FOR_LIVE_NETWORKS
				},
			};

			Some(ExtendedOverseerGenArgs {
				keystore: keystore_container.local_keystore(),
				parachains_db,
				candidate_validation_config,
				availability_config,
				pov_req_receiver,
				chunk_req_v1_receiver,
				chunk_req_v2_receiver,
				candidate_req_v2_receiver,
				approval_voting_config,
				dispute_req_receiver,
				dispute_coordinator_config,
				chain_selection_config,
				fetch_chunks_threshold,
				invulnerable_ah_collators,
				collator_protocol_hold_off,
			})
		};

		let (network, system_rpc_tx, tx_handler_controller, sync_service) =
			sc_service::build_network(sc_service::BuildNetworkParams {
				config: &config,
				net_config,
				client: client.clone(),
				transaction_pool: transaction_pool.clone(),
				spawn_handle: task_manager.spawn_handle(),
				import_queue,
				block_announce_validator_builder: None,
				warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)),
				block_relay: None,
				metrics,
			})?;

		if config.offchain_worker.enabled {
			use futures::FutureExt;

			task_manager.spawn_handle().spawn(
				"offchain-workers-runner",
				"offchain-work",
				sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
					runtime_api_provider: client.clone(),
					keystore: Some(keystore_container.keystore()),
					offchain_db: backend.offchain_storage(),
					transaction_pool: Some(OffchainTransactionPoolFactory::new(
						transaction_pool.clone(),
					)),
					network_provider: Arc::new(network.clone()),
					is_validator: role.is_authority(),
					enable_http_requests: false,
					custom_extensions: move |_| vec![],
				})?
				.run(client.clone(), task_manager.spawn_handle())
				.boxed(),
			);
		}

		let network_config = config.network.clone();
		let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
			config,
			backend: backend.clone(),
			client: client.clone(),
			keystore: keystore_container.keystore(),
			network: network.clone(),
			sync_service: sync_service.clone(),
			rpc_builder: Box::new(rpc_extensions_builder),
			transaction_pool: transaction_pool.clone(),
			task_manager: &mut task_manager,
			system_rpc_tx,
			tx_handler_controller,
			telemetry: telemetry.as_mut(),
			tracing_execute_block: None,
		})?;

		if let Some(hwbench) = hwbench {
			sc_sysinfo::print_hwbench(&hwbench);
			match SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench, role.is_authority()) {
				Err(err) if role.is_authority() => {
					if err
						.0
						.iter()
						.any(|failure| matches!(failure.metric, Metric::Blake2256Parallel { .. }))
					{
						log::warn!(
							"⚠️  Starting January 2025, this hardware will fail the minimal physical CPU cores requirement {} for role 'Authority'.\n\
							Find out more about when this will become mandatory at:\n\
							https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware",
							err
						);
					}
					if err
						.0
						.iter()
						.any(|failure| !matches!(failure.metric, Metric::Blake2256Parallel { .. }))
					{
						log::warn!(
							"⚠️  The hardware does not meet the minimal requirements {} for role 'Authority'. Find out more at:\n\
							https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware",
							err
						);
					}
				},
				_ => {},
			}

			if let Some(ref mut telemetry) = telemetry {
				let telemetry_handle = telemetry.handle();
				task_manager.spawn_handle().spawn(
					"telemetry_hwbench",
					None,
					sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
				);
			}
		}

		let (block_import, link_half, babe_link, beefy_links) = import_setup;

		let overseer_client = client.clone();
		let spawner = task_manager.spawn_handle();

		let authority_discovery_service =
		// We need authority discovery if this node is either a validator or running alongside a
		// parachain node. Parachain nodes require authority discovery to find relay chain
		// validators, in order to send them their PoVs or to recover PoVs.
		if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() {
			use futures::StreamExt;
			use sc_network::{Event, NetworkEventStream};

			let authority_discovery_role = if role.is_authority() {
				sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore())
			} else {
				// don't publish our addresses when we're not an authority (collator, cumulus, ..)
				sc_authority_discovery::Role::Discover
			};
			let dht_event_stream =
				network.event_stream("authority-discovery").filter_map(|e| async move {
					match e {
						Event::Dht(e) => Some(e),
						_ => None,
					}
				});
			let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config(
				sc_authority_discovery::WorkerConfig {
					publish_non_global_ips: auth_disc_publish_non_global_ips,
					public_addresses: auth_disc_public_addresses,
					// Require that authority discovery records are signed.
					strict_record_validation: true,
					persisted_cache_directory: network_config.net_config_path,
					..Default::default()
				},
				client.clone(),
				Arc::new(network.clone()),
				Box::pin(dht_event_stream),
				authority_discovery_role,
				prometheus_registry.clone(),
				task_manager.spawn_handle(),
			);

			task_manager.spawn_handle().spawn(
				"authority-discovery-worker",
				Some("authority-discovery"),
				Box::pin(worker.run()),
			);
			Some(service)
		} else {
			None
		};

		let runtime_client = Arc::new(DefaultSubsystemClient::new(
			overseer_client.clone(),
			OffchainTransactionPoolFactory::new(transaction_pool.clone()),
		));

		let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service
		{
			let (overseer, overseer_handle) = overseer_gen
				.generate::<sc_service::SpawnTaskHandle, DefaultSubsystemClient<FullClient>>(
					overseer_connector,
					OverseerGenArgs {
						runtime_client,
						network_service: network.clone(),
						sync_service: sync_service.clone(),
						authority_discovery_service,
						collation_req_v1_receiver,
						collation_req_v2_receiver,
						available_data_req_receiver,
						registry: prometheus_registry.as_ref(),
						spawner,
						is_parachain_node,
						overseer_message_channel_capacity_override,
						req_protocol_names,
						peerset_protocol_names,
						notification_services,
					},
					ext_overseer_args,
				)
				.map_err(|e| {
					gum::error!("Failed to init overseer: {}", e);
					e
				})?;
			let handle = Handle::new(overseer_handle.clone());

			{
				let handle = handle.clone();
				task_manager.spawn_essential_handle().spawn_blocking(
					"overseer",
					None,
					Box::pin(async move {
						use futures::{pin_mut, select, FutureExt};

						let forward = polkadot_overseer::forward_events(overseer_client, handle);

						let forward = forward.fuse();
						let overseer_fut = overseer.run().fuse();

						pin_mut!(overseer_fut);
						pin_mut!(forward);

						select! {
							() = forward => (),
							() = overseer_fut => (),
							complete => (),
						}
					}),
				);
			}
			Some(handle)
		} else {
			assert!(
				!auth_or_collator,
				"Precondition congruence (false) is guaranteed by manual checking. qed"
			);
			None
		};

		if role.is_authority() {
			let proposer = sc_basic_authorship::ProposerFactory::new(
				task_manager.spawn_handle(),
				client.clone(),
				transaction_pool.clone(),
				prometheus_registry.as_ref(),
				telemetry.as_ref().map(|x| x.handle()),
			);

			let client_clone = client.clone();
			let overseer_handle =
				overseer_handle.as_ref().ok_or(Error::AuthoritiesRequireRealOverseer)?.clone();
			let slot_duration = babe_link.config().slot_duration();
			let babe_config = sc_consensus_babe::BabeParams {
				keystore: keystore_container.keystore(),
				client: client.clone(),
				select_chain,
				block_import,
				env: proposer,
				sync_oracle: sync_service.clone(),
				justification_sync_link: sync_service.clone(),
				create_inherent_data_providers: move |parent, ()| {
					let client_clone = client_clone.clone();
					let overseer_handle = overseer_handle.clone();

					async move {
						let parachain =
						polkadot_node_core_parachains_inherent::ParachainsInherentDataProvider::new(
							client_clone,
							overseer_handle,
							parent,
						);

						let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

						let slot =
						sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
							*timestamp,
							slot_duration,
						);

						Ok((slot, timestamp, parachain))
					}
				},
				force_authoring,
				backoff_authoring_blocks,
				babe_link,
				block_proposal_slot_portion: sc_consensus_babe::SlotProportion::new(2f32 / 3f32),
				max_block_proposal_slot_portion: None,
				telemetry: telemetry.as_ref().map(|x| x.handle()),
			};

			let babe = sc_consensus_babe::start_babe(babe_config)?;
			task_manager.spawn_essential_handle().spawn_blocking("babe", None, babe);
		}

		// if the node isn't actively participating in consensus then it doesn't
		// need a keystore, regardless of which protocol we use below.
		let keystore_opt =
			if role.is_authority() { Some(keystore_container.keystore()) } else { None };

		// beefy is enabled if its notification service exists
		if let Some(notification_service) = beefy_notification_service {
			let justifications_protocol_name =
				beefy_on_demand_justifications_handler.protocol_name();
			let network_params = sc_consensus_beefy::BeefyNetworkParams {
				network: Arc::new(network.clone()),
				sync: sync_service.clone(),
				gossip_protocol_name: beefy_gossip_proto_name,
				justifications_protocol_name,
				notification_service,
				_phantom: core::marker::PhantomData::<Block>,
			};
			let payload_provider = sp_consensus_beefy::mmr::MmrRootProvider::new(client.clone());
			let beefy_params = sc_consensus_beefy::BeefyParams {
				client: client.clone(),
				backend: backend.clone(),
				payload_provider,
				runtime: client.clone(),
				key_store: keystore_opt.clone(),
				network_params,
				min_block_delta: 8,
				prometheus_registry: prometheus_registry.clone(),
				links: beefy_links,
				on_demand_justifications_handler: beefy_on_demand_justifications_handler,
				is_authority: role.is_authority(),
			};

			let gadget = sc_consensus_beefy::start_beefy_gadget::<
				_,
				_,
				_,
				_,
				_,
				_,
				_,
				ecdsa_crypto::AuthorityId,
			>(beefy_params);

			// BEEFY is part of consensus, if it fails we'll bring the node down with it to make
			// sure it is noticed.
			task_manager
				.spawn_essential_handle()
				.spawn_blocking("beefy-gadget", None, gadget);
		}
		// When offchain indexing is enabled, MMR gadget should also run.
		if is_offchain_indexing_enabled {
			task_manager.spawn_essential_handle().spawn_blocking(
				"mmr-gadget",
				None,
				MmrGadget::start(
					client.clone(),
					backend.clone(),
					sp_mmr_primitives::INDEXING_PREFIX.to_vec(),
				),
			);
		}

		let config = sc_consensus_grandpa::Config {
			// FIXME substrate#1578 make this available through chainspec
			// Grandpa performance can be improved a bit by tuning this parameter, see:
			// https://github.com/paritytech/polkadot/issues/5464
			gossip_duration: Duration::from_millis(1000),
			justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD,
			name: Some(name),
			observer_enabled: false,
			keystore: keystore_opt,
			local_role: role,
			telemetry: telemetry.as_ref().map(|x| x.handle()),
			protocol_name: grandpa_protocol_name,
		};

		let enable_grandpa = !disable_grandpa;
		if enable_grandpa {
			// start the full GRANDPA voter
			// NOTE: unlike in substrate we are currently running the full
			// GRANDPA voter protocol for all full nodes (regardless of whether
			// they're validators or not). at this point the full voter should
			// provide better guarantees of block and vote data availability than
			// the observer.

			let mut voting_rules_builder = sc_consensus_grandpa::VotingRulesBuilder::default();

			#[cfg(not(feature = "malus"))]
			let _malus_finality_delay = None;

			if let Some(delay) = _malus_finality_delay {
				info!(?delay, "Enabling malus finality delay",);
				voting_rules_builder =
					voting_rules_builder.add(sc_consensus_grandpa::BeforeBestBlockBy(delay));
			};

			let grandpa_config = sc_consensus_grandpa::GrandpaParams {
				config,
				link: link_half,
				network: network.clone(),
				sync: sync_service.clone(),
				voting_rule: voting_rules_builder.build(),
				prometheus_registry: prometheus_registry.clone(),
				shared_voter_state,
				telemetry: telemetry.as_ref().map(|x| x.handle()),
				notification_service: grandpa_notification_service,
				offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
					transaction_pool.clone(),
				),
			};

			task_manager.spawn_essential_handle().spawn_blocking(
				"grandpa-voter",
				None,
				sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?,
			);
		}

		Ok(NewFull {
			task_manager,
			client,
			overseer_handle,
			network,
			sync_service,
			rpc_handlers,
			backend,
		})
	}
}

/// Create a new full node of arbitrary runtime and executor.
///
/// This is an advanced feature and not recommended for general use. Generally, `build_full` is
/// a better choice.
///
/// `workers_path` is used to get the path to the directory where auxiliary worker binaries reside.
/// If not specified, the main binary's directory is searched first, then `/usr/lib/polkadot` is
/// searched. If the path points to an executable rather than a directory, that executable is used
/// both as the preparation and the execution worker (intended for tests only).
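///
/// A hedged invocation sketch (illustrative only: the `ValidatorOverseerGen`
/// generator and the prepared `config`/`params` values are assumptions of the
/// caller, not items defined in this module):
///
/// ```rust,ignore
/// let NewFull { task_manager, client, .. } = new_full::<
/// 	ValidatorOverseerGen,
/// 	sc_network::NetworkWorker<Block, <Block as BlockT>::Hash>,
/// >(config, params)?;
/// ```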
pub fn new_full<
	OverseerGenerator: OverseerGen,
	Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
>(
	config: Configuration,
	params: NewFullParams<OverseerGenerator>,
) -> Result<NewFull, Error> {
	PolkadotServiceBuilder::<OverseerGenerator, Network>::new(config, params)?.build()
}