//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.

use futures::FutureExt;
use sc_client_api::{Backend, BlockBackend};
use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
use sc_consensus_grandpa::SharedVoterState;
use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncConfig};
use sc_telemetry::{Telemetry, TelemetryWorker};
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use solochain_template_runtime::{self, apis::RuntimeApi, opaque::Block};
use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
use std::{sync::Arc, time::Duration};

14pub(crate) type FullClient = sc_service::TFullClient<
15	Block,
16	RuntimeApi,
17	sc_executor::WasmExecutor<sp_io::SubstrateHostFunctions>,
18>;
19type FullBackend = sc_service::TFullBackend<Block>;
20type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
21
22/// The minimum period of blocks on which justifications will be
23/// imported and generated.
24const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512;
25
26pub type Service = sc_service::PartialComponents<
27	FullClient,
28	FullBackend,
29	FullSelectChain,
30	sc_consensus::DefaultImportQueue<Block>,
31	sc_transaction_pool::TransactionPoolHandle<Block, FullClient>,
32	(
33		sc_consensus_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
34		sc_consensus_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
35		Option<Telemetry>,
36	),
37>;
38
39pub fn new_partial(config: &Configuration) -> Result<Service, ServiceError> {
40	let telemetry = config
41		.telemetry_endpoints
42		.clone()
43		.filter(|x| !x.is_empty())
44		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
45			let worker = TelemetryWorker::new(16)?;
46			let telemetry = worker.handle().new_telemetry(endpoints);
47			Ok((worker, telemetry))
48		})
49		.transpose()?;
50
51	let executor = sc_service::new_wasm_executor::<sp_io::SubstrateHostFunctions>(&config.executor);
52	let (client, backend, keystore_container, task_manager) =
53		sc_service::new_full_parts::<Block, RuntimeApi, _>(
54			config,
55			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
56			executor,
57		)?;
58	let client = Arc::new(client);
59
60	let telemetry = telemetry.map(|(worker, telemetry)| {
61		task_manager.spawn_handle().spawn("telemetry", None, worker.run());
62		telemetry
63	});
64
65	let select_chain = sc_consensus::LongestChain::new(backend.clone());
66
67	let transaction_pool = Arc::from(
68		sc_transaction_pool::Builder::new(
69			task_manager.spawn_essential_handle(),
70			client.clone(),
71			config.role.is_authority().into(),
72		)
73		.with_options(config.transaction_pool.clone())
74		.with_prometheus(config.prometheus_registry())
75		.build(),
76	);
77
78	let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import(
79		client.clone(),
80		GRANDPA_JUSTIFICATION_PERIOD,
81		&client,
82		select_chain.clone(),
83		telemetry.as_ref().map(|x| x.handle()),
84	)?;
85
86	let cidp_client = client.clone();
87	let import_queue =
88		sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(ImportQueueParams {
89			block_import: grandpa_block_import.clone(),
90			justification_import: Some(Box::new(grandpa_block_import.clone())),
91			client: client.clone(),
92			create_inherent_data_providers: move |parent_hash, _| {
93				let cidp_client = cidp_client.clone();
94				async move {
95					let slot_duration = sc_consensus_aura::standalone::slot_duration_at(
96						&*cidp_client,
97						parent_hash,
98					)?;
99					let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
100
101					let slot =
102						sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
103							*timestamp,
104							slot_duration,
105						);
106
107					Ok((slot, timestamp))
108				}
109			},
110			spawner: &task_manager.spawn_essential_handle(),
111			registry: config.prometheus_registry(),
112			check_for_equivocation: Default::default(),
113			telemetry: telemetry.as_ref().map(|x| x.handle()),
114			compatibility_mode: Default::default(),
115		})?;
116
117	Ok(sc_service::PartialComponents {
118		client,
119		backend,
120		task_manager,
121		import_queue,
122		keystore_container,
123		select_chain,
124		transaction_pool,
125		other: (grandpa_block_import, grandpa_link, telemetry),
126	})
127}
128
129/// Builds a new service for a full client.
130pub fn new_full<
131	N: sc_network::NetworkBackend<Block, <Block as sp_runtime::traits::Block>::Hash>,
132>(
133	config: Configuration,
134) -> Result<TaskManager, ServiceError> {
135	let sc_service::PartialComponents {
136		client,
137		backend,
138		mut task_manager,
139		import_queue,
140		keystore_container,
141		select_chain,
142		transaction_pool,
143		other: (block_import, grandpa_link, mut telemetry),
144	} = new_partial(&config)?;
145
146	let mut net_config = sc_network::config::FullNetworkConfiguration::<
147		Block,
148		<Block as sp_runtime::traits::Block>::Hash,
149		N,
150	>::new(&config.network, config.prometheus_registry().cloned());
151	let metrics = N::register_notification_metrics(config.prometheus_registry());
152
153	let peer_store_handle = net_config.peer_store_handle();
154	let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name(
155		&client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"),
156		&config.chain_spec,
157	);
158	let (grandpa_protocol_config, grandpa_notification_service) =
159		sc_consensus_grandpa::grandpa_peers_set_config::<_, N>(
160			grandpa_protocol_name.clone(),
161			metrics.clone(),
162			peer_store_handle,
163		);
164	net_config.add_notification_protocol(grandpa_protocol_config);
165
166	let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
167		backend.clone(),
168		grandpa_link.shared_authority_set().clone(),
169		Vec::default(),
170	));
171
172	let (network, system_rpc_tx, tx_handler_controller, sync_service) =
173		sc_service::build_network(sc_service::BuildNetworkParams {
174			config: &config,
175			net_config,
176			client: client.clone(),
177			transaction_pool: transaction_pool.clone(),
178			spawn_handle: task_manager.spawn_handle(),
179			import_queue,
180			block_announce_validator_builder: None,
181			warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)),
182			block_relay: None,
183			metrics,
184		})?;
185
186	if config.offchain_worker.enabled {
187		let offchain_workers =
188			sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
189				runtime_api_provider: client.clone(),
190				is_validator: config.role.is_authority(),
191				keystore: Some(keystore_container.keystore()),
192				offchain_db: backend.offchain_storage(),
193				transaction_pool: Some(OffchainTransactionPoolFactory::new(
194					transaction_pool.clone(),
195				)),
196				network_provider: Arc::new(network.clone()),
197				enable_http_requests: true,
198				custom_extensions: |_| vec![],
199			})?;
200		task_manager.spawn_handle().spawn(
201			"offchain-workers-runner",
202			"offchain-worker",
203			offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(),
204		);
205	}
206
207	let role = config.role;
208	let force_authoring = config.force_authoring;
209	let backoff_authoring_blocks: Option<()> = None;
210	let name = config.network.node_name.clone();
211	let enable_grandpa = !config.disable_grandpa;
212	let prometheus_registry = config.prometheus_registry().cloned();
213
214	let rpc_extensions_builder = {
215		let client = client.clone();
216		let pool = transaction_pool.clone();
217
218		Box::new(move |_| {
219			let deps = crate::rpc::FullDeps { client: client.clone(), pool: pool.clone() };
220			crate::rpc::create_full(deps).map_err(Into::into)
221		})
222	};
223
224	let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
225		network: Arc::new(network.clone()),
226		client: client.clone(),
227		keystore: keystore_container.keystore(),
228		task_manager: &mut task_manager,
229		transaction_pool: transaction_pool.clone(),
230		rpc_builder: rpc_extensions_builder,
231		backend,
232		system_rpc_tx,
233		tx_handler_controller,
234		sync_service: sync_service.clone(),
235		config,
236		telemetry: telemetry.as_mut(),
237	})?;
238
239	if role.is_authority() {
240		let proposer_factory = sc_basic_authorship::ProposerFactory::new(
241			task_manager.spawn_handle(),
242			client.clone(),
243			transaction_pool.clone(),
244			prometheus_registry.as_ref(),
245			telemetry.as_ref().map(|x| x.handle()),
246		);
247
248		let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
249
250		let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _>(
251			StartAuraParams {
252				slot_duration,
253				client,
254				select_chain,
255				block_import,
256				proposer_factory,
257				create_inherent_data_providers: move |_, ()| async move {
258					let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
259
260					let slot =
261						sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
262							*timestamp,
263							slot_duration,
264						);
265
266					Ok((slot, timestamp))
267				},
268				force_authoring,
269				backoff_authoring_blocks,
270				keystore: keystore_container.keystore(),
271				sync_oracle: sync_service.clone(),
272				justification_sync_link: sync_service.clone(),
273				block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
274				max_block_proposal_slot_portion: None,
275				telemetry: telemetry.as_ref().map(|x| x.handle()),
276				compatibility_mode: Default::default(),
277			},
278		)?;
279
280		// the AURA authoring task is considered essential, i.e. if it
281		// fails we take down the service with it.
282		task_manager
283			.spawn_essential_handle()
284			.spawn_blocking("aura", Some("block-authoring"), aura);
285	}
286
287	if enable_grandpa {
288		// if the node isn't actively participating in consensus then it doesn't
289		// need a keystore, regardless of which protocol we use below.
290		let keystore = if role.is_authority() { Some(keystore_container.keystore()) } else { None };
291
292		let grandpa_config = sc_consensus_grandpa::Config {
293			// FIXME #1578 make this available through chainspec
294			gossip_duration: Duration::from_millis(333),
295			justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD,
296			name: Some(name),
297			observer_enabled: false,
298			keystore,
299			local_role: role,
300			telemetry: telemetry.as_ref().map(|x| x.handle()),
301			protocol_name: grandpa_protocol_name,
302		};
303
304		// start the full GRANDPA voter
305		// NOTE: non-authorities could run the GRANDPA observer protocol, but at
306		// this point the full voter should provide better guarantees of block
307		// and vote data availability than the observer. The observer has not
308		// been tested extensively yet and having most nodes in a network run it
309		// could lead to finality stalls.
310		let grandpa_config = sc_consensus_grandpa::GrandpaParams {
311			config: grandpa_config,
312			link: grandpa_link,
313			network,
314			sync: Arc::new(sync_service),
315			notification_service: grandpa_notification_service,
316			voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(),
317			prometheus_registry,
318			shared_voter_state: SharedVoterState::empty(),
319			telemetry: telemetry.as_ref().map(|x| x.handle()),
320			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool),
321		};
322
323		// the GRANDPA voter task is considered infallible, i.e.
324		// if it fails we take down the service with it.
325		task_manager.spawn_essential_handle().spawn_blocking(
326			"grandpa-voter",
327			None,
328			sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?,
329		);
330	}
331
332	Ok(task_manager)
333}