Rework telemetry to replace the use of tracing with an object we pass around (paritytech#8143)

polkadot companion: paritytech/polkadot#2535
cecton authored Mar 11, 2021
1 parent 6f12d79 commit 6ac86d5
Showing 55 changed files with 1,028 additions and 838 deletions.
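In short, the old entered `TelemetrySpan` is gone; each service now builds a `TelemetryWorker`, derives a `Telemetry` object from its handle, spawns the worker on the task manager, and hands `TelemetryHandle`s to every component that reports telemetry. Condensed from the node-template `new_partial`/`new_full` changes below (a sketch rather than a drop-in snippet; `Block`, `RuntimeApi`, and `Executor` are the node's existing service types), the new wiring looks roughly like this:

```rust
use sc_telemetry::{Telemetry, TelemetryWorker};

// 1. Build the worker and a `Telemetry` object only if endpoints are configured.
let telemetry = config.telemetry_endpoints.clone()
    .filter(|x| !x.is_empty())
    .map(|endpoints| -> Result<_, sc_telemetry::Error> {
        let worker = TelemetryWorker::new(16)?;
        let telemetry = worker.handle().new_telemetry(endpoints);
        Ok((worker, telemetry))
    })
    .transpose()?;

// 2. The client parts now take an optional telemetry handle.
let (client, backend, keystore_container, task_manager) =
    sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
        &config,
        telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
    )?;

// 3. Spawn the background worker and keep only the `Telemetry` object.
let mut telemetry = telemetry.map(|(worker, telemetry)| {
    task_manager.spawn_handle().spawn("telemetry", worker.run());
    telemetry
});

// 4. Components receive handles explicitly instead of relying on a tracing span:
//        telemetry: telemetry.as_ref().map(|x| x.handle()),   // import queue, Aura, GRANDPA, ...
//    and `spawn_tasks` takes the object itself:
//        telemetry: telemetry.as_mut(),
```

The same steps are repeated in `new_light` with `new_light_parts`, and the downstream params structs (import queue, Aura, GRANDPA, `spawn_tasks`) each gain an explicit `telemetry` field in place of the removed `telemetry_span`/`telemetry_connection_notifier` plumbing.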
11 changes: 4 additions & 7 deletions Cargo.lock

Some generated files are not rendered by default.

72 changes: 56 additions & 16 deletions bin/node-template/node/src/service.rs
@@ -12,7 +12,7 @@ use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
use sc_consensus_aura::{ImportQueueParams, StartAuraParams, SlotProportion};
use sc_finality_grandpa::SharedVoterState;
use sc_keystore::LocalKeystore;
use sc_telemetry::TelemetrySpan;
use sc_telemetry::{Telemetry, TelemetryWorker};

// Our native executor instance.
native_executor_instance!(
@@ -38,6 +38,7 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
AuraPair
>,
sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
Option<Telemetry>,
)
>, ServiceError> {
if config.keystore_remote.is_some() {
@@ -46,10 +47,28 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
}
let inherent_data_providers = InherentDataProviders::new();

let telemetry = config.telemetry_endpoints.clone()
.filter(|x| !x.is_empty())
.map(|endpoints| -> Result<_, sc_telemetry::Error> {
let worker = TelemetryWorker::new(16)?;
let telemetry = worker.handle().new_telemetry(endpoints);
Ok((worker, telemetry))
})
.transpose()?;

let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, Executor>(&config)?;
sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
&config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
)?;
let client = Arc::new(client);

let telemetry = telemetry
.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", worker.run());
telemetry
});

let select_chain = sc_consensus::LongestChain::new(backend.clone());

let transaction_pool = sc_transaction_pool::BasicPool::new_full(
@@ -61,7 +80,10 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
);

let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import(
client.clone(), &(client.clone() as Arc<_>), select_chain.clone(),
client.clone(),
&(client.clone() as Arc<_>),
select_chain.clone(),
telemetry.as_ref().map(|x| x.handle()),
)?;

let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(
@@ -79,6 +101,7 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
slot_duration: sc_consensus_aura::slot_duration(&*client)?,
registry: config.prometheus_registry(),
check_for_equivocation: Default::default(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
},
)?;

@@ -91,7 +114,7 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
select_chain,
transaction_pool,
inherent_data_providers,
other: (aura_block_import, grandpa_link),
other: (aura_block_import, grandpa_link, telemetry),
})
}

@@ -113,7 +136,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
select_chain,
transaction_pool,
inherent_data_providers,
other: (block_import, grandpa_link),
other: (block_import, grandpa_link, mut telemetry),
} = new_partial(&config)?;

if let Some(url) = &config.keystore_remote {
@@ -167,10 +190,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
})
};

let telemetry_span = TelemetrySpan::new();
let _telemetry_span_entered = telemetry_span.enter();

let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(
let _rpc_handlers = sc_service::spawn_tasks(
sc_service::SpawnTasksParams {
network: network.clone(),
client: client.clone(),
@@ -184,7 +204,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
network_status_sinks,
system_rpc_tx,
config,
telemetry_span: Some(telemetry_span.clone()),
telemetry: telemetry.as_mut(),
},
)?;

@@ -194,6 +214,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
client.clone(),
transaction_pool,
prometheus_registry.as_ref(),
telemetry.as_ref().map(|x| x.handle()),
);

let can_author_with =
@@ -213,6 +234,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
can_author_with,
sync_oracle: network.clone(),
block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
telemetry: telemetry.as_ref().map(|x| x.handle()),
},
)?;

@@ -237,6 +259,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
observer_enabled: false,
keystore,
is_authority: role.is_authority(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};

if enable_grandpa {
@@ -250,10 +273,10 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
config: grandpa_config,
link: grandpa_link,
network,
telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()),
voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(),
prometheus_registry,
shared_voter_state: SharedVoterState::empty(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};

// the GRANDPA voter task is considered infallible, i.e.
@@ -270,8 +293,26 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>

/// Builds a new service for a light client.
pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
let telemetry = config.telemetry_endpoints.clone()
.filter(|x| !x.is_empty())
.map(|endpoints| -> Result<_, sc_telemetry::Error> {
let worker = TelemetryWorker::new(16)?;
let telemetry = worker.handle().new_telemetry(endpoints);
Ok((worker, telemetry))
})
.transpose()?;

let (client, backend, keystore_container, mut task_manager, on_demand) =
sc_service::new_light_parts::<Block, RuntimeApi, Executor>(&config)?;
sc_service::new_light_parts::<Block, RuntimeApi, Executor>(
&config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
)?;

let mut telemetry = telemetry
.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", worker.run());
telemetry
});

config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());

@@ -289,6 +330,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
client.clone(),
&(client.clone() as Arc<_>),
select_chain.clone(),
telemetry.as_ref().map(|x| x.handle()),
)?;

let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(
@@ -307,6 +349,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
slot_duration: sc_consensus_aura::slot_duration(&*client)?,
registry: config.prometheus_registry(),
check_for_equivocation: Default::default(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
},
)?;

@@ -327,9 +370,6 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
);
}

let telemetry_span = TelemetrySpan::new();
let _telemetry_span_entered = telemetry_span.enter();

sc_service::spawn_tasks(sc_service::SpawnTasksParams {
remote_blockchain: Some(backend.remote_blockchain()),
transaction_pool,
@@ -343,7 +383,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
network,
network_status_sinks,
system_rpc_tx,
telemetry_span: Some(telemetry_span.clone()),
telemetry: telemetry.as_mut(),
})?;

network_starter.start_network();
1 change: 1 addition & 0 deletions bin/node/bench/src/construct.rs
@@ -151,6 +151,7 @@ impl core::Benchmark for ConstructionBenchmark {
context.client.clone(),
self.transactions.clone().into(),
None,
None,
);
let inherent_data_providers = sp_inherents::InherentDataProviders::new();
inherent_data_providers
2 changes: 2 additions & 0 deletions bin/node/cli/Cargo.toml
@@ -105,6 +105,7 @@ try-runtime-cli = { version = "0.9.0", optional = true, path = "../../../utils/f
wasm-bindgen = { version = "0.2.57", optional = true }
wasm-bindgen-futures = { version = "0.4.18", optional = true }
browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.9.0"}
libp2p-wasm-ext = { version = "0.27", features = ["websocket"], optional = true }

[target.'cfg(target_arch="x86_64")'.dependencies]
node-executor = { version = "2.0.0", path = "../executor", features = [ "wasmtime" ] }
@@ -148,6 +149,7 @@ browser = [
"browser-utils",
"wasm-bindgen",
"wasm-bindgen-futures",
"libp2p-wasm-ext",
]
cli = [
"node-executor/wasmi-errno",
14 changes: 4 additions & 10 deletions bin/node/cli/src/browser.rs
@@ -21,7 +21,7 @@ use log::info;
use wasm_bindgen::prelude::*;
use browser_utils::{
Client,
browser_configuration, init_logging_and_telemetry, set_console_error_panic_hook,
browser_configuration, init_logging, set_console_error_panic_hook,
};

/// Starts the client.
@@ -37,18 +37,14 @@ async fn start_inner(
log_directives: String,
) -> Result<Client, Box<dyn std::error::Error>> {
set_console_error_panic_hook();
let telemetry_worker = init_logging_and_telemetry(&log_directives)?;
init_logging(&log_directives)?;
let chain_spec = match chain_spec {
Some(chain_spec) => ChainSpec::from_json_bytes(chain_spec.as_bytes().to_vec())
.map_err(|e| format!("{:?}", e))?,
None => crate::chain_spec::development_config(),
};

let telemetry_handle = telemetry_worker.handle();
let config = browser_configuration(
chain_spec,
Some(telemetry_handle),
).await?;
let config = browser_configuration(chain_spec).await?;

info!("Substrate browser node");
info!("✌️ version {}", config.impl_version);
@@ -60,10 +56,8 @@ async fn start_inner(
// Create the service. This is the most heavy initialization step.
let (task_manager, rpc_handlers) =
crate::service::new_light_base(config)
.map(|(components, rpc_handlers, _, _, _, _)| (components, rpc_handlers))
.map(|(components, rpc_handlers, _, _, _)| (components, rpc_handlers))
.map_err(|e| format!("{:?}", e))?;

task_manager.spawn_handle().spawn("telemetry", telemetry_worker.run());

Ok(browser_utils::start_client(task_manager, rpc_handlers))
}
}
2 changes: 1 addition & 1 deletion bin/node/cli/src/chain_spec.rs
@@ -443,7 +443,7 @@ pub(crate) mod tests {
Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool))
},
|config| {
let (keep_alive, _, _, client, network, transaction_pool) = new_light_base(config)?;
let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?;
Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool))
}
);