Skip to content

Commit

Permalink
Remove all log spam from rerun during normal use with rr.spawn (rer…
Browse files Browse the repository at this point in the history
…un-io#1142)

* Remove all log spam from rerun during normal use with `rr.spawn`

* py lint

* Suppress debug-log-spam from h2, hyper, reqwest, rustls

* better docstring

Co-authored-by: Clement Rey <[email protected]>

* fmt

* fmt

---------

Co-authored-by: Clement Rey <[email protected]>
  • Loading branch information
emilk and teh-cmc authored Feb 8, 2023
1 parent 017cebd commit a8148f0
Show file tree
Hide file tree
Showing 7 changed files with 55 additions and 16 deletions.
19 changes: 15 additions & 4 deletions crates/re_log/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,14 @@
//! * `warn`: problems that we can recover from
//! * `error`: problems that lead to loss of functionality or data
//!
//! The `warn_once` etc macros are for when you want to surpress repeated
//! The `warn_once` etc macros are for when you want to suppress repeated
//! logging of the exact same message.
pub use tracing::{debug, error, info, trace, warn};

// The `re_log::info_once!(…)` etc are nice helpers, but the `log-once` crate is a bit lacking.
// In the future we should implement our own `tracing` layer and de-duplicate based on the callsite,
// similar to how the log console in a browser will automatically supress duplicates.
// similar to how the log console in a browser will automatically suppress duplicates.
pub use log_once::{debug_once, error_once, info_once, trace_once, warn_once};

/// Set `RUST_LOG` environment variable to `info`, unless set,
Expand All @@ -22,8 +22,19 @@ pub use log_once::{debug_once, error_once, info_once, trace_once, warn_once};
fn set_default_rust_log_env() {
let mut rust_log = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_owned());

// TODO(emilk): remove once https://github.com/gfx-rs/wgpu/issues/3206 is fixed
for loud_crate in ["naga", "wgpu_core", "wgpu_hal"] {
const LOUD_CRATES: [&str; 7] = [
// wgpu crates spam a lot on info level, which is really annoying
// TODO(emilk): remove once https://github.com/gfx-rs/wgpu/issues/3206 is fixed
"naga",
"wgpu_core",
"wgpu_hal",
// These are quite spammy on debug, drowning out what we care about:
"h2",
"hyper",
"reqwest",
"rustls",
];
for loud_crate in LOUD_CRATES {
if !rust_log.contains(&format!("{loud_crate}=")) {
rust_log += &format!(",{loud_crate}=warn");
}
Expand Down
22 changes: 18 additions & 4 deletions crates/re_sdk_comms/src/server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,16 @@ pub struct ServerOptions {
/// If the latency in the [`LogMsg`] channel is greater than this,
/// then start dropping messages in order to keep up.
pub max_latency_sec: f32,

/// Turns `info`-level logs into `debug`-level logs.
pub quiet: bool,
}

/// Defaults: unbounded channel latency (no message dropping) and normal
/// (`info`-level) logging of connection events.
impl Default for ServerOptions {
fn default() -> Self {
Self {
// `f32::INFINITY` means the latency threshold is never exceeded,
// i.e. message dropping is disabled by default.
max_latency_sec: f32::INFINITY,
// Not quiet: server/connection events are logged at `info` level.
quiet: false,
}
}
}
Expand Down Expand Up @@ -55,9 +59,15 @@ pub fn serve(port: u16, options: ServerOptions) -> anyhow::Result<Receiver<LogMs
})
.expect("Failed to spawn thread");

re_log::info!(
"Hosting a SDK server over TCP at {bind_addr}. Connect with the Rerun logging SDK."
);
if options.quiet {
re_log::debug!(
"Hosting a SDK server over TCP at {bind_addr}. Connect with the Rerun logging SDK."
);
} else {
re_log::info!(
"Hosting a SDK server over TCP at {bind_addr}. Connect with the Rerun logging SDK."
);
}

Ok(rx)
}
Expand All @@ -69,7 +79,11 @@ fn spawn_client(stream: std::net::TcpStream, tx: Sender<LogMsg>, options: Server
stream.peer_addr()
))
.spawn(move || {
re_log::info!("New SDK client connected: {:?}", stream.peer_addr());
if options.quiet {
re_log::debug!("New SDK client connected: {:?}", stream.peer_addr());
} else {
re_log::info!("New SDK client connected: {:?}", stream.peer_addr());
}

if let Err(err) = run_client(stream, &tx, options) {
re_log::warn!("Closing connection to client: {err}");
Expand Down
2 changes: 1 addition & 1 deletion crates/re_viewer/src/app.rs
Original file line number Diff line number Diff line change
Expand Up @@ -612,7 +612,7 @@ impl App {

while let Ok(msg) = rx.try_recv() {
if let LogMsg::BeginRecordingMsg(msg) = &msg {
re_log::info!("Beginning a new recording: {:?}", msg.info);
re_log::debug!("Beginning a new recording: {:?}", msg.info);
self.state.selected_rec_id = msg.info.recording_id;

#[cfg(all(not(target_arch = "wasm32"), feature = "analytics"))]
Expand Down
21 changes: 18 additions & 3 deletions crates/rerun/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -82,26 +82,38 @@ enum Commands {
/// Subcommands for managing Rerun analytics from the CLI.
pub enum AnalyticsCommands {
/// Prints extra information about analytics.
Details,

/// Deletes everything related to analytics.
///
/// This will remove all pending data that hasn't yet been sent to our servers, as well as
/// reset your analytics ID.
Clear,

/// Associate an email address with the current user.
Email { email: String },

/// Enable analytics.
Enable,

/// Disable analytics.
Disable,

/// Prints the current configuration.
Config,
}

/// Where the `rerun` application was invoked from.
///
/// Used to adjust behavior depending on the entry point, e.g. `run_impl`
/// enables quiet server logging when called from Python.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CallSource {
/// Invoked directly from the command line (the `rerun` binary's `main`).
Cli,

/// Invoked through the Python bridge (e.g. via `rerun.spawn()`).
Python,
}

// Run the rerun application and return an exit code
// It would be nice to use `std::process::ExitCode` here but
// then there's no good way to get back at the exit code from
// python
pub async fn run<I, T>(args: I) -> anyhow::Result<u8>
pub async fn run<I, T>(call_source: CallSource, args: I) -> anyhow::Result<u8>
where
I: IntoIterator<Item = T>,
T: Into<std::ffi::OsString> + Clone,
Expand All @@ -121,7 +133,7 @@ where
_ => Ok(()),
}
} else {
run_impl(args).await
run_impl(call_source, args).await
};

match res {
Expand Down Expand Up @@ -158,7 +170,7 @@ fn run_analytics(cmd: &AnalyticsCommands) -> Result<(), re_analytics::cli::CliEr
}
}

async fn run_impl(args: Args) -> anyhow::Result<()> {
async fn run_impl(call_source: CallSource, args: Args) -> anyhow::Result<()> {
let mut profiler = re_viewer::Profiler::default();
if args.profile {
profiler.start();
Expand Down Expand Up @@ -186,6 +198,9 @@ async fn run_impl(args: Args) -> anyhow::Result<()> {
{
let server_options = re_sdk_comms::ServerOptions {
max_latency_sec: parse_max_latency(args.drop_at_latency.as_ref()),

// `rerun.spawn()` doesn't need to log that a connection has been made
quiet: call_source == CallSource::Python,
};
re_sdk_comms::serve(args.port, server_options)?
}
Expand Down
2 changes: 1 addition & 1 deletion crates/rerun/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ static GLOBAL: AccountingAllocator<mimalloc::MiMalloc> =
#[tokio::main]
async fn main() -> anyhow::Result<std::process::ExitCode> {
re_log::setup_native_logging();
rerun::run(std::env::args())
rerun::run(rerun::CallSource::Cli, std::env::args())
.await
.map(std::process::ExitCode::from)
}
3 changes: 1 addition & 2 deletions rerun_py/rerun_sdk/rerun/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,8 +203,7 @@ def spawn(port: int = 9876, connect: bool = True) -> None:

# start_new_session=True ensures the spawned process does NOT die when
# we hit ctrl-c in the terminal running the parent Python process.
rerun_process = subprocess.Popen([python_executable, "-m", "rerun", "--port", str(port)], start_new_session=True)
print(f"Spawned Rerun Viewer with pid {rerun_process.pid}")
subprocess.Popen([python_executable, "-m", "rerun", "--port", str(port)], start_new_session=True)

# TODO(emilk): figure out a way to postpone connecting until the rerun viewer is listening.
# For instance, wait until it prints "Hosting a SDK server over TCP at …"
Expand Down
2 changes: 1 addition & 1 deletion rerun_py/src/python_bridge.rs
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,7 @@ fn main(argv: Vec<String>) -> PyResult<u8> {
.enable_all()
.build()
.unwrap()
.block_on(rerun::run(argv))
.block_on(rerun::run(rerun::CallSource::Python, argv))
.map_err(|err| PyRuntimeError::new_err(re_error::format(err)))
}

Expand Down

0 comments on commit a8148f0

Please sign in to comment.