From 0664484552583ddca5cc3196d2a91f51b114c42c Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Mon, 13 May 2019 17:34:56 -0700 Subject: [PATCH 01/15] Experiment: Create timing report. --- Cargo.toml | 1 + src/bin/cargo/cli.rs | 1 + src/cargo/core/compiler/context/mod.rs | 2 +- src/cargo/core/compiler/job_queue.rs | 41 +- src/cargo/core/compiler/mod.rs | 1 + src/cargo/core/compiler/timings.rs | 861 +++++++++++++++++++++++++ src/cargo/core/features.rs | 9 + src/cargo/core/manifest.rs | 13 + src/cargo/core/shell.rs | 7 + src/cargo/lib.rs | 1 + src/cargo/util/dependency_queue.rs | 16 +- src/cargo/util/machine_message.rs | 18 +- src/doc/src/reference/unstable.md | 20 + 13 files changed, 977 insertions(+), 14 deletions(-) create mode 100644 src/cargo/core/compiler/timings.rs diff --git a/Cargo.toml b/Cargo.toml index b234f2861..5ff3893a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ git2-curl = "0.11.0" glob = "0.3.0" hex = "0.3" home = "0.5" +humantime = "1.2.0" ignore = "0.4.7" lazy_static = "1.2.0" jobserver = "0.1.13" diff --git a/src/bin/cargo/cli.rs b/src/bin/cargo/cli.rs index f5d507701..87486b8b6 100644 --- a/src/bin/cargo/cli.rs +++ b/src/bin/cargo/cli.rs @@ -36,6 +36,7 @@ Available unstable (nightly-only) flags: -Z config-profile -- Read profiles from .cargo/config files -Z install-upgrade -- `cargo install` will upgrade instead of failing -Z cache-messages -- Cache compiler messages + -Z timings -- Display concurrency information Run with 'cargo -Z [FLAG] [SUBCOMMAND]'" ); diff --git a/src/cargo/core/compiler/context/mod.rs b/src/cargo/core/compiler/context/mod.rs index d4acfa737..79707b4f3 100644 --- a/src/cargo/core/compiler/context/mod.rs +++ b/src/cargo/core/compiler/context/mod.rs @@ -129,7 +129,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> { export_dir: Option, exec: &Arc, ) -> CargoResult> { - let mut queue = JobQueue::new(self.bcx); + let mut queue = JobQueue::new(self.bcx, units); let mut plan = BuildPlan::new(); let build_plan = self.bcx.build_config.build_plan; self.prepare_units(export_dir, units)?; diff --git a/src/cargo/core/compiler/job_queue.rs b/src/cargo/core/compiler/job_queue.rs index cea9e4e23..d56bd6e2d 100644 --- a/src/cargo/core/compiler/job_queue.rs +++ b/src/cargo/core/compiler/job_queue.rs @@ -14,6 +14,7 @@ use super::job::{ Freshness::{self, Dirty, Fresh}, Job, }; +use super::timings::Timings; use super::{BuildContext, BuildPlan, CompileMode, Context, Unit}; use crate::core::{PackageId, TargetKind}; use crate::handle_error; @@ -39,6 +40,7 @@ pub struct JobQueue<'a, 'cfg> { is_release: bool, progress: Progress<'cfg>, next_id: u32, + timings: Timings<'a, 'cfg>, } pub struct JobState<'a> { @@ -80,7 +82,7 @@ enum Artifact { } enum Message { - Run(String), + Run(u32, String), BuildPlanMsg(String, ProcessBuilder, Arc>), Stdout(String), Stderr(String), @@ -91,7 +93,7 @@ enum Message { impl<'a> JobState<'a> { pub fn running(&self, cmd: &ProcessBuilder) { - let _ = self.tx.send(Message::Run(cmd.to_string())); + let _ = self.tx.send(Message::Run(self.id, cmd.to_string())); } pub fn build_plan( @@ -128,9 +130,16 @@ impl<'a> JobState<'a> { } impl<'a, 'cfg> JobQueue<'a, 'cfg> { - pub fn new(bcx: &BuildContext<'a, 'cfg>) -> JobQueue<'a, 'cfg> { + pub fn new(bcx: &BuildContext<'a, 'cfg>, root_units: &[Unit<'a>]) -> JobQueue<'a, 'cfg> { let (tx, rx) = channel(); let progress = Progress::with_style("Building", ProgressStyle::Ratio, bcx.config); + let profile = if bcx.build_config.release { + "release" + } else { + "dev" + } + .to_string(); + let timings = 
Timings::new(bcx.config, root_units, profile); JobQueue { queue: DependencyQueue::new(), tx, @@ -142,6 +151,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { is_release: bcx.build_config.release, progress, next_id: 0, + timings, } } @@ -316,6 +326,9 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { // to the jobserver itself. tokens.truncate(self.active.len() - 1); + self.timings + .mark_concurrency(self.active.len(), queue.len(), self.queue.len()); + // Drain all events at once to avoid displaying the progress bar // unnecessarily. let events: Vec<_> = self.rx.try_iter().collect(); @@ -328,18 +341,18 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { for event in events { match event { - Message::Run(cmd) => { + Message::Run(id, cmd) => { cx.bcx .config .shell() .verbose(|c| c.status("Running", &cmd))?; + self.timings.unit_start(id, self.active[&id]); } Message::BuildPlanMsg(module_name, cmd, filenames) => { plan.update(&module_name, &cmd, &filenames)?; } Message::Stdout(out) => { - self.progress.clear(); - println!("{}", out); + cx.bcx.config.shell().stdout_println(out); } Message::Stderr(err) => { let mut shell = cx.bcx.config.shell(); @@ -367,7 +380,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { }; info!("end ({:?}): {:?}", unit, result); match result { - Ok(()) => self.finish(&unit, artifact, cx)?, + Ok(()) => self.finish(id, &unit, artifact, cx)?, Err(e) => { let msg = "The following warnings were emitted during compilation:"; self.emit_warnings(Some(msg), &unit, cx)?; @@ -425,6 +438,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { if !cx.bcx.build_config.build_plan { cx.bcx.config.shell().status("Finished", message)?; } + self.timings.finished()?; Ok(()) } else { debug!("queue: {:#?}", self.queue); @@ -519,8 +533,12 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { } match fresh { - Freshness::Fresh => doit(), + Freshness::Fresh => { + self.timings.add_fresh(); + doit() + } Freshness::Dirty => { + self.timings.add_dirty(); scope.spawn(move |_| doit()); } } @@ -558,6 +576,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { fn finish( &mut self, + id: u32, unit: &Unit<'a>, artifact: Artifact, cx: &mut Context<'_, '_>, @@ -565,7 +584,11 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { if unit.mode.is_run_custom_build() && cx.bcx.show_warnings(unit.pkg.package_id()) { self.emit_warnings(None, unit, cx)?; } - self.queue.finish(unit, &artifact); + let unlocked = self.queue.finish(unit, &artifact); + match artifact { + Artifact::All => self.timings.unit_finished(id, unlocked), + Artifact::Metadata => self.timings.unit_rmeta_finished(id, unlocked), + } Ok(()) } diff --git a/src/cargo/core/compiler/mod.rs b/src/cargo/core/compiler/mod.rs index df6e00bf0..cd0330913 100644 --- a/src/cargo/core/compiler/mod.rs +++ b/src/cargo/core/compiler/mod.rs @@ -11,6 +11,7 @@ mod layout; mod links; mod output_depinfo; pub mod standard_lib; +mod timings; mod unit; pub mod unit_dependencies; diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs new file mode 100644 index 000000000..76987f85e --- /dev/null +++ b/src/cargo/core/compiler/timings.rs @@ -0,0 +1,861 @@ +//! Timing tracking. +//! +//! This module implements some simple tracking information for timing of how +//! long it takes for different units to compile. 
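+//!
+//! The `JobQueue` drives this module: it creates a `Timings` value when the
+//! build starts, calls `unit_start`, `unit_rmeta_finished`, and
+//! `unit_finished` as units progress, records periodic snapshots via
+//! `mark_concurrency`, and finally calls `finished` to emit the configured
+//! reports (HTML file, stderr summary, and/or JSON messages).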
+use super::{CompileMode, Unit}; +use crate::core::PackageId; +use crate::util::machine_message::{self, Message}; +use crate::util::{CargoResult, Config}; +use std::cmp::max; +use std::collections::HashMap; +use std::fs::File; +use std::io::Write; +use std::time::{Duration, Instant, SystemTime}; + +pub struct Timings<'a, 'cfg> { + config: &'cfg Config, + /// If true, saves an HTML report to disk. + report_html: bool, + /// If true, reports unit completion to stderr. + report_info: bool, + /// If true, emits JSON information with timing information. + report_json: bool, + /// When Cargo started. + start: Instant, + /// A rendered string of when compilation started. + start_str: String, + /// A summary of the root units. + /// + /// Tuples of `(package_description, target_descrptions)`. + root_targets: Vec<(String, Vec)>, + /// The build profile. + profile: String, + /// Total number of fresh units. + total_fresh: u32, + /// Total number of dirty units. + total_dirty: u32, + /// Time tracking for each individual unit. + unit_times: Vec>, + /// Units that are in the process of being built. + /// When they finished, they are moved to `unit_times`. + active: HashMap>, + /// Concurrency-tracking information. This is periodically updated while + /// compilation progresses. + concurrency: Vec, +} + +/// Tracking information for an individual unit. +struct UnitTime<'a> { + unit: Unit<'a>, + /// A string describing the cargo target. + target: String, + /// The time when this unit started as an offset in seconds from `Timings::start`. + start: f64, + /// Total time to build this unit in seconds. + duration: f64, + /// The time when the `.rmeta` file was generated, an offset in seconds + /// from `start`. + rmeta_time: Option, + /// Reverse deps that are freed to run after this unit finished. + unlocked_units: Vec>, + /// Same as `unlocked_units`, but unlocked by rmeta. + unlocked_rmeta_units: Vec>, +} + +/// Periodic concurrency tracking information. +struct Concurrency { + /// Time as an offset in seconds from `Timings::start`. + t: f64, + /// Number of units currently running. + active: usize, + /// Number of units that could run, but are waiting for a jobserver token. + waiting: usize, + /// Number of units that are not yet ready, because they are waiting for + /// dependencies to finish. + inactive: usize, +} + +impl<'a, 'cfg> Timings<'a, 'cfg> { + pub fn new( + config: &'cfg Config, + root_units: &[Unit<'_>], + profile: String, + ) -> Timings<'a, 'cfg> { + let mut root_map: HashMap> = HashMap::new(); + for unit in root_units { + let target_desc = unit.target.description_named(); + root_map + .entry(unit.pkg.package_id()) + .or_default() + .push(target_desc); + } + let root_targets = root_map + .into_iter() + .map(|(pkg_id, targets)| { + let pkg_desc = format!("{} {}", pkg_id.name(), pkg_id.version()); + (pkg_desc, targets) + }) + .collect(); + let start_str = format!("{}", humantime::format_rfc3339_seconds(SystemTime::now())); + let has_report = |what| { + config + .cli_unstable() + .timings + .as_ref() + .map_or(false, |t| t.iter().any(|opt| opt == what)) + }; + + Timings { + config, + report_html: has_report("html"), + report_info: has_report("info"), + report_json: has_report("json"), + start: config.creation_time(), + start_str, + root_targets, + profile, + total_fresh: 0, + total_dirty: 0, + unit_times: Vec::new(), + active: HashMap::new(), + concurrency: Vec::new(), + } + } + + /// Mark that a unit has started running. 
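+    ///
+    /// `id` is the identifier assigned to this unit by the `JobQueue`; the
+    /// same id is used later to match the `unit_rmeta_finished` and
+    /// `unit_finished` calls against the entry recorded here.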
+ pub fn unit_start(&mut self, id: u32, unit: Unit<'a>) { + let mut target = if unit.target.is_lib() && unit.mode == CompileMode::Build { + // Special case for brevity, since most dependencies hit + // this path. + "".to_string() + } else { + format!(" {}", unit.target.description_named()) + }; + match unit.mode { + CompileMode::Test => target.push_str(" (test)"), + CompileMode::Build => {} + CompileMode::Check { test: true } => target.push_str(" (check-test)"), + CompileMode::Check { test: false } => target.push_str(" (check)"), + CompileMode::Bench => target.push_str(" (bench)"), + CompileMode::Doc { .. } => target.push_str(" (doc)"), + CompileMode::Doctest => target.push_str(" (doc test)"), + CompileMode::RunCustomBuild => target.push_str(" (run)"), + } + let unit_time = UnitTime { + unit, + target, + start: d_as_f64(self.start.elapsed()), + duration: 0.0, + rmeta_time: None, + unlocked_units: Vec::new(), + unlocked_rmeta_units: Vec::new(), + }; + assert!(self.active.insert(id, unit_time).is_none()); + } + + /// Mark that the `.rmeta` file as generated. + pub fn unit_rmeta_finished(&mut self, id: u32, unlocked: Vec<&Unit<'a>>) { + if let Some(unit_time) = self.active.get_mut(&id) { + let t = d_as_f64(self.start.elapsed()); + unit_time.rmeta_time = Some(t - unit_time.start); + unit_time.unlocked_rmeta_units.extend(unlocked); + } + } + + /// Mark that a unit has finished running. + pub fn unit_finished(&mut self, id: u32, unlocked: Vec<&Unit<'a>>) { + if let Some(mut unit_time) = self.active.remove(&id) { + let t = d_as_f64(self.start.elapsed()); + unit_time.duration = t - unit_time.start; + unit_time.unlocked_units.extend(unlocked); + if self.report_info { + let msg = format!( + "{}{} in {:.1}s", + unit_time.name_ver(), + unit_time.target, + unit_time.duration + ); + let _ = + self.config + .shell() + .status_with_color("Completed", msg, termcolor::Color::Cyan); + } + if self.report_json { + let msg = machine_message::TimingInfo { + package_id: unit_time.unit.pkg.package_id(), + target: unit_time.unit.target, + mode: unit_time.unit.mode, + duration: unit_time.duration, + rmeta_time: unit_time.rmeta_time, + } + .to_json_string(); + self.config.shell().stdout_println(msg); + } + self.unit_times.push(unit_time); + } + } + + /// This is called periodically to mark the concurrency of internal structures. + pub fn mark_concurrency(&mut self, active: usize, waiting: usize, inactive: usize) { + let c = Concurrency { + t: d_as_f64(self.start.elapsed()), + active, + waiting, + inactive, + }; + self.concurrency.push(c); + } + + /// Mark that a fresh unit was encountered. + pub fn add_fresh(&mut self) { + self.total_fresh += 1; + } + + /// Mark that a dirty unit was encountered. + pub fn add_dirty(&mut self) { + self.total_dirty += 1; + } + + /// Call this when all units are finished. + pub fn finished(&mut self) -> CargoResult<()> { + self.mark_concurrency(0, 0, 0); + self.unit_times + .sort_unstable_by(|a, b| a.start.partial_cmp(&b.start).unwrap()); + if self.report_html { + self.report_html()?; + } + Ok(()) + } + + /// Save HTML report to disk. 
+ pub fn report_html(&self) -> CargoResult<()> { + let duration = self.start.elapsed().as_secs() as u32 + 1; + let mut f = File::create("cargo-timing.html")?; + let roots: Vec<&str> = self + .root_targets + .iter() + .map(|(name, _targets)| name.as_str()) + .collect(); + f.write_all(HTML_TMPL.replace("{ROOTS}", &roots.join(", ")).as_bytes())?; + self.fmt_summary_table(&mut f, duration)?; + let graph_width = self.fmt_pipeline_graph(&mut f, duration)?; + self.fmt_timing_graph(&mut f, graph_width, duration)?; + self.fmt_unit_table(&mut f)?; + f.write_all(HTML_TMPL_FOOT.as_bytes())?; + Ok(()) + } + + /// Render the summary table. + fn fmt_summary_table(&self, f: &mut File, duration: u32) -> CargoResult<()> { + let targets: Vec = self + .root_targets + .iter() + .map(|(name, targets)| format!("{} ({})", name, targets.join(", "))) + .collect(); + let targets = targets.join("
"); + let time_human = if duration > 60 { + format!(" ({}m {:02}s)", duration / 60, duration % 60) + } else { + "".to_string() + }; + let total_time = format!("{}s{}", duration, time_human); + let max_concurrency = self.concurrency.iter().map(|c| c.active).max().unwrap(); + write!( + f, + r#" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table class="my-table summary-table">
+  <tr>
+    <td>Targets:</td><td>{}</td>
+  </tr>
+  <tr>
+    <td>Profile:</td><td>{}</td>
+  </tr>
+  <tr>
+    <td>Fresh units:</td>
+    <td>{}</td>
+  </tr>
+  <tr>
+    <td>Dirty units:</td>
+    <td>{}</td>
+  </tr>
+  <tr>
+    <td>Total units:</td>
+    <td>{}</td>
+  </tr>
+  <tr>
+    <td>Max concurrency:</td>
+    <td>{}</td>
+  </tr>
+  <tr>
+    <td>Build start:</td>
+    <td>{}</td>
+  </tr>
+  <tr>
+    <td>Total time:</td>
+    <td>{}</td>
+  </tr>
+</table>
+"#, + targets, + self.profile, + self.total_fresh, + self.total_dirty, + self.total_fresh + self.total_dirty, + max_concurrency, + self.start_str, + total_time, + )?; + Ok(()) + } + + /// Render the box graph of the units over time. + fn fmt_pipeline_graph(&self, f: &mut File, duration: u32) -> CargoResult { + if self.unit_times.is_empty() { + return Ok(0); + } + const BOX_HEIGHT: u32 = 25; + const Y_TICK_DIST: u32 = BOX_HEIGHT + 2; + + let graph_height = Y_TICK_DIST * self.unit_times.len() as u32; + + let graph_width = draw_graph_axes(f, graph_height, duration)?; + + // Draw Y tick marks. + write!(f, "")?; + + // Draw Y labels. + for n in 0..self.unit_times.len() as u32 { + let y = MARGIN + (Y_TICK_DIST * (n + 1)) - 13; + writeln!( + f, + r#"{}"#, + X_LINE - 4, + y, + n + 1 + )?; + } + + // Draw the graph. + writeln!( + f, + r#""#, + X_LINE, MARGIN, graph_width, graph_height + )?; + + // Create a map that will be used for drawing dependency lines. + let unit_map: HashMap<&Unit<'_>, (f64, u32)> = self + .unit_times + .iter() + .enumerate() + .map(|(i, unit)| { + let y = i as u32 * Y_TICK_DIST + 1; + let x = PX_PER_SEC * unit.start; + (&unit.unit, (x, y)) + }) + .collect(); + + for (i, unit) in self.unit_times.iter().enumerate() { + let (x, y) = unit_map[&unit.unit]; + let width = (PX_PER_SEC * unit.duration).max(1.0); + + let dep_class = format!("dep-{}", i); + writeln!( + f, + " ", + x, y, width, BOX_HEIGHT, dep_class, + )?; + let draw_dep_lines = |f: &mut File, x, units| -> CargoResult<()> { + for unlocked in units { + let (u_x, u_y) = unit_map[&unlocked]; + writeln!( + f, + " ", + dep_class, + x, + y + BOX_HEIGHT / 2, + u_y - y, + u_x - x + 5.0, + )?; + } + Ok(()) + }; + if let Some((rmeta_time, ctime, _cent)) = unit.codegen_time() { + let rmeta_x = x + PX_PER_SEC * rmeta_time; + writeln!( + f, + " ", + rmeta_x, + y, + PX_PER_SEC * ctime, + BOX_HEIGHT, + )?; + draw_dep_lines(f, rmeta_x, &unit.unlocked_rmeta_units)?; + } + writeln!( + f, + " {}{} {:.1}s", + x + 5.0, + y + BOX_HEIGHT / 2 - 5, + unit.unit.pkg.name(), + unit.target, + unit.duration + )?; + draw_dep_lines(f, x + width, &unit.unlocked_units)?; + } + writeln!(f, r#""#)?; + writeln!(f, r#""#)?; + Ok(graph_width) + } + + /// Render the line chart of concurrency information. + fn fmt_timing_graph(&self, f: &mut File, graph_width: u32, duration: u32) -> CargoResult<()> { + if graph_width == 0 || self.concurrency.is_empty() { + return Ok(()); + } + const HEIGHT: u32 = 400; + const AXIS_HEIGHT: u32 = HEIGHT - MARGIN - Y_LINE; + const TOP_MARGIN: u32 = 10; + const GRAPH_HEIGHT: u32 = AXIS_HEIGHT - TOP_MARGIN; + + draw_graph_axes(f, AXIS_HEIGHT, duration)?; + + // Draw Y tick marks and labels. + write!(f, "{}\n", + X_LINE - 10, + y + 5, + (n + 1) * step + )); + } + writeln!(f, "\"/>")?; + f.write_all(labels.as_bytes())?; + + // Label the Y axis. + let label_y = (HEIGHT - Y_LINE) / 2; + writeln!( + f, + "# Units", + label_y, label_y + )?; + + // Draw the graph. + writeln!( + f, + r#""#, + X_LINE, + MARGIN, + graph_width, + GRAPH_HEIGHT + TOP_MARGIN + )?; + + let coord = |t, v| { + ( + f64::from(graph_width) * (t / f64::from(duration)), + f64::from(TOP_MARGIN) + f64::from(GRAPH_HEIGHT) * (1.0 - (v as f64 / max_v as f64)), + ) + }; + let mut draw_line = |class, key: fn(&Concurrency) -> usize| { + write!(f, "", class) + }; + + draw_line("line-inactive", |c| c.inactive)?; + draw_line("line-waiting", |c| c.waiting)?; + draw_line("line-active", |c| c.active)?; + + // Draw a legend. 
+ write!( + f, + r#" + + + + Waiting + + + Active + + + Inactive + +"#, + graph_width - 120 + )?; + + writeln!(f, "")?; + writeln!(f, "")?; + Ok(()) + } + + /// Render the table of all units. + fn fmt_unit_table(&self, f: &mut File) -> CargoResult<()> { + write!( + f, + r#" + + + + + + + + + + +"# + )?; + let mut units: Vec<&UnitTime<'_>> = self.unit_times.iter().collect(); + units.sort_unstable_by(|a, b| b.duration.partial_cmp(&a.duration).unwrap()); + for (i, unit) in units.iter().enumerate() { + let codegen = match unit.codegen_time() { + None => "".to_string(), + Some((_rt, ctime, cent)) => format!("{:.1}s ({:.0}%)", ctime, cent), + }; + write!( + f, + r#" + + + + + + +"#, + i + 1, + unit.name_ver(), + unit.target, + unit.duration, + codegen + )?; + } + write!(f, "\n
UnitTotalCodegen
{}.{}{}{:.1}s{}
\n")?; + Ok(()) + } +} + +impl<'a> UnitTime<'a> { + /// Returns the codegen time as (rmeta_time, codegen_time, percent of total) + fn codegen_time(&self) -> Option<(f64, f64, f64)> { + self.rmeta_time.map(|rmeta_time| { + let ctime = self.duration - rmeta_time; + let cent = (ctime / self.duration) * 100.0; + (rmeta_time, ctime, cent) + }) + } + + fn name_ver(&self) -> String { + format!("{} v{}", self.unit.pkg.name(), self.unit.pkg.version()) + } +} + +// Replace with as_secs_f64 when 1.38 hits stable. +fn d_as_f64(d: Duration) -> f64 { + (d.as_secs() as f64) + f64::from(d.subsec_nanos()) / 1_000_000_000.0 +} + +fn round_up(n: u32, step: u32) -> u32 { + if n % step == 0 { + n + } else { + (step - n % step) + n + } +} + +/// Determine the `(step, max_value)` of the number of ticks along an axis. +fn split_ticks(n: u32, max_ticks: u32) -> (u32, u32) { + if n <= max_ticks { + (1, n) + } else if n <= max_ticks * 2 { + (2, round_up(n, 2)) + } else if n <= max_ticks * 4 { + (4, round_up(n, 4)) + } else if n <= max_ticks * 5 { + (5, round_up(n, 5)) + } else { + let mut step = 10; + loop { + let top = round_up(n, step); + if top <= max_ticks * step { + break (step, top); + } + step += 10; + } + } +} + +const X_LINE: u32 = 50; +const MARGIN: u32 = 5; +const Y_LINE: u32 = 35; // relative to bottom +const PX_PER_SEC: f64 = 20.0; +const MIN_TICK_DIST: u32 = 50; + +fn draw_graph_axes(f: &mut File, graph_height: u32, duration: u32) -> CargoResult { + let graph_width = PX_PER_SEC as u32 * duration; + let width = graph_width + X_LINE + 30; + let height = graph_height + MARGIN + Y_LINE; + writeln!( + f, + r#""#, + width, height + )?; + + // Draw axes. + write!( + f, + "{}s\n", + x, + height - Y_LINE + 20, + (n + 1) * step + )); + } + + writeln!(f, "\" />")?; + f.write_all(labels.as_bytes())?; + + // Draw vertical lines. + write!(f, "")?; + + Ok(graph_width) +} + +static HTML_TMPL: &str = r#" + + + Cargo Build Timings — {ROOTS} + + + + + +

<h1>Cargo Build Timings</h1>

+"#; + +static HTML_TMPL_FOOT: &str = r#" + + + +"#; diff --git a/src/cargo/core/features.rs b/src/cargo/core/features.rs index f52e3097c..0e0ffb3fe 100644 --- a/src/cargo/core/features.rs +++ b/src/cargo/core/features.rs @@ -335,6 +335,7 @@ pub struct CliUnstable { pub cache_messages: bool, pub binary_dep_depinfo: bool, pub build_std: Option>, + pub timings: Option>, } impl CliUnstable { @@ -367,6 +368,13 @@ impl CliUnstable { } } + fn parse_timings(value: Option<&str>) -> Vec { + match value { + None => vec!["html".to_string(), "info".to_string()], + Some(v) => v.split(',').map(|s| s.to_string()).collect(), + } + } + match k { "print-im-a-teapot" => self.print_im_a_teapot = parse_bool(v)?, "unstable-options" => self.unstable_options = true, @@ -384,6 +392,7 @@ impl CliUnstable { "build-std" => { self.build_std = Some(crate::core::compiler::standard_lib::parse_unstable_flag(v)) } + "timings" => self.timings = Some(parse_timings(v)), _ => failure::bail!("unknown `-Z` flag specified: {}", k), } diff --git a/src/cargo/core/manifest.rs b/src/cargo/core/manifest.rs index 81593ea93..96561d2cc 100644 --- a/src/cargo/core/manifest.rs +++ b/src/cargo/core/manifest.rs @@ -926,6 +926,19 @@ impl Target { self.doc = doc; self } + + pub fn description_named(&self) -> String { + match self.kind { + TargetKind::Lib(..) => "lib".to_string(), + TargetKind::Bin => format!("bin \"{}\"", self.name()), + TargetKind::Test => format!("test \"{}\"", self.name()), + TargetKind::Bench => format!("bench \"{}\"", self.name()), + TargetKind::ExampleLib(..) | TargetKind::ExampleBin => { + format!("example \"{}\"", self.name()) + } + TargetKind::CustomBuild => "custom-build".to_string(), + } + } } impl fmt::Display for Target { diff --git a/src/cargo/core/shell.rs b/src/cargo/core/shell.rs index 742b42a3e..dfa195943 100644 --- a/src/cargo/core/shell.rs +++ b/src/cargo/core/shell.rs @@ -111,6 +111,13 @@ impl Shell { } } + pub fn stdout_println(&mut self, message: impl fmt::Display) { + if self.needs_clear { + self.err_erase_line(); + } + println!("{}", message); + } + /// Sets whether the next print should clear the current line. pub fn set_needs_clear(&mut self, needs_clear: bool) { self.needs_clear = needs_clear; diff --git a/src/cargo/lib.rs b/src/cargo/lib.rs index ac36549a8..3adb0c1e9 100644 --- a/src/cargo/lib.rs +++ b/src/cargo/lib.rs @@ -16,6 +16,7 @@ #![allow(clippy::too_many_arguments)] // large project #![allow(clippy::type_complexity)] // there's an exceptionally complex type #![allow(clippy::wrong_self_convention)] // perhaps `Rc` should be special-cased in Clippy? +#![allow(clippy::write_with_newline)] // too pedantic #![warn(clippy::needless_borrow)] #![warn(clippy::redundant_clone)] // Unit is now interned, and would probably be better as pass-by-copy, but diff --git a/src/cargo/util/dependency_queue.rs b/src/cargo/util/dependency_queue.rs index 9a2184080..db8f535f4 100644 --- a/src/cargo/util/dependency_queue.rs +++ b/src/cargo/util/dependency_queue.rs @@ -159,16 +159,26 @@ impl DependencyQueue { /// Calling this function indicates that the `node` has produced `edge`. All /// remaining work items which only depend on this node/edge pair are now /// candidates to start their job. - pub fn finish(&mut self, node: &N, edge: &E) { + /// + /// Returns the nodes that are now allowed to be dequeued as a result of + /// finishing this node. 
+ pub fn finish(&mut self, node: &N, edge: &E) -> Vec<&N> { + // hashset let reverse_deps = self.reverse_dep_map.get(node).and_then(|map| map.get(edge)); let reverse_deps = match reverse_deps { Some(deps) => deps, - None => return, + None => return Vec::new(), }; let key = (node.clone(), edge.clone()); + let mut result = Vec::new(); for dep in reverse_deps.iter() { - assert!(self.dep_map.get_mut(dep).unwrap().0.remove(&key)); + let edges = &mut self.dep_map.get_mut(dep).unwrap().0; + assert!(edges.remove(&key)); + if edges.is_empty() { + result.push(dep); + } } + result } } diff --git a/src/cargo/util/machine_message.rs b/src/cargo/util/machine_message.rs index b52e3b430..caa0833cc 100644 --- a/src/cargo/util/machine_message.rs +++ b/src/cargo/util/machine_message.rs @@ -4,7 +4,7 @@ use serde::ser; use serde::Serialize; use serde_json::{self, json, value::RawValue}; -use crate::core::{PackageId, Target}; +use crate::core::{compiler::CompileMode, PackageId, Target}; pub trait Message: ser::Serialize { fn reason(&self) -> &str; @@ -73,3 +73,19 @@ impl<'a> Message for BuildScript<'a> { "build-script-executed" } } + +#[derive(Serialize)] +pub struct TimingInfo<'a> { + pub package_id: PackageId, + pub target: &'a Target, + pub mode: CompileMode, + pub duration: f64, + #[serde(skip_serializing_if = "Option::is_none")] + pub rmeta_time: Option, +} + +impl<'a> Message for TimingInfo<'a> { + fn reason(&self) -> &str { + "timing-info" + } +} diff --git a/src/doc/src/reference/unstable.md b/src/doc/src/reference/unstable.md index c2d52bea0..18a765c22 100644 --- a/src/doc/src/reference/unstable.md +++ b/src/doc/src/reference/unstable.md @@ -336,3 +336,23 @@ Also if you'd like to see a feature that's not yet implemented and/or if something doesn't quite work the way you'd like it to, feel free to check out the [issue tracker](https://github.com/rust-lang/wg-cargo-std-aware/issues) of the tracking repository, and if it's not there please file a new issue! + +### timings + +The `timings` feature gives some information about how long each compilation +takes, and tracks concurrency information over time. + +``` +cargo +nightly build -Z timings +``` + +The `-Ztimings` flag can optionally take a comma-separated list of the +following values: + +- `html` — Saves a file called `cargo-timing.html` to the current directory + with a report of the compilation. +- `info` — Displays a message to stdout after each compilation finishes with + how long it took. +- `json` — Emits some JSON information about timing information. + +The default if none are specified is `html,info`. From aca3274f6b496fb568be2bbdc5183570c2dea5b2 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Thu, 12 Sep 2019 13:08:38 -0700 Subject: [PATCH 02/15] Add rustc info to timing output. 
--- src/cargo/core/compiler/job_queue.rs | 8 +-- src/cargo/core/compiler/timings.rs | 88 ++++++++++++++++++---------- 2 files changed, 57 insertions(+), 39 deletions(-) diff --git a/src/cargo/core/compiler/job_queue.rs b/src/cargo/core/compiler/job_queue.rs index d56bd6e2d..468a76b13 100644 --- a/src/cargo/core/compiler/job_queue.rs +++ b/src/cargo/core/compiler/job_queue.rs @@ -133,13 +133,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { pub fn new(bcx: &BuildContext<'a, 'cfg>, root_units: &[Unit<'a>]) -> JobQueue<'a, 'cfg> { let (tx, rx) = channel(); let progress = Progress::with_style("Building", ProgressStyle::Ratio, bcx.config); - let profile = if bcx.build_config.release { - "release" - } else { - "dev" - } - .to_string(); - let timings = Timings::new(bcx.config, root_units, profile); + let timings = Timings::new(bcx, root_units); JobQueue { queue: DependencyQueue::new(), tx, diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs index 76987f85e..9bc5474cf 100644 --- a/src/cargo/core/compiler/timings.rs +++ b/src/cargo/core/compiler/timings.rs @@ -3,6 +3,7 @@ //! This module implements some simple tracking information for timing of how //! long it takes for different units to compile. use super::{CompileMode, Unit}; +use crate::core::compiler::BuildContext; use crate::core::PackageId; use crate::util::machine_message::{self, Message}; use crate::util::{CargoResult, Config}; @@ -24,6 +25,8 @@ pub struct Timings<'a, 'cfg> { start: Instant, /// A rendered string of when compilation started. start_str: String, + /// Some information to display about rustc. + rustc_info: String, /// A summary of the root units. /// /// Tuples of `(package_description, target_descrptions)`. @@ -76,11 +79,7 @@ struct Concurrency { } impl<'a, 'cfg> Timings<'a, 'cfg> { - pub fn new( - config: &'cfg Config, - root_units: &[Unit<'_>], - profile: String, - ) -> Timings<'a, 'cfg> { + pub fn new(bcx: &BuildContext<'a, 'cfg>, root_units: &[Unit<'_>]) -> Timings<'a, 'cfg> { let mut root_map: HashMap> = HashMap::new(); for unit in root_units { let target_desc = unit.target.description_named(); @@ -98,20 +97,28 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { .collect(); let start_str = format!("{}", humantime::format_rfc3339_seconds(SystemTime::now())); let has_report = |what| { - config + bcx.config .cli_unstable() .timings .as_ref() .map_or(false, |t| t.iter().any(|opt| opt == what)) }; + let rustc_info = render_rustc_info(bcx); + let profile = if bcx.build_config.release { + "release" + } else { + "dev" + } + .to_string(); Timings { - config, + config: bcx.config, report_html: has_report("html"), report_info: has_report("info"), report_json: has_report("json"), - start: config.creation_time(), + start: bcx.config.creation_time(), start_str, + rustc_info, root_targets, profile, total_fresh: 0, @@ -270,30 +277,27 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { Profile:{} - - Fresh units: - {} - - - Dirty units: - {} - - - Total units: - {} - - - Max concurrency: - {} - - - Build start: - {} - - - Total time: - {} - + + Fresh units:{} + + + Dirty units:{} + + + Total units:{} + + + Max concurrency:{} + + + Build start:{} + + + Total time:{} + + + rustc:{} + "#, @@ -305,6 +309,7 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { max_concurrency, self.start_str, total_time, + self.rustc_info, )?; Ok(()) } @@ -674,6 +679,24 @@ fn draw_graph_axes(f: &mut File, graph_height: u32, duration: u32) -> CargoResul Ok(graph_width) } +fn render_rustc_info(bcx: &BuildContext<'_, '_>) -> String { + let version = bcx + .rustc + .verbose_version 
+ .lines() + .next() + .expect("rustc version"); + let requested_target = bcx + .build_config + .requested_target + .as_ref() + .map_or("Host", String::as_str); + format!( + "{}
<br>Host: {}<br>
Target: {}", + version, bcx.rustc.host, requested_target + ) +} + static HTML_TMPL: &str = r#" @@ -801,6 +824,7 @@ h1 { } .summary-table td:first-child { + vertical-align: top; text-align: right; } From 674150e47333ed33ab74aa8ee5fa91aa75bbeb6c Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Fri, 13 Sep 2019 09:20:23 -0700 Subject: [PATCH 03/15] Move hardlink_or_copy to a common location so it can be reused. --- src/cargo/core/compiler/mod.rs | 55 ++----------------------------- src/cargo/util/paths.rs | 60 ++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 52 deletions(-) diff --git a/src/cargo/core/compiler/mod.rs b/src/cargo/core/compiler/mod.rs index cd0330913..52cdcb822 100644 --- a/src/cargo/core/compiler/mod.rs +++ b/src/cargo/core/compiler/mod.rs @@ -19,13 +19,12 @@ use std::env; use std::ffi::{OsStr, OsString}; use std::fs::{self, File}; use std::io::Write; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::sync::Arc; use failure::Error; use lazycell::LazyCell; use log::debug; -use same_file::is_same_file; use serde::Serialize; pub use self::build_config::{BuildConfig, CompileMode, MessageFormat}; @@ -443,12 +442,12 @@ fn link_targets<'a, 'cfg>( } }; destinations.push(dst.clone()); - hardlink_or_copy(src, dst)?; + paths::link_or_copy(src, dst)?; if let Some(ref path) = output.export_path { let export_dir = export_dir.as_ref().unwrap(); paths::create_dir_all(export_dir)?; - hardlink_or_copy(src, path)?; + paths::link_or_copy(src, path)?; } } @@ -477,54 +476,6 @@ fn link_targets<'a, 'cfg>( })) } -/// Hardlink (file) or symlink (dir) src to dst if possible, otherwise copy it. -fn hardlink_or_copy(src: &Path, dst: &Path) -> CargoResult<()> { - debug!("linking {} to {}", src.display(), dst.display()); - if is_same_file(src, dst).unwrap_or(false) { - return Ok(()); - } - - // NB: we can't use dst.exists(), as if dst is a broken symlink, - // dst.exists() will return false. This is problematic, as we still need to - // unlink dst in this case. symlink_metadata(dst).is_ok() will tell us - // whether dst exists *without* following symlinks, which is what we want. - if fs::symlink_metadata(dst).is_ok() { - paths::remove_file(&dst)?; - } - - let link_result = if src.is_dir() { - #[cfg(target_os = "redox")] - use std::os::redox::fs::symlink; - #[cfg(unix)] - use std::os::unix::fs::symlink; - #[cfg(windows)] - use std::os::windows::fs::symlink_dir as symlink; - - let dst_dir = dst.parent().unwrap(); - let src = if src.starts_with(dst_dir) { - src.strip_prefix(dst_dir).unwrap() - } else { - src - }; - symlink(src, dst) - } else { - fs::hard_link(src, dst) - }; - link_result - .or_else(|err| { - debug!("link failed {}. falling back to fs::copy", err); - fs::copy(src, dst).map(|_| ()) - }) - .chain_err(|| { - format!( - "failed to link or copy `{}` to `{}`", - src.display(), - dst.display() - ) - })?; - Ok(()) -} - // For all plugin dependencies, add their -L paths (now calculated and present // in `build_script_outputs`) to the dynamic library load path for the command // to execute. diff --git a/src/cargo/util/paths.rs b/src/cargo/util/paths.rs index 1e400f130..b2327e325 100644 --- a/src/cargo/util/paths.rs +++ b/src/cargo/util/paths.rs @@ -340,3 +340,63 @@ fn set_not_readonly(p: &Path) -> io::Result { fs::set_permissions(p, perms)?; Ok(true) } + +/// Hardlink (file) or symlink (dir) src to dst if possible, otherwise copy it. +/// +/// If the destination already exists, it is removed before linking. 
+pub fn link_or_copy(src: impl AsRef, dst: impl AsRef) -> CargoResult<()> { + let src = src.as_ref(); + let dst = dst.as_ref(); + _link_or_copy(src, dst) +} + +fn _link_or_copy(src: &Path, dst: &Path) -> CargoResult<()> { + log::debug!("linking {} to {}", src.display(), dst.display()); + if same_file::is_same_file(src, dst).unwrap_or(false) { + return Ok(()); + } + + // NB: we can't use dst.exists(), as if dst is a broken symlink, + // dst.exists() will return false. This is problematic, as we still need to + // unlink dst in this case. symlink_metadata(dst).is_ok() will tell us + // whether dst exists *without* following symlinks, which is what we want. + if fs::symlink_metadata(dst).is_ok() { + remove_file(&dst)?; + } + + let link_result = if src.is_dir() { + #[cfg(target_os = "redox")] + use std::os::redox::fs::symlink; + #[cfg(unix)] + use std::os::unix::fs::symlink; + #[cfg(windows)] + // FIXME: This should probably panic or have a copy fallback. Symlinks + // are not supported in all windows environments. Currently symlinking + // is only used for .dSYM directories on macos, but this shouldn't be + // accidentally relied upon. + use std::os::windows::fs::symlink_dir as symlink; + + let dst_dir = dst.parent().unwrap(); + let src = if src.starts_with(dst_dir) { + src.strip_prefix(dst_dir).unwrap() + } else { + src + }; + symlink(src, dst) + } else { + fs::hard_link(src, dst) + }; + link_result + .or_else(|err| { + log::debug!("link failed {}. falling back to fs::copy", err); + fs::copy(src, dst).map(|_| ()) + }) + .chain_err(|| { + format!( + "failed to link or copy `{}` to `{}`", + src.display(), + dst.display() + ) + })?; + Ok(()) +} From 6f353b5934e7574cd0fd9d079cca737c9da59553 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Fri, 13 Sep 2019 09:20:51 -0700 Subject: [PATCH 04/15] Make the cargo-timing.html filename unique per run. --- src/cargo/core/compiler/timings.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs index 9bc5474cf..499de4b5c 100644 --- a/src/cargo/core/compiler/timings.rs +++ b/src/cargo/core/compiler/timings.rs @@ -6,7 +6,7 @@ use super::{CompileMode, Unit}; use crate::core::compiler::BuildContext; use crate::core::PackageId; use crate::util::machine_message::{self, Message}; -use crate::util::{CargoResult, Config}; +use crate::util::{paths, CargoResult, Config}; use std::cmp::max; use std::collections::HashMap; use std::fs::File; @@ -237,7 +237,9 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { /// Save HTML report to disk. 
pub fn report_html(&self) -> CargoResult<()> { let duration = self.start.elapsed().as_secs() as u32 + 1; - let mut f = File::create("cargo-timing.html")?; + let timestamp = self.start_str.replace(&['-', ':'][..], ""); + let filename = format!("cargo-timing-{}.html", timestamp); + let mut f = File::create(&filename)?; let roots: Vec<&str> = self .root_targets .iter() @@ -249,6 +251,18 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { self.fmt_timing_graph(&mut f, graph_width, duration)?; self.fmt_unit_table(&mut f)?; f.write_all(HTML_TMPL_FOOT.as_bytes())?; + drop(f); + let msg = format!( + "report saved to {}", + std::env::current_dir() + .unwrap_or_default() + .join(&filename) + .display() + ); + paths::link_or_copy(&filename, "cargo-timing.html")?; + self.config + .shell() + .status_with_color("Timing", msg, termcolor::Color::Cyan)?; Ok(()) } From 06ed7a4ade384bc38823b47034a95c46782f646b Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Fri, 13 Sep 2019 11:32:15 -0700 Subject: [PATCH 05/15] Add some asserts. --- src/cargo/core/compiler/timings.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs index 499de4b5c..cfe07a739 100644 --- a/src/cargo/core/compiler/timings.rs +++ b/src/cargo/core/compiler/timings.rs @@ -162,18 +162,24 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { /// Mark that the `.rmeta` file as generated. pub fn unit_rmeta_finished(&mut self, id: u32, unlocked: Vec<&Unit<'a>>) { + // `id` may not always be active. "fresh" units unconditionally + // generate `Message::Finish`, but this active map only tracks dirty + // units. if let Some(unit_time) = self.active.get_mut(&id) { let t = d_as_f64(self.start.elapsed()); unit_time.rmeta_time = Some(t - unit_time.start); + assert!(unit_time.unlocked_rmeta_units.is_empty()); unit_time.unlocked_rmeta_units.extend(unlocked); } } /// Mark that a unit has finished running. pub fn unit_finished(&mut self, id: u32, unlocked: Vec<&Unit<'a>>) { + // See note above in `unit_rmeta_finished`, this may not always be active. if let Some(mut unit_time) = self.active.remove(&id) { let t = d_as_f64(self.start.elapsed()); unit_time.duration = t - unit_time.start; + assert!(unit_time.unlocked_units.is_empty()); unit_time.unlocked_units.extend(unlocked); if self.report_info { let msg = format!( From da0706165c1c8b2f59908fb4c1df0036411b616b Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Fri, 13 Sep 2019 11:33:43 -0700 Subject: [PATCH 06/15] Remove format! --- src/cargo/core/compiler/timings.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs index cfe07a739..892ba9bba 100644 --- a/src/cargo/core/compiler/timings.rs +++ b/src/cargo/core/compiler/timings.rs @@ -95,7 +95,7 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { (pkg_desc, targets) }) .collect(); - let start_str = format!("{}", humantime::format_rfc3339_seconds(SystemTime::now())); + let start_str = humantime::format_rfc3339_seconds(SystemTime::now()).to_string(); let has_report = |what| { bcx.config .cli_unstable() From 8bfae2d999d29c49e4984d5ad39ea9f1b312a2f4 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Fri, 13 Sep 2019 14:42:16 -0700 Subject: [PATCH 07/15] Make timings optional. 
--- src/cargo/core/compiler/job_queue.rs | 36 +++++++++++++++++++--------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/src/cargo/core/compiler/job_queue.rs b/src/cargo/core/compiler/job_queue.rs index 468a76b13..0c8ec7be0 100644 --- a/src/cargo/core/compiler/job_queue.rs +++ b/src/cargo/core/compiler/job_queue.rs @@ -40,7 +40,7 @@ pub struct JobQueue<'a, 'cfg> { is_release: bool, progress: Progress<'cfg>, next_id: u32, - timings: Timings<'a, 'cfg>, + timings: Option>, } pub struct JobState<'a> { @@ -133,7 +133,10 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { pub fn new(bcx: &BuildContext<'a, 'cfg>, root_units: &[Unit<'a>]) -> JobQueue<'a, 'cfg> { let (tx, rx) = channel(); let progress = Progress::with_style("Building", ProgressStyle::Ratio, bcx.config); - let timings = Timings::new(bcx, root_units); + let timings = match bcx.config.cli_unstable().timings { + Some(..) => Some(Timings::new(bcx, root_units)), + None => None, + }; JobQueue { queue: DependencyQueue::new(), tx, @@ -320,8 +323,9 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { // to the jobserver itself. tokens.truncate(self.active.len() - 1); - self.timings - .mark_concurrency(self.active.len(), queue.len(), self.queue.len()); + if let Some(t) = &mut self.timings { + t.mark_concurrency(self.active.len(), queue.len(), self.queue.len()); + } // Drain all events at once to avoid displaying the progress bar // unnecessarily. @@ -340,7 +344,9 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { .config .shell() .verbose(|c| c.status("Running", &cmd))?; - self.timings.unit_start(id, self.active[&id]); + if let Some(t) = &mut self.timings { + t.unit_start(id, self.active[&id]); + } } Message::BuildPlanMsg(module_name, cmd, filenames) => { plan.update(&module_name, &cmd, &filenames)?; @@ -432,7 +438,9 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { if !cx.bcx.build_config.build_plan { cx.bcx.config.shell().status("Finished", message)?; } - self.timings.finished()?; + if let Some(t) = &mut self.timings { + t.finished()?; + } Ok(()) } else { debug!("queue: {:#?}", self.queue); @@ -528,11 +536,15 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { match fresh { Freshness::Fresh => { - self.timings.add_fresh(); + if let Some(t) = &mut self.timings { + t.add_fresh(); + } doit() } Freshness::Dirty => { - self.timings.add_dirty(); + if let Some(t) = &mut self.timings { + t.add_dirty(); + } scope.spawn(move |_| doit()); } } @@ -579,9 +591,11 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { self.emit_warnings(None, unit, cx)?; } let unlocked = self.queue.finish(unit, &artifact); - match artifact { - Artifact::All => self.timings.unit_finished(id, unlocked), - Artifact::Metadata => self.timings.unit_rmeta_finished(id, unlocked), + if let Some(t) = &mut self.timings { + match artifact { + Artifact::All => t.unit_finished(id, unlocked), + Artifact::Metadata => t.unit_rmeta_finished(id, unlocked), + } } Ok(()) } From 911a9b090483ea0b4007ff65d316c54339160369 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Sat, 14 Sep 2019 16:18:58 -0700 Subject: [PATCH 08/15] Always emit rmeta when timings are enable to visualize codegen everywhere. 
--- src/cargo/core/compiler/job_queue.rs | 1 - src/cargo/core/compiler/mod.rs | 8 +++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/cargo/core/compiler/job_queue.rs b/src/cargo/core/compiler/job_queue.rs index 0c8ec7be0..f25630e2d 100644 --- a/src/cargo/core/compiler/job_queue.rs +++ b/src/cargo/core/compiler/job_queue.rs @@ -121,7 +121,6 @@ impl<'a> JobState<'a> { /// This should only be called once because a metadata file can only be /// produced once! pub fn rmeta_produced(&self) { - assert!(self.rmeta_required.get()); self.rmeta_required.set(false); let _ = self .tx diff --git a/src/cargo/core/compiler/mod.rs b/src/cargo/core/compiler/mod.rs index 52cdcb822..447cd4332 100644 --- a/src/cargo/core/compiler/mod.rs +++ b/src/cargo/core/compiler/mod.rs @@ -698,10 +698,11 @@ fn add_error_format_and_color( // prettily, and then when parsing JSON messages from rustc we need to // internally understand that we should extract the `rendered` field and // present it if we can. - if cx.bcx.build_config.cache_messages() || pipelined { + let wants_artifacts = pipelined || cx.bcx.config.cli_unstable().timings.is_some(); + if cx.bcx.build_config.cache_messages() || wants_artifacts { cmd.arg("--error-format=json"); let mut json = String::from("--json=diagnostic-rendered-ansi"); - if pipelined { + if wants_artifacts { json.push_str(",artifacts"); } match cx.bcx.build_config.message_format { @@ -1075,7 +1076,8 @@ struct OutputOptions { impl OutputOptions { fn new<'a>(cx: &Context<'a, '_>, unit: &Unit<'a>) -> OutputOptions { - let look_for_metadata_directive = cx.rmeta_required(unit); + let look_for_metadata_directive = + cx.rmeta_required(unit) || cx.bcx.config.cli_unstable().timings.is_some(); let color = cx.bcx.config.shell().supports_color(); let cache_cell = if cx.bcx.build_config.cache_messages() { let path = cx.files().message_cache_path(unit); From 095f154f3753a1e9c724ec6b6d0abc6ce4ef2dc6 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Sat, 14 Sep 2019 16:27:41 -0700 Subject: [PATCH 09/15] Give build scripts a different color. --- src/cargo/core/compiler/timings.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs index 892ba9bba..b44181936 100644 --- a/src/cargo/core/compiler/timings.rs +++ b/src/cargo/core/compiler/timings.rs @@ -390,11 +390,16 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { let width = (PX_PER_SEC * unit.duration).max(1.0); let dep_class = format!("dep-{}", i); + let class = if unit.unit.mode.is_run_custom_build() { + "unit-block-custom" + } else { + "unit-block" + }; writeln!( f, " ", - x, y, width, BOX_HEIGHT, dep_class, + rx=\"3\" class=\"{}\" data-dep-class=\"{}\" />", + x, y, width, BOX_HEIGHT, class, dep_class, )?; let draw_dep_lines = |f: &mut File, x, units| -> CargoResult<()> { for unlocked in units { @@ -743,6 +748,10 @@ h1 { fill: #95cce8; } +.unit-block-custom { + fill: #f0b165; +} + .unit-block-codegen { fill: #aa95e8; pointer-events: none; From 77a47b3e3282c549192e96e71c4f535baccb9e0b Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Mon, 16 Sep 2019 15:19:10 -0700 Subject: [PATCH 10/15] Switch rendering to canvas. Also add some more features. 
--- src/cargo/core/compiler/timings.js | 437 +++++++++++++++++++++++++ src/cargo/core/compiler/timings.rs | 499 +++++++---------------------- 2 files changed, 550 insertions(+), 386 deletions(-) create mode 100644 src/cargo/core/compiler/timings.js diff --git a/src/cargo/core/compiler/timings.js b/src/cargo/core/compiler/timings.js new file mode 100644 index 000000000..e8c1b0794 --- /dev/null +++ b/src/cargo/core/compiler/timings.js @@ -0,0 +1,437 @@ +// Position of the vertical axis. +const X_LINE = 50; +// General-use margin size. +const MARGIN = 5; +// Position of the horizontal axis, relative to the bottom. +const Y_LINE = 35; +// Minimum distance between time tick labels. +const MIN_TICK_DIST = 50; +// Radius for rounded rectangle corners. +const RADIUS = 3; +// Height of unit boxes. +const BOX_HEIGHT = 25; +// Distance between Y tick marks on the unit graph. +const Y_TICK_DIST = BOX_HEIGHT + 2; +// Rects used for mouseover detection. +// Objects of {x, y, x2, y2, i} where `i` is the index into UNIT_DATA. +let HIT_BOXES = []; +// Index into UNIT_DATA of the last unit hovered over by mouse. +let LAST_HOVER = null; +// Key is unit index, value is {x, y, width, rmeta_x} of the box. +let UNIT_COORDS = {}; +// Map of unit index to the index it was unlocked by. +let REVERSE_UNIT_DEPS = {}; +let REVERSE_UNIT_RMETA_DEPS = {}; +for (let n=0; n unit.duration >= min_time); + + const graph_height = Y_TICK_DIST * units.length; + const {ctx, width, height} = draw_graph_axes('pipeline-graph', graph_height); + const container = document.getElementById('pipeline-container'); + container.style.width = width; + container.style.height = height; + const PX_PER_SEC = document.getElementById('scale').valueAsNumber; + + // Canvas for hover highlights. This is a separate layer to improve performance. + const linectx = setup_canvas('pipeline-graph-lines', width, height); + linectx.clearRect(0, 0, width, height); + + // Draw Y tick marks. + for (let n=1; n= box.x && event.offsetX <= box.x2 && + event.offsetY >= box.y && event.offsetY <= box.y2) { + return box; + } + } +} + +function pipeline_mousemove(event) { + // Highlight dependency lines on mouse hover. + let box = pipeline_mouse_hit(event); + if (box) { + if (box.i != LAST_HOVER) { + LAST_HOVER = box.i; + let g = document.getElementById('pipeline-graph-lines'); + let ctx = g.getContext('2d'); + ctx.clearRect(0, 0, g.width, g.height); + ctx.save(); + ctx.translate(X_LINE, MARGIN); + ctx.lineWidth = 2; + draw_dep_lines(ctx, box.i, true); + + if (box.i in REVERSE_UNIT_DEPS) { + const dep_unit = REVERSE_UNIT_DEPS[box.i]; + if (dep_unit in UNIT_COORDS) { + const {x, y, rmeta_x} = UNIT_COORDS[dep_unit]; + draw_one_dep_line(ctx, x, y, box.i, true); + } + } + if (box.i in REVERSE_UNIT_RMETA_DEPS) { + const dep_unit = REVERSE_UNIT_RMETA_DEPS[box.i]; + if (dep_unit in UNIT_COORDS) { + const {x, y, rmeta_x} = UNIT_COORDS[dep_unit]; + draw_one_dep_line(ctx, rmeta_x, y, box.i, true); + } + } + ctx.restore(); + } + } +} + +render_pipeline_graph(); +render_timing_graph(); + +// Set up and handle controls. 
+{ + const range = document.getElementById('min-unit-time'); + const time_output = document.getElementById('min-unit-time-output'); + time_output.innerHTML = `${range.value}s`; + range.oninput = event => { + time_output.innerHTML = `${range.value}s`; + render_pipeline_graph(); + }; + + const scale = document.getElementById('scale'); + const scale_output = document.getElementById('scale-output'); + scale_output.innerHTML = `${scale.value}`; + scale.oninput = event => { + scale_output.innerHTML = `${scale.value}`; + render_pipeline_graph(); + render_timing_graph(); + }; +} diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs index b44181936..094c4cd53 100644 --- a/src/cargo/core/compiler/timings.rs +++ b/src/cargo/core/compiler/timings.rs @@ -7,10 +7,9 @@ use crate::core::compiler::BuildContext; use crate::core::PackageId; use crate::util::machine_message::{self, Message}; use crate::util::{paths, CargoResult, Config}; -use std::cmp::max; use std::collections::HashMap; use std::fs::File; -use std::io::Write; +use std::io::{BufWriter, Write}; use std::time::{Duration, Instant, SystemTime}; pub struct Timings<'a, 'cfg> { @@ -66,6 +65,7 @@ struct UnitTime<'a> { } /// Periodic concurrency tracking information. +#[derive(serde::Serialize)] struct Concurrency { /// Time as an offset in seconds from `Timings::start`. t: f64, @@ -245,18 +245,32 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { let duration = self.start.elapsed().as_secs() as u32 + 1; let timestamp = self.start_str.replace(&['-', ':'][..], ""); let filename = format!("cargo-timing-{}.html", timestamp); - let mut f = File::create(&filename)?; + let mut f = BufWriter::new(File::create(&filename)?); let roots: Vec<&str> = self .root_targets .iter() .map(|(name, _targets)| name.as_str()) .collect(); f.write_all(HTML_TMPL.replace("{ROOTS}", &roots.join(", ")).as_bytes())?; - self.fmt_summary_table(&mut f, duration)?; - let graph_width = self.fmt_pipeline_graph(&mut f, duration)?; - self.fmt_timing_graph(&mut f, graph_width, duration)?; - self.fmt_unit_table(&mut f)?; - f.write_all(HTML_TMPL_FOOT.as_bytes())?; + self.write_summary_table(&mut f, duration)?; + f.write_all(HTML_CANVAS.as_bytes())?; + self.write_unit_table(&mut f)?; + writeln!( + f, + "\n\ + \n\ + \n\ + ", + include_str!("timings.js") + )?; drop(f); let msg = format!( "report saved to {}", @@ -273,7 +287,7 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { } /// Render the summary table. - fn fmt_summary_table(&self, f: &mut File, duration: u32) -> CargoResult<()> { + fn write_summary_table(&self, f: &mut impl Write, duration: u32) -> CargoResult<()> { let targets: Vec = self .root_targets .iter() @@ -334,222 +348,78 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { Ok(()) } - /// Render the box graph of the units over time. - fn fmt_pipeline_graph(&self, f: &mut File, duration: u32) -> CargoResult { - if self.unit_times.is_empty() { - return Ok(0); - } - const BOX_HEIGHT: u32 = 25; - const Y_TICK_DIST: u32 = BOX_HEIGHT + 2; - - let graph_height = Y_TICK_DIST * self.unit_times.len() as u32; - - let graph_width = draw_graph_axes(f, graph_height, duration)?; - - // Draw Y tick marks. - write!(f, "")?; - - // Draw Y labels. - for n in 0..self.unit_times.len() as u32 { - let y = MARGIN + (Y_TICK_DIST * (n + 1)) - 13; - writeln!( - f, - r#"{}"#, - X_LINE - 4, - y, - n + 1 - )?; - } - - // Draw the graph. - writeln!( - f, - r#""#, - X_LINE, MARGIN, graph_width, graph_height - )?; - - // Create a map that will be used for drawing dependency lines. 
- let unit_map: HashMap<&Unit<'_>, (f64, u32)> = self + fn write_js_data(&self, f: &mut impl Write) -> CargoResult<()> { + // Create a map to link indices of unlocked units. + let unit_map: HashMap, usize> = self .unit_times .iter() .enumerate() - .map(|(i, unit)| { - let y = i as u32 * Y_TICK_DIST + 1; - let x = PX_PER_SEC * unit.start; - (&unit.unit, (x, y)) + .map(|(i, ut)| (ut.unit, i)) + .collect(); + #[derive(serde::Serialize)] + struct UnitData { + i: usize, + name: String, + version: String, + mode: String, + target: String, + start: f64, + duration: f64, + rmeta_time: Option, + unlocked_units: Vec, + unlocked_rmeta_units: Vec, + } + let round = |x: f64| (x * 100.0).round() / 100.0; + let unit_data: Vec = self + .unit_times + .iter() + .enumerate() + .map(|(i, ut)| { + let mode = if ut.unit.mode.is_run_custom_build() { + "run-custom-build" + } else { + "todo" + } + .to_string(); + let unlocked_units: Vec = ut + .unlocked_units + .iter() + .map(|unit| unit_map[unit]) + .collect(); + let unlocked_rmeta_units: Vec = ut + .unlocked_rmeta_units + .iter() + .map(|unit| unit_map[unit]) + .collect(); + UnitData { + i, + name: ut.unit.pkg.name().to_string(), + version: ut.unit.pkg.version().to_string(), + mode, + target: ut.target.clone(), + start: round(ut.start), + duration: round(ut.duration), + rmeta_time: ut.rmeta_time.map(|t| round(t)), + unlocked_units, + unlocked_rmeta_units, + } }) .collect(); - - for (i, unit) in self.unit_times.iter().enumerate() { - let (x, y) = unit_map[&unit.unit]; - let width = (PX_PER_SEC * unit.duration).max(1.0); - - let dep_class = format!("dep-{}", i); - let class = if unit.unit.mode.is_run_custom_build() { - "unit-block-custom" - } else { - "unit-block" - }; - writeln!( - f, - " ", - x, y, width, BOX_HEIGHT, class, dep_class, - )?; - let draw_dep_lines = |f: &mut File, x, units| -> CargoResult<()> { - for unlocked in units { - let (u_x, u_y) = unit_map[&unlocked]; - writeln!( - f, - " ", - dep_class, - x, - y + BOX_HEIGHT / 2, - u_y - y, - u_x - x + 5.0, - )?; - } - Ok(()) - }; - if let Some((rmeta_time, ctime, _cent)) = unit.codegen_time() { - let rmeta_x = x + PX_PER_SEC * rmeta_time; - writeln!( - f, - " ", - rmeta_x, - y, - PX_PER_SEC * ctime, - BOX_HEIGHT, - )?; - draw_dep_lines(f, rmeta_x, &unit.unlocked_rmeta_units)?; - } - writeln!( - f, - " {}{} {:.1}s", - x + 5.0, - y + BOX_HEIGHT / 2 - 5, - unit.unit.pkg.name(), - unit.target, - unit.duration - )?; - draw_dep_lines(f, x + width, &unit.unlocked_units)?; - } - writeln!(f, r#""#)?; - writeln!(f, r#""#)?; - Ok(graph_width) - } - - /// Render the line chart of concurrency information. - fn fmt_timing_graph(&self, f: &mut File, graph_width: u32, duration: u32) -> CargoResult<()> { - if graph_width == 0 || self.concurrency.is_empty() { - return Ok(()); - } - const HEIGHT: u32 = 400; - const AXIS_HEIGHT: u32 = HEIGHT - MARGIN - Y_LINE; - const TOP_MARGIN: u32 = 10; - const GRAPH_HEIGHT: u32 = AXIS_HEIGHT - TOP_MARGIN; - - draw_graph_axes(f, AXIS_HEIGHT, duration)?; - - // Draw Y tick marks and labels. - write!(f, "{}\n", - X_LINE - 10, - y + 5, - (n + 1) * step - )); - } - writeln!(f, "\"/>")?; - f.write_all(labels.as_bytes())?; - - // Label the Y axis. - let label_y = (HEIGHT - Y_LINE) / 2; writeln!( f, - "# Units", - label_y, label_y + "const UNIT_DATA = {};", + serde_json::to_string_pretty(&unit_data)? )?; - - // Draw the graph. writeln!( f, - r#""#, - X_LINE, - MARGIN, - graph_width, - GRAPH_HEIGHT + TOP_MARGIN + "const CONCURRENCY_DATA = {};", + serde_json::to_string_pretty(&self.concurrency)? 
)?; - - let coord = |t, v| { - ( - f64::from(graph_width) * (t / f64::from(duration)), - f64::from(TOP_MARGIN) + f64::from(GRAPH_HEIGHT) * (1.0 - (v as f64 / max_v as f64)), - ) - }; - let mut draw_line = |class, key: fn(&Concurrency) -> usize| { - write!(f, "", class) - }; - - draw_line("line-inactive", |c| c.inactive)?; - draw_line("line-waiting", |c| c.waiting)?; - draw_line("line-active", |c| c.active)?; - - // Draw a legend. - write!( - f, - r#" - - - - Waiting - - - Active - - - Inactive - -"#, - graph_width - 120 - )?; - - writeln!(f, "")?; - writeln!(f, "")?; Ok(()) } /// Render the table of all units. - fn fmt_unit_table(&self, f: &mut File) -> CargoResult<()> { + fn write_unit_table(&self, f: &mut impl Write) -> CargoResult<()> { write!( f, r#" @@ -560,6 +430,7 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { Unit Total Codegen + Features @@ -572,6 +443,7 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { None => "".to_string(), Some((_rt, ctime, cent)) => format!("{:.1}s ({:.0}%)", ctime, cent), }; + let features = unit.unit.features.join(", "); write!( f, r#" @@ -580,13 +452,15 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { {}{} {:.1}s {} + {} "#, i + 1, unit.name_ver(), unit.target, unit.duration, - codegen + codegen, + features, )?; } write!(f, "\n\n")?; @@ -614,96 +488,6 @@ fn d_as_f64(d: Duration) -> f64 { (d.as_secs() as f64) + f64::from(d.subsec_nanos()) / 1_000_000_000.0 } -fn round_up(n: u32, step: u32) -> u32 { - if n % step == 0 { - n - } else { - (step - n % step) + n - } -} - -/// Determine the `(step, max_value)` of the number of ticks along an axis. -fn split_ticks(n: u32, max_ticks: u32) -> (u32, u32) { - if n <= max_ticks { - (1, n) - } else if n <= max_ticks * 2 { - (2, round_up(n, 2)) - } else if n <= max_ticks * 4 { - (4, round_up(n, 4)) - } else if n <= max_ticks * 5 { - (5, round_up(n, 5)) - } else { - let mut step = 10; - loop { - let top = round_up(n, step); - if top <= max_ticks * step { - break (step, top); - } - step += 10; - } - } -} - -const X_LINE: u32 = 50; -const MARGIN: u32 = 5; -const Y_LINE: u32 = 35; // relative to bottom -const PX_PER_SEC: f64 = 20.0; -const MIN_TICK_DIST: u32 = 50; - -fn draw_graph_axes(f: &mut File, graph_height: u32, duration: u32) -> CargoResult { - let graph_width = PX_PER_SEC as u32 * duration; - let width = graph_width + X_LINE + 30; - let height = graph_height + MARGIN + Y_LINE; - writeln!( - f, - r#""#, - width, height - )?; - - // Draw axes. - write!( - f, - "{}s\n", - x, - height - Y_LINE + 20, - (n + 1) * step - )); - } - - writeln!(f, "\" />")?; - f.write_all(labels.as_bytes())?; - - // Draw vertical lines. 
- write!(f, "")?; - - Ok(graph_width) -} - fn render_rustc_info(bcx: &BuildContext<'_, '_>) -> String { let version = bcx .rustc @@ -731,58 +515,21 @@ static HTML_TMPL: &str = r#" html { font-family: sans-serif; } -svg { + +.canvas-container { + position: relative; margin-top: 5px; margin-bottom: 5px; - background: #f7f7f7; } + h1 { border-bottom: 1px solid #c0c0c0; } -.unit-label { - font-size: 12px; - dominant-baseline: hanging; -} - -.unit-block { - fill: #95cce8; -} - -.unit-block-custom { - fill: #f0b165; -} - -.unit-block-codegen { - fill: #aa95e8; - pointer-events: none; -} .graph { display: block; } -.graph-label-v { - text-anchor: end; - fill: #303030; -} - -.graph-label-h { - text-anchor: middle; - fill: #303030; -} - -.graph-axes { - stroke-width: 2; - fill: none; - stroke: black; -} - -.vert-line { - fill: none; - stroke: #c0c0c0; - stroke-dasharray: 2,4 -} - .my-table { margin-top: 20px; margin-bottom: 20px; @@ -857,34 +604,8 @@ h1 { text-align: right; } -.line-waiting { - fill: none; - stroke: red; - stroke-width: 2px; -} - -.line-active { - fill: none; - stroke: green; - stroke-width: 2px; -} - -.line-inactive { - fill: none; - stroke: blue; - stroke-width: 2px; -} - -.dep-line { - fill: none; - stroke: #ddd; - stroke-dasharray: 2; -} - -.dep-line-highlight { - stroke: #3e3e3e; - stroke-width: 2; - stroke-dasharray: 4; +.input-table td { + text-align: center; } @@ -894,21 +615,27 @@ h1 {

<h1>Cargo Build Timings</h1>

"#; -static HTML_TMPL_FOOT: &str = r#" - - - +
+ + +
+
+ +
"#; From 0df0595b1047f080cc82ad5b61f8981ffabd0a82 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Tue, 17 Sep 2019 10:53:28 -0700 Subject: [PATCH 11/15] Update docs. --- src/doc/src/reference/unstable.md | 42 ++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/src/doc/src/reference/unstable.md b/src/doc/src/reference/unstable.md index 18a765c22..60a54be7c 100644 --- a/src/doc/src/reference/unstable.md +++ b/src/doc/src/reference/unstable.md @@ -350,9 +350,49 @@ The `-Ztimings` flag can optionally take a comma-separated list of the following values: - `html` — Saves a file called `cargo-timing.html` to the current directory - with a report of the compilation. + with a report of the compilation. Files are also saved with a timestamp in + the filename if you want to look at older runs. - `info` — Displays a message to stdout after each compilation finishes with how long it took. - `json` — Emits some JSON information about timing information. The default if none are specified is `html,info`. + +#### Reading the graphs + +There are two graphs in the output. The "unit" graph shows the duration of +each unit over time. A "unit" is a single compiler invocation. There are lines +that show which additional units are "unlocked" when a unit finishes. That is, +it shows the new units that are now allowed to run because their dependencies +are all finished. Hover the mouse over a unit to highlight the lines. This can +help visualize the critical path of dependencies. This may change between runs +because the units may finish in different orders. + +The "codegen" times are highlighted in a lavender color. In some cases, build +pipelining allows units to start when their dependencies are performing code +generation. This information is not always displayed (for example, binary +units do not show when code generation starts). + +The "custom build" units are `build.rs` scripts, which when run are +highlighted in orange. + +The second graph shows Cargo's concurrency over time. The three lines are: +- "Waiting" (red) — This is the number of units waiting for a CPU slot to + open. +- "Inactive" (blue) — This is the number of units that are waiting for their + dependencies to finish. +- "Active" (green) — This is the number of units currently running. + +Note: This does not show the concurrency in the compiler itself. `rustc` +coordinates with Cargo via the "job server" to stay within the concurrency +limit. This currently mostly applies to the code generation phase. + +Tips for addressing compile times: +- Look for slow dependencies. + - Check if they have features that you may wish to consider disabling. + - Consider trying to remove the dependency completely. +- Look for a crate being built multiple times with different versions. Try to + remove the older versions from the dependency graph. +- Split large crates into smaller pieces. +- If there are a large number of crates bottlenecked on a single crate, focus + your attention on improving that one crate to improve parallelism. From 6c6aa975348127e3deea2c074a2989626317fbf3 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Tue, 17 Sep 2019 12:48:26 -0700 Subject: [PATCH 12/15] Remove Option from Timings. 
--- src/cargo/core/compiler/job_queue.rs | 36 +++++++--------------- src/cargo/core/compiler/timings.rs | 45 +++++++++++++++++++++------- 2 files changed, 45 insertions(+), 36 deletions(-) diff --git a/src/cargo/core/compiler/job_queue.rs b/src/cargo/core/compiler/job_queue.rs index f25630e2d..b3021a66d 100644 --- a/src/cargo/core/compiler/job_queue.rs +++ b/src/cargo/core/compiler/job_queue.rs @@ -40,7 +40,7 @@ pub struct JobQueue<'a, 'cfg> { is_release: bool, progress: Progress<'cfg>, next_id: u32, - timings: Option>, + timings: Timings<'a, 'cfg>, } pub struct JobState<'a> { @@ -132,10 +132,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { pub fn new(bcx: &BuildContext<'a, 'cfg>, root_units: &[Unit<'a>]) -> JobQueue<'a, 'cfg> { let (tx, rx) = channel(); let progress = Progress::with_style("Building", ProgressStyle::Ratio, bcx.config); - let timings = match bcx.config.cli_unstable().timings { - Some(..) => Some(Timings::new(bcx, root_units)), - None => None, - }; + let timings = Timings::new(bcx, root_units); JobQueue { queue: DependencyQueue::new(), tx, @@ -322,9 +319,8 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { // to the jobserver itself. tokens.truncate(self.active.len() - 1); - if let Some(t) = &mut self.timings { - t.mark_concurrency(self.active.len(), queue.len(), self.queue.len()); - } + self.timings + .mark_concurrency(self.active.len(), queue.len(), self.queue.len()); // Drain all events at once to avoid displaying the progress bar // unnecessarily. @@ -343,9 +339,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { .config .shell() .verbose(|c| c.status("Running", &cmd))?; - if let Some(t) = &mut self.timings { - t.unit_start(id, self.active[&id]); - } + self.timings.unit_start(id, self.active[&id]); } Message::BuildPlanMsg(module_name, cmd, filenames) => { plan.update(&module_name, &cmd, &filenames)?; @@ -437,9 +431,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { if !cx.bcx.build_config.build_plan { cx.bcx.config.shell().status("Finished", message)?; } - if let Some(t) = &mut self.timings { - t.finished()?; - } + self.timings.finished()?; Ok(()) } else { debug!("queue: {:#?}", self.queue); @@ -535,15 +527,11 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { match fresh { Freshness::Fresh => { - if let Some(t) = &mut self.timings { - t.add_fresh(); - } + self.timings.add_fresh(); doit() } Freshness::Dirty => { - if let Some(t) = &mut self.timings { - t.add_dirty(); - } + self.timings.add_dirty(); scope.spawn(move |_| doit()); } } @@ -590,11 +578,9 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { self.emit_warnings(None, unit, cx)?; } let unlocked = self.queue.finish(unit, &artifact); - if let Some(t) = &mut self.timings { - match artifact { - Artifact::All => t.unit_finished(id, unlocked), - Artifact::Metadata => t.unit_rmeta_finished(id, unlocked), - } + match artifact { + Artifact::All => self.timings.unit_finished(id, unlocked), + Artifact::Metadata => self.timings.unit_rmeta_finished(id, unlocked), } Ok(()) } diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs index 094c4cd53..ed92c0fc4 100644 --- a/src/cargo/core/compiler/timings.rs +++ b/src/cargo/core/compiler/timings.rs @@ -14,6 +14,8 @@ use std::time::{Duration, Instant, SystemTime}; pub struct Timings<'a, 'cfg> { config: &'cfg Config, + /// Whether or not timings should be captured. + enabled: bool, /// If true, saves an HTML report to disk. report_html: bool, /// If true, reports unit completion to stderr. 
@@ -80,6 +82,18 @@ struct Concurrency { impl<'a, 'cfg> Timings<'a, 'cfg> { pub fn new(bcx: &BuildContext<'a, 'cfg>, root_units: &[Unit<'_>]) -> Timings<'a, 'cfg> { + let has_report = |what| { + bcx.config + .cli_unstable() + .timings + .as_ref() + .map_or(false, |t| t.iter().any(|opt| opt == what)) + }; + let report_html = has_report("html"); + let report_info = has_report("info"); + let report_json = has_report("json"); + let enabled = report_html | report_info | report_json; + let mut root_map: HashMap> = HashMap::new(); for unit in root_units { let target_desc = unit.target.description_named(); @@ -96,13 +110,6 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { }) .collect(); let start_str = humantime::format_rfc3339_seconds(SystemTime::now()).to_string(); - let has_report = |what| { - bcx.config - .cli_unstable() - .timings - .as_ref() - .map_or(false, |t| t.iter().any(|opt| opt == what)) - }; let rustc_info = render_rustc_info(bcx); let profile = if bcx.build_config.release { "release" @@ -113,9 +120,10 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { Timings { config: bcx.config, - report_html: has_report("html"), - report_info: has_report("info"), - report_json: has_report("json"), + enabled, + report_html, + report_info, + report_json, start: bcx.config.creation_time(), start_str, rustc_info, @@ -131,6 +139,9 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { /// Mark that a unit has started running. pub fn unit_start(&mut self, id: u32, unit: Unit<'a>) { + if !self.enabled { + return; + } let mut target = if unit.target.is_lib() && unit.mode == CompileMode::Build { // Special case for brevity, since most dependencies hit // this path. @@ -162,6 +173,9 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { /// Mark that the `.rmeta` file as generated. pub fn unit_rmeta_finished(&mut self, id: u32, unlocked: Vec<&Unit<'a>>) { + if !self.enabled { + return; + } // `id` may not always be active. "fresh" units unconditionally // generate `Message::Finish`, but this active map only tracks dirty // units. @@ -175,6 +189,9 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { /// Mark that a unit has finished running. pub fn unit_finished(&mut self, id: u32, unlocked: Vec<&Unit<'a>>) { + if !self.enabled { + return; + } // See note above in `unit_rmeta_finished`, this may not always be active. if let Some(mut unit_time) = self.active.remove(&id) { let t = d_as_f64(self.start.elapsed()); @@ -210,6 +227,9 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { /// This is called periodically to mark the concurrency of internal structures. pub fn mark_concurrency(&mut self, active: usize, waiting: usize, inactive: usize) { + if !self.enabled { + return; + } let c = Concurrency { t: d_as_f64(self.start.elapsed()), active, @@ -231,6 +251,9 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { /// Call this when all units are finished. pub fn finished(&mut self) -> CargoResult<()> { + if !self.enabled { + return Ok(()); + } self.mark_concurrency(0, 0, 0); self.unit_times .sort_unstable_by(|a, b| a.start.partial_cmp(&b.start).unwrap()); @@ -241,7 +264,7 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { } /// Save HTML report to disk. - pub fn report_html(&self) -> CargoResult<()> { + fn report_html(&self) -> CargoResult<()> { let duration = self.start.elapsed().as_secs() as u32 + 1; let timestamp = self.start_str.replace(&['-', ':'][..], ""); let filename = format!("cargo-timing-{}.html", timestamp); From aae1416cbddde7a9cffb5d2d3dbf81179bbcbcd5 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Tue, 17 Sep 2019 12:59:51 -0700 Subject: [PATCH 13/15] Style update. 
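
The style change below swaps nested `if let Some(..)` bodies for an
early-return `match`, keeping the main path at a single indentation level.
Roughly the shape of the transformation, shown on a simplified map rather than
the real unit-time bookkeeping:

    use std::collections::HashMap;

    // Before: the whole body nests inside `if let`.
    fn finish_nested(active: &mut HashMap<u32, f64>, id: u32, now: f64) {
        if let Some(start) = active.remove(&id) {
            let duration = now - start;
            println!("finished in {:.1}s", duration);
        }
    }

    // After: bail out early when the unit is not active, then continue flat.
    fn finish_flat(active: &mut HashMap<u32, f64>, id: u32, now: f64) {
        let start = match active.remove(&id) {
            Some(s) => s,
            None => return,
        };
        let duration = now - start;
        println!("finished in {:.1}s", duration);
    }

    fn main() {
        let mut active = HashMap::new();
        active.insert(1, 0.5);
        finish_nested(&mut active, 1, 3.0);
        active.insert(2, 1.0);
        finish_flat(&mut active, 2, 3.0);
    }
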
--- src/cargo/core/compiler/timings.rs | 74 ++++++++++++++++-------------- 1 file changed, 39 insertions(+), 35 deletions(-) diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs index ed92c0fc4..00e45a52b 100644 --- a/src/cargo/core/compiler/timings.rs +++ b/src/cargo/core/compiler/timings.rs @@ -179,12 +179,14 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { // `id` may not always be active. "fresh" units unconditionally // generate `Message::Finish`, but this active map only tracks dirty // units. - if let Some(unit_time) = self.active.get_mut(&id) { - let t = d_as_f64(self.start.elapsed()); - unit_time.rmeta_time = Some(t - unit_time.start); - assert!(unit_time.unlocked_rmeta_units.is_empty()); - unit_time.unlocked_rmeta_units.extend(unlocked); - } + let unit_time = match self.active.get_mut(&id) { + Some(ut) => ut, + None => return, + }; + let t = d_as_f64(self.start.elapsed()); + unit_time.rmeta_time = Some(t - unit_time.start); + assert!(unit_time.unlocked_rmeta_units.is_empty()); + unit_time.unlocked_rmeta_units.extend(unlocked); } /// Mark that a unit has finished running. @@ -193,36 +195,38 @@ impl<'a, 'cfg> Timings<'a, 'cfg> { return; } // See note above in `unit_rmeta_finished`, this may not always be active. - if let Some(mut unit_time) = self.active.remove(&id) { - let t = d_as_f64(self.start.elapsed()); - unit_time.duration = t - unit_time.start; - assert!(unit_time.unlocked_units.is_empty()); - unit_time.unlocked_units.extend(unlocked); - if self.report_info { - let msg = format!( - "{}{} in {:.1}s", - unit_time.name_ver(), - unit_time.target, - unit_time.duration - ); - let _ = - self.config - .shell() - .status_with_color("Completed", msg, termcolor::Color::Cyan); - } - if self.report_json { - let msg = machine_message::TimingInfo { - package_id: unit_time.unit.pkg.package_id(), - target: unit_time.unit.target, - mode: unit_time.unit.mode, - duration: unit_time.duration, - rmeta_time: unit_time.rmeta_time, - } - .to_json_string(); - self.config.shell().stdout_println(msg); - } - self.unit_times.push(unit_time); + let mut unit_time = match self.active.remove(&id) { + Some(ut) => ut, + None => return, + }; + let t = d_as_f64(self.start.elapsed()); + unit_time.duration = t - unit_time.start; + assert!(unit_time.unlocked_units.is_empty()); + unit_time.unlocked_units.extend(unlocked); + if self.report_info { + let msg = format!( + "{}{} in {:.1}s", + unit_time.name_ver(), + unit_time.target, + unit_time.duration + ); + let _ = self + .config + .shell() + .status_with_color("Completed", msg, termcolor::Color::Cyan); } + if self.report_json { + let msg = machine_message::TimingInfo { + package_id: unit_time.unit.pkg.package_id(), + target: unit_time.unit.target, + mode: unit_time.unit.mode, + duration: unit_time.duration, + rmeta_time: unit_time.rmeta_time, + } + .to_json_string(); + self.config.shell().stdout_println(msg); + } + self.unit_times.push(unit_time); } /// This is called periodically to mark the concurrency of internal structures. From e913efef9a7e9b57cd8202bbd3093af82555f2f6 Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Tue, 17 Sep 2019 13:01:28 -0700 Subject: [PATCH 14/15] Move timings check to rmeta_required. 
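
Folding the `-Ztimings` check into `rmeta_required()` gives the metadata
notification logic a single predicate to consult, instead of repeating
`cli_unstable().timings.is_some()` at each call site. A tiny sketch of the
idea, with illustrative field names only:

    // One predicate owns the "do we need to watch for .rmeta?" decision.
    struct Cx {
        rmeta_required_units: Vec<String>,
        timings_enabled: bool,
    }

    impl Cx {
        fn rmeta_required(&self, unit: &str) -> bool {
            // Pipelined dependents need the .rmeta, and so does timing
            // capture, which records when metadata is emitted.
            self.rmeta_required_units.iter().any(|u| u.as_str() == unit)
                || self.timings_enabled
        }
    }

    fn main() {
        let cx = Cx {
            rmeta_required_units: vec!["dep v1.0.0".to_string()],
            timings_enabled: true,
        };
        // Call sites no longer re-check the -Ztimings flag themselves.
        assert!(cx.rmeta_required("leaf-bin v0.1.0"));
    }
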
--- src/cargo/core/compiler/context/mod.rs | 2 +- src/cargo/core/compiler/mod.rs | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/cargo/core/compiler/context/mod.rs b/src/cargo/core/compiler/context/mod.rs index 79707b4f3..bda437fa0 100644 --- a/src/cargo/core/compiler/context/mod.rs +++ b/src/cargo/core/compiler/context/mod.rs @@ -513,6 +513,6 @@ impl<'a, 'cfg> Context<'a, 'cfg> { /// Returns whether when `unit` is built whether it should emit metadata as /// well because some compilations rely on that. pub fn rmeta_required(&self, unit: &Unit<'a>) -> bool { - self.rmeta_required.contains(unit) + self.rmeta_required.contains(unit) || self.bcx.config.cli_unstable().timings.is_some() } } diff --git a/src/cargo/core/compiler/mod.rs b/src/cargo/core/compiler/mod.rs index 447cd4332..52cdcb822 100644 --- a/src/cargo/core/compiler/mod.rs +++ b/src/cargo/core/compiler/mod.rs @@ -698,11 +698,10 @@ fn add_error_format_and_color( // prettily, and then when parsing JSON messages from rustc we need to // internally understand that we should extract the `rendered` field and // present it if we can. - let wants_artifacts = pipelined || cx.bcx.config.cli_unstable().timings.is_some(); - if cx.bcx.build_config.cache_messages() || wants_artifacts { + if cx.bcx.build_config.cache_messages() || pipelined { cmd.arg("--error-format=json"); let mut json = String::from("--json=diagnostic-rendered-ansi"); - if wants_artifacts { + if pipelined { json.push_str(",artifacts"); } match cx.bcx.build_config.message_format { @@ -1076,8 +1075,7 @@ struct OutputOptions { impl OutputOptions { fn new<'a>(cx: &Context<'a, '_>, unit: &Unit<'a>) -> OutputOptions { - let look_for_metadata_directive = - cx.rmeta_required(unit) || cx.bcx.config.cli_unstable().timings.is_some(); + let look_for_metadata_directive = cx.rmeta_required(unit); let color = cx.bcx.config.shell().supports_color(); let cache_cell = if cx.bcx.build_config.cache_messages() { let path = cx.files().message_cache_path(unit); From 8be10f7e9b8fe9c2c1ec1c0ce8d3ffd3020bc18d Mon Sep 17 00:00:00 2001 From: Eric Huss Date: Tue, 17 Sep 2019 13:08:56 -0700 Subject: [PATCH 15/15] Set min slider to step 0.1 seconds. --- src/cargo/core/compiler/timings.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs index 00e45a52b..88bde8aa8 100644 --- a/src/cargo/core/compiler/timings.rs +++ b/src/cargo/core/compiler/timings.rs @@ -649,7 +649,7 @@ static HTML_CANVAS: &str = r#" - +
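
The `-Ztimings=json` option documented above emits one JSON message per
finished unit carrying the fields of `machine_message::TimingInfo`
(package_id, target, mode, duration, rmeta_time). A rough sketch of a
consumer, assuming `serde` and `serde_json` are available and that the
serialized keys mirror those field names (not verified against this patch):

    use serde::Deserialize;

    // Only a subset of the fields is needed; serde ignores the rest by default.
    #[derive(Debug, Deserialize)]
    struct TimingInfo {
        package_id: String,
        duration: f64,
        rmeta_time: Option<f64>,
    }

    fn main() -> Result<(), serde_json::Error> {
        // A hypothetical line of output, not captured from a real build.
        let line = r#"{"package_id":"foo 0.1.0","duration":3.2,"rmeta_time":1.1}"#;
        let info: TimingInfo = serde_json::from_str(line)?;
        println!("{} took {:.1}s", info.package_id, info.duration);
        Ok(())
    }
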