diff --git a/Cargo.toml b/Cargo.toml index b234f2861..5ff3893a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ git2-curl = "0.11.0" glob = "0.3.0" hex = "0.3" home = "0.5" +humantime = "1.2.0" ignore = "0.4.7" lazy_static = "1.2.0" jobserver = "0.1.13" diff --git a/src/bin/cargo/cli.rs b/src/bin/cargo/cli.rs index f5d507701..87486b8b6 100644 --- a/src/bin/cargo/cli.rs +++ b/src/bin/cargo/cli.rs @@ -36,6 +36,7 @@ Available unstable (nightly-only) flags: -Z config-profile -- Read profiles from .cargo/config files -Z install-upgrade -- `cargo install` will upgrade instead of failing -Z cache-messages -- Cache compiler messages + -Z timings -- Display concurrency information Run with 'cargo -Z [FLAG] [SUBCOMMAND]'" ); diff --git a/src/cargo/core/compiler/context/mod.rs b/src/cargo/core/compiler/context/mod.rs index d4acfa737..79707b4f3 100644 --- a/src/cargo/core/compiler/context/mod.rs +++ b/src/cargo/core/compiler/context/mod.rs @@ -129,7 +129,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> { export_dir: Option, exec: &Arc, ) -> CargoResult> { - let mut queue = JobQueue::new(self.bcx); + let mut queue = JobQueue::new(self.bcx, units); let mut plan = BuildPlan::new(); let build_plan = self.bcx.build_config.build_plan; self.prepare_units(export_dir, units)?; diff --git a/src/cargo/core/compiler/job_queue.rs b/src/cargo/core/compiler/job_queue.rs index cea9e4e23..d56bd6e2d 100644 --- a/src/cargo/core/compiler/job_queue.rs +++ b/src/cargo/core/compiler/job_queue.rs @@ -14,6 +14,7 @@ use super::job::{ Freshness::{self, Dirty, Fresh}, Job, }; +use super::timings::Timings; use super::{BuildContext, BuildPlan, CompileMode, Context, Unit}; use crate::core::{PackageId, TargetKind}; use crate::handle_error; @@ -39,6 +40,7 @@ pub struct JobQueue<'a, 'cfg> { is_release: bool, progress: Progress<'cfg>, next_id: u32, + timings: Timings<'a, 'cfg>, } pub struct JobState<'a> { @@ -80,7 +82,7 @@ enum Artifact { } enum Message { - Run(String), + Run(u32, String), BuildPlanMsg(String, ProcessBuilder, Arc>), Stdout(String), Stderr(String), @@ -91,7 +93,7 @@ enum Message { impl<'a> JobState<'a> { pub fn running(&self, cmd: &ProcessBuilder) { - let _ = self.tx.send(Message::Run(cmd.to_string())); + let _ = self.tx.send(Message::Run(self.id, cmd.to_string())); } pub fn build_plan( @@ -128,9 +130,16 @@ impl<'a> JobState<'a> { } impl<'a, 'cfg> JobQueue<'a, 'cfg> { - pub fn new(bcx: &BuildContext<'a, 'cfg>) -> JobQueue<'a, 'cfg> { + pub fn new(bcx: &BuildContext<'a, 'cfg>, root_units: &[Unit<'a>]) -> JobQueue<'a, 'cfg> { let (tx, rx) = channel(); let progress = Progress::with_style("Building", ProgressStyle::Ratio, bcx.config); + let profile = if bcx.build_config.release { + "release" + } else { + "dev" + } + .to_string(); + let timings = Timings::new(bcx.config, root_units, profile); JobQueue { queue: DependencyQueue::new(), tx, @@ -142,6 +151,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { is_release: bcx.build_config.release, progress, next_id: 0, + timings, } } @@ -316,6 +326,9 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { // to the jobserver itself. tokens.truncate(self.active.len() - 1); + self.timings + .mark_concurrency(self.active.len(), queue.len(), self.queue.len()); + // Drain all events at once to avoid displaying the progress bar // unnecessarily. 
let events: Vec<_> = self.rx.try_iter().collect(); @@ -328,18 +341,18 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { for event in events { match event { - Message::Run(cmd) => { + Message::Run(id, cmd) => { cx.bcx .config .shell() .verbose(|c| c.status("Running", &cmd))?; + self.timings.unit_start(id, self.active[&id]); } Message::BuildPlanMsg(module_name, cmd, filenames) => { plan.update(&module_name, &cmd, &filenames)?; } Message::Stdout(out) => { - self.progress.clear(); - println!("{}", out); + cx.bcx.config.shell().stdout_println(out); } Message::Stderr(err) => { let mut shell = cx.bcx.config.shell(); @@ -367,7 +380,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { }; info!("end ({:?}): {:?}", unit, result); match result { - Ok(()) => self.finish(&unit, artifact, cx)?, + Ok(()) => self.finish(id, &unit, artifact, cx)?, Err(e) => { let msg = "The following warnings were emitted during compilation:"; self.emit_warnings(Some(msg), &unit, cx)?; @@ -425,6 +438,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { if !cx.bcx.build_config.build_plan { cx.bcx.config.shell().status("Finished", message)?; } + self.timings.finished()?; Ok(()) } else { debug!("queue: {:#?}", self.queue); @@ -519,8 +533,12 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { } match fresh { - Freshness::Fresh => doit(), + Freshness::Fresh => { + self.timings.add_fresh(); + doit() + } Freshness::Dirty => { + self.timings.add_dirty(); scope.spawn(move |_| doit()); } } @@ -558,6 +576,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { fn finish( &mut self, + id: u32, unit: &Unit<'a>, artifact: Artifact, cx: &mut Context<'_, '_>, @@ -565,7 +584,11 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { if unit.mode.is_run_custom_build() && cx.bcx.show_warnings(unit.pkg.package_id()) { self.emit_warnings(None, unit, cx)?; } - self.queue.finish(unit, &artifact); + let unlocked = self.queue.finish(unit, &artifact); + match artifact { + Artifact::All => self.timings.unit_finished(id, unlocked), + Artifact::Metadata => self.timings.unit_rmeta_finished(id, unlocked), + } Ok(()) } diff --git a/src/cargo/core/compiler/mod.rs b/src/cargo/core/compiler/mod.rs index df6e00bf0..cd0330913 100644 --- a/src/cargo/core/compiler/mod.rs +++ b/src/cargo/core/compiler/mod.rs @@ -11,6 +11,7 @@ mod layout; mod links; mod output_depinfo; pub mod standard_lib; +mod timings; mod unit; pub mod unit_dependencies; diff --git a/src/cargo/core/compiler/timings.rs b/src/cargo/core/compiler/timings.rs new file mode 100644 index 000000000..76987f85e --- /dev/null +++ b/src/cargo/core/compiler/timings.rs @@ -0,0 +1,861 @@ +//! Timing tracking. +//! +//! This module implements some simple tracking information for timing of how +//! long it takes for different units to compile. +use super::{CompileMode, Unit}; +use crate::core::PackageId; +use crate::util::machine_message::{self, Message}; +use crate::util::{CargoResult, Config}; +use std::cmp::max; +use std::collections::HashMap; +use std::fs::File; +use std::io::Write; +use std::time::{Duration, Instant, SystemTime}; + +pub struct Timings<'a, 'cfg> { + config: &'cfg Config, + /// If true, saves an HTML report to disk. + report_html: bool, + /// If true, reports unit completion to stderr. + report_info: bool, + /// If true, emits JSON information with timing information. + report_json: bool, + /// When Cargo started. + start: Instant, + /// A rendered string of when compilation started. + start_str: String, + /// A summary of the root units. + /// + /// Tuples of `(package_description, target_descrptions)`. + root_targets: Vec<(String, Vec)>, + /// The build profile. 
+ profile: String, + /// Total number of fresh units. + total_fresh: u32, + /// Total number of dirty units. + total_dirty: u32, + /// Time tracking for each individual unit. + unit_times: Vec>, + /// Units that are in the process of being built. + /// When they finished, they are moved to `unit_times`. + active: HashMap>, + /// Concurrency-tracking information. This is periodically updated while + /// compilation progresses. + concurrency: Vec, +} + +/// Tracking information for an individual unit. +struct UnitTime<'a> { + unit: Unit<'a>, + /// A string describing the cargo target. + target: String, + /// The time when this unit started as an offset in seconds from `Timings::start`. + start: f64, + /// Total time to build this unit in seconds. + duration: f64, + /// The time when the `.rmeta` file was generated, an offset in seconds + /// from `start`. + rmeta_time: Option, + /// Reverse deps that are freed to run after this unit finished. + unlocked_units: Vec>, + /// Same as `unlocked_units`, but unlocked by rmeta. + unlocked_rmeta_units: Vec>, +} + +/// Periodic concurrency tracking information. +struct Concurrency { + /// Time as an offset in seconds from `Timings::start`. + t: f64, + /// Number of units currently running. + active: usize, + /// Number of units that could run, but are waiting for a jobserver token. + waiting: usize, + /// Number of units that are not yet ready, because they are waiting for + /// dependencies to finish. + inactive: usize, +} + +impl<'a, 'cfg> Timings<'a, 'cfg> { + pub fn new( + config: &'cfg Config, + root_units: &[Unit<'_>], + profile: String, + ) -> Timings<'a, 'cfg> { + let mut root_map: HashMap> = HashMap::new(); + for unit in root_units { + let target_desc = unit.target.description_named(); + root_map + .entry(unit.pkg.package_id()) + .or_default() + .push(target_desc); + } + let root_targets = root_map + .into_iter() + .map(|(pkg_id, targets)| { + let pkg_desc = format!("{} {}", pkg_id.name(), pkg_id.version()); + (pkg_desc, targets) + }) + .collect(); + let start_str = format!("{}", humantime::format_rfc3339_seconds(SystemTime::now())); + let has_report = |what| { + config + .cli_unstable() + .timings + .as_ref() + .map_or(false, |t| t.iter().any(|opt| opt == what)) + }; + + Timings { + config, + report_html: has_report("html"), + report_info: has_report("info"), + report_json: has_report("json"), + start: config.creation_time(), + start_str, + root_targets, + profile, + total_fresh: 0, + total_dirty: 0, + unit_times: Vec::new(), + active: HashMap::new(), + concurrency: Vec::new(), + } + } + + /// Mark that a unit has started running. + pub fn unit_start(&mut self, id: u32, unit: Unit<'a>) { + let mut target = if unit.target.is_lib() && unit.mode == CompileMode::Build { + // Special case for brevity, since most dependencies hit + // this path. + "".to_string() + } else { + format!(" {}", unit.target.description_named()) + }; + match unit.mode { + CompileMode::Test => target.push_str(" (test)"), + CompileMode::Build => {} + CompileMode::Check { test: true } => target.push_str(" (check-test)"), + CompileMode::Check { test: false } => target.push_str(" (check)"), + CompileMode::Bench => target.push_str(" (bench)"), + CompileMode::Doc { .. 
} => target.push_str(" (doc)"), + CompileMode::Doctest => target.push_str(" (doc test)"), + CompileMode::RunCustomBuild => target.push_str(" (run)"), + } + let unit_time = UnitTime { + unit, + target, + start: d_as_f64(self.start.elapsed()), + duration: 0.0, + rmeta_time: None, + unlocked_units: Vec::new(), + unlocked_rmeta_units: Vec::new(), + }; + assert!(self.active.insert(id, unit_time).is_none()); + } + + /// Mark that the `.rmeta` file as generated. + pub fn unit_rmeta_finished(&mut self, id: u32, unlocked: Vec<&Unit<'a>>) { + if let Some(unit_time) = self.active.get_mut(&id) { + let t = d_as_f64(self.start.elapsed()); + unit_time.rmeta_time = Some(t - unit_time.start); + unit_time.unlocked_rmeta_units.extend(unlocked); + } + } + + /// Mark that a unit has finished running. + pub fn unit_finished(&mut self, id: u32, unlocked: Vec<&Unit<'a>>) { + if let Some(mut unit_time) = self.active.remove(&id) { + let t = d_as_f64(self.start.elapsed()); + unit_time.duration = t - unit_time.start; + unit_time.unlocked_units.extend(unlocked); + if self.report_info { + let msg = format!( + "{}{} in {:.1}s", + unit_time.name_ver(), + unit_time.target, + unit_time.duration + ); + let _ = + self.config + .shell() + .status_with_color("Completed", msg, termcolor::Color::Cyan); + } + if self.report_json { + let msg = machine_message::TimingInfo { + package_id: unit_time.unit.pkg.package_id(), + target: unit_time.unit.target, + mode: unit_time.unit.mode, + duration: unit_time.duration, + rmeta_time: unit_time.rmeta_time, + } + .to_json_string(); + self.config.shell().stdout_println(msg); + } + self.unit_times.push(unit_time); + } + } + + /// This is called periodically to mark the concurrency of internal structures. + pub fn mark_concurrency(&mut self, active: usize, waiting: usize, inactive: usize) { + let c = Concurrency { + t: d_as_f64(self.start.elapsed()), + active, + waiting, + inactive, + }; + self.concurrency.push(c); + } + + /// Mark that a fresh unit was encountered. + pub fn add_fresh(&mut self) { + self.total_fresh += 1; + } + + /// Mark that a dirty unit was encountered. + pub fn add_dirty(&mut self) { + self.total_dirty += 1; + } + + /// Call this when all units are finished. + pub fn finished(&mut self) -> CargoResult<()> { + self.mark_concurrency(0, 0, 0); + self.unit_times + .sort_unstable_by(|a, b| a.start.partial_cmp(&b.start).unwrap()); + if self.report_html { + self.report_html()?; + } + Ok(()) + } + + /// Save HTML report to disk. + pub fn report_html(&self) -> CargoResult<()> { + let duration = self.start.elapsed().as_secs() as u32 + 1; + let mut f = File::create("cargo-timing.html")?; + let roots: Vec<&str> = self + .root_targets + .iter() + .map(|(name, _targets)| name.as_str()) + .collect(); + f.write_all(HTML_TMPL.replace("{ROOTS}", &roots.join(", ")).as_bytes())?; + self.fmt_summary_table(&mut f, duration)?; + let graph_width = self.fmt_pipeline_graph(&mut f, duration)?; + self.fmt_timing_graph(&mut f, graph_width, duration)?; + self.fmt_unit_table(&mut f)?; + f.write_all(HTML_TMPL_FOOT.as_bytes())?; + Ok(()) + } + + /// Render the summary table. + fn fmt_summary_table(&self, f: &mut File, duration: u32) -> CargoResult<()> { + let targets: Vec = self + .root_targets + .iter() + .map(|(name, targets)| format!("{} ({})", name, targets.join(", "))) + .collect(); + let targets = targets.join("
"); + let time_human = if duration > 60 { + format!(" ({}m {:02}s)", duration / 60, duration % 60) + } else { + "".to_string() + }; + let total_time = format!("{}s{}", duration, time_human); + let max_concurrency = self.concurrency.iter().map(|c| c.active).max().unwrap(); + write!( + f, + r#" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+  <tr><td>Targets:</td><td>{}</td></tr>
+  <tr><td>Profile:</td><td>{}</td></tr>
+  <tr><td>Fresh units:</td><td>{}</td></tr>
+  <tr><td>Dirty units:</td><td>{}</td></tr>
+  <tr><td>Total units:</td><td>{}</td></tr>
+  <tr><td>Max concurrency:</td><td>{}</td></tr>
+  <tr><td>Build start:</td><td>{}</td></tr>
+  <tr><td>Total time:</td><td>{}</td></tr>
+</table>
+"#, + targets, + self.profile, + self.total_fresh, + self.total_dirty, + self.total_fresh + self.total_dirty, + max_concurrency, + self.start_str, + total_time, + )?; + Ok(()) + } + + /// Render the box graph of the units over time. + fn fmt_pipeline_graph(&self, f: &mut File, duration: u32) -> CargoResult { + if self.unit_times.is_empty() { + return Ok(0); + } + const BOX_HEIGHT: u32 = 25; + const Y_TICK_DIST: u32 = BOX_HEIGHT + 2; + + let graph_height = Y_TICK_DIST * self.unit_times.len() as u32; + + let graph_width = draw_graph_axes(f, graph_height, duration)?; + + // Draw Y tick marks. + write!(f, "")?; + + // Draw Y labels. + for n in 0..self.unit_times.len() as u32 { + let y = MARGIN + (Y_TICK_DIST * (n + 1)) - 13; + writeln!( + f, + r#"{}"#, + X_LINE - 4, + y, + n + 1 + )?; + } + + // Draw the graph. + writeln!( + f, + r#""#, + X_LINE, MARGIN, graph_width, graph_height + )?; + + // Create a map that will be used for drawing dependency lines. + let unit_map: HashMap<&Unit<'_>, (f64, u32)> = self + .unit_times + .iter() + .enumerate() + .map(|(i, unit)| { + let y = i as u32 * Y_TICK_DIST + 1; + let x = PX_PER_SEC * unit.start; + (&unit.unit, (x, y)) + }) + .collect(); + + for (i, unit) in self.unit_times.iter().enumerate() { + let (x, y) = unit_map[&unit.unit]; + let width = (PX_PER_SEC * unit.duration).max(1.0); + + let dep_class = format!("dep-{}", i); + writeln!( + f, + " ", + x, y, width, BOX_HEIGHT, dep_class, + )?; + let draw_dep_lines = |f: &mut File, x, units| -> CargoResult<()> { + for unlocked in units { + let (u_x, u_y) = unit_map[&unlocked]; + writeln!( + f, + " ", + dep_class, + x, + y + BOX_HEIGHT / 2, + u_y - y, + u_x - x + 5.0, + )?; + } + Ok(()) + }; + if let Some((rmeta_time, ctime, _cent)) = unit.codegen_time() { + let rmeta_x = x + PX_PER_SEC * rmeta_time; + writeln!( + f, + " ", + rmeta_x, + y, + PX_PER_SEC * ctime, + BOX_HEIGHT, + )?; + draw_dep_lines(f, rmeta_x, &unit.unlocked_rmeta_units)?; + } + writeln!( + f, + " {}{} {:.1}s", + x + 5.0, + y + BOX_HEIGHT / 2 - 5, + unit.unit.pkg.name(), + unit.target, + unit.duration + )?; + draw_dep_lines(f, x + width, &unit.unlocked_units)?; + } + writeln!(f, r#""#)?; + writeln!(f, r#""#)?; + Ok(graph_width) + } + + /// Render the line chart of concurrency information. + fn fmt_timing_graph(&self, f: &mut File, graph_width: u32, duration: u32) -> CargoResult<()> { + if graph_width == 0 || self.concurrency.is_empty() { + return Ok(()); + } + const HEIGHT: u32 = 400; + const AXIS_HEIGHT: u32 = HEIGHT - MARGIN - Y_LINE; + const TOP_MARGIN: u32 = 10; + const GRAPH_HEIGHT: u32 = AXIS_HEIGHT - TOP_MARGIN; + + draw_graph_axes(f, AXIS_HEIGHT, duration)?; + + // Draw Y tick marks and labels. + write!(f, "{}\n", + X_LINE - 10, + y + 5, + (n + 1) * step + )); + } + writeln!(f, "\"/>")?; + f.write_all(labels.as_bytes())?; + + // Label the Y axis. + let label_y = (HEIGHT - Y_LINE) / 2; + writeln!( + f, + "# Units", + label_y, label_y + )?; + + // Draw the graph. + writeln!( + f, + r#""#, + X_LINE, + MARGIN, + graph_width, + GRAPH_HEIGHT + TOP_MARGIN + )?; + + let coord = |t, v| { + ( + f64::from(graph_width) * (t / f64::from(duration)), + f64::from(TOP_MARGIN) + f64::from(GRAPH_HEIGHT) * (1.0 - (v as f64 / max_v as f64)), + ) + }; + let mut draw_line = |class, key: fn(&Concurrency) -> usize| { + write!(f, "", class) + }; + + draw_line("line-inactive", |c| c.inactive)?; + draw_line("line-waiting", |c| c.waiting)?; + draw_line("line-active", |c| c.active)?; + + // Draw a legend. 
+ write!( + f, + r#" + + + + Waiting + + + Active + + + Inactive + +"#, + graph_width - 120 + )?; + + writeln!(f, "")?; + writeln!(f, "")?; + Ok(()) + } + + /// Render the table of all units. + fn fmt_unit_table(&self, f: &mut File) -> CargoResult<()> { + write!( + f, + r#" + + + + + + + + + + +"# + )?; + let mut units: Vec<&UnitTime<'_>> = self.unit_times.iter().collect(); + units.sort_unstable_by(|a, b| b.duration.partial_cmp(&a.duration).unwrap()); + for (i, unit) in units.iter().enumerate() { + let codegen = match unit.codegen_time() { + None => "".to_string(), + Some((_rt, ctime, cent)) => format!("{:.1}s ({:.0}%)", ctime, cent), + }; + write!( + f, + r#" + + + + + + +"#, + i + 1, + unit.name_ver(), + unit.target, + unit.duration, + codegen + )?; + } + write!(f, "\n
UnitTotalCodegen
{}.{}{}{:.1}s{}
\n")?; + Ok(()) + } +} + +impl<'a> UnitTime<'a> { + /// Returns the codegen time as (rmeta_time, codegen_time, percent of total) + fn codegen_time(&self) -> Option<(f64, f64, f64)> { + self.rmeta_time.map(|rmeta_time| { + let ctime = self.duration - rmeta_time; + let cent = (ctime / self.duration) * 100.0; + (rmeta_time, ctime, cent) + }) + } + + fn name_ver(&self) -> String { + format!("{} v{}", self.unit.pkg.name(), self.unit.pkg.version()) + } +} + +// Replace with as_secs_f64 when 1.38 hits stable. +fn d_as_f64(d: Duration) -> f64 { + (d.as_secs() as f64) + f64::from(d.subsec_nanos()) / 1_000_000_000.0 +} + +fn round_up(n: u32, step: u32) -> u32 { + if n % step == 0 { + n + } else { + (step - n % step) + n + } +} + +/// Determine the `(step, max_value)` of the number of ticks along an axis. +fn split_ticks(n: u32, max_ticks: u32) -> (u32, u32) { + if n <= max_ticks { + (1, n) + } else if n <= max_ticks * 2 { + (2, round_up(n, 2)) + } else if n <= max_ticks * 4 { + (4, round_up(n, 4)) + } else if n <= max_ticks * 5 { + (5, round_up(n, 5)) + } else { + let mut step = 10; + loop { + let top = round_up(n, step); + if top <= max_ticks * step { + break (step, top); + } + step += 10; + } + } +} + +const X_LINE: u32 = 50; +const MARGIN: u32 = 5; +const Y_LINE: u32 = 35; // relative to bottom +const PX_PER_SEC: f64 = 20.0; +const MIN_TICK_DIST: u32 = 50; + +fn draw_graph_axes(f: &mut File, graph_height: u32, duration: u32) -> CargoResult { + let graph_width = PX_PER_SEC as u32 * duration; + let width = graph_width + X_LINE + 30; + let height = graph_height + MARGIN + Y_LINE; + writeln!( + f, + r#""#, + width, height + )?; + + // Draw axes. + write!( + f, + "{}s\n", + x, + height - Y_LINE + 20, + (n + 1) * step + )); + } + + writeln!(f, "\" />")?; + f.write_all(labels.as_bytes())?; + + // Draw vertical lines. + write!(f, "")?; + + Ok(graph_width) +} + +static HTML_TMPL: &str = r#" + + + Cargo Build Timings — {ROOTS} + + + + + +

+<h1>Cargo Build Timings</h1>
+"#; + +static HTML_TMPL_FOOT: &str = r#" + + + +"#; diff --git a/src/cargo/core/features.rs b/src/cargo/core/features.rs index f52e3097c..0e0ffb3fe 100644 --- a/src/cargo/core/features.rs +++ b/src/cargo/core/features.rs @@ -335,6 +335,7 @@ pub struct CliUnstable { pub cache_messages: bool, pub binary_dep_depinfo: bool, pub build_std: Option>, + pub timings: Option>, } impl CliUnstable { @@ -367,6 +368,13 @@ impl CliUnstable { } } + fn parse_timings(value: Option<&str>) -> Vec { + match value { + None => vec!["html".to_string(), "info".to_string()], + Some(v) => v.split(',').map(|s| s.to_string()).collect(), + } + } + match k { "print-im-a-teapot" => self.print_im_a_teapot = parse_bool(v)?, "unstable-options" => self.unstable_options = true, @@ -384,6 +392,7 @@ impl CliUnstable { "build-std" => { self.build_std = Some(crate::core::compiler::standard_lib::parse_unstable_flag(v)) } + "timings" => self.timings = Some(parse_timings(v)), _ => failure::bail!("unknown `-Z` flag specified: {}", k), } diff --git a/src/cargo/core/manifest.rs b/src/cargo/core/manifest.rs index 81593ea93..96561d2cc 100644 --- a/src/cargo/core/manifest.rs +++ b/src/cargo/core/manifest.rs @@ -926,6 +926,19 @@ impl Target { self.doc = doc; self } + + pub fn description_named(&self) -> String { + match self.kind { + TargetKind::Lib(..) => "lib".to_string(), + TargetKind::Bin => format!("bin \"{}\"", self.name()), + TargetKind::Test => format!("test \"{}\"", self.name()), + TargetKind::Bench => format!("bench \"{}\"", self.name()), + TargetKind::ExampleLib(..) | TargetKind::ExampleBin => { + format!("example \"{}\"", self.name()) + } + TargetKind::CustomBuild => "custom-build".to_string(), + } + } } impl fmt::Display for Target { diff --git a/src/cargo/core/shell.rs b/src/cargo/core/shell.rs index 742b42a3e..dfa195943 100644 --- a/src/cargo/core/shell.rs +++ b/src/cargo/core/shell.rs @@ -111,6 +111,13 @@ impl Shell { } } + pub fn stdout_println(&mut self, message: impl fmt::Display) { + if self.needs_clear { + self.err_erase_line(); + } + println!("{}", message); + } + /// Sets whether the next print should clear the current line. pub fn set_needs_clear(&mut self, needs_clear: bool) { self.needs_clear = needs_clear; diff --git a/src/cargo/lib.rs b/src/cargo/lib.rs index ac36549a8..3adb0c1e9 100644 --- a/src/cargo/lib.rs +++ b/src/cargo/lib.rs @@ -16,6 +16,7 @@ #![allow(clippy::too_many_arguments)] // large project #![allow(clippy::type_complexity)] // there's an exceptionally complex type #![allow(clippy::wrong_self_convention)] // perhaps `Rc` should be special-cased in Clippy? +#![allow(clippy::write_with_newline)] // too pedantic #![warn(clippy::needless_borrow)] #![warn(clippy::redundant_clone)] // Unit is now interned, and would probably be better as pass-by-copy, but diff --git a/src/cargo/util/dependency_queue.rs b/src/cargo/util/dependency_queue.rs index 9a2184080..db8f535f4 100644 --- a/src/cargo/util/dependency_queue.rs +++ b/src/cargo/util/dependency_queue.rs @@ -159,16 +159,26 @@ impl DependencyQueue { /// Calling this function indicates that the `node` has produced `edge`. All /// remaining work items which only depend on this node/edge pair are now /// candidates to start their job. - pub fn finish(&mut self, node: &N, edge: &E) { + /// + /// Returns the nodes that are now allowed to be dequeued as a result of + /// finishing this node. 
+    pub fn finish(&mut self, node: &N, edge: &E) -> Vec<&N> {
         let reverse_deps = self.reverse_dep_map.get(node).and_then(|map| map.get(edge));
+        let reverse_deps = match reverse_deps {
             Some(deps) => deps,
-            None => return,
+            None => return Vec::new(),
         };
         let key = (node.clone(), edge.clone());
+        let mut result = Vec::new();
         for dep in reverse_deps.iter() {
-            assert!(self.dep_map.get_mut(dep).unwrap().0.remove(&key));
+            let edges = &mut self.dep_map.get_mut(dep).unwrap().0;
+            assert!(edges.remove(&key));
+            if edges.is_empty() {
+                result.push(dep);
+            }
         }
+        result
     }
 }
diff --git a/src/cargo/util/machine_message.rs b/src/cargo/util/machine_message.rs
index b52e3b430..caa0833cc 100644
--- a/src/cargo/util/machine_message.rs
+++ b/src/cargo/util/machine_message.rs
@@ -4,7 +4,7 @@ use serde::ser;
 use serde::Serialize;
 use serde_json::{self, json, value::RawValue};
 
-use crate::core::{PackageId, Target};
+use crate::core::{compiler::CompileMode, PackageId, Target};
 
 pub trait Message: ser::Serialize {
     fn reason(&self) -> &str;
@@ -73,3 +73,19 @@ impl<'a> Message for BuildScript<'a> {
         "build-script-executed"
     }
 }
+
+#[derive(Serialize)]
+pub struct TimingInfo<'a> {
+    pub package_id: PackageId,
+    pub target: &'a Target,
+    pub mode: CompileMode,
+    pub duration: f64,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rmeta_time: Option<f64>,
+}
+
+impl<'a> Message for TimingInfo<'a> {
+    fn reason(&self) -> &str {
+        "timing-info"
+    }
+}
diff --git a/src/doc/src/reference/unstable.md b/src/doc/src/reference/unstable.md
index c2d52bea0..18a765c22 100644
--- a/src/doc/src/reference/unstable.md
+++ b/src/doc/src/reference/unstable.md
@@ -336,3 +336,23 @@ Also if you'd like to see a feature that's not yet implemented and/or if
 something doesn't quite work the way you'd like it to, feel free to check out
 the [issue tracker](https://github.com/rust-lang/wg-cargo-std-aware/issues) of
 the tracking repository, and if it's not there please file a new issue!
+
+### timings
+
+The `timings` feature reports how long each compilation takes, and tracks
+concurrency information over time.
+
+```
+cargo +nightly build -Z timings
+```
+
+The `-Ztimings` flag can optionally take a comma-separated list of the
+following values:
+
+- `html` — Saves a file called `cargo-timing.html` to the current directory
+  with a report of the compilation.
+- `info` — Displays a message to stderr after each compilation finishes with
+  how long it took.
+- `json` — Emits machine-readable JSON timing information (see the example below).
+
+The default if none are specified is `html,info`.
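+
+Each JSON message uses the `timing-info` reason and carries the package ID,
+the target, the compile mode, the total duration in seconds, and, when
+available, the time at which the `.rmeta` file was generated (`rmeta_time`,
+omitted otherwise). The message below is only an illustrative sketch: the
+values are made up, the `target` object is abbreviated, and the exact
+serialized forms of `package_id`, `target`, and `mode` depend on Cargo's
+internal types. Cargo emits each message as a single line of JSON on stdout:
+
+```
+{"reason":"timing-info","package_id":"foo 0.1.0 (path+file:///path/to/foo)","target":{"kind":["lib"],"name":"foo"},"mode":"build","duration":2.7,"rmeta_time":1.3}
+```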