// cargo/tests/testsuite/freshness.rs
//! Tests for fingerprinting (rebuild detection).
use filetime::FileTime;
use std::fs::{self, OpenOptions};
use std::io;
use std::io::prelude::*;
use std::net::TcpListener;
use std::path::{Path, PathBuf};
use std::process::Stdio;
use std::thread;
use std::time::SystemTime;
use super::death;
use cargo_test_support::paths::{self, CargoPathExt};
use cargo_test_support::registry::Package;
use cargo_test_support::{basic_manifest, is_coarse_mtime, project, rustc_host, sleep_ms};
#[cargo_test]
fn modifying_and_moving() {
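// Modifying a module's source should trigger a rebuild, and renaming the
// module file away should make the next build fail with "file not found".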
let p = project()
.file("src/main.rs", "mod a; fn main() {}")
.file("src/a.rs", "")
.build();
p.cargo("build")
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
p.cargo("build").with_stdout("").run();
p.root().move_into_the_past();
p.root().join("target").move_into_the_past();
p.change_file("src/a.rs", "#[allow(unused)]fn main() {}");
p.cargo("build")
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
fs::rename(&p.root().join("src/a.rs"), &p.root().join("src/b.rs")).unwrap();
p.cargo("build")
.with_status(101)
.with_stderr_contains("[..]file not found[..]")
.run();
}
#[cargo_test]
fn modify_only_some_files() {
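// Only targets whose inputs actually changed should be rebuilt: `lib.rs` is
// made invalid but backdated, while `b.rs` (pulled in by the binary) changes,
// so only the binary is recompiled.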
let p = project()
.file("src/lib.rs", "mod a;")
.file("src/a.rs", "")
.file("src/main.rs", "mod b; fn main() {}")
.file("src/b.rs", "")
.file("tests/test.rs", "")
.build();
p.cargo("build")
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
p.cargo("test").run();
sleep_ms(1000);
assert!(p.bin("foo").is_file());
let lib = p.root().join("src/lib.rs");
p.change_file("src/lib.rs", "invalid rust code");
p.change_file("src/b.rs", "#[allow(unused)]fn foo() {}");
lib.move_into_the_past();
// Make sure the binary is rebuilt, not the lib
p.cargo("build")
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
assert!(p.bin("foo").is_file());
}
#[cargo_test]
fn rebuild_sub_package_then_while_package() {
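// After rebuilding only `b` and then touching the top-level crate, `b` should
// stay fresh while `a` and `foo` are recompiled against the rebuilt `b`.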
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
authors = []
version = "0.0.1"
[dependencies.a]
path = "a"
[dependencies.b]
path = "b"
"#,
)
.file("src/lib.rs", "extern crate a; extern crate b;")
.file(
"a/Cargo.toml",
r#"
[package]
name = "a"
authors = []
version = "0.0.1"
[dependencies.b]
path = "../b"
"#,
)
.file("a/src/lib.rs", "extern crate b;")
.file("b/Cargo.toml", &basic_manifest("b", "0.0.1"))
.file("b/src/lib.rs", "")
.build();
p.cargo("build")
.with_stderr(
"\
[COMPILING] b [..]
[COMPILING] a [..]
[COMPILING] foo [..]
[FINISHED] dev [..]
",
)
.run();
if is_coarse_mtime() {
sleep_ms(1000);
}
p.change_file("b/src/lib.rs", "pub fn b() {}");
p.cargo("build -pb -v")
.with_stderr(
"\
[COMPILING] b [..]
[RUNNING] `rustc --crate-name b [..]
[FINISHED] dev [..]
",
)
.run();
p.change_file(
"src/lib.rs",
"extern crate a; extern crate b; pub fn toplevel() {}",
);
p.cargo("build -v")
.with_stderr(
"\
[FRESH] b [..]
[COMPILING] a [..]
[RUNNING] `rustc --crate-name a [..]
[COMPILING] foo [..]
[RUNNING] `rustc --crate-name foo [..]
[FINISHED] dev [..]
",
)
.run();
}
#[cargo_test]
fn changing_lib_features_caches_targets() {
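// Library artifacts built with and without a feature should both stay cached,
// so toggling the feature back and forth does not recompile anything.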
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
authors = []
version = "0.0.1"
[features]
foo = []
"#,
)
.file("src/lib.rs", "")
.build();
p.cargo("build")
.with_stderr(
"\
[..]Compiling foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
p.cargo("build --features foo")
.with_stderr(
"\
[..]Compiling foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
/* Targets should be cached from the first build */
p.cargo("build")
.with_stderr("[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]")
.run();
p.cargo("build").with_stdout("").run();
p.cargo("build --features foo")
.with_stderr("[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]")
.run();
}
#[cargo_test]
fn changing_profiles_caches_targets() {
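// Artifacts for `cargo build` and `cargo test` should be cached independently,
// so alternating between the two commands does not recompile.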
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
authors = []
version = "0.0.1"
[profile.dev]
panic = "abort"
"#,
)
.file("src/lib.rs", "")
.build();
p.cargo("build")
.with_stderr(
"\
[..]Compiling foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
p.cargo("test")
.with_stderr(
"\
[..]Compiling foo v0.0.1 ([..])
[FINISHED] test [unoptimized + debuginfo] target(s) in [..]
[RUNNING] target[..]debug[..]deps[..]foo-[..][EXE]
[DOCTEST] foo
",
)
.run();
/* Targets should be cached from the first build */
p.cargo("build")
.with_stderr("[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]")
.run();
p.cargo("test foo")
.with_stderr(
"\
[FINISHED] test [unoptimized + debuginfo] target(s) in [..]
[RUNNING] target[..]debug[..]deps[..]foo-[..][EXE]
",
)
.run();
}
#[cargo_test]
fn changing_bin_paths_common_target_features_caches_targets() {
// Make sure dep_crate is built only once per feature set
let p = project()
.no_manifest()
.file(
".cargo/config",
r#"
[build]
target-dir = "./target"
"#,
)
.file(
"dep_crate/Cargo.toml",
r#"
[package]
name = "dep_crate"
version = "0.0.1"
authors = []
[features]
ftest = []
"#,
)
.file(
"dep_crate/src/lib.rs",
r#"
#[cfg(feature = "ftest")]
pub fn yo() {
println!("ftest on")
}
#[cfg(not(feature = "ftest"))]
pub fn yo() {
println!("ftest off")
}
"#,
)
.file(
"a/Cargo.toml",
r#"
[package]
name = "a"
version = "0.0.1"
authors = []
[dependencies]
dep_crate = {path = "../dep_crate", features = []}
"#,
)
.file("a/src/lib.rs", "")
.file(
"a/src/main.rs",
r#"
extern crate dep_crate;
use dep_crate::yo;
fn main() {
yo();
}
"#,
)
.file(
"b/Cargo.toml",
r#"
[package]
name = "b"
version = "0.0.1"
authors = []
[dependencies]
dep_crate = {path = "../dep_crate", features = ["ftest"]}
"#,
)
.file("b/src/lib.rs", "")
.file(
"b/src/main.rs",
r#"
extern crate dep_crate;
use dep_crate::yo;
fn main() {
yo();
}
"#,
)
.build();
/* Build and rebuild a/. Ensure dep_crate only builds once */
p.cargo("run")
.cwd("a")
.with_stdout("ftest off")
.with_stderr(
"\
[..]Compiling dep_crate v0.0.1 ([..])
[..]Compiling a v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
[RUNNING] `[..]target/debug/a[EXE]`
",
)
.run();
p.cargo("clean -p a").cwd("a").run();
p.cargo("run")
.cwd("a")
.with_stdout("ftest off")
.with_stderr(
"\
[..]Compiling a v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
[RUNNING] `[..]target/debug/a[EXE]`
",
)
.run();
/* Build and rebuild b/. Ensure dep_crate only builds once */
p.cargo("run")
.cwd("b")
.with_stdout("ftest on")
.with_stderr(
"\
[..]Compiling dep_crate v0.0.1 ([..])
[..]Compiling b v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
[RUNNING] `[..]target/debug/b[EXE]`
",
)
.run();
p.cargo("clean -p b").cwd("b").run();
p.cargo("run")
.cwd("b")
.with_stdout("ftest on")
.with_stderr(
"\
[..]Compiling b v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
[RUNNING] `[..]target/debug/b[EXE]`
",
)
.run();
/* Build a/ package again. If we cache different feature dep builds correctly,
* this should not cause a rebuild of dep_crate */
p.cargo("clean -p a").cwd("a").run();
p.cargo("run")
.cwd("a")
.with_stdout("ftest off")
.with_stderr(
"\
[..]Compiling a v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
[RUNNING] `[..]target/debug/a[EXE]`
",
)
.run();
/* Build b/ package again. If we cache different feature dep builds correctly,
* this should not cause a rebuild */
p.cargo("clean -p b").cwd("b").run();
p.cargo("run")
.cwd("b")
.with_stdout("ftest on")
.with_stderr(
"\
[..]Compiling b v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
[RUNNING] `[..]target/debug/b[EXE]`
",
)
.run();
}
#[cargo_test]
fn changing_bin_features_caches_targets() {
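// Binary artifacts for each feature set should stay cached when toggling
// features; platforms without a hash in the binary filename (MSVC/apple)
// are expected to recompile instead.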
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
authors = []
version = "0.0.1"
[features]
foo = []
"#,
)
.file(
"src/main.rs",
r#"
fn main() {
let msg = if cfg!(feature = "foo") { "feature on" } else { "feature off" };
println!("{}", msg);
}
"#,
)
.build();
p.cargo("build")
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
p.rename_run("foo", "off1").with_stdout("feature off").run();
p.cargo("build --features foo")
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
p.rename_run("foo", "on1").with_stdout("feature on").run();
/* Targets should be cached from the first build */
let mut e = p.cargo("build");
// MSVC/apple does not include hash in binary filename, so it gets recompiled.
if cfg!(any(target_env = "msvc", target_vendor = "apple")) {
e.with_stderr("[COMPILING] foo[..]\n[FINISHED] dev[..]");
} else {
e.with_stderr("[FINISHED] dev[..]");
}
e.run();
p.rename_run("foo", "off2").with_stdout("feature off").run();
let mut e = p.cargo("build --features foo");
if cfg!(any(target_env = "msvc", target_vendor = "apple")) {
e.with_stderr("[COMPILING] foo[..]\n[FINISHED] dev[..]");
} else {
e.with_stderr("[FINISHED] dev[..]");
}
e.run();
p.rename_run("foo", "on2").with_stdout("feature on").run();
}
#[cargo_test]
fn rebuild_tests_if_lib_changes() {
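// Integration tests must be rebuilt after the library they link against changes.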
let p = project()
.file("src/lib.rs", "pub fn foo() {}")
.file(
"tests/foo.rs",
r#"
extern crate foo;
#[test]
fn test() { foo::foo(); }
"#,
)
.build();
p.cargo("build").run();
p.cargo("test").run();
sleep_ms(1000);
p.change_file("src/lib.rs", "");
p.cargo("build -v").run();
p.cargo("test -v")
.with_status(101)
.with_stderr_contains("[..]cannot find function `foo`[..]")
.run();
}
#[cargo_test]
fn no_rebuild_transitive_target_deps() {
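// Building dev-dependencies for `cargo test` pulls in `c`, but `a`'s
// dependency on `c` is only for an unmatched target, so `a` must not be rebuilt.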
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
a = { path = "a" }
[dev-dependencies]
b = { path = "b" }
"#,
)
.file("src/lib.rs", "")
.file("tests/foo.rs", "")
.file(
"a/Cargo.toml",
r#"
[package]
name = "a"
version = "0.0.1"
authors = []
[target.foo.dependencies]
c = { path = "../c" }
"#,
)
.file("a/src/lib.rs", "")
.file(
"b/Cargo.toml",
r#"
[package]
name = "b"
version = "0.0.1"
authors = []
[dependencies]
c = { path = "../c" }
"#,
)
.file("b/src/lib.rs", "")
.file("c/Cargo.toml", &basic_manifest("c", "0.0.1"))
.file("c/src/lib.rs", "")
.build();
p.cargo("build").run();
p.cargo("test --no-run")
.with_stderr(
"\
[COMPILING] c v0.0.1 ([..])
[COMPILING] b v0.0.1 ([..])
[COMPILING] foo v0.0.1 ([..])
[FINISHED] test [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
}
#[cargo_test]
fn rerun_if_changed_in_dep() {
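// A dependency's `cargo:rerun-if-changed=build.rs` directive should not cause
// spurious rebuilds when nothing has changed.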
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
a = { path = "a" }
"#,
)
.file("src/lib.rs", "")
.file(
"a/Cargo.toml",
r#"
[package]
name = "a"
version = "0.0.1"
authors = []
build = "build.rs"
"#,
)
.file(
"a/build.rs",
r#"
fn main() {
println!("cargo:rerun-if-changed=build.rs");
}
"#,
)
.file("a/src/lib.rs", "")
.build();
p.cargo("build").run();
p.cargo("build").with_stdout("").run();
}
#[cargo_test]
fn same_build_dir_cached_packages() {
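// Separate packages sharing one target directory should reuse each other's
// builds of their common path dependencies.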
let p = project()
.no_manifest()
.file(
"a1/Cargo.toml",
r#"
[package]
name = "a1"
version = "0.0.1"
authors = []
[dependencies]
b = { path = "../b" }
"#,
)
.file("a1/src/lib.rs", "")
.file(
"a2/Cargo.toml",
r#"
[package]
name = "a2"
version = "0.0.1"
authors = []
[dependencies]
b = { path = "../b" }
"#,
)
.file("a2/src/lib.rs", "")
.file(
"b/Cargo.toml",
r#"
[package]
name = "b"
version = "0.0.1"
authors = []
[dependencies]
c = { path = "../c" }
"#,
)
.file("b/src/lib.rs", "")
.file(
"c/Cargo.toml",
r#"
[package]
name = "c"
version = "0.0.1"
authors = []
[dependencies]
d = { path = "../d" }
"#,
)
.file("c/src/lib.rs", "")
.file("d/Cargo.toml", &basic_manifest("d", "0.0.1"))
.file("d/src/lib.rs", "")
.file(
".cargo/config",
r#"
[build]
target-dir = "./target"
"#,
)
.build();
p.cargo("build")
.cwd("a1")
.with_stderr(&format!(
"\
[COMPILING] d v0.0.1 ({dir}/d)
[COMPILING] c v0.0.1 ({dir}/c)
[COMPILING] b v0.0.1 ({dir}/b)
[COMPILING] a1 v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
dir = p.url().to_file_path().unwrap().to_str().unwrap()
))
.run();
p.cargo("build")
.cwd("a2")
.with_stderr(
"\
[COMPILING] a2 v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
}
#[cargo_test]
fn no_rebuild_if_build_artifacts_move_backwards_in_time() {
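// Artifacts (and sources) whose mtimes move backwards in time should still be
// considered fresh.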
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
a = { path = "a" }
"#,
)
.file("src/lib.rs", "")
.file("a/Cargo.toml", &basic_manifest("a", "0.0.1"))
.file("a/src/lib.rs", "")
.build();
p.cargo("build").run();
p.root().move_into_the_past();
p.cargo("build")
.with_stdout("")
.with_stderr("[FINISHED] [..]")
.run();
}
#[cargo_test]
fn rebuild_if_build_artifacts_move_forward_in_time() {
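// Mtimes jumping forward in time are treated as a change, so both the
// dependency and the top-level crate are rebuilt.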
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
a = { path = "a" }
"#,
)
.file("src/lib.rs", "")
.file("a/Cargo.toml", &basic_manifest("a", "0.0.1"))
.file("a/src/lib.rs", "")
.build();
p.cargo("build").run();
p.root().move_into_the_future();
p.cargo("build")
.env("CARGO_LOG", "")
.with_stdout("")
.with_stderr(
2018-03-14 15:17:44 +00:00
"\
2016-07-15 14:31:06 +00:00
[COMPILING] a v0.0.1 ([..])
[COMPILING] foo v0.0.1 ([..])
[FINISHED] [..]
",
)
.run();
}
#[cargo_test]
fn rebuild_if_environment_changes() {
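// Changing a manifest value exposed through `CARGO_PKG_*` (here the
// description) should trigger a rebuild so the binary picks up the new value.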
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
description = "old desc"
version = "0.0.1"
authors = []
"#,
)
.file(
"src/main.rs",
r#"
fn main() {
println!("{}", env!("CARGO_PKG_DESCRIPTION"));
}
"#,
)
.build();
p.cargo("run")
.with_stdout("old desc")
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
[RUNNING] `target/debug/foo[EXE]`
",
)
.run();
p.change_file(
"Cargo.toml",
r#"
[package]
name = "foo"
description = "new desc"
version = "0.0.1"
authors = []
"#,
);
p.cargo("run")
.with_stdout("new desc")
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([CWD])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
[RUNNING] `target/debug/foo[EXE]`
",
)
.run();
}
#[cargo_test]
fn no_rebuild_when_rename_dir() {
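// Renaming the project directory and building from the new location should stay fresh.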
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "bar"
version = "0.0.1"
authors = []
[workspace]
[dependencies]
foo = { path = "foo" }
"#,
)
.file("src/_unused.rs", "")
.file("build.rs", "fn main() {}")
.file("foo/Cargo.toml", &basic_manifest("foo", "0.0.1"))
.file("foo/src/lib.rs", "")
.file("foo/build.rs", "fn main() {}")
.build();
// Make sure the most recently modified file is `src/lib.rs`, not
// `Cargo.toml`, to expose a historical bug where the `Cargo.toml` path was
// not stripped out when looking for the package root.
cargo_test_support::sleep_ms(100);
fs::write(p.root().join("src/lib.rs"), "").unwrap();
p.cargo("build").run();
let mut new = p.root();
new.pop();
new.push("bar");
fs::rename(p.root(), &new).unwrap();
p.cargo("build")
.cwd(&new)
.with_stderr("[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]")
.run();
}
#[cargo_test]
fn unused_optional_dep() {
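// Unselected optional dependencies and dev-dependencies of dependencies should not affect freshness.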
Package::new("registry1", "0.1.0").publish();
Package::new("registry2", "0.1.0").publish();
Package::new("registry3", "0.1.0").publish();
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "p"
authors = []
version = "0.1.0"
[dependencies]
bar = { path = "bar" }
baz = { path = "baz" }
registry1 = "*"
"#,
)
.file("src/lib.rs", "")
.file(
"bar/Cargo.toml",
r#"
[package]
name = "bar"
version = "0.1.1"
authors = []
[dev-dependencies]
registry2 = "*"
"#,
)
.file("bar/src/lib.rs", "")
.file(
"baz/Cargo.toml",
r#"
[package]
name = "baz"
version = "0.1.1"
authors = []
[dependencies]
registry3 = { version = "*", optional = true }
"#,
)
.file("baz/src/lib.rs", "")
.build();
p.cargo("build").run();
p.cargo("build").with_stderr("[FINISHED] [..]").run();
}
#[cargo_test]
fn path_dev_dep_registry_updates() {
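// A path dev-dependency that pulls in registry crates should not cause spurious rebuilds.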
Package::new("registry1", "0.1.0").publish();
Package::new("registry2", "0.1.0").publish();
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "p"
authors = []
version = "0.1.0"
[dependencies]
bar = { path = "bar" }
"#,
)
.file("src/lib.rs", "")
.file(
"bar/Cargo.toml",
r#"
[package]
name = "bar"
version = "0.1.1"
authors = []
[dependencies]
registry1 = "*"
[dev-dependencies]
baz = { path = "../baz"}
"#,
)
.file("bar/src/lib.rs", "")
.file(
"baz/Cargo.toml",
r#"
[package]
name = "baz"
version = "0.1.1"
authors = []
[dependencies]
registry2 = "*"
"#,
)
.file("baz/src/lib.rs", "")
.build();
p.cargo("build").run();
p.cargo("build").with_stderr("[FINISHED] [..]").run();
}
#[cargo_test]
fn change_panic_mode() {
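// A lib built under `panic = 'abort'` is later pulled in by a proc-macro, which is
// built without the panic setting; both builds should succeed.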
let p = project()
.file(
"Cargo.toml",
r#"
[workspace]
members = ['bar', 'baz']
[profile.dev]
panic = 'abort'
"#,
)
.file("src/lib.rs", "")
.file("bar/Cargo.toml", &basic_manifest("bar", "0.1.1"))
.file("bar/src/lib.rs", "")
.file(
"baz/Cargo.toml",
r#"
[package]
name = "baz"
version = "0.1.1"
authors = []
[lib]
proc-macro = true
[dependencies]
bar = { path = '../bar' }
"#,
)
.file("baz/src/lib.rs", "extern crate bar;")
.build();
p.cargo("build -p bar").run();
p.cargo("build -p baz").run();
}
#[cargo_test]
fn dont_rebuild_based_on_plugins() {
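// Workspace members that share a dependency through a proc-macro should not
// invalidate each other's fingerprints.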
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "bar"
version = "0.1.1"
[workspace]
members = ['baz']
[dependencies]
proc-macro-thing = { path = 'proc-macro-thing' }
"#,
)
.file("src/lib.rs", "")
.file(
"proc-macro-thing/Cargo.toml",
r#"
[package]
name = "proc-macro-thing"
version = "0.1.1"
[lib]
proc-macro = true
[dependencies]
qux = { path = '../qux' }
"#,
)
.file("proc-macro-thing/src/lib.rs", "")
.file(
"baz/Cargo.toml",
r#"
[package]
name = "baz"
version = "0.1.1"
[dependencies]
qux = { path = '../qux' }
"#,
)
.file("baz/src/main.rs", "fn main() {}")
.file("qux/Cargo.toml", &basic_manifest("qux", "0.1.1"))
.file("qux/src/lib.rs", "")
.build();
p.cargo("build").run();
p.cargo("build -p baz").run();
p.cargo("build").with_stderr("[FINISHED] [..]\n").run();
p.cargo("build -p bar")
.with_stderr("[FINISHED] [..]\n")
.run();
}
#[cargo_test]
fn reuse_workspace_lib() {
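// `cargo test -p baz` after a workspace build should reuse the already-built
// library and only compile the test target.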
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "bar"
version = "0.1.1"
[workspace]
[dependencies]
baz = { path = 'baz' }
"#,
)
.file("src/lib.rs", "")
.file("baz/Cargo.toml", &basic_manifest("baz", "0.1.1"))
.file("baz/src/lib.rs", "")
.build();
p.cargo("build").run();
p.cargo("test -p baz -v --no-run")
.with_stderr(
"\
[COMPILING] baz v0.1.1 ([..])
[RUNNING] `rustc[..] --test [..]`
[FINISHED] [..]
",
)
.run();
}
#[cargo_test]
fn reuse_shared_build_dep() {
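// A crate used both as a normal dependency and as a build-dependency of another
// workspace member should be shared, not rebuilt.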
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
[dependencies]
shared = {path = "shared"}
[workspace]
members = ["shared", "bar"]
"#,
)
.file("src/main.rs", "fn main() {}")
.file("shared/Cargo.toml", &basic_manifest("shared", "0.0.1"))
.file("shared/src/lib.rs", "")
.file(
"bar/Cargo.toml",
r#"
[package]
name = "bar"
version = "0.0.1"
[build-dependencies]
shared = { path = "../shared" }
"#,
)
.file("bar/src/lib.rs", "")
.file("bar/build.rs", "fn main() {}")
.build();
p.cargo("build --workspace").run();
// This should not recompile!
p.cargo("build -p foo -v")
.with_stderr(
"\
[FRESH] shared [..]
[FRESH] foo [..]
[FINISHED] [..]
",
)
.run();
}
#[cargo_test]
fn changing_rustflags_is_cached() {
let p = project().file("src/lib.rs", "").build();
// This isn't ever cached; we always have to recompile.
for _ in 0..2 {
p.cargo("build")
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]",
)
.run();
p.cargo("build")
.env("RUSTFLAGS", "-C linker=cc")
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]",
)
.run();
}
}
#[cargo_test]
fn update_dependency_mtime_does_not_rebuild() {
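// With `-Z mtime-on-use`, rebuilding just the dependency only bumps mtimes and
// must not make the parent look dirty.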
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
[dependencies]
bar = { path = "bar" }
"#,
)
.file("src/lib.rs", "")
.file("bar/Cargo.toml", &basic_manifest("bar", "0.0.1"))
.file("bar/src/lib.rs", "")
.build();
p.cargo("build -Z mtime-on-use")
.masquerade_as_nightly_cargo()
.env("RUSTFLAGS", "-C linker=cc")
.with_stderr(
"\
[COMPILING] bar v0.0.1 ([..])
[COMPILING] foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]",
)
.run();
// This does not make new files, but it does update the mtime of the dependency.
p.cargo("build -p bar -Z mtime-on-use")
.masquerade_as_nightly_cargo()
.env("RUSTFLAGS", "-C linker=cc")
.with_stderr("[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]")
.run();
// This should not recompile!
p.cargo("build -Z mtime-on-use")
.masquerade_as_nightly_cargo()
.env("RUSTFLAGS", "-C linker=cc")
.with_stderr("[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]")
.run();
}
fn fingerprint_cleaner(mut dir: PathBuf, timestamp: filetime::FileTime) {
// Cargo is experimenting with letting outside projects develop some
// limited forms of GC for target_dir. This is one of the forms.
// Specifically, Cargo is updating the mtime of a file in
// target/profile/.fingerprint each time it uses the fingerprint.
// So a cleaner can remove files associated with a fingerprint
// if all the files in the fingerprint's folder are older than a timestamp, without
// affecting any builds that happened since that timestamp.
let mut cleaned = false;
dir.push(".fingerprint");
for fing in fs::read_dir(&dir).unwrap() {
let fing = fing.unwrap();
let outdated = |f: io::Result<fs::DirEntry>| {
filetime::FileTime::from_last_modification_time(&f.unwrap().metadata().unwrap())
<= timestamp
};
if fs::read_dir(fing.path()).unwrap().all(outdated) {
fs::remove_dir_all(fing.path()).unwrap();
println!("remove: {:?}", fing.path());
// a real cleaner would remove the big files in deps and build as well
// but fingerprint is sufficient for our tests
cleaned = true;
}
}
assert!(
cleaned,
"called fingerprint_cleaner, but there was nothing to remove"
);
}
#[cargo_test]
fn fingerprint_cleaner_does_not_rebuild() {
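// Fingerprints kept fresh by mtime-on-use survive the cleaner above; anything
// the cleaner removes must be rebuilt.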
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
[dependencies]
bar = { path = "bar" }
[features]
a = []
"#,
)
.file("src/lib.rs", "")
.file("bar/Cargo.toml", &basic_manifest("bar", "0.0.1"))
.file("bar/src/lib.rs", "")
.build();
p.cargo("build -Z mtime-on-use")
.masquerade_as_nightly_cargo()
.run();
p.cargo("build -Z mtime-on-use --features a")
.masquerade_as_nightly_cargo()
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]",
)
.run();
if is_coarse_mtime() {
sleep_ms(1000);
}
let timestamp = filetime::FileTime::from_system_time(SystemTime::now());
if is_coarse_mtime() {
sleep_ms(1000);
}
// This does not make new files, but it does update the mtime.
p.cargo("build -Z mtime-on-use --features a")
.masquerade_as_nightly_cargo()
.with_stderr("[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]")
.run();
fingerprint_cleaner(p.target_debug_dir(), timestamp);
// This should not recompile!
p.cargo("build -Z mtime-on-use --features a")
.masquerade_as_nightly_cargo()
.with_stderr("[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]")
.run();
// But this should be cleaned and so need a rebuild
p.cargo("build -Z mtime-on-use")
.masquerade_as_nightly_cargo()
.with_stderr(
"\
[COMPILING] foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]",
)
.run();
}
#[cargo_test]
fn reuse_panic_build_dep_test() {
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
[build-dependencies]
bar = { path = "bar" }
[dev-dependencies]
bar = { path = "bar" }
[profile.dev]
panic = "abort"
"#,
)
.file("src/lib.rs", "")
.file("build.rs", "fn main() {}")
.file("bar/Cargo.toml", &basic_manifest("bar", "0.0.1"))
.file("bar/src/lib.rs", "")
.build();
// Check that `bar` is not built twice. It is only needed once (without `panic`).
p.cargo("test --lib --no-run -v")
.with_stderr(
"\
[COMPILING] bar [..]
[RUNNING] `rustc --crate-name bar [..]
[COMPILING] foo [..]
[RUNNING] `rustc --crate-name build_script_build [..]
[RUNNING] [..]build-script-build`
[RUNNING] `rustc --crate-name foo src/lib.rs [..]--test[..]
[FINISHED] [..]
",
)
.run();
}
#[cargo_test]
fn reuse_panic_pm() {
// foo(panic) -> bar(panic)
// somepm(nopanic) -> bar(nopanic)
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
[dependencies]
bar = { path = "bar" }
somepm = { path = "somepm" }
[profile.dev]
panic = "abort"
"#,
)
.file("src/lib.rs", "extern crate bar;")
.file("bar/Cargo.toml", &basic_manifest("bar", "0.0.1"))
.file("bar/src/lib.rs", "")
.file(
"somepm/Cargo.toml",
r#"
[package]
name = "somepm"
version = "0.0.1"
[lib]
proc-macro = true
[dependencies]
bar = { path = "../bar" }
"#,
)
.file("somepm/src/lib.rs", "extern crate bar;")
.build();
// bar is built once without panic (for proc-macro) and once with (for the
// normal dependency).
p.cargo("build -v")
.with_stderr_unordered(
"\
[COMPILING] bar [..]
[RUNNING] `rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C debuginfo=2 [..]
[RUNNING] `rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort[..]-C debuginfo=2 [..]
[COMPILING] somepm [..]
[RUNNING] `rustc --crate-name somepm [..]
[COMPILING] foo [..]
[RUNNING] `rustc --crate-name foo src/lib.rs [..]-C panic=abort[..]
[FINISHED] [..]
",
)
.run();
}
#[cargo_test]
fn bust_patched_dep() {
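// Modifying a `[patch]`ed path dependency must rebuild it and everything that depends on it.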
Package::new("registry1", "0.1.0").publish();
Package::new("registry2", "0.1.0")
.dep("registry1", "0.1.0")
.publish();
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
[dependencies]
registry2 = "0.1.0"
[patch.crates-io]
registry1 = { path = "reg1new" }
"#,
)
.file("src/lib.rs", "")
.file("reg1new/Cargo.toml", &basic_manifest("registry1", "0.1.0"))
.file("reg1new/src/lib.rs", "")
.build();
p.cargo("build").run();
if is_coarse_mtime() {
sleep_ms(1000);
}
p.change_file("reg1new/src/lib.rs", "");
if is_coarse_mtime() {
sleep_ms(1000);
}
p.cargo("build")
.with_stderr(
"\
[COMPILING] registry1 v0.1.0 ([..])
[COMPILING] registry2 v0.1.0
[COMPILING] foo v0.0.1 ([..])
[FINISHED] [..]
",
)
.run();
p.cargo("build -v")
.with_stderr(
"\
[FRESH] registry1 v0.1.0 ([..])
[FRESH] registry2 v0.1.0
[FRESH] foo v0.0.1 ([..])
[FINISHED] [..]
",
)
.run();
}
#[cargo_test]
fn rebuild_on_mid_build_file_modification() {
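// A source file modified while the build is still running must be treated as
// dirty by the next build.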
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
let p = project()
.file(
"Cargo.toml",
r#"
[workspace]
members = ["root", "proc_macro_dep"]
"#,
)
.file(
"root/Cargo.toml",
r#"
[package]
name = "root"
version = "0.1.0"
authors = []
[dependencies]
proc_macro_dep = { path = "../proc_macro_dep" }
"#,
)
.file(
"root/src/lib.rs",
r#"
#[macro_use]
extern crate proc_macro_dep;
#[derive(Noop)]
pub struct X;
"#,
)
.file(
"proc_macro_dep/Cargo.toml",
r#"
[package]
name = "proc_macro_dep"
version = "0.1.0"
authors = []
[lib]
proc-macro = true
"#,
)
.file(
"proc_macro_dep/src/lib.rs",
&format!(
r#"
extern crate proc_macro;
use std::io::Read;
use std::net::TcpStream;
use proc_macro::TokenStream;
#[proc_macro_derive(Noop)]
pub fn noop(_input: TokenStream) -> TokenStream {{
let mut stream = TcpStream::connect("{}").unwrap();
let mut v = Vec::new();
stream.read_to_end(&mut v).unwrap();
"".parse().unwrap()
}}
"#,
addr
),
)
.build();
let root = p.root();
let t = thread::spawn(move || {
let socket = server.accept().unwrap().0;
sleep_ms(1000);
let mut file = OpenOptions::new()
.write(true)
.append(true)
.open(root.join("root/src/lib.rs"))
.unwrap();
writeln!(file, "// modified").expect("Failed to append to root sources");
drop(file);
drop(socket);
drop(server.accept().unwrap());
});
p.cargo("build")
.with_stderr(
"\
[COMPILING] proc_macro_dep v0.1.0 ([..]/proc_macro_dep)
[COMPILING] root v0.1.0 ([..]/root)
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
p.cargo("build")
.with_stderr(
"\
[COMPILING] root v0.1.0 ([..]/root)
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
)
.run();
t.join().ok().unwrap();
}
#[cargo_test]
fn dirty_both_lib_and_test() {
// This tests that all artifacts that depend on the results of a build
// script will get rebuilt when the build script reruns, even for separate
// commands. It does the following:
//
// 1. Project "foo" has a build script which will compile a small
// staticlib to link against. Normally this would use the `cc` crate,
// but here we just use rustc to avoid the `cc` dependency.
// 2. Build the library.
// 3. Build the unit test. The staticlib intentionally has a bad value.
// 4. Rewrite the staticlib with the correct value.
// 5. Build the library again.
// 6. Build the unit test. This should recompile.
let slib = |n| {
format!(
r#"
#[no_mangle]
pub extern "C" fn doit() -> i32 {{
return {};
}}
"#,
n
)
};
let p = project()
.file(
"src/lib.rs",
r#"
extern "C" {
fn doit() -> i32;
}
#[test]
fn t1() {
assert_eq!(unsafe { doit() }, 1, "doit assert failure");
}
"#,
)
.file(
"build.rs",
r#"
use std::env;
use std::path::PathBuf;
use std::process::Command;
fn main() {
let rustc = env::var_os("RUSTC").unwrap();
let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
assert!(
Command::new(rustc)
.args(&[
"--crate-type=staticlib",
"--out-dir",
out_dir.to_str().unwrap(),
"slib.rs"
])
.status()
.unwrap()
.success(),
"slib build failed"
);
println!("cargo:rustc-link-lib=slib");
println!("cargo:rustc-link-search={}", out_dir.display());
}
"#,
)
.file("slib.rs", &slib(2))
.build();
p.cargo("build").run();
// 2 != 1
p.cargo("test --lib")
.with_status(101)
.with_stdout_contains("[..]doit assert failure[..]")
.run();
if is_coarse_mtime() {
// #5918
sleep_ms(1000);
}
// Fix the mistake.
p.change_file("slib.rs", &slib(1));
p.cargo("build").run();
// This should recompile with the new static lib, and the test should pass.
p.cargo("test --lib").run();
}
#[cargo_test]
fn script_fails_stay_dirty() {
// Check that a build script which is aborted (such as by hitting Ctrl-C) will re-run on the next build.
// Steps:
// 1. Build to establish fingerprints.
// 2. Make a change that triggers the build script to re-run. Abort the
// script while it is running.
// 3. Run the build again and make sure it re-runs the script.
let p = project()
.file(
"build.rs",
r#"
mod helper;
fn main() {
println!("cargo:rerun-if-changed=build.rs");
helper::doit();
}
"#,
)
.file("helper.rs", "pub fn doit() {}")
.file("src/lib.rs", "")
.build();
p.cargo("build").run();
if is_coarse_mtime() {
sleep_ms(1000);
}
p.change_file("helper.rs", r#"pub fn doit() {panic!("Crash!");}"#);
p.cargo("build")
.with_stderr_contains("[..]Crash![..]")
.with_status(101)
.run();
// There was a bug where this second call would be "fresh".
p.cargo("build")
.with_stderr_contains("[..]Crash![..]")
.with_status(101)
.run();
}
#[cargo_test]
fn simulated_docker_deps_stay_cached() {
// Test what happens in docker where the nanoseconds are zeroed out.
Package::new("regdep", "1.0.0").publish();
Package::new("regdep_old_style", "1.0.0")
.file("build.rs", "fn main() {}")
.file("src/lib.rs", "")
.publish();
Package::new("regdep_env", "1.0.0")
.file(
"build.rs",
r#"
fn main() {
println!("cargo:rerun-if-env-changed=SOMEVAR");
}
"#,
)
.file("src/lib.rs", "")
.publish();
Package::new("regdep_rerun", "1.0.0")
.file(
"build.rs",
r#"
fn main() {
println!("cargo:rerun-if-changed=build.rs");
}
"#,
)
.file("src/lib.rs", "")
.publish();
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.1.0"
[dependencies]
pathdep = { path = "pathdep" }
regdep = "1.0"
regdep_old_style = "1.0"
regdep_env = "1.0"
regdep_rerun = "1.0"
"#,
)
.file(
"src/lib.rs",
"
extern crate pathdep;
extern crate regdep;
extern crate regdep_old_style;
extern crate regdep_env;
extern crate regdep_rerun;
",
)
.file("build.rs", "fn main() {}")
.file("pathdep/Cargo.toml", &basic_manifest("pathdep", "1.0.0"))
.file("pathdep/src/lib.rs", "")
.build();
p.cargo("build").run();
let already_zero = {
// This happens on HFS with 1-second timestamp resolution,
// or other filesystems where it just so happens to write exactly on a
// 1-second boundary.
let metadata = fs::metadata(p.root().join("src/lib.rs")).unwrap();
let mtime = FileTime::from_last_modification_time(&metadata);
mtime.nanoseconds() == 0
};
// Recursively remove `nanoseconds` from every path.
fn zeropath(path: &Path) {
for entry in walkdir::WalkDir::new(path)
.into_iter()
.filter_map(|e| e.ok())
{
let metadata = fs::metadata(entry.path()).unwrap();
let mtime = metadata.modified().unwrap();
let mtime_duration = mtime.duration_since(SystemTime::UNIX_EPOCH).unwrap();
let trunc_mtime = FileTime::from_unix_time(mtime_duration.as_secs() as i64, 0);
let atime = metadata.accessed().unwrap();
let atime_duration = atime.duration_since(SystemTime::UNIX_EPOCH).unwrap();
let trunc_atime = FileTime::from_unix_time(atime_duration.as_secs() as i64, 0);
if let Err(e) = filetime::set_file_times(entry.path(), trunc_atime, trunc_mtime) {
// Windows doesn't allow changing filetimes on some things
// (directories, plus other random things; it's not clear why). Just
// ignore them.
if e.kind() == std::io::ErrorKind::PermissionDenied {
println!("PermissionDenied filetime on {:?}", entry.path());
} else {
panic!("FileTime error on {:?}: {:?}", entry.path(), e);
}
}
}
}
zeropath(&p.root());
zeropath(&paths::home());
if already_zero {
println!("already zero");
// If it was already truncated, then everything stays fresh.
p.cargo("build -v")
.with_stderr_unordered(
"\
[FRESH] pathdep [..]
[FRESH] regdep [..]
[FRESH] regdep_env [..]
[FRESH] regdep_old_style [..]
[FRESH] regdep_rerun [..]
[FRESH] foo [..]
[FINISHED] [..]
",
)
.run();
} else {
println!("not already zero");
// It is not ideal that `foo` gets recompiled, but that is the current
// behavior. Currently mtimes are ignored for registry deps.
//
// Note that this behavior is due to the fact that `foo` has a build
// script in "old" mode where it doesn't print `rerun-if-*`. In this
// mode we use `Precalculated` to fingerprint a path dependency, where
// `Precalculated` is an opaque string which has the most recent mtime
// in it. It differs between builds because one has nsec=0 and the other
// likely has a nonzero nsec. Hence, the rebuild.
p.cargo("build -v")
.with_stderr_unordered(
"\
[FRESH] pathdep [..]
[FRESH] regdep [..]
[FRESH] regdep_env [..]
[FRESH] regdep_old_style [..]
[FRESH] regdep_rerun [..]
[COMPILING] foo [..]
[RUNNING] [..]/foo-[..]/build-script-build[..]
[RUNNING] `rustc --crate-name foo[..]
[FINISHED] [..]
",
)
.run();
}
}
#[cargo_test]
fn metadata_change_invalidates() {
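// Each metadata change to Cargo.toml must trigger a rebuild, without leaving
// extra rlibs for the same crate behind.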
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.1.0"
"#,
)
.file("src/lib.rs", "")
.build();
p.cargo("build").run();
for attr in &[
"authors = [\"foo\"]",
"description = \"desc\"",
"homepage = \"https://example.com\"",
"repository =\"https://example.com\"",
] {
let mut file = OpenOptions::new()
.write(true)
.append(true)
.open(p.root().join("Cargo.toml"))
.unwrap();
writeln!(file, "{}", attr).unwrap();
p.cargo("build")
.with_stderr_contains("[COMPILING] foo [..]")
.run();
}
p.cargo("build -v")
.with_stderr_contains("[FRESH] foo[..]")
.run();
assert_eq!(p.glob("target/debug/deps/libfoo-*.rlib").count(), 1);
}
#[cargo_test]
fn edition_change_invalidates() {
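// Changing the package edition, or a per-target edition override, must trigger a rebuild.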
const MANIFEST: &str = r#"
[package]
name = "foo"
version = "0.1.0"
"#;
let p = project()
.file("Cargo.toml", MANIFEST)
.file("src/lib.rs", "")
.build();
p.cargo("build").run();
p.change_file("Cargo.toml", &format!("{}edition = \"2018\"", MANIFEST));
p.cargo("build")
.with_stderr_contains("[COMPILING] foo [..]")
.run();
p.change_file(
"Cargo.toml",
&format!(
r#"{}edition = "2018"
[lib]
edition = "2015"
"#,
MANIFEST
),
);
p.cargo("build")
.with_stderr_contains("[COMPILING] foo [..]")
.run();
p.cargo("build -v")
.with_stderr_contains("[FRESH] foo[..]")
.run();
assert_eq!(p.glob("target/debug/deps/libfoo-*.rlib").count(), 1);
}
#[cargo_test]
fn rename_with_path_deps() {
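// Like `no_rebuild_when_rename_dir`, but with a chain of nested path dependencies.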
let p = project()
.file(
"Cargo.toml",
r#"
[project]
name = "foo"
version = "0.5.0"
authors = []
[dependencies]
a = { path = 'a' }
"#,
)
.file("src/lib.rs", "extern crate a; pub fn foo() { a::foo(); }")
.file(
"a/Cargo.toml",
r#"
[project]
name = "a"
version = "0.5.0"
authors = []
[dependencies]
b = { path = 'b' }
"#,
)
.file("a/src/lib.rs", "extern crate b; pub fn foo() { b::foo() }")
.file(
"a/b/Cargo.toml",
r#"
[project]
name = "b"
version = "0.5.0"
authors = []
"#,
)
.file("a/b/src/lib.rs", "pub fn foo() { }");
let p = p.build();
p.cargo("build").run();
// Now rename the root directory and rerun `cargo build`. Not only should we
// not build anything but we also shouldn't crash.
let mut new = p.root();
new.pop();
new.push("foo2");
fs::rename(p.root(), &new).unwrap();
p.cargo("build")
.cwd(&new)
.with_stderr("[FINISHED] [..]")
.run();
}
#[cargo_test]
fn move_target_directory_with_path_deps() {
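// Moving the target directory (and pointing CARGO_TARGET_DIR at the new
// location) should keep everything fresh, including the build-script output
// generated into OUT_DIR by the path dependency.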
let p = project()
.file(
"Cargo.toml",
r#"
[project]
name = "foo"
version = "0.5.0"
authors = []
[dependencies]
a = { path = "a" }
"#,
)
.file(
"a/Cargo.toml",
r#"
[project]
name = "a"
version = "0.5.0"
authors = []
"#,
)
.file("src/lib.rs", "extern crate a; pub use a::print_msg;")
.file(
"a/build.rs",
r###"
use std::env;
use std::fs;
use std::path::Path;
fn main() {
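// Generate a source file in OUT_DIR; a/src/lib.rs pulls it in with include!.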
println!("cargo:rerun-if-changed=build.rs");
let out_dir = env::var("OUT_DIR").unwrap();
let dest_path = Path::new(&out_dir).join("hello.rs");
fs::write(&dest_path, r#"
pub fn message() -> &'static str {
"Hello, World!"
}
"#).unwrap();
}
"###,
)
.file(
"a/src/lib.rs",
r#"
include!(concat!(env!("OUT_DIR"), "/hello.rs"));
pub fn print_msg() { message(); }
"#,
);
let p = p.build();
let mut parent = p.root();
parent.pop();
p.cargo("build").run();
let new_target = p.root().join("target2");
fs::rename(p.root().join("target"), &new_target).unwrap();
p.cargo("build")
.env("CARGO_TARGET_DIR", &new_target)
.with_stderr("[FINISHED] [..]")
.run();
}
#[cargo_test]
fn rerun_if_changes() {
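// `rerun-if-env-changed` directives are remembered from the previous run of
// the build script: FOO is always a trigger, while BAR only becomes one after
// the script has run with FOO set (and thus emitted the second directive).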
let p = project()
.file(
"build.rs",
r#"
fn main() {
println!("cargo:rerun-if-env-changed=FOO");
if std::env::var("FOO").is_ok() {
println!("cargo:rerun-if-env-changed=BAR");
}
}
"#,
)
.file("src/lib.rs", "")
.build();
p.cargo("build").run();
p.cargo("build").with_stderr("[FINISHED] [..]").run();
p.cargo("build -v")
.env("FOO", "1")
.with_stderr(
"\
[COMPILING] foo [..]
[RUNNING] `[..]build-script-build`
[RUNNING] `rustc [..]
[FINISHED] [..]
",
)
.run();
p.cargo("build")
.env("FOO", "1")
.with_stderr("[FINISHED] [..]")
.run();
p.cargo("build -v")
.env("FOO", "1")
.env("BAR", "1")
.with_stderr(
"\
[COMPILING] foo [..]
[RUNNING] `[..]build-script-build`
[RUNNING] `rustc [..]
[FINISHED] [..]
",
)
.run();
p.cargo("build")
.env("FOO", "1")
.env("BAR", "1")
.with_stderr("[FINISHED] [..]")
.run();
p.cargo("build -v")
.env("BAR", "2")
.with_stderr(
"\
[COMPILING] foo [..]
[RUNNING] `[..]build-script-build`
[RUNNING] `rustc [..]
[FINISHED] [..]
",
)
.run();
p.cargo("build")
.env("BAR", "2")
.with_stderr("[FINISHED] [..]")
.run();
}
#[cargo_test]
fn channel_shares_filenames() {
// Test that different "nightly" releases use the same output filename.
// Create separate rustc binaries to emulate running different toolchains.
let nightly1 = format!(
"\
rustc 1.44.0-nightly (38114ff16 2020-03-21)
binary: rustc
commit-hash: 38114ff16e7856f98b2b4be7ab4cd29b38bed59a
commit-date: 2020-03-21
host: {}
release: 1.44.0-nightly
LLVM version: 9.0
",
rustc_host()
);
let nightly2 = format!(
"\
rustc 1.44.0-nightly (a5b09d354 2020-03-31)
binary: rustc
commit-hash: a5b09d35473615e7142f5570f5c5fad0caf68bd2
commit-date: 2020-03-31
host: {}
release: 1.44.0-nightly
LLVM version: 9.0
",
rustc_host()
);
let beta1 = format!(
"\
rustc 1.43.0-beta.3 (4c587bbda 2020-03-25)
binary: rustc
commit-hash: 4c587bbda04ab55aaf56feab11dfdfe387a85d7a
commit-date: 2020-03-25
host: {}
release: 1.43.0-beta.3
LLVM version: 9.0
",
rustc_host()
);
let beta2 = format!(
"\
rustc 1.42.0-beta.5 (4e1c5f0e9 2020-02-28)
binary: rustc
commit-hash: 4e1c5f0e9769a588b91c977e3d81e140209ef3a2
commit-date: 2020-02-28
host: {}
release: 1.42.0-beta.5
LLVM version: 9.0
",
rustc_host()
);
let stable1 = format!(
"\
rustc 1.42.0 (b8cedc004 2020-03-09)
binary: rustc
commit-hash: b8cedc00407a4c56a3bda1ed605c6fc166655447
commit-date: 2020-03-09
host: {}
release: 1.42.0
LLVM version: 9.0
",
rustc_host()
);
let stable2 = format!(
"\
rustc 1.41.1 (f3e1a954d 2020-02-24)
binary: rustc
commit-hash: f3e1a954d2ead4e2fc197c7da7d71e6c61bad196
commit-date: 2020-02-24
host: {}
release: 1.41.1
LLVM version: 9.0
",
rustc_host()
);
let compiler = project()
.at("compiler")
.file("Cargo.toml", &basic_manifest("compiler", "0.1.0"))
.file(
"src/main.rs",
r#"
fn main() {
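// When Cargo probes the compiler with `-vV`, report the faked version
// string baked in at compile time; otherwise forward to the real rustc.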
if std::env::args_os().any(|a| a == "-vV") {
print!("{}", env!("FUNKY_VERSION_TEST"));
return;
}
let mut cmd = std::process::Command::new("rustc");
cmd.args(std::env::args_os().skip(1));
assert!(cmd.status().unwrap().success());
}
"#,
)
.build();
let makeit = |version, vv| {
// Force a rebuild.
compiler.target_debug_dir().join("deps").rm_rf();
compiler.cargo("build").env("FUNKY_VERSION_TEST", vv).run();
fs::rename(compiler.bin("compiler"), compiler.bin(version)).unwrap();
};
makeit("nightly1", nightly1);
makeit("nightly2", nightly2);
makeit("beta1", beta1);
makeit("beta2", beta2);
makeit("stable1", stable1);
makeit("stable2", stable2);
// Run `cargo check` with different rustc versions to observe its behavior.
let p = project().file("src/lib.rs", "").build();
// Runs `cargo check` and returns the rmeta filename created.
// Checks that the freshness matches the given value.
let check = |version, fresh| -> String {
let output = p
.cargo("check --message-format=json")
.env("RUSTC", compiler.bin(version))
.exec_with_output()
.unwrap();
// Collect the filenames generated.
let mut artifacts: Vec<_> = std::str::from_utf8(&output.stdout)
.unwrap()
.lines()
.filter_map(|line| {
let value: serde_json::Value = serde_json::from_str(line).unwrap();
if value["reason"].as_str().unwrap() == "compiler-artifact" {
assert_eq!(value["fresh"].as_bool().unwrap(), fresh);
let filenames = value["filenames"].as_array().unwrap();
assert_eq!(filenames.len(), 1);
Some(filenames[0].to_string())
} else {
None
}
})
.collect();
// Should only generate one rmeta file.
assert_eq!(artifacts.len(), 1);
artifacts.pop().unwrap()
};
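// Expected behavior: switching between nightlies (or between betas) rebuilds
// but reuses the same output filename; distinct stable releases get distinct
// filenames.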
let nightly1_name = check("nightly1", false);
assert_eq!(check("nightly1", true), nightly1_name);
assert_eq!(check("nightly2", false), nightly1_name); // same as before
assert_eq!(check("nightly2", true), nightly1_name);
// Should rebuild going back to nightly1.
assert_eq!(check("nightly1", false), nightly1_name);
let beta1_name = check("beta1", false);
assert_ne!(beta1_name, nightly1_name);
assert_eq!(check("beta1", true), beta1_name);
assert_eq!(check("beta2", false), beta1_name); // same as before
assert_eq!(check("beta2", true), beta1_name);
// Should rebuild going back to beta1.
assert_eq!(check("beta1", false), beta1_name);
let stable1_name = check("stable1", false);
assert_ne!(stable1_name, nightly1_name);
assert_ne!(stable1_name, beta1_name);
let stable2_name = check("stable2", false);
assert_ne!(stable1_name, stable2_name);
// Check everything is fresh.
assert_eq!(check("stable1", true), stable1_name);
assert_eq!(check("stable2", true), stable2_name);
assert_eq!(check("beta1", true), beta1_name);
assert_eq!(check("nightly1", true), nightly1_name);
}
#[cargo_test]
fn linking_interrupted() {
// Interrupting cargo during the linking phase shouldn't leave the test executable marked as "fresh".
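// Strategy: a fake linker signals over TCP when linking starts and then
// sleeps, and a rustc wrapper signals when the real rustc has exited. That
// lets the test Ctrl-C cargo mid-link deterministically and then verify that
// the next build is not considered fresh.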
// This is used to detect when linking starts, then to pause the linker so
// that the test can kill cargo.
let link_listener = TcpListener::bind("127.0.0.1:0").unwrap();
let link_addr = link_listener.local_addr().unwrap();
// This is used to detect when rustc exits.
let rustc_listener = TcpListener::bind("127.0.0.1:0").unwrap();
let rustc_addr = rustc_listener.local_addr().unwrap();
// Create a linker that we can interrupt.
let linker = project()
.at("linker")
.file("Cargo.toml", &basic_manifest("linker", "1.0.0"))
.file(
"src/main.rs",
&r#"
fn main() {
// Figure out the output filename.
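// MSVC-style linkers receive `/OUT:<path>`; GNU-style linkers receive `-o <path>`.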
let output = match std::env::args().find(|a| a.starts_with("/OUT:")) {
Some(s) => s[5..].to_string(),
None => {
let mut args = std::env::args();
loop {
if args.next().unwrap() == "-o" {
break;
}
}
args.next().unwrap()
}
};
std::fs::remove_file(&output).unwrap();
std::fs::write(&output, "").unwrap();
// Tell the test that we are ready to be interrupted.
let mut socket = std::net::TcpStream::connect("__ADDR__").unwrap();
// Wait for the test to kill us.
std::thread::sleep(std::time::Duration::new(60, 0));
}
"#
.replace("__ADDR__", &link_addr.to_string()),
)
.build();
linker.cargo("build").run();
// Create a wrapper around rustc that will tell us when rustc is finished.
let rustc = project()
.at("rustc-waiter")
.file("Cargo.toml", &basic_manifest("rustc-waiter", "1.0.0"))
.file(
"src/main.rs",
&r#"
fn main() {
let mut conn = None;
// Only signal on the actual compilation of the `t1` test, not on
// `-vV` or `--print` probes.
if std::env::args().any(|arg| arg == "t1") {
// Tell the test that rustc has started.
conn = Some(std::net::TcpStream::connect("__ADDR__").unwrap());
}
let status = std::process::Command::new("rustc")
.args(std::env::args().skip(1))
.status()
.expect("rustc to run");
std::process::exit(status.code().unwrap_or(1));
}
"#
.replace("__ADDR__", &rustc_addr.to_string()),
)
.build();
rustc.cargo("build").run();
// Build it once so that the fingerprint gets saved to disk.
let p = project()
.file("src/lib.rs", "")
.file("tests/t1.rs", "")
.build();
p.cargo("test --test t1 --no-run").run();
// Make a change, start a build, then interrupt it.
p.change_file("src/lib.rs", "// modified");
let linker_env = format!(
"CARGO_TARGET_{}_LINKER",
rustc_host().to_uppercase().replace('-', "_")
);
// NOTE: This assumes that the paths to the linker or rustc are not in the
// fingerprint. But maybe they should be?
let mut cmd = p
.cargo("test --test t1 --no-run")
.env(&linker_env, linker.bin("linker"))
.env("RUSTC", rustc.bin("rustc-waiter"))
.build_command();
let mut child = cmd
.stdout(Stdio::null())
.stderr(Stdio::null())
.env("__CARGO_TEST_SETSID_PLEASE_DONT_USE_ELSEWHERE", "1")
.spawn()
.unwrap();
// Wait for rustc to start.
let mut rustc_conn = rustc_listener.accept().unwrap().0;
// Wait for linking to start.
drop(link_listener.accept().unwrap());
// Interrupt the child.
death::ctrl_c(&mut child);
assert!(!child.wait().unwrap().success());
// Wait for rustc to exit. If we don't wait, then the command below could
// start while rustc is still being torn down.
let mut buf = [0];
drop(rustc_conn.read_exact(&mut buf));
// Build again, shouldn't be fresh.
p.cargo("test --test t1")
.with_stderr(
"\
[COMPILING] foo [..]
[FINISHED] [..]
[RUNNING] target/debug/deps/t1[..]
",
)
.run();
}
#[cargo_test]
#[cfg_attr(
not(all(target_arch = "x86_64", target_os = "windows", target_env = "msvc")),
ignore
)]
fn lld_is_fresh() {
// Regression check that a dylib built with the lld linker remains fresh on rebuild.
let p = project()
.file(
".cargo/config",
r#"
[target.x86_64-pc-windows-msvc]
linker = "rust-lld"
rustflags = ["-C", "link-arg=-fuse-ld=lld"]
"#,
)
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.1.0"
[lib]
crate-type = ["dylib"]
"#,
)
.file("src/lib.rs", "")
.build();
p.cargo("build").run();
p.cargo("build -v")
.with_stderr("[FRESH] foo [..]\n[FINISHED] [..]")
.run();
}
#[cargo_test]
fn env_in_code_causes_rebuild() {
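// Changing an environment variable read through `env!`/`option_env!` should
// trigger a rebuild, and an unchanged value should stay fresh (including
// names and values containing newlines and control characters); the variables
// are reported in rustc's dep-info output, which Cargo tracks.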
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.1.0"
"#,
)
.file(
"src/main.rs",
r#"
fn main() {
println!("{:?}", option_env!("FOO"));
println!("{:?}", option_env!("FOO\nBAR"));
}
"#,
)
.build();
p.cargo("build").env_remove("FOO").run();
p.cargo("build")
.env_remove("FOO")
.with_stderr("[FINISHED] [..]")
.run();
p.cargo("build")
.env("FOO", "bar")
.with_stderr("[COMPILING][..]\n[FINISHED][..]")
.run();
p.cargo("build")
.env("FOO", "bar")
.with_stderr("[FINISHED][..]")
.run();
p.cargo("build")
.env("FOO", "baz")
.with_stderr("[COMPILING][..]\n[FINISHED][..]")
.run();
p.cargo("build")
.env("FOO", "baz")
.with_stderr("[FINISHED][..]")
.run();
p.cargo("build")
.env_remove("FOO")
.with_stderr("[COMPILING][..]\n[FINISHED][..]")
.run();
p.cargo("build")
.env_remove("FOO")
.with_stderr("[FINISHED][..]")
.run();
let interesting = " #!$\nabc\r\\\t\u{8}\r\n";
p.cargo("build").env("FOO", interesting).run();
p.cargo("build")
.env("FOO", interesting)
.with_stderr("[FINISHED][..]")
.run();
p.cargo("build").env("FOO\nBAR", interesting).run();
p.cargo("build")
.env("FOO\nBAR", interesting)
.with_stderr("[FINISHED][..]")
.run();
}
#[cargo_test]
fn env_build_script_no_rebuild() {
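// An env! value supplied by the build script via `cargo:rustc-env` must not
// make the crate look perpetually dirty; the second build should be a no-op.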
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.1.0"
"#,
)
.file(
"build.rs",
r#"
fn main() {
println!("cargo:rustc-env=FOO=bar");
}
"#,
)
.file(
"src/main.rs",
r#"
fn main() {
println!("{:?}", env!("FOO"));
}
"#,
)
.build();
p.cargo("build").run();
p.cargo("build").with_stderr("[FINISHED] [..]").run();
}