Make registry locking more coarse

This commit updates the locking strategy in Cargo to handle the recent
addition of creating a cache of the on-disk index also on disk. The goal
here is to reduce the overhead of locking, both cognitively when reading but
also performance wise by requiring fewer locks. Previously Cargo had a
bunch of fine-grained locks throughout the index and git repositories,
but after this commit there's just one global "package cache" lock.

This global lock now serves to basically synchronize the entire crate
graph resolution step. This shouldn't really take that long unless it's
downloading, in which case there's not a ton of benefit to running in
parallel anyway. The other intention of this single global lock is to
make it much easier on the sources to not worry so much about lock
ordering or when to acquire locks, but rather they just assert in their
various operations that they're locked.

Cargo now has a few coarse-grained locations where locks are held (for
example during resolution and during package downloading). These locks
are a bit sprinkled about but they have in-code asserts which assert
that they're held, so we'll find bugs quickly if any lock isn't held
(before a race condition is hit, that is).
This commit is contained in:
Alex Crichton 2019-04-26 11:09:25 -07:00
parent 6babe72e7c
commit 5217280ee3
15 changed files with 189 additions and 139 deletions

View file

@ -25,6 +25,7 @@ use crate::ops;
use crate::util::errors::{CargoResult, CargoResultExt, HttpNot200};
use crate::util::network::Retry;
use crate::util::{self, internal, lev_distance, Config, Progress, ProgressStyle};
use crate::util::config::PackageCacheLock;
/// Information about a package that is available somewhere in the file system.
///
@ -339,6 +340,9 @@ pub struct Downloads<'a, 'cfg: 'a> {
/// trigger a timeout; reset `next_speed_check` and set this back to the
/// configured threshold.
next_speed_check_bytes_threshold: Cell<u64>,
/// Global filesystem lock to ensure only one Cargo is downloading at a
/// time.
_lock: PackageCacheLock<'cfg>,
}
struct Download<'cfg> {
@ -437,6 +441,7 @@ impl<'cfg> PackageSet<'cfg> {
timeout,
next_speed_check: Cell::new(Instant::now()),
next_speed_check_bytes_threshold: Cell::new(0),
_lock: self.config.acquire_package_cache_lock()?,
})
}

View file

@ -40,6 +40,10 @@ pub fn update_lockfile(ws: &Workspace<'_>, opts: &UpdateOptions<'_>) -> CargoRes
failure::bail!("you can't update in the offline mode");
}
// Updates often require a lot of modifications to the registry, so ensure
// that we're synchronized against other Cargos.
let _lock = ws.config().acquire_package_cache_lock()?;
let previous_resolve = match ops::load_pkg_lockfile(ws)? {
Some(resolve) => resolve,
None => return generate_lockfile(ws),
@ -73,6 +77,7 @@ pub fn update_lockfile(ws: &Workspace<'_>, opts: &UpdateOptions<'_>) -> CargoRes
});
}
}
registry.add_sources(sources)?;
}

View file

@ -488,6 +488,11 @@ fn check_yanked_install(ws: &Workspace<'_>) -> CargoResult<()> {
// wouldn't be available for `compile_ws`.
let (pkg_set, resolve) = ops::resolve_ws_with_method(ws, Method::Everything, &specs)?;
let mut sources = pkg_set.sources_mut();
// Checking the yanked status involves taking a look at the registry and
// maybe updating files, so be sure to lock it here.
let _lock = ws.config().acquire_package_cache_lock()?;
for pkg_id in resolve.iter() {
if let Some(source) = sources.get_mut(pkg_id.source_id()) {
if source.is_yanked(pkg_id)? {

View file

@ -548,6 +548,10 @@ fn compare_resolve(
}
fn check_yanked(config: &Config, pkg_set: &PackageSet<'_>, resolve: &Resolve) -> CargoResult<()> {
// Checking the yanked status involves taking a look at the registry and
// maybe updating files, so be sure to lock it here.
let _lock = config.acquire_package_cache_lock()?;
let mut sources = pkg_set.sources_mut();
for pkg_id in resolve.iter() {
if let Some(source) = sources.get_mut(pkg_id.source_id()) {

View file

@ -564,6 +564,11 @@ pub fn select_pkg<'a, T>(
where
T: Source + 'a,
{
// This operation may involve updating some sources or making a few queries
// which may involve frobbing caches, as a result make sure we synchronize
// with other global Cargos
let _lock = config.acquire_package_cache_lock()?;
if needs_update {
source.update()?;
}

View file

@ -353,6 +353,7 @@ fn registry(
let token = token.or(token_config);
let sid = get_source_id(config, index_config.or(index), registry)?;
let api_host = {
let _lock = config.acquire_package_cache_lock()?;
let mut src = RegistrySource::remote(sid, &HashSet::new(), config);
// Only update the index if the config is not available or `force` is set.
let cfg = src.config();

View file

@ -146,6 +146,10 @@ pub fn resolve_with_previous<'cfg>(
specs: &[PackageIdSpec],
register_patches: bool,
) -> CargoResult<Resolve> {
// We only want one Cargo at a time resolving a crate graph since this can
// involve a lot of frobbing of the global caches.
let _lock = ws.config().acquire_package_cache_lock()?;
// Here we place an artificial limitation that all non-registry sources
// cannot be locked at more than one revision. This means that if a Git
// repository provides more than one package, they must all be updated in

View file

@ -154,12 +154,9 @@ impl<'cfg> Source for GitSource<'cfg> {
}
fn update(&mut self) -> CargoResult<()> {
let lock =
self.config
.git_path()
.open_rw(".cargo-lock-git", self.config, "the git checkouts")?;
let db_path = lock.parent().join("db").join(&self.ident);
let git_path = self.config.git_path();
let git_path = self.config.assert_package_cache_locked(&git_path);
let db_path = git_path.join("db").join(&self.ident);
if self.config.cli_unstable().offline && !db_path.exists() {
failure::bail!(
@ -189,21 +186,17 @@ impl<'cfg> Source for GitSource<'cfg> {
(self.remote.db_at(&db_path)?, actual_rev.unwrap())
};
// Don't use the full hash, in order to contribute less to reaching the path length limit
// on Windows. See <https://github.com/servo/servo/pull/14397>.
// Don't use the full hash, in order to contribute less to reaching the
// path length limit on Windows. See
// <https://github.com/servo/servo/pull/14397>.
let short_id = db.to_short_id(&actual_rev).unwrap();
let checkout_path = lock
.parent()
let checkout_path = git_path
.join("checkouts")
.join(&self.ident)
.join(short_id.as_str());
// Copy the database to the checkout location. After this we could drop
// the lock on the database as we no longer needed it, but we leave it
// in scope so the destructors here won't tamper with too much.
// Checkout is immutable, so we don't need to protect it with a lock once
// it is created.
// Copy the database to the checkout location.
db.copy_to(actual_rev.clone(), &checkout_path, self.config)?;
let source_id = self.source_id.with_precise(Some(actual_rev.to_string()));

View file

@ -78,8 +78,7 @@ use semver::{Version, VersionReq};
use crate::core::dependency::Dependency;
use crate::core::{InternedString, PackageId, SourceId, Summary};
use crate::sources::registry::RegistryData;
use crate::sources::registry::{RegistryPackage, INDEX_LOCK};
use crate::sources::registry::{RegistryData, RegistryPackage};
use crate::util::{internal, CargoResult, Config, Filesystem, ToSemver};
/// Crates.io treats hyphen and underscores as interchangeable, but the index and old Cargo do not.
@ -172,7 +171,6 @@ pub struct RegistryIndex<'cfg> {
path: Filesystem,
summaries_cache: HashMap<InternedString, Summaries>,
config: &'cfg Config,
locked: bool,
}
/// An internal cache of summaries for a particular package.
@ -237,14 +235,12 @@ impl<'cfg> RegistryIndex<'cfg> {
source_id: SourceId,
path: &Filesystem,
config: &'cfg Config,
locked: bool,
) -> RegistryIndex<'cfg> {
RegistryIndex {
source_id,
path: path.clone(),
summaries_cache: HashMap::new(),
config,
locked,
}
}
@ -314,35 +310,19 @@ impl<'cfg> RegistryIndex<'cfg> {
}
// Prepare the `RegistryData` which will lazily initialize internal data
// structures. Note that this is also importantly needed to initialize
// to avoid deadlocks where we acquire a lock below but the `load`
// function inside *also* wants to acquire a lock. See an instance of
// this on #5551.
// structures.
load.prepare()?;
// Synchronize access to the index. For remote indices we want to make
// sure that while we're reading the index no one is trying to update
// it.
let (root, lock) = if self.locked {
let lock = self
.path
.open_ro(Path::new(INDEX_LOCK), self.config, "the registry index");
match lock {
Ok(lock) => (lock.path().parent().unwrap().to_path_buf(), Some(lock)),
Err(_) => {
self.summaries_cache.insert(name, Summaries::default());
return Ok(self.summaries_cache.get_mut(&name).unwrap());
}
}
} else {
(self.path.clone().into_path_unlocked(), None)
};
// let root = self.config.assert_package_cache_locked(&self.path);
let root = load.assert_index_locked(&self.path);
let cache_root = root.join(".cache");
// TODO: comment
let lock_mtime = lock
.as_ref()
.and_then(|l| l.file().metadata().ok())
.map(|t| FileTime::from_last_modification_time(&t));
let lock_mtime = None;
// let lock_mtime = lock
// .as_ref()
// .and_then(|l| l.file().metadata().ok())
// .map(|t| FileTime::from_last_modification_time(&t));
// See module comment in `registry/mod.rs` for why this is structured
@ -371,6 +351,7 @@ impl<'cfg> RegistryIndex<'cfg> {
path.as_ref(),
self.source_id,
load,
self.config,
)?;
if let Some(summaries) = summaries {
self.summaries_cache.insert(name, summaries);
@ -503,6 +484,7 @@ impl Summaries {
relative: &Path,
source_id: SourceId,
load: &mut dyn RegistryData,
config: &Config,
) -> CargoResult<Option<Summaries>> {
// First up, attempt to load the cache. This could fail for all manner
// of reasons, but consider all of them non-fatal and just log their
@ -579,7 +561,8 @@ impl Summaries {
// This is opportunistic so we ignore failure here but are sure to log
// something in case of error.
if fs::create_dir_all(cache_path.parent().unwrap()).is_ok() {
// TODO: somehow need to globally synchronize this
let path = Filesystem::new(cache_path.clone());
config.assert_package_cache_locked(&path);
if let Err(e) = fs::write(cache_path, cache_bytes) {
log::info!("failed to write cache: {}", e);
}

View file

@ -1,12 +1,13 @@
use std::io::prelude::*;
use std::fs::File;
use std::io::SeekFrom;
use std::io::prelude::*;
use std::path::Path;
use crate::core::PackageId;
use crate::sources::registry::{MaybeLock, RegistryConfig, RegistryData};
use crate::util::errors::{CargoResult, CargoResultExt};
use crate::util::paths;
use crate::util::{Config, FileLock, Filesystem, Sha256};
use crate::util::{Config, Filesystem, Sha256};
use hex;
pub struct LocalRegistry<'cfg> {
@ -36,6 +37,12 @@ impl<'cfg> RegistryData for LocalRegistry<'cfg> {
&self.index_path
}
fn assert_index_locked<'a>(&self, path: &'a Filesystem) -> &'a Path {
// Note that the `*_unlocked` variant is used here since we're not
// modifying the index and it's required to be externally synchronized.
path.as_path_unlocked()
}
fn load(
&self,
root: &Path,
@ -71,7 +78,12 @@ impl<'cfg> RegistryData for LocalRegistry<'cfg> {
fn download(&mut self, pkg: PackageId, checksum: &str) -> CargoResult<MaybeLock> {
let crate_file = format!("{}-{}.crate", pkg.name(), pkg.version());
let mut crate_file = self.root.open_ro(&crate_file, self.config, "crate file")?;
// Note that the usage of `into_path_unlocked` here is because the local
// crate files here never change in that we're not the one writing them,
// so it's not our responsibility to synchronize access to them.
let path = self.root.join(&crate_file).into_path_unlocked();
let mut crate_file = File::open(&path)?;
// If we've already got an unpacked version of this crate, then skip the
// checksum below as it is in theory already verified.
@ -89,7 +101,7 @@ impl<'cfg> RegistryData for LocalRegistry<'cfg> {
loop {
let n = crate_file
.read(&mut buf)
.chain_err(|| format!("failed to read `{}`", crate_file.path().display()))?;
.chain_err(|| format!("failed to read `{}`", path.display()))?;
if n == 0 {
break;
}
@ -109,7 +121,7 @@ impl<'cfg> RegistryData for LocalRegistry<'cfg> {
_pkg: PackageId,
_checksum: &str,
_data: &[u8],
) -> CargoResult<FileLock> {
) -> CargoResult<File> {
panic!("this source doesn't download")
}
}

View file

@ -161,6 +161,7 @@
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::collections::HashSet;
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::{Path, PathBuf};
@ -177,9 +178,8 @@ use crate::sources::PathSource;
use crate::util::errors::CargoResultExt;
use crate::util::hex;
use crate::util::to_url::ToUrl;
use crate::util::{internal, CargoResult, Config, FileLock, Filesystem};
use crate::util::{internal, CargoResult, Config, Filesystem};
const INDEX_LOCK: &str = ".cargo-index-lock";
const PACKAGE_SOURCE_LOCK: &str = ".cargo-ok";
pub const CRATES_IO_INDEX: &str = "https://github.com/rust-lang/crates.io-index";
pub const CRATES_IO_REGISTRY: &str = "crates-io";
@ -194,7 +194,6 @@ pub struct RegistrySource<'cfg> {
ops: Box<dyn RegistryData + 'cfg>,
index: index::RegistryIndex<'cfg>,
yanked_whitelist: HashSet<PackageId>,
index_locked: bool,
}
#[derive(Deserialize)]
@ -365,20 +364,17 @@ pub trait RegistryData {
fn config(&mut self) -> CargoResult<Option<RegistryConfig>>;
fn update_index(&mut self) -> CargoResult<()>;
fn download(&mut self, pkg: PackageId, checksum: &str) -> CargoResult<MaybeLock>;
fn finish_download(
&mut self,
pkg: PackageId,
checksum: &str,
data: &[u8],
) -> CargoResult<FileLock>;
fn finish_download(&mut self, pkg: PackageId, checksum: &str, data: &[u8])
-> CargoResult<File>;
fn is_crate_downloaded(&self, _pkg: PackageId) -> bool {
true
}
fn assert_index_locked<'a>(&self, path: &'a Filesystem) -> &'a Path;
}
pub enum MaybeLock {
Ready(FileLock),
Ready(File),
Download { url: String, descriptor: String },
}
@ -400,14 +396,7 @@ impl<'cfg> RegistrySource<'cfg> {
) -> RegistrySource<'cfg> {
let name = short_name(source_id);
let ops = remote::RemoteRegistry::new(source_id, config, &name);
RegistrySource::new(
source_id,
config,
&name,
Box::new(ops),
yanked_whitelist,
true,
)
RegistrySource::new(source_id, config, &name, Box::new(ops), yanked_whitelist)
}
pub fn local(
@ -418,14 +407,7 @@ impl<'cfg> RegistrySource<'cfg> {
) -> RegistrySource<'cfg> {
let name = short_name(source_id);
let ops = local::LocalRegistry::new(path, config, &name);
RegistrySource::new(
source_id,
config,
&name,
Box::new(ops),
yanked_whitelist,
false,
)
RegistrySource::new(source_id, config, &name, Box::new(ops), yanked_whitelist)
}
fn new(
@ -434,16 +416,14 @@ impl<'cfg> RegistrySource<'cfg> {
name: &str,
ops: Box<dyn RegistryData + 'cfg>,
yanked_whitelist: &HashSet<PackageId>,
index_locked: bool,
) -> RegistrySource<'cfg> {
RegistrySource {
src_path: config.registry_source_path().join(name),
config,
source_id,
updated: false,
index: index::RegistryIndex::new(source_id, ops.index_path(), config, index_locked),
index: index::RegistryIndex::new(source_id, ops.index_path(), config),
yanked_whitelist: yanked_whitelist.clone(),
index_locked,
ops,
}
}
@ -459,36 +439,26 @@ impl<'cfg> RegistrySource<'cfg> {
/// compiled.
///
/// No action is taken if the source looks like it's already unpacked.
fn unpack_package(&self, pkg: PackageId, tarball: &FileLock) -> CargoResult<PathBuf> {
fn unpack_package(&self, pkg: PackageId, tarball: &File) -> CargoResult<PathBuf> {
// The `.cargo-ok` file is used to track if the source is already
// unpacked and to lock the directory for unpacking.
let mut ok = {
let package_dir = format!("{}-{}", pkg.name(), pkg.version());
let dst = self.src_path.join(&package_dir);
dst.create_dir()?;
// Attempt to open a read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. If the file has
// any data, assume the source is already unpacked.
if let Ok(ok) = dst.open_ro(PACKAGE_SOURCE_LOCK, self.config, &package_dir) {
let meta = ok.file().metadata()?;
if meta.len() > 0 {
let unpack_dir = ok.parent().to_path_buf();
return Ok(unpack_dir);
}
}
dst.open_rw(PACKAGE_SOURCE_LOCK, self.config, &package_dir)?
};
let unpack_dir = ok.parent().to_path_buf();
// If the file has any data, assume the source is already unpacked.
let meta = ok.file().metadata()?;
// unpacked.
let package_dir = format!("{}-{}", pkg.name(), pkg.version());
let dst = self.src_path.join(&package_dir);
dst.create_dir()?;
let path = dst.join(PACKAGE_SOURCE_LOCK);
let path = self.config.assert_package_cache_locked(&path);
let unpack_dir = path.parent().unwrap();
let mut ok = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.open(&path)?;
let meta = ok.metadata()?;
if meta.len() > 0 {
return Ok(unpack_dir);
return Ok(unpack_dir.to_path_buf());
}
let gz = GzDecoder::new(tarball.file());
let gz = GzDecoder::new(tarball);
let mut tar = Archive::new(gz);
let prefix = unpack_dir.file_name().unwrap();
let parent = unpack_dir.parent().unwrap();
@ -523,19 +493,18 @@ impl<'cfg> RegistrySource<'cfg> {
// Write to the lock file to indicate that unpacking was successful.
write!(ok, "ok")?;
Ok(unpack_dir)
Ok(unpack_dir.to_path_buf())
}
fn do_update(&mut self) -> CargoResult<()> {
self.ops.update_index()?;
let path = self.ops.index_path();
self.index =
index::RegistryIndex::new(self.source_id, path, self.config, self.index_locked);
self.index = index::RegistryIndex::new(self.source_id, path, self.config);
self.updated = true;
Ok(())
}
fn get_pkg(&mut self, package: PackageId, path: &FileLock) -> CargoResult<Package> {
fn get_pkg(&mut self, package: PackageId, path: &File) -> CargoResult<Package> {
let path = self
.unpack_package(package, path)
.chain_err(|| internal(format!("failed to unpack package `{}`", package)))?;

View file

@ -1,5 +1,6 @@
use std::cell::{Cell, Ref, RefCell};
use std::fmt::Write as FmtWrite;
use std::fs::{self, File, OpenOptions};
use std::io::prelude::*;
use std::io::SeekFrom;
use std::mem;
@ -12,12 +13,9 @@ use log::{debug, trace};
use crate::core::{PackageId, SourceId};
use crate::sources::git;
use crate::sources::registry::MaybeLock;
use crate::sources::registry::{
RegistryConfig, RegistryData, CRATE_TEMPLATE, INDEX_LOCK, VERSION_TEMPLATE,
};
use crate::sources::registry::{RegistryConfig, RegistryData, CRATE_TEMPLATE, VERSION_TEMPLATE};
use crate::util::errors::{CargoResult, CargoResultExt};
use crate::util::{Config, Sha256};
use crate::util::{FileLock, Filesystem};
use crate::util::{Config, Filesystem, Sha256};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
@ -44,7 +42,7 @@ impl<'cfg> RemoteRegistry<'cfg> {
fn repo(&self) -> CargoResult<&git2::Repository> {
self.repo.try_borrow_with(|| {
let path = self.index_path.clone().into_path_unlocked();
let path = self.config.assert_package_cache_locked(&self.index_path);
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
@ -54,15 +52,11 @@ impl<'cfg> RemoteRegistry<'cfg> {
// Ok, now we need to lock and try the whole thing over again.
trace!("acquiring registry index lock");
let lock = self.index_path.open_rw(
Path::new(INDEX_LOCK),
self.config,
"the registry index",
)?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
drop(fs::remove_dir_all(&path));
fs::create_dir_all(&path)?;
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
@ -139,6 +133,10 @@ impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
&self.index_path
}
fn assert_index_locked<'a>(&self, path: &'a Filesystem) -> &'a Path {
self.config.assert_package_cache_locked(path)
}
fn load(
&self,
_root: &Path,
@ -162,9 +160,7 @@ impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
debug!("loading config");
self.prepare()?;
let _lock =
self.index_path
.open_ro(Path::new(INDEX_LOCK), self.config, "the registry index")?;
self.config.assert_package_cache_locked(&self.index_path);
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(json)?);
@ -213,9 +209,7 @@ impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
self.prepare()?;
self.head.set(None);
*self.tree.borrow_mut() = None;
let lock =
self.index_path
.open_rw(Path::new(INDEX_LOCK), self.config, "the registry index")?;
self.config.assert_package_cache_locked(&self.index_path);
self.config
.shell()
.status("Updating", self.source_id.display_index())?;
@ -228,10 +222,6 @@ impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
.chain_err(|| format!("failed to fetch `{}`", url))?;
self.config.updated_sources().insert(self.source_id);
// Make a write to the lock file to record the mtime on the filesystem
// of when the last update happened.
lock.file().set_len(0)?;
lock.file().write(&[0])?;
Ok(())
}
@ -244,8 +234,10 @@ impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(&filename, self.config, &filename) {
let meta = dst.file().metadata()?;
let path = self.cache_path.join(&filename);
let path = self.config.assert_package_cache_locked(&path);
if let Ok(dst) = File::open(&path) {
let meta = dst.metadata()?;
if meta.len() > 0 {
return Ok(MaybeLock::Ready(dst));
}
@ -271,7 +263,7 @@ impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
pkg: PackageId,
checksum: &str,
data: &[u8],
) -> CargoResult<FileLock> {
) -> CargoResult<File> {
// Verify what we just downloaded
let mut state = Sha256::new();
state.update(data);
@ -280,8 +272,15 @@ impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
}
let filename = self.filename(pkg);
let mut dst = self.cache_path.open_rw(&filename, self.config, &filename)?;
let meta = dst.file().metadata()?;
self.cache_path.create_dir()?;
let path = self.cache_path.join(&filename);
let path = self.config.assert_package_cache_locked(&path);
let mut dst = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.open(&path)?;
let meta = dst.metadata()?;
if meta.len() > 0 {
return Ok(dst);
}
@ -295,8 +294,10 @@ impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
if let Ok(meta) = dst.file().metadata() {
let path = self.cache_path.join(path);
let path = self.config.assert_package_cache_locked(&path);
if let Ok(dst) = File::open(path) {
if let Ok(meta) = dst.metadata() {
return meta.len() > 0;
}
}

View file

@ -29,8 +29,13 @@ use crate::util::errors::{self, internal, CargoResult, CargoResultExt};
use crate::util::toml as cargo_toml;
use crate::util::Filesystem;
use crate::util::Rustc;
use crate::util::{paths, validate_package_name, FileLock};
use crate::util::{ToUrl, ToUrlWithBase};
/// Configuration information for cargo. This is not specific to a build, it is information
/// relating to cargo itself.
@ -76,6 +81,9 @@ pub struct Config {
profiles: LazyCell<ConfigProfiles>,
/// Tracks which sources have been updated to avoid multiple updates.
updated_sources: LazyCell<RefCell<HashSet<SourceId>>>,
/// Lock, if held, of the global package cache along with the number of
/// acquisitions so far.
package_cache_lock: RefCell<Option<(FileLock, usize)>>,
}
impl Config {
@ -132,6 +140,7 @@ impl Config {
env,
profiles: LazyCell::new(),
updated_sources: LazyCell::new(),
package_cache_lock: RefCell::new(None),
}
}
@ -828,6 +837,36 @@ impl Config {
};
T::deserialize(d).map_err(|e| e.into())
}
/// Returns the path underlying `f`, asserting that the global package
/// cache lock is currently held by this process.
///
/// Access to mutable state under Cargo's home directory is required to
/// happen while the package cache lock is held; this assertion turns a
/// forgotten `acquire_package_cache_lock` call into a loud, immediate
/// failure instead of a latent race condition.
pub fn assert_package_cache_locked<'a>(&self, f: &'a Filesystem) -> &'a Path {
    let ret = f.as_path_unlocked();
    assert!(
        self.package_cache_lock.borrow().is_some(),
        "package cache lock is not currently held, Cargo forgot to call \
         `acquire_package_cache_lock` before we got to this stack frame",
    );
    // Sanity check: the lock only guards paths inside Cargo's home
    // directory, so anything outside it would not actually be
    // synchronized by holding the lock.
    assert!(ret.starts_with(self.home_path.as_path_unlocked()));
    ret
}
/// Acquires the global package cache lock, synchronizing this process
/// with other concurrently-running Cargos.
///
/// The lock is recursive within a single process: acquiring it while it
/// is already held only bumps an in-memory counter rather than touching
/// the filesystem again. The returned guard releases one acquisition
/// when dropped; the on-disk lock itself is released once the count
/// drops back to zero.
pub fn acquire_package_cache_lock<'a>(&'a self) -> CargoResult<PackageCacheLock<'a>> {
    let mut slot = self.package_cache_lock.borrow_mut();
    match *slot {
        // Already held by this process — just record another acquisition.
        Some((_, ref mut cnt)) => {
            *cnt += 1;
        }
        // First acquisition: take the cross-process file lock under
        // Cargo's home directory.
        None => {
            let lock = self
                .home_path
                .open_rw(".package-cache", self, "package cache lock")
                .chain_err(|| "failed to acquire package cache lock")?;
            *slot = Some((lock, 1));
        }
    }
    Ok(PackageCacheLock(self))
}
pub fn release_package_cache_lock(&self) {}
}
/// A segment of a config key.
@ -1664,3 +1703,16 @@ pub fn save_credentials(cfg: &Config, token: String, registry: Option<String>) -
Ok(())
}
}
/// Guard returned by `Config::acquire_package_cache_lock`, representing
/// one acquisition of the global package cache lock.
pub struct PackageCacheLock<'a>(&'a Config);

impl Drop for PackageCacheLock<'_> {
    fn drop(&mut self) {
        // Each guard corresponds to one `acquire_package_cache_lock`
        // call; decrement the acquisition count and release the
        // underlying file lock only when the last guard goes away.
        let mut slot = self.0.package_cache_lock.borrow_mut();
        let (_, cnt) = slot.as_mut().unwrap();
        *cnt -= 1;
        if *cnt == 0 {
            *slot = None;
        }
    }
}

View file

@ -12,6 +12,7 @@ use crate::util::errors::{CargoResult, CargoResultExt};
use crate::util::paths;
use crate::util::Config;
#[derive(Debug)]
pub struct FileLock {
f: Option<File>,
path: PathBuf,
@ -136,6 +137,14 @@ impl Filesystem {
self.root
}
/// Returns the underlying `Path`.
///
/// Note that this is a relatively dangerous operation and should be used
/// with great caution: the path is handed out without any locking, so
/// the caller is responsible for ensuring that access to it is properly
/// synchronized (e.g. by holding the global package cache lock).
pub fn as_path_unlocked(&self) -> &Path {
    &self.root
}
/// Creates the directory pointed to by this filesystem.
///
/// Handles errors where other Cargo processes are also attempting to

View file

@ -108,8 +108,10 @@ fn not_update() {
let sid = SourceId::for_registry(&registry_url()).unwrap();
let cfg = Config::new(Shell::new(), paths::root(), paths::home().join(".cargo"));
let lock = cfg.acquire_package_cache_lock().unwrap();
let mut regsrc = RegistrySource::remote(sid, &HashSet::new(), &cfg);
regsrc.update().unwrap();
drop(lock);
cargo_process("search postgres")
.with_stdout_contains("hoare = \"0.1.1\" # Design by contract style assertions for Rust")