Lazy DefPath decoding for incremental compilation

Aaron Hill 2020-07-29 12:26:15 -04:00
parent db79d2f637
commit e935d3832c
11 changed files with 275 additions and 43 deletions

View file

@@ -1,6 +1,7 @@
//! Code to save/load the dep-graph from files.
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::definitions::Definitions;
use rustc_middle::dep_graph::{PreviousDepGraph, SerializedDepGraph, WorkProduct, WorkProductId};
use rustc_middle::ty::query::OnDiskCache;
use rustc_middle::ty::TyCtxt;
@@ -204,7 +205,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
/// If we are not in incremental compilation mode, returns `None`.
/// Otherwise, tries to load the query result cache from disk,
/// creating an empty cache if it could not be loaded.
pub fn load_query_result_cache(sess: &Session) -> Option<OnDiskCache<'_>> {
pub fn load_query_result_cache<'a>(sess: &'a Session, definitions: &Definitions) -> Option<OnDiskCache<'a>> {
if sess.opts.incremental.is_none() {
return None;
}
@@ -216,9 +217,7 @@ pub fn load_query_result_cache(sess: &Session) -> Option<OnDiskCache<'_>> {
&query_cache_path(sess),
sess.is_nightly_build(),
) {
LoadResult::Ok { data: (bytes, start_pos) } => {
Some(OnDiskCache::new(sess, bytes, start_pos))
}
LoadResult::Ok { data: (bytes, start_pos) } => Some(OnDiskCache::new(sess, bytes, start_pos, definitions)),
_ => Some(OnDiskCache::new_empty(sess.source_map())),
}
}

View file

@@ -747,7 +747,7 @@ pub fn create_global_ctxt<'tcx>(
Definitions::new(crate_name, sess.local_crate_disambiguator()),
));
let query_result_on_disk_cache = rustc_incremental::load_query_result_cache(sess);
let query_result_on_disk_cache = rustc_incremental::load_query_result_cache(sess, defs);
let codegen_backend = compiler.codegen_backend();
let mut local_providers = *DEFAULT_QUERY_PROVIDERS;

View file

@@ -77,6 +77,10 @@
raw_proc_macros: Option<&'static [ProcMacro]>,
/// Source maps for code from the crate.
source_map_import_info: OnceCell<Vec<ImportedSourceFile>>,
/// For every definition in this crate, maps its `DefPathHash` to its
/// `DefIndex`. See `raw_def_id_to_def_id` for more details about how
/// this is used.
def_path_hash_map: OnceCell<FxHashMap<DefPathHash, DefIndex>>,
/// Used for decoding interpret::AllocIds in a cached & thread-safe manner.
alloc_decoding_state: AllocDecodingState,
/// The `DepNodeIndex` of the `DepNode` representing this upstream crate.
@@ -1556,6 +1560,53 @@ fn def_key(&self, index: DefIndex) -> DefKey {
})
}
/// Finds the corresponding `DefId` for the provided `DefPathHash`, if it exists.
/// This is used by incremental compilation to map a serialized `DefPathHash` to
/// its `DefId` in the current session.
/// Normally, only one 'main' crate will change between incremental compilation sessions:
/// all dependencies will be completely unchanged. In this case, we can avoid
/// decoding every `DefPathHash` in the crate, since the `DefIndex` from the previous
/// session will still be valid. If our 'guess' is wrong (the `DefIndex` no longer exists,
/// or has a different `DefPathHash`), then we need to decode all `DefPathHashes` to determine
/// the correct mapping.
fn def_path_hash_to_def_id(
&self,
krate: CrateNum,
index_guess: u32,
hash: DefPathHash,
) -> Option<DefId> {
let def_index_guess = DefIndex::from_u32(index_guess);
let old_hash = self
.root
.tables
.def_path_hashes
.get(self, def_index_guess)
.map(|lazy| lazy.decode(self));
// Fast path: the definition and its index are unchanged from the
// previous compilation session. There is no need to decode anything
// else.
if old_hash == Some(hash) {
return Some(DefId { krate, index: def_index_guess });
}
// Slow path: We need to find out the new `DefIndex` of the provided
// `DefPathHash`, if it still exists. This requires decoding every `DefPathHash`
// stored in this crate.
let map = self.cdata.def_path_hash_map.get_or_init(|| {
let end_id = self.root.tables.def_path_hashes.size() as u32;
let mut map = FxHashMap::with_capacity_and_hasher(end_id as usize, Default::default());
for i in 0..end_id {
let def_index = DefIndex::from_u32(i);
let hash =
self.root.tables.def_path_hashes.get(self, def_index).unwrap().decode(self);
map.insert(hash, def_index);
}
map
});
map.get(&hash).map(|index| DefId { krate, index: *index })
}
// Returns the path leading to the thing with this `id`.
fn def_path(&self, id: DefIndex) -> DefPath {
debug!("def_path(cnum={:?}, id={:?})", self.cnum, id);
@@ -1834,6 +1885,7 @@ impl CrateMetadata {
trait_impls,
raw_proc_macros,
source_map_import_info: OnceCell::new(),
def_path_hash_map: Default::default(),
alloc_decoding_state,
dep_node_index: AtomicCell::new(DepNodeIndex::INVALID),
cnum,
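The `def_path_hash_to_def_id` lookup added above boils down to a guess-and-verify scheme backed by a lazily built reverse map. The following is a minimal standalone sketch of the same idea using only the standard library; `u64` stands in for `DefPathHash`, `usize` for `DefIndex`, and none of the type or function names below are the actual rustc API:

use std::collections::HashMap;
use std::sync::OnceLock;

struct CrateTable {
    // hashes[i] is the hash currently stored at index i in this crate's metadata.
    hashes: Vec<u64>,
    // Reverse map, built at most once and only if a guess ever fails.
    reverse: OnceLock<HashMap<u64, usize>>,
}

impl CrateTable {
    fn lookup(&self, index_guess: usize, hash: u64) -> Option<usize> {
        // Fast path: the definition kept its index since the previous session.
        if self.hashes.get(index_guess) == Some(&hash) {
            return Some(index_guess);
        }
        // Slow path: decode every hash once and index them by value.
        let map = self.reverse.get_or_init(|| {
            self.hashes.iter().copied().enumerate().map(|(i, h)| (h, i)).collect()
        });
        map.get(&hash).copied()
    }
}

fn main() {
    let table = CrateTable { hashes: vec![11, 22, 33], reverse: OnceLock::new() };
    assert_eq!(table.lookup(1, 22), Some(1)); // guess still valid
    assert_eq!(table.lookup(0, 33), Some(2)); // guess stale; slow path recovers the index
    assert_eq!(table.lookup(2, 99), None);    // definition no longer exists
}

In the compiler the reverse map is per upstream crate and keyed by `DefPathHash`, but the control flow is the same: verify the guess, and only pay the full decoding cost when it fails.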

View file

@@ -506,6 +506,16 @@ fn num_def_ids(&self, cnum: CrateNum) -> usize {
self.get_crate_data(cnum).num_def_ids()
}
// See `CrateMetadataRef::def_path_hash_to_def_id` for more details
fn def_path_hash_to_def_id(
&self,
cnum: CrateNum,
index_guess: u32,
hash: DefPathHash,
) -> Option<DefId> {
self.get_crate_data(cnum).def_path_hash_to_def_id(cnum, index_guess, hash)
}
fn crates_untracked(&self) -> Vec<CrateNum> {
let mut result = vec![];
self.iter_crate_data(|cnum, _| result.push(cnum));

View file

@@ -252,8 +252,8 @@ fn from_def_path_hash(def_path_hash: DefPathHash, kind: DepKind) -> DepNode {
/// has been removed.
fn extract_def_id(&self, tcx: TyCtxt<'tcx>) -> Option<DefId> {
if self.kind.can_reconstruct_query_key() {
let def_path_hash = DefPathHash(self.hash.into());
tcx.def_path_hash_to_def_id.as_ref()?.get(&def_path_hash).cloned()
let def_path_hash = DefPathHash(self.hash);
tcx.queries.on_disk_cache.as_ref()?.def_path_hash_to_def_id(tcx, def_path_hash)
} else {
None
}
@@ -320,7 +320,15 @@ fn can_reconstruct_query_key() -> bool {
}
fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
tcx.def_path_hash(*self).0
let hash = tcx.def_path_hash(*self);
// If this is a foreign `DefId`, store its current value
// in the incremental cache. When we decode the cache,
// we will use the old DefIndex as an initial guess for
// a lookup into the crate metadata.
if !self.is_local() {
tcx.queries.on_disk_cache.store_foreign_def_id_hash(*self, hash);
}
hash.0
}
fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
@@ -359,7 +367,7 @@ fn can_reconstruct_query_key() -> bool {
fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
let def_id = DefId { krate: *self, index: CRATE_DEF_INDEX };
tcx.def_path_hash(def_id).0
def_id.to_fingerprint(tcx)
}
fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
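Both halves of the change above rest on one invariant: for dep nodes whose query key can be reconstructed, the node's fingerprint is exactly the key's `DefPathHash`, which is why `extract_def_id` can recover a `DefId` from the hash alone. A small standalone sketch of that round trip, with plain types standing in for the rustc ones (nothing here is the real API):

use std::collections::HashMap;

type Hash = u64; // stands in for DefPathHash / Fingerprint
type Id = u32;   // stands in for DefId

// The fingerprint of a DefId-keyed node is just its def path hash, unmodified.
fn to_fingerprint(def_path_hash: Hash) -> Hash {
    def_path_hash
}

// Because of that, the key can be rebuilt from the node's hash alone,
// provided we can map the hash back to an id in the current session.
fn extract_def_id(node_hash: Hash, current_ids: &HashMap<Hash, Id>) -> Option<Id> {
    current_ids.get(&node_hash).copied()
}

fn main() {
    let current_ids = HashMap::from([(0xABCD, 7)]);
    let node_hash = to_fingerprint(0xABCD);
    assert_eq!(extract_def_id(node_hash, &current_ids), Some(7));
    // A hash whose definition was removed simply fails to resolve.
    assert_eq!(extract_def_id(0xDEAD, &current_ids), None);
}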

View file

@@ -5,7 +5,7 @@
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::Diagnostic;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::def_id::{DefPathHash, LocalDefId};
mod dep_node;
@@ -91,6 +91,10 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
type DepKind = DepKind;
type StableHashingContext = StableHashingContext<'tcx>;
fn register_reused_dep_path_hash(&self, hash: DefPathHash) {
self.queries.on_disk_cache.register_reused_dep_path_hash(hash)
}
fn create_stable_hashing_context(&self) -> Self::StableHashingContext {
TyCtxt::create_stable_hashing_context(*self)
}

View file

@@ -191,6 +191,12 @@ pub trait CrateStore {
fn def_path_hash(&self, def: DefId) -> DefPathHash;
fn all_def_path_hashes_and_def_ids(&self, cnum: CrateNum) -> Vec<(DefPathHash, DefId)>;
fn num_def_ids(&self, cnum: CrateNum) -> usize;
fn def_path_hash_to_def_id(
&self,
cnum: CrateNum,
index_guess: u32,
hash: DefPathHash,
) -> Option<DefId>;
// "queries" used in resolve that aren't tracked for incremental compilation
fn crate_name_untracked(&self, cnum: CrateNum) -> Symbol;

View file

@@ -1,7 +1,7 @@
//! Type context book-keeping.
use crate::arena::Arena;
use crate::dep_graph::{self, DepConstructor, DepGraph};
use crate::dep_graph::{self, DepGraph, DepKind, DepNode, DepNodeExt};
use crate::hir::exports::ExportMap;
use crate::ich::{NodeIdHashingMode, StableHashingContext};
use crate::infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos};
@@ -34,12 +34,12 @@
};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{self, Lock, Lrc, WorkerLocal};
use rustc_data_structures::unhash::UnhashMap;
use rustc_errors::ErrorReported;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LOCAL_CRATE};
use rustc_hir::definitions::{DefPathHash, Definitions};
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId};
use rustc_hir::def_id::{CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::definitions::Definitions;
use rustc_hir::intravisit::Visitor;
use rustc_hir::lang_items::LangItem;
use rustc_hir::{
@@ -945,10 +945,6 @@ pub struct GlobalCtxt<'tcx> {
pub(crate) untracked_crate: &'tcx hir::Crate<'tcx>,
pub(crate) definitions: &'tcx Definitions,
/// A map from `DefPathHash` -> `DefId`. Includes `DefId`s from the local crate
/// as well as all upstream crates. Only populated in incremental mode.
pub def_path_hash_to_def_id: Option<UnhashMap<DefPathHash, DefId>>,
pub queries: query::Queries<'tcx>,
maybe_unused_trait_imports: FxHashSet<LocalDefId>,
@@ -1113,21 +1109,6 @@ pub fn create_global_ctxt(
let mut providers = IndexVec::from_elem_n(extern_providers, max_cnum + 1);
providers[LOCAL_CRATE] = local_providers;
let def_path_hash_to_def_id = if s.opts.build_dep_graph() {
let capacity = definitions.def_path_table().num_def_ids()
+ crates.iter().map(|cnum| cstore.num_def_ids(*cnum)).sum::<usize>();
let mut map = UnhashMap::with_capacity_and_hasher(capacity, Default::default());
map.extend(definitions.def_path_table().all_def_path_hashes_and_def_ids(LOCAL_CRATE));
for cnum in &crates {
map.extend(cstore.all_def_path_hashes_and_def_ids(*cnum).into_iter());
}
Some(map)
} else {
None
};
let mut trait_map: FxHashMap<_, FxHashMap<_, _>> = FxHashMap::default();
for (hir_id, v) in krate.trait_map.iter() {
let map = trait_map.entry(hir_id.owner).or_default();
@@ -1155,7 +1136,6 @@ pub fn create_global_ctxt(
extern_prelude: resolutions.extern_prelude,
untracked_crate: krate,
definitions,
def_path_hash_to_def_id,
queries: query::Queries::new(providers, extern_providers, on_disk_query_result_cache),
ty_rcache: Default::default(),
pred_rcache: Default::default(),
@@ -1329,7 +1309,8 @@ pub fn allocate_metadata_dep_nodes(self) {
// We cannot use the query versions of crates() and crate_hash(), since
// those would need the DepNodes that we are allocating here.
for cnum in self.cstore.crates_untracked() {
let dep_node = DepConstructor::CrateMetadata(self, cnum);
let def_path_hash = self.def_path_hash(DefId { krate: cnum, index: CRATE_DEF_INDEX });
let dep_node = DepNode::from_def_path_hash(def_path_hash, DepKind::CrateMetadata);
let crate_hash = self.cstore.crate_hash_untracked(cnum);
self.dep_graph.with_task(
dep_node,

View file

@@ -11,6 +11,7 @@
use rustc_errors::Diagnostic;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, LOCAL_CRATE};
use rustc_hir::definitions::DefPathHash;
use rustc_hir::definitions::Definitions;
use rustc_index::vec::{Idx, IndexVec};
use rustc_serialize::{opaque, Decodable, Decoder, Encodable, Encoder};
use rustc_session::{CrateDisambiguator, Session};
@@ -21,6 +22,8 @@
use rustc_span::source_map::{SourceMap, StableSourceFileId};
use rustc_span::CachingSourceMapView;
use rustc_span::{BytePos, ExpnData, SourceFile, Span, DUMMY_SP};
use std::collections::hash_map::Entry;
use std::iter::FromIterator;
use std::mem;
const TAG_FILE_FOOTER: u128 = 0xC0FFEE_C0FFEE_C0FFEE_C0FFEE_C0FFEE;
@@ -80,6 +83,28 @@ pub struct OnDiskCache<'sess> {
expn_data: FxHashMap<u32, AbsoluteBytePos>,
// Additional information used when decoding hygiene data.
hygiene_context: HygieneDecodeContext,
// Maps `DefPathHash`es to their `RawDefId`s from the *previous*
// compilation session. This is used as an initial 'guess' when
// we try to map a `DefPathHash` to its `DefId` in the current compilation
// session.
foreign_def_path_hashes: FxHashMap<DefPathHash, RawDefId>,
// The *next* compilation session's `foreign_def_path_hashes` - at
// the end of our current compilation session, this will get written
// out to the `foreign_def_path_hashes` field of the `Footer`, which
// will become `foreign_def_path_hashes` of the next compilation session.
// This stores any `DefPathHash` that we may need to map to a `DefId`
// during the next compilation session.
latest_foreign_def_path_hashes: Lock<FxHashMap<DefPathHash, RawDefId>>,
// Maps `DefPathHashes` to their corresponding `LocalDefId`s for all
// local items in the current compilation session.
local_def_path_hash_to_def_id: FxHashMap<DefPathHash, LocalDefId>,
// Caches all lookups of `DefPathHashes`, both for local and foreign
// definitions. A definition from the previous compilation session
// may no longer exist in the current compilation session, so
// we use `Option<DefId>` so that we can cache a lookup failure.
def_path_hash_to_def_id_cache: Lock<FxHashMap<DefPathHash, Option<DefId>>>,
}
// This type is used only for serialization and deserialization.
@@ -95,6 +120,7 @@ struct Footer {
syntax_contexts: FxHashMap<u32, AbsoluteBytePos>,
// See `OnDiskCache.expn_data`
expn_data: FxHashMap<u32, AbsoluteBytePos>,
foreign_def_path_hashes: FxHashMap<DefPathHash, RawDefId>,
}
type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
@@ -118,9 +144,36 @@ fn to_usize(self) -> usize {
}
}
/// Represents a `DefId` from the *previous* compilation session, which may
/// no longer be valid. This is used to help map a `DefPathHash`
/// to a `DefId` in the current compilation session.
#[derive(Encodable, Decodable, Copy, Clone, Debug)]
crate struct RawDefId {
// We deliberately do not use `CrateNum` and `DefIndex`
// here, since a crate/index from the previous compilation
// session may no longer exist.
pub krate: u32,
pub index: u32,
}
fn make_local_def_path_hash_map(definitions: &Definitions) -> FxHashMap<DefPathHash, LocalDefId> {
FxHashMap::from_iter(
definitions
.def_path_table()
.all_def_path_hashes_and_def_ids(LOCAL_CRATE)
.map(|(hash, def_id)| (hash, def_id.as_local().unwrap())),
)
}
impl<'sess> OnDiskCache<'sess> {
/// Creates a new `OnDiskCache` instance from the serialized data in `data`.
pub fn new(sess: &'sess Session, data: Vec<u8>, start_pos: usize) -> Self {
pub fn new(
sess: &'sess Session,
data: Vec<u8>,
start_pos: usize,
definitions: &Definitions,
) -> Self {
debug_assert!(sess.opts.incremental.is_some());
// Wrap in a scope so we can borrow `data`.
@@ -155,10 +208,14 @@ pub fn new(sess: &'sess Session, data: Vec<u8>, start_pos: usize) -> Self {
syntax_contexts: footer.syntax_contexts,
expn_data: footer.expn_data,
hygiene_context: Default::default(),
foreign_def_path_hashes: footer.foreign_def_path_hashes,
latest_foreign_def_path_hashes: Default::default(),
local_def_path_hash_to_def_id: make_local_def_path_hash_map(definitions),
def_path_hash_to_def_id_cache: Default::default(),
}
}
pub fn new_empty(source_map: &'sess SourceMap) -> Self {
pub fn new_empty(source_map: &'sess SourceMap, definitions: &Definitions) -> Self {
Self {
serialized_data: Vec::new(),
file_index_to_stable_id: Default::default(),
@@ -173,6 +230,10 @@ pub fn new_empty(source_map: &'sess SourceMap) -> Self {
syntax_contexts: FxHashMap::default(),
expn_data: FxHashMap::default(),
hygiene_context: Default::default(),
foreign_def_path_hashes: Default::default(),
latest_foreign_def_path_hashes: Default::default(),
local_def_path_hash_to_def_id: make_local_def_path_hash_map(definitions),
def_path_hash_to_def_id_cache: Default::default(),
}
}
@@ -200,6 +261,15 @@ pub fn serialize<'tcx, E>(&self, tcx: TyCtxt<'tcx>, encoder: &mut E) -> Result<(
(file_to_file_index, file_index_to_stable_id)
};
// Load everything into memory so we can write it out to the on-disk
// cache. The vast majority of cacheable query results should already
// be in memory, so this should be a cheap operation.
// Do this *before* we clone `latest_foreign_def_path_hashes`, since
// loading existing queries may cause us to create new `DepNode`s, which
// may in turn end up invoking `store_foreign_def_id_hash`.
tcx.dep_graph.exec_cache_promotions(tcx);
let latest_foreign_def_path_hashes = self.latest_foreign_def_path_hashes.lock().clone();
let hygiene_encode_context = HygieneEncodeContext::default();
let mut encoder = CacheEncoder {
@@ -211,13 +281,9 @@ pub fn serialize<'tcx, E>(&self, tcx: TyCtxt<'tcx>, encoder: &mut E) -> Result<(
source_map: CachingSourceMapView::new(tcx.sess.source_map()),
file_to_file_index,
hygiene_context: &hygiene_encode_context,
latest_foreign_def_path_hashes,
};
// Load everything into memory so we can write it out to the on-disk
// cache. The vast majority of cacheable query results should already
// be in memory, so this should be a cheap operation.
tcx.dep_graph.exec_cache_promotions(tcx);
// Encode query results.
let mut query_result_index = EncodedQueryResultIndex::new();
@@ -312,6 +378,9 @@ macro_rules! encode_queries {
},
)?;
let foreign_def_path_hashes =
std::mem::take(&mut encoder.latest_foreign_def_path_hashes);
// Encode the file footer.
let footer_pos = encoder.position() as u64;
encoder.encode_tagged(
@@ -324,6 +393,7 @@ macro_rules! encode_queries {
interpret_alloc_index,
syntax_contexts,
expn_data: expn_ids,
foreign_def_path_hashes,
},
)?;
@@ -374,6 +444,31 @@ pub fn store_diagnostics(
debug_assert!(prev.is_none());
}
fn get_raw_def_id(&self, hash: &DefPathHash) -> Option<RawDefId> {
self.foreign_def_path_hashes.get(hash).copied()
}
fn try_remap_cnum(&self, tcx: TyCtxt<'_>, cnum: u32) -> Option<CrateNum> {
let cnum_map =
self.cnum_map.get_or_init(|| Self::compute_cnum_map(tcx, &self.prev_cnums[..]));
cnum_map[CrateNum::from_u32(cnum)]
}
pub(crate) fn store_foreign_def_id_hash(&self, def_id: DefId, hash: DefPathHash) {
// We may overwrite an existing entry, but it will have the same value,
// so it's fine.
self.latest_foreign_def_path_hashes
.lock()
.insert(hash, RawDefId { krate: def_id.krate.as_u32(), index: def_id.index.as_u32() });
}
pub fn register_reused_dep_path_hash(&self, hash: DefPathHash) {
if let Some(old_id) = self.foreign_def_path_hashes.get(&hash) {
self.latest_foreign_def_path_hashes.lock().insert(hash, *old_id);
}
}
/// Returns the cached query result if there is something in the cache for
/// the given `SerializedDepNodeIndex`; otherwise returns `None`.
crate fn try_load_query_result<'tcx, T>(
@@ -481,6 +576,53 @@ fn compute_cnum_map(
map
})
}
/// Converts a `DefPathHash` to its corresponding `DefId` in the current compilation
/// session, if it still exists. This is used during incremental compilation to
/// turn a deserialized `DefPathHash` into its current `DefId`.
pub(crate) fn def_path_hash_to_def_id(
&self,
tcx: TyCtxt<'tcx>,
hash: DefPathHash,
) -> Option<DefId> {
let mut cache = self.def_path_hash_to_def_id_cache.lock();
match cache.entry(hash) {
Entry::Occupied(e) => *e.get(),
Entry::Vacant(e) => {
// Check if the `DefPathHash` corresponds to a definition in the current
// crate
if let Some(def_id) = self.local_def_path_hash_to_def_id.get(&hash).cloned() {
let def_id = def_id.to_def_id();
e.insert(Some(def_id));
return Some(def_id);
}
// This `raw_def_id` represents the `DefId` of this `DefPathHash` in
// the *previous* compilation session. The `DefPathHash` includes the
// owning crate, so if the corresponding definition still exists in the
// current compilation session, the crate is guaranteed to be the same
// (otherwise, we would compute a different `DefPathHash`).
let raw_def_id = self.get_raw_def_id(&hash)?;
// If the owning crate no longer exists, the corresponding definition definitely
// no longer exists.
let krate = self.try_remap_cnum(tcx, raw_def_id.krate)?;
// If our `DefPathHash` corresponded to a definition in the local crate,
// we should have either found it in `local_def_path_hash_to_def_id`, or
// never attempted to load it in the first place. Any query result or `DepNode`
// that references a local `DefId` should depend on some HIR-related `DepNode`.
// If a local definition is removed/modified such that its old `DefPathHash`
// no longer has a corresponding definition, that HIR-related `DepNode` should
// end up red. This should prevent us from ever calling
// `tcx.def_path_hash_to_def_id`, since we'll end up recomputing any
// queries involved.
debug_assert_ne!(krate, LOCAL_CRATE);
// Try to find a definition in the current session, using the previous `DefIndex`
// as an initial guess.
let opt_def_id = tcx.cstore.def_path_hash_to_def_id(krate, raw_def_id.index, hash);
e.insert(opt_def_id);
opt_def_id
}
}
}
}
//- DECODING -------------------------------------------------------------------
@@ -713,7 +855,11 @@ fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> {
let def_path_hash = DefPathHash::decode(d)?;
// Using the `DefPathHash`, we can look up the new `DefId`.
Ok(d.tcx().def_path_hash_to_def_id.as_ref().unwrap()[&def_path_hash])
// Subtle: We only encode a `DefId` as part of a query result.
// If we get to this point, then all of the query inputs were green,
// which means that the definition with this hash is guaranteed to
// still exist in the current compilation session.
Ok(d.tcx().queries.on_disk_cache.def_path_hash_to_def_id(d.tcx(), def_path_hash).unwrap())
}
}
@@ -773,6 +919,7 @@ struct CacheEncoder<'a, 'tcx, E: OpaqueEncoder> {
source_map: CachingSourceMapView<'tcx>,
file_to_file_index: FxHashMap<*const SourceFile, SourceFileIndex>,
hygiene_context: &'a HygieneEncodeContext,
latest_foreign_def_path_hashes: FxHashMap<DefPathHash, RawDefId>,
}
impl<'a, 'tcx, E> CacheEncoder<'a, 'tcx, E>
@@ -895,6 +1042,15 @@ impl<'a, 'tcx, E> Encodable<CacheEncoder<'a, 'tcx, E>> for DefId
{
fn encode(&self, s: &mut CacheEncoder<'a, 'tcx, E>) -> Result<(), E::Error> {
let def_path_hash = s.tcx.def_path_hash(*self);
// Store additional information when we encode a foreign `DefId`,
// so that we can map its `DefPathHash` back to a `DefId` in the next
// compilation session.
if !self.is_local() {
s.latest_foreign_def_path_hashes.insert(
def_path_hash,
RawDefId { krate: self.krate.as_u32(), index: self.index.as_u32() },
);
}
def_path_hash.encode(s)
}
}
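Taken together, the additions in this file form a small cross-session protocol: while encoding, each foreign `DefId` records a `DefPathHash` -> `RawDefId` entry; that map is written into the footer; and the next session treats the stored index purely as a guess to be verified against the crate's current metadata. A compressed standalone simulation of that flow, using plain types rather than the rustc ones (all names below are illustrative):

use std::collections::HashMap;

// Plain stand-ins: u64 for DefPathHash, (u32, u32) for RawDefId (crate, index).
type Hash = u64;
type RawId = (u32, u32);

// Session N: while serializing query results, remember where each foreign
// definition lived, keyed by its stable hash. This becomes the footer.
fn encode_footer(foreign_def_ids: &[(Hash, RawId)]) -> HashMap<Hash, RawId> {
    foreign_def_ids.iter().copied().collect()
}

// Session N + 1: resolve a deserialized hash. The footer entry only supplies
// a guess; the authoritative check is against the crate's current table.
fn resolve(
    footer: &HashMap<Hash, RawId>,
    current_table: &[Hash], // current_table[i] = hash stored at index i
    hash: Hash,
) -> Option<u32> {
    let (_krate, index_guess) = *footer.get(&hash)?;
    if current_table.get(index_guess as usize) == Some(&hash) {
        return Some(index_guess); // fast path: index unchanged
    }
    // Slow path: scan (the real code builds a reverse map once, lazily).
    current_table.iter().position(|h| *h == hash).map(|i| i as u32)
}

fn main() {
    // Session N records two foreign definitions.
    let footer = encode_footer(&[(0xAAA, (2, 0)), (0xBBB, (2, 1))]);
    // Session N + 1: the upstream crate gained a definition at index 0,
    // shifting 0xAAA and 0xBBB to new indices.
    let current_table = vec![0xCCC, 0xAAA, 0xBBB];
    assert_eq!(resolve(&footer, &current_table, 0xBBB), Some(2)); // guess stale, still found
    assert_eq!(resolve(&footer, &current_table, 0xDDD), None);    // never recorded
    // If the upstream crate did not change at all, the guess hits directly.
    let unchanged = vec![0xAAA, 0xBBB];
    assert_eq!(resolve(&footer, &unchanged, 0xAAA), Some(0));
}

Locally-defined hashes never go through this path; they are resolved through the map built from `Definitions` at load time (`make_local_def_path_hash_map` above).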

View file

@@ -7,6 +7,7 @@
use rustc_data_structures::unlikely;
use rustc_errors::Diagnostic;
use rustc_index::vec::{Idx, IndexVec};
use rustc_span::def_id::DefPathHash;
use parking_lot::{Condvar, Mutex};
use smallvec::{smallvec, SmallVec};
@@ -699,6 +700,18 @@ fn try_mark_previous_green<Ctxt: DepContext<DepKind = K>>(
data.current.intern_node(*dep_node, current_deps, fingerprint)
};
// We have just loaded a deserialized `DepNode` from the previous
// compilation session into the current one. If this was a foreign `DefId`,
// then we stored additional information in the incr comp cache when we
// initially created its fingerprint (see `DepNodeParams::to_fingerprint`).
// We won't be calling `to_fingerprint` again for this `DepNode` (we no longer
// have the original value), so we need to copy over this additional information
// from the old incremental cache into the new cache that we serialize
// at the end of this compilation session.
if dep_node.kind.can_reconstruct_query_key() {
tcx.register_reused_dep_path_hash(DefPathHash(dep_node.hash));
}
// ... emitting any stored diagnostic ...
// FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
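The registration above is needed because a node that is merely re-marked green never recomputes its fingerprint, so its `DefPathHash` -> `RawDefId` entry would otherwise be missing from the next session's footer. A minimal sketch of that carry-forward rule over plain maps (illustrative stand-ins, not the rustc types):

use std::collections::HashMap;

// prev: footer loaded from the previous session; next: footer being built now.
fn register_reused(
    prev: &HashMap<u64, (u32, u32)>,
    next: &mut HashMap<u64, (u32, u32)>,
    hash: u64,
) {
    // Only copy entries we actually had; local or unknown hashes are skipped.
    if let Some(raw_id) = prev.get(&hash) {
        next.insert(hash, *raw_id);
    }
}

fn main() {
    let prev = HashMap::from([(0xAAA, (2, 5))]);
    let mut next = HashMap::new();
    // A green node keyed by hash 0xAAA is reused without re-encoding its DefId,
    // so the mapping must be copied forward explicitly.
    register_reused(&prev, &mut next, 0xAAA);
    register_reused(&prev, &mut next, 0xBBB); // nothing recorded for this one
    assert_eq!(next.get(&0xAAA), Some(&(2, 5)));
    assert!(next.get(&0xBBB).is_none());
}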

View file

@@ -15,6 +15,7 @@
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::Diagnostic;
use rustc_span::def_id::DefPathHash;
use std::fmt;
use std::hash::Hash;
@@ -32,6 +33,8 @@ pub trait DepContext: Copy {
/// Try to force a dep node to execute and see if it's green.
fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
fn register_reused_dep_path_hash(&self, hash: DefPathHash);
/// Return whether the current session is tainted by errors.
fn has_errors_or_delayed_span_bugs(&self) -> bool;