
Commit bb1fbbf

Auto merge of #80177 - tgnottingham:foreign_defpathhash_registration, r=Aaron1011
rustc_query_system: explicitly register reused dep nodes

Register nodes that we've reused from the previous session explicitly with `OnDiskCache`. Previously, we relied on this happening as a side effect of accessing the nodes in the `PreviousDepGraph`. For the sake of performance and avoiding unintended side effects, register them explicitly.
2 parents 353f3a3 + 55ae3b3 commit bb1fbbf
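
In essence, the change replaces side-effect registration during previous-graph lookups with a single explicit registration pass at cache-serialization time. Below is a minimal, self-contained sketch of that pattern; the types (`PreviousGraph`, `Cache`, `Color`) are invented for illustration and are not rustc's.

// Toy model only; the real types live in rustc_query_system and rustc_middle.
enum Color {
    Red,
    Green,
}

struct PreviousGraph {
    nodes: Vec<&'static str>,
    // One slot per node from the previous session; `None` means the node was
    // not reused (not marked red or green) in the current session.
    colors: Vec<Option<Color>>,
}

#[derive(Default)]
struct Cache {
    reused: Vec<&'static str>,
}

impl Cache {
    fn register_reused_dep_node(&mut self, node: &'static str) {
        self.reused.push(node);
    }
}

impl PreviousGraph {
    // New style: a pure lookup with no side effects.
    fn index_to_node(&self, index: usize) -> &'static str {
        self.nodes[index]
    }

    // New style: one explicit pass over every reused (red or green) node,
    // run when the on-disk cache is about to be serialized.
    fn register_reused_dep_nodes(&self, cache: &mut Cache) {
        for (index, color) in self.colors.iter().enumerate() {
            if color.is_some() {
                cache.register_reused_dep_node(self.index_to_node(index));
            }
        }
    }
}

fn main() {
    let prev = PreviousGraph {
        nodes: vec!["typeck(foo)", "optimized_mir(bar)", "hir_owner(baz)"],
        colors: vec![Some(Color::Green), Some(Color::Red), None],
    };
    let mut cache = Cache::default();

    // Previously, registration happened implicitly inside every lookup of a
    // previous-session node; now it is one explicit step during serialization.
    prev.register_reused_dep_nodes(&mut cache);
    assert_eq!(cache.reused, ["typeck(foo)", "optimized_mir(bar)"]);
}

In the actual change, this pass is wired in via `OnDiskCache::serialize` -> `DepGraph::register_reused_dep_nodes` -> `DepContext::register_reused_dep_node`, as the diffs below show.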

File tree (6 files changed, +54 -63 lines):

  compiler/rustc_middle/src/dep_graph/mod.rs
  compiler/rustc_middle/src/ty/query/on_disk_cache.rs
  compiler/rustc_query_system/src/dep_graph/dep_node.rs
  compiler/rustc_query_system/src/dep_graph/graph.rs
  compiler/rustc_query_system/src/dep_graph/mod.rs
  compiler/rustc_query_system/src/dep_graph/prev.rs

compiler/rustc_middle/src/dep_graph/mod.rs (+3, -3)

@@ -5,7 +5,7 @@ use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::Diagnostic;
-use rustc_hir::def_id::{DefPathHash, LocalDefId};
+use rustc_hir::def_id::LocalDefId;
 
 mod dep_node;
 
@@ -91,9 +91,9 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
     type DepKind = DepKind;
     type StableHashingContext = StableHashingContext<'tcx>;
 
-    fn register_reused_dep_path_hash(&self, hash: DefPathHash) {
+    fn register_reused_dep_node(&self, dep_node: &DepNode) {
         if let Some(cache) = self.queries.on_disk_cache.as_ref() {
-            cache.register_reused_dep_path_hash(*self, hash)
+            cache.register_reused_dep_node(*self, dep_node)
         }
     }
 
compiler/rustc_middle/src/ty/query/on_disk_cache.rs (+28, -10)

@@ -1,4 +1,4 @@
-use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
+use crate::dep_graph::{DepNode, DepNodeIndex, SerializedDepNodeIndex};
 use crate::mir::interpret::{AllocDecodingSession, AllocDecodingState};
 use crate::mir::{self, interpret};
 use crate::ty::codec::{OpaqueEncoder, RefDecodable, TyDecoder, TyEncoder};
@@ -264,6 +264,13 @@ impl<'sess> OnDiskCache<'sess> {
             (file_to_file_index, file_index_to_stable_id)
         };
 
+        // Register any dep nodes that we reused from the previous session,
+        // but didn't `DepNode::construct` in this session. This ensures
+        // that their `DefPathHash` to `RawDefId` mappings are registered
+        // in 'latest_foreign_def_path_hashes' if necessary, since that
+        // normally happens in `DepNode::construct`.
+        tcx.dep_graph.register_reused_dep_nodes(tcx);
+
         // Load everything into memory so we can write it out to the on-disk
         // cache. The vast majority of cacheable query results should already
         // be in memory, so this should be a cheap operation.
@@ -467,22 +474,33 @@ impl<'sess> OnDiskCache<'sess> {
             .insert(hash, RawDefId { krate: def_id.krate.as_u32(), index: def_id.index.as_u32() });
     }
 
-    /// If the given `hash` still exists in the current compilation,
-    /// calls `store_foreign_def_id` with its current `DefId`.
+    /// If the given `dep_node`'s hash still exists in the current compilation,
+    /// and its current `DefId` is foreign, calls `store_foreign_def_id` with it.
     ///
     /// Normally, `store_foreign_def_id_hash` can be called directly by
    /// the dependency graph when we construct a `DepNode`. However,
     /// when we re-use a deserialized `DepNode` from the previous compilation
     /// session, we only have the `DefPathHash` available. This method is used
     /// to that any `DepNode` that we re-use has a `DefPathHash` -> `RawId` written
     /// out for usage in the next compilation session.
-    pub fn register_reused_dep_path_hash(&self, tcx: TyCtxt<'tcx>, hash: DefPathHash) {
-        // We can't simply copy the `RawDefId` from `foreign_def_path_hashes` to
-        // `latest_foreign_def_path_hashes`, since the `RawDefId` might have
-        // changed in the current compilation session (e.g. we've added/removed crates,
-        // or added/removed definitions before/after the target definition).
-        if let Some(def_id) = self.def_path_hash_to_def_id(tcx, hash) {
-            self.store_foreign_def_id_hash(def_id, hash);
+    pub fn register_reused_dep_node(&self, tcx: TyCtxt<'tcx>, dep_node: &DepNode) {
+        // For reused dep nodes, we only need to store the mapping if the node
+        // is one whose query key we can reconstruct from the hash. We use the
+        // mapping to aid that reconstruction in the next session. While we also
+        // use it to decode `DefId`s we encoded in the cache as `DefPathHashes`,
+        // they're already registered during `DefId` encoding.
+        if dep_node.kind.can_reconstruct_query_key() {
+            let hash = DefPathHash(dep_node.hash.into());
+
+            // We can't simply copy the `RawDefId` from `foreign_def_path_hashes` to
+            // `latest_foreign_def_path_hashes`, since the `RawDefId` might have
+            // changed in the current compilation session (e.g. we've added/removed crates,
+            // or added/removed definitions before/after the target definition).
+            if let Some(def_id) = self.def_path_hash_to_def_id(tcx, hash) {
+                if !def_id.is_local() {
+                    self.store_foreign_def_id_hash(def_id, hash);
+                }
+            }
         }
     }

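The new comment block above notes that a `RawDefId` cannot simply be copied forward from the previous session's `foreign_def_path_hashes` table. A toy illustration of why, using invented `DefPathHash`/`RawDefId` stand-ins rather than the real definitions: the stable hash survives across sessions, but the raw crate/index pair it maps to is only meaningful within one session, so the mapping has to be re-resolved before it is written out again.

use std::collections::HashMap;

// Invented stand-ins for illustration; not the real rustc definitions.
// A `DefPathHash` is stable across compilation sessions, while a `RawDefId`
// (crate number + definition index) is only meaningful within one session.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct DefPathHash(u64);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct RawDefId {
    krate: u32,
    index: u32,
}

// Per-session resolution: which raw id does this stable hash refer to *now*?
fn current_session_def_id(
    table: &HashMap<DefPathHash, RawDefId>,
    hash: DefPathHash,
) -> Option<RawDefId> {
    table.get(&hash).copied()
}

fn main() {
    let hash = DefPathHash(0xdead_beef);

    // Previous session: the definition happened to be item 7 of crate 3.
    let previous = HashMap::from([(hash, RawDefId { krate: 3, index: 7 })]);

    // Current session: a crate was added or removed, so the same definition
    // now has a different raw id even though its stable hash is unchanged.
    let current = HashMap::from([(hash, RawDefId { krate: 4, index: 7 })]);

    // Copying the old RawDefId forward would record a stale id; the mapping
    // has to be re-resolved against the current session instead.
    let stale = previous[&hash];
    let fresh = current_session_def_id(&current, hash).expect("hash still exists");
    assert_ne!(stale, fresh);
    println!("stale: {:?}, fresh: {:?}", stale, fresh);
}
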
compiler/rustc_query_system/src/dep_graph/dep_node.rs (+2, -3)

@@ -60,9 +60,8 @@ pub struct DepNode<K> {
 // * When a `DepNode::construct` is called, `arg.to_fingerprint()`
 //   is responsible for calling `OnDiskCache::store_foreign_def_id_hash`
 //   if needed
-// * When a `DepNode` is loaded from the `PreviousDepGraph`,
-//   then `PreviousDepGraph::index_to_node` is responsible for calling
-//   `tcx.register_reused_dep_path_hash`
+// * When we serialize the on-disk cache, `OnDiskCache::serialize` is
+//   responsible for calling `DepGraph::register_reused_dep_nodes`.
 //
 // FIXME: Enforce this by preventing manual construction of `DefNode`
 // (e.g. add a `_priv: ()` field)

compiler/rustc_query_system/src/dep_graph/graph.rs (+19, -5)

@@ -554,7 +554,7 @@ impl<K: DepKind> DepGraph<K> {
         // We never try to mark eval_always nodes as green
         debug_assert!(!dep_node.kind.is_eval_always());
 
-        data.previous.debug_assert_eq(prev_dep_node_index, *dep_node);
+        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
 
         let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
 
@@ -572,7 +572,7 @@
                         "try_mark_previous_green({:?}) --- found dependency {:?} to \
                          be immediately green",
                         dep_node,
-                        data.previous.debug_dep_node(dep_dep_node_index),
+                        data.previous.index_to_node(dep_dep_node_index)
                     );
                     current_deps.push(node_index);
                 }
@@ -585,12 +585,12 @@
                         "try_mark_previous_green({:?}) - END - dependency {:?} was \
                          immediately red",
                         dep_node,
-                        data.previous.debug_dep_node(dep_dep_node_index)
+                        data.previous.index_to_node(dep_dep_node_index)
                     );
                     return None;
                 }
                 None => {
-                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index, tcx);
+                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);
 
                     // We don't know the state of this dependency. If it isn't
                     // an eval_always node, let's try to mark it green recursively.
@@ -801,7 +801,7 @@
         for prev_index in data.colors.values.indices() {
             match data.colors.get(prev_index) {
                 Some(DepNodeColor::Green(_)) => {
-                    let dep_node = data.previous.index_to_node(prev_index, tcx);
+                    let dep_node = data.previous.index_to_node(prev_index);
                     tcx.try_load_from_on_disk_cache(&dep_node);
                 }
                 None | Some(DepNodeColor::Red) => {
@@ -813,6 +813,20 @@
         }
     }
 
+    // Register reused dep nodes (i.e. nodes we've marked red or green) with the context.
+    pub fn register_reused_dep_nodes<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
+        let data = self.data.as_ref().unwrap();
+        for prev_index in data.colors.values.indices() {
+            match data.colors.get(prev_index) {
+                Some(DepNodeColor::Red) | Some(DepNodeColor::Green(_)) => {
+                    let dep_node = data.previous.index_to_node(prev_index);
+                    tcx.register_reused_dep_node(&dep_node);
+                }
+                None => {}
+            }
+        }
+    }
+
     fn next_virtual_depnode_index(&self) -> DepNodeIndex {
         let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
         DepNodeIndex::from_u32(index)

compiler/rustc_query_system/src/dep_graph/mod.rs (+1, -2)

@@ -15,7 +15,6 @@ use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::Diagnostic;
-use rustc_span::def_id::DefPathHash;
 
 use std::fmt;
 use std::hash::Hash;
@@ -33,7 +32,7 @@ pub trait DepContext: Copy {
     /// Try to force a dep node to execute and see if it's green.
     fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
 
-    fn register_reused_dep_path_hash(&self, hash: DefPathHash);
+    fn register_reused_dep_node(&self, dep_node: &DepNode<Self::DepKind>);
 
     /// Return whether the current session is tainted by errors.
     fn has_errors_or_delayed_span_bugs(&self) -> bool;

compiler/rustc_query_system/src/dep_graph/prev.rs (+1, -40)

@@ -1,9 +1,7 @@
 use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
 use super::{DepKind, DepNode};
-use crate::dep_graph::DepContext;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_span::def_id::DefPathHash;
 
 #[derive(Debug, Encodable, Decodable)]
 pub struct PreviousDepGraph<K: DepKind> {
@@ -33,44 +31,7 @@ impl<K: DepKind> PreviousDepGraph<K> {
     }
 
     #[inline]
-    pub fn index_to_node<CTX: DepContext<DepKind = K>>(
-        &self,
-        dep_node_index: SerializedDepNodeIndex,
-        tcx: CTX,
-    ) -> DepNode<K> {
-        let dep_node = self.data.nodes[dep_node_index];
-        // We have just loaded a deserialized `DepNode` from the previous
-        // compilation session into the current one. If this was a foreign `DefId`,
-        // then we stored additional information in the incr comp cache when we
-        // initially created its fingerprint (see `DepNodeParams::to_fingerprint`)
-        // We won't be calling `to_fingerprint` again for this `DepNode` (we no longer
-        // have the original value), so we need to copy over this additional information
-        // from the old incremental cache into the new cache that we serialize
-        // and the end of this compilation session.
-        if dep_node.kind.can_reconstruct_query_key() {
-            tcx.register_reused_dep_path_hash(DefPathHash(dep_node.hash.into()));
-        }
-        dep_node
-    }
-
-    /// When debug assertions are enabled, asserts that the dep node at `dep_node_index` is equal to `dep_node`.
-    /// This method should be preferred over manually calling `index_to_node`.
-    /// Calls to `index_to_node` may affect global state, so gating a call
-    /// to `index_to_node` on debug assertions could cause behavior changes when debug assertions
-    /// are enabled.
-    #[inline]
-    pub fn debug_assert_eq(&self, dep_node_index: SerializedDepNodeIndex, dep_node: DepNode<K>) {
-        debug_assert_eq!(self.data.nodes[dep_node_index], dep_node);
-    }
-
-    /// Obtains a debug-printable version of the `DepNode`.
-    /// See `debug_assert_eq` for why this should be preferred over manually
-    /// calling `dep_node_index`
-    pub fn debug_dep_node(&self, dep_node_index: SerializedDepNodeIndex) -> impl std::fmt::Debug {
-        // We're returning the `DepNode` without calling `register_reused_dep_path_hash`,
-        // but `impl Debug` return type means that it can only be used for debug printing.
-        // So, there's no risk of calls trying to create new dep nodes that have this
-        // node as a dependency
+    pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode<K> {
         self.data.nodes[dep_node_index]
     }

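A closing note on the helpers deleted above: the old `PreviousDepGraph::debug_assert_eq` and `debug_dep_node` existed only because `index_to_node` used to register a `DefPathHash` as a side effect, so calling it directly inside `debug_assert_eq!` (as the `graph.rs` hunk at line 557 now does) would have made debug and release builds behave differently. With the lookup now side-effect free, those wrappers are unnecessary. A small stand-alone sketch of the hazard they guarded against, using a hypothetical `lookup_with_side_effect` rather than rustc code:

use std::cell::Cell;

// Hypothetical lookup that mutates shared state as a side effect, in the
// spirit of the old, side-effecting `PreviousDepGraph::index_to_node`.
fn lookup_with_side_effect(counter: &Cell<u32>, value: u32) -> u32 {
    counter.set(counter.get() + 1); // side effect: registration / bookkeeping
    value
}

fn main() {
    let registrations = Cell::new(0u32);

    // The arguments of `debug_assert_eq!` are only evaluated when debug
    // assertions are enabled, so the side effect silently disappears in
    // release builds and debug/release behavior diverges.
    debug_assert_eq!(lookup_with_side_effect(&registrations, 42), 42);

    // With a side-effect-free lookup (like the new `index_to_node`), wrapping
    // the call in a debug assertion cannot change observable behavior.
    println!("registrations observed: {}", registrations.get());
}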