Auto merge of #118667 - michaelwoerister:experiment-sparse-fingerprints, r=<try>

Experiment: Only track fingerprints for queries with reconstructible dep-nodes.

This is an experiment to collect performance data about alternative ways to adapt #109050. The PR makes the following change:

All queries with keys that are not reconstructible from their corresponding DepNode are now treated similarly to anonymous queries. That is, we don't compute a DepNode or result fingerprint for them.

This has some implications:
- We save time because query keys and results don't have to be hashed.
- We can save space by storing less data for these nodes in the on-disk dep-graph. (Not implemented in this PR, as I ran out of time. Maybe this would be a quick fix for `@saethlin` though?)
- We don't have to worry about hash collisions for DepNodes in these cases (although we still have to worry about hash collisions for result fingerprints, which might involve all the same HashStable impls).
- As with anonymous queries, the graph can grow additional nodes and edges in some situations, because existing graph parts might be promoted while new parts are allocated for the same query if it is re-executed. I don't know how much this happens in practice.
- We cannot cache query results on disk for queries with complex keys (see the sketch below).

Given that the last point affects some heavy queries, I have my doubts that this strategy is a win. But let's run it through perf at least once.
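To make the caching point concrete: a result can only be loaded from the incremental on-disk cache if the query can be re-run from nothing but its `DepNode`, which stores only a kind and a hash. The following is a minimal, hypothetical sketch of that constraint; `Fingerprint` as a `u64`, the toy hash, and the side table are simplified stand-ins, not rustc's actual types:

```rust
use std::collections::HashMap;

// Illustrative stand-ins: rustc's Fingerprint is 128 bits and comes from a
// SipHash-based stable hasher; a &'static str stands in for DepKind.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Fingerprint(u64);

#[derive(Clone, Copy, Debug)]
struct DepNode {
    kind: &'static str,
    hash: Fingerprint,
}

fn fingerprint_of(data: &str) -> Fingerprint {
    // Toy FNV-1a hash, standing in for rustc's stable hash.
    Fingerprint(data.bytes().fold(0xcbf29ce484222325, |h, b| {
        (h ^ b as u64).wrapping_mul(0x100000001b3)
    }))
}

fn main() {
    // A DefId-like key: the compiler keeps a stable side table from the
    // key's hash back to the key, so the key is reconstructible from the
    // DepNode alone. A red node of this kind can be re-executed and its
    // result loaded from the on-disk cache.
    let mut def_path_table: HashMap<Fingerprint, &str> = HashMap::new();
    let key = "crate::module::function";
    let node = DepNode { kind: "def_kind", hash: fingerprint_of(key) };
    def_path_table.insert(node.hash, key);
    println!("{} key recovered: {:?}", node.kind, def_path_table.get(&node.hash));

    // A composite key like (ParamEnv, TraitRef): only its hash survives in
    // the DepNode, and no table maps the hash back to the pair. Under this
    // PR such queries get no per-key DepNode at all, so their results can
    // no longer be cached on disk.
    let composite_key = ("ParamEnv(..)", "TraitRef(..)");
    let hash = fingerprint_of(&format!("{composite_key:?}"));
    println!("codegen_select_candidate: key is gone, only {hash:?} remains");
}
```

This is why the diff below comments out `cache_on_disk_if` for queries such as `symbol_name` (key `ty::Instance`) and `codegen_select_candidate` (key `(ParamEnv, TraitRef)`): their keys are not reconstructible, so disk caching is dropped for them.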

cc `@cjgillot` `@Zoxc`

r? `@ghost`
bors committed Dec 6, 2023
2 parents 15bb3e2 + f2fd56c commit 139a4ac
Showing 4 changed files with 191 additions and 115 deletions.
10 changes: 5 additions & 5 deletions compiler/rustc_middle/src/query/mod.rs
@@ -1066,7 +1066,7 @@ rustc_queries! {
"const-evaluating + checking `{}`",
key.value.display(tcx)
}
- cache_on_disk_if { true }
+ // cache_on_disk_if { true } Cannot cache this anymore
}

/// Evaluates const items or anonymous constants
@@ -1081,7 +1081,7 @@ rustc_queries! {
"simplifying constant for the type system `{}`",
key.value.display(tcx)
}
- cache_on_disk_if { true }
+ // cache_on_disk_if { true } Cannot cache this anymore
}

/// Evaluate a constant and convert it to a type level constant or
@@ -1148,7 +1148,7 @@
/// look up the correct symbol name of instances from upstream crates.
query symbol_name(key: ty::Instance<'tcx>) -> ty::SymbolName<'tcx> {
desc { "computing the symbol for `{}`", key }
- cache_on_disk_if { true }
+ // cache_on_disk_if { true } Cannot cache this anymore
}

query def_kind(def_id: DefId) -> DefKind {
@@ -1284,7 +1284,7 @@
query codegen_select_candidate(
key: (ty::ParamEnv<'tcx>, ty::TraitRef<'tcx>)
) -> Result<&'tcx ImplSource<'tcx, ()>, CodegenObligationError> {
- cache_on_disk_if { true }
+ // cache_on_disk_if { true } Cannot cache this anymore
desc { |tcx| "computing candidate for `{}`", key.1 }
}

@@ -1894,7 +1894,7 @@
}

query unused_generic_params(key: ty::InstanceDef<'tcx>) -> UnusedGenericParams {
- cache_on_disk_if { key.def_id().is_local() }
+ // cache_on_disk_if { key.def_id().is_local() } Cannot cache this anymore
desc {
|tcx| "determining which generic parameters are unused by `{}`",
tcx.def_path_str(key.def_id())
171 changes: 94 additions & 77 deletions compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -133,7 +133,7 @@ impl<D: Deps> DepGraph<D> {
let colors = DepNodeColorMap::new(prev_graph_node_count);

// Instantiate a dependency-less node only once for anonymous queries.
- let _green_node_index = current.intern_new_node(
+ let _green_node_index = current.alloc_new_node(
profiler,
DepNode { kind: D::DEP_KIND_NULL, hash: current.anon_id_seed.into() },
EdgesVec::new(),
@@ -272,6 +272,7 @@ impl<D: Deps> DepGraph<D> {
D::with_deps(TaskDepsRef::Forbid, op)
}

+ // FIXME(sparse_fps): Document
#[inline(always)]
pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
&self,
@@ -287,6 +288,7 @@
}
}

+ // FIXME(sparse_fps): Document
pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
&self,
cx: Tcx,
@@ -297,7 +299,7 @@
OP: FnOnce() -> R,
{
match self.data() {
- Some(data) => data.with_anon_task(cx, dep_kind, op),
+ Some(data) => data.with_anon_task(cx, dep_kind, true, op),
None => (op(), self.next_virtual_depnode_index()),
}
}
@@ -395,61 +397,71 @@ impl<D: Deps> DepGraphData<D> {
(result, dep_node_index)
}

+ // FIXME(sparse_fps): Document
/// Executes something within an "anonymous" task, that is, a task the
/// `DepNode` of which is determined by the list of inputs it read from.
pub(crate) fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
&self,
cx: Tcx,
dep_kind: DepKind,
+ intern: bool,
op: OP,
) -> (R, DepNodeIndex)
where
OP: FnOnce() -> R,
{
- debug_assert!(!cx.is_eval_always(dep_kind));

let task_deps = Lock::new(TaskDeps::default());
let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
let task_deps = task_deps.into_inner();
let task_deps = task_deps.reads;

-    let dep_node_index = match task_deps.len() {
-        0 => {
-            // Because the dep-node id of anon nodes is computed from the sets of its
-            // dependencies we already know what the ID of this dependency-less node is
-            // going to be (i.e. equal to the precomputed
-            // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
-            // a `StableHasher` and sending the node through interning.
-            DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
-        }
-        1 => {
-            // When there is only one dependency, don't bother creating a node.
-            task_deps[0]
-        }
-        _ => {
-            // The dep node indices are hashed here instead of hashing the dep nodes of the
-            // dependencies. These indices may refer to different nodes per session, but this isn't
-            // a problem here because we ensure that the final dep node hash is per-session only by
-            // combining it with the per-session random number `anon_id_seed`. This hash only needs
-            // to map the dependencies to a single value on a per-session basis.
-            let mut hasher = StableHasher::new();
-            task_deps.hash(&mut hasher);
-
-            let target_dep_node = DepNode {
-                kind: dep_kind,
-                // Fingerprint::combine() is faster than sending Fingerprint
-                // through the StableHasher (at least as long as StableHasher
-                // is so slow).
-                hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
-            };
-
-            self.current.intern_new_node(
-                cx.profiler(),
-                target_dep_node,
-                task_deps,
-                Fingerprint::ZERO,
-            )
+    let dep_node_index = if intern {
+        // FIXME(sparse_fp): what is this assertion about?
+        debug_assert!(!cx.is_eval_always(dep_kind));
+
+        match task_deps.len() {
+            0 => {
+                // Because the dep-node id of anon nodes is computed from the sets of its
+                // dependencies we already know what the ID of this dependency-less node is
+                // going to be (i.e. equal to the precomputed
+                // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
+                // a `StableHasher` and sending the node through interning.
+                DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
+            }
+            1 => {
+                // When there is only one dependency, don't bother creating a node.
+                task_deps[0]
+            }
+            _ => {
+                // The dep node indices are hashed here instead of hashing the dep nodes of the
+                // dependencies. These indices may refer to different nodes per session, but this isn't
+                // a problem here because we ensure that the final dep node hash is per-session only by
+                // combining it with the per-session random number `anon_id_seed`. This hash only needs
+                // to map the dependencies to a single value on a per-session basis.
+                let mut hasher = StableHasher::new();
+                task_deps.hash(&mut hasher);
+                dep_kind.hash(&mut hasher);
+
+                let dedup_fingerprint: Fingerprint = hasher.finish();
+
+                match self
+                    .current
+                    .interned_node_to_index
+                    .lock_shard_by_value(&dedup_fingerprint)
+                    .entry(dedup_fingerprint)
+                {
+                    Entry::Occupied(entry) => *entry.get(),
+                    Entry::Vacant(entry) => {
+                        let dep_node_index =
+                            self.current.alloc_anon_node(cx.profiler(), dep_kind, task_deps);
+                        entry.insert(dep_node_index);
+                        dep_node_index
+                    }
+                }
+            }
+        }
+    } else {
+        self.current.alloc_anon_node(cx.profiler(), dep_kind, task_deps)
+    };

(result, dep_node_index)
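For orientation while reading the hunk above: with `intern == true`, anonymous nodes are now deduplicated through a fingerprint computed from the dependency-edge list plus the `DepKind`, rather than through a map keyed by full `DepNode`s. Below is a rough, self-contained sketch of that dedup pattern, with a plain `HashMap` and `DefaultHasher` standing in for rustc's sharded `interned_node_to_index` map and `StableHasher`:

```rust
use std::collections::hash_map::{DefaultHasher, Entry};
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

type DepNodeIndex = u32;

/// Illustrative stand-in for the sharded `interned_node_to_index` table.
struct AnonNodeTable {
    interned: HashMap<u64, DepNodeIndex>,
    next_index: DepNodeIndex,
}

impl AnonNodeTable {
    /// Two anonymous tasks of the same kind that read exactly the same
    /// dependencies collapse onto one node index.
    fn intern_anon(&mut self, dep_kind: &str, edges: &[DepNodeIndex]) -> DepNodeIndex {
        // Hash the dependency edges and the kind into a dedup fingerprint.
        let mut hasher = DefaultHasher::new();
        edges.hash(&mut hasher);
        dep_kind.hash(&mut hasher);
        let dedup_fingerprint = hasher.finish();

        match self.interned.entry(dedup_fingerprint) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                // First time we see this (kind, deps) combination: allocate a
                // fresh node. In the real patch this is `alloc_anon_node`,
                // which also streams the node to the graph encoder.
                let index = self.next_index;
                self.next_index += 1;
                *entry.insert(index)
            }
        }
    }
}

fn main() {
    let mut table = AnonNodeTable { interned: HashMap::new(), next_index: 0 };
    let a = table.intern_anon("anon", &[3, 5, 8]);
    let b = table.intern_anon("anon", &[3, 5, 8]); // same deps: deduplicated
    let c = table.intern_anon("anon", &[3, 5]); // different deps: new node
    assert_eq!(a, b);
    assert_ne!(a, c);
}
```

With `intern == false` the dedup table is skipped entirely and every execution allocates a fresh node, which is what allows the graph to grow extra nodes and edges, as noted in the commit message.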
@@ -616,18 +628,20 @@ impl<D: Deps> DepGraph<D> {
}

impl<D: Deps> DepGraphData<D> {
-    #[inline]
-    fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
-        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
-            self.current.prev_index_to_index.lock()[prev_index]
-        } else {
-            self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
-        }
-    }
+    // #[inline]
+    // fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
+    //     if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
+    //         self.current.prev_index_to_index.lock()[prev_index]
+    //     } else {
+    //         self.current.interned_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
+    //     }
+    // }

#[inline]
-    fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
-        self.dep_node_index_of_opt(dep_node).is_some()
+    fn dep_node_exists(&self, _dep_node: &DepNode) -> bool {
+        // FIXME(sparse_fps): bring back assertions
+        //self.dep_node_index_of_opt(dep_node).is_some()
+        false
}

fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
@@ -1071,7 +1085,7 @@ rustc_index::newtype_index! {
/// first, and `data` second.
pub(super) struct CurrentDepGraph<D: Deps> {
encoder: Steal<GraphEncoder<D>>,
- new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
+ interned_node_to_index: Sharded<FxHashMap<Fingerprint, DepNodeIndex>>,
prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

/// This is used to verify that fingerprints do not change between the creation of a node
@@ -1152,7 +1166,7 @@ impl<D: Deps> CurrentDepGraph<D> {
record_graph,
record_stats,
)),
- new_node_to_index: Sharded::new(|| {
+ interned_node_to_index: Sharded::new(|| {
FxHashMap::with_capacity_and_hasher(
new_node_count_estimate / sharded::shards(),
Default::default(),
@@ -1182,29 +1196,30 @@ impl<D: Deps> CurrentDepGraph<D> {
/// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
/// Assumes that this is a node that has no equivalent in the previous dep-graph.
#[inline(always)]
- fn intern_new_node(
+ fn alloc_new_node(
&self,
profiler: &SelfProfilerRef,
key: DepNode,
edges: EdgesVec,
current_fingerprint: Fingerprint,
) -> DepNodeIndex {
-        let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
-            Entry::Occupied(entry) => *entry.get(),
-            Entry::Vacant(entry) => {
-                let dep_node_index =
-                    self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
-                entry.insert(dep_node_index);
-                dep_node_index
-            }
-        };

+ let dep_node_index = self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
#[cfg(debug_assertions)]
self.record_edge(dep_node_index, key, current_fingerprint);

dep_node_index
}

+    #[inline(always)]
+    fn alloc_anon_node(
+        &self,
+        profiler: &SelfProfilerRef,
+        dep_kind: DepKind,
+        edges: EdgesVec,
+    ) -> DepNodeIndex {
+        self.encoder.borrow().send_anon_node(profiler, dep_kind, edges)
+    }

fn intern_node(
&self,
profiler: &SelfProfilerRef,
Expand Down Expand Up @@ -1262,7 +1277,7 @@ impl<D: Deps> CurrentDepGraph<D> {
let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

// This is a new node: it didn't exist in the previous compilation session.
- let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint);
+ let dep_node_index = self.alloc_new_node(profiler, key, edges, fingerprint);

(dep_node_index, None)
}
@@ -1274,7 +1289,8 @@ impl<D: Deps> CurrentDepGraph<D> {
prev_graph: &SerializedDepGraph,
prev_index: SerializedDepNodeIndex,
) -> DepNodeIndex {
- self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
+ // FIXME(sparse_fp): restore assertions
+ // self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

let mut prev_index_to_index = self.prev_index_to_index.lock();

Expand All @@ -1296,18 +1312,19 @@ impl<D: Deps> CurrentDepGraph<D> {
}
}

-    #[inline]
-    fn debug_assert_not_in_new_nodes(
-        &self,
-        prev_graph: &SerializedDepGraph,
-        prev_index: SerializedDepNodeIndex,
-    ) {
-        let node = &prev_graph.index_to_node(prev_index);
-        debug_assert!(
-            !self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
-            "node from previous graph present in new node collection"
-        );
-    }
+    // FIXME(sparse_fp): restore assertions
+    // #[inline]
+    // fn debug_assert_not_in_new_nodes(
+    //     &self,
+    //     prev_graph: &SerializedDepGraph,
+    //     prev_index: SerializedDepNodeIndex,
+    // ) {
+    //     let node = &prev_graph.index_to_node(prev_index);
+    //     debug_assert!(
+    //         !self.interned_node_to_index.lock_shard_by_value(node).contains_key(node),
+    //         "node from previous graph present in new node collection"
+    //     );
+    // }
}

#[derive(Debug, Clone, Copy)]