From e354c7330eed4af44cea7558066bed38d2f32d5e Mon Sep 17 00:00:00 2001
From: lcnr
Date: Tue, 13 Aug 2024 17:22:47 +0200
Subject: [PATCH] uwu

---
 .../rustc_type_ir/src/search_graph/mod.rs | 68 ++++++++++++++++---
 1 file changed, 57 insertions(+), 11 deletions(-)

diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs
index 97f81bdee4026..e5ab7637db13a 100644
--- a/compiler/rustc_type_ir/src/search_graph/mod.rs
+++ b/compiler/rustc_type_ir/src/search_graph/mod.rs
@@ -179,7 +179,9 @@ impl AvailableDepth {
     }
 }
 
-/// All cycle heads a given goal depends on.
+/// All cycle heads a given goal depends on, ordered by their stack depth.
+///
+/// We therefore pop the cycle heads from highest to lowest.
 #[derive(Clone, Debug, PartialEq, Eq, Default)]
 struct CycleHeads {
     heads: BTreeSet<StackDepth>,
@@ -217,6 +219,9 @@ impl CycleHeads {
         }
     }
 
+    /// Update the cycle heads of a goal at depth `this`, given the cycle heads
+    /// of a nested goal. This merges the heads after filtering out the parent
+    /// goal itself.
    fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) {
         for &head in child.heads.iter() {
             match head.cmp(&this) {
@@ -264,6 +269,12 @@ impl<X: Cx> NestedGoals<X> {
         }
     }
 
+    /// Adds the nested goals of a nested goal, given that the path from this
+    /// goal to that nested goal is `step_kind`.
+    ///
+    /// If the path from this goal to the nested goal is inductive, the paths from this goal
+    /// to all nested goals of that nested goal are also inductive. Otherwise the paths are
+    /// the same as for the child.
     fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) {
         #[allow(rustc::potential_query_instability)]
         for (input, path_from_entry) in nested_goals.iter() {
@@ -332,8 +343,13 @@ struct StackEntry<X: Cx> {
 /// goals still on the stack.
 #[derive_where(Debug; X: Cx)]
 struct ProvisionalCacheEntry<X: Cx> {
+    /// Whether evaluating the goal encountered overflow. This is used to
+    /// disable the cache entry except if the last goal on the stack is
+    /// already involved in this cycle.
     encountered_overflow: bool,
+    /// All cycle heads this cache entry depends on.
     heads: CycleHeads,
+    /// The path from the highest cycle head to this goal.
     path_from_head: PathKind,
     nested_goals: NestedGoals<X>,
     result: X::Result,
@@ -345,6 +361,10 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
     ///
     /// An element is *deeper* in the stack if its index is *lower*.
     stack: IndexVec<StackDepth, StackEntry<X>>,
+    /// The provisional cache contains entries for already computed goals which
+    /// still depend on goals higher-up in the stack. We don't move them to the
+    /// global cache and instead track them locally. A provisional cache entry
+    /// is only valid until the result of one of its cycle heads changes.
     provisional_cache: HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,
 
     _marker: PhantomData<D>,
@@ -589,6 +609,17 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
     /// provisional cache entry is involved in would stay the same when computing the
     /// goal without its cycle head on the stack. For more details, see the relevant
     /// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
+    ///
+    /// This can be thought of as rotating the sub-tree of this provisional result and changing
+    /// its entry point while making sure that all paths through this sub-tree stay the same.
+    ///
+    ///
+    /// In case the popped cycle head failed to reach a fixpoint, anything which depends on
+    /// its provisional result is invalid. Actually discarding provisional cache entries in
+    /// this case would cause hangs, so we instead change the result of dependent provisional
+    /// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested
+    /// goals whose result doesn't actually depend on this cycle head, but that's acceptable
+    /// to me.
     fn rebase_provisional_cache_entries(
         &mut self,
         cx: X,
@@ -614,31 +645,37 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 // to the cache entry is not coinductive or if the path from
                 // the cache entry to the current head is not coinductive.
                 //
-                // Both of these constraints could be lowered, but by only
+                // Both of these constraints could be weakened, but by only
                 // accepting coinductive paths we don't have to worry about
                 // changing the cycle kind of the remaining cycles. We can
                 // extend this in the future once there's a known issue
                 // caused by it.
-                if *path_from_head != PathKind::Coinductive {
-                    return false;
-                }
-
-                if nested_goals.get(stack_entry.input).unwrap()
-                    != UsageKind::Single(PathKind::Coinductive)
+                if *path_from_head != PathKind::Coinductive
+                    || nested_goals.get(stack_entry.input).unwrap()
+                        != UsageKind::Single(PathKind::Coinductive)
                 {
                     return false;
                 }
 
+                // Merge the cycle heads of the provisional cache entry and the
+                // popped head. If the popped cycle head was a root, discard all
+                // provisional cache entries which depend on it.
                 heads.remove_highest_cycle_head();
                 heads.merge(&stack_entry.heads);
-                // If the popped cycle head was a root, discard all provisional
-                // cache entries.
                 let Some(head) = heads.opt_highest_cycle_head() else {
                     return false;
                 };
 
+                // As we've made sure that the paths from the new highest cycle
+                // head to the uses of the popped cycle head are fully coinductive,
+                // we can be sure that the paths to all nested goals of the popped
+                // cycle head remain the same. We can simply merge them.
                 nested_goals.merge(&stack_entry.nested_goals);
+                // We now care about the path from the next highest cycle head to the
+                // provisional cache entry.
                 *path_from_head = Self::stack_path_kind(cx, &self.stack, head);
+                // Mutate the result of the provisional cache entry in case we did
+                // not reach a fixpoint.
                 *result = mutate_result(input, *result);
                 true
             });
@@ -677,6 +714,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             }
         }
 
+        // A provisional cache entry is only valid if the current path from its
+        // highest cycle head to the goal is the same as when it was created.
         if path_from_head == Self::stack_path_kind(cx, &self.stack, head) {
             // While we don't have to track the full depth of the provisional cache entry,
             // we do have to increment the required depth by one as we'd have already failed
@@ -718,11 +757,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             return true;
         }
 
+        // If a nested goal of the global cache entry is on the stack, we would
+        // definitely encounter a cycle.
         if stack.iter().any(|e| nested_goals.contains(e.input)) {
             debug!("cache entry not applicable due to stack");
             return false;
         }
 
+        // The global cache entry is also invalid if there's a provisional cache
+        // entry which would apply to any of its nested goals.
         #[allow(rustc::potential_query_instability)]
         for (input, path_from_global_entry) in nested_goals.iter() {
             let Some(entries) = provisional_cache.get(&input) else {
@@ -746,6 +789,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                     continue;
                 }
 
+                // A provisional cache entry only applies if the path from its highest
+                // cycle head matches the path with which the goal is now encountered.
                let head = heads.highest_cycle_head();
                let full_path = match Self::stack_path_kind(cx, stack, head) {
                    PathKind::Coinductive => path_from_global_entry,
@@ -817,7 +862,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             // its state is the same regardless of whether we've used the
             // global cache or not.
             let reached_depth = self.stack.next_index().plus(additional_depth);
-            // We don't move cycle participants to the global cache.
+            // We don't move cycle participants to the global cache, so the
+            // cycle heads are always empty.
             let heads = Default::default();
             Self::update_parent_goal(
                 cx,
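
Three short standalone sketches follow to illustrate the invariants described by the new comments. They are not part of the diff, use simplified stand-in types, and model the behavior rather than quote the compiler sources.

First, the `CycleHeads` ordering: heads are stored sorted by stack depth, so the highest cycle head is the `BTreeSet`'s last element and heads are popped from highest to lowest. In this sketch `StackDepth` is simplified to a plain `usize`; the real types live in `rustc_type_ir::search_graph`.

use std::cmp::Ordering;
use std::collections::BTreeSet;

// Simplified stand-in for rustc's `StackDepth` index type.
type StackDepth = usize;

#[derive(Debug, Default)]
struct CycleHeads {
    heads: BTreeSet<StackDepth>,
}

impl CycleHeads {
    fn highest_cycle_head(&self) -> StackDepth {
        // `BTreeSet` iterates in ascending order, so the last element is the
        // highest cycle head, i.e. the one closest to the top of the stack.
        *self.heads.last().unwrap()
    }

    fn remove_highest_cycle_head(&mut self) {
        // Heads are popped from highest to lowest, matching the ordering
        // invariant stated in the doc comment added by the patch.
        let popped = self.heads.pop_last();
        debug_assert!(popped.is_some());
    }

    fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) {
        for &head in child.heads.iter() {
            match head.cmp(&this) {
                // A head deeper in the stack than `this` is also a cycle
                // head of the parent.
                Ordering::Less => {
                    self.heads.insert(head);
                }
                // The parent goal itself gets filtered out.
                Ordering::Equal => continue,
                // Goals above the parent have already been popped.
                Ordering::Greater => unreachable!(),
            }
        }
    }
}

fn main() {
    let mut parent = CycleHeads::default();
    let mut child = CycleHeads::default();
    child.heads.extend([0, 2, 3]);
    // The parent sits at depth 3: it is filtered out and only the heads
    // deeper in the stack (0 and 2) are merged.
    parent.extend_from_child(3, &child);
    assert_eq!(parent.highest_cycle_head(), 2);
    parent.remove_highest_cycle_head();
    assert_eq!(parent.highest_cycle_head(), 0);
}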
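
Second, the rebase restriction in `rebase_provisional_cache_entries`: an entry is only rebased if both the path from its highest head and all uses of the popped head are coinductive. The predicate name `can_rebase` and the two-variant enums below are invented for illustration; they only mirror the cases the patch checks.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum PathKind {
    Coinductive,
    Inductive,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum UsageKind {
    Single(PathKind),
    Mixed,
}

// An entry may only be rebased onto the next highest cycle head if the path
// from the popped head to the entry and every use of the popped head inside
// the entry are coinductive. Anything else could change the kind, and
// therefore the result, of the remaining cycles.
fn can_rebase(path_from_head: PathKind, usage_of_popped_head: UsageKind) -> bool {
    path_from_head == PathKind::Coinductive
        && usage_of_popped_head == UsageKind::Single(PathKind::Coinductive)
}

fn main() {
    assert!(can_rebase(
        PathKind::Coinductive,
        UsageKind::Single(PathKind::Coinductive)
    ));
    // An inductive step anywhere disables rebasing; such entries are dropped.
    assert!(!can_rebase(
        PathKind::Inductive,
        UsageKind::Single(PathKind::Coinductive)
    ));
    assert!(!can_rebase(PathKind::Coinductive, UsageKind::Mixed));
}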
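
Third, the path composition rule documented on `NestedGoals::extend_from_child`: an inductive step from a goal to its nested goal makes all forwarded paths inductive, while a coinductive step forwards the child's paths unchanged. `extend_step` is a hypothetical helper modeling just that rule.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum PathKind {
    Coinductive,
    Inductive,
}

// Prepending an inductive step makes the whole path inductive, while a
// coinductive step leaves the path reported by the child unchanged.
fn extend_step(step_kind: PathKind, path_from_entry: PathKind) -> PathKind {
    match step_kind {
        PathKind::Coinductive => path_from_entry,
        PathKind::Inductive => PathKind::Inductive,
    }
}

fn main() {
    assert_eq!(
        extend_step(PathKind::Coinductive, PathKind::Inductive),
        PathKind::Inductive
    );
    // A single inductive step poisons the whole path.
    assert_eq!(
        extend_step(PathKind::Inductive, PathKind::Coinductive),
        PathKind::Inductive
    );
}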