
Commit

uwu
lcnr committed Aug 13, 2024
1 parent cb088a7 commit e354c73
Showing 1 changed file with 57 additions and 11 deletions.
68 changes: 57 additions & 11 deletions compiler/rustc_type_ir/src/search_graph/mod.rs
@@ -179,7 +179,9 @@ impl AvailableDepth {
}
}

/// All cycle heads a given goal depends on.
/// All cycle heads a given goal depends on, ordered by their stack depth.
///
/// We therefore pop the cycle heads from highest to lowest.
#[derive(Clone, Debug, PartialEq, Eq, Default)]
struct CycleHeads {
heads: BTreeSet<StackDepth>,
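
The ordering falls out of the `BTreeSet`: iteration is by ascending stack depth and popping from the back yields the highest head first. A minimal standalone sketch, using plain `usize` in place of `StackDepth`:

use std::collections::BTreeSet;

fn main() {
    let mut heads: BTreeSet<usize> = BTreeSet::new();
    heads.extend([3, 0, 2]);
    // Iteration is in ascending stack depth, so the deepest head comes first.
    assert_eq!(heads.iter().copied().collect::<Vec<_>>(), [0, 2, 3]);
    // Popping from the back removes the highest cycle head first,
    // matching the "highest to lowest" order described above.
    assert_eq!(heads.pop_last(), Some(3));
    assert_eq!(heads.pop_last(), Some(2));
    assert_eq!(heads.pop_last(), Some(0));
}
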
@@ -217,6 +219,9 @@ impl CycleHeads {
}
}

/// Update the cycle heads of a goal at depth `this` given the cycle heads
/// of a nested goal. This merges the heads after filtering the parent goal
/// itself.
fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) {
for &head in child.heads.iter() {
match head.cmp(&this) {
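
A rough standalone sketch of the merge-and-filter described above, again with `usize` standing in for `StackDepth`; the match arms are an assumption based on the doc comment rather than a copy of the compiler's implementation:

use std::cmp::Ordering;
use std::collections::BTreeSet;

// Merge the cycle heads of a nested goal into the heads of its parent at
// stack depth `this`, filtering out the parent itself.
fn extend_from_child(heads: &mut BTreeSet<usize>, this: usize, child: &BTreeSet<usize>) {
    for &head in child {
        match head.cmp(&this) {
            // The nested goal depends on a goal deeper in the stack than the
            // parent, so the parent now depends on it as well.
            Ordering::Less => {
                heads.insert(head);
            }
            // The nested goal depends on the parent itself; a goal is not
            // its own cycle head, so this is dropped.
            Ordering::Equal => continue,
            // A nested goal cannot depend on a goal pushed above its parent.
            Ordering::Greater => unreachable!(),
        }
    }
}

fn main() {
    let mut heads = BTreeSet::from([0]);
    extend_from_child(&mut heads, 2, &BTreeSet::from([1, 2]));
    assert_eq!(heads.iter().copied().collect::<Vec<_>>(), [0, 1]);
}
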
@@ -264,6 +269,12 @@ impl<X: Cx> NestedGoals<X> {
}
}

/// Adds the nested goals of a nested goal, given the path `step_kind` from this goal
/// to that nested goal.
///
/// If the path from this goal to the nested goal is inductive, the paths from this goal
/// to all nested goals of that nested goal are also inductive. Otherwise the paths are
/// the same as for the child.
fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) {
#[allow(rustc::potential_query_instability)]
for (input, path_from_entry) in nested_goals.iter() {
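
The rule from the doc comment can be written as a small total function. A minimal sketch with a simplified stand-in for `PathKind` (ignoring `UsageKind::Mixed`):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Path {
    Coinductive,
    Inductive,
}

// The path from the parent to a transitive nested goal, given the kind of
// the step from the parent to its child and the path recorded by the child.
fn path_from_parent(step_kind: Path, path_from_child: Path) -> Path {
    match step_kind {
        // An inductive step forces all transitive paths to be inductive.
        Path::Inductive => Path::Inductive,
        // A coinductive step keeps the child's paths unchanged.
        Path::Coinductive => path_from_child,
    }
}

fn main() {
    assert_eq!(path_from_parent(Path::Inductive, Path::Coinductive), Path::Inductive);
    assert_eq!(path_from_parent(Path::Coinductive, Path::Inductive), Path::Inductive);
    assert_eq!(path_from_parent(Path::Coinductive, Path::Coinductive), Path::Coinductive);
}
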
@@ -332,8 +343,13 @@ struct StackEntry<X: Cx> {
/// goals still on the stack.
#[derive_where(Debug; X: Cx)]
struct ProvisionalCacheEntry<X: Cx> {
/// Whether evaluating the goal encountered overflow. This is used to
/// disable the cache entry unless the last goal on the stack is
/// already involved in this cycle.
encountered_overflow: bool,
/// All cycle heads this cache entry depends on.
heads: CycleHeads,
/// The path from the highest cycle head to this goal.
path_from_head: PathKind,
nested_goals: NestedGoals<X>,
result: X::Result,
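
A rough sketch of the applicability rule these field docs describe, reduced to plain booleans instead of the compiler's types:

fn entry_applies(
    encountered_overflow: bool,
    stack_top_in_cycle: bool,
    path_from_head_matches_stack: bool,
) -> bool {
    // Entries which hit the recursion limit are disabled unless the last
    // goal on the stack is already involved in this cycle.
    if encountered_overflow && !stack_top_in_cycle {
        return false;
    }
    // The path from the highest cycle head to the goal must be unchanged.
    path_from_head_matches_stack
}

fn main() {
    assert!(entry_applies(false, false, true));
    assert!(!entry_applies(true, false, true));
    assert!(!entry_applies(false, true, false));
}
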
@@ -345,6 +361,10 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
///
/// An element is *deeper* in the stack if its index is *lower*.
stack: IndexVec<StackDepth, StackEntry<X>>,
/// The provisional cache contains entries for already computed goals which
/// still depend on goals higher up in the stack. We don't move them to the
/// global cache but instead track them locally. A provisional cache entry
/// is only valid until the result of one of its cycle heads changes.
provisional_cache: HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,

_marker: PhantomData<D>,
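
A minimal sketch of the shape described above, with `u32` standing in for the interned goal input and result, and only the highest cycle head tracked per entry; the real code rebases entries instead of always dropping them, see `rebase_provisional_cache_entries` below:

use std::collections::HashMap;

struct ProvisionalEntry {
    // Stack depth of the highest cycle head this entry depends on.
    highest_head: usize,
    result: u32,
}

#[derive(Default)]
struct ProvisionalCache {
    entries: HashMap<u32, Vec<ProvisionalEntry>>,
}

impl ProvisionalCache {
    // Called when the provisional result of the cycle head at `depth`
    // changed: every entry depending on it is no longer valid.
    fn invalidate_head(&mut self, depth: usize) {
        for entries in self.entries.values_mut() {
            entries.retain(|entry| entry.highest_head != depth);
        }
        self.entries.retain(|_, entries| !entries.is_empty());
    }
}

fn main() {
    let mut cache = ProvisionalCache::default();
    cache.entries.insert(7, vec![ProvisionalEntry { highest_head: 0, result: 1 }]);
    cache.invalidate_head(0);
    assert!(cache.entries.is_empty());
}
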
@@ -589,6 +609,17 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
/// provisional cache entry is involved in would stay the same when computing the
/// goal without its cycle head on the stack. For more details, see the relevant
/// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
///
/// This can be thought of as rotating the sub-tree of this provisional result and changing
/// its entry point while making sure that all paths through this sub-tree stay the same.
///
/// In case the popped cycle head failed to reach a fixpoint, anything which depends on
/// its provisional result is invalid. Actually discarding provisional cache entries in
/// this case would cause hangs, so we instead change the result of dependent provisional
/// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested
/// goals whose result doesn't actually depend on this cycle head, but that's acceptable
/// to me.
fn rebase_provisional_cache_entries(
&mut self,
cx: X,
@@ -614,31 +645,37 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// to the cache entry is not coinductive or if the path from
// the cache entry to the current head is not coinductive.
//
// Both of these constraints could be lowered, but by only
// Both of these constraints could be weakened, but by only
// accepting coinductive paths we don't have to worry about
// changing the cycle kind of the remaining cycles. We can
// extend this in the future once there's a known issue
// caused by it.
if *path_from_head != PathKind::Coinductive {
return false;
}

if nested_goals.get(stack_entry.input).unwrap()
!= UsageKind::Single(PathKind::Coinductive)
if *path_from_head != PathKind::Coinductive
|| nested_goals.get(stack_entry.input).unwrap()
!= UsageKind::Single(PathKind::Coinductive)
{
return false;
}

// Merge the cycle heads of the provisional cache entry and the
// popped head. If the popped cycle head was a root, discard all
// provisional cache entries which depend on it.
heads.remove_highest_cycle_head();
heads.merge(&stack_entry.heads);
// If the popped cycle head was a root, discard all provisional
// cache entries.
let Some(head) = heads.opt_highest_cycle_head() else {
return false;
};

// As we've made sure that the path from the new highest cycle
// head to the uses of the popped cycle head are fully coinductive,
// we can be sure that the paths to all nested goals of the popped
// cycle head remain the same. We can simply merge them.
nested_goals.merge(&stack_entry.nested_goals);
// We now care about the path from the next highest cycle head to the
// provisional cache entry.
*path_from_head = Self::stack_path_kind(cx, &self.stack, head);
// Mutate the result of the provisional cache entry in case we did
// not reach a fixpoint.
*result = mutate_result(input, *result);
true
});
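
A condensed sketch of the rebase step implemented above, using simplified stand-ins for the compiler's types; the structure is an assumption based on the surrounding comments rather than the actual implementation:

use std::collections::BTreeSet;

#[derive(Clone, Copy, PartialEq, Eq)]
enum Path {
    Coinductive,
    Inductive,
}

struct Entry {
    heads: BTreeSet<usize>,
    path_from_head: Path,
    result: u32,
}

// Try to rebase `entry` after popping the cycle head at depth `popped`,
// returning `false` if the entry has to be discarded instead.
fn rebase(
    entry: &mut Entry,
    popped: usize,
    popped_heads: &BTreeSet<usize>,
    path_to_remaining_head: impl Fn(usize) -> Path,
    reached_fixpoint: bool,
    ambiguous_result: u32,
) -> bool {
    // Only fully coinductive cycles are rebased; the real code additionally
    // requires the entry's own use of the popped head to be coinductive.
    if entry.path_from_head != Path::Coinductive {
        return false;
    }
    // Replace the popped head by the heads it depends on itself.
    entry.heads.remove(&popped);
    entry.heads.extend(popped_heads.iter().copied());
    // If the popped head was a root, there is nothing left to rebase onto.
    let Some(&head) = entry.heads.iter().next_back() else {
        return false;
    };
    // The entry now cares about the path from the next highest cycle head.
    entry.path_from_head = path_to_remaining_head(head);
    // If the popped head failed to reach a fixpoint, force ambiguity.
    if !reached_fixpoint {
        entry.result = ambiguous_result;
    }
    true
}
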
Expand Down Expand Up @@ -677,6 +714,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
}
}

// A provisional cache entry is only valid if the current path from its
// highest cycle head to the goal is the same as when the entry was created.
if path_from_head == Self::stack_path_kind(cx, &self.stack, head) {
// While we don't have to track the full depth of the provisional cache entry,
// we do have to increment the required depth by one as we'd have already failed
@@ -718,11 +757,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
return true;
}

// If a nested goal of the global cache entry is on the stack, we would
// definitely encounter a cycle.
if stack.iter().any(|e| nested_goals.contains(e.input)) {
debug!("cache entry not applicable due to stack");
return false;
}

// The global cache entry is also invalid if there's a provisional cache entry
// which would apply to any of its nested goals.
#[allow(rustc::potential_query_instability)]
for (input, path_from_global_entry) in nested_goals.iter() {
let Some(entries) = provisional_cache.get(&input) else {
@@ -746,6 +789,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
continue;
}

// A provisional cache entry only applies if the path from its highest head
// matches the path when encountering the goal.
let head = heads.highest_cycle_head();
let full_path = match Self::stack_path_kind(cx, stack, head) {
PathKind::Coinductive => path_from_global_entry,
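
A minimal sketch of the two checks above, with `u32` goals and simplified path handling as assumptions for illustration: a global cache entry is unusable if one of its nested goals is on the stack, or if a provisional cache entry with a matching path would apply to one of its nested goals:

use std::collections::{HashMap, HashSet};

#[derive(Clone, Copy, PartialEq, Eq)]
enum Path {
    Coinductive,
    Inductive,
}

// Returns `false` if reusing the global cache entry could change the result.
fn global_entry_applies(
    nested_goals: &HashMap<u32, Path>,
    stack: &HashSet<u32>,
    provisional: &HashMap<u32, Vec<Path>>,
) -> bool {
    for (goal, path_from_entry) in nested_goals {
        // Using the entry would hide a cycle with a goal on the stack.
        if stack.contains(goal) {
            return false;
        }
        // A provisional cache entry with a matching path would apply to
        // this nested goal and could yield a different result.
        if let Some(entries) = provisional.get(goal) {
            if entries.contains(path_from_entry) {
                return false;
            }
        }
    }
    true
}
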
@@ -817,7 +862,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// its state is the same regardless of whether we've used the
// global cache or not.
let reached_depth = self.stack.next_index().plus(additional_depth);
// We don't move cycle participants to the global cache.
// We don't move cycle participants to the global cache, so the
// cycle heads are always empty.
let heads = Default::default();
Self::update_parent_goal(
cx,
