diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs
index 6415122dd3905..8e2c3dd3d8ad9 100644
--- a/src/librustc/ty/layout.rs
+++ b/src/librustc/ty/layout.rs
@@ -666,7 +666,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
             size,
             align,
         });
-        debug!("generator layout: {:#?}", layout);
+        debug!("generator layout ({:?}): {:#?}", ty, layout);
         layout
     }
 
diff --git a/src/librustc_mir/build/expr/as_temp.rs b/src/librustc_mir/build/expr/as_temp.rs
index cffd8fb2892f5..85423955ce545 100644
--- a/src/librustc_mir/build/expr/as_temp.rs
+++ b/src/librustc_mir/build/expr/as_temp.rs
@@ -1,7 +1,7 @@
 //! See docs in build/expr/mod.rs
 
 use crate::build::{BlockAnd, BlockAndExtension, Builder};
-use crate::build::scope::{CachedBlock, DropKind};
+use crate::build::scope::DropKind;
 use crate::hair::*;
 use rustc::middle::region;
 use rustc::mir::*;
@@ -103,9 +103,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                     temp_lifetime,
                     temp_place,
                     expr_ty,
-                    DropKind::Value {
-                        cached_block: CachedBlock::default(),
-                    },
+                    DropKind::Value,
                 );
             }
 
diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs
index 8c2ef082c339f..20d1423f8a8b6 100644
--- a/src/librustc_mir/build/matches/mod.rs
+++ b/src/librustc_mir/build/matches/mod.rs
@@ -5,7 +5,7 @@
 //! This also includes code for pattern bindings in `let` statements and
 //! function parameters.
 
-use crate::build::scope::{CachedBlock, DropKind};
+use crate::build::scope::DropKind;
 use crate::build::ForGuard::{self, OutsideGuard, RefWithinGuard};
 use crate::build::{BlockAnd, BlockAndExtension, Builder};
 use crate::build::{GuardFrame, GuardFrameLocal, LocalsForNode};
@@ -557,9 +557,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             region_scope,
             &Place::Base(PlaceBase::Local(local_id)),
             var_ty,
-            DropKind::Value {
-                cached_block: CachedBlock::default(),
-            },
+            DropKind::Value,
         );
     }
 
diff --git a/src/librustc_mir/build/mod.rs b/src/librustc_mir/build/mod.rs
index 7ca54a430a505..c8a31ecffb84d 100644
--- a/src/librustc_mir/build/mod.rs
+++ b/src/librustc_mir/build/mod.rs
@@ -1,5 +1,5 @@
 use crate::build;
-use crate::build::scope::{CachedBlock, DropKind};
+use crate::build::scope::DropKind;
 use crate::hair::cx::Cx;
 use crate::hair::{LintLevel, BindingMode, PatternKind};
 use crate::shim;
@@ -912,8 +912,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             // Make sure we drop (parts of) the argument even when not matched on.
             self.schedule_drop(
                 pattern.as_ref().map_or(ast_body.span, |pat| pat.span),
-                argument_scope, &place, ty,
-                DropKind::Value { cached_block: CachedBlock::default() },
+                argument_scope, &place, ty, DropKind::Value,
             );
 
             if let Some(pattern) = pattern {
diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs
index 58339173c9e5d..b697b9884367c 100644
--- a/src/librustc_mir/build/scope.rs
+++ b/src/librustc_mir/build/scope.rs
@@ -88,7 +88,7 @@ use rustc::middle::region;
 use rustc::ty::Ty;
 use rustc::hir;
 use rustc::mir::*;
-use syntax_pos::{Span, DUMMY_SP};
+use syntax_pos::{DUMMY_SP, Span};
 use rustc_data_structures::fx::FxHashMap;
 use std::collections::hash_map::Entry;
 use std::mem;
@@ -143,10 +143,13 @@ struct DropData<'tcx> {
 
     /// Whether this is a value Drop or a StorageDead.
     kind: DropKind,
+
+    /// The cached blocks for unwinds.
+    cached_block: CachedBlock,
 }
 
 #[derive(Debug, Default, Clone, Copy)]
-pub(crate) struct CachedBlock {
+struct CachedBlock {
     /// The cached block for the cleanups-on-diverge path. This block
     /// contains code to run the current drop and all the preceding
     /// drops (i.e., those having lower index in Drop’s Scope drop
     /// array)
@@ -164,10 +167,8 @@ pub(crate) struct CachedBlock {
 
 #[derive(Debug)]
 pub(crate) enum DropKind {
-    Value {
-        cached_block: CachedBlock,
-    },
-    Storage
+    Value,
+    Storage,
 }
 
 #[derive(Clone, Debug)]
@@ -210,7 +211,7 @@ impl CachedBlock {
 impl DropKind {
     fn may_panic(&self) -> bool {
         match *self {
-            DropKind::Value { .. } => true,
+            DropKind::Value => true,
             DropKind::Storage => false
         }
     }
@@ -225,25 +226,24 @@ impl<'tcx> Scope<'tcx> {
     /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
     /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
     /// top-of-scope (as opposed to dependent scopes).
-    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
+    fn invalidate_cache(&mut self, storage_only: bool, is_generator: bool, this_scope_only: bool) {
         // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
         // with lots of `try!`?
 
         // cached exits drop storage and refer to the top-of-scope
        self.cached_exits.clear();
 
-        if !storage_only {
-            // the current generator drop and unwind ignore
-            // storage but refer to top-of-scope
-            self.cached_generator_drop = None;
+        // the current generator drop and unwind refer to top-of-scope
+        self.cached_generator_drop = None;
+
+        let ignore_unwinds = storage_only && !is_generator;
+        if !ignore_unwinds {
             self.cached_unwind.invalidate();
         }
 
-        if !storage_only && !this_scope_only {
+        if !ignore_unwinds && !this_scope_only {
             for drop_data in &mut self.drops {
-                if let DropKind::Value { ref mut cached_block } = drop_data.kind {
-                    cached_block.invalidate();
-                }
+                drop_data.cached_block.invalidate();
             }
         }
     }
@@ -388,6 +388,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
         unpack!(block = build_scope_drops(
             &mut self.cfg,
+            self.is_generator,
             &scope,
             block,
             unwind_to,
@@ -454,6 +455,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
             unpack!(block = build_scope_drops(
                 &mut self.cfg,
+                self.is_generator,
                 scope,
                 block,
                 unwind_to,
@@ -484,10 +486,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         let result = block;
 
         while let Some(scope) = scopes.next() {
-            if !scope.needs_cleanup && !self.is_generator {
-                continue;
-            }
-
             block = if let Some(b) = scope.cached_generator_drop {
                 self.cfg.terminate(block, src_info,
                                    TerminatorKind::Goto { target: b });
@@ -508,6 +506,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
             unpack!(block = build_scope_drops(
                 &mut self.cfg,
+                self.is_generator,
                 scope,
                 block,
                 unwind_to,
@@ -642,16 +641,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         place: &Place<'tcx>,
         place_ty: Ty<'tcx>,
     ) {
-        self.schedule_drop(
-            span, region_scope, place, place_ty,
-            DropKind::Storage,
-        );
-        self.schedule_drop(
-            span, region_scope, place, place_ty,
-            DropKind::Value {
-                cached_block: CachedBlock::default(),
-            },
-        );
+        self.schedule_drop(span, region_scope, place, place_ty, DropKind::Storage);
+        self.schedule_drop(span, region_scope, place, place_ty, DropKind::Value);
     }
 
     // Scheduling drops
@@ -671,7 +662,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
     ) {
         let needs_drop = self.hir.needs_drop(place_ty);
         match drop_kind {
-            DropKind::Value { .. } => if !needs_drop { return },
+            DropKind::Value => if !needs_drop { return },
             DropKind::Storage => {
                 match *place {
                     Place::Base(PlaceBase::Local(index)) => if index.index() <= self.arg_count {
@@ -736,9 +727,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
            // invalidating caches of each scope visited. This way bare minimum of the
            // caches gets invalidated. i.e., if a new drop is added into the middle scope, the
            // cache of outer scope stays intact.
-            scope.invalidate_cache(!needs_drop, this_scope);
+            scope.invalidate_cache(!needs_drop, self.is_generator, this_scope);
             if this_scope {
-                if let DropKind::Value { .. } = drop_kind {
+                if let DropKind::Value = drop_kind {
                     scope.needs_cleanup = true;
                 }
 
@@ -750,7 +741,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                 scope.drops.push(DropData {
                     span: scope_end,
                     location: place.clone(),
-                    kind: drop_kind
+                    kind: drop_kind,
+                    cached_block: CachedBlock::default(),
                 });
                 return;
             }
@@ -797,6 +789,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         // to left reading the cached results but never created anything.
 
         // Find the last cached block
+        debug!("diverge_cleanup_gen(self.scopes = {:?})", self.scopes);
         let (mut target, first_uncached) = if let Some(cached_index) = self.scopes.iter()
             .rposition(|scope| scope.cached_unwind.get(generator_drop).is_some()) {
             (self.scopes[cached_index].cached_unwind.get(generator_drop).unwrap(), cached_index + 1)
@@ -890,7 +883,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         assert_eq!(top_scope.region_scope, region_scope);
 
         top_scope.drops.clear();
-        top_scope.invalidate_cache(false, true);
+        top_scope.invalidate_cache(false, self.is_generator, true);
     }
 
     /// Drops the single variable provided
@@ -941,7 +934,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             }
         }
 
-        top_scope.invalidate_cache(true, true);
+        top_scope.invalidate_cache(true, self.is_generator, true);
     }
 }
 
@@ -949,13 +942,14 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 /// Builds drops for pop_scope and exit_scope.
 fn build_scope_drops<'tcx>(
     cfg: &mut CFG<'tcx>,
+    is_generator: bool,
     scope: &Scope<'tcx>,
     mut block: BasicBlock,
     last_unwind_to: BasicBlock,
     arg_count: usize,
     generator_drop: bool,
 ) -> BlockAnd<()> {
-    debug!("build_scope_drops({:?} -> {:?}", block, scope);
+    debug!("build_scope_drops({:?} -> {:?})", block, scope);
 
     // Build up the drops in evaluation order. The end result will
     // look like:
@@ -969,28 +963,20 @@ fn build_scope_drops<'tcx>(
     // The horizontal arrows represent the execution path when the drops return
     // successfully. The downwards arrows represent the execution path when the
     // drops panic (panicking while unwinding will abort, so there's no need for
-    // another set of arrows). The drops for the unwind path should have already
-    // been generated by `diverge_cleanup_gen`.
-
-    let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| {
-        if let DropKind::Value { cached_block } = drop_data.kind {
-            Some(cached_block.get(generator_drop).unwrap_or_else(|| {
-                span_bug!(drop_data.span, "cached block not present?")
-            }))
-        } else {
-            None
-        }
-    });
-
-    // When we unwind from a drop, we start cleaning up from the next one, so
-    // we don't need this block.
-    unwind_blocks.next();
+    // another set of arrows).
+    //
+    // For generators, we unwind from a drop on a local to its StorageDead
+    // statement. For other functions we don't worry about StorageDead. The
+    // drops for the unwind path should have already been generated by
+    // `diverge_cleanup_gen`.
 
-    for drop_data in scope.drops.iter().rev() {
+    for drop_idx in (0..scope.drops.len()).rev() {
+        let drop_data = &scope.drops[drop_idx];
         let source_info = scope.source_info(drop_data.span);
         match drop_data.kind {
-            DropKind::Value { .. } => {
-                let unwind_to = unwind_blocks.next().unwrap_or(last_unwind_to);
+            DropKind::Value => {
+                let unwind_to = get_unwind_to(scope, is_generator, drop_idx, generator_drop)
+                    .unwrap_or(last_unwind_to);
 
                 let next = cfg.start_new_block();
                 cfg.terminate(block, source_info, TerminatorKind::Drop {
@@ -1018,6 +1004,31 @@ fn build_scope_drops<'tcx>(
     block.unit()
 }
 
+fn get_unwind_to<'tcx>(
+    scope: &Scope<'tcx>,
+    is_generator: bool,
+    unwind_from: usize,
+    generator_drop: bool,
+) -> Option<BasicBlock> {
+    for drop_idx in (0..unwind_from).rev() {
+        let drop_data = &scope.drops[drop_idx];
+        match (is_generator, &drop_data.kind) {
+            (true, DropKind::Storage) => {
+                return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| {
+                    span_bug!(drop_data.span, "cached block not present for {:?}", drop_data)
+                }));
+            }
+            (false, DropKind::Value) => {
+                return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| {
+                    span_bug!(drop_data.span, "cached block not present for {:?}", drop_data)
+                }));
+            }
+            _ => (),
+        }
+    }
+    None
+}
+
 fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
                              span: Span,
                              scope: &mut Scope<'tcx>,
@@ -1051,6 +1062,7 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
     // Build up the drops. Here we iterate the vector in
     // *forward* order, so that we generate drops[0] first (right to
     // left in diagram above).
+    debug!("build_diverge_scope({:?})", scope.drops);
     for (j, drop_data) in scope.drops.iter_mut().enumerate() {
         debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
         // Only full value drops are emitted in the diverging path,
@@ -1070,20 +1082,30 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
                             source_info: source_info(drop_data.span),
                             kind: StatementKind::StorageDead(index)
                         });
+                        if !target_built_by_us {
+                            // We cannot add statements to an existing block, so we create a new
+                            // block for our StorageDead statements.
+                            let block = cfg.start_new_cleanup_block();
+                            let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope };
+                            cfg.terminate(block, source_info,
+                                          TerminatorKind::Goto { target: target });
+                            target = block;
+                            target_built_by_us = true;
+                        }
                     }
                     _ => unreachable!(),
                 };
+                *drop_data.cached_block.ref_mut(generator_drop) = Some(target);
             }
             DropKind::Storage => {}
-            DropKind::Value { ref mut cached_block } => {
-                let cached_block = cached_block.ref_mut(generator_drop);
+            DropKind::Value => {
+                let cached_block = drop_data.cached_block.ref_mut(generator_drop);
                 target = if let Some(cached_block) = *cached_block {
                     storage_deads.clear();
                     target_built_by_us = false;
                     cached_block
                 } else {
-                    push_storage_deads(
-                        cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
+                    push_storage_deads(cfg, target, &mut storage_deads);
                     let block = cfg.start_new_cleanup_block();
                     cfg.terminate(block, source_info(drop_data.span),
                                   TerminatorKind::Drop {
@@ -1098,7 +1120,7 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
             }
         };
     }
-    push_storage_deads(cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
+    push_storage_deads(cfg, target, &mut storage_deads);
     *scope.cached_unwind.ref_mut(generator_drop) = Some(target);
 
     assert!(storage_deads.is_empty());
@@ -1108,23 +1130,13 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
 }
 
 fn push_storage_deads(cfg: &mut CFG<'tcx>,
-                      target: &mut BasicBlock,
-                      storage_deads: &mut Vec<Statement<'tcx>>,
-                      target_built_by_us: bool,
-                      source_scope: SourceScope) {
+                      target: BasicBlock,
+                      storage_deads: &mut Vec<Statement<'tcx>>) {
     if storage_deads.is_empty() { return; }
-    if !target_built_by_us {
-        // We cannot add statements to an existing block, so we create a new
-        // block for our StorageDead statements.
-        let block = cfg.start_new_cleanup_block();
-        let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope };
-        cfg.terminate(block, source_info, TerminatorKind::Goto { target: *target });
-        *target = block;
-    }
-    let statements = &mut cfg.block_data_mut(*target).statements;
+    let statements = &mut cfg.block_data_mut(target).statements;
     storage_deads.reverse();
     debug!("push_storage_deads({:?}), storage_deads={:?}, statements={:?}",
-           *target, storage_deads, statements);
+           target, storage_deads, statements);
     storage_deads.append(statements);
     mem::swap(statements, storage_deads);
     assert!(storage_deads.is_empty());
diff --git a/src/librustc_mir/dataflow/impls/storage_liveness.rs b/src/librustc_mir/dataflow/impls/storage_liveness.rs
index 0fb66032a171d..9bf346f5f62d6 100644
--- a/src/librustc_mir/dataflow/impls/storage_liveness.rs
+++ b/src/librustc_mir/dataflow/impls/storage_liveness.rs
@@ -43,16 +43,9 @@ impl<'a, 'tcx> BitDenotation<'tcx> for MaybeStorageLive<'a, 'tcx> {
     }
 
     fn terminator_effect(&self,
-                         sets: &mut BlockSets<'_, Local>,
-                         loc: Location) {
-        match &self.mir[loc.block].terminator().kind {
-            TerminatorKind::Drop { location, .. } => {
-                if let Some(l) = location.local_or_deref_local() {
-                    sets.kill(l);
-                }
-            }
-            _ => (),
-        }
+                         _sets: &mut BlockSets<'_, Local>,
+                         _loc: Location) {
+        // Terminators have no effect
     }
 
     fn propagate_call_return(
diff --git a/src/test/mir-opt/generator-storage-dead-unwind.rs b/src/test/mir-opt/generator-storage-dead-unwind.rs
new file mode 100644
index 0000000000000..7be17c4292ae6
--- /dev/null
+++ b/src/test/mir-opt/generator-storage-dead-unwind.rs
@@ -0,0 +1,106 @@
+// ignore-wasm32-bare compiled with panic=abort by default
+
+// Test that we generate StorageDead on unwind paths for generators.
+//
+// Basic block and local names can safely change, but the StorageDead statements
+// should not go away.
+
+#![feature(generators, generator_trait)]
+
+struct Foo(i32);
+
+impl Drop for Foo {
+    fn drop(&mut self) {}
+}
+
+struct Bar(i32);
+
+fn take<T>(_x: T) {}
+
+fn main() {
+    let _gen = || {
+        let a = Foo(5);
+        let b = Bar(6);
+        yield;
+        take(a);
+        take(b);
+    };
+}
+
+// END RUST SOURCE
+
+// START rustc.main-{{closure}}.StateTransform.before.mir
+// ...
+// let _2: Foo;
+// ...
+// let mut _7: Foo;
+// ...
+// let mut _9: Bar;
+// scope 1 {
+//     let _3: Bar;
+//     scope 2 {
+//     }
+// }
+// bb0: {
+//     StorageLive(_2);
+//     _2 = Foo(const 5i32,);
+//     StorageLive(_3);
+//     _3 = Bar(const 6i32,);
+//     ...
+//     _1 = suspend(move _5) -> [resume: bb2, drop: bb4];
+// }
+// bb1 (cleanup): {
+//     resume;
+// }
+// bb2: {
+//     ...
+//     StorageLive(_7);
+//     _7 = move _2;
+//     _6 = const take::<Foo>(move _7) -> [return: bb9, unwind: bb8];
+// }
+// bb3 (cleanup): {
+//     StorageDead(_2);
+//     drop(_1) -> bb1;
+// }
+// bb4: {
+//     ...
+//     StorageDead(_3);
+//     drop(_2) -> [return: bb5, unwind: bb3];
+// }
+// bb5: {
+//     StorageDead(_2);
+//     drop(_1) -> [return: bb6, unwind: bb1];
+// }
+// bb6: {
+//     generator_drop;
+// }
+// bb7 (cleanup): {
+//     StorageDead(_3);
+//     StorageDead(_2);
+//     drop(_1) -> bb1;
+// }
+// bb8 (cleanup): {
+//     StorageDead(_7);
+//     goto -> bb7;
+// }
+// bb9: {
+//     StorageDead(_7);
+//     StorageLive(_9);
+//     _9 = move _3;
+//     _8 = const take::<Bar>(move _9) -> [return: bb10, unwind: bb11];
+// }
+// bb10: {
+//     StorageDead(_9);
+//     ...
+//     StorageDead(_3);
+//     StorageDead(_2);
+//     drop(_1) -> [return: bb12, unwind: bb1];
+// }
+// bb11 (cleanup): {
+//     StorageDead(_9);
+//     goto -> bb7;
+// }
+// bb12: {
+//     return;
+// }
+// END rustc.main-{{closure}}.StateTransform.before.mir
diff --git a/src/test/run-pass/generator/drop-and-replace.rs b/src/test/run-pass/generator/drop-and-replace.rs
new file mode 100644
index 0000000000000..042e1276db5c6
--- /dev/null
+++ b/src/test/run-pass/generator/drop-and-replace.rs
@@ -0,0 +1,44 @@
+// Regression test for incorrect DropAndReplace behavior introduced in #60840
+// and fixed in #61373. When combined with the optimization implemented in
+// #60187, this produced incorrect code for generators when a saved local was
+// re-assigned.
+
+#![feature(generators, generator_trait)]
+
+use std::ops::{Generator, GeneratorState};
+use std::pin::Pin;
+
+#[derive(Debug, PartialEq)]
+struct Foo(i32);
+
+impl Drop for Foo {
+    fn drop(&mut self) { }
+}
+
+fn main() {
+    let mut a = || {
+        let mut x = Foo(4);
+        yield;
+        assert_eq!(x.0, 4);
+
+        // At one point this tricked our dataflow analysis into thinking `x` was
+        // StorageDead after the assignment.
+        x = Foo(5);
+        assert_eq!(x.0, 5);
+
+        {
+            let y = Foo(6);
+            yield;
+            assert_eq!(y.0, 6);
+        }
+
+        assert_eq!(x.0, 5);
+    };
+
+    loop {
+        match Pin::new(&mut a).resume() {
+            GeneratorState::Complete(()) => break,
+            _ => (),
+        }
+    }
+}
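For context, a minimal standalone sketch (not part of the patch; the `Loud` type
and its output are invented for illustration) of the scenario the tests above
exercise: a local held across a `yield` is stored in the generator, so dropping
the suspended generator must drop it, and the unwind paths built for that drop
are where the StorageDead statements checked by the mir-opt test appear.

#![feature(generators, generator_trait)]

use std::ops::Generator;
use std::pin::Pin;

struct Loud(&'static str);

impl Drop for Loud {
    // Illustrative only: makes the drop observable.
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let mut g = || {
        // `a` is live across the yield, so it is saved in the generator.
        let a = Loud("a");
        yield;
        drop(a);
    };
    // Resume once so the generator suspends with `a` live...
    let _ = Pin::new(&mut g).resume();
    // ...then drop the suspended generator. This takes the generator_drop
    // path, which drops `a`; if that drop unwinds, the cleanup blocks built
    // by this patch also mark `a`'s storage dead so the dataflow analysis
    // used for the generator layout stays correct.
    drop(g);
}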