diff --git a/Cargo.lock b/Cargo.lock index 0ee46c8429055..57219e0032311 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -438,7 +438,7 @@ dependencies = [ "proc-macro2 1.0.3", "quote 1.0.2", "syn 1.0.11", - "synstructure 0.12.1", + "synstructure", ] [[package]] @@ -937,13 +937,13 @@ checksum = "a0afaad2b26fa326569eb264b1363e8ae3357618c43982b3f285f0774ce76b69" [[package]] name = "derive-new" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ca414e896ae072546f4d789f452daaecf60ddee4c9df5dc6d5936d769e3d87c" +checksum = "71f31892cd5c62e414316f2963c5689242c43d8e7bbcaaeca97e5e28c95d91d9" dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.12", - "syn 0.15.35", + "proc-macro2 1.0.3", + "quote 1.0.2", + "syn 1.0.11", ] [[package]] @@ -1144,14 +1144,14 @@ dependencies = [ [[package]] name = "failure_derive" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea1063915fd7ef4309e222a5a07cf9c319fb9c7836b1f89b85458672dbb127e1" +checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.12", - "syn 0.15.35", - "synstructure 0.10.2", + "proc-macro2 1.0.3", + "quote 1.0.2", + "syn 1.0.11", + "synstructure", ] [[package]] @@ -1404,30 +1404,18 @@ dependencies = [ [[package]] name = "handlebars" -version = "2.0.1" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df044dd42cdb7e32f28557b661406fc0f2494be75199779998810dbc35030e0d" +checksum = "ba758d094d31274eb49d15da6f326b96bf3185239a6359bf684f3d5321148900" dependencies = [ - "hashbrown 0.5.0", - "lazy_static 1.4.0", "log", "pest", "pest_derive", "quick-error", - "regex", "serde", "serde_json", ] -[[package]] -name = "hashbrown" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1de41fb8dba9714efd92241565cdff73f78508c95697dd56787d3cba27e2353" -dependencies = [ - "serde", -] - [[package]] name = "hashbrown" version = "0.6.2" @@ -2054,9 +2042,9 @@ dependencies = [ [[package]] name = "mdbook" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "031bdd9d4893c983e2f69ebc4b59070feee8276a584c4aabdcb351235ea28016" +checksum = "e7ec525f7ebccc2dd935c263717250cd37f9a4b264a77c5dbc950ea2734d8159" dependencies = [ "ammonia", "chrono", @@ -2556,15 +2544,15 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.1.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63120576c4efd69615b5537d3d052257328a4ca82876771d6944424ccfd9f646" +checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" dependencies = [ "pest", "pest_meta", - "proc-macro2 0.4.30", - "quote 0.6.12", - "syn 0.15.35", + "proc-macro2 1.0.3", + "quote 1.0.2", + "syn 1.0.11", ] [[package]] @@ -2784,9 +2772,9 @@ checksum = "6ddd112cca70a4d30883b2d21568a1d376ff8be4758649f64f973c6845128ad3" [[package]] name = "quick-error" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quine-mc_cluskey" @@ -3448,7 +3436,7 @@ dependencies = [ "proc-macro2 1.0.3", "quote 1.0.2", "syn 1.0.11", - "synstructure 0.12.1", + "synstructure", ] [[package]] @@ -4058,7 +4046,7 @@ 
dependencies = [ "proc-macro2 1.0.3", "quote 1.0.2", "syn 1.0.11", - "synstructure 0.12.1", + "synstructure", ] [[package]] @@ -4629,13 +4617,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.81" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "477b13b646f5b5b56fc95bedfc3b550d12141ce84f466f6c44b9a17589923885" +checksum = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c" dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.12", - "syn 0.15.35", + "proc-macro2 1.0.3", + "quote 1.0.2", + "syn 1.0.11", ] [[package]] @@ -4799,7 +4787,7 @@ dependencies = [ "core", "dlmalloc", "fortanix-sgx-abi", - "hashbrown 0.6.2", + "hashbrown", "hermit-abi", "libc", "panic_abort", @@ -4931,18 +4919,6 @@ dependencies = [ "unicode-xid 0.2.0", ] -[[package]] -name = "synstructure" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02353edf96d6e4dc81aea2d8490a7e9db177bf8acb0e951c24940bf866cb313f" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.12", - "syn 0.15.35", - "unicode-xid 0.1.0", -] - [[package]] name = "synstructure" version = "0.12.1" diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs index 0f0bd617f439c..bbe80c26dcbf9 100644 --- a/src/libarena/lib.rs +++ b/src/libarena/lib.rs @@ -5,8 +5,7 @@ //! of individual objects while the arena itself is still alive. The benefit //! of an arena is very fast allocation; just a pointer bump. //! -//! This crate implements `TypedArena`, a simple arena that can only hold -//! objects of a single type. +//! This crate implements several kinds of arena. #![doc( html_root_url = "https://doc.rust-lang.org/nightly/", @@ -98,7 +97,13 @@ impl<T> TypedArenaChunk<T> { } } +// The arenas start with PAGE-sized chunks, and then each new chunk is twice as +// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon +// we stop growing. This scales well, from arenas that are barely used up to +// arenas that are used for 100s of MiBs. Note also that the chosen sizes match +// the usual sizes of pages and huge pages on Linux. const PAGE: usize = 4096; +const HUGE_PAGE: usize = 2 * 1024 * 1024; impl<T> Default for TypedArena<T> { /// Creates a new `TypedArena`. @@ -211,6 +216,9 @@ impl<T> TypedArena<T> { #[cold] fn grow(&self, n: usize) { unsafe { + // We need the element size to convert chunk sizes (ranging from + // PAGE to HUGE_PAGE bytes) to element counts. + let elem_size = cmp::max(1, mem::size_of::<T>()); let mut chunks = self.chunks.borrow_mut(); let (chunk, mut new_capacity); if let Some(last_chunk) = chunks.last_mut() { @@ -221,18 +229,20 @@ impl<T> TypedArena<T> { self.end.set(last_chunk.end()); return; } else { + // If the previous chunk's capacity is less than HUGE_PAGE + // bytes, then this chunk will be at least double the previous + // chunk's size. new_capacity = last_chunk.storage.capacity(); - loop { + if new_capacity < HUGE_PAGE / elem_size { new_capacity = new_capacity.checked_mul(2).unwrap(); - if new_capacity >= currently_used_cap + n { - break; - } } } } else { - let elem_size = cmp::max(1, mem::size_of::<T>()); - new_capacity = cmp::max(n, PAGE / elem_size); + new_capacity = PAGE / elem_size; } + // Also ensure that this chunk can fit `n`.
+ new_capacity = cmp::max(n, new_capacity); + chunk = TypedArenaChunk::<T>::new(new_capacity); self.ptr.set(chunk.start()); self.end.set(chunk.end()); @@ -347,17 +357,20 @@ impl DroplessArena { self.end.set(last_chunk.end()); return; } else { + // If the previous chunk's capacity is less than HUGE_PAGE + // bytes, then this chunk will be at least double the previous + // chunk's size. new_capacity = last_chunk.storage.capacity(); - loop { + if new_capacity < HUGE_PAGE { new_capacity = new_capacity.checked_mul(2).unwrap(); - if new_capacity >= used_bytes + needed_bytes { - break; - } } } } else { - new_capacity = cmp::max(needed_bytes, PAGE); + new_capacity = PAGE; } + // Also ensure that this chunk can fit `needed_bytes`. + new_capacity = cmp::max(needed_bytes, new_capacity); + chunk = TypedArenaChunk::<u8>::new(new_capacity); self.ptr.set(chunk.start()); self.end.set(chunk.end());
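Both `grow` functions above follow the same policy; as a rough standalone sketch (a hypothetical `next_capacity` helper, not part of the patch, with constants mirroring the `PAGE` and `HUGE_PAGE` defined above):

```rust
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;

/// Next chunk capacity (in elements), given the previous chunk's capacity,
/// the element size in bytes, and the `n` elements that must fit.
fn next_capacity(prev: Option<usize>, elem_size: usize, n: usize) -> usize {
    let cap = match prev {
        // First chunk: one page's worth of elements.
        None => PAGE / elem_size,
        // Double until a chunk reaches HUGE_PAGE bytes, then stop growing.
        Some(prev) if prev < HUGE_PAGE / elem_size => prev * 2,
        Some(prev) => prev,
    };
    // Whatever the policy says, the new chunk must be able to fit `n`.
    cap.max(n)
}

fn main() {
    // With 4-byte elements: 1024, 2048, ..., up to 2 MiB / 4 B = 524288.
    assert_eq!(next_capacity(None, 4, 1), 1024);
    assert_eq!(next_capacity(Some(1024), 4, 1), 2048);
    assert_eq!(next_capacity(Some(524288), 4, 1), 524288);
    // A single oversized request still gets a chunk big enough for it.
    assert_eq!(next_capacity(Some(1024), 4, 1_000_000), 1_000_000);
}
```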
diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs index bb68184c8dd77..d74df82bddd9d 100644 --- a/src/libcore/iter/range.rs +++ b/src/libcore/iter/range.rs @@ -4,47 +4,182 @@ use crate::ops::{self, Add, Sub, Try}; use super::{FusedIterator, TrustedLen}; -/// Objects that can be stepped over in both directions. +/// Objects that have a notion of *successor* and *predecessor* operations. /// -/// The `steps_between` function provides a way to efficiently compare -/// two `Step` objects. -#[unstable( - feature = "step_trait", - reason = "likely to be replaced by finer-grained traits", - issue = "42168" -)] -pub trait Step: Clone + PartialOrd + Sized { - /// Returns the number of steps between two step objects. The count is - /// inclusive of `start` and exclusive of `end`. - /// - /// Returns `None` if it is not possible to calculate `steps_between` - /// without overflow. +/// The *successor* operation moves towards values that compare greater. +/// The *predecessor* operation moves towards values that compare lesser. +/// +/// # Safety +/// +/// This trait is `unsafe` because its implementation must be correct for +/// the safety of `unsafe trait TrustedLen` implementations, and the results +/// of using this trait can otherwise be trusted by `unsafe` code to be correct +/// and fulfill the listed obligations. +#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")] +pub unsafe trait Step: Clone + PartialOrd + Sized { + /// Returns the number of *successor* steps required to get from `start` to `end`. + /// + /// Returns `None` if the number of steps would overflow `usize` + /// (or is infinite, or if `end` would never be reached). + /// + /// # Invariants + /// + /// For any `a`, `b`, and `n`: + /// + /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::forward_checked(&a, n) == Some(b)` + /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::backward_checked(&b, n) == Some(a)` + /// * `steps_between(&a, &b) == Some(n)` only if `a <= b` + /// * Corollary: `steps_between(&a, &b) == Some(0)` if and only if `a == b` + /// * Note that `a <= b` does _not_ imply `steps_between(&a, &b) != None`; + /// this is the case when it would require more than `usize::MAX` steps to get to `b` + /// * `steps_between(&a, &b) == None` if `a > b` fn steps_between(start: &Self, end: &Self) -> Option<usize>; - /// Replaces this step with `1`, returning a clone of itself. + /// Returns the value that would be obtained by taking the *successor* + /// of `self` `count` times. + /// + /// If this would overflow the range of values supported by `Self`, returns `None`. /// - /// The output of this method should always be greater than the output of replace_zero. - fn replace_one(&mut self) -> Self; + /// # Invariants + /// + /// For any `a`, `n`, and `m`: + /// + /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, m).and_then(|x| Step::forward_checked(x, n))` + /// + /// For any `a`, `n`, and `m` where `n + m` does not overflow: + /// + /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, n + m)` + /// + /// For any `a` and `n`: + /// + /// * `Step::forward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::forward_checked(&x, 1))` + /// * Corollary: `Step::forward_checked(&a, 0) == Some(a)` + #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")] + fn forward_checked(start: Self, count: usize) -> Option<Self>; - /// Replaces this step with `0`, returning a clone of itself. + /// Returns the value that would be obtained by taking the *successor* + /// of `self` `count` times. + /// + /// If this would overflow the range of values supported by `Self`, + /// this function is allowed to panic, wrap, or saturate. + /// The suggested behavior is to panic when debug assertions are enabled, + /// and to wrap or saturate otherwise. /// - /// The output of this method should always be less than the output of replace_one. - fn replace_zero(&mut self) -> Self; + /// Unsafe code should not rely on the correctness of behavior after overflow. + /// + /// # Invariants + /// + /// For any `a`, `n`, and `m`, where no overflow occurs: + /// + /// * `Step::forward(Step::forward(a, n), m) == Step::forward(a, n + m)` + /// + /// For any `a` and `n`, where no overflow occurs: + /// + /// * `Step::forward_checked(a, n) == Some(Step::forward(a, n))` + /// * `Step::forward(a, n) == (0..n).fold(a, |x, _| Step::forward(x, 1))` + /// * Corollary: `Step::forward(a, 0) == a` + /// * `Step::forward(a, n) >= a` + /// * `Step::backward(Step::forward(a, n), n) == a` + #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")] + fn forward(start: Self, count: usize) -> Self { + Step::forward_checked(start, count).expect("overflow in `Step::forward`") + } - /// Adds one to this step, returning the result. - fn add_one(&self) -> Self; + /// Returns the value that would be obtained by taking the *successor* + /// of `self` `count` times. + /// + /// # Safety + /// + /// It is undefined behavior for this operation to overflow the + /// range of values supported by `Self`. If you cannot guarantee that this + /// will not overflow, use `forward` or `forward_checked` instead. + /// + /// # Invariants + /// + /// For any `a`: + /// + /// * if there exists `b` such that `b > a`, it is safe to call `Step::forward_unchecked(a, 1)` + /// * if there exists `b`, `n` such that `steps_between(&a, &b) == Some(n)`, + /// it is safe to call `Step::forward_unchecked(a, m)` for any `m <= n`. + /// + /// For any `a` and `n`, where no overflow occurs: + /// + /// * `Step::forward_unchecked(a, n)` is equivalent to `Step::forward(a, n)` + #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")] + unsafe fn forward_unchecked(start: Self, count: usize) -> Self { + Step::forward(start, count) + } - /// Subtracts one to this step, returning the result. - fn sub_one(&self) -> Self; + /// Returns the value that would be obtained by taking the *predecessor* + /// of `self` `count` times.
+ /// + /// If this would overflow the range of values supported by `Self`, returns `None`. + /// + /// # Invariants + /// + /// For any `a`, `n`, and `m`: + /// + /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == n.checked_add(m).and_then(|x| Step::backward_checked(a, x))` + /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == try { Step::backward_checked(a, n.checked_add(m)?) }` + /// + /// For any `a` and `n`: + /// + /// * `Step::backward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::backward_checked(&x, 1))` + /// * Corollary: `Step::backward_checked(&a, 0) == Some(a)` + #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")] + fn backward_checked(start: Self, count: usize) -> Option<Self>; - /// Adds a `usize`, returning `None` on overflow. - fn add_usize(&self, n: usize) -> Option<Self>; + /// Returns the value that would be obtained by taking the *predecessor* + /// of `self` `count` times. + /// + /// If this would overflow the range of values supported by `Self`, + /// this function is allowed to panic, wrap, or saturate. + /// The suggested behavior is to panic when debug assertions are enabled, + /// and to wrap or saturate otherwise. + /// + /// Unsafe code should not rely on the correctness of behavior after overflow. + /// + /// # Invariants + /// + /// For any `a`, `n`, and `m`, where no overflow occurs: + /// + /// * `Step::backward(Step::backward(a, n), m) == Step::backward(a, n + m)` + /// + /// For any `a` and `n`, where no overflow occurs: + /// + /// * `Step::backward_checked(a, n) == Some(Step::backward(a, n))` + /// * `Step::backward(a, n) == (0..n).fold(a, |x, _| Step::backward(x, 1))` + /// * Corollary: `Step::backward(a, 0) == a` + /// * `Step::backward(a, n) <= a` + /// * `Step::forward(Step::backward(a, n), n) == a` + #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")] + fn backward(start: Self, count: usize) -> Self { + Step::backward_checked(start, count).expect("overflow in `Step::backward`") + } - /// Subtracts a `usize`, returning `None` on underflow. - fn sub_usize(&self, n: usize) -> Option<Self> { - // this default implementation makes the addition of `sub_usize` a non-breaking change - let _ = n; - unimplemented!() + /// Returns the value that would be obtained by taking the *predecessor* + /// of `self` `count` times. + /// + /// # Safety + /// + /// It is undefined behavior for this operation to overflow the + /// range of values supported by `Self`. If you cannot guarantee that this + /// will not overflow, use `backward` or `backward_checked` instead. + /// + /// # Invariants + /// + /// For any `a`: + /// + /// * if there exists `b` such that `b < a`, it is safe to call `Step::backward_unchecked(a, 1)` + /// * if there exists `b`, `n` such that `steps_between(&b, &a) == Some(n)`, + /// it is safe to call `Step::backward_unchecked(a, m)` for any `m <= n`. + /// + /// For any `a` and `n`, where no overflow occurs: + /// + /// * `Step::backward_unchecked(a, n)` is equivalent to `Step::backward(a, n)` + #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")] + unsafe fn backward_unchecked(start: Self, count: usize) -> Self { + Step::backward(start, count) } }
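A quick illustration of the redesigned API (a nightly-only sketch; the feature gates are the ones introduced in this diff):

```rust
#![feature(step_trait, step_trait_ext)]
use std::iter::Step;

fn main() {
    // steps_between counts *successor* steps from start to end...
    assert_eq!(Step::steps_between(&3_u8, &7_u8), Some(4));
    // ...and, per the new invariants, is None when start > end
    // (the old trait returned Some(0) in that case).
    assert_eq!(Step::steps_between(&7_u8, &3_u8), None);

    // The checked operations return None instead of overflowing.
    assert_eq!(Step::forward_checked(250_u8, 5), Some(255));
    assert_eq!(Step::forward_checked(250_u8, 10), None);
    assert_eq!(Step::backward_checked(3_u8, 4), None);
}
```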
step_identical_methods { () => { #[inline] - fn replace_one(&mut self) -> Self { - mem::replace(self, 1) + unsafe fn forward_unchecked(start: Self, n: usize) -> Self { + start.unchecked_add(n as Self) } #[inline] - fn replace_zero(&mut self) -> Self { - mem::replace(self, 0) + unsafe fn backward_unchecked(start: Self, n: usize) -> Self { + start.unchecked_sub(n as Self) } #[inline] - fn add_one(&self) -> Self { - Add::add(*self, 1) + fn forward(start: Self, n: usize) -> Self { + // In debug builds, trigger a panic on overflow. + // This should optimize completely out in release builds. + if Self::forward_checked(start, n).is_none() { + let _ = Add::add(Self::MAX, 1); + } + // Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`. + start.wrapping_add(n as Self) } #[inline] - fn sub_one(&self) -> Self { - Sub::sub(*self, 1) + fn backward(start: Self, n: usize) -> Self { + // In debug builds, trigger a panic on overflow. + // This should optimize completely out in release builds. + if Self::backward_checked(start, n).is_none() { + let _ = Sub::sub(Self::MIN, 1); + } + // Do wrapping math to allow e.g. `Step::backward(127i8, 255)`. + start.wrapping_sub(n as Self) } }; } -macro_rules! step_impl_unsigned { - ($($t:ty)*) => ($( - #[unstable(feature = "step_trait", - reason = "likely to be replaced by finer-grained traits", - issue = "42168")] - impl Step for $t { - #[inline] - fn steps_between(start: &$t, end: &$t) -> Option { - if *start < *end { - usize::try_from(*end - *start).ok() - } else { - Some(0) +macro_rules! step_integer_impls { + { + narrower than or same width as usize: + $( [ $u_narrower:ident $i_narrower:ident ] ),+; + wider than usize: + $( [ $u_wider:ident $i_wider:ident ] ),+; + } => { + $( + #[allow(unreachable_patterns)] + #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")] + unsafe impl Step for $u_narrower { + step_identical_methods!(); + + #[inline] + fn steps_between(start: &Self, end: &Self) -> Option { + if *start <= *end { + // This relies on $u_narrower <= usize + Some((*end - *start) as usize) + } else { + None + } } - } - #[inline] - #[allow(unreachable_patterns)] - fn add_usize(&self, n: usize) -> Option { - match <$t>::try_from(n) { - Ok(n_as_t) => self.checked_add(n_as_t), - Err(_) => None, + #[inline] + fn forward_checked(start: Self, n: usize) -> Option { + match Self::try_from(n) { + Ok(n) => start.checked_add(n), + Err(_) => None, // if n is out of range, `unsigned_start + n` is too + } + } + + #[inline] + fn backward_checked(start: Self, n: usize) -> Option { + match Self::try_from(n) { + Ok(n) => start.checked_sub(n), + Err(_) => None, // if n is out of range, `unsigned_start - n` is too + } } } - #[inline] #[allow(unreachable_patterns)] - fn sub_usize(&self, n: usize) -> Option { - match <$t>::try_from(n) { - Ok(n_as_t) => self.checked_sub(n_as_t), - Err(_) => None, + #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")] + unsafe impl Step for $i_narrower { + step_identical_methods!(); + + #[inline] + fn steps_between(start: &Self, end: &Self) -> Option { + if *start <= *end { + // This relies on $i_narrower <= usize + // + // Casting to isize extends the width but preserves the sign. + // Use wrapping_sub in isize space and cast to usize to compute + // the difference that may not fit inside the range of isize. + Some((*end as isize).wrapping_sub(*start as isize) as usize) + } else { + None + } } - } - step_identical_methods!(); - } - )*) -} -macro_rules! 
step_impl_signed { - ($( [$t:ty : $unsigned:ty] )*) => ($( - #[unstable(feature = "step_trait", - reason = "likely to be replaced by finer-grained traits", - issue = "42168")] - impl Step for $t { - #[inline] - fn steps_between(start: &$t, end: &$t) -> Option<usize> { - if *start < *end { - // Use .wrapping_sub and cast to unsigned to compute the - // difference that may not fit inside the range of $t. - usize::try_from(end.wrapping_sub(*start) as $unsigned).ok() - } else { - Some(0) + #[inline] + fn forward_checked(start: Self, n: usize) -> Option<Self> { + match $u_narrower::try_from(n) { + Ok(n) => { + // Wrapping handles cases like + // `Step::forward(-120_i8, 200) == Some(80_i8)`, + // even though 200 is out of range for i8. + let wrapped = start.wrapping_add(n as Self); + if wrapped >= start { + Some(wrapped) + } else { + None // Addition overflowed + } + } + // If n is out of range of e.g. u8, + // then it is bigger than the entire range of i8, + // so `any_i8 + n` necessarily overflows i8. + Err(_) => None, + } + } + + #[inline] + fn backward_checked(start: Self, n: usize) -> Option<Self> { + match $u_narrower::try_from(n) { + Ok(n) => { + // Wrapping handles cases like + // `Step::backward(80_i8, 200) == Some(-120_i8)`, + // even though 200 is out of range for i8. + let wrapped = start.wrapping_sub(n as Self); + if wrapped <= start { + Some(wrapped) + } else { + None // Subtraction overflowed + } + } + // If n is out of range of e.g. u8, + // then it is bigger than the entire range of i8, + // so `any_i8 - n` necessarily overflows i8. + Err(_) => None, + } } } + )+ - #[inline] + $( #[allow(unreachable_patterns)] - fn add_usize(&self, n: usize) -> Option<Self> { - match <$unsigned>::try_from(n) { - Ok(n_as_unsigned) => { - // Wrapping in unsigned space handles cases like - // `-120_i8.add_usize(200) == Some(80_i8)`, - // even though 200_usize is out of range for i8. - let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t; - if wrapped >= *self { - Some(wrapped) - } else { - None // Addition overflowed - } + #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")] + unsafe impl Step for $u_wider { + step_identical_methods!(); + + #[inline] + fn steps_between(start: &Self, end: &Self) -> Option<usize> { + if *start <= *end { + usize::try_from(*end - *start).ok() + } else { + None } - Err(_) => None, + } + + #[inline] + fn forward_checked(start: Self, n: usize) -> Option<Self> { + start.checked_add(n as Self) + } + + #[inline] + fn backward_checked(start: Self, n: usize) -> Option<Self> { + start.checked_sub(n as Self) } } - #[inline] #[allow(unreachable_patterns)] - fn sub_usize(&self, n: usize) -> Option<Self> { - match <$unsigned>::try_from(n) { - Ok(n_as_unsigned) => { - // Wrapping in unsigned space handles cases like - // `80_i8.sub_usize(200) == Some(-120_i8)`, - // even though 200_usize is out of range for i8. - let wrapped = (*self as $unsigned).wrapping_sub(n_as_unsigned) as $t; - if wrapped <= *self { - Some(wrapped) - } else { - None // Subtraction underflowed + #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")] + unsafe impl Step for $i_wider { + step_identical_methods!(); + + #[inline] + fn steps_between(start: &Self, end: &Self) -> Option<usize> { + if *start <= *end { + match end.checked_sub(*start) { + Some(result) => usize::try_from(result).ok(), + // If the difference is too big for e.g. i128, + // it's also going to be too big for usize with fewer bits.
+ None => None, } + } else { + None } - Err(_) => None, + } + + #[inline] + fn forward_checked(start: Self, n: usize) -> Option<Self> { + start.checked_add(n as Self) + } + + #[inline] + fn backward_checked(start: Self, n: usize) -> Option<Self> { + start.checked_sub(n as Self) } } + )+ + }; +} - step_identical_methods!(); - } - )*) +#[cfg(target_pointer_width = "64")] +step_integer_impls! { + narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [u64 i64], [usize isize]; + wider than usize: [u128 i128]; } -step_impl_unsigned!(usize u8 u16 u32 u64 u128); -step_impl_signed!([isize: usize][i8: u8][i16: u16]); -step_impl_signed!([i32: u32][i64: u64][i128: u128]); +#[cfg(target_pointer_width = "32")] +step_integer_impls! { + narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [usize isize]; + wider than usize: [u64 i64], [u128 i128]; +} + +#[cfg(target_pointer_width = "16")] +step_integer_impls! { + narrower than or same width as usize: [u8 i8], [u16 i16], [usize isize]; + wider than usize: [u32 i32], [u64 i64], [u128 i128]; +} macro_rules! range_exact_iter_impl { ($($t:ty)*) => ($( @@ -188,20 +414,6 @@ macro_rules! range_incl_exact_iter_impl { )*) } -macro_rules! range_trusted_len_impl { - ($($t:ty)*) => ($( - #[unstable(feature = "trusted_len", issue = "37572")] - unsafe impl TrustedLen for ops::Range<$t> { } - )*) -} - -macro_rules! range_incl_trusted_len_impl { - ($($t:ty)*) => ($( - #[unstable(feature = "trusted_len", issue = "37572")] - unsafe impl TrustedLen for ops::RangeInclusive<$t> { } - )*) -} - #[stable(feature = "rust1", since = "1.0.0")] impl<A: Step> Iterator for ops::Range<A> { type Item = A; @@ -209,16 +421,12 @@ impl<A: Step> Iterator for ops::Range<A> { #[inline] fn next(&mut self) -> Option<A> { if self.start < self.end { - // We check for overflow here, even though it can't actually - // happen. Adding this check does however help llvm vectorize loops - // for some ranges that don't get vectorized otherwise, - // and this won't actually result in an extra check in an optimized build. - if let Some(mut n) = self.start.add_usize(1) { - mem::swap(&mut n, &mut self.start); - Some(n) - } else { - None - } + // SAFETY: just checked precondition + // We use the unchecked version here, because + // this helps LLVM vectorize loops + // for some ranges that don't get vectorized otherwise. + let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) }; + Some(mem::replace(&mut self.start, n)) } else { None } @@ -226,17 +434,19 @@ impl<A: Step> Iterator for ops::Range<A> { #[inline] fn size_hint(&self) -> (usize, Option<usize>) { - match Step::steps_between(&self.start, &self.end) { - Some(hint) => (hint, Some(hint)), - None => (usize::MAX, None), + if self.start < self.end { + let hint = Step::steps_between(&self.start, &self.end); + (hint.unwrap_or(usize::MAX), hint) + } else { + (0, Some(0)) } } #[inline] fn nth(&mut self, n: usize) -> Option<A> { - if let Some(plus_n) = self.start.add_usize(n) { + if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) { if plus_n < self.end { - self.start = plus_n.add_one(); + self.start = Step::forward(plus_n.clone(), 1); return Some(plus_n); } }
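The rewritten `size_hint` above now special-cases empty ranges, since `steps_between` returns `None` whenever `start > end`; a standalone sketch of the resulting behavior:

```rust
fn main() {
    assert_eq!((3..10_u8).size_hint(), (7, Some(7)));
    // An "inverted" range is simply empty, not an error.
    assert_eq!((10..3_u8).size_hint(), (0, Some(0)));
}
```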
@@ -262,25 +472,42 @@ impl<A: Step> Iterator for ops::Range<A> { } // These macros generate `ExactSizeIterator` impls for various range types. -// Range<{u,i}64> and RangeInclusive<{u,i}{32,64,size}> are excluded -// because they cannot guarantee having a length <= usize::MAX, which is -// required by ExactSizeIterator. -range_exact_iter_impl!(usize u8 u16 u32 isize i8 i16 i32); -range_incl_exact_iter_impl!(u8 u16 i8 i16); - -// These macros generate `TrustedLen` impls. // -// They need to guarantee that .size_hint() is either exact, or that -// the upper bound is None when it does not fit the type limits. -range_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128); -range_incl_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128); +// +// * `ExactSizeIterator::len` is required to always return an exact `usize`, +// so no range can be longer than `usize::MAX`. +// * For integer types in `Range<_>` this is the case for types narrower than or as wide as `usize`. +// For integer types in `RangeInclusive<_>` +// this is the case for types *strictly narrower* than `usize` +// since e.g. `(0..=u64::MAX).len()` would be `u64::MAX + 1`. +range_exact_iter_impl! { + usize u8 u16 + isize i8 i16 + + // These are incorrect per the reasoning above, + // but removing them would be a breaking change as they were stabilized in Rust 1.0.0. + // So e.g. `(0..66_000_u32).len()` will compile without error or warnings + // on 16-bit platforms, but continue to give a wrong result. + u32 + i32 +} +range_incl_exact_iter_impl! { + u8 + i8 + + // These are incorrect per the reasoning above, + // but removing them would be a breaking change as they were stabilized in Rust 1.26.0. + // So e.g. `(0..=u16::MAX).len()` will compile without error or warnings + // on 16-bit platforms, but continue to give a wrong result. + u16 + i16 +}
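Concretely, `RangeInclusive<u16>` keeps its theoretically incorrect (but long-stabilized) impl, which is fine wherever `usize` is wider than 16 bits:

```rust
// On 32/64-bit targets this length fits in usize; on a 16-bit target the
// same call would wrap and give a wrong result, as the comments above note.
fn main() {
    assert_eq!((0..=u16::MAX).len(), 65536);
}
```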
#[stable(feature = "rust1", since = "1.0.0")] impl<A: Step> DoubleEndedIterator for ops::Range<A> { #[inline] fn next_back(&mut self) -> Option<A> { if self.start < self.end { - self.end = self.end.sub_one(); + self.end = Step::backward(self.end.clone(), 1); Some(self.end.clone()) } else { None @@ -289,9 +516,9 @@ impl<A: Step> DoubleEndedIterator for ops::Range<A> { #[inline] fn nth_back(&mut self, n: usize) -> Option<A> { - if let Some(minus_n) = self.end.sub_usize(n) { + if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) { if minus_n > self.start { - self.end = minus_n.sub_one(); + self.end = Step::backward(minus_n, 1); return Some(self.end.clone()); } } @@ -301,6 +528,9 @@ impl<A: Step> DoubleEndedIterator for ops::Range<A> { } } +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<A: Step> TrustedLen for ops::Range<A> {} + #[stable(feature = "fused", since = "1.26.0")] impl<A: Step> FusedIterator for ops::Range<A> {} @@ -310,9 +540,8 @@ impl<A: Step> Iterator for ops::RangeFrom<A> { #[inline] fn next(&mut self) -> Option<A> { - let mut n = self.start.add_one(); - mem::swap(&mut n, &mut self.start); - Some(n) + let n = Step::forward(self.start.clone(), 1); + Some(mem::replace(&mut self.start, n)) } #[inline] @@ -322,8 +551,16 @@ impl<A: Step> Iterator for ops::RangeFrom<A> { #[inline] fn nth(&mut self, n: usize) -> Option<A> { - let plus_n = self.start.add_usize(n).expect("overflow in RangeFrom::nth"); - self.start = plus_n.add_one(); + // If we would jump over the maximum value, panic immediately. + // This is consistent with behavior before the Step redesign, + // even though it's inconsistent with n `next` calls. + // To get consistent behavior, change it to use `forward` instead. + // This change should go through FCP separately from the redesign, so is for now left as a + // FIXME: make this consistent + let plus_n = + Step::forward_checked(self.start.clone(), n).expect("overflow in RangeFrom::nth"); + // The final step should always be debug-checked. + self.start = Step::forward(plus_n.clone(), 1); Some(plus_n) } } @@ -345,7 +582,7 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> { } let is_iterating = self.start < self.end; Some(if is_iterating { - let n = self.start.add_one(); + let n = Step::forward(self.start.clone(), 1); mem::replace(&mut self.start, n) } else { self.exhausted = true; @@ -371,12 +608,12 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> { return None; } - if let Some(plus_n) = self.start.add_usize(n) { + if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) { use crate::cmp::Ordering::*; match plus_n.partial_cmp(&self.end) { Some(Less) => { - self.start = plus_n.add_one(); + self.start = Step::forward(plus_n.clone(), 1); return Some(plus_n); } Some(Equal) => { @@ -407,7 +644,7 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> { let mut accum = init; while self.start < self.end { - let n = self.start.add_one(); + let n = Step::forward(self.start.clone(), 1); let n = mem::replace(&mut self.start, n); accum = f(accum, n)?; } @@ -446,7 +683,7 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> { } let is_iterating = self.start < self.end; Some(if is_iterating { - let n = self.end.sub_one(); + let n = Step::backward(self.end.clone(), 1); mem::replace(&mut self.end, n) } else { self.exhausted = true; @@ -460,12 +697,12 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> { return None; } - if let Some(minus_n) = self.end.sub_usize(n) { + if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) { use crate::cmp::Ordering::*; match minus_n.partial_cmp(&self.start) { Some(Greater) => { - self.end = minus_n.sub_one(); + self.end = Step::backward(minus_n.clone(), 1); return Some(minus_n); } Some(Equal) => { @@ -496,7 +733,7 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> { let mut accum = init; while self.start < self.end { - let n = self.end.sub_one(); + let n = Step::backward(self.end.clone(), 1); let n = mem::replace(&mut self.end, n); accum = f(accum, n)?; } @@ -511,5 +748,8 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> { } } +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<A: Step> TrustedLen for ops::RangeInclusive<A> {} + #[stable(feature = "fused", since = "1.26.0")] impl<A: Step> FusedIterator for ops::RangeInclusive<A> {} diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index a259e293b0c1a..9b4872f5d9f7a 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -749,6 +749,23 @@ $EndFeature, " } } + doc_comment! { + concat!("Unchecked integer addition. Computes `self + rhs`, assuming overflow +cannot occur. This results in undefined behavior when `self + rhs > ", stringify!($SelfT), +"::max_value()` or `self + rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_add(self, rhs: Self) -> Self { + intrinsics::unchecked_add(self, rhs) + } + } + doc_comment! { concat!("Checked integer subtraction. Computes `self - rhs`, returning `None` if overflow occurred. @@ -774,6 +791,23 @@ $EndFeature, " } } + doc_comment! { + concat!("Unchecked integer subtraction. Computes `self - rhs`, assuming overflow +cannot occur. 
This results in undefined behavior when `self - rhs > ", stringify!($SelfT), +"::max_value()` or `self - rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_sub(self, rhs: Self) -> Self { + intrinsics::unchecked_sub(self, rhs) + } + } + doc_comment! { concat!("Checked integer multiplication. Computes `self * rhs`, returning `None` if overflow occurred. @@ -799,6 +833,23 @@ $EndFeature, " } } + doc_comment! { + concat!("Unchecked integer multiplication. Computes `self * rhs`, assuming overflow +cannot occur. This results in undefined behavior when `self * rhs > ", stringify!($SelfT), +"::max_value()` or `self * rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_mul(self, rhs: Self) -> Self { + intrinsics::unchecked_mul(self, rhs) + } + } + doc_comment! { concat!("Checked integer division. Computes `self / rhs`, returning `None` if `rhs == 0` or the division results in overflow. @@ -2936,6 +2987,23 @@ assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(3), None);", $EndFeat } } + doc_comment! { + concat!("Unchecked integer addition. Computes `self + rhs`, assuming overflow +cannot occur. This results in undefined behavior when `self + rhs > ", stringify!($SelfT), +"::max_value()` or `self + rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_add(self, rhs: Self) -> Self { + intrinsics::unchecked_add(self, rhs) + } + } + doc_comment! { concat!("Checked integer subtraction. Computes `self - rhs`, returning `None` if overflow occurred. @@ -2959,6 +3027,23 @@ assert_eq!(0", stringify!($SelfT), ".checked_sub(1), None);", $EndFeature, " } } + doc_comment! { + concat!("Unchecked integer subtraction. Computes `self - rhs`, assuming overflow +cannot occur. This results in undefined behavior when `self - rhs > ", stringify!($SelfT), +"::max_value()` or `self - rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_sub(self, rhs: Self) -> Self { + intrinsics::unchecked_sub(self, rhs) + } + } + doc_comment! { concat!("Checked integer multiplication. Computes `self * rhs`, returning `None` if overflow occurred. @@ -2982,6 +3067,23 @@ assert_eq!(", stringify!($SelfT), "::MAX.checked_mul(2), None);", $EndFeature, " } } + doc_comment! { + concat!("Unchecked integer multiplication. Computes `self * rhs`, assuming overflow +cannot occur. 
This results in undefined behavior when `self * rhs > ", stringify!($SelfT), +"::max_value()` or `self * rhs < ", stringify!($SelfT), "::min_value()`."), + #[unstable( + feature = "unchecked_math", + reason = "niche optimization path", + issue = "none", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub unsafe fn unchecked_mul(self, rhs: Self) -> Self { + intrinsics::unchecked_mul(self, rhs) + } + } + doc_comment! { concat!("Checked integer division. Computes `self / rhs`, returning `None` if `rhs == 0`.
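The intended usage pattern for these intrinsics-backed methods is to establish the no-overflow precondition before entering `unsafe`; a minimal sketch (hypothetical `add_small` function, using the nightly feature gate from this diff):

```rust
#![feature(unchecked_math)]

fn add_small(x: u8) -> u8 {
    if x <= 200 {
        // SAFETY: x + 50 <= 250 <= u8::MAX, so overflow cannot occur.
        unsafe { x.unchecked_add(50) }
    } else {
        u8::MAX
    }
}

fn main() {
    assert_eq!(add_small(200), 250);
    assert_eq!(add_small(201), 255);
}
```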
diff --git a/src/libcore/tests/iter.rs b/src/libcore/tests/iter.rs index 7da02b11676ab..52cf068f0a567 100644 --- a/src/libcore/tests/iter.rs +++ b/src/libcore/tests/iter.rs @@ -2139,6 +2139,24 @@ fn test_range_inclusive_nth_back() { assert_eq!(ExactSizeIterator::is_empty(&r), true); } +#[test] +fn test_range_len() { + assert_eq!((0..10_u8).len(), 10); + assert_eq!((9..10_u8).len(), 1); + assert_eq!((10..10_u8).len(), 0); + assert_eq!((11..10_u8).len(), 0); + assert_eq!((100..10_u8).len(), 0); +} + +#[test] +fn test_range_inclusive_len() { + assert_eq!((0..=10_u8).len(), 11); + assert_eq!((9..=10_u8).len(), 2); + assert_eq!((10..=10_u8).len(), 1); + assert_eq!((11..=10_u8).len(), 0); + assert_eq!((100..=10_u8).len(), 0); +} + #[test] fn test_range_step() { #![allow(deprecated)] @@ -2509,42 +2527,91 @@ fn test_chain_fold() { } #[test] -fn test_step_replace_unsigned() { - let mut x = 4u32; - let y = x.replace_zero(); - assert_eq!(x, 0); - assert_eq!(y, 4); +fn test_steps_between() { + assert_eq!(Step::steps_between(&20_u8, &200_u8), Some(180_usize)); + assert_eq!(Step::steps_between(&-20_i8, &80_i8), Some(100_usize)); + assert_eq!(Step::steps_between(&-120_i8, &80_i8), Some(200_usize)); + assert_eq!(Step::steps_between(&20_u32, &4_000_100_u32), Some(4_000_080_usize)); + assert_eq!(Step::steps_between(&-20_i32, &80_i32), Some(100_usize)); + assert_eq!(Step::steps_between(&-2_000_030_i32, &2_000_050_i32), Some(4_000_080_usize)); - x = 5; - let y = x.replace_one(); - assert_eq!(x, 1); - assert_eq!(y, 5); + + // Skip u64/i64 to avoid differences with 32-bit vs 64-bit platforms + + assert_eq!(Step::steps_between(&20_u128, &200_u128), Some(180_usize)); + assert_eq!(Step::steps_between(&-20_i128, &80_i128), Some(100_usize)); + if cfg!(target_pointer_width = "64") { + assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_0009_u128), Some(usize::MAX)); + } + assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_000a_u128), None); + assert_eq!(Step::steps_between(&10_i128, &0x1_0000_0000_0000_000a_i128), None); + assert_eq!( + Step::steps_between(&-0x1_0000_0000_0000_0000_i128, &0x1_0000_0000_0000_0000_i128,), + None, + ); } #[test] -fn test_step_replace_signed() { - let mut x = 4i32; - let y = x.replace_zero(); - assert_eq!(x, 0); - assert_eq!(y, 4); +fn test_step_forward() { + assert_eq!(Step::forward_checked(55_u8, 200_usize), Some(255_u8)); + assert_eq!(Step::forward_checked(252_u8, 200_usize), None); + assert_eq!(Step::forward_checked(0_u8, 256_usize), None); + assert_eq!(Step::forward_checked(-110_i8, 200_usize), Some(90_i8)); + assert_eq!(Step::forward_checked(-110_i8, 248_usize), None); + assert_eq!(Step::forward_checked(-126_i8, 256_usize), None); + + assert_eq!(Step::forward_checked(35_u16, 100_usize), Some(135_u16)); + assert_eq!(Step::forward_checked(35_u16, 65500_usize), Some(u16::MAX)); + assert_eq!(Step::forward_checked(36_u16, 65500_usize), None); + assert_eq!(Step::forward_checked(-110_i16, 200_usize), Some(90_i16)); + assert_eq!(Step::forward_checked(-20_030_i16, 50_050_usize), Some(30_020_i16)); + assert_eq!(Step::forward_checked(-10_i16, 40_000_usize), None); + assert_eq!(Step::forward_checked(-10_i16, 70_000_usize), None); - x = 5; - let y = x.replace_one(); - assert_eq!(x, 1); - assert_eq!(y, 5); + assert_eq!(Step::forward_checked(10_u128, 70_000_usize), Some(70_010_u128)); + assert_eq!(Step::forward_checked(10_i128, 70_030_usize), Some(70_040_i128)); + assert_eq!( + Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0xff_usize), + Some(u128::MAX), + ); + assert_eq!( + Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0x100_usize), + None + ); + assert_eq!( + Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0xff_usize), + Some(i128::MAX), + ); + assert_eq!( + Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize), + None + ); } #[test] -fn test_step_replace_no_between() { - let mut x = 4u128; - let y = x.replace_zero(); - assert_eq!(x, 0); - assert_eq!(y, 4); +fn test_step_backward() { + assert_eq!(Step::backward_checked(255_u8, 200_usize), Some(55_u8)); + assert_eq!(Step::backward_checked(100_u8, 200_usize), None); + assert_eq!(Step::backward_checked(255_u8, 256_usize), None); + assert_eq!(Step::backward_checked(90_i8, 200_usize), Some(-110_i8)); + assert_eq!(Step::backward_checked(110_i8, 248_usize), None); + assert_eq!(Step::backward_checked(127_i8, 256_usize), None); + + assert_eq!(Step::backward_checked(135_u16, 100_usize), Some(35_u16)); + assert_eq!(Step::backward_checked(u16::MAX, 65500_usize), Some(35_u16)); + assert_eq!(Step::backward_checked(10_u16, 11_usize), None); + assert_eq!(Step::backward_checked(90_i16, 200_usize), Some(-110_i16)); + assert_eq!(Step::backward_checked(30_020_i16, 50_050_usize), Some(-20_030_i16)); + assert_eq!(Step::backward_checked(-10_i16, 40_000_usize), None); + assert_eq!(Step::backward_checked(-10_i16, 70_000_usize), None); - x = 5; - let y = x.replace_one(); - assert_eq!(x, 1); - assert_eq!(y, 5); + assert_eq!(Step::backward_checked(70_010_u128, 70_000_usize), Some(10_u128)); + assert_eq!(Step::backward_checked(70_020_i128, 70_030_usize), Some(-10_i128)); + assert_eq!(Step::backward_checked(10_u128, 7_usize), Some(3_u128)); + assert_eq!(Step::backward_checked(10_u128, 11_usize), None); + assert_eq!( + Step::backward_checked(-0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize), + Some(i128::MIN) + ); } #[test] diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs index e7d36d327cd89..d636542d699f9 100644 --- a/src/libcore/tests/lib.rs +++ b/src/libcore/tests/lib.rs @@ -22,6 +22,7 @@ #![feature(slice_partition_at_index)] #![feature(specialization)] #![feature(step_trait)] +#![feature(step_trait_ext)] #![feature(str_internals)] #![feature(test)] #![feature(trusted_len)] diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index 913ccf8e68089..6847b175e60eb 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -1138,6 +1138,16 @@ pub fn catch_fatal_errors<F: FnOnce() -> R, R>(f: F) -> Result<R, ErrorReported> }) } +/// Variant of `catch_fatal_errors` for the `interface::Result` return type +/// that also computes the exit code. +pub fn catch_with_exit_code(f: impl FnOnce() -> interface::Result<()>) -> i32 { + let result = catch_fatal_errors(f).and_then(|result| result); + match result { + Ok(()) => EXIT_SUCCESS, + Err(_) => EXIT_FAILURE, + } +}
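The same pattern in miniature (a generic sketch, not rustc's actual types): flatten "the closure failed" into a process exit code, as the rewritten `main` below does with the real helper.

```rust
const EXIT_SUCCESS: i32 = 0;
const EXIT_FAILURE: i32 = 1;

// rustc's version additionally routes `f` through `catch_fatal_errors`,
// which converts fatal-error panics into `Err(..)` first.
fn catch_with_exit_code(f: impl FnOnce() -> Result<(), ()>) -> i32 {
    match f() {
        Ok(()) => EXIT_SUCCESS,
        Err(_) => EXIT_FAILURE,
    }
}

fn main() {
    std::process::exit(catch_with_exit_code(|| Ok(())));
}
```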
lazy_static! { static ref DEFAULT_HOOK: Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static> = { let hook = panic::take_hook(); @@ -1228,12 +1238,12 @@ pub fn init_rustc_env_logger() { env_logger::init_from_env("RUSTC_LOG"); } -pub fn main() { +pub fn main() -> ! { let start = Instant::now(); init_rustc_env_logger(); let mut callbacks = TimePassesCallbacks::default(); install_ice_hook(); - let result = catch_fatal_errors(|| { + let exit_code = catch_with_exit_code(|| { let args = env::args_os() .enumerate() .map(|(i, arg)| { @@ -1246,13 +1256,8 @@ pub fn main() { }) .collect::<Vec<_>>(); run_compiler(&args, &mut callbacks, None, None) - }) - .and_then(|result| result); - let exit_code = match result { - Ok(_) => EXIT_SUCCESS, - Err(_) => EXIT_FAILURE, - }; + }); // The extra `\t` is necessary to align this label with the others. print_time_passes_entry(callbacks.time_passes, "\ttotal", start.elapsed()); - process::exit(exit_code); + process::exit(exit_code) } diff --git a/src/librustc_index/vec.rs b/src/librustc_index/vec.rs index a84f89c7cd950..67dcea58cf82b 100644 --- a/src/librustc_index/vec.rs +++ b/src/librustc_index/vec.rs @@ -65,7 +65,7 @@ impl Idx for u32 { /// `u32::MAX`. You can also customize things like the `Debug` impl, /// what traits are derived, and so forth via the macro. #[macro_export] -#[allow_internal_unstable(step_trait, rustc_attrs)] +#[allow_internal_unstable(step_trait, step_trait_ext, rustc_attrs)] macro_rules! newtype_index { // ---- public rules ---- @@ -181,7 +181,7 @@ macro_rules! newtype_index { } } - impl ::std::iter::Step for $type { + unsafe impl ::std::iter::Step for $type { #[inline] fn steps_between(start: &Self, end: &Self) -> Option<usize> { <usize as ::std::iter::Step>::steps_between( @@ -191,33 +191,13 @@ macro_rules! newtype_index { } #[inline] - fn replace_one(&mut self) -> Self { - ::std::mem::replace(self, Self::from_u32(1)) + fn forward_checked(start: Self, u: usize) -> Option<Self> { + Self::index(start).checked_add(u).map(Self::from_usize) } #[inline] - fn replace_zero(&mut self) -> Self { - ::std::mem::replace(self, Self::from_u32(0)) - } - - #[inline] - fn add_one(&self) -> Self { - Self::from_usize(Self::index(*self) + 1) - } - - #[inline] - fn sub_one(&self) -> Self { - Self::from_usize(Self::index(*self) - 1) - } - - #[inline] - fn add_usize(&self, u: usize) -> Option<Self> { - Self::index(*self).checked_add(u).map(Self::from_usize) - } - - #[inline] - fn sub_usize(&self, u: usize) -> Option<Self> { - Self::index(*self).checked_sub(u).map(Self::from_usize) + fn backward_checked(start: Self, u: usize) -> Option<Self> { + Self::index(start).checked_sub(u).map(Self::from_usize) } } diff --git a/src/librustc_mir/const_eval/eval_queries.rs b/src/librustc_mir/const_eval/eval_queries.rs index b6c635fb22ab5..0637ebf959e5a 100644 --- a/src/librustc_mir/const_eval/eval_queries.rs +++ b/src/librustc_mir/const_eval/eval_queries.rs @@ -66,7 +66,7 @@ fn eval_body_using_ecx<'mir, 'tcx>( intern_kind, ret, body.ignore_interior_mut_in_const_validation, - )?; + ); debug!("eval_body_using_ecx done: {:?}", *ret); Ok(ret) diff --git a/src/librustc_mir/const_eval/mod.rs b/src/librustc_mir/const_eval/mod.rs index e1146ef30d131..7f557e340bbc8 100644 --- a/src/librustc_mir/const_eval/mod.rs +++ b/src/librustc_mir/const_eval/mod.rs @@ -53,7 +53,7 @@ pub(crate) fn const_caller_location( let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), false); let loc_place = ecx.alloc_caller_location(file, line, col); - intern_const_alloc_recursive(&mut ecx, InternKind::Constant, loc_place, false).unwrap(); + intern_const_alloc_recursive(&mut ecx, 
InternKind::Constant, loc_place, false); ConstValue::Scalar(loc_place.ptr) } diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 9bb7c879505f6..eba4dd336ade2 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -871,6 +871,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Our result will later be validated anyway, and there seems no good reason // to have to fail early here. This is also more consistent with // `Memory::get_static_alloc` which has to use `const_eval_raw` to avoid cycles. + // FIXME: We can hit delay_span_bug if this is an invalid const, interning finds + // that problem, but we never run validation to show an error. Can we ensure + // this does not happen? let val = self.tcx.const_eval_raw(param_env.and(gid))?; self.raw_const_to_mplace(val) } diff --git a/src/librustc_mir/interpret/intern.rs b/src/librustc_mir/interpret/intern.rs index 1c44101595d4f..02a7f24a1e351 100644 --- a/src/librustc_mir/interpret/intern.rs +++ b/src/librustc_mir/interpret/intern.rs @@ -5,10 +5,9 @@ use super::validity::RefTracking; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_errors::ErrorReported; use rustc_hir as hir; -use rustc_middle::mir::interpret::{ErrorHandled, InterpResult}; -use rustc_middle::ty::{self, Ty}; +use rustc_middle::mir::interpret::InterpResult; +use rustc_middle::ty::{self, query::TyCtxtAt, Ty}; use rustc_ast::ast::Mutability; @@ -29,43 +28,45 @@ struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>> { /// The ectx from which we intern. ecx: &'rt mut InterpCx<'mir, 'tcx, M>, /// Previously encountered safe references. - ref_tracking: &'rt mut RefTracking<(MPlaceTy<'tcx>, Mutability, InternMode)>, + ref_tracking: &'rt mut RefTracking<(MPlaceTy<'tcx>, InternMode)>, /// A list of all encountered allocations. After type-based interning, we traverse this list to /// also intern allocations that are only referenced by a raw pointer or inside a union. leftover_allocations: &'rt mut FxHashSet<AllocId>, - /// The root node of the value that we're looking at. This field is never mutated and only used + /// The root kind of the value that we're looking at. This field is never mutated and only used /// for sanity assertions that will ICE when `const_qualif` screws up. mode: InternMode, - /// This field stores the mutability of the value *currently* being checked. - /// When encountering a mutable reference, we determine the pointee mutability - /// taking into account the mutability of the context: `& &mut i32` is entirely immutable, - /// despite the nested mutable reference! - /// The field gets updated when an `UnsafeCell` is encountered. - mutability: Mutability, + /// This field stores whether we are *currently* inside an `UnsafeCell`. This can affect + /// the intern mode of references we encounter. + inside_unsafe_cell: bool, /// This flag is to avoid triggering UnsafeCells are not allowed behind references in constants /// for promoteds. /// It's a copy of `mir::Body`'s ignore_interior_mut_in_const_validation field - ignore_interior_mut_in_const_validation: bool, + ignore_interior_mut_in_const: bool, } #[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)] enum InternMode { - /// Mutable references must in fact be immutable due to their surrounding immutability in a - /// `static`. In a `static mut` we start out as mutable and thus can also contain further `&mut` - /// that will actually be treated as mutable.
- Static, - /// UnsafeCell is OK in the value of a constant: `const FOO = Cell::new(0)` creates - /// a new cell every time it is used. + /// A static and its current mutability. Below shared references inside a `static mut`, + /// this is *immutable*, and below mutable references inside an `UnsafeCell`, this + /// is *mutable*. + Static(hir::Mutability), + /// The "base value" of a const, which can have `UnsafeCell` (as in `const FOO: Cell<i32>`), + /// but that interior mutability is simply ignored. ConstBase, - /// `UnsafeCell` ICEs. - Const, + /// The "inner values" of a const with references, where `UnsafeCell` is an error. + ConstInner, } /// Signalling data structure to ensure we don't recurse /// into the memory of other constants or statics struct IsStaticOrFn; +fn mutable_memory_in_const(tcx: TyCtxtAt<'_>, kind: &str) { + // FIXME: show this in validation instead so we can point at where in the value the error is? + tcx.sess.span_err(tcx.span, &format!("mutable memory ({}) is not allowed in constant", kind)); +} + /// Intern an allocation without looking at its children. /// `mode` is the mode of the environment where we found this pointer. /// `mutability` is the mutability of the place to be interned; even if that says @@ -75,12 +76,11 @@ struct IsStaticOrFn; fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>>( ecx: &'rt mut InterpCx<'mir, 'tcx, M>, leftover_allocations: &'rt mut FxHashSet<AllocId>, - mode: InternMode, alloc_id: AllocId, - mutability: Mutability, + mode: InternMode, ty: Option<Ty<'tcx>>, -) -> InterpResult<'tcx, Option<IsStaticOrFn>> { - trace!("InternVisitor::intern {:?} with {:?}", alloc_id, mutability,); +) -> Option<IsStaticOrFn> { + trace!("intern_shallow {:?} with {:?}", alloc_id, mode); // remove allocation let tcx = ecx.tcx; let (kind, mut alloc) = match ecx.memory.alloc_map.remove(&alloc_id) { @@ -89,14 +89,15 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>>( // Pointer not found in local memory map. It is either a pointer to the global // map, or dangling. // If the pointer is dangling (neither in local nor global memory), we leave it - // to validation to error. The `delay_span_bug` ensures that we don't forget such - // a check in validation. + // to validation to error -- it has the much better error messages, pointing out where + // in the value the dangling reference lies. + // The `delay_span_bug` ensures that we don't forget such a check in validation. if tcx.get_global_alloc(alloc_id).is_none() { tcx.sess.delay_span_bug(ecx.tcx.span, "tried to intern dangling pointer"); } // treat dangling pointers like other statics // just to stop trying to recurse into them - return Ok(Some(IsStaticOrFn)); + return Some(IsStaticOrFn); } }; // This match is just a canary for future changes to `MemoryKind`, which most likely need @@ -107,45 +108,45 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>>( // Set allocation mutability as appropriate. This is used by LLVM to put things into // read-only memory, and also by Miri when evaluating other globals that // access this one. - if mode == InternMode::Static { - // When `ty` is `None`, we assume no interior mutability. + if let InternMode::Static(mutability) = mode { + // For this, we need to take into account `UnsafeCell`. When `ty` is `None`, we assume + // no interior mutability. let frozen = ty.map_or(true, |ty| ty.is_freeze(ecx.tcx.tcx, ecx.param_env, ecx.tcx.span)); // For statics, allocation mutability is the combination of the place mutability and // the type mutability.
// The entire allocation needs to be mutable if it contains an `UnsafeCell` anywhere. - if mutability == Mutability::Not && frozen { + let immutable = mutability == Mutability::Not && frozen; + if immutable { alloc.mutability = Mutability::Not; } else { // Just making sure we are not "upgrading" an immutable allocation to mutable. assert_eq!(alloc.mutability, Mutability::Mut); } } else { - // We *could* be non-frozen at `ConstBase`, for constants like `Cell::new(0)`. - // But we still intern that as immutable as the memory cannot be changed once the - // initial value was computed. - // Constants are never mutable. - assert_eq!( - mutability, - Mutability::Not, - "Something went very wrong: mutability requested for a constant" - ); + // No matter what, *constants are never mutable*. Mutating them is UB. + // See const_eval::machine::MemoryExtra::can_access_statics for why + // immutability is so important. + + // There are no sensible checks we can do here; grep for `mutable_memory_in_const` to + // find the checks we are doing elsewhere to avoid even getting here for memory + // that "wants" to be mutable. alloc.mutability = Mutability::Not; }; // link the alloc id to the actual allocation let alloc = tcx.intern_const_alloc(alloc); leftover_allocations.extend(alloc.relocations().iter().map(|&(_, ((), reloc))| reloc)); tcx.set_alloc_id_memory(alloc_id, alloc); - Ok(None) + None } impl<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>> InternVisitor<'rt, 'mir, 'tcx, M> { fn intern_shallow( &mut self, alloc_id: AllocId, - mutability: Mutability, + mode: InternMode, ty: Option<Ty<'tcx>>, - ) -> InterpResult<'tcx, Option<IsStaticOrFn>> { - intern_shallow(self.ecx, self.leftover_allocations, self.mode, alloc_id, mutability, ty) + ) -> Option<IsStaticOrFn> { + intern_shallow(self.ecx, self.leftover_allocations, alloc_id, mode, ty) } } @@ -166,22 +167,22 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx>> ValueVisitor<'mir ) -> InterpResult<'tcx> { if let Some(def) = mplace.layout.ty.ty_adt_def() { if Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type() { + if self.mode == InternMode::ConstInner && !self.ignore_interior_mut_in_const { + // We do not actually make this memory mutable. But in case the user + // *expected* it to be mutable, make sure we error. This is just a + // sanity check to prevent users from accidentally exploiting the UB + // they caused. It also helps us to find cases where const-checking + // failed to prevent an `UnsafeCell` (but as `ignore_interior_mut_in_const` + // shows, that part is not airtight). + mutable_memory_in_const(self.ecx.tcx, "`UnsafeCell`"); + } // We are crossing over an `UnsafeCell`, we can mutate again. This means that // References we encounter inside here are interned as pointing to mutable // allocations. - let old = std::mem::replace(&mut self.mutability, Mutability::Mut); - if !self.ignore_interior_mut_in_const_validation { - assert_ne!( - self.mode, - InternMode::Const, - "UnsafeCells are not allowed behind references in constants. This should \ have been prevented statically by const qualification. If this were \ allowed one would be able to change a constant at one use site and other \ use sites could observe that mutation.", - ); - } + // Remember the `old` value to handle nested `UnsafeCell`.
+                let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
                 let walked = self.walk_aggregate(mplace, fields);
-                self.mutability = old;
+                self.inside_unsafe_cell = old;
                 return walked;
             }
         }
@@ -191,78 +192,92 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx>> ValueVisitor<'mir
     fn visit_value(&mut self, mplace: MPlaceTy<'tcx>) -> InterpResult<'tcx> {
         // Handle Reference types, as these are the only relocations supported by const eval.
         // Raw pointers (and boxes) are handled by the `leftover_relocations` logic.
+        let tcx = self.ecx.tcx;
         let ty = mplace.layout.ty;
-        if let ty::Ref(_, referenced_ty, mutability) = ty.kind {
+        if let ty::Ref(_, referenced_ty, ref_mutability) = ty.kind {
             let value = self.ecx.read_immediate(mplace.into())?;
             let mplace = self.ecx.ref_to_mplace(value)?;
+            assert_eq!(mplace.layout.ty, referenced_ty);
             // Handle trait object vtables.
             if let ty::Dynamic(..) =
-                self.ecx.tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind
+                tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind
             {
-                // Validation has already errored on an invalid vtable pointer so we can safely not
-                // do anything if this is not a real pointer.
+                // Validation will error (with a better message) on an invalid vtable pointer
+                // so we can safely not do anything if this is not a real pointer.
                 if let Scalar::Ptr(vtable) = mplace.meta.unwrap_meta() {
-                    // Explicitly choose `Immutable` here, since vtables are immutable, even
+                    // Explicitly choose const mode here, since vtables are immutable, even
                     // if the reference of the fat pointer is mutable.
-                    self.intern_shallow(vtable.alloc_id, Mutability::Not, None)?;
+                    self.intern_shallow(vtable.alloc_id, InternMode::ConstInner, None);
                 } else {
-                    self.ecx().tcx.sess.delay_span_bug(
-                        rustc_span::DUMMY_SP,
-                        "vtables pointers cannot be integer pointers",
-                    );
+                    // Let validation show the error message, but make sure it *does* error.
+                    tcx.sess
+                        .delay_span_bug(tcx.span, "vtable pointers cannot be integer pointers");
                 }
             }
             // Check if we have encountered this pointer+layout combination before.
             // Only recurse for allocation-backed pointers.
             if let Scalar::Ptr(ptr) = mplace.ptr {
-                // We do not have any `frozen` logic here, because it's essentially equivalent to
-                // the mutability except for the outermost item. Only `UnsafeCell` can "unfreeze",
-                // and we check that in `visit_aggregate`.
-                // This is not an inherent limitation, but one that we know to be true, because
-                // const qualification enforces it. We can lift it in the future.
-                match (self.mode, mutability) {
-                    // immutable references are fine everywhere
-                    (_, hir::Mutability::Not) => {}
-                    // all is "good and well" in the unsoundness of `static mut`
+                // Compute the mode with which we intern this.
+                let ref_mode = match self.mode {
+                    InternMode::Static(mutbl) => {
+                        // In statics, merge outer mutability with reference mutability and
+                        // take into account whether we are in an `UnsafeCell`.
 
-                    // mutable references are ok in `static`. Either they are treated as immutable
-                    // because they are behind an immutable one, or they are behind an `UnsafeCell`
-                    // and thus ok.
-                    (InternMode::Static, hir::Mutability::Mut) => {}
-                    // we statically prevent `&mut T` via `const_qualif` and double check this here
-                    (InternMode::ConstBase | InternMode::Const, hir::Mutability::Mut) => {
-                        match referenced_ty.kind {
-                            ty::Array(_, n)
-                                if n.eval_usize(self.ecx.tcx.tcx, self.ecx.param_env) == 0 => {}
-                            ty::Slice(_)
-                                if mplace.meta.unwrap_meta().to_machine_usize(self.ecx)? == 0 => {}
-                            _ => bug!("const qualif failed to prevent mutable references"),
+                        // The only way a mutable reference actually works as a mutable reference is
+                        // by being in a `static mut` directly or behind another mutable reference.
+                        // If there's an immutable reference or we are inside a `static`, then our
+                        // mutable reference is equivalent to an immutable one. As an example:
+                        // `&&mut Foo` is semantically equivalent to `&&Foo`
+                        match ref_mutability {
+                            _ if self.inside_unsafe_cell => {
+                                // Inside an `UnsafeCell` is like inside a `static mut`, the "outer"
+                                // mutability does not matter.
+                                InternMode::Static(ref_mutability)
+                            }
+                            Mutability::Not => {
+                                // A shared reference, things become immutable.
+                                // We do *not* consider `freeze` here -- that is done more precisely
+                                // when traversing the referenced data (by tracking `UnsafeCell`).
+                                InternMode::Static(Mutability::Not)
+                            }
+                            Mutability::Mut => {
+                                // Mutable reference.
+                                InternMode::Static(mutbl)
+                            }
                         }
                     }
-                }
-                // Compute the mutability with which we'll start visiting the allocation. This is
-                // what gets changed when we encounter an `UnsafeCell`.
-                //
-                // The only way a mutable reference actually works as a mutable reference is
-                // by being in a `static mut` directly or behind another mutable reference.
-                // If there's an immutable reference or we are inside a static, then our
-                // mutable reference is equivalent to an immutable one. As an example:
-                // `&&mut Foo` is semantically equivalent to `&&Foo`
-                let mutability = self.mutability.and(mutability);
-                // Recursing behind references changes the intern mode for constants in order to
-                // cause assertions to trigger if we encounter any `UnsafeCell`s.
-                let mode = match self.mode {
-                    InternMode::ConstBase => InternMode::Const,
-                    other => other,
+                    InternMode::ConstBase | InternMode::ConstInner => {
+                        // Ignore `UnsafeCell`, everything is immutable. Do some sanity checking
+                        // for mutable references that we encounter -- they must all be ZST.
+                        // This helps to prevent users from accidentally exploiting UB that they
+                        // caused (by somehow getting a mutable reference in a `const`).
+                        if ref_mutability == Mutability::Mut {
+                            match referenced_ty.kind {
+                                ty::Array(_, n)
+                                    if n.eval_usize(tcx.tcx, self.ecx.param_env) == 0 => {}
+                                ty::Slice(_)
+                                    if mplace.meta.unwrap_meta().to_machine_usize(self.ecx)?
+                                        == 0 => {}
+                                _ => mutable_memory_in_const(tcx, "`&mut`"),
+                            }
+                        } else {
+                            // A shared reference. We cannot check `freeze` here due to references
+                            // like `&dyn Trait` that are actually immutable. We do check for
+                            // concrete `UnsafeCell` when traversing the pointee though (if it is
+                            // a new allocation, not yet interned).
+                        }
+                        // Go on with the "inner" rules.
+                        InternMode::ConstInner
+                    }
                 };
-                match self.intern_shallow(ptr.alloc_id, mutability, Some(mplace.layout.ty))? {
+                match self.intern_shallow(ptr.alloc_id, ref_mode, Some(referenced_ty)) {
                     // No need to recurse, these are interned already and statics may have
                     // cycles, so we don't want to recurse there
                     Some(IsStaticOrFn) => {}
                     // intern everything referenced by this value. The mutability is taken from the
                    // reference. It is checked above that mutable references only happen in
                    // `static mut`
-                    None => self.ref_tracking.track((mplace, mutability, mode), || ()),
+                    None => self.ref_tracking.track((mplace, ref_mode), || ()),
                 }
             }
             Ok(())
@@ -273,6 +288,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx>> ValueVisitor<'mir
     }
 }
 
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
 pub enum InternKind {
     /// The `mutability` of the static, ignoring the type which may have interior mutability.
     Static(hir::Mutability),
@@ -281,71 +297,78 @@ pub enum InternKind {
     ConstProp,
 }
 
+/// Intern `ret` and everything it references.
+///
+/// This *cannot raise an interpreter error*. Doing so is left to validation, which
+/// tracks where in the value we are and thus can show much better error messages.
+/// Any errors here would anyway be turned into `const_err` lints, whereas validation failures
+/// are hard errors.
 pub fn intern_const_alloc_recursive<'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>>(
     ecx: &mut InterpCx<'mir, 'tcx, M>,
     intern_kind: InternKind,
     ret: MPlaceTy<'tcx>,
-    ignore_interior_mut_in_const_validation: bool,
-) -> InterpResult<'tcx>
-where
+    ignore_interior_mut_in_const: bool,
+) where
     'tcx: 'mir,
 {
     let tcx = ecx.tcx;
-    let (base_mutability, base_intern_mode) = match intern_kind {
-        // `static mut` doesn't care about interior mutability, it's mutable anyway
-        InternKind::Static(mutbl) => (mutbl, InternMode::Static),
+    let base_intern_mode = match intern_kind {
+        InternKind::Static(mutbl) => InternMode::Static(mutbl),
         // FIXME: what about array lengths, array initializers?
-        InternKind::Constant | InternKind::ConstProp => (Mutability::Not, InternMode::ConstBase),
-        InternKind::Promoted => (Mutability::Not, InternMode::ConstBase),
+        InternKind::Constant | InternKind::ConstProp | InternKind::Promoted => {
+            InternMode::ConstBase
+        }
     };
 
     // Type based interning.
-    // `ref_tracking` tracks typed references we have seen and still need to crawl for
+    // `ref_tracking` tracks typed references we have already interned and still need to crawl for
     // more typed information inside them.
     // `leftover_allocations` collects *all* allocations we see, because some might not
    // be available in a typed way. They get interned at the end.
-    let mut ref_tracking = RefTracking::new((ret, base_mutability, base_intern_mode));
+    let mut ref_tracking = RefTracking::empty();
     let leftover_allocations = &mut FxHashSet::default();
 
     // start with the outermost allocation
     intern_shallow(
         ecx,
         leftover_allocations,
-        base_intern_mode,
         // The outermost allocation must exist, because we allocated it with
         // `Memory::allocate`.
         ret.ptr.assert_ptr().alloc_id,
-        base_mutability,
+        base_intern_mode,
         Some(ret.layout.ty),
-    )?;
+    );
 
-    while let Some(((mplace, mutability, mode), _)) = ref_tracking.todo.pop() {
-        let interned = InternVisitor {
+    ref_tracking.track((ret, base_intern_mode), || ());
+
+    while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() {
+        let res = InternVisitor {
             ref_tracking: &mut ref_tracking,
             ecx,
             mode,
             leftover_allocations,
-            mutability,
-            ignore_interior_mut_in_const_validation,
+            ignore_interior_mut_in_const,
+            inside_unsafe_cell: false,
        }
        .visit_value(mplace);
-        if let Err(error) = interned {
-            // This can happen when e.g. the tag of an enum is not a valid discriminant. We do have
-            // to read enum discriminants in order to find references in enum variant fields.
-            if let err_ub!(ValidationFailure(_)) = error.kind {
-                let err = crate::const_eval::error_to_const_error(&ecx, error);
-                match err.struct_error(
-                    ecx.tcx,
-                    "it is undefined behavior to use this value",
-                    |mut diag| {
-                        diag.note(crate::const_eval::note_on_undefined_behavior_error());
-                        diag.emit();
-                    },
-                ) {
-                    ErrorHandled::TooGeneric
-                    | ErrorHandled::Reported(ErrorReported)
-                    | ErrorHandled::Linted => {}
-                }
+        // We deliberately *ignore* interpreter errors here. When there is a problem, the remaining
+        // references are "leftover"-interned, and later validation will show a proper error
+        // and point at the right part of the value causing the problem.
+        match res {
+            Ok(()) => {}
+            Err(error) => {
+                ecx.tcx.sess.delay_span_bug(
+                    ecx.tcx.span,
+                    "error during interning should later cause validation failure",
+                );
+                // Some errors shouldn't come up because creating them causes
+                // an allocation, which we should avoid. When that happens,
+                // dedicated error variants should be introduced instead.
+                assert!(
+                    !error.kind.allocates(),
+                    "interning encountered allocating error: {}",
+                    error
+                );
+            }
         }
     }
@@ -366,26 +389,27 @@ where
             InternKind::Static(_) => {}
             // Raw pointers in promoteds may only point to immutable things so we mark
             // everything as immutable.
-            // It is UB to mutate through a raw pointer obtained via an immutable reference.
+            // It is UB to mutate through a raw pointer obtained via an immutable reference:
             // Since all references and pointers inside a promoted must by their very definition
             // be created from an immutable reference (and promotion also excludes interior
             // mutability), mutating through them would be UB.
             // There's no way we can check whether the user is using raw pointers correctly,
             // so all we can do is mark this as immutable here.
             InternKind::Promoted => {
+                // See const_eval::machine::MemoryExtra::can_access_statics for why
+                // immutability is so important.
                 alloc.mutability = Mutability::Not;
             }
             InternKind::Constant | InternKind::ConstProp => {
-                // If it's a constant, it *must* be immutable.
-                // We cannot have mutable memory inside a constant.
-                // We use `delay_span_bug` here, because this can be reached in the presence
-                // of fancy transmutes.
-                if alloc.mutability == Mutability::Mut {
-                    // For better errors later, mark the allocation as immutable
-                    // (on top of the delayed ICE).
-                    alloc.mutability = Mutability::Not;
-                    ecx.tcx.sess.delay_span_bug(ecx.tcx.span, "mutable allocation in constant");
-                }
+                // If it's a constant, we should not have any "leftovers" as everything
+                // is tracked by const-checking.
+                // FIXME: downgrade this to a warning? It rejects some legitimate consts,
+                // such as `const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;`.
+                ecx.tcx
+                    .sess
+                    .span_err(ecx.tcx.span, "untyped pointers are not allowed in constant");
+                // For better errors later, mark the allocation as immutable.
+                alloc.mutability = Mutability::Not;
             }
         }
         let alloc = tcx.intern_const_alloc(alloc);
@@ -396,13 +420,13 @@ where
             }
         }
     } else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
-        // dangling pointer
-        throw_ub_format!("encountered dangling pointer in final constant")
+        // Codegen does not like dangling pointers, and generally `tcx` assumes that
+        // all allocations referenced anywhere actually exist. So, make sure we error here.
+        ecx.tcx.sess.span_err(ecx.tcx.span, "encountered dangling pointer in final constant");
     } else if ecx.tcx.get_global_alloc(alloc_id).is_none() {
-        // We have hit an `AllocId` that is neither in local or global memory and isn't marked
-        // as dangling by local memory.
+        // We have hit an `AllocId` that is neither in local nor global memory and isn't
+        // marked as dangling by local memory. That should be impossible.
         span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id);
     }
-    Ok(())
 }
diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs
index e898f22ec230d..966edd923415b 100644
--- a/src/librustc_mir/transform/const_prop.rs
+++ b/src/librustc_mir/transform/const_prop.rs
@@ -702,8 +702,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             )) => l.is_bits() && r.is_bits(),
             interpret::Operand::Indirect(_) if mir_opt_level >= 2 => {
                 let mplace = op.assert_mem_place(&self.ecx);
-                intern_const_alloc_recursive(&mut self.ecx, InternKind::ConstProp, mplace, false)
-                    .expect("failed to intern alloc");
+                intern_const_alloc_recursive(&mut self.ecx, InternKind::ConstProp, mplace, false);
                 true
             }
             _ => false,
diff --git a/src/test/ui/consts/dangling-alloc-id-ice.rs b/src/test/ui/consts/dangling-alloc-id-ice.rs
index dbc50f1fbd4b4..3b7f1de5b9bea 100644
--- a/src/test/ui/consts/dangling-alloc-id-ice.rs
+++ b/src/test/ui/consts/dangling-alloc-id-ice.rs
@@ -1,11 +1,13 @@
 // https://github.com/rust-lang/rust/issues/55223
+#![allow(const_err)]
 
 union Foo<'a> {
     y: &'a (),
     long_live_the_unit: &'static (),
 }
 
-const FOO: &() = { //~ ERROR any use of this value will cause an error
+const FOO: &() = { //~ ERROR it is undefined behavior to use this value
+//~^ ERROR encountered dangling pointer in final constant
     let y = ();
     unsafe { Foo { y: &y }.long_live_the_unit }
 };
diff --git a/src/test/ui/consts/dangling-alloc-id-ice.stderr b/src/test/ui/consts/dangling-alloc-id-ice.stderr
index 0e213555052c8..14a49810b9de5 100644
--- a/src/test/ui/consts/dangling-alloc-id-ice.stderr
+++ b/src/test/ui/consts/dangling-alloc-id-ice.stderr
@@ -1,13 +1,25 @@
-error: any use of this value will cause an error
-  --> $DIR/dangling-alloc-id-ice.rs:8:1
+error: encountered dangling pointer in final constant
+  --> $DIR/dangling-alloc-id-ice.rs:9:1
   |
 LL | / const FOO: &() = {
+LL | |
 LL | |     let y = ();
 LL | |     unsafe { Foo { y: &y }.long_live_the_unit }
 LL | | };
-   | |__^ encountered dangling pointer in final constant
+   | |__^
+
+error[E0080]: it is undefined behavior to use this value
+  --> $DIR/dangling-alloc-id-ice.rs:9:1
+  |
+LL | / const FOO: &() = {
+LL | |
+LL | |     let y = ();
+LL | |     unsafe { Foo { y: &y }.long_live_the_unit }
+LL | | };
+   | |__^ type validation failed: encountered a dangling reference (use-after-free)
   |
-   = note: `#[deny(const_err)]` on by default
+   = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
 
-error: aborting due to previous error
+error: aborting due to 2 previous errors
 
+For more information about this error, try `rustc --explain E0080`.
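To make the "untyped pointers" rule above concrete: interning walks a value by type, and
only references reveal their pointee, so memory reachable solely through a raw pointer is
picked up by the untyped "leftover" pass, which constants now reject. An illustrative
sketch (constant names invented; the second case relies on `&42` being promoted, which it
normally is):

    const VIA_REF: &i32 = &42;          // typed reference: interned by the type-based walk
    const VIA_PROMOTED: *const i32 = &42 as *const _;
        // `&42` is promoted and interned on its own, so the raw pointer targets
        // already-interned memory and no leftover remains: still accepted
    const VIA_LEFTOVER: *const Vec<i32> = &Vec::new() as *const _;
        // `Vec::new()` has a destructor and cannot be promoted, so its allocation is
        // reachable only through the raw pointer: rejected with "untyped pointers are
        // not allowed in constant" (the FIXME above notes this may be relaxed)
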
diff --git a/src/test/ui/consts/dangling_raw_ptr.rs b/src/test/ui/consts/dangling_raw_ptr.rs index c2d8e6d421a28..ddd1fb1ba76e1 100644 --- a/src/test/ui/consts/dangling_raw_ptr.rs +++ b/src/test/ui/consts/dangling_raw_ptr.rs @@ -1,4 +1,4 @@ -const FOO: *const u32 = { //~ ERROR any use of this value will cause an error +const FOO: *const u32 = { //~ ERROR encountered dangling pointer in final constant let x = 42; &x }; diff --git a/src/test/ui/consts/dangling_raw_ptr.stderr b/src/test/ui/consts/dangling_raw_ptr.stderr index 4d4c2876c4598..a79ac62d5cdbd 100644 --- a/src/test/ui/consts/dangling_raw_ptr.stderr +++ b/src/test/ui/consts/dangling_raw_ptr.stderr @@ -1,13 +1,11 @@ -error: any use of this value will cause an error +error: encountered dangling pointer in final constant --> $DIR/dangling_raw_ptr.rs:1:1 | LL | / const FOO: *const u32 = { LL | | let x = 42; LL | | &x LL | | }; - | |__^ encountered dangling pointer in final constant - | - = note: `#[deny(const_err)]` on by default + | |__^ error: aborting due to previous error diff --git a/src/test/ui/consts/miri_unleashed/mutable_const.rs b/src/test/ui/consts/miri_unleashed/mutable_const.rs deleted file mode 100644 index f8aa652827381..0000000000000 --- a/src/test/ui/consts/miri_unleashed/mutable_const.rs +++ /dev/null @@ -1,25 +0,0 @@ -// compile-flags: -Zunleash-the-miri-inside-of-you -// normalize-stderr-test "alloc[0-9]+" -> "allocN" - -#![deny(const_err)] // The `allow` variant is tested by `mutable_const2`. -//~^ NOTE lint level -// Here we check that even though `MUTABLE_BEHIND_RAW` is created from a mutable -// allocation, we intern that allocation as *immutable* and reject writes to it. -// We avoid the `delay_span_bug` ICE by having compilation fail via the `deny` above. - -use std::cell::UnsafeCell; - -// make sure we do not just intern this as mutable -const MUTABLE_BEHIND_RAW: *mut i32 = &UnsafeCell::new(42) as *const _ as *mut _; - -const MUTATING_BEHIND_RAW: () = { //~ NOTE - // Test that `MUTABLE_BEHIND_RAW` is actually immutable, by doing this at const time. - unsafe { - *MUTABLE_BEHIND_RAW = 99 //~ ERROR any use of this value will cause an error - //~^ NOTE: which is read-only - // FIXME would be good to match more of the error message here, but looks like we - // normalize *after* checking the annoations here. - } -}; - -fn main() {} diff --git a/src/test/ui/consts/miri_unleashed/mutable_const.stderr b/src/test/ui/consts/miri_unleashed/mutable_const.stderr deleted file mode 100644 index 4772baf9d9a01..0000000000000 --- a/src/test/ui/consts/miri_unleashed/mutable_const.stderr +++ /dev/null @@ -1,39 +0,0 @@ -error: any use of this value will cause an error - --> $DIR/mutable_const.rs:18:9 - | -LL | / const MUTATING_BEHIND_RAW: () = { -LL | | // Test that `MUTABLE_BEHIND_RAW` is actually immutable, by doing this at const time. -LL | | unsafe { -LL | | *MUTABLE_BEHIND_RAW = 99 - | | ^^^^^^^^^^^^^^^^^^^^^^^^ writing to allocN which is read-only -... | -LL | | } -LL | | }; - | |__- - | -note: the lint level is defined here - --> $DIR/mutable_const.rs:4:9 - | -LL | #![deny(const_err)] // The `allow` variant is tested by `mutable_const2`. 
- | ^^^^^^^^^ - -warning: skipping const checks - | -help: skipping check that does not even have a feature gate - --> $DIR/mutable_const.rs:13:38 - | -LL | const MUTABLE_BEHIND_RAW: *mut i32 = &UnsafeCell::new(42) as *const _ as *mut _; - | ^^^^^^^^^^^^^^^^^^^^ -help: skipping check for `const_raw_ptr_deref` feature - --> $DIR/mutable_const.rs:18:9 - | -LL | *MUTABLE_BEHIND_RAW = 99 - | ^^^^^^^^^^^^^^^^^^^^^^^^ -help: skipping check for `const_mut_refs` feature - --> $DIR/mutable_const.rs:18:9 - | -LL | *MUTABLE_BEHIND_RAW = 99 - | ^^^^^^^^^^^^^^^^^^^^^^^^ - -error: aborting due to previous error; 1 warning emitted - diff --git a/src/test/ui/consts/miri_unleashed/mutable_const2.rs b/src/test/ui/consts/miri_unleashed/mutable_const2.rs deleted file mode 100644 index 867091af7ba76..0000000000000 --- a/src/test/ui/consts/miri_unleashed/mutable_const2.rs +++ /dev/null @@ -1,16 +0,0 @@ -// compile-flags: -Zunleash-the-miri-inside-of-you -// failure-status: 101 -// rustc-env:RUST_BACKTRACE=0 -// normalize-stderr-test "note: rustc 1.* running on .*" -> "note: rustc VERSION running on TARGET" -// normalize-stderr-test "note: compiler flags: .*" -> "note: compiler flags: FLAGS" -// normalize-stderr-test "interpret/intern.rs:[0-9]+:[0-9]+" -> "interpret/intern.rs:LL:CC" - -#![allow(const_err)] - -use std::cell::UnsafeCell; - -// make sure we do not just intern this as mutable -const MUTABLE_BEHIND_RAW: *mut i32 = &UnsafeCell::new(42) as *const _ as *mut _; -//~^ ERROR: mutable allocation in constant - -fn main() {} diff --git a/src/test/ui/consts/miri_unleashed/mutable_const2.stderr b/src/test/ui/consts/miri_unleashed/mutable_const2.stderr deleted file mode 100644 index 98a1c8bdd8967..0000000000000 --- a/src/test/ui/consts/miri_unleashed/mutable_const2.stderr +++ /dev/null @@ -1,29 +0,0 @@ -warning: skipping const checks - | -help: skipping check that does not even have a feature gate - --> $DIR/mutable_const2.rs:13:38 - | -LL | const MUTABLE_BEHIND_RAW: *mut i32 = &UnsafeCell::new(42) as *const _ as *mut _; - | ^^^^^^^^^^^^^^^^^^^^ - -warning: 1 warning emitted - -error: internal compiler error: mutable allocation in constant - --> $DIR/mutable_const2.rs:13:1 - | -LL | const MUTABLE_BEHIND_RAW: *mut i32 = &UnsafeCell::new(42) as *const _ as *mut _; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -thread 'rustc' panicked at 'no errors encountered even though `delay_span_bug` issued', src/librustc_errors/lib.rs:366:17 -note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace - -error: internal compiler error: unexpected panic - -note: the compiler unexpectedly panicked. this is a bug. - -note: we would appreciate a bug report: https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.md#bug-reports - -note: rustc VERSION running on TARGET - -note: compiler flags: FLAGS - diff --git a/src/test/ui/consts/miri_unleashed/mutable_references.rs b/src/test/ui/consts/miri_unleashed/mutable_references.rs index ed2ca86ba2c6b..ca927ef4a518b 100644 --- a/src/test/ui/consts/miri_unleashed/mutable_references.rs +++ b/src/test/ui/consts/miri_unleashed/mutable_references.rs @@ -17,12 +17,11 @@ struct Foo(T); // this is fine for the same reason as `BAR`. 
 static BOO: &mut Foo<()> = &mut Foo(());
 
+// interior mutability is fine
 struct Meh {
     x: &'static UnsafeCell<i32>,
 }
-
 unsafe impl Sync for Meh {}
-
 static MEH: Meh = Meh {
     x: &UnsafeCell::new(42),
 };
diff --git a/src/test/ui/consts/miri_unleashed/mutable_references.stderr b/src/test/ui/consts/miri_unleashed/mutable_references.stderr
index 83c4e0ceba0de..7109ffd8b61d7 100644
--- a/src/test/ui/consts/miri_unleashed/mutable_references.stderr
+++ b/src/test/ui/consts/miri_unleashed/mutable_references.stderr
@@ -1,5 +1,5 @@
 error[E0594]: cannot assign to `*OH_YES`, as `OH_YES` is an immutable static item
-  --> $DIR/mutable_references.rs:37:5
+  --> $DIR/mutable_references.rs:36:5
   |
 LL |     *OH_YES = 99;
   |     ^^^^^^^^^^^^ cannot assign
@@ -22,12 +22,12 @@ help: skipping check for `const_mut_refs` feature
 LL | static BOO: &mut Foo<()> = &mut Foo(());
   |                            ^^^^^^^^^^^^
 help: skipping check that does not even have a feature gate
-  --> $DIR/mutable_references.rs:27:8
+  --> $DIR/mutable_references.rs:26:8
   |
 LL |     x: &UnsafeCell::new(42),
   |        ^^^^^^^^^^^^^^^^^^^^
 help: skipping check for `const_mut_refs` feature
-  --> $DIR/mutable_references.rs:31:27
+  --> $DIR/mutable_references.rs:30:27
   |
 LL | static OH_YES: &mut i32 = &mut 42;
   |                           ^^^^^^^
diff --git a/src/test/ui/consts/miri_unleashed/mutable_references_err.rs b/src/test/ui/consts/miri_unleashed/mutable_references_err.rs
new file mode 100644
index 0000000000000..06fb27bcff866
--- /dev/null
+++ b/src/test/ui/consts/miri_unleashed/mutable_references_err.rs
@@ -0,0 +1,37 @@
+// compile-flags: -Zunleash-the-miri-inside-of-you
+
+#![allow(const_err)]
+
+use std::cell::UnsafeCell;
+
+// this test ensures that our mutability story is sound
+
+struct Meh {
+    x: &'static UnsafeCell<i32>,
+}
+unsafe impl Sync for Meh {}
+
+// the following will never be ok! no interior mut behind consts, because
+// all allocs interned here will be marked immutable.
+const MUH: Meh = Meh { //~ ERROR: mutable memory (`UnsafeCell`) is not allowed in constant
+    x: &UnsafeCell::new(42),
+};
+
+struct Synced {
+    x: UnsafeCell<i32>,
+}
+unsafe impl Sync for Synced {}
+
+// Make sure we also catch this behind a type-erased `dyn Trait` reference.
+const SNEAKY: &dyn Sync = &Synced { x: UnsafeCell::new(42) };
+//~^ ERROR: mutable memory (`UnsafeCell`) is not allowed in constant
+
+// Make sure we also catch mutable references.
+const BLUNT: &mut i32 = &mut 42; +//~^ ERROR: mutable memory (`&mut`) is not allowed in constant + +fn main() { + unsafe { + *MUH.x.get() = 99; + } +} diff --git a/src/test/ui/consts/miri_unleashed/mutable_references_err.stderr b/src/test/ui/consts/miri_unleashed/mutable_references_err.stderr new file mode 100644 index 0000000000000..45e7d5a2cc3b3 --- /dev/null +++ b/src/test/ui/consts/miri_unleashed/mutable_references_err.stderr @@ -0,0 +1,40 @@ +error: mutable memory (`UnsafeCell`) is not allowed in constant + --> $DIR/mutable_references_err.rs:16:1 + | +LL | / const MUH: Meh = Meh { +LL | | x: &UnsafeCell::new(42), +LL | | }; + | |__^ + +error: mutable memory (`UnsafeCell`) is not allowed in constant + --> $DIR/mutable_references_err.rs:26:1 + | +LL | const SNEAKY: &dyn Sync = &Synced { x: UnsafeCell::new(42) }; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: mutable memory (`&mut`) is not allowed in constant + --> $DIR/mutable_references_err.rs:30:1 + | +LL | const BLUNT: &mut i32 = &mut 42; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: skipping const checks + | +help: skipping check that does not even have a feature gate + --> $DIR/mutable_references_err.rs:17:8 + | +LL | x: &UnsafeCell::new(42), + | ^^^^^^^^^^^^^^^^^^^^ +help: skipping check that does not even have a feature gate + --> $DIR/mutable_references_err.rs:26:27 + | +LL | const SNEAKY: &dyn Sync = &Synced { x: UnsafeCell::new(42) }; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +help: skipping check for `const_mut_refs` feature + --> $DIR/mutable_references_err.rs:30:25 + | +LL | const BLUNT: &mut i32 = &mut 42; + | ^^^^^^^ + +error: aborting due to 3 previous errors; 1 warning emitted + diff --git a/src/test/ui/consts/miri_unleashed/mutable_references_ice.rs b/src/test/ui/consts/miri_unleashed/mutable_references_ice.rs deleted file mode 100644 index 7388aad2a9e53..0000000000000 --- a/src/test/ui/consts/miri_unleashed/mutable_references_ice.rs +++ /dev/null @@ -1,29 +0,0 @@ -// compile-flags: -Zunleash-the-miri-inside-of-you -// failure-status: 101 -// rustc-env:RUST_BACKTRACE=0 -// normalize-stderr-test "note: rustc 1.* running on .*" -> "note: rustc VERSION running on TARGET" -// normalize-stderr-test "note: compiler flags: .*" -> "note: compiler flags: FLAGS" -// normalize-stderr-test "interpret/intern.rs:[0-9]+:[0-9]+" -> "interpret/intern.rs:LL:CC" - -#![allow(const_err)] - -use std::cell::UnsafeCell; - -// this test ICEs to ensure that our mutability story is sound - -struct Meh { - x: &'static UnsafeCell, -} - -unsafe impl Sync for Meh {} - -// the following will never be ok! -const MUH: Meh = Meh { - x: &UnsafeCell::new(42), -}; - -fn main() { - unsafe { - *MUH.x.get() = 99; - } -} diff --git a/src/test/ui/consts/miri_unleashed/mutable_references_ice.stderr b/src/test/ui/consts/miri_unleashed/mutable_references_ice.stderr deleted file mode 100644 index 7ddf77af4d3af..0000000000000 --- a/src/test/ui/consts/miri_unleashed/mutable_references_ice.stderr +++ /dev/null @@ -1,25 +0,0 @@ -thread 'rustc' panicked at 'assertion failed: `(left != right)` - left: `Const`, - right: `Const`: UnsafeCells are not allowed behind references in constants. This should have been prevented statically by const qualification. 
If this were allowed one would be able to change a constant at one use site and other use sites could observe that mutation.', src/librustc_mir/interpret/intern.rs:LL:CC -note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace - -error: internal compiler error: unexpected panic - -note: the compiler unexpectedly panicked. this is a bug. - -note: we would appreciate a bug report: https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.md#bug-reports - -note: rustc VERSION running on TARGET - -note: compiler flags: FLAGS - -warning: skipping const checks - | -help: skipping check that does not even have a feature gate - --> $DIR/mutable_references_ice.rs:22:8 - | -LL | x: &UnsafeCell::new(42), - | ^^^^^^^^^^^^^^^^^^^^ - -warning: 1 warning emitted - diff --git a/src/test/ui/consts/miri_unleashed/raw_mutable_const.rs b/src/test/ui/consts/miri_unleashed/raw_mutable_const.rs new file mode 100644 index 0000000000000..cabd754e01ac3 --- /dev/null +++ b/src/test/ui/consts/miri_unleashed/raw_mutable_const.rs @@ -0,0 +1,10 @@ +// compile-flags: -Zunleash-the-miri-inside-of-you + +#![allow(const_err)] + +use std::cell::UnsafeCell; + +const MUTABLE_BEHIND_RAW: *mut i32 = &UnsafeCell::new(42) as *const _ as *mut _; +//~^ ERROR: untyped pointers are not allowed in constant + +fn main() {} diff --git a/src/test/ui/consts/miri_unleashed/raw_mutable_const.stderr b/src/test/ui/consts/miri_unleashed/raw_mutable_const.stderr new file mode 100644 index 0000000000000..b5b5a965295a7 --- /dev/null +++ b/src/test/ui/consts/miri_unleashed/raw_mutable_const.stderr @@ -0,0 +1,16 @@ +error: untyped pointers are not allowed in constant + --> $DIR/raw_mutable_const.rs:7:1 + | +LL | const MUTABLE_BEHIND_RAW: *mut i32 = &UnsafeCell::new(42) as *const _ as *mut _; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: skipping const checks + | +help: skipping check that does not even have a feature gate + --> $DIR/raw_mutable_const.rs:7:38 + | +LL | const MUTABLE_BEHIND_RAW: *mut i32 = &UnsafeCell::new(42) as *const _ as *mut _; + | ^^^^^^^^^^^^^^^^^^^^ + +error: aborting due to previous error; 1 warning emitted + diff --git a/src/test/ui/consts/raw-ptr-const.rs b/src/test/ui/consts/raw-ptr-const.rs new file mode 100644 index 0000000000000..00fad046b557d --- /dev/null +++ b/src/test/ui/consts/raw-ptr-const.rs @@ -0,0 +1,10 @@ +#![allow(const_err)] // make sure we hit the `delay_span_bug` + +// This is a regression test for a `delay_span_bug` during interning when a constant +// evaluates to a (non-dangling) raw pointer. For now this errors; potentially it +// could also be allowed. 
+
+const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;
+//~^ ERROR untyped pointers are not allowed in constant
+
+fn main() {}
diff --git a/src/test/ui/consts/raw-ptr-const.stderr b/src/test/ui/consts/raw-ptr-const.stderr
new file mode 100644
index 0000000000000..974b1c3ff45b5
--- /dev/null
+++ b/src/test/ui/consts/raw-ptr-const.stderr
@@ -0,0 +1,8 @@
+error: untyped pointers are not allowed in constant
+  --> $DIR/raw-ptr-const.rs:7:1
+  |
+LL | const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error: aborting due to previous error
+
diff --git a/src/test/ui/impl-trait/example-calendar.rs b/src/test/ui/impl-trait/example-calendar.rs
index f1b1656745e7c..fafab8a102a90 100644
--- a/src/test/ui/impl-trait/example-calendar.rs
+++ b/src/test/ui/impl-trait/example-calendar.rs
@@ -2,6 +2,7 @@
 #![feature(fn_traits,
            step_trait,
+           step_trait_ext,
            unboxed_closures,
 )]
 
@@ -10,7 +11,6 @@
 //! Originally converted to Rust by [Daniel Keep](https://github.com/DanielKeep).
 
 use std::fmt::Write;
-use std::mem;
 
 /// Date representation.
 #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
@@ -156,32 +156,16 @@ impl<'a, 'b> std::ops::Add<&'b NaiveDate> for &'a NaiveDate {
     }
 }
 
-impl std::iter::Step for NaiveDate {
+unsafe impl std::iter::Step for NaiveDate {
     fn steps_between(_: &Self, _: &Self) -> Option<usize> {
         unimplemented!()
     }
 
-    fn replace_one(&mut self) -> Self {
-        mem::replace(self, NaiveDate(0, 0, 1))
+    fn forward_checked(start: Self, n: usize) -> Option<Self> {
+        Some((0..n).fold(start, |x, _| x.succ()))
     }
 
-    fn replace_zero(&mut self) -> Self {
-        mem::replace(self, NaiveDate(0, 0, 0))
-    }
-
-    fn add_one(&self) -> Self {
-        self.succ()
-    }
-
-    fn sub_one(&self) -> Self {
-        unimplemented!()
-    }
-
-    fn add_usize(&self, _: usize) -> Option<Self> {
-        unimplemented!()
-    }
-
-    fn sub_usize(&self, _: usize) -> Option<Self> {
+    fn backward_checked(_: Self, _: usize) -> Option<Self> {
         unimplemented!()
     }
 }
diff --git a/src/tools/clippy/src/driver.rs b/src/tools/clippy/src/driver.rs
index 2c699998ea90e..1ce0300f23904 100644
--- a/src/tools/clippy/src/driver.rs
+++ b/src/tools/clippy/src/driver.rs
@@ -296,7 +296,7 @@ pub fn main() {
     rustc_driver::init_rustc_env_logger();
     lazy_static::initialize(&ICE_HOOK);
     exit(
-        rustc_driver::catch_fatal_errors(move || {
+        rustc_driver::catch_with_exit_code(move || {
             let mut orig_args: Vec<String> = env::args().collect();
 
             if orig_args.iter().any(|a| a == "--version" || a == "-V") {
@@ -411,7 +411,5 @@ pub fn main() {
             if clippy_enabled { &mut clippy } else { &mut default };
         rustc_driver::run_compiler(&args, callbacks, None, None)
     })
-    .and_then(|result| result)
-    .is_err() as i32,
     )
 }
diff --git a/src/tools/rustbook/Cargo.toml b/src/tools/rustbook/Cargo.toml
index e6e758dccdf0a..ff41197faa1a6 100644
--- a/src/tools/rustbook/Cargo.toml
+++ b/src/tools/rustbook/Cargo.toml
@@ -23,6 +23,6 @@ codespan-reporting = { version = "0.5", optional = true }
 rustc-workspace-hack = "1.0.0"
 
 [dependencies.mdbook]
-version = "0.3.0"
+version = "0.3.7"
 default-features = false
 features = ["search"]
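
Taken together, the mutability rules this patch enforces can be summarized with a short
illustrative sketch (names invented; the lines placing `UnsafeCell` behind a reference or
using `&mut` additionally assume -Zunleash-the-miri-inside-of-you, as in the tests above,
since const-checking rejects them even before interning runs):

    use std::cell::{Cell, UnsafeCell};

    struct Meh {
        x: &'static UnsafeCell<i32>,
    }
    unsafe impl Sync for Meh {}

    static MEH: Meh = Meh { x: &UnsafeCell::new(42) }; // ok: interior mutability in a static
    const FOO: Cell<i32> = Cell::new(0);               // ok: `UnsafeCell` in a const's base
                                                       //     value is ignored (`ConstBase`)
    const MUH: Meh = Meh { x: &UnsafeCell::new(42) };  // error: mutable memory (`UnsafeCell`)
                                                       //        behind a reference (`ConstInner`)
    const BLUNT: &mut i32 = &mut 42;                   // error: mutable memory (`&mut`)
    const EMPTY: &mut [i32] = &mut [];                 // ok: zero-sized, nothing to mutate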