From 513ea6405b121e589dafad738b600cab5baa2ab6 Mon Sep 17 00:00:00 2001 From: Bastian Kauschke Date: Mon, 23 Mar 2020 18:39:25 +0100 Subject: [PATCH 01/26] add missing visit_consts --- src/librustc/traits/structural_impls.rs | 14 ++++++++++++++ src/librustc/ty/fold.rs | 20 +++++++++++++++----- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/src/librustc/traits/structural_impls.rs b/src/librustc/traits/structural_impls.rs index a5efea9e5fa4d..b1fb02a67b3ff 100644 --- a/src/librustc/traits/structural_impls.rs +++ b/src/librustc/traits/structural_impls.rs @@ -273,6 +273,20 @@ impl<'tcx> TypeVisitor<'tcx> for BoundNamesCollector { t.super_visit_with(self) } + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool { + match c.val { + ty::ConstKind::Bound(debruijn, bound_var) if debruijn == self.binder_index => { + self.types.insert( + bound_var.as_u32(), + Symbol::intern(&format!("^{}", bound_var.as_u32())), + ); + } + _ => (), + } + + c.super_visit_with(self) + } + fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { match r { ty::ReLateBound(index, br) if *index == self.binder_index => match br { diff --git a/src/librustc/ty/fold.rs b/src/librustc/ty/fold.rs index 4adca6c7d9772..3f4f2407f1e6e 100644 --- a/src/librustc/ty/fold.rs +++ b/src/librustc/ty/fold.rs @@ -978,17 +978,27 @@ impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector { // ignore the inputs to a projection, as they may not appear // in the normalized form if self.just_constrained { - match t.kind { - ty::Projection(..) | ty::Opaque(..) => { - return false; - } - _ => {} + if let ty::Projection(..) | ty::Opaque(..) = t.kind { + return false; } } t.super_visit_with(self) } + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool { + // if we are only looking for "constrained" region, we have to + // ignore the inputs of an unevaluated const, as they may not appear + // in the normalized form + if self.just_constrained { + if let ty::ConstKind::Unevaluated(..) 
= c.val { + return false; + } + } + + c.super_visit_with(self) + } + fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { if let ty::ReLateBound(debruijn, br) = *r { if debruijn == self.current_index { From bda976d42db4abc496bb8673246b34e667b48e6f Mon Sep 17 00:00:00 2001 From: Bastian Kauschke Date: Mon, 23 Mar 2020 19:20:28 +0100 Subject: [PATCH 02/26] add missing const super folds --- src/librustc_trait_selection/traits/project.rs | 1 + src/librustc_trait_selection/traits/query/normalize.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/src/librustc_trait_selection/traits/project.rs b/src/librustc_trait_selection/traits/project.rs index 6b14f6959bfb9..0968f470d13c0 100644 --- a/src/librustc_trait_selection/traits/project.rs +++ b/src/librustc_trait_selection/traits/project.rs @@ -387,6 +387,7 @@ impl<'a, 'b, 'tcx> TypeFolder<'tcx> for AssocTypeNormalizer<'a, 'b, 'tcx> { } fn fold_const(&mut self, constant: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> { + let constant = constant.super_fold_with(self); constant.eval(self.selcx.tcx(), self.param_env) } } diff --git a/src/librustc_trait_selection/traits/query/normalize.rs b/src/librustc_trait_selection/traits/query/normalize.rs index 99412fafcfa8d..77128bc8c8ab4 100644 --- a/src/librustc_trait_selection/traits/query/normalize.rs +++ b/src/librustc_trait_selection/traits/query/normalize.rs @@ -191,6 +191,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> { } fn fold_const(&mut self, constant: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> { + let constant = constant.super_fold_with(self); constant.eval(self.infcx.tcx, self.param_env) } } From d7ecc8c9bc2727579b22f155f1b7da42b6eee8e3 Mon Sep 17 00:00:00 2001 From: Bastian Kauschke Date: Mon, 23 Mar 2020 19:22:19 +0100 Subject: [PATCH 03/26] query normalize_generic_arg_after_erasing_regions --- src/librustc/dep_graph/dep_node.rs | 2 +- src/librustc/query/mod.rs | 8 ++++---- src/librustc/ty/normalize_erasing_regions.rs | 14 +++++++++++--- src/librustc/ty/query/keys.rs | 13 ++++++++++++- src/librustc/ty/query/mod.rs | 2 +- src/librustc/ty/subst.rs | 8 ++++++++ src/librustc_session/session.rs | 8 ++++---- src/librustc_traits/normalize_erasing_regions.rs | 15 ++++++++------- 8 files changed, 49 insertions(+), 21 deletions(-) diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index b32fa2cda8012..61d63f4623249 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -59,7 +59,7 @@ use crate::traits::query::{ CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal, CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal, }; -use crate::ty::subst::SubstsRef; +use crate::ty::subst::{GenericArg, SubstsRef}; use crate::ty::{self, ParamEnvAnd, Ty, TyCtxt}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; diff --git a/src/librustc/query/mod.rs b/src/librustc/query/mod.rs index 54f5103f736ec..86855cb0ef0c2 100644 --- a/src/librustc/query/mod.rs +++ b/src/librustc/query/mod.rs @@ -9,7 +9,7 @@ use crate::traits::query::{ }; use crate::ty::query::queries; use crate::ty::query::QueryDescription; -use crate::ty::subst::SubstsRef; +use crate::ty::subst::{GenericArg, SubstsRef}; use crate::ty::{self, ParamEnvAnd, Ty, TyCtxt}; use rustc_hir::def_id::{CrateNum, DefId, LocalDefId}; @@ -1114,9 +1114,9 @@ rustc_queries! { } /// Do not call this query directly: invoke `normalize_erasing_regions` instead. 
- query normalize_ty_after_erasing_regions( - goal: ParamEnvAnd<'tcx, Ty<'tcx>> - ) -> Ty<'tcx> { + query normalize_generic_arg_after_erasing_regions( + goal: ParamEnvAnd<'tcx, GenericArg<'tcx>> + ) -> GenericArg<'tcx> { desc { "normalizing `{:?}`", goal } } diff --git a/src/librustc/ty/normalize_erasing_regions.rs b/src/librustc/ty/normalize_erasing_regions.rs index cbaabd8e1f137..e49bf6f8e67dc 100644 --- a/src/librustc/ty/normalize_erasing_regions.rs +++ b/src/librustc/ty/normalize_erasing_regions.rs @@ -4,8 +4,8 @@ //! //! The methods in this file use a `TypeFolder` to recursively process //! contents, invoking the underlying -//! `normalize_ty_after_erasing_regions` query for each type found -//! within. (This underlying query is what is cached.) +//! `normalize_generic_arg_after_erasing_regions` query for each type +//! or constant found within. (This underlying query is what is cached.) use crate::ty::fold::{TypeFoldable, TypeFolder}; use crate::ty::subst::{Subst, SubstsRef}; @@ -94,6 +94,14 @@ impl TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> { } fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.tcx.normalize_ty_after_erasing_regions(self.param_env.and(ty)) + self.tcx + .normalize_generic_arg_after_erasing_regions(self.param_env.and(ty.into())) + .expect_ty() + } + + fn fold_const(&mut self, c: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> { + self.tcx + .normalize_generic_arg_after_erasing_regions(self.param_env.and(c.into())) + .expect_const() } } diff --git a/src/librustc/ty/query/keys.rs b/src/librustc/ty/query/keys.rs index 6073d3a545f6d..6be1f04efca2b 100644 --- a/src/librustc/ty/query/keys.rs +++ b/src/librustc/ty/query/keys.rs @@ -5,7 +5,7 @@ use crate::mir; use crate::traits; use crate::ty::fast_reject::SimplifiedType; use crate::ty::query::caches::DefaultCacheSelector; -use crate::ty::subst::SubstsRef; +use crate::ty::subst::{GenericArg, SubstsRef}; use crate::ty::{self, Ty, TyCtxt}; use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE}; use rustc_span::symbol::Symbol; @@ -194,6 +194,17 @@ impl<'tcx> Key for ty::PolyTraitRef<'tcx> { } } +impl<'tcx> Key for GenericArg<'tcx> { + type CacheSelector = DefaultCacheSelector; + + fn query_crate(&self) -> CrateNum { + LOCAL_CRATE + } + fn default_span(&self, _: TyCtxt<'_>) -> Span { + DUMMY_SP + } +} + impl<'tcx> Key for &'tcx ty::Const<'tcx> { type CacheSelector = DefaultCacheSelector; diff --git a/src/librustc/ty/query/mod.rs b/src/librustc/ty/query/mod.rs index 32ba13b1dbe9a..1094eb4940346 100644 --- a/src/librustc/ty/query/mod.rs +++ b/src/librustc/ty/query/mod.rs @@ -31,7 +31,7 @@ use crate::traits::specialization_graph; use crate::traits::Clauses; use crate::traits::{self, Vtable}; use crate::ty::steal::Steal; -use crate::ty::subst::SubstsRef; +use crate::ty::subst::{GenericArg, SubstsRef}; use crate::ty::util::AlwaysRequiresDrop; use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt}; use crate::util::common::ErrorReported; diff --git a/src/librustc/ty/subst.rs b/src/librustc/ty/subst.rs index a3acc14856e1f..0f4485a705046 100644 --- a/src/librustc/ty/subst.rs +++ b/src/librustc/ty/subst.rs @@ -128,6 +128,14 @@ impl<'tcx> GenericArg<'tcx> { _ => bug!("expected a type, but found another kind"), } } + + /// Unpack the `GenericArg` as a const when it is known certainly to be a const. 
+ pub fn expect_const(self) -> &'tcx ty::Const<'tcx> { + match self.unpack() { + GenericArgKind::Const(c) => c, + _ => bug!("expected a const, but found another kind"), + } + } } impl<'a, 'tcx> Lift<'tcx> for GenericArg<'a> { diff --git a/src/librustc_session/session.rs b/src/librustc_session/session.rs index 80f59aff69137..b3d75143c5639 100644 --- a/src/librustc_session/session.rs +++ b/src/librustc_session/session.rs @@ -150,7 +150,7 @@ pub struct PerfStats { /// Total number of values canonicalized queries constructed. pub queries_canonicalized: AtomicUsize, /// Number of times this query is invoked. - pub normalize_ty_after_erasing_regions: AtomicUsize, + pub normalize_generic_arg_after_erasing_regions: AtomicUsize, /// Number of times this query is invoked. pub normalize_projection_ty: AtomicUsize, } @@ -707,8 +707,8 @@ impl Session { self.perf_stats.queries_canonicalized.load(Ordering::Relaxed) ); println!( - "normalize_ty_after_erasing_regions: {}", - self.perf_stats.normalize_ty_after_erasing_regions.load(Ordering::Relaxed) + "normalize_generic_arg_after_erasing_regions: {}", + self.perf_stats.normalize_generic_arg_after_erasing_regions.load(Ordering::Relaxed) ); println!( "normalize_projection_ty: {}", @@ -1080,7 +1080,7 @@ fn build_session_( symbol_hash_time: Lock::new(Duration::from_secs(0)), decode_def_path_tables_time: Lock::new(Duration::from_secs(0)), queries_canonicalized: AtomicUsize::new(0), - normalize_ty_after_erasing_regions: AtomicUsize::new(0), + normalize_generic_arg_after_erasing_regions: AtomicUsize::new(0), normalize_projection_ty: AtomicUsize::new(0), }, code_stats: Default::default(), diff --git a/src/librustc_traits/normalize_erasing_regions.rs b/src/librustc_traits/normalize_erasing_regions.rs index c2fb237a05b54..065cf38eb2490 100644 --- a/src/librustc_traits/normalize_erasing_regions.rs +++ b/src/librustc_traits/normalize_erasing_regions.rs @@ -1,23 +1,24 @@ use rustc::traits::query::NoSolution; use rustc::ty::query::Providers; -use rustc::ty::{self, ParamEnvAnd, Ty, TyCtxt}; +use rustc::ty::subst::GenericArg; +use rustc::ty::{self, ParamEnvAnd, TyCtxt}; use rustc_infer::infer::TyCtxtInferExt; use rustc_trait_selection::traits::query::normalize::AtExt; use rustc_trait_selection::traits::{Normalized, ObligationCause}; use std::sync::atomic::Ordering; crate fn provide(p: &mut Providers<'_>) { - *p = Providers { normalize_ty_after_erasing_regions, ..*p }; + *p = Providers { normalize_generic_arg_after_erasing_regions, ..*p }; } -fn normalize_ty_after_erasing_regions<'tcx>( +fn normalize_generic_arg_after_erasing_regions<'tcx>( tcx: TyCtxt<'tcx>, - goal: ParamEnvAnd<'tcx, Ty<'tcx>>, -) -> Ty<'tcx> { - debug!("normalize_ty_after_erasing_regions(goal={:#?})", goal); + goal: ParamEnvAnd<'tcx, GenericArg<'tcx>>, +) -> GenericArg<'tcx> { + debug!("normalize_generic_arg_after_erasing_regions(goal={:#?})", goal); let ParamEnvAnd { param_env, value } = goal; - tcx.sess.perf_stats.normalize_ty_after_erasing_regions.fetch_add(1, Ordering::Relaxed); + tcx.sess.perf_stats.normalize_generic_arg_after_erasing_regions.fetch_add(1, Ordering::Relaxed); tcx.infer_ctxt().enter(|infcx| { let cause = ObligationCause::dummy(); match infcx.at(&cause, param_env).normalize(&value) { From 03bb3bde90a52cfc1d64c15263013d97a3d7aad2 Mon Sep 17 00:00:00 2001 From: bishtpawan Date: Tue, 24 Mar 2020 13:03:01 +0530 Subject: [PATCH 04/26] Add long error explanation for E0710 #61137 --- src/librustc_error_codes/error_codes.rs | 2 +- src/librustc_error_codes/error_codes/E0710.md | 25 
+++++++++++++++++++ src/test/ui/unknown-lint-tool-name.stderr | 3 +++ 3 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 src/librustc_error_codes/error_codes/E0710.md diff --git a/src/librustc_error_codes/error_codes.rs b/src/librustc_error_codes/error_codes.rs index 33bfaddc39c9d..86da425060efa 100644 --- a/src/librustc_error_codes/error_codes.rs +++ b/src/librustc_error_codes/error_codes.rs @@ -393,6 +393,7 @@ E0701: include_str!("./error_codes/E0701.md"), E0704: include_str!("./error_codes/E0704.md"), E0705: include_str!("./error_codes/E0705.md"), E0706: include_str!("./error_codes/E0706.md"), +E0710: include_str!("./error_codes/E0710.md"), E0712: include_str!("./error_codes/E0712.md"), E0713: include_str!("./error_codes/E0713.md"), E0714: include_str!("./error_codes/E0714.md"), @@ -604,7 +605,6 @@ E0748: include_str!("./error_codes/E0748.md"), E0708, // `async` non-`move` closures with parameters are not currently // supported // E0709, // multiple different lifetimes used in arguments of `async fn` - E0710, // an unknown tool name found in scoped lint E0711, // a feature has been declared with conflicting stability attributes E0717, // rustc_promotable without stability attribute // E0721, // `await` keyword diff --git a/src/librustc_error_codes/error_codes/E0710.md b/src/librustc_error_codes/error_codes/E0710.md new file mode 100644 index 0000000000000..c0b58eb00f315 --- /dev/null +++ b/src/librustc_error_codes/error_codes/E0710.md @@ -0,0 +1,25 @@ +An unknown tool name found in scoped lint + +Erroneous code example: + +```compile_fail,E0710 +#[allow(clipp::filter_map)] // error: an unknown tool name found in scoped lint: `clipp::filter_map` + +fn main() { + /** + *business logic + */ +} +``` + +Please verify you didn't misspell the tool's name or that you didn't +forget to import it in you project: + +``` +#[allow(clippy::filter_map)] // ok! +fn main() { + /** + *business logic + */ +} +``` \ No newline at end of file diff --git a/src/test/ui/unknown-lint-tool-name.stderr b/src/test/ui/unknown-lint-tool-name.stderr index 1940f61a47b68..f98e15f7cf2f6 100644 --- a/src/test/ui/unknown-lint-tool-name.stderr +++ b/src/test/ui/unknown-lint-tool-name.stderr @@ -36,3 +36,6 @@ LL | #[allow(foo::bar)] error: aborting due to 6 previous errors +For more information about this error, try `rustc --explain E0710`. + + From cd9921ed82759d9e49e337f67d691c2d681f4e16 Mon Sep 17 00:00:00 2001 From: bishtpawan Date: Tue, 24 Mar 2020 13:09:18 +0530 Subject: [PATCH 05/26] Refactor changes --- src/librustc_error_codes/error_codes/E0710.md | 1 - src/test/ui/unknown-lint-tool-name.stderr | 2 -- 2 files changed, 3 deletions(-) diff --git a/src/librustc_error_codes/error_codes/E0710.md b/src/librustc_error_codes/error_codes/E0710.md index c0b58eb00f315..d577cfb1e1299 100644 --- a/src/librustc_error_codes/error_codes/E0710.md +++ b/src/librustc_error_codes/error_codes/E0710.md @@ -4,7 +4,6 @@ Erroneous code example: ```compile_fail,E0710 #[allow(clipp::filter_map)] // error: an unknown tool name found in scoped lint: `clipp::filter_map` - fn main() { /** *business logic diff --git a/src/test/ui/unknown-lint-tool-name.stderr b/src/test/ui/unknown-lint-tool-name.stderr index f98e15f7cf2f6..414816d229cdb 100644 --- a/src/test/ui/unknown-lint-tool-name.stderr +++ b/src/test/ui/unknown-lint-tool-name.stderr @@ -37,5 +37,3 @@ LL | #[allow(foo::bar)] error: aborting due to 6 previous errors For more information about this error, try `rustc --explain E0710`. 
- - From 10226daa4ee7a2bd61a1d0dd86581990c782278f Mon Sep 17 00:00:00 2001 From: bishtpawan Date: Tue, 24 Mar 2020 15:41:31 +0530 Subject: [PATCH 06/26] Update tools_lints --- src/librustc_error_codes/error_codes/E0710.md | 4 ++-- src/test/ui/tool_lints.stderr | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/librustc_error_codes/error_codes/E0710.md b/src/librustc_error_codes/error_codes/E0710.md index d577cfb1e1299..cbb667d832d5c 100644 --- a/src/librustc_error_codes/error_codes/E0710.md +++ b/src/librustc_error_codes/error_codes/E0710.md @@ -3,7 +3,7 @@ An unknown tool name found in scoped lint Erroneous code example: ```compile_fail,E0710 -#[allow(clipp::filter_map)] // error: an unknown tool name found in scoped lint: `clipp::filter_map` +#[allow(clipp::filter_map)] // error!` fn main() { /** *business logic @@ -21,4 +21,4 @@ fn main() { *business logic */ } -``` \ No newline at end of file +``` diff --git a/src/test/ui/tool_lints.stderr b/src/test/ui/tool_lints.stderr index 86f87784eaf86..b19e137001022 100644 --- a/src/test/ui/tool_lints.stderr +++ b/src/test/ui/tool_lints.stderr @@ -18,3 +18,4 @@ LL | #[warn(foo::bar)] error: aborting due to 3 previous errors +For more information about this error, try `rustc --explain E0710`. \ No newline at end of file From 6c4d5d989684da657e51629c01ca0667a4a90937 Mon Sep 17 00:00:00 2001 From: Bastian Kauschke Date: Tue, 24 Mar 2020 11:24:24 +0100 Subject: [PATCH 07/26] improve normalize cycle error --- src/librustc/query/mod.rs | 2 +- src/librustc/ty/normalize_erasing_regions.rs | 10 +++--- .../associated-const/defaults-cyclic-fail.rs | 2 +- .../defaults-cyclic-fail.stderr | 32 +++++++++++++------ src/test/ui/consts/const-size_of-cycle.stderr | 2 +- 5 files changed, 29 insertions(+), 19 deletions(-) diff --git a/src/librustc/query/mod.rs b/src/librustc/query/mod.rs index 86855cb0ef0c2..1cc5c6e6f4ae7 100644 --- a/src/librustc/query/mod.rs +++ b/src/librustc/query/mod.rs @@ -1117,7 +1117,7 @@ rustc_queries! { query normalize_generic_arg_after_erasing_regions( goal: ParamEnvAnd<'tcx, GenericArg<'tcx>> ) -> GenericArg<'tcx> { - desc { "normalizing `{:?}`", goal } + desc { "normalizing `{}`", goal.value } } query implied_outlives_bounds( diff --git a/src/librustc/ty/normalize_erasing_regions.rs b/src/librustc/ty/normalize_erasing_regions.rs index e49bf6f8e67dc..2f0a57c59eb14 100644 --- a/src/librustc/ty/normalize_erasing_regions.rs +++ b/src/librustc/ty/normalize_erasing_regions.rs @@ -94,14 +94,12 @@ impl TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> { } fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.tcx - .normalize_generic_arg_after_erasing_regions(self.param_env.and(ty.into())) - .expect_ty() + let arg = self.param_env.and(ty.into()); + self.tcx.normalize_generic_arg_after_erasing_regions(arg).expect_ty() } fn fold_const(&mut self, c: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> { - self.tcx - .normalize_generic_arg_after_erasing_regions(self.param_env.and(c.into())) - .expect_const() + let arg = self.param_env.and(c.into()); + self.tcx.normalize_generic_arg_after_erasing_regions(arg).expect_const() } } diff --git a/src/test/ui/associated-const/defaults-cyclic-fail.rs b/src/test/ui/associated-const/defaults-cyclic-fail.rs index 9b899ee316a0e..9fb1bbebc9610 100644 --- a/src/test/ui/associated-const/defaults-cyclic-fail.rs +++ b/src/test/ui/associated-const/defaults-cyclic-fail.rs @@ -1,9 +1,9 @@ // build-fail +//~^ ERROR cycle detected when normalizing `<() as Tr>::A` // Cyclic assoc. 
const defaults don't error unless *used* trait Tr { const A: u8 = Self::B; - //~^ ERROR cycle detected when const-evaluating + checking `Tr::A` const B: u8 = Self::A; } diff --git a/src/test/ui/associated-const/defaults-cyclic-fail.stderr b/src/test/ui/associated-const/defaults-cyclic-fail.stderr index 940182d4aa676..6b2fbe5be4e30 100644 --- a/src/test/ui/associated-const/defaults-cyclic-fail.stderr +++ b/src/test/ui/associated-const/defaults-cyclic-fail.stderr @@ -1,30 +1,42 @@ -error[E0391]: cycle detected when const-evaluating + checking `Tr::A` - --> $DIR/defaults-cyclic-fail.rs:5:5 +error[E0391]: cycle detected when normalizing `<() as Tr>::A` + | +note: ...which requires const-evaluating + checking `Tr::A`... + --> $DIR/defaults-cyclic-fail.rs:6:5 | LL | const A: u8 = Self::B; | ^^^^^^^^^^^^^^^^^^^^^^ +note: ...which requires const-evaluating + checking `Tr::A`... + --> $DIR/defaults-cyclic-fail.rs:6:5 | +LL | const A: u8 = Self::B; + | ^^^^^^^^^^^^^^^^^^^^^^ note: ...which requires const-evaluating `Tr::A`... - --> $DIR/defaults-cyclic-fail.rs:5:19 + --> $DIR/defaults-cyclic-fail.rs:6:5 | LL | const A: u8 = Self::B; - | ^^^^^^^ + | ^^^^^^^^^^^^^^^^^^^^^^ + = note: ...which requires normalizing `<() as Tr>::B`... +note: ...which requires const-evaluating + checking `Tr::B`... + --> $DIR/defaults-cyclic-fail.rs:8:5 + | +LL | const B: u8 = Self::A; + | ^^^^^^^^^^^^^^^^^^^^^^ note: ...which requires const-evaluating + checking `Tr::B`... --> $DIR/defaults-cyclic-fail.rs:8:5 | LL | const B: u8 = Self::A; | ^^^^^^^^^^^^^^^^^^^^^^ note: ...which requires const-evaluating `Tr::B`... - --> $DIR/defaults-cyclic-fail.rs:8:19 + --> $DIR/defaults-cyclic-fail.rs:8:5 | LL | const B: u8 = Self::A; - | ^^^^^^^ - = note: ...which again requires const-evaluating + checking `Tr::A`, completing the cycle + | ^^^^^^^^^^^^^^^^^^^^^^ + = note: ...which again requires normalizing `<() as Tr>::A`, completing the cycle note: cycle used when const-evaluating `main` - --> $DIR/defaults-cyclic-fail.rs:16:16 + --> $DIR/defaults-cyclic-fail.rs:14:1 | -LL | assert_eq!(<() as Tr>::A, 0); - | ^^^^^^^^^^^^^ +LL | fn main() { + | ^^^^^^^^^ error: aborting due to previous error diff --git a/src/test/ui/consts/const-size_of-cycle.stderr b/src/test/ui/consts/const-size_of-cycle.stderr index c03b7a19ffc61..aac3622c6de40 100644 --- a/src/test/ui/consts/const-size_of-cycle.stderr +++ b/src/test/ui/consts/const-size_of-cycle.stderr @@ -25,7 +25,7 @@ note: ...which requires const-evaluating + checking `std::intrinsics::size_of`.. LL | pub fn size_of() -> usize; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ = note: ...which requires computing layout of `Foo`... - = note: ...which requires normalizing `ParamEnvAnd { param_env: ParamEnv { caller_bounds: [], reveal: All, def_id: None }, value: [u8; _] }`... + = note: ...which requires normalizing `[u8; _]`... 
= note: ...which again requires const-evaluating + checking `Foo::bytes::{{constant}}#0`, completing the cycle note: cycle used when processing `Foo` --> $DIR/const-size_of-cycle.rs:7:1 From 150916047beaae94096d2fdc95b88fbdb837f242 Mon Sep 17 00:00:00 2001 From: bishtpawan Date: Tue, 24 Mar 2020 17:11:04 +0530 Subject: [PATCH 08/26] Add explanation for inner attribute --- src/librustc_error_codes/error_codes/E0710.md | 42 +++++++++++++++---- 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/src/librustc_error_codes/error_codes/E0710.md b/src/librustc_error_codes/error_codes/E0710.md index cbb667d832d5c..6f2dfcd232390 100644 --- a/src/librustc_error_codes/error_codes/E0710.md +++ b/src/librustc_error_codes/error_codes/E0710.md @@ -1,13 +1,27 @@ An unknown tool name found in scoped lint -Erroneous code example: +Erroneous code examples: ```compile_fail,E0710 #[allow(clipp::filter_map)] // error!` fn main() { - /** - *business logic - */ + // business logic +} +``` + +```compile_fail,E0710 +#[warn(clipp::filter_map)] // error!` +fn main() { + // business logic +} +``` + +```compile_fail,E0710 +fn main() { + #![deny(clipp::filter_map)] //error! + fn filter() { + //logic + } } ``` @@ -17,8 +31,22 @@ forget to import it in you project: ``` #[allow(clippy::filter_map)] // ok! fn main() { - /** - *business logic - */ + // business logic +} +``` + +``` +#[warn(clippy::filter_map)] // ok! +fn main() { + // business logic +} +``` + +``` +fn main() { + #![deny(clippy::filter_map)] // ok! + fn filter() { + //logic + } } ``` From b31707e683ba90548d4830929ae721bc4dbaf13e Mon Sep 17 00:00:00 2001 From: bishtpawan Date: Tue, 24 Mar 2020 19:06:08 +0530 Subject: [PATCH 09/26] Remove unknown lint from deny attribute --- src/librustc_error_codes/error_codes/E0710.md | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/src/librustc_error_codes/error_codes/E0710.md b/src/librustc_error_codes/error_codes/E0710.md index 6f2dfcd232390..d9cefe2a6da72 100644 --- a/src/librustc_error_codes/error_codes/E0710.md +++ b/src/librustc_error_codes/error_codes/E0710.md @@ -16,15 +16,6 @@ fn main() { } ``` -```compile_fail,E0710 -fn main() { - #![deny(clipp::filter_map)] //error! - fn filter() { - //logic - } -} -``` - Please verify you didn't misspell the tool's name or that you didn't forget to import it in you project: @@ -41,12 +32,3 @@ fn main() { // business logic } ``` - -``` -fn main() { - #![deny(clippy::filter_map)] // ok! 
- fn filter() { - //logic - } -} -``` From 11763d48cf3104656f1e6c902f1d27d7cd16418f Mon Sep 17 00:00:00 2001 From: Bastian Kauschke Date: Tue, 24 Mar 2020 16:56:12 +0100 Subject: [PATCH 10/26] update mir opt test --- src/test/mir-opt/inline/inline-into-box-place.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/mir-opt/inline/inline-into-box-place.rs b/src/test/mir-opt/inline/inline-into-box-place.rs index f368bdef6f8e2..500238de4c5ab 100644 --- a/src/test/mir-opt/inline/inline-into-box-place.rs +++ b/src/test/mir-opt/inline/inline-into-box-place.rs @@ -55,7 +55,7 @@ fn main() { // StorageLive(_2); // _2 = Box(std::vec::Vec); // _4 = &mut (*_2); -// ((*_4).0: alloc::raw_vec::RawVec) = const alloc::raw_vec::RawVec::::NEW; +// ((*_4).0: alloc::raw_vec::RawVec) = const ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), undef_mask: UndefMask { blocks: [65535], len: Size { raw: 16 } }, size: Size { raw: 16 }, align: Align { pow2: 3 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }: alloc::raw_vec::RawVec::; // ((*_4).1: usize) = const 0usize; // _1 = move _2; // StorageDead(_2); From 212e6ce7bf67d6475ec4fdfebfcf9f99704b2aa2 Mon Sep 17 00:00:00 2001 From: Josh Stone Date: Sun, 22 Mar 2020 16:03:34 -0700 Subject: [PATCH 11/26] Implement Fuse with Option The former `done` flag was roughly similar to an `Option` tag, but left the possibity of misuse. By using a real `Option`, we can set `None` when the iterator is exhausted, removing any way to call it again. We also allow niche layout this way, so the `Fuse` may be smaller. The `FusedIterator` specialization does want to ignore the possibility of exhaustion though, so it uses `unsafe { intrinsics::unreachable() }` to optimize that branch away. The entire `Fuse` implementation is now isolated in its own module to contain that unsafety. --- src/libcore/iter/adapters/fuse.rs | 338 ++++++++++++++++++++++++++++++ src/libcore/iter/adapters/mod.rs | 257 +---------------------- 2 files changed, 340 insertions(+), 255 deletions(-) create mode 100644 src/libcore/iter/adapters/fuse.rs diff --git a/src/libcore/iter/adapters/fuse.rs b/src/libcore/iter/adapters/fuse.rs new file mode 100644 index 0000000000000..f5fd075662209 --- /dev/null +++ b/src/libcore/iter/adapters/fuse.rs @@ -0,0 +1,338 @@ +use crate::intrinsics; +use crate::iter::{ + DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator, TrustedRandomAccess, +}; +use crate::ops::Try; + +/// An iterator that yields `None` forever after the underlying iterator +/// yields `None` once. +/// +/// This `struct` is created by the [`fuse`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`fuse`]: trait.Iterator.html#method.fuse +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Fuse { + // NOTE: for `I: FusedIterator`, this is always assumed `Some`! 
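+    //
+    // Illustrative sketch of the niche-layout point from the commit message
+    // (assumes `I` has a niche, e.g. a `NonNull` pointer, for `Option` to use):
+    //
+    //     use std::mem::size_of;
+    //     assert_eq!(size_of::<Fuse<std::slice::Iter<'static, u8>>>(),
+    //                size_of::<std::slice::Iter<'static, u8>>());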
+ iter: Option, +} +impl Fuse { + pub(in crate::iter) fn new(iter: I) -> Fuse { + Fuse { iter: Some(iter) } + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Fuse where I: Iterator {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Fuse +where + I: Iterator, +{ + type Item = ::Item; + + #[inline] + default fn next(&mut self) -> Option<::Item> { + let next = self.iter.as_mut()?.next(); + if next.is_none() { + self.iter = None; + } + next + } + + #[inline] + default fn nth(&mut self, n: usize) -> Option { + let nth = self.iter.as_mut()?.nth(n); + if nth.is_none() { + self.iter = None; + } + nth + } + + #[inline] + default fn last(self) -> Option { + self.iter?.last() + } + + #[inline] + default fn count(self) -> usize { + self.iter.map_or(0, I::count) + } + + #[inline] + default fn size_hint(&self) -> (usize, Option) { + self.iter.as_ref().map_or((0, Some(0)), I::size_hint) + } + + #[inline] + default fn try_fold(&mut self, mut acc: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + if let Some(ref mut iter) = self.iter { + acc = iter.try_fold(acc, fold)?; + self.iter = None; + } + Try::from_ok(acc) + } + + #[inline] + default fn fold(self, mut acc: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + if let Some(iter) = self.iter { + acc = iter.fold(acc, fold); + } + acc + } + + #[inline] + default fn find
<P>
(&mut self, predicate: P) -> Option + where + P: FnMut(&Self::Item) -> bool, + { + let found = self.iter.as_mut()?.find(predicate); + if found.is_none() { + self.iter = None; + } + found + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Fuse +where + I: DoubleEndedIterator, +{ + #[inline] + default fn next_back(&mut self) -> Option<::Item> { + let next = self.iter.as_mut()?.next_back(); + if next.is_none() { + self.iter = None; + } + next + } + + #[inline] + default fn nth_back(&mut self, n: usize) -> Option<::Item> { + let nth = self.iter.as_mut()?.nth_back(n); + if nth.is_none() { + self.iter = None; + } + nth + } + + #[inline] + default fn try_rfold(&mut self, mut acc: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + if let Some(ref mut iter) = self.iter { + acc = iter.try_rfold(acc, fold)?; + self.iter = None; + } + Try::from_ok(acc) + } + + #[inline] + default fn rfold(self, mut acc: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + if let Some(iter) = self.iter { + acc = iter.rfold(acc, fold); + } + acc + } + + #[inline] + default fn rfind
<P>
(&mut self, predicate: P) -> Option + where + P: FnMut(&Self::Item) -> bool, + { + let found = self.iter.as_mut()?.rfind(predicate); + if found.is_none() { + self.iter = None; + } + found + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Fuse +where + I: ExactSizeIterator, +{ + default fn len(&self) -> usize { + self.iter.as_ref().map_or(0, I::len) + } + + default fn is_empty(&self) -> bool { + self.iter.as_ref().map_or(true, I::is_empty) + } +} + +// NOTE: for `I: FusedIterator`, we assume that the iterator is always `Some` +impl Fuse { + #[inline(always)] + fn as_inner(&self) -> &I { + match self.iter { + Some(ref iter) => iter, + // SAFETY: the specialized iterator never sets `None` + None => unsafe { intrinsics::unreachable() }, + } + } + + #[inline(always)] + fn as_inner_mut(&mut self) -> &mut I { + match self.iter { + Some(ref mut iter) => iter, + // SAFETY: the specialized iterator never sets `None` + None => unsafe { intrinsics::unreachable() }, + } + } + + #[inline(always)] + fn into_inner(self) -> I { + match self.iter { + Some(iter) => iter, + // SAFETY: the specialized iterator never sets `None` + None => unsafe { intrinsics::unreachable() }, + } + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl Iterator for Fuse +where + I: FusedIterator, +{ + #[inline] + fn next(&mut self) -> Option<::Item> { + self.as_inner_mut().next() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + self.as_inner_mut().nth(n) + } + + #[inline] + fn last(self) -> Option { + self.into_inner().last() + } + + #[inline] + fn count(self) -> usize { + self.into_inner().count() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.as_inner().size_hint() + } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + self.as_inner_mut().try_fold(init, fold) + } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.into_inner().fold(init, fold) + } + + #[inline] + fn find
<P>
(&mut self, predicate: P) -> Option + where + P: FnMut(&Self::Item) -> bool, + { + self.as_inner_mut().find(predicate) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl DoubleEndedIterator for Fuse +where + I: DoubleEndedIterator + FusedIterator, +{ + #[inline] + fn next_back(&mut self) -> Option<::Item> { + self.as_inner_mut().next_back() + } + + #[inline] + fn nth_back(&mut self, n: usize) -> Option<::Item> { + self.as_inner_mut().nth_back(n) + } + + #[inline] + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + self.as_inner_mut().try_rfold(init, fold) + } + + #[inline] + fn rfold(self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.into_inner().rfold(init, fold) + } + + #[inline] + fn rfind
<P>
(&mut self, predicate: P) -> Option + where + P: FnMut(&Self::Item) -> bool, + { + self.as_inner_mut().rfind(predicate) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Fuse +where + I: ExactSizeIterator + FusedIterator, +{ + fn len(&self) -> usize { + self.as_inner().len() + } + + fn is_empty(&self) -> bool { + self.as_inner().is_empty() + } +} + +unsafe impl TrustedRandomAccess for Fuse +where + I: TrustedRandomAccess + FusedIterator, +{ + unsafe fn get_unchecked(&mut self, i: usize) -> I::Item { + self.as_inner_mut().get_unchecked(i) + } + + fn may_have_side_effect() -> bool { + I::may_have_side_effect() + } +} diff --git a/src/libcore/iter/adapters/mod.rs b/src/libcore/iter/adapters/mod.rs index 6759a6b2d7349..16738543eb3af 100644 --- a/src/libcore/iter/adapters/mod.rs +++ b/src/libcore/iter/adapters/mod.rs @@ -9,11 +9,13 @@ use super::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator, Tru mod chain; mod flatten; +mod fuse; mod zip; pub use self::chain::Chain; #[stable(feature = "rust1", since = "1.0.0")] pub use self::flatten::{FlatMap, Flatten}; +pub use self::fuse::Fuse; pub(crate) use self::zip::TrustedRandomAccess; pub use self::zip::Zip; @@ -2238,261 +2240,6 @@ where } } -/// An iterator that yields `None` forever after the underlying iterator -/// yields `None` once. -/// -/// This `struct` is created by the [`fuse`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`fuse`]: trait.Iterator.html#method.fuse -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Fuse { - iter: I, - done: bool, -} -impl Fuse { - pub(super) fn new(iter: I) -> Fuse { - Fuse { iter, done: false } - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Fuse where I: Iterator {} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Fuse -where - I: Iterator, -{ - type Item = ::Item; - - #[inline] - default fn next(&mut self) -> Option<::Item> { - if self.done { - None - } else { - let next = self.iter.next(); - self.done = next.is_none(); - next - } - } - - #[inline] - default fn nth(&mut self, n: usize) -> Option { - if self.done { - None - } else { - let nth = self.iter.nth(n); - self.done = nth.is_none(); - nth - } - } - - #[inline] - default fn last(self) -> Option { - if self.done { None } else { self.iter.last() } - } - - #[inline] - default fn count(self) -> usize { - if self.done { 0 } else { self.iter.count() } - } - - #[inline] - default fn size_hint(&self) -> (usize, Option) { - if self.done { (0, Some(0)) } else { self.iter.size_hint() } - } - - #[inline] - default fn try_fold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - if self.done { - Try::from_ok(init) - } else { - let acc = self.iter.try_fold(init, fold)?; - self.done = true; - Try::from_ok(acc) - } - } - - #[inline] - default fn fold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - if self.done { init } else { self.iter.fold(init, fold) } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Fuse -where - I: DoubleEndedIterator, -{ - #[inline] - default fn next_back(&mut self) -> Option<::Item> { - if self.done { - None - } else { - let next = self.iter.next_back(); - self.done = next.is_none(); - next - } - } - - #[inline] - default fn nth_back(&mut 
self, n: usize) -> Option<::Item> { - if self.done { - None - } else { - let nth = self.iter.nth_back(n); - self.done = nth.is_none(); - nth - } - } - - #[inline] - default fn try_rfold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - if self.done { - Try::from_ok(init) - } else { - let acc = self.iter.try_rfold(init, fold)?; - self.done = true; - Try::from_ok(acc) - } - } - - #[inline] - default fn rfold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - if self.done { init } else { self.iter.rfold(init, fold) } - } -} - -unsafe impl TrustedRandomAccess for Fuse -where - I: TrustedRandomAccess, -{ - unsafe fn get_unchecked(&mut self, i: usize) -> I::Item { - self.iter.get_unchecked(i) - } - - fn may_have_side_effect() -> bool { - I::may_have_side_effect() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl Iterator for Fuse -where - I: FusedIterator, -{ - #[inline] - fn next(&mut self) -> Option<::Item> { - self.iter.next() - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - self.iter.nth(n) - } - - #[inline] - fn last(self) -> Option { - self.iter.last() - } - - #[inline] - fn count(self) -> usize { - self.iter.count() - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - self.iter.try_fold(init, fold) - } - - #[inline] - fn fold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.fold(init, fold) - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl DoubleEndedIterator for Fuse -where - I: DoubleEndedIterator + FusedIterator, -{ - #[inline] - fn next_back(&mut self) -> Option<::Item> { - self.iter.next_back() - } - - #[inline] - fn nth_back(&mut self, n: usize) -> Option<::Item> { - self.iter.nth_back(n) - } - - #[inline] - fn try_rfold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - self.iter.try_rfold(init, fold) - } - - #[inline] - fn rfold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.rfold(init, fold) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Fuse -where - I: ExactSizeIterator, -{ - fn len(&self) -> usize { - self.iter.len() - } - - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - /// An iterator that calls a function with a reference to each element before /// yielding it. /// From bedc3587cee29d63d78f943dd332b1d01b16ee6d Mon Sep 17 00:00:00 2001 From: Youngsuk Kim Date: Tue, 24 Mar 2020 21:34:36 -0400 Subject: [PATCH 12/26] fix type name typo in doc comments InterpCtx => InterpCx (rustc_mir::interpret::InterpCx) --- src/librustc_mir/interpret/memory.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 110f2ffd9d78c..f01e6c2e842eb 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -473,7 +473,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { } /// Gives raw access to the `Allocation`, without bounds or alignment checks. - /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCtx` instead! + /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead! 
pub fn get_raw( &self, id: AllocId, @@ -510,7 +510,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { } /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks. - /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCtx` instead! + /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead! pub fn get_raw_mut( &mut self, id: AllocId, From 5c65568f0b0b5f97fc02397c974d206bc3ff0f9d Mon Sep 17 00:00:00 2001 From: bishtpawan Date: Wed, 25 Mar 2020 11:32:23 +0530 Subject: [PATCH 13/26] update tool_lints --- src/test/ui/tool_lints.stderr | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/ui/tool_lints.stderr b/src/test/ui/tool_lints.stderr index b19e137001022..1bcd7fd735de8 100644 --- a/src/test/ui/tool_lints.stderr +++ b/src/test/ui/tool_lints.stderr @@ -18,4 +18,4 @@ LL | #[warn(foo::bar)] error: aborting due to 3 previous errors -For more information about this error, try `rustc --explain E0710`. \ No newline at end of file +For more information about this error, try `rustc --explain E0710`. From 5be304b0b411af49e4f0e7196a35c034f430cc85 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 17 Mar 2020 14:11:51 +0100 Subject: [PATCH 14/26] miri: simplify shift operator overflow checking --- src/librustc_mir/interpret/operator.rs | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs index 76a5aecb9db62..cb0aaa4d40d3e 100644 --- a/src/librustc_mir/interpret/operator.rs +++ b/src/librustc_mir/interpret/operator.rs @@ -1,3 +1,5 @@ +use std::convert::TryFrom; + use rustc::mir; use rustc::mir::interpret::{InterpResult, Scalar}; use rustc::ty::{ @@ -130,28 +132,27 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Shift ops can have an RHS with a different numeric type. 
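         // Illustrative worked example for the masking below (an assumed
         // reading of the new code, not part of the patch): shifting a `u8`
         // by 12 yields size = 8, so `overflow = (12 >= 8)` is true and the
         // shift actually performed is `12 % 8 == 4`, i.e. `l << 4`.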
if bin_op == Shl || bin_op == Shr { let signed = left_layout.abi.is_signed(); - let mut oflo = (r as u32 as u128) != r; - let mut r = r as u32; - let size = left_layout.size; - oflo |= r >= size.bits() as u32; - r %= size.bits() as u32; + let size = u128::from(left_layout.size.bits()); + let overflow = r >= size; + let r = r % size; // mask to type size + let r = u32::try_from(r).unwrap(); // we masked so this will always fit let result = if signed { let l = self.sign_extend(l, left_layout) as i128; let result = match bin_op { - Shl => l << r, - Shr => l >> r, + Shl => l.checked_shl(r).unwrap(), + Shr => l.checked_shr(r).unwrap(), _ => bug!("it has already been checked that this is a shift op"), }; result as u128 } else { match bin_op { - Shl => l << r, - Shr => l >> r, + Shl => l.checked_shl(r).unwrap(), + Shr => l.checked_shr(r).unwrap(), _ => bug!("it has already been checked that this is a shift op"), } }; let truncated = self.truncate(result, left_layout); - return Ok((Scalar::from_uint(truncated, size), oflo, left_layout.ty)); + return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty)); } // For the remaining ops, the types must be the same on both sides @@ -193,7 +194,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { _ => None, }; if let Some(op) = op { - let l128 = self.sign_extend(l, left_layout) as i128; let r = self.sign_extend(r, right_layout) as i128; // We need a special check for overflowing remainder: // "int_min % -1" overflows and returns 0, but after casting things to a larger int @@ -206,8 +206,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } _ => {} } + let l = self.sign_extend(l, left_layout) as i128; - let (result, oflo) = op(l128, r); + let (result, oflo) = op(l, r); // This may be out-of-bounds for the result type, so we have to truncate ourselves. // If that truncation loses any information, we have an overflow. let result = result as u128; From 1ddbdc62692f40017949e8ba0acca4b5f61f6027 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sat, 21 Mar 2020 13:49:02 +0100 Subject: [PATCH 15/26] use checked casts and arithmetic in Miri engine --- src/librustc/mir/interpret/allocation.rs | 78 +++++++++---------- src/librustc/mir/interpret/mod.rs | 43 +++++----- src/librustc/mir/interpret/pointer.rs | 4 +- src/librustc/mir/interpret/value.rs | 73 ++++++++++------- src/librustc_mir/interpret/cast.rs | 6 +- src/librustc_mir/interpret/eval_context.rs | 4 +- src/librustc_mir/interpret/intrinsics.rs | 10 +-- .../interpret/intrinsics/caller_location.rs | 6 +- src/librustc_mir/interpret/memory.rs | 30 ++++--- src/librustc_mir/interpret/operand.rs | 16 ++-- src/librustc_mir/interpret/place.rs | 37 ++++----- src/librustc_mir/interpret/step.rs | 5 +- src/librustc_mir/interpret/terminator.rs | 4 +- src/librustc_mir/interpret/traits.rs | 9 ++- src/librustc_mir/interpret/validity.rs | 10 ++- src/librustc_mir/interpret/visitor.rs | 4 +- 16 files changed, 186 insertions(+), 153 deletions(-) diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs index 946b6add40a7e..4d42e796d10e5 100644 --- a/src/librustc/mir/interpret/allocation.rs +++ b/src/librustc/mir/interpret/allocation.rs @@ -1,18 +1,20 @@ //! The virtual memory representation of the MIR interpreter. 
+use std::borrow::Cow; +use std::convert::TryFrom; +use std::iter; +use std::ops::{Add, Deref, DerefMut, Mul, Range, Sub}; + +use rustc_ast::ast::Mutability; +use rustc_data_structures::sorted_map::SortedMap; +use rustc_target::abi::HasDataLayout; + use super::{ read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUndef, }; use crate::ty::layout::{Align, Size}; -use rustc_ast::ast::Mutability; -use rustc_data_structures::sorted_map::SortedMap; -use rustc_target::abi::HasDataLayout; -use std::borrow::Cow; -use std::iter; -use std::ops::{Deref, DerefMut, Range}; - // NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in // `src/librustc_mir/interpret/snapshot.rs`. #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] @@ -90,7 +92,7 @@ impl Allocation { /// Creates a read-only allocation initialized by the given bytes pub fn from_bytes<'a>(slice: impl Into>, align: Align) -> Self { let bytes = slice.into().into_owned(); - let size = Size::from_bytes(bytes.len() as u64); + let size = Size::from_bytes(u64::try_from(bytes.len()).unwrap()); Self { bytes, relocations: Relocations::new(), @@ -107,9 +109,8 @@ impl Allocation { } pub fn undef(size: Size, align: Align) -> Self { - assert_eq!(size.bytes() as usize as u64, size.bytes()); Allocation { - bytes: vec![0; size.bytes() as usize], + bytes: vec![0; usize::try_from(size.bytes()).unwrap()], relocations: Relocations::new(), undef_mask: UndefMask::new(size, false), size, @@ -152,7 +153,7 @@ impl Allocation<(), ()> { /// Raw accessors. Provide access to otherwise private bytes. impl Allocation { pub fn len(&self) -> usize { - self.size.bytes() as usize + usize::try_from(self.size.bytes()).unwrap() } /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs @@ -182,13 +183,8 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra> Allocation { /// Returns the range of this allocation that was meant. #[inline] fn check_bounds(&self, offset: Size, size: Size) -> Range { - let end = offset + size; // This does overflow checking. - assert_eq!( - end.bytes() as usize as u64, - end.bytes(), - "cannot handle this access on this host architecture" - ); - let end = end.bytes() as usize; + let end = Size::add(offset, size); // This does overflow checking. + let end = usize::try_from(end.bytes()).expect("access too big for this host architecture"); assert!( end <= self.len(), "Out-of-bounds access at offset {}, size {} in allocation of size {}", @@ -196,7 +192,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra> Allocation { size.bytes(), self.len() ); - (offset.bytes() as usize)..end + usize::try_from(offset.bytes()).unwrap()..end } /// The last argument controls whether we error out when there are undefined @@ -294,11 +290,11 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra> Allocation { cx: &impl HasDataLayout, ptr: Pointer, ) -> InterpResult<'tcx, &[u8]> { - assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); - let offset = ptr.offset.bytes() as usize; + let offset = usize::try_from(ptr.offset.bytes()).unwrap(); Ok(match self.bytes[offset..].iter().position(|&c| c == 0) { Some(size) => { - let size_with_null = Size::from_bytes((size + 1) as u64); + let size_with_null = + Size::from_bytes(u64::try_from(size.checked_add(1).unwrap()).unwrap()); // Go through `get_bytes` for checks and AllocationExtra hooks. // We read the null, so we include it in the request, but we want it removed // from the result, so we do subslicing. 
@@ -343,7 +339,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra> Allocation { let (lower, upper) = src.size_hint(); let len = upper.expect("can only write bounded iterators"); assert_eq!(lower, len, "can only write iterators with a precise length"); - let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len as u64))?; + let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(u64::try_from(len).unwrap()))?; // `zip` would stop when the first iterator ends; we want to definitely // cover all of `bytes`. for dest in bytes { @@ -386,7 +382,11 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra> Allocation { } else { match self.relocations.get(&ptr.offset) { Some(&(tag, alloc_id)) => { - let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag); + let ptr = Pointer::new_with_tag( + alloc_id, + Size::from_bytes(u64::try_from(bits).unwrap()), + tag, + ); return Ok(ScalarMaybeUndef::Scalar(ptr.into())); } None => {} @@ -433,7 +433,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra> Allocation { }; let bytes = match val.to_bits_or_ptr(type_size, cx) { - Err(val) => val.offset.bytes() as u128, + Err(val) => u128::from(val.offset.bytes()), Ok(data) => data, }; @@ -479,7 +479,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation { // We have to go back `pointer_size - 1` bytes, as that one would still overlap with // the beginning of this range. let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1); - let end = ptr.offset + size; // This does overflow checking. + let end = Size::add(ptr.offset, size); // This does overflow checking. self.relocations.range(Size::from_bytes(start)..end) } @@ -524,7 +524,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation { ) }; let start = ptr.offset; - let end = start + size; + let end = Size::add(start, size); // Mark parts of the outermost relocations as undefined if they partially fall outside the // given range. @@ -563,7 +563,7 @@ impl<'tcx, Tag, Extra> Allocation { #[inline] fn check_defined(&self, ptr: Pointer, size: Size) -> InterpResult<'tcx> { self.undef_mask - .is_range_defined(ptr.offset, ptr.offset + size) + .is_range_defined(ptr.offset, Size::add(ptr.offset, size)) .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx))))) } @@ -571,7 +571,7 @@ impl<'tcx, Tag, Extra> Allocation { if size.bytes() == 0 { return; } - self.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state); + self.undef_mask.set_range(ptr.offset, Size::add(ptr.offset, size), new_state); } } @@ -616,7 +616,7 @@ impl Allocation { for i in 1..size.bytes() { // FIXME: optimize to bitshift the current undef block's bits and read the top bit. 
- if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur { + if self.undef_mask.get(Size::add(src.offset, Size::from_bytes(i))) == cur { cur_len += 1; } else { ranges.push(cur_len); @@ -643,7 +643,7 @@ impl Allocation { if defined.ranges.len() <= 1 { self.undef_mask.set_range_inbounds( dest.offset, - dest.offset + size * repeat, + Size::add(dest.offset, Size::mul(size, repeat)), defined.initial, ); return; @@ -721,10 +721,10 @@ impl Allocation { for i in 0..length { new_relocations.extend(relocations.iter().map(|&(offset, reloc)| { // compute offset for current repetition - let dest_offset = dest.offset + (i * size); + let dest_offset = Size::add(dest.offset, Size::mul(size, i)); ( // shift offsets from source allocation to destination allocation - offset + dest_offset - src.offset, + Size::sub(Size::add(offset, dest_offset), src.offset), reloc, ) })); @@ -861,18 +861,18 @@ impl UndefMask { if amount.bytes() == 0 { return; } - let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes(); + let unused_trailing_bits = + u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes(); if amount.bytes() > unused_trailing_bits { let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1; - assert_eq!(additional_blocks as usize as u64, additional_blocks); self.blocks.extend( // FIXME(oli-obk): optimize this by repeating `new_state as Block`. - iter::repeat(0).take(additional_blocks as usize), + iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()), ); } let start = self.len; self.len += amount; - self.set_range_inbounds(start, start + amount, new_state); + self.set_range_inbounds(start, Size::add(start, amount), new_state); } } @@ -881,7 +881,5 @@ fn bit_index(bits: Size) -> (usize, usize) { let bits = bits.bytes(); let a = bits / UndefMask::BLOCK_SIZE; let b = bits % UndefMask::BLOCK_SIZE; - assert_eq!(a as usize as u64, a); - assert_eq!(b as usize as u64, b); - (a as usize, b as usize) + (usize::try_from(a).unwrap(), usize::try_from(b).unwrap()) } diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index 1b5fb4c9954cb..10c3a06da0810 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -95,6 +95,27 @@ mod pointer; mod queries; mod value; +use std::convert::TryFrom; +use std::fmt; +use std::io; +use std::num::NonZeroU32; +use std::sync::atomic::{AtomicU32, Ordering}; + +use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt}; +use rustc_ast::ast::LitKind; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::sync::{HashMapExt, Lock}; +use rustc_data_structures::tiny_list::TinyList; +use rustc_hir::def_id::DefId; +use rustc_macros::HashStable; +use rustc_serialize::{Decodable, Encodable, Encoder}; + +use crate::mir; +use crate::ty::codec::TyDecoder; +use crate::ty::layout::{self, Size}; +use crate::ty::subst::GenericArgKind; +use crate::ty::{self, Instance, Ty, TyCtxt}; + pub use self::error::{ struct_error, ConstEvalErr, ConstEvalRawResult, ConstEvalResult, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, InterpResult, InvalidProgramInfo, MachineStopType, @@ -107,24 +128,6 @@ pub use self::allocation::{Allocation, AllocationExtra, Relocations, UndefMask}; pub use self::pointer::{CheckInAllocMsg, Pointer, PointerArithmetic}; -use crate::mir; -use crate::ty::codec::TyDecoder; -use crate::ty::layout::{self, Size}; -use crate::ty::subst::GenericArgKind; -use crate::ty::{self, Instance, Ty, TyCtxt}; -use byteorder::{BigEndian, 
LittleEndian, ReadBytesExt, WriteBytesExt}; -use rustc_ast::ast::LitKind; -use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::sync::{HashMapExt, Lock}; -use rustc_data_structures::tiny_list::TinyList; -use rustc_hir::def_id::DefId; -use rustc_macros::HashStable; -use rustc_serialize::{Decodable, Encodable, Encoder}; -use std::fmt; -use std::io; -use std::num::NonZeroU32; -use std::sync::atomic::{AtomicU32, Ordering}; - /// Uniquely identifies one of the following: /// - A constant /// - A static @@ -264,8 +267,8 @@ impl<'s> AllocDecodingSession<'s> { D: TyDecoder<'tcx>, { // Read the index of the allocation. - let idx = decoder.read_u32()? as usize; - let pos = self.state.data_offsets[idx] as usize; + let idx = usize::try_from(decoder.read_u32()?).unwrap(); + let pos = usize::try_from(self.state.data_offsets[idx]).unwrap(); // Decode the `AllocDiscriminant` now so that we know if we have to reserve an // `AllocId`. diff --git a/src/librustc/mir/interpret/pointer.rs b/src/librustc/mir/interpret/pointer.rs index 7d862d43bba6b..ff479aee4e0a9 100644 --- a/src/librustc/mir/interpret/pointer.rs +++ b/src/librustc/mir/interpret/pointer.rs @@ -62,9 +62,9 @@ pub trait PointerArithmetic: layout::HasDataLayout { /// This should be called by all the other methods before returning! #[inline] fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) { - let val = val as u128; + let val = u128::from(val); let max_ptr_plus_1 = 1u128 << self.pointer_size().bits(); - ((val % max_ptr_plus_1) as u64, over || val >= max_ptr_plus_1) + (u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1) } #[inline] diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index 59e6b1b0c3788..4474fcd19188b 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -1,3 +1,5 @@ +use std::convert::TryFrom; + use rustc_apfloat::{ ieee::{Double, Single}, Float, @@ -156,7 +158,7 @@ impl Scalar<()> { #[inline(always)] fn check_data(data: u128, size: u8) { debug_assert_eq!( - truncate(data, Size::from_bytes(size as u64)), + truncate(data, Size::from_bytes(u64::from(size))), data, "Scalar value {:#x} exceeds size of {} bytes", data, @@ -203,8 +205,11 @@ impl<'tcx, Tag> Scalar { let dl = cx.data_layout(); match self { Scalar::Raw { data, size } => { - assert_eq!(size as u64, dl.pointer_size.bytes()); - Ok(Scalar::Raw { data: dl.offset(data as u64, i.bytes())? as u128, size }) + assert_eq!(u64::from(size), dl.pointer_size.bytes()); + Ok(Scalar::Raw { + data: u128::from(dl.offset(u64::try_from(data).unwrap(), i.bytes())?), + size, + }) } Scalar::Ptr(ptr) => ptr.offset(i, dl).map(Scalar::Ptr), } @@ -215,8 +220,13 @@ impl<'tcx, Tag> Scalar { let dl = cx.data_layout(); match self { Scalar::Raw { data, size } => { - assert_eq!(size as u64, dl.pointer_size.bytes()); - Scalar::Raw { data: dl.overflowing_offset(data as u64, i.bytes()).0 as u128, size } + assert_eq!(u64::from(size), dl.pointer_size.bytes()); + Scalar::Raw { + data: u128::from( + dl.overflowing_offset(u64::try_from(data).unwrap(), i.bytes()).0, + ), + size, + } } Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_offset(i, dl)), } @@ -227,8 +237,11 @@ impl<'tcx, Tag> Scalar { let dl = cx.data_layout(); match self { Scalar::Raw { data, size } => { - assert_eq!(size as u64, dl.pointer_size().bytes()); - Ok(Scalar::Raw { data: dl.signed_offset(data as u64, i)? 
as u128, size }) + assert_eq!(u64::from(size), dl.pointer_size.bytes()); + Ok(Scalar::Raw { + data: u128::from(dl.signed_offset(u64::try_from(data).unwrap(), i)?), + size, + }) } Scalar::Ptr(ptr) => ptr.signed_offset(i, dl).map(Scalar::Ptr), } @@ -239,9 +252,11 @@ impl<'tcx, Tag> Scalar { let dl = cx.data_layout(); match self { Scalar::Raw { data, size } => { - assert_eq!(size as u64, dl.pointer_size.bytes()); + assert_eq!(u64::from(size), dl.pointer_size.bytes()); Scalar::Raw { - data: dl.overflowing_signed_offset(data as u64, i128::from(i)).0 as u128, + data: u128::from( + dl.overflowing_signed_offset(u64::try_from(data).unwrap(), i128::from(i)).0, + ), size, } } @@ -281,25 +296,25 @@ impl<'tcx, Tag> Scalar { #[inline] pub fn from_u8(i: u8) -> Self { // Guaranteed to be truncated and does not need sign extension. - Scalar::Raw { data: i as u128, size: 1 } + Scalar::Raw { data: i.into(), size: 1 } } #[inline] pub fn from_u16(i: u16) -> Self { // Guaranteed to be truncated and does not need sign extension. - Scalar::Raw { data: i as u128, size: 2 } + Scalar::Raw { data: i.into(), size: 2 } } #[inline] pub fn from_u32(i: u32) -> Self { // Guaranteed to be truncated and does not need sign extension. - Scalar::Raw { data: i as u128, size: 4 } + Scalar::Raw { data: i.into(), size: 4 } } #[inline] pub fn from_u64(i: u64) -> Self { // Guaranteed to be truncated and does not need sign extension. - Scalar::Raw { data: i as u128, size: 8 } + Scalar::Raw { data: i.into(), size: 8 } } #[inline] @@ -376,7 +391,7 @@ impl<'tcx, Tag> Scalar { assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); match self { Scalar::Raw { data, size } => { - assert_eq!(target_size.bytes(), size as u64); + assert_eq!(target_size.bytes(), u64::from(size)); Scalar::check_data(data, size); Ok(data) } @@ -394,7 +409,7 @@ impl<'tcx, Tag> Scalar { assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); match self { Scalar::Raw { data, size } => { - assert_eq!(target_size.bytes(), size as u64); + assert_eq!(target_size.bytes(), u64::from(size)); Scalar::check_data(data, size); Ok(data) } @@ -458,27 +473,27 @@ impl<'tcx, Tag> Scalar { /// Converts the scalar to produce an `u8`. Fails if the scalar is a pointer. pub fn to_u8(self) -> InterpResult<'static, u8> { - self.to_unsigned_with_bit_width(8).map(|v| v as u8) + self.to_unsigned_with_bit_width(8).map(|v| u8::try_from(v).unwrap()) } /// Converts the scalar to produce an `u16`. Fails if the scalar is a pointer. pub fn to_u16(self) -> InterpResult<'static, u16> { - self.to_unsigned_with_bit_width(16).map(|v| v as u16) + self.to_unsigned_with_bit_width(16).map(|v| u16::try_from(v).unwrap()) } /// Converts the scalar to produce an `u32`. Fails if the scalar is a pointer. pub fn to_u32(self) -> InterpResult<'static, u32> { - self.to_unsigned_with_bit_width(32).map(|v| v as u32) + self.to_unsigned_with_bit_width(32).map(|v| u32::try_from(v).unwrap()) } /// Converts the scalar to produce an `u64`. Fails if the scalar is a pointer. pub fn to_u64(self) -> InterpResult<'static, u64> { - self.to_unsigned_with_bit_width(64).map(|v| v as u64) + self.to_unsigned_with_bit_width(64).map(|v| u64::try_from(v).unwrap()) } pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'static, u64> { let b = self.to_bits(cx.data_layout().pointer_size)?; - Ok(b as u64) + Ok(u64::try_from(b).unwrap()) } #[inline] @@ -490,41 +505,41 @@ impl<'tcx, Tag> Scalar { /// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer. 
pub fn to_i8(self) -> InterpResult<'static, i8> { - self.to_signed_with_bit_width(8).map(|v| v as i8) + self.to_signed_with_bit_width(8).map(|v| i8::try_from(v).unwrap()) } /// Converts the scalar to produce an `i16`. Fails if the scalar is a pointer. pub fn to_i16(self) -> InterpResult<'static, i16> { - self.to_signed_with_bit_width(16).map(|v| v as i16) + self.to_signed_with_bit_width(16).map(|v| i16::try_from(v).unwrap()) } /// Converts the scalar to produce an `i32`. Fails if the scalar is a pointer. pub fn to_i32(self) -> InterpResult<'static, i32> { - self.to_signed_with_bit_width(32).map(|v| v as i32) + self.to_signed_with_bit_width(32).map(|v| i32::try_from(v).unwrap()) } /// Converts the scalar to produce an `i64`. Fails if the scalar is a pointer. pub fn to_i64(self) -> InterpResult<'static, i64> { - self.to_signed_with_bit_width(64).map(|v| v as i64) + self.to_signed_with_bit_width(64).map(|v| i64::try_from(v).unwrap()) } pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'static, i64> { let sz = cx.data_layout().pointer_size; let b = self.to_bits(sz)?; let b = sign_extend(b, sz) as i128; - Ok(b as i64) + Ok(i64::try_from(b).unwrap()) } #[inline] pub fn to_f32(self) -> InterpResult<'static, Single> { // Going through `u32` to check size and truncation. - Ok(Single::from_bits(self.to_u32()? as u128)) + Ok(Single::from_bits(self.to_u32()?.into())) } #[inline] pub fn to_f64(self) -> InterpResult<'static, Double> { // Going through `u64` to check size and truncation. - Ok(Double::from_bits(self.to_u64()? as u128)) + Ok(Double::from_bits(self.to_u64()?.into())) } } @@ -671,8 +686,8 @@ pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) -> data.get_bytes( cx, // invent a pointer, only the offset is relevant anyway - Pointer::new(AllocId(0), Size::from_bytes(start as u64)), - Size::from_bytes(len as u64), + Pointer::new(AllocId(0), Size::from_bytes(u64::try_from(start).unwrap())), + Size::from_bytes(u64::try_from(len).unwrap()), ) .unwrap_or_else(|err| bug!("const slice is invalid: {:?}", err)) } else { diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index 5c70b28a56786..e243121558355 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -1,3 +1,5 @@ +use std::convert::TryFrom; + use rustc::ty::adjustment::PointerCast; use rustc::ty::layout::{self, Size, TyLayout}; use rustc::ty::{self, Ty, TypeAndMut, TypeFoldable}; @@ -206,8 +208,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Char => { // `u8` to `char` cast - assert_eq!(v as u8 as u128, v); - Ok(Scalar::from_uint(v, Size::from_bytes(4))) + Ok(Scalar::from_uint(u8::try_from(v).unwrap(), Size::from_bytes(4))) } // Casts to bool are not permitted by rustc, no need to handle them here. @@ -227,6 +228,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { match dest_ty.kind { // float -> uint Uint(t) => { + // FIXME: can we make `bit_width` return a type more compatible with `Size::bits`? 
let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits() as usize); let v = f.to_u128(width).value; // This should already fit the bit width diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index c50146f295adb..0d0f4daa85ef7 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -1,6 +1,7 @@ use std::cell::Cell; use std::fmt::Write; use std::mem; +use std::ops::Add; use rustc::ich::StableHashingContext; use rustc::mir; @@ -413,6 +414,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // and it also rounds up to alignment, which we want to avoid, // as the unsized field's alignment could be smaller. assert!(!layout.ty.is_simd()); + assert!(layout.fields.count() > 0); trace!("DST layout: {:?}", layout); let sized_size = layout.fields.offset(layout.fields.count() - 1); @@ -452,7 +454,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // here. But this is where the add would go.) // Return the sum of sizes and max of aligns. - let size = sized_size + unsized_size; + let size = Size::add(sized_size, unsized_size); // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs index 03aedad0d988d..13bfb9895cb00 100644 --- a/src/librustc_mir/interpret/intrinsics.rs +++ b/src/librustc_mir/interpret/intrinsics.rs @@ -29,11 +29,11 @@ fn numeric_intrinsic<'tcx, Tag>( Primitive::Int(integer, _) => integer.size(), _ => bug!("invalid `{}` argument: {:?}", name, bits), }; - let extra = 128 - size.bits() as u128; + let extra = 128 - u128::from(size.bits()); let bits_out = match name { - sym::ctpop => bits.count_ones() as u128, - sym::ctlz => bits.leading_zeros() as u128 - extra, - sym::cttz => (bits << extra).trailing_zeros() as u128 - extra, + sym::ctpop => u128::from(bits.count_ones()), + sym::ctlz => u128::from(bits.leading_zeros()) - extra, + sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra, sym::bswap => (bits << extra).swap_bytes(), sym::bitreverse => (bits << extra).reverse_bits(), _ => bug!("not a numeric intrinsic: {}", name), @@ -261,7 +261,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let val_bits = self.force_bits(val, layout.size)?; let raw_shift = self.read_scalar(args[1])?.not_undef()?; let raw_shift_bits = self.force_bits(raw_shift, layout.size)?; - let width_bits = layout.size.bits() as u128; + let width_bits = u128::from(layout.size.bits()); let shift_bits = raw_shift_bits % width_bits; let inv_shift_bits = (width_bits - shift_bits) % width_bits; let result_bits = if intrinsic_name == sym::rotate_left { diff --git a/src/librustc_mir/interpret/intrinsics/caller_location.rs b/src/librustc_mir/interpret/intrinsics/caller_location.rs index dc2b0e1b983dc..01f9cdea0f0d3 100644 --- a/src/librustc_mir/interpret/intrinsics/caller_location.rs +++ b/src/librustc_mir/interpret/intrinsics/caller_location.rs @@ -1,3 +1,5 @@ +use std::convert::TryFrom; + use rustc::middle::lang_items::PanicLocationLangItem; use rustc::ty::subst::Subst; use rustc_span::{Span, Symbol}; @@ -59,8 +61,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo()); ( Symbol::intern(&caller.file.name.to_string()), - caller.line as u32, - caller.col_display as u32 + 1, + 
u32::try_from(caller.line).unwrap(), + u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(), ) } diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 110f2ffd9d78c..f51e38eb1ea5c 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -8,6 +8,8 @@ use std::borrow::Cow; use std::collections::VecDeque; +use std::convert::TryFrom; +use std::ops::{Add, Mul}; use std::ptr; use rustc::ty::layout::{Align, HasDataLayout, Size, TargetDataLayout}; @@ -346,7 +348,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { }; Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) { Ok(bits) => { - let bits = bits as u64; // it's ptr-sized + let bits = u64::try_from(bits).unwrap(); // it's ptr-sized assert!(size.bytes() == 0); // Must be non-NULL. if bits == 0 { @@ -667,7 +669,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { } if alloc.undef_mask().is_range_defined(i, i + Size::from_bytes(1)).is_ok() { // this `as usize` is fine, since `i` came from a `usize` - let i = i.bytes() as usize; + let i = usize::try_from(i.bytes()).unwrap(); // Checked definedness (and thus range) and relocations. This access also doesn't // influence interpreter execution but is only for debugging. @@ -835,7 +837,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { ) -> InterpResult<'tcx> { let src = src.into_iter(); let size = Size::from_bytes(src.size_hint().0 as u64); - // `write_bytes` checks that this lower bound matches the upper bound matches reality. + // `write_bytes` checks that this lower bound `size` matches the upper bound and reality. let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? { Some(ptr) => ptr, None => return Ok(()), // zero-sized access @@ -874,14 +876,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { let tcx = self.tcx.tcx; - // The bits have to be saved locally before writing to dest in case src and dest overlap. - assert_eq!(size.bytes() as usize as u64, size.bytes()); - // This checks relocation edges on the src. let src_bytes = self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr(); let dest_bytes = - self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; + self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, Size::mul(size, length))?; // If `dest_bytes` is empty we just optimize to not run anything for zsts. // See #67539 @@ -902,7 +901,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { // touched if the bytes stay undef for the whole interpreter execution. On contemporary // operating system this can avoid physically allocating the page. let dest_alloc = self.get_raw_mut(dest.alloc_id)?; - dest_alloc.mark_definedness(dest, size * length, false); + dest_alloc.mark_definedness(dest, Size::mul(size, length), false); dest_alloc.mark_relocation_range(relocations); return Ok(()); } @@ -913,11 +912,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { // The pointers above remain valid even if the `HashMap` table is moved around because they // point into the `Vec` storing the bytes. 
unsafe { - assert_eq!(size.bytes() as usize as u64, size.bytes()); if src.alloc_id == dest.alloc_id { if nonoverlapping { - if (src.offset <= dest.offset && src.offset + size > dest.offset) - || (dest.offset <= src.offset && dest.offset + size > src.offset) + if (src.offset <= dest.offset && Size::add(src.offset, size) > dest.offset) + || (dest.offset <= src.offset && Size::add(dest.offset, size) > src.offset) { throw_ub_format!("copy_nonoverlapping called on overlapping ranges") } @@ -926,16 +924,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { for i in 0..length { ptr::copy( src_bytes, - dest_bytes.offset((size.bytes() * i) as isize), - size.bytes() as usize, + dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()), + usize::try_from(size.bytes()).unwrap(), ); } } else { for i in 0..length { ptr::copy_nonoverlapping( src_bytes, - dest_bytes.offset((size.bytes() * i) as isize), - size.bytes() as usize, + dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()), + usize::try_from(size.bytes()).unwrap(), ); } } @@ -975,7 +973,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { ) -> InterpResult<'tcx, u128> { match scalar.to_bits_or_ptr(size, self) { Ok(bits) => Ok(bits), - Err(ptr) => Ok(M::ptr_to_int(&self, ptr)? as u128), + Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()), } } } diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index 90fb7eb2bb3ac..9ab4e198db01d 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -341,7 +341,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Turn the wide MPlace into a string (must already be dereferenced!) pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> { let len = mplace.len(self)?; - let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?; + let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(u64::from(len)))?; let str = ::std::str::from_utf8(bytes) .map_err(|err| err_ub_format!("this string is not valid UTF-8: {}", err))?; Ok(str) @@ -406,7 +406,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { - Field(field, _) => self.operand_field(base, field.index() as u64)?, + Field(field, _) => self.operand_field(base, u64::try_from(field.index()).unwrap())?, Downcast(_, variant) => self.operand_downcast(base, variant)?, Deref => self.deref_operand(base)?.into(), Subslice { .. } | ConstantIndex { .. } | Index(_) => { @@ -556,11 +556,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // where none should happen. 
let ptr = Pointer::new( self.tcx.alloc_map.lock().create_memory_alloc(data), - Size::from_bytes(start as u64), // offset: `start` + Size::from_bytes(start.try_into().unwrap()), // offset: `start` ); Operand::Immediate(Immediate::new_slice( self.tag_global_base_pointer(ptr).into(), - (end - start) as u64, // len: `end - start` + u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start` self, )) } @@ -581,7 +581,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { .layout .ty .discriminant_for_variant(*self.tcx, index) - .map_or(index.as_u32() as u128, |discr| discr.val); + .map_or(u128::from(index.as_u32()), |discr| discr.val); return Ok((discr_val, index)); } layout::Variants::Multiple { @@ -593,7 +593,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { }; // read raw discriminant value - let discr_op = self.operand_field(rval, discr_index as u64)?; + let discr_op = self.operand_field(rval, u64::try_from(discr_index).unwrap())?; let discr_val = self.read_immediate(discr_op)?; let raw_discr = discr_val.to_scalar_or_undef(); trace!("discr value: {:?}", raw_discr); @@ -657,7 +657,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { if !ptr_valid { throw_ub!(InvalidDiscriminant(raw_discr.erase_tag().into())) } - (dataful_variant.as_u32() as u128, dataful_variant) + (u128::from(dataful_variant.as_u32()), dataful_variant) } Ok(raw_discr) => { // We need to use machine arithmetic to get the relative variant idx: @@ -686,7 +686,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { .expect("tagged layout for non adt") .variants .len(); - assert!((variant_index as usize) < variants_len); + assert!(usize::try_from(variant_index).unwrap() < variants_len); (u128::from(variant_index), VariantIdx::from_u32(variant_index)) } else { (u128::from(dataful_variant.as_u32()), dataful_variant) diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 6cf11c071e4f7..7bea5357cdf08 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -4,6 +4,7 @@ use std::convert::TryFrom; use std::hash::Hash; +use std::ops::Mul; use rustc::mir; use rustc::mir::interpret::truncate; @@ -405,11 +406,11 @@ where // This can only be reached in ConstProp and non-rustc-MIR. throw_ub!(BoundsCheckFailed { len, index: field }); } - stride * field + Size::mul(stride, field) // `Size` multiplication is checked } layout::FieldPlacement::Union(count) => { assert!( - field < count as u64, + field < u64::try_from(count).unwrap(), "Tried to access field {} of union {:#?} with {} fields", field, base.layout, @@ -420,7 +421,7 @@ where } }; // the only way conversion can fail if is this is an array (otherwise we already panicked - // above). In that case, all fields are equal. + // above). In that case, all fields have the same layout. let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?; // Offset may need adjustment for unsized fields. 
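The change above spells the offset computation as `Size::mul(stride, field)` rather than `stride * field`; both invoke the same overflow-checked `Mul` impl, the qualified form just makes the checked arithmetic visible at the call site. A minimal sketch of that pattern, using a simplified stand-in for the real `Size` type from `librustc_target::abi` (illustration only, not the compiler's code):

use std::ops::Mul;

// Simplified stand-in for rustc's `Size`; illustration only.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Size {
    raw: u64, // size in bytes
}

impl Size {
    fn from_bytes(bytes: u64) -> Size {
        Size { raw: bytes }
    }
    fn bytes(self) -> u64 {
        self.raw
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    fn mul(self, count: u64) -> Size {
        // Panic on overflow instead of silently wrapping the way a raw
        // `u64 * u64` would in release builds.
        match self.raw.checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} overflowed u64", self.raw, count),
        }
    }
}

fn main() {
    let stride = Size::from_bytes(8);
    // `Size::mul(stride, 3)` and `stride * 3` are the same call; the
    // qualified form only differs in how obvious the checked impl is.
    assert_eq!(Size::mul(stride, 3), stride * 3);
    assert_eq!(Size::mul(stride, 3).bytes(), 24);
}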
@@ -465,7 +466,7 @@ where }; let layout = base.layout.field(self, 0)?; let dl = &self.tcx.data_layout; - Ok((0..len).map(move |i| base.offset(i * stride, MemPlaceMeta::None, layout, dl))) + Ok((0..len).map(move |i| base.offset(Size::mul(stride, i), MemPlaceMeta::None, layout, dl))) } fn mplace_subslice( @@ -477,11 +478,11 @@ where ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let len = base.len(self)?; // also asserts that we have a type where this makes sense let actual_to = if from_end { - if from + to > len { + if from.checked_add(to).map_or(true, |to| to > len) { // This can only be reached in ConstProp and non-rustc-MIR. - throw_ub!(BoundsCheckFailed { len: len as u64, index: from as u64 + to as u64 }); + throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) }); } - len - to + len.checked_sub(to).unwrap() } else { to }; @@ -489,12 +490,12 @@ where // Not using layout method because that works with usize, and does not work with slices // (that have count 0 in their layout). let from_offset = match base.layout.fields { - layout::FieldPlacement::Array { stride, .. } => stride * from, + layout::FieldPlacement::Array { stride, .. } => Size::mul(stride, from), // `Size` multiplication is checked _ => bug!("Unexpected layout of index access: {:#?}", base.layout), }; // Compute meta and new layout - let inner_len = actual_to - from; + let inner_len = actual_to.checked_sub(from).unwrap(); let (meta, ty) = match base.layout.ty.kind { // It is not nice to match on the type, but that seems to be the only way to // implement this. @@ -527,7 +528,7 @@ where ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { - Field(field, _) => self.mplace_field(base, field.index() as u64)?, + Field(field, _) => self.mplace_field(base, u64::try_from(field.index()).unwrap())?, Downcast(_, variant) => self.mplace_downcast(base, variant)?, Deref => self.deref_operand(base.into())?, @@ -541,14 +542,14 @@ where ConstantIndex { offset, min_length, from_end } => { let n = base.len(self)?; - if n < min_length as u64 { + if n < u64::from(min_length) { // This can only be reached in ConstProp and non-rustc-MIR. - throw_ub!(BoundsCheckFailed { len: min_length as u64, index: n as u64 }); + throw_ub!(BoundsCheckFailed { len: min_length.into(), index: n.into() }); } let index = if from_end { - assert!(0 < offset && offset - 1 < min_length); - n - u64::from(offset) + assert!(0 < offset && offset <= min_length); + n.checked_sub(u64::from(offset)).unwrap() } else { assert!(offset < min_length); u64::from(offset) @@ -603,7 +604,7 @@ where ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { - Field(field, _) => self.place_field(base, field.index() as u64)?, + Field(field, _) => self.place_field(base, u64::try_from(field.index()).unwrap())?, Downcast(_, variant) => self.place_downcast(base, variant)?, Deref => self.deref_operand(self.place_to_op(base)?)?.into(), // For the other variants, we have to force an allocation. 
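The subslice arithmetic in the hunk below replaces a plain `from + to > len` comparison with one that cannot itself overflow for hostile indices. A small self-contained sketch of the same pattern (the function name and error type are invented for the example):

/// Length of the subslice selected by MIR's `Subslice { from, to, from_end: true }`,
/// i.e. of `&slice[from .. len - to]`.
fn subslice_len_from_end(len: u64, from: u64, to: u64) -> Result<u64, String> {
    // `from + to` may wrap for untrusted inputs, so the sum is checked;
    // `checked_add` returning `None` is treated like an out-of-bounds index.
    if from.checked_add(to).map_or(true, |sum| sum > len) {
        return Err(format!("bounds check failed: len {}, index {}", len, from.saturating_add(to)));
    }
    // Guarded by the check above: `to <= len` and `from <= len - to`,
    // so neither subtraction can underflow.
    Ok(len.checked_sub(to).unwrap() - from)
}

fn main() {
    assert_eq!(subslice_len_from_end(10, 2, 3), Ok(5));
    assert!(subslice_len_from_end(10, u64::MAX, 3).is_err()); // sum would wrap
}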
@@ -1028,7 +1029,7 @@ where kind: MemoryKind, ) -> MPlaceTy<'tcx, M::PointerTag> { let ptr = self.memory.allocate_bytes(str.as_bytes(), kind); - let meta = Scalar::from_uint(str.len() as u128, self.pointer_size()); + let meta = Scalar::from_uint(u128::try_from(str.len()).unwrap(), self.pointer_size()); let mplace = MemPlace { ptr: ptr.into(), align: Align::from_bytes(1).unwrap(), @@ -1072,7 +1073,7 @@ where let size = discr_layout.value.size(self); let discr_val = truncate(discr_val, size); - let discr_dest = self.place_field(dest, discr_index as u64)?; + let discr_dest = self.place_field(dest, u64::try_from(discr_index).unwrap())?; self.write_scalar(Scalar::from_uint(discr_val, size), discr_dest)?; } layout::Variants::Multiple { @@ -1103,7 +1104,7 @@ where niche_start_val, )?; // Write result. - let niche_dest = self.place_field(dest, discr_index as u64)?; + let niche_dest = self.place_field(dest, u64::try_from(discr_index).unwrap())?; self.write_immediate(*discr_val, niche_dest)?; } } diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index cb11df18378d9..eb33a8700f33b 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -2,6 +2,8 @@ //! //! The main entry point is the `step` method. +use std::convert::TryFrom; + use rustc::mir; use rustc::mir::interpret::{InterpResult, PointerArithmetic, Scalar}; use rustc::ty::layout::LayoutOf; @@ -192,7 +194,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Ignore zero-sized fields. if !op.layout.is_zst() { let field_index = active_field_index.unwrap_or(i); - let field_dest = self.place_field(dest, field_index as u64)?; + let field_dest = + self.place_field(dest, u64::try_from(field_index).unwrap())?; self.copy_op(op, field_dest)?; } } diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs index 6b0bbe4f6e0bb..9c52af41272e3 100644 --- a/src/librustc_mir/interpret/terminator.rs +++ b/src/librustc_mir/interpret/terminator.rs @@ -1,4 +1,5 @@ use std::borrow::Cow; +use std::convert::TryFrom; use rustc::ty::layout::{self, LayoutOf, TyLayout}; use rustc::ty::Instance; @@ -29,6 +30,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { trace!("SwitchInt({:?})", *discr); // Branch to the `otherwise` case by default, if no match is found. 
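+        // `targets` of a `SwitchInt` always contains the `otherwise` block as its
+        // last entry, so it is never empty and `targets.len() - 1` cannot underflow.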
+ assert!(targets.len() > 0); let mut target_block = targets[targets.len() - 1]; for (index, &const_int) in values.iter().enumerate() { @@ -392,7 +394,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { }; // Find and consult vtable let vtable = receiver_place.vtable(); - let drop_fn = self.get_vtable_slot(vtable, idx)?; + let drop_fn = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?; // `*mut receiver_place.layout.ty` is almost the layout that we // want for args[0]: We have to project to field 0 because we want diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index efbbca534856a..10f746e135aa9 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -1,9 +1,11 @@ -use super::{FnVal, InterpCx, Machine, MemoryKind}; +use std::ops::Mul; use rustc::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar}; use rustc::ty::layout::{Align, HasDataLayout, LayoutOf, Size}; use rustc::ty::{self, Instance, Ty, TypeFoldable}; +use super::{FnVal, InterpCx, Machine, MemoryKind}; + impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Creates a dynamic vtable for the given type and vtable origin. This is used only for /// objects. @@ -103,11 +105,12 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn get_vtable_slot( &self, vtable: Scalar, - idx: usize, + idx: u64, ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> { let ptr_size = self.pointer_size(); // Skip over the 'drop_ptr', 'size', and 'align' fields. - let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?; + let vtable_slot = + vtable.ptr_offset(Size::mul(ptr_size, idx.checked_add(3).unwrap()), self)?; let vtable_slot = self .memory .check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)? diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs index 6f9543bf95a3b..164478362bf56 100644 --- a/src/librustc_mir/interpret/validity.rs +++ b/src/librustc_mir/interpret/validity.rs @@ -4,11 +4,12 @@ //! That's useful because it means other passes (e.g. promotion) can rely on `const`s //! to be const-safe. +use std::convert::TryFrom; use std::fmt::Write; -use std::ops::RangeInclusive; +use std::ops::{Mul, RangeInclusive}; use rustc::ty; -use rustc::ty::layout::{self, LayoutOf, TyLayout, VariantIdx}; +use rustc::ty::layout::{self, LayoutOf, Size, TyLayout, VariantIdx}; use rustc_data_structures::fx::FxHashSet; use rustc_hir as hir; use rustc_span::symbol::{sym, Symbol}; @@ -747,7 +748,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> // This is the element type size. let layout = self.ecx.layout_of(tys)?; // This is the size in bytes of the whole array. - let size = layout.size * len; + let size = Size::mul(layout.size, len); // Size is not 0, get a pointer. let ptr = self.ecx.force_ptr(mplace.ptr)?; @@ -777,7 +778,8 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> // Some byte was undefined, determine which // element that byte belongs to so we can // provide an index. 
- let i = (ptr.offset.bytes() / layout.size.bytes()) as usize; + let i = usize::try_from(ptr.offset.bytes() / layout.size.bytes()) + .unwrap(); self.path.push(PathElem::ArrayElem(i)); throw_validation_failure!("undefined bytes", self.path) diff --git a/src/librustc_mir/interpret/visitor.rs b/src/librustc_mir/interpret/visitor.rs index 8808fc70cf76b..f80ca3d4b874b 100644 --- a/src/librustc_mir/interpret/visitor.rs +++ b/src/librustc_mir/interpret/visitor.rs @@ -1,6 +1,8 @@ //! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound //! types until we arrive at the leaves, with custom handling for primitive types. +use std::convert::TryFrom; + use rustc::mir::interpret::InterpResult; use rustc::ty; use rustc::ty::layout::{self, TyLayout, VariantIdx}; @@ -206,7 +208,7 @@ macro_rules! make_value_visitor { // errors: Projecting to a field needs access to `ecx`. let fields: Vec> = (0..offsets.len()).map(|i| { - v.project_field(self.ecx(), i as u64) + v.project_field(self.ecx(), u64::try_from(i).unwrap()) }) .collect(); self.visit_aggregate(v, fields.into_iter())?; From 9de600892da3c86fb92c1dfde455d39657835739 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sat, 21 Mar 2020 13:58:06 +0100 Subject: [PATCH 16/26] make bit_width return u64, consistently with other sizes in the compiler --- src/librustc_ast/ast.rs | 6 ++--- src/librustc_codegen_llvm/intrinsic.rs | 34 ++++++++++++-------------- src/librustc_mir/interpret/cast.rs | 13 +++++----- 3 files changed, 25 insertions(+), 28 deletions(-) diff --git a/src/librustc_ast/ast.rs b/src/librustc_ast/ast.rs index c796a37553164..3e7fb0e73bffe 100644 --- a/src/librustc_ast/ast.rs +++ b/src/librustc_ast/ast.rs @@ -1614,7 +1614,7 @@ impl FloatTy { } } - pub fn bit_width(self) -> usize { + pub fn bit_width(self) -> u64 { match self { FloatTy::F32 => 32, FloatTy::F64 => 64, @@ -1663,7 +1663,7 @@ impl IntTy { format!("{}{}", val as u128, self.name_str()) } - pub fn bit_width(&self) -> Option { + pub fn bit_width(&self) -> Option { Some(match *self { IntTy::Isize => return None, IntTy::I8 => 8, @@ -1725,7 +1725,7 @@ impl UintTy { format!("{}{}", val, self.name_str()) } - pub fn bit_width(&self) -> Option { + pub fn bit_width(&self) -> Option { Some(match *self { UintTy::Usize => return None, UintTy::U8 => 8, diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 95982c860f3c1..bc25b9496d9b5 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -1172,8 +1172,8 @@ fn generic_simd_intrinsic( let m_len = match in_ty.kind { // Note that this `.unwrap()` crashes for isize/usize, that's sort // of intentional as there's not currently a use case for that. - ty::Int(i) => i.bit_width().unwrap() as u64, - ty::Uint(i) => i.bit_width().unwrap() as u64, + ty::Int(i) => i.bit_width().unwrap(), + ty::Uint(i) => i.bit_width().unwrap(), _ => return_error!("`{}` is not an integral type", in_ty), }; require_simd!(arg_tys[1], "argument"); @@ -1354,20 +1354,18 @@ fn generic_simd_intrinsic( // trailing bits. 
let expected_int_bits = in_len.max(8); match ret_ty.kind { - ty::Uint(i) if i.bit_width() == Some(expected_int_bits as usize) => (), + ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (), _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits), } // Integer vector : let (i_xn, in_elem_bitwidth) = match in_elem.kind { - ty::Int(i) => ( - args[0].immediate(), - i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _), - ), - ty::Uint(i) => ( - args[0].immediate(), - i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _), - ), + ty::Int(i) => { + (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits())) + } + ty::Uint(i) => { + (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits())) + } _ => return_error!( "vector argument `{}`'s element type `{}`, expected integer element type", in_ty, @@ -1378,16 +1376,16 @@ fn generic_simd_intrinsic( // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position. let shift_indices = vec![ - bx.cx.const_int(bx.type_ix(in_elem_bitwidth as _), (in_elem_bitwidth - 1) as _); + bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _); in_len as _ ]; let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice())); // Truncate vector to an - let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len as _)); + let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len)); // Bitcast to iN: - let i_ = bx.bitcast(i1xn, bx.type_ix(in_len as _)); + let i_ = bx.bitcast(i1xn, bx.type_ix(in_len)); // Zero-extend iN to the bitmask type: - return Ok(bx.zext(i_, bx.type_ix(expected_int_bits as _))); + return Ok(bx.zext(i_, bx.type_ix(expected_int_bits))); } fn simd_simple_float_intrinsic( @@ -2099,7 +2097,7 @@ fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, boo match ty.kind { ty::Int(t) => Some(( match t { - ast::IntTy::Isize => cx.tcx.sess.target.ptr_width as u64, + ast::IntTy::Isize => u64::from(cx.tcx.sess.target.ptr_width), ast::IntTy::I8 => 8, ast::IntTy::I16 => 16, ast::IntTy::I32 => 32, @@ -2110,7 +2108,7 @@ fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, boo )), ty::Uint(t) => Some(( match t { - ast::UintTy::Usize => cx.tcx.sess.target.ptr_width as u64, + ast::UintTy::Usize => u64::from(cx.tcx.sess.target.ptr_width), ast::UintTy::U8 => 8, ast::UintTy::U16 => 16, ast::UintTy::U32 => 32, @@ -2127,7 +2125,7 @@ fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, boo // Returns None if the type is not a float fn float_type_width(ty: Ty<'_>) -> Option { match ty.kind { - ty::Float(t) => Some(t.bit_width() as u64), + ty::Float(t) => Some(t.bit_width()), _ => None, } } diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index e243121558355..1eff420d306a0 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -228,17 +228,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { match dest_ty.kind { // float -> uint Uint(t) => { - // FIXME: can we make `bit_width` return a type more compatible with `Size::bits`? 
-                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits() as usize);
-                let v = f.to_u128(width).value;
+                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits());
+                let v = f.to_u128(usize::try_from(width).unwrap()).value;
                 // This should already fit the bit width
-                Ok(Scalar::from_uint(v, Size::from_bits(width as u64)))
+                Ok(Scalar::from_uint(v, Size::from_bits(width)))
             }
             // float -> int
             Int(t) => {
-                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits() as usize);
-                let v = f.to_i128(width).value;
-                Ok(Scalar::from_int(v, Size::from_bits(width as u64)))
+                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits());
+                let v = f.to_i128(usize::try_from(width).unwrap()).value;
+                Ok(Scalar::from_int(v, Size::from_bits(width)))
             }
             // float -> f32
             Float(FloatTy::F32) => Ok(Scalar::from_f32(f.convert(&mut false).value)),

From cd15b659c7f20d9b740b3c7b53dde9dcd0132f9d Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sat, 21 Mar 2020 16:28:34 +0100
Subject: [PATCH 17/26] avoid double-cast in mplace_field

---
 src/librustc_mir/interpret/place.rs | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs
index 7bea5357cdf08..5870266e69a00 100644
--- a/src/librustc_mir/interpret/place.rs
+++ b/src/librustc_mir/interpret/place.rs
@@ -396,9 +396,10 @@ where
         field: u64,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
         // Not using the layout method because we want to compute on u64
-        let offset = match base.layout.fields {
+        let (offset, field_layout) = match base.layout.fields {
             layout::FieldPlacement::Arbitrary { ref offsets, .. } => {
-                offsets[usize::try_from(field).unwrap()]
+                let field = usize::try_from(field).unwrap();
+                (offsets[field], base.layout.field(self, field)?)
             }
             layout::FieldPlacement::Array { stride, .. } => {
                 let len = base.len(self)?;
@@ -406,23 +407,22 @@ where
                     // This can only be reached in ConstProp and non-rustc-MIR.
                     throw_ub!(BoundsCheckFailed { len, index: field });
                 }
-                Size::mul(stride, field) // `Size` multiplication is checked
+                // All fields have the same layout.
+                (Size::mul(stride, field), base.layout.field(self, 0)?)
             }
             layout::FieldPlacement::Union(count) => {
+                let field = usize::try_from(field).unwrap();
                 assert!(
-                    field < u64::try_from(count).unwrap(),
+                    field < count,
                     "Tried to access field {} of union {:#?} with {} fields",
                     field,
                     base.layout,
                     count
                 );
                 // Offset is always 0
-                Size::from_bytes(0)
+                (Size::from_bytes(0), base.layout.field(self, field)?)
             }
         };
-        // the only way conversion can fail if is this is an array (otherwise we already panicked
-        // above). In that case, all fields have the same layout.
-        let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?;
 
         // Offset may need adjustment for unsized fields.
        let (meta, offset) = if field_layout.is_unsized() {

From d7e2650db200dcb918b5346e137ee6c1e4bc614a Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sat, 21 Mar 2020 17:17:01 +0100
Subject: [PATCH 18/26] miri: avoid a bunch of casts by offering usize-based
 field indexing

---
 src/librustc_mir/const_eval/mod.rs       | 11 ++-
 src/librustc_mir/interpret/cast.rs       |  4 +-
 src/librustc_mir/interpret/intrinsics.rs |  6 +-
 src/librustc_mir/interpret/operand.rs    | 22 +++++-
 src/librustc_mir/interpret/place.rs      | 93 ++++++++++++++----------
 src/librustc_mir/interpret/step.rs       |  5 +-
 src/librustc_mir/interpret/terminator.rs |  4 +-
 src/librustc_mir/interpret/traits.rs     |  7 +-
 src/librustc_mir/interpret/visitor.rs    | 19 +++--
 src/librustc_target/abi/mod.rs           |  3 +-
 10 files changed, 106 insertions(+), 68 deletions(-)

diff --git a/src/librustc_mir/const_eval/mod.rs b/src/librustc_mir/const_eval/mod.rs
index 605091d6c7d41..6e7e6f9d34526 100644
--- a/src/librustc_mir/const_eval/mod.rs
+++ b/src/librustc_mir/const_eval/mod.rs
@@ -1,5 +1,7 @@
 // Not in interpret to make sure we do not use private implementation details
 
+use std::convert::TryFrom;
+
 use rustc::mir;
 use rustc::ty::layout::VariantIdx;
 use rustc::ty::{self, TyCtxt};
@@ -37,7 +39,7 @@ pub(crate) fn const_field<'tcx>(
         Some(variant) => ecx.operand_downcast(op, variant).unwrap(),
     };
     // then project
-    let field = ecx.operand_field(down, field.index() as u64).unwrap();
+    let field = ecx.operand_field(down, field.index()).unwrap();
     // and finally move back to the const world, always normalizing because
     // this is not called for statics.
     op_to_const(&ecx, field)
@@ -68,10 +70,11 @@ pub(crate) fn destructure_const<'tcx>(
 
     let variant = ecx.read_discriminant(op).unwrap().1;
 
+    // We go to `usize` as we cannot allocate anything bigger anyway.
     let field_count = match val.ty.kind {
-        ty::Array(_, len) => len.eval_usize(tcx, param_env),
-        ty::Adt(def, _) => def.variants[variant].fields.len() as u64,
-        ty::Tuple(substs) => substs.len() as u64,
+        ty::Array(_, len) => usize::try_from(len.eval_usize(tcx, param_env)).unwrap(),
+        ty::Adt(def, _) => def.variants[variant].fields.len(),
+        ty::Tuple(substs) => substs.len(),
         _ => bug!("cannot destructure constant {:?}", val),
     };
 
diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs
index 1eff420d306a0..f7327825ca4b7 100644
--- a/src/librustc_mir/interpret/cast.rs
+++ b/src/librustc_mir/interpret/cast.rs
@@ -320,11 +320,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Example: `Arc<T>` -> `Arc<Trait>`
         // here we need to increase the size of every &T thin ptr field to a fat ptr
         for i in 0..src.layout.fields.count() {
-            let dst_field = self.place_field(dest, i as u64)?;
+            let dst_field = self.place_field(dest, i)?;
             if dst_field.layout.is_zst() {
                 continue;
             }
-            let src_field = self.operand_field(src, i as u64)?;
+            let src_field = self.operand_field(src, i)?;
             if src_field.layout.ty == dst_field.layout.ty {
                 self.copy_op(src_field, dst_field)?;
             } else {
diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs
index 13bfb9895cb00..e5f89b10e76ed 100644
--- a/src/librustc_mir/interpret/intrinsics.rs
+++ b/src/librustc_mir/interpret/intrinsics.rs
@@ -350,8 +350,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 );
 
                 for i in 0..len {
-                    let place = self.place_field(dest, i)?;
-                    let value = if i == index { elem } else { self.operand_field(input, i)?
}; + let place = self.place_index(dest, i)?; + let value = if i == index { elem } else { self.operand_index(input, i)? }; self.copy_op(value, place)?; } } @@ -370,7 +370,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { "Return type `{}` must match vector element type `{}`", dest.layout.ty, e_ty ); - self.copy_op(self.operand_field(args[0], index)?, dest)?; + self.copy_op(self.operand_index(args[0], index)?, dest)?; } _ => return Ok(false), } diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index 9ab4e198db01d..9c2175dc0e40a 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -351,7 +351,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn operand_field( &self, op: OpTy<'tcx, M::PointerTag>, - field: u64, + field: usize, ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { let base = match op.try_as_mplace(self) { Ok(mplace) => { @@ -362,7 +362,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Err(value) => value, }; - let field = field.try_into().unwrap(); let field_layout = op.layout.field(self, field)?; if field_layout.is_zst() { let immediate = Scalar::zst().into(); @@ -384,6 +383,21 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout }) } + pub fn operand_index( + &self, + op: OpTy<'tcx, M::PointerTag>, + index: u64, + ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { + if let Ok(index) = usize::try_from(index) { + // We can just treat this as a field. + self.operand_field(op, index) + } else { + // Indexing into a big array. This must be an mplace. + let mplace = op.assert_mem_place(self); + Ok(self.mplace_index(mplace, index)?.into()) + } + } + pub fn operand_downcast( &self, op: OpTy<'tcx, M::PointerTag>, @@ -406,7 +420,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { - Field(field, _) => self.operand_field(base, u64::try_from(field.index()).unwrap())?, + Field(field, _) => self.operand_field(base, field.index())?, Downcast(_, variant) => self.operand_downcast(base, variant)?, Deref => self.deref_operand(base)?.into(), Subslice { .. } | ConstantIndex { .. } | Index(_) => { @@ -593,7 +607,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { }; // read raw discriminant value - let discr_op = self.operand_field(rval, u64::try_from(discr_index).unwrap())?; + let discr_op = self.operand_field(rval, discr_index)?; let discr_val = self.read_immediate(discr_op)?; let raw_discr = discr_val.to_scalar_or_undef(); trace!("discr value: {:?}", raw_discr); diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 5870266e69a00..ae754ab4feb26 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -386,43 +386,20 @@ where Ok(place) } - /// Offset a pointer to project to a field. Unlike `place_field`, this is always - /// possible without allocating, so it can take `&self`. Also return the field's layout. + /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is + /// always possible without allocating, so it can take `&self`. Also return the field's layout. /// This supports both struct and array fields. + /// + /// This also works for arrays, but then the `usize` index type is restricting. 
+ /// For indexing into arrays, use `mplace_index`. #[inline(always)] pub fn mplace_field( &self, base: MPlaceTy<'tcx, M::PointerTag>, - field: u64, + field: usize, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { - // Not using the layout method because we want to compute on u64 - let (offset, field_layout) = match base.layout.fields { - layout::FieldPlacement::Arbitrary { ref offsets, .. } => { - let field = usize::try_from(field).unwrap(); - (offsets[field], base.layout.field(self, field)?) - } - layout::FieldPlacement::Array { stride, .. } => { - let len = base.len(self)?; - if field >= len { - // This can only be reached in ConstProp and non-rustc-MIR. - throw_ub!(BoundsCheckFailed { len, index: field }); - } - // All fields have the same layout. - (Size::mul(stride, field), base.layout.field(self, 9)?) - } - layout::FieldPlacement::Union(count) => { - let field = usize::try_from(field).unwrap(); - assert!( - field < count, - "Tried to access field {} of union {:#?} with {} fields", - field, - base.layout, - count - ); - // Offset is always 0 - (Size::from_bytes(0), base.layout.field(self, field)?) - } - }; + let offset = base.layout.fields.offset(field); + let field_layout = base.layout.field(self, field)?; // Offset may need adjustment for unsized fields. let (meta, offset) = if field_layout.is_unsized() { @@ -452,6 +429,32 @@ where base.offset(offset, meta, field_layout, self) } + /// Index into an array. + #[inline(always)] + pub fn mplace_index( + &self, + base: MPlaceTy<'tcx, M::PointerTag>, + index: u64, + ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { + // Not using the layout method because we want to compute on u64 + match base.layout.fields { + layout::FieldPlacement::Array { stride, .. } => { + let len = base.len(self)?; + if index >= len { + // This can only be reached in ConstProp and non-rustc-MIR. + throw_ub!(BoundsCheckFailed { len, index }); + } + let offset = Size::mul(stride, index); + // All fields have the same layout. + let field_layout = base.layout.field(self, 0)?; + + assert!(!field_layout.is_unsized()); + base.offset(offset, MemPlaceMeta::None, field_layout, self) + } + _ => bug!("`mplace_index` called on non-array type {:?}", base.layout.ty), + } + } + // Iterates over all fields of an array. Much more efficient than doing the // same by repeatedly calling `mplace_array`. pub(super) fn mplace_array_fields( @@ -528,7 +531,7 @@ where ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { - Field(field, _) => self.mplace_field(base, u64::try_from(field.index()).unwrap())?, + Field(field, _) => self.mplace_field(base, field.index())?, Downcast(_, variant) => self.mplace_downcast(base, variant)?, Deref => self.deref_operand(base.into())?, @@ -536,8 +539,11 @@ where let layout = self.layout_of(self.tcx.types.usize)?; let n = self.access_local(self.frame(), local, Some(layout))?; let n = self.read_scalar(n)?; - let n = self.force_bits(n.not_undef()?, self.tcx.data_layout.pointer_size)?; - self.mplace_field(base, u64::try_from(n).unwrap())? + let n = u64::try_from( + self.force_bits(n.not_undef()?, self.tcx.data_layout.pointer_size)?, + ) + .unwrap(); + self.mplace_index(base, n)? } ConstantIndex { offset, min_length, from_end } => { @@ -555,7 +561,7 @@ where u64::from(offset) }; - self.mplace_field(base, index)? + self.mplace_index(base, index)? 
} Subslice { from, to, from_end } => { @@ -571,7 +577,7 @@ where pub fn place_field( &mut self, base: PlaceTy<'tcx, M::PointerTag>, - field: u64, + field: usize, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { // FIXME: We could try to be smarter and avoid allocation for fields that span the // entire place. @@ -579,6 +585,15 @@ where Ok(self.mplace_field(mplace, field)?.into()) } + pub fn place_index( + &mut self, + base: PlaceTy<'tcx, M::PointerTag>, + index: u64, + ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { + let mplace = self.force_allocation(base)?; + Ok(self.mplace_index(mplace, index)?.into()) + } + pub fn place_downcast( &self, base: PlaceTy<'tcx, M::PointerTag>, @@ -604,7 +619,7 @@ where ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { - Field(field, _) => self.place_field(base, u64::try_from(field.index()).unwrap())?, + Field(field, _) => self.place_field(base, field.index())?, Downcast(_, variant) => self.place_downcast(base, variant)?, Deref => self.deref_operand(self.place_to_op(base)?)?.into(), // For the other variants, we have to force an allocation. @@ -1073,7 +1088,7 @@ where let size = discr_layout.value.size(self); let discr_val = truncate(discr_val, size); - let discr_dest = self.place_field(dest, u64::try_from(discr_index).unwrap())?; + let discr_dest = self.place_field(dest, discr_index)?; self.write_scalar(Scalar::from_uint(discr_val, size), discr_dest)?; } layout::Variants::Multiple { @@ -1104,7 +1119,7 @@ where niche_start_val, )?; // Write result. - let niche_dest = self.place_field(dest, u64::try_from(discr_index).unwrap())?; + let niche_dest = self.place_field(dest, discr_index)?; self.write_immediate(*discr_val, niche_dest)?; } } diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index eb33a8700f33b..6ec11d42f52d2 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -2,8 +2,6 @@ //! //! The main entry point is the `step` method. -use std::convert::TryFrom; - use rustc::mir; use rustc::mir::interpret::{InterpResult, PointerArithmetic, Scalar}; use rustc::ty::layout::LayoutOf; @@ -194,8 +192,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Ignore zero-sized fields. 
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
-                        let field_dest =
-                            self.place_field(dest, u64::try_from(field_index).unwrap())?;
+                        let field_dest = self.place_field(dest, field_index)?;
                        self.copy_op(op, field_dest)?;
                    }
                }
diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs
index 9c52af41272e3..5ce5ba31a0987 100644
--- a/src/librustc_mir/interpret/terminator.rs
+++ b/src/librustc_mir/interpret/terminator.rs
@@ -309,7 +309,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         .map(|&a| Ok(a))
                         .chain(
                             (0..untuple_arg.layout.fields.count())
-                                .map(|i| self.operand_field(untuple_arg, i as u64)),
+                                .map(|i| self.operand_field(untuple_arg, i)),
                         )
                         .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>()?,
@@ -332,7 +332,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 if Some(local) == body.spread_arg {
                     // Must be a tuple
                     for i in 0..dest.layout.fields.count() {
-                        let dest = self.place_field(dest, i as u64)?;
+                        let dest = self.place_field(dest, i)?;
                         self.pass_argument(rust_abi, &mut caller_iter, dest)?;
                     }
                 } else {
diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs
index 10f746e135aa9..fa8d67029dfcc 100644
--- a/src/librustc_mir/interpret/traits.rs
+++ b/src/librustc_mir/interpret/traits.rs
@@ -1,3 +1,4 @@
+use std::convert::TryFrom;
 use std::ops::Mul;
 
 use rustc::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
@@ -56,7 +57,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // `get_vtable` in `rust_codegen_llvm/meth.rs`.
         // /////////////////////////////////////////////////////////////////////////////////////////
         let vtable = self.memory.allocate(
-            ptr_size * (3 + methods.len() as u64),
+            Size::mul(ptr_size, u64::try_from(methods.len()).unwrap().checked_add(3).unwrap()),
             ptr_align,
             MemoryKind::Vtable,
         );
@@ -172,10 +173,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             .expect("cannot be a ZST");
         let alloc = self.memory.get_raw(vtable.alloc_id)?;
         let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)?.not_undef()?;
-        let size = self.force_bits(size, pointer_size)? as u64;
+        let size = u64::try_from(self.force_bits(size, pointer_size)?).unwrap();
         let align =
             alloc.read_ptr_sized(self, vtable.offset(pointer_size * 2, self)?)?.not_undef()?;
-        let align = self.force_bits(align, pointer_size)? as u64;
+        let align = u64::try_from(self.force_bits(align, pointer_size)?).unwrap();
 
         if size >= self.tcx.data_layout().obj_size_bound() {
             throw_ub_format!(
diff --git a/src/librustc_mir/interpret/visitor.rs b/src/librustc_mir/interpret/visitor.rs
index f80ca3d4b874b..e8a7626406413 100644
--- a/src/librustc_mir/interpret/visitor.rs
+++ b/src/librustc_mir/interpret/visitor.rs
@@ -1,8 +1,6 @@
 //! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
 //! types until we arrive at the leaves, with custom handling for primitive types.
 
-use std::convert::TryFrom;
-
 use rustc::mir::interpret::InterpResult;
 use rustc::ty;
 use rustc::ty::layout::{self, TyLayout, VariantIdx};
@@ -30,7 +28,8 @@ pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy {
     ) -> InterpResult<'tcx, Self>;
 
     /// Projects to the n-th field.
-    fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: u64) -> InterpResult<'tcx, Self>;
+    fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize)
+        -> InterpResult<'tcx, Self>;
 }
 
 // Operands and memory-places are both values.
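The `usize`-vs-`u64` split this commit introduces can be sketched outside the compiler (all names below are invented for the illustration, not the rustc API): field counts describe host-side layout metadata, so `usize` fits them naturally, while array lengths belong to the interpreted program and must stay `u64` even when the interpreter itself runs on a 32-bit host.

// Illustration only; these types are simplified stand-ins.
struct Layout {
    field_offsets: Vec<u64>, // one entry per field: inherently usize-indexed
}

fn field_offset(layout: &Layout, field: usize) -> u64 {
    layout.field_offsets[field] // host-side slice access, no cast needed
}

fn array_elem_offset(stride: u64, index: u64, len: u64) -> Option<u64> {
    // A u64 array index is bounds-checked against the interpreted array's
    // length; only the offset multiplication itself needs an overflow check.
    if index >= len {
        return None;
    }
    stride.checked_mul(index)
}

fn main() {
    let layout = Layout { field_offsets: vec![0, 8, 16] };
    assert_eq!(field_offset(&layout, 2), 16);
    // An index at the 32-bit limit is handled uniformly as u64.
    assert_eq!(array_elem_offset(8, u64::from(u32::MAX), u64::MAX), Some(34_359_738_360));
}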
@@ -64,7 +63,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M:: } #[inline(always)] - fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: u64) -> InterpResult<'tcx, Self> { + fn project_field( + self, + ecx: &InterpCx<'mir, 'tcx, M>, + field: usize, + ) -> InterpResult<'tcx, Self> { ecx.operand_field(self, field) } } @@ -98,7 +101,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for MPlaceTy<'tcx, } #[inline(always)] - fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: u64) -> InterpResult<'tcx, Self> { + fn project_field( + self, + ecx: &InterpCx<'mir, 'tcx, M>, + field: usize, + ) -> InterpResult<'tcx, Self> { ecx.mplace_field(self, field) } } @@ -208,7 +215,7 @@ macro_rules! make_value_visitor { // errors: Projecting to a field needs access to `ecx`. let fields: Vec> = (0..offsets.len()).map(|i| { - v.project_field(self.ecx(), u64::try_from(i).unwrap()) + v.project_field(self.ecx(), i) }) .collect(); self.visit_aggregate(v, fields.into_iter())?; diff --git a/src/librustc_target/abi/mod.rs b/src/librustc_target/abi/mod.rs index 635fb80b65980..7cf2b690d352d 100644 --- a/src/librustc_target/abi/mod.rs +++ b/src/librustc_target/abi/mod.rs @@ -3,6 +3,7 @@ pub use Primitive::*; use crate::spec::Target; +use std::convert::TryFrom; use std::ops::{Add, AddAssign, Deref, Mul, Range, RangeInclusive, Sub}; use rustc_index::vec::{Idx, IndexVec}; @@ -665,7 +666,7 @@ impl FieldPlacement { Size::ZERO } FieldPlacement::Array { stride, count } => { - let i = i as u64; + let i = u64::try_from(i).unwrap(); assert!(i < count); stride * i } From f16b4910150f841d57c92aed6fa089f387da0d92 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sat, 21 Mar 2020 17:28:46 +0100 Subject: [PATCH 19/26] remove unnecessary cast --- src/librustc_mir/interpret/operand.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index 9c2175dc0e40a..f6be92174ad46 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -341,7 +341,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Turn the wide MPlace into a string (must already be dereferenced!) 
    pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
         let len = mplace.len(self)?;
-        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(u64::from(len)))?;
+        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
         let str = ::std::str::from_utf8(bytes)
             .map_err(|err| err_ub_format!("this string is not valid UTF-8: {}", err))?;
         Ok(str)

From 0bc108a13be32d8b6c50c1b4e8684f91cba71bc8 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sun, 22 Mar 2020 17:48:11 +0100
Subject: [PATCH 20/26] make Size::from* methods generic in the integer type
 they accept

---
 src/librustc/mir/interpret/allocation.rs      | 13 ++++---------
 src/librustc/mir/interpret/value.rs           |  4 ++--
 src/librustc/ty/print/pretty.rs               |  4 ++--
 src/librustc_codegen_ssa/mir/operand.rs       |  2 +-
 src/librustc_mir/interpret/memory.rs          |  2 +-
 src/librustc_mir/interpret/operand.rs         |  4 ++--
 src/librustc_mir/interpret/place.rs           |  2 +-
 src/librustc_mir_build/hair/pattern/_match.rs |  6 +++---
 src/librustc_target/abi/mod.rs                | 11 ++++++-----
 9 files changed, 22 insertions(+), 26 deletions(-)

diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs
index 4d42e796d10e5..5440aef6fe6c9 100644
--- a/src/librustc/mir/interpret/allocation.rs
+++ b/src/librustc/mir/interpret/allocation.rs
@@ -92,7 +92,7 @@ impl<Tag> Allocation<Tag> {
     /// Creates a read-only allocation initialized by the given bytes
     pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
         let bytes = slice.into().into_owned();
-        let size = Size::from_bytes(u64::try_from(bytes.len()).unwrap());
+        let size = Size::from_bytes(bytes.len());
         Self {
             bytes,
             relocations: Relocations::new(),
@@ -293,8 +293,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let offset = usize::try_from(ptr.offset.bytes()).unwrap();
         Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
-                let size_with_null =
-                    Size::from_bytes(u64::try_from(size.checked_add(1).unwrap()).unwrap());
+                let size_with_null = Size::from_bytes(size.checked_add(1).unwrap());
                 // Go through `get_bytes` for checks and AllocationExtra hooks.
                 // We read the null, so we include it in the request, but we want it removed
                 // from the result, so we do subslicing.
@@ -339,7 +338,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let (lower, upper) = src.size_hint();
         let len = upper.expect("can only write bounded iterators");
         assert_eq!(lower, len, "can only write iterators with a precise length");
-        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(u64::try_from(len).unwrap()))?;
+        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
         // `zip` would stop when the first iterator ends; we want to definitely
         // cover all of `bytes`.
         for dest in bytes {
@@ -382,11 +381,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         } else {
             match self.relocations.get(&ptr.offset) {
                 Some(&(tag, alloc_id)) => {
-                    let ptr = Pointer::new_with_tag(
-                        alloc_id,
-                        Size::from_bytes(u64::try_from(bits).unwrap()),
-                        tag,
-                    );
+                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
                     return Ok(ScalarMaybeUndef::Scalar(ptr.into()));
                 }
                 None => {}
diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs
index 4474fcd19188b..f00952e3725eb 100644
--- a/src/librustc/mir/interpret/value.rs
+++ b/src/librustc/mir/interpret/value.rs
@@ -686,8 +686,8 @@ pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) -> &'tcx [u8] {
         data.get_bytes(
             cx,
             // invent a pointer, only the offset is relevant anyway
-            Pointer::new(AllocId(0), Size::from_bytes(u64::try_from(start).unwrap())),
-            Size::from_bytes(u64::try_from(len).unwrap()),
+            Pointer::new(AllocId(0), Size::from_bytes(start)),
+            Size::from_bytes(len),
         )
         .unwrap_or_else(|err| bug!("const slice is invalid: {:?}", err))
     } else {
diff --git a/src/librustc/ty/print/pretty.rs b/src/librustc/ty/print/pretty.rs
index 8d5d6247f5c05..b1626d95eb3e8 100644
--- a/src/librustc/ty/print/pretty.rs
+++ b/src/librustc/ty/print/pretty.rs
@@ -981,7 +981,7 @@ pub trait PrettyPrinter<'tcx>:
                     .alloc_map
                     .lock()
                     .unwrap_memory(ptr.alloc_id)
-                    .get_bytes(&self.tcx(), ptr, Size::from_bytes(*data as u64))
+                    .get_bytes(&self.tcx(), ptr, Size::from_bytes(*data))
                     .unwrap();
                 p!(pretty_print_byte_str(byte_str));
             }
@@ -1169,7 +1169,7 @@ pub trait PrettyPrinter<'tcx>:
             (ConstValue::ByRef { alloc, offset }, ty::Array(t, n)) if *t == u8_type => {
                 let n = n.val.try_to_bits(self.tcx().data_layout.pointer_size).unwrap();
                 // cast is ok because we already checked for pointer size (32 or 64 bit) above
-                let n = Size::from_bytes(n as u64);
+                let n = Size::from_bytes(n);
                 let ptr = Pointer::new(AllocId(0), offset);
 
                 let byte_str = alloc.get_bytes(&self.tcx(), ptr, n).unwrap();
diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs
index 1e1fede2588df..5bb30d03d9f8d 100644
--- a/src/librustc_codegen_ssa/mir/operand.rs
+++ b/src/librustc_codegen_ssa/mir/operand.rs
@@ -91,7 +91,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 };
                 let a = Scalar::from(Pointer::new(
                     bx.tcx().alloc_map.lock().create_memory_alloc(data),
-                    Size::from_bytes(start as u64),
+                    Size::from_bytes(start),
                 ));
                 let a_llval = bx.scalar_to_backend(
                     a,
diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs
index f51e38eb1ea5c..62bb4c8d55634 100644
--- a/src/librustc_mir/interpret/memory.rs
+++ b/src/librustc_mir/interpret/memory.rs
@@ -836,7 +836,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         src: impl IntoIterator<Item = u8>,
     ) -> InterpResult<'tcx> {
         let src = src.into_iter();
-        let size = Size::from_bytes(src.size_hint().0 as u64);
+        let size = Size::from_bytes(src.size_hint().0);
         // `write_bytes` checks that this lower bound `size` matches the upper bound and reality.
         let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
             Some(ptr) => ptr,
diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs
index f6be92174ad46..409c958ac39fd 100644
--- a/src/librustc_mir/interpret/operand.rs
+++ b/src/librustc_mir/interpret/operand.rs
@@ -1,7 +1,7 @@
 //! Functions concerning immediate values and operands, and reading from operands.
 //! All high-level functions to read from memory work on operands as sources.
 
-use std::convert::{TryFrom, TryInto};
+use std::convert::TryFrom;
 
 use super::{InterpCx, MPlaceTy, Machine, MemPlace, Place, PlaceTy};
 pub use rustc::mir::interpret::ScalarMaybeUndef;
@@ -570,7 +570,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // where none should happen.
                 let ptr = Pointer::new(
                     self.tcx.alloc_map.lock().create_memory_alloc(data),
-                    Size::from_bytes(start.try_into().unwrap()), // offset: `start`
+                    Size::from_bytes(start), // offset: `start`
                 );
                 Operand::Immediate(Immediate::new_slice(
                     self.tag_global_base_pointer(ptr).into(),
diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs
index ae754ab4feb26..ef34717255581 100644
--- a/src/librustc_mir/interpret/place.rs
+++ b/src/librustc_mir/interpret/place.rs
@@ -739,7 +739,7 @@ where
             ),
             Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Raw { size, .. })) => {
                 assert_eq!(
-                    Size::from_bytes(size.into()),
+                    Size::from_bytes(size),
                     dest.layout.size,
                     "Size mismatch when writing bits"
                 )
diff --git a/src/librustc_mir_build/hair/pattern/_match.rs b/src/librustc_mir_build/hair/pattern/_match.rs
index 89063a4227fa9..76fcbf326fa78 100644
--- a/src/librustc_mir_build/hair/pattern/_match.rs
+++ b/src/librustc_mir_build/hair/pattern/_match.rs
@@ -1920,8 +1920,8 @@ fn slice_pat_covered_by_const<'tcx>(
         }
         (ConstValue::Slice { data, start, end }, ty::Slice(t)) => {
             assert_eq!(*t, tcx.types.u8);
-            let ptr = Pointer::new(AllocId(0), Size::from_bytes(start as u64));
-            data.get_bytes(&tcx, ptr, Size::from_bytes((end - start) as u64)).unwrap()
+            let ptr = Pointer::new(AllocId(0), Size::from_bytes(start));
+            data.get_bytes(&tcx, ptr, Size::from_bytes(end - start)).unwrap()
         }
         // FIXME(oli-obk): create a way to extract fat pointers from ByRef
         (_, ty::Slice(_)) => return Ok(false),
@@ -2375,7 +2375,7 @@ fn specialize_one_pattern<'p, 'tcx>(
             ty::Slice(t) => {
                 match value.val {
                     ty::ConstKind::Value(ConstValue::Slice { data, start, end }) => {
-                        let offset = Size::from_bytes(start as u64);
+                        let offset = Size::from_bytes(start);
                         let n = (end - start) as u64;
                         (Cow::Borrowed(data), offset, n, t)
                     }
diff --git a/src/librustc_target/abi/mod.rs b/src/librustc_target/abi/mod.rs
index 7cf2b690d352d..ffd6c8da1dc2f 100644
--- a/src/librustc_target/abi/mod.rs
+++ b/src/librustc_target/abi/mod.rs
@@ -3,7 +3,7 @@ pub use Primitive::*;
 
 use crate::spec::Target;
 
-use std::convert::TryFrom;
+use std::convert::{TryFrom, TryInto};
 use std::ops::{Add, AddAssign, Deref, Mul, Range, RangeInclusive, Sub};
 
 use rustc_index::vec::{Idx, IndexVec};
@@ -241,17 +241,18 @@ pub struct Size {
 }
 
 impl Size {
-    pub const ZERO: Size = Self::from_bytes(0);
+    pub const ZERO: Size = Size { raw: 0 };
 
     #[inline]
-    pub fn from_bits(bits: u64) -> Size {
+    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
+        let bits = bits.try_into().ok().unwrap();
         // Avoid potential overflow from `bits + 7`.
         Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
     }
 
     #[inline]
-    pub const fn from_bytes(bytes: u64) -> Size {
-        Size { raw: bytes }
+    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
+        Size { raw: bytes.try_into().ok().unwrap() }
     }
 
     #[inline]
From afcb6342fa8cf206a80e8555454bff74d9144973 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Tue, 24 Mar 2020 10:07:46 +0100
Subject: [PATCH 21/26] use Size addition instead of checked int addition

---
 src/librustc/mir/interpret/allocation.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs
index 5440aef6fe6c9..cd4c7c71ccd90 100644
--- a/src/librustc/mir/interpret/allocation.rs
+++ b/src/librustc/mir/interpret/allocation.rs
@@ -293,7 +293,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let offset = usize::try_from(ptr.offset.bytes()).unwrap();
         Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
-                let size_with_null = Size::from_bytes(size.checked_add(1).unwrap());
+                let size_with_null = Size::add(Size::from_bytes(size), Size::from_bytes(1));
                 // Go through `get_bytes` for checks and AllocationExtra hooks.
                 // We read the null, so we include it in the request, but we want it removed
                 // from the result, so we do subslicing.
From 1d67ca00a1b2f667068e24d4f98676f3ec62de42 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Tue, 24 Mar 2020 10:16:39 +0100
Subject: [PATCH 22/26] add helper method for ptr ops on Scalar; reduce unnecessary large operand of overflowing_signed_offset

---
 src/librustc/mir/interpret/pointer.rs | 11 ++--
 src/librustc/mir/interpret/value.rs   | 73 +++++++++++----------------
 2 files changed, 35 insertions(+), 49 deletions(-)

diff --git a/src/librustc/mir/interpret/pointer.rs b/src/librustc/mir/interpret/pointer.rs
index ff479aee4e0a9..3f841cfb33003 100644
--- a/src/librustc/mir/interpret/pointer.rs
+++ b/src/librustc/mir/interpret/pointer.rs
@@ -73,10 +73,8 @@ pub trait PointerArithmetic: layout::HasDataLayout {
         self.truncate_to_ptr(res)
     }
 
-    // Overflow checking only works properly on the range from -u64 to +u64.
     #[inline]
-    fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) {
-        // FIXME: is it possible to over/underflow here?
+    fn overflowing_signed_offset(&self, val: u64, i: i64) -> (u64, bool) {
         if i < 0 {
             // Trickery to ensure that `i64::MIN` works fine: compute `n = -i`.
             // This formula only works for true negative values; it overflows for zero!
             let res = val.overflowing_sub(n);
             self.truncate_to_ptr(res)
         } else {
+            // `i >= 0`, so the cast is safe.
             self.overflowing_offset(val, i as u64)
         }
     }
@@ -96,7 +95,7 @@ pub trait PointerArithmetic: layout::HasDataLayout {
 
     #[inline]
     fn signed_offset<'tcx>(&self, val: u64, i: i64) -> InterpResult<'tcx, u64> {
-        let (res, over) = self.overflowing_signed_offset(val, i128::from(i));
+        let (res, over) = self.overflowing_signed_offset(val, i);
         if over { throw_ub!(PointerArithOverflow) } else { Ok(res) }
     }
 }
@@ -189,14 +188,14 @@ impl<'tcx, Tag> Pointer<Tag> {
     }
 
     #[inline]
-    pub fn overflowing_signed_offset(self, i: i128, cx: &impl HasDataLayout) -> (Self, bool) {
+    pub fn overflowing_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> (Self, bool) {
         let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
         (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
     }
 
     #[inline(always)]
     pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
-        self.overflowing_signed_offset(i128::from(i), cx).0
+        self.overflowing_signed_offset(i, cx).0
     }
 
     #[inline(always)]
diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs
index f00952e3725eb..706cf1cd09a79 100644
--- a/src/librustc/mir/interpret/value.rs
+++ b/src/librustc/mir/interpret/value.rs
@@ -1,11 +1,12 @@
 use std::convert::TryFrom;
+use std::fmt;
 
 use rustc_apfloat::{
     ieee::{Double, Single},
     Float,
 };
 use rustc_macros::HashStable;
-use std::fmt;
+use rustc_target::abi::TargetDataLayout;
 
 use crate::ty::{
     layout::{HasDataLayout, Size},
@@ -200,68 +201,54 @@ impl<'tcx, Tag> Scalar<Tag> {
         Scalar::Raw { data: 0, size: 0 }
     }
 
-    #[inline]
-    pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
-        let dl = cx.data_layout();
+    #[inline(always)]
+    fn ptr_op(
+        self,
+        dl: &TargetDataLayout,
+        f_int: impl FnOnce(u64) -> InterpResult<'tcx, u64>,
+        f_ptr: impl FnOnce(Pointer<Tag>) -> InterpResult<'tcx, Pointer<Tag>>,
+    ) -> InterpResult<'tcx, Self> {
         match self {
             Scalar::Raw { data, size } => {
                 assert_eq!(u64::from(size), dl.pointer_size.bytes());
-                Ok(Scalar::Raw {
-                    data: u128::from(dl.offset(u64::try_from(data).unwrap(), i.bytes())?),
-                    size,
-                })
+                Ok(Scalar::Raw { data: u128::from(f_int(u64::try_from(data).unwrap())?), size })
             }
-            Scalar::Ptr(ptr) => ptr.offset(i, dl).map(Scalar::Ptr),
+            Scalar::Ptr(ptr) => Ok(Scalar::Ptr(f_ptr(ptr)?)),
         }
     }
 
+    #[inline]
+    pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
+        let dl = cx.data_layout();
+        self.ptr_op(dl, |int| dl.offset(int, i.bytes()), |ptr| ptr.offset(i, dl))
+    }
+
     #[inline]
     pub fn ptr_wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self {
         let dl = cx.data_layout();
-        match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(u64::from(size), dl.pointer_size.bytes());
-                Scalar::Raw {
-                    data: u128::from(
-                        dl.overflowing_offset(u64::try_from(data).unwrap(), i.bytes()).0,
-                    ),
-                    size,
-                }
-            }
-            Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_offset(i, dl)),
-        }
+        self.ptr_op(
+            dl,
+            |int| Ok(dl.overflowing_offset(int, i.bytes()).0),
+            |ptr| Ok(ptr.wrapping_offset(i, dl)),
+        )
+        .unwrap()
     }
 
     #[inline]
     pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
         let dl = cx.data_layout();
-        match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(u64::from(size), dl.pointer_size.bytes());
-                Ok(Scalar::Raw {
-                    data: u128::from(dl.signed_offset(u64::try_from(data).unwrap(), i)?),
-                    size,
-                })
-            }
-            Scalar::Ptr(ptr) => ptr.signed_offset(i, dl).map(Scalar::Ptr),
-        }
+        self.ptr_op(dl, |int| dl.signed_offset(int, i), |ptr| ptr.signed_offset(i, dl))
     }
 
     #[inline]
     pub fn ptr_wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
         let dl = cx.data_layout();
-        match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(u64::from(size), dl.pointer_size.bytes());
-                Scalar::Raw {
-                    data: u128::from(
-                        dl.overflowing_signed_offset(u64::try_from(data).unwrap(), i128::from(i)).0,
-                    ),
-                    size,
-                }
-            }
-            Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, dl)),
-        }
+        self.ptr_op(
+            dl,
+            |int| Ok(dl.overflowing_signed_offset(int, i).0),
+            |ptr| Ok(ptr.wrapping_signed_offset(i, dl)),
+        )
+        .unwrap()
     }
 
     #[inline]
From b7db7320ad7a0f07074276c76fe3d1ecc23b08ef Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Tue, 24 Mar 2020 16:43:50 +0100
Subject: [PATCH 23/26] go back to infix ops for Size

---
 src/librustc/mir/interpret/allocation.rs   | 24 +++++++++++-----------
 src/librustc_mir/interpret/eval_context.rs |  3 +--
 src/librustc_mir/interpret/memory.rs       | 14 ++++++-------
 src/librustc_mir/interpret/place.rs        |  8 ++++----
 src/librustc_mir/interpret/traits.rs       |  6 ++----
 src/librustc_mir/interpret/validity.rs     |  8 ++++----
 6 files changed, 30 insertions(+), 33 deletions(-)

diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs
index cd4c7c71ccd90..4791c2fed9981 100644
--- a/src/librustc/mir/interpret/allocation.rs
+++ b/src/librustc/mir/interpret/allocation.rs
@@ -3,7 +3,7 @@
 use std::borrow::Cow;
 use std::convert::TryFrom;
 use std::iter;
-use std::ops::{Add, Deref, DerefMut, Mul, Range, Sub};
+use std::ops::{Deref, DerefMut, Range};
 
 use rustc_ast::ast::Mutability;
 use rustc_data_structures::sorted_map::SortedMap;
@@ -183,7 +183,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     /// Returns the range of this allocation that was meant.
     #[inline]
     fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
-        let end = Size::add(offset, size); // This does overflow checking.
+        let end = offset + size; // This does overflow checking.
         let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
         assert!(
             end <= self.len(),
@@ -293,7 +293,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let offset = usize::try_from(ptr.offset.bytes()).unwrap();
         Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
-                let size_with_null = Size::add(Size::from_bytes(size), Size::from_bytes(1));
+                let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
                 // Go through `get_bytes` for checks and AllocationExtra hooks.
                 // We read the null, so we include it in the request, but we want it removed
                 // from the result, so we do subslicing.
@@ -474,7 +474,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
         // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
         // the beginning of this range.
         let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
-        let end = Size::add(ptr.offset, size); // This does overflow checking.
+        let end = ptr.offset + size; // This does overflow checking.
         self.relocations.range(Size::from_bytes(start)..end)
     }
 
@@ -519,7 +519,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
             )
         };
         let start = ptr.offset;
-        let end = Size::add(start, size);
+        let end = start + size; // `Size` addition
 
         // Mark parts of the outermost relocations as undefined if they partially fall outside the
         // given range.
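[Editorial aside: the infix `+`/`*` restored in this patch stay overflow-checked, because `Size` implements the operator traits with checked arithmetic — that is what the `// Size addition` annotations refer to. A minimal standalone sketch of that pattern, using a toy `Size` type and illustrative panic messages (the real impls live in src/librustc_target/abi/mod.rs and may differ in detail):

use std::ops::{Add, Mul};

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Size {
    raw: u64, // size in bytes
}

impl Add for Size {
    type Output = Size;
    fn add(self, other: Size) -> Size {
        // `a + b` panics instead of wrapping, so `offset + size` stays safe.
        Size { raw: self.raw.checked_add(other.raw).expect("Size addition overflowed") }
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    fn mul(self, count: u64) -> Size {
        // Likewise, `stride * i` cannot silently wrap.
        Size { raw: self.raw.checked_mul(count).expect("Size multiplication overflowed") }
    }
}

fn main() {
    let a = Size { raw: 8 };
    assert_eq!(a + a, Size { raw: 16 });
    assert_eq!(a * 3, Size { raw: 24 });
}

This is why the patch can drop the explicit `Size::add`/`Size::mul` calls without losing the overflow checking those calls documented.]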
@@ -558,7 +558,7 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
     #[inline]
     fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
         self.undef_mask
-            .is_range_defined(ptr.offset, Size::add(ptr.offset, size))
+            .is_range_defined(ptr.offset, ptr.offset + size) // `Size` addition
             .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
     }
 
@@ -566,7 +566,7 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
         if size.bytes() == 0 {
             return;
         }
-        self.undef_mask.set_range(ptr.offset, Size::add(ptr.offset, size), new_state);
+        self.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
     }
 }
 
@@ -611,7 +611,7 @@ impl Allocation {
         for i in 1..size.bytes() {
             // FIXME: optimize to bitshift the current undef block's bits and read the top bit.
-            if self.undef_mask.get(Size::add(src.offset, Size::from_bytes(i))) == cur {
+            if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
                 cur_len += 1;
             } else {
                 ranges.push(cur_len);
@@ -638,7 +638,7 @@ impl Allocation {
         if defined.ranges.len() <= 1 {
             self.undef_mask.set_range_inbounds(
                 dest.offset,
-                Size::add(dest.offset, Size::mul(size, repeat)),
+                dest.offset + size * repeat, // `Size` operations
                 defined.initial,
             );
             return;
@@ -716,10 +716,10 @@ impl Allocation {
         for i in 0..length {
             new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                 // compute offset for current repetition
-                let dest_offset = Size::add(dest.offset, Size::mul(size, i));
+                let dest_offset = dest.offset + size * i; // `Size` operations
                 (
                     // shift offsets from source allocation to destination allocation
-                    Size::sub(Size::add(offset, dest_offset), src.offset),
+                    (offset + dest_offset) - src.offset, // `Size` operations
                     reloc,
                 )
             }));
@@ -867,7 +867,7 @@ impl UndefMask {
         }
         let start = self.len;
         self.len += amount;
-        self.set_range_inbounds(start, Size::add(start, amount), new_state);
+        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
     }
 }
 
diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs
index 0d0f4daa85ef7..c2baabf4233ce 100644
--- a/src/librustc_mir/interpret/eval_context.rs
+++ b/src/librustc_mir/interpret/eval_context.rs
@@ -1,7 +1,6 @@
 use std::cell::Cell;
 use std::fmt::Write;
 use std::mem;
-use std::ops::Add;
 
 use rustc::ich::StableHashingContext;
 use rustc::mir;
@@ -454,7 +453,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // here. But this is where the add would go.)
 
         // Return the sum of sizes and max of aligns.
-        let size = Size::add(sized_size, unsized_size);
+        let size = sized_size + unsized_size; // `Size` addition
 
         // Choose max of two known alignments (combined value must
         // be aligned according to more restrictive of the two).
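[Editorial aside on the `ptr_op` helper from patch 22, which the offset methods above now call: the int-vs-pointer dispatch can be sketched in isolation. The types below are stand-ins (a toy `Scalar` with a `String` error type), not the rustc definitions:

use std::convert::TryFrom;

// Stand-in for the dispatch that `Scalar::ptr_op` centralizes: each offset
// method supplies one closure for the raw-integer case and one for pointers.
#[derive(Debug)]
enum Scalar {
    Raw(u128),
    Ptr { alloc_id: u32, offset: u64 },
}

impl Scalar {
    fn ptr_op(
        self,
        f_int: impl FnOnce(u64) -> Result<u64, String>,
        f_ptr: impl FnOnce(u64) -> Result<u64, String>,
    ) -> Result<Scalar, String> {
        match self {
            Scalar::Raw(data) => {
                let int = u64::try_from(data).map_err(|e| e.to_string())?;
                Ok(Scalar::Raw(u128::from(f_int(int)?)))
            }
            Scalar::Ptr { alloc_id, offset } => {
                Ok(Scalar::Ptr { alloc_id, offset: f_ptr(offset)? })
            }
        }
    }

    // Each public offset method then states its arithmetic once,
    // instead of repeating the whole match.
    fn ptr_offset(self, i: u64) -> Result<Scalar, String> {
        self.ptr_op(
            |int| int.checked_add(i).ok_or_else(|| "overflow".to_string()),
            |off| off.checked_add(i).ok_or_else(|| "overflow".to_string()),
        )
    }
}

fn main() -> Result<(), String> {
    println!("{:?}", Scalar::Raw(100).ptr_offset(8)?);
    Ok(())
}]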
diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs
index 62bb4c8d55634..2bd6b05a005c6 100644
--- a/src/librustc_mir/interpret/memory.rs
+++ b/src/librustc_mir/interpret/memory.rs
@@ -9,7 +9,6 @@
 use std::borrow::Cow;
 use std::collections::VecDeque;
 use std::convert::TryFrom;
-use std::ops::{Add, Mul};
 use std::ptr;
 
 use rustc::ty::layout::{Align, HasDataLayout, Size, TargetDataLayout};
@@ -880,7 +879,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         let src_bytes =
             self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr();
         let dest_bytes =
-            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, Size::mul(size, length))?;
+            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication
 
         // If `dest_bytes` is empty we just optimize to not run anything for zsts.
         // See #67539
@@ -901,7 +900,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         // touched if the bytes stay undef for the whole interpreter execution. On contemporary
         // operating system this can avoid physically allocating the page.
         let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
-        dest_alloc.mark_definedness(dest, Size::mul(size, length), false);
+        dest_alloc.mark_definedness(dest, size * length, false); // `Size` multiplication
         dest_alloc.mark_relocation_range(relocations);
         return Ok(());
     }
@@ -914,8 +913,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         unsafe {
             if src.alloc_id == dest.alloc_id {
                 if nonoverlapping {
-                    if (src.offset <= dest.offset && Size::add(src.offset, size) > dest.offset)
-                        || (dest.offset <= src.offset && Size::add(dest.offset, size) > src.offset)
+                    // `Size` additions
+                    if (src.offset <= dest.offset && src.offset + size > dest.offset)
+                        || (dest.offset <= src.offset && dest.offset + size > src.offset)
                     {
                         throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                     }
@@ -924,7 +924,7 @@
             for i in 0..length {
                 ptr::copy(
                     src_bytes,
-                    dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()),
+                    dest_bytes.offset(isize::try_from((size * i).bytes()).unwrap()), // `Size` multiplication
                     usize::try_from(size.bytes()).unwrap(),
                 );
            }
        } else {
             for i in 0..length {
                 ptr::copy_nonoverlapping(
                     src_bytes,
-                    dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()),
+                    dest_bytes.offset(isize::try_from((size * i).bytes()).unwrap()), // `Size` multiplication
                     usize::try_from(size.bytes()).unwrap(),
                 );
             }
diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs
index ef34717255581..5cf267c257d84 100644
--- a/src/librustc_mir/interpret/place.rs
+++ b/src/librustc_mir/interpret/place.rs
@@ -4,7 +4,6 @@
 
 use std::convert::TryFrom;
 use std::hash::Hash;
-use std::ops::Mul;
 
 use rustc::mir;
 use rustc::mir::interpret::truncate;
@@ -444,7 +443,7 @@ where
             // This can only be reached in ConstProp and non-rustc-MIR.
             throw_ub!(BoundsCheckFailed { len, index });
         }
-        let offset = Size::mul(stride, index);
+        let offset = stride * index; // `Size` multiplication
         // All fields have the same layout.
         let field_layout = base.layout.field(self, 0)?;
@@ -469,7 +468,8 @@ where
         };
         let layout = base.layout.field(self, 0)?;
         let dl = &self.tcx.data_layout;
-        Ok((0..len).map(move |i| base.offset(Size::mul(stride, i), MemPlaceMeta::None, layout, dl)))
+        // `Size` multiplication
+        Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
     }
 
     fn mplace_subslice(
@@ -493,7 +493,7 @@ where
         // Not using layout method because that works with usize, and does not work with slices
         // (that have count 0 in their layout).
         let from_offset = match base.layout.fields {
-            layout::FieldPlacement::Array { stride, .. } => Size::mul(stride, from),
+            layout::FieldPlacement::Array { stride, .. } => stride * from, // `Size` multiplication is checked
             _ => bug!("Unexpected layout of index access: {:#?}", base.layout),
         };
 
diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs
index fa8d67029dfcc..1e63766b85d42 100644
--- a/src/librustc_mir/interpret/traits.rs
+++ b/src/librustc_mir/interpret/traits.rs
@@ -1,5 +1,4 @@
 use std::convert::TryFrom;
-use std::ops::Mul;
 
 use rustc::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
 use rustc::ty::layout::{Align, HasDataLayout, LayoutOf, Size};
@@ -57,7 +56,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // `get_vtable` in `rust_codegen_llvm/meth.rs`.
         // /////////////////////////////////////////////////////////////////////////////////////////
         let vtable = self.memory.allocate(
-            Size::mul(ptr_size, u64::try_from(methods.len()).unwrap().checked_add(3).unwrap()),
+            ptr_size * u64::try_from(methods.len()).unwrap().checked_add(3).unwrap(),
             ptr_align,
             MemoryKind::Vtable,
         );
@@ -110,8 +109,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
         let ptr_size = self.pointer_size();
         // Skip over the 'drop_ptr', 'size', and 'align' fields.
-        let vtable_slot =
-            vtable.ptr_offset(Size::mul(ptr_size, idx.checked_add(3).unwrap()), self)?;
+        let vtable_slot = vtable.ptr_offset(ptr_size * idx.checked_add(3).unwrap(), self)?;
         let vtable_slot = self
             .memory
             .check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs
index 164478362bf56..a355a22748010 100644
--- a/src/librustc_mir/interpret/validity.rs
+++ b/src/librustc_mir/interpret/validity.rs
@@ -6,10 +6,10 @@
 
 use std::convert::TryFrom;
 use std::fmt::Write;
-use std::ops::{Mul, RangeInclusive};
+use std::ops::RangeInclusive;
 
 use rustc::ty;
-use rustc::ty::layout::{self, LayoutOf, Size, TyLayout, VariantIdx};
+use rustc::ty::layout::{self, LayoutOf, TyLayout, VariantIdx};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir as hir;
 use rustc_span::symbol::{sym, Symbol};
@@ -747,8 +747,8 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
             }
             // This is the element type size.
             let layout = self.ecx.layout_of(tys)?;
-            // This is the size in bytes of the whole array.
-            let size = Size::mul(layout.size, len);
+            // This is the size in bytes of the whole array. (This checks for overflow.)
+            let size = layout.size * len;
             // Size is not 0, get a pointer.
             let ptr = self.ecx.force_ptr(mplace.ptr)?;
From 7400955e941a3958b1560f2cb0b7648535d2f9d0 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Tue, 24 Mar 2020 17:13:26 +0100
Subject: [PATCH 24/26] add usize methods for Size getters

---
 src/librustc/mir/interpret/allocation.rs |  8 ++++----
 src/librustc_mir/interpret/memory.rs     | 13 ++++++-------
 src/librustc_target/abi/mod.rs           | 10 ++++++++++
 3 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs
index 4791c2fed9981..26b9e1be2f5d4 100644
--- a/src/librustc/mir/interpret/allocation.rs
+++ b/src/librustc/mir/interpret/allocation.rs
@@ -110,7 +110,7 @@ impl Allocation {
     pub fn undef(size: Size, align: Align) -> Self {
         Allocation {
-            bytes: vec![0; usize::try_from(size.bytes()).unwrap()],
+            bytes: vec![0; size.bytes_usize()],
             relocations: Relocations::new(),
             undef_mask: UndefMask::new(size, false),
             size,
@@ -153,7 +153,7 @@ impl Allocation<(), ()> {
 /// Raw accessors. Provide access to otherwise private bytes.
 impl<Tag, Extra> Allocation<Tag, Extra> {
     pub fn len(&self) -> usize {
-        usize::try_from(self.size.bytes()).unwrap()
+        self.size.bytes_usize()
     }
 
     /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
@@ -192,7 +192,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
             size.bytes(),
             self.len()
         );
-        usize::try_from(offset.bytes()).unwrap()..end
+        offset.bytes_usize()..end
     }
 
     /// The last argument controls whether we error out when there are undefined
@@ -290,7 +290,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
     ) -> InterpResult<'tcx, &[u8]> {
-        let offset = usize::try_from(ptr.offset.bytes()).unwrap();
+        let offset = ptr.offset.bytes_usize();
         Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
                 let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs
index 2bd6b05a005c6..49b9018fd1725 100644
--- a/src/librustc_mir/interpret/memory.rs
+++ b/src/librustc_mir/interpret/memory.rs
@@ -668,7 +668,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 }
                 if alloc.undef_mask().is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
                     // this `as usize` is fine, since `i` came from a `usize`
-                    let i = usize::try_from(i.bytes()).unwrap();
+                    let i = i.bytes_usize();
                     // Checked definedness (and thus range) and relocations. This access also doesn't
                     // influence interpreter execution but is only for debugging.
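[Editorial aside: the `bytes_usize` helper this patch introduces collapses the repeated `usize::try_from(x.bytes()).unwrap()` pattern visible in these hunks. A hedged sketch of the helper on a toy `Size` type (the real method is added to src/librustc_target/abi/mod.rs later in this patch):

use std::convert::TryInto;

#[derive(Copy, Clone)]
struct Size {
    raw: u64,
}

impl Size {
    fn bytes(self) -> u64 {
        self.raw
    }

    // Same behavior as the open-coded conversions it replaces: panic if the
    // value does not fit the host's usize (only possible on 16-bit hosts).
    fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }
}

fn main() {
    let size = Size { raw: 4096 };
    // Before: vec![0u8; usize::try_from(size.bytes()).unwrap()]
    // After:
    let bytes = vec![0u8; size.bytes_usize()];
    assert_eq!(bytes.len(), 4096);
}]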
@@ -693,8 +693,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         let mut pos = Size::ZERO;
         let relocation_width = (self.pointer_size().bytes() - 1) * 3;
         for (i, target_id) in relocations {
-            // this `as usize` is fine, since we can't print more chars than `usize::MAX`
-            write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
+            write!(msg, "{:1$}", "", ((i - pos) * 3).bytes_usize()).unwrap();
             let target = format!("({})", target_id);
             // this `as usize` is fine, since we can't print more chars than `usize::MAX`
             write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
@@ -924,16 +923,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             for i in 0..length {
                 ptr::copy(
                     src_bytes,
-                    dest_bytes.offset(isize::try_from((size * i).bytes()).unwrap()), // `Size` multiplication
-                    usize::try_from(size.bytes()).unwrap(),
+                    dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+                    size.bytes_usize(),
                 );
             }
         } else {
             for i in 0..length {
                 ptr::copy_nonoverlapping(
                     src_bytes,
-                    dest_bytes.offset(isize::try_from((size * i).bytes()).unwrap()), // `Size` multiplication
-                    usize::try_from(size.bytes()).unwrap(),
+                    dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+                    size.bytes_usize(),
                 );
             }
         }
diff --git a/src/librustc_target/abi/mod.rs b/src/librustc_target/abi/mod.rs
index ffd6c8da1dc2f..74d9817d2779b 100644
--- a/src/librustc_target/abi/mod.rs
+++ b/src/librustc_target/abi/mod.rs
@@ -260,6 +260,11 @@ impl Size {
         self.raw
     }
 
+    #[inline]
+    pub fn bytes_usize(self) -> usize {
+        self.bytes().try_into().unwrap()
+    }
+
     #[inline]
     pub fn bits(self) -> u64 {
         self.bytes().checked_mul(8).unwrap_or_else(|| {
@@ -267,6 +272,11 @@ impl Size {
         })
     }
 
+    #[inline]
+    pub fn bits_usize(self) -> usize {
+        self.bits().try_into().unwrap()
+    }
+
     #[inline]
     pub fn align_to(self, align: Align) -> Size {
         let mask = align.bytes() - 1;
From f8e3da5ea22268dd9f7ff61c133aca3e8c64206f Mon Sep 17 00:00:00 2001
From: Bastian Kauschke
Date: Wed, 25 Mar 2020 16:07:36 +0100
Subject: [PATCH 25/26] run test only on 64bit

---
 src/test/mir-opt/inline/inline-into-box-place.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/test/mir-opt/inline/inline-into-box-place.rs b/src/test/mir-opt/inline/inline-into-box-place.rs
index 500238de4c5ab..fcb7b4c4fe69d 100644
--- a/src/test/mir-opt/inline/inline-into-box-place.rs
+++ b/src/test/mir-opt/inline/inline-into-box-place.rs
@@ -1,6 +1,7 @@
 // ignore-tidy-linelength
 // ignore-wasm32-bare compiled with panic=abort by default
 // compile-flags: -Z mir-opt-level=3
+// only-64bit FIXME: the mir representation of RawVec depends on ptr size
 #![feature(box_syntax)]
 
 fn main() {
From 4f429c074b865e2ae7a4dc52eb6f2e5db9244e48 Mon Sep 17 00:00:00 2001
From: Josh Stone
Date: Wed, 25 Mar 2020 11:09:00 -0700
Subject: [PATCH 26/26] impl TrustedRandomAccess for Fuse without FusedIterator

---
 src/libcore/iter/adapters/fuse.rs | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/libcore/iter/adapters/fuse.rs b/src/libcore/iter/adapters/fuse.rs
index f5fd075662209..a60ca64ec87c8 100644
--- a/src/libcore/iter/adapters/fuse.rs
+++ b/src/libcore/iter/adapters/fuse.rs
@@ -326,10 +326,14 @@ where
 
 unsafe impl<I> TrustedRandomAccess for Fuse<I>
 where
-    I: TrustedRandomAccess + FusedIterator,
+    I: TrustedRandomAccess,
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> I::Item {
-        self.as_inner_mut().get_unchecked(i)
+        match self.iter {
+            Some(ref mut iter) => iter.get_unchecked(i),
+            // SAFETY: the caller asserts there is an item at `i`, so we're not exhausted.
+            None => intrinsics::unreachable(),
+        }
     }
 
     fn may_have_side_effect() -> bool {
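[Editorial aside on patch 26: dropping the `FusedIterator` bound is sound because `TrustedRandomAccess::get_unchecked` may only be called with in-bounds indices, so a `Fuse` whose inner iterator is `None` can never legitimately be asked for an item. A safe toy analogue of that shape, using a hypothetical `RandomAccess` trait and `unreachable!()` in place of the `intrinsics::unreachable()` the real unsafe code can use:

// The inner iterator lives in an Option; the caller's contract
// ("index i is in bounds") makes the None arm impossible.
trait RandomAccess {
    type Item;
    fn get(&mut self, i: usize) -> Self::Item;
}

struct Fuse<I> {
    iter: Option<I>,
}

impl<I: RandomAccess> RandomAccess for Fuse<I> {
    type Item = I::Item;
    fn get(&mut self, i: usize) -> I::Item {
        match self.iter {
            Some(ref mut iter) => iter.get(i),
            // The real code uses the compiler intrinsic here; a safe
            // sketch can only assert the contract by panicking.
            None => unreachable!("caller promised index {} is in bounds", i),
        }
    }
}

impl RandomAccess for std::ops::Range<usize> {
    type Item = usize;
    fn get(&mut self, i: usize) -> usize {
        self.start + i
    }
}

fn main() {
    let mut f = Fuse { iter: Some(10..20) };
    assert_eq!(f.get(3), 13);
}]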