diff --git a/configure b/configure index 086dd9f74e080..34132b7e5f535 100755 --- a/configure +++ b/configure @@ -607,6 +607,7 @@ opt dist-host-only 0 "only install bins for the host architecture" opt inject-std-version 1 "inject the current compiler version of libstd into programs" opt llvm-version-check 1 "check if the LLVM version is supported, build anyway" opt rustbuild 0 "use the rust and cargo based build system" +opt orbit 0 "get MIR where it belongs - everywhere; most importantly, in orbit" # Optimization and debugging options. These may be overridden by the release channel, etc. opt_nosave optimize 1 "build optimized rust code" @@ -713,6 +714,8 @@ if [ -n "$CFG_ENABLE_DEBUG_ASSERTIONS" ]; then putvar CFG_ENABLE_DEBUG_ASSERTION if [ -n "$CFG_ENABLE_DEBUGINFO" ]; then putvar CFG_ENABLE_DEBUGINFO; fi if [ -n "$CFG_ENABLE_DEBUG_JEMALLOC" ]; then putvar CFG_ENABLE_DEBUG_JEMALLOC; fi +if [ -n "$CFG_ENABLE_ORBIT" ]; then putvar CFG_ENABLE_ORBIT; fi + # A magic value that allows the compiler to use unstable features # during the bootstrap even when doing so would normally be an error # because of feature staging or because the build turns on diff --git a/mk/main.mk b/mk/main.mk index 887247b80c4cd..9df04a6d43eac 100644 --- a/mk/main.mk +++ b/mk/main.mk @@ -134,6 +134,11 @@ ifdef CFG_ENABLE_DEBUGINFO CFG_RUSTC_FLAGS += -g endif +ifdef CFG_ENABLE_ORBIT + $(info cfg: launching MIR (CFG_ENABLE_ORBIT)) + CFG_RUSTC_FLAGS += -Z orbit +endif + ifdef SAVE_TEMPS CFG_RUSTC_FLAGS += --save-temps endif diff --git a/src/compiletest/header.rs b/src/compiletest/header.rs index cf4d545a827c1..ef93fcfa013f8 100644 --- a/src/compiletest/header.rs +++ b/src/compiletest/header.rs @@ -31,6 +31,8 @@ pub struct TestProps { pub pp_exact: Option, // Modules from aux directory that should be compiled pub aux_builds: Vec , + // Environment settings to use for compiling + pub rustc_env: Vec<(String,String)> , // Environment settings to use during execution pub exec_env: Vec<(String,String)> , // Lines to check if they appear in the expected debugger output @@ -77,6 +79,7 @@ pub fn load_props(testfile: &Path) -> TestProps { pp_exact: pp_exact, aux_builds: aux_builds, revisions: vec![], + rustc_env: vec![], exec_env: exec_env, check_lines: check_lines, build_aux_docs: build_aux_docs, @@ -153,10 +156,14 @@ pub fn load_props_into(props: &mut TestProps, testfile: &Path, cfg: Option<&str> props.aux_builds.push(ab); } - if let Some(ee) = parse_exec_env(ln) { + if let Some(ee) = parse_env(ln, "exec-env") { props.exec_env.push(ee); } + if let Some(ee) = parse_env(ln, "rustc-env") { + props.rustc_env.push(ee); + } + if let Some(cl) = parse_check_line(ln) { props.check_lines.push(cl); } @@ -372,8 +379,8 @@ fn parse_pretty_compare_only(line: &str) -> bool { parse_name_directive(line, "pretty-compare-only") } -fn parse_exec_env(line: &str) -> Option<(String, String)> { - parse_name_value_directive(line, "exec-env").map(|nv| { +fn parse_env(line: &str, name: &str) -> Option<(String, String)> { + parse_name_value_directive(line, name).map(|nv| { // nv is either FOO or FOO=BAR let mut strs: Vec = nv .splitn(2, '=') diff --git a/src/compiletest/runtest.rs b/src/compiletest/runtest.rs index be011107c5030..e3ced9eff3ebb 100644 --- a/src/compiletest/runtest.rs +++ b/src/compiletest/runtest.rs @@ -863,12 +863,28 @@ fn cleanup_debug_info_options(options: &Option) -> Option { "-g".to_owned(), "--debuginfo".to_owned() ]; - let new_options = + let mut new_options = split_maybe_args(options).into_iter() .filter(|x| 
!options_to_remove.contains(x)) - .collect::>() - .join(" "); - Some(new_options) + .collect::>(); + + let mut i = 0; + while i + 1 < new_options.len() { + if new_options[i] == "-Z" { + // FIXME #31005 MIR missing debuginfo currently. + if new_options[i + 1] == "orbit" { + // Remove "-Z" and "orbit". + new_options.remove(i); + new_options.remove(i); + continue; + } + // Always skip over -Z's argument. + i += 1; + } + i += 1; + } + + Some(new_options.join(" ")) } fn check_debugger_output(debugger_run_result: &ProcRes, check_lines: &[String]) { @@ -1386,7 +1402,7 @@ fn compose_and_run_compiler(config: &Config, props: &TestProps, compose_and_run(config, testpaths, args, - Vec::new(), + props.rustc_env.clone(), &config.compile_lib_path, Some(aux_dir.to_str().unwrap()), input) diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index f76b8655ad1ed..13d3f42ba1896 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -72,6 +72,7 @@ #![feature(reflect)] #![feature(unwind_attributes)] #![feature(repr_simd, platform_intrinsics)] +#![feature(rustc_attrs)] #![feature(staged_api)] #![feature(unboxed_closures)] diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index 5a2a043d6f1b8..e6f83498ab1e2 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -1008,6 +1008,7 @@ macro_rules! int_impl { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[cfg_attr(not(stage0), rustc_no_mir)] // FIXME #29769 MIR overflow checking is TBD. pub fn pow(self, mut exp: u32) -> Self { let mut base = self; let mut acc = Self::one(); @@ -1049,6 +1050,7 @@ macro_rules! int_impl { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[cfg_attr(not(stage0), rustc_no_mir)] // FIXME #29769 MIR overflow checking is TBD. pub fn abs(self) -> Self { if self.is_negative() { // Note that the #[inline] above means that the overflow @@ -2013,6 +2015,7 @@ macro_rules! uint_impl { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[cfg_attr(not(stage0), rustc_no_mir)] // FIXME #29769 MIR overflow checking is TBD. pub fn pow(self, mut exp: u32) -> Self { let mut base = self; let mut acc = Self::one(); diff --git a/src/librustc/front/map/mod.rs b/src/librustc/front/map/mod.rs index 44f588c2e9ca0..dfc8560b58de0 100644 --- a/src/librustc/front/map/mod.rs +++ b/src/librustc/front/map/mod.rs @@ -22,6 +22,7 @@ use middle::def_id::DefId; use syntax::abi::Abi; use syntax::ast::{self, Name, NodeId, DUMMY_NODE_ID}; +use syntax::attr::ThinAttributesExt; use syntax::codemap::{Span, Spanned}; use syntax::parse::token; @@ -718,6 +719,8 @@ impl<'ast> Map<'ast> { Some(NodeTraitItem(ref ti)) => Some(&ti.attrs[..]), Some(NodeImplItem(ref ii)) => Some(&ii.attrs[..]), Some(NodeVariant(ref v)) => Some(&v.node.attrs[..]), + Some(NodeExpr(ref e)) => Some(e.attrs.as_attr_slice()), + Some(NodeStmt(ref s)) => Some(s.node.attrs()), // unit/tuple structs take the attributes straight from // the struct definition. 
Some(NodeStructCtor(_)) => { diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 3988545c201e0..d2ca1cd3f9339 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -73,7 +73,6 @@ mod macros; pub mod diagnostics; pub mod back { - pub use rustc_back::abi; pub use rustc_back::rpath; pub use rustc_back::svh; } diff --git a/src/librustc/middle/cfg/construct.rs b/src/librustc/middle/cfg/construct.rs index 1fb27261c4dbf..dcfa8d1e36a21 100644 --- a/src/librustc/middle/cfg/construct.rs +++ b/src/librustc/middle/cfg/construct.rs @@ -354,19 +354,10 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { self.straightline(expr, pred, Some(&**e).into_iter()) } - hir::ExprInlineAsm(ref inline_asm) => { - let inputs = inline_asm.inputs.iter(); - let outputs = inline_asm.outputs.iter(); - let post_inputs = self.exprs(inputs.map(|a| { - debug!("cfg::construct InlineAsm id:{} input:{:?}", expr.id, a); - let &(_, ref expr) = a; - &**expr - }), pred); - let post_outputs = self.exprs(outputs.map(|a| { - debug!("cfg::construct InlineAsm id:{} output:{:?}", expr.id, a); - &*a.expr - }), post_inputs); - self.add_ast_node(expr.id, &[post_outputs]) + hir::ExprInlineAsm(_, ref outputs, ref inputs) => { + let post_outputs = self.exprs(outputs.iter().map(|e| &**e), pred); + let post_inputs = self.exprs(inputs.iter().map(|e| &**e), post_outputs); + self.add_ast_node(expr.id, &[post_inputs]) } hir::ExprClosure(..) | diff --git a/src/librustc/middle/check_match.rs b/src/librustc/middle/check_match.rs index 89b57e0d90a00..16e0a334440ff 100644 --- a/src/librustc/middle/check_match.rs +++ b/src/librustc/middle/check_match.rs @@ -475,9 +475,9 @@ impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> { let def = self.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()); match def { Some(Def::AssociatedConst(did)) | - Some(Def::Const(did)) => match lookup_const_by_id(self.tcx, did, - Some(pat.id), None) { - Some((const_expr, _const_ty)) => { + Some(Def::Const(did)) => { + let substs = Some(self.tcx.node_id_item_substs(pat.id).substs); + if let Some((const_expr, _)) = lookup_const_by_id(self.tcx, did, substs) { const_expr_to_pat(self.tcx, const_expr, pat.span).map(|new_pat| { if let Some(ref mut renaming_map) = self.renaming_map { @@ -487,14 +487,13 @@ impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> { new_pat }) - } - None => { + } else { self.failed = true; span_err!(self.tcx.sess, pat.span, E0158, "statics cannot be referenced in patterns"); pat } - }, + } _ => noop_fold_pat(pat, self) } } diff --git a/src/librustc/middle/const_eval.rs b/src/librustc/middle/const_eval.rs index 5d4226fe4ceaf..47b6c49fddb6d 100644 --- a/src/librustc/middle/const_eval.rs +++ b/src/librustc/middle/const_eval.rs @@ -19,7 +19,6 @@ use front::map::blocks::FnLikeNode; use middle::cstore::{self, CrateStore, InlinedItem}; use middle::{infer, subst, traits}; use middle::def::Def; -use middle::subst::Subst; use middle::def_id::DefId; use middle::pat_util::def_to_path; use middle::ty::{self, Ty, TyCtxt}; @@ -89,16 +88,13 @@ fn lookup_variant_by_id<'a>(tcx: &'a ty::TyCtxt, } /// * `def_id` is the id of the constant. -/// * `maybe_ref_id` is the id of the expr referencing the constant. -/// * `param_substs` is the monomorphization substitution for the expression. +/// * `substs` is the monomorphized substitutions for the expression. /// -/// `maybe_ref_id` and `param_substs` are optional and are used for -/// finding substitutions in associated constants. This generally -/// happens in late/trans const evaluation. 
+/// `substs` is optional and is used for associated constants. +/// This generally happens in late/trans const evaluation. pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>, def_id: DefId, - maybe_ref_id: Option, - param_substs: Option<&'tcx subst::Substs<'tcx>>) + substs: Option>) -> Option<(&'tcx Expr, Option>)> { if let Some(node_id) = tcx.map.as_local_node_id(def_id) { match tcx.map.find(node_id) { @@ -111,28 +107,20 @@ pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>, }, Some(ast_map::NodeTraitItem(ti)) => match ti.node { hir::ConstTraitItem(_, _) => { - match maybe_ref_id { - // If we have a trait item, and we know the expression - // that's the source of the obligation to resolve it, + if let Some(substs) = substs { + // If we have a trait item and the substitutions for it, // `resolve_trait_associated_const` will select an impl // or the default. - Some(ref_id) => { - let trait_id = tcx.trait_of_item(def_id) - .unwrap(); - let mut substs = tcx.node_id_item_substs(ref_id) - .substs; - if let Some(param_substs) = param_substs { - substs = substs.subst(tcx, param_substs); - } - resolve_trait_associated_const(tcx, ti, trait_id, substs) - } + let trait_id = tcx.trait_of_item(def_id).unwrap(); + resolve_trait_associated_const(tcx, ti, trait_id, substs) + } else { // Technically, without knowing anything about the // expression that generates the obligation, we could // still return the default if there is one. However, // it's safer to return `None` than to return some value // that may differ from what you would get from // correctly selecting an impl. - None => None + None } } _ => None @@ -153,7 +141,7 @@ pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>, } None => {} } - let mut used_ref_id = false; + let mut used_substs = false; let expr_ty = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) { cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => match item.node { hir::ItemConst(ref ty, ref const_expr) => { @@ -163,21 +151,15 @@ pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>, }, cstore::FoundAst::Found(&InlinedItem::TraitItem(trait_id, ref ti)) => match ti.node { hir::ConstTraitItem(_, _) => { - used_ref_id = true; - match maybe_ref_id { + used_substs = true; + if let Some(substs) = substs { // As mentioned in the comments above for in-crate // constants, we only try to find the expression for // a trait-associated const if the caller gives us - // the expression that refers to it. - Some(ref_id) => { - let mut substs = tcx.node_id_item_substs(ref_id) - .substs; - if let Some(param_substs) = param_substs { - substs = substs.subst(tcx, param_substs); - } - resolve_trait_associated_const(tcx, ti, trait_id, substs) - } - None => None + // the substitutions for the reference to it. + resolve_trait_associated_const(tcx, ti, trait_id, substs) + } else { + None } } _ => None @@ -190,10 +172,10 @@ pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>, }, _ => None }; - // If we used the reference expression, particularly to choose an impl + // If we used the substitutions, particularly to choose an impl // of a trait-associated const, don't cache that, because the next // lookup with the same def_id may yield a different result. 
- if !used_ref_id { + if !used_substs { tcx.extern_const_statics .borrow_mut() .insert(def_id, expr_ty.map(|(e, t)| (e.id, t))); @@ -389,7 +371,8 @@ pub fn const_expr_to_pat(tcx: &TyCtxt, expr: &Expr, span: Span) -> P { PatKind::Path(path.clone()), Some(Def::Const(def_id)) | Some(Def::AssociatedConst(def_id)) => { - let (expr, _ty) = lookup_const_by_id(tcx, def_id, Some(expr.id), None).unwrap(); + let substs = Some(tcx.node_id_item_substs(expr.id).substs); + let (expr, _ty) = lookup_const_by_id(tcx, def_id, substs).unwrap(); return const_expr_to_pat(tcx, expr, span); }, _ => unreachable!(), @@ -788,12 +771,12 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &TyCtxt<'tcx>, match opt_def { Def::Const(def_id) | Def::AssociatedConst(def_id) => { - let maybe_ref_id = if let ExprTypeChecked = ty_hint { - Some(e.id) + let substs = if let ExprTypeChecked = ty_hint { + Some(tcx.node_id_item_substs(e.id).substs) } else { None }; - if let Some((e, ty)) = lookup_const_by_id(tcx, def_id, maybe_ref_id, None) { + if let Some((e, ty)) = lookup_const_by_id(tcx, def_id, substs) { let item_hint = match ty { Some(ty) => ty_hint.checked_or(ty), None => ty_hint, @@ -1077,7 +1060,7 @@ fn resolve_trait_associated_const<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>, traits::VtableImpl(ref impl_data) => { match tcx.associated_consts(impl_data.impl_def_id) .iter().find(|ic| ic.name == ti.name) { - Some(ic) => lookup_const_by_id(tcx, ic.def_id, None, None), + Some(ic) => lookup_const_by_id(tcx, ic.def_id, None), None => match ti.node { hir::ConstTraitItem(ref ty, Some(ref expr)) => { Some((&*expr, ast_ty_to_prim_ty(tcx, ty))) diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index 6a4680ecbaf31..f85d87413843e 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -122,7 +122,7 @@ pub struct ChildItem { pub enum FoundAst<'ast> { Found(&'ast InlinedItem), - FoundParent(DefId, &'ast InlinedItem), + FoundParent(DefId, &'ast hir::Item), NotFound, } @@ -182,7 +182,7 @@ pub trait CrateStore<'tcx> : Any { fn trait_of_item(&self, tcx: &TyCtxt<'tcx>, def_id: DefId) -> Option; fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId) - -> ty::ImplOrTraitItem<'tcx>; + -> Option>; // flags fn is_const_fn(&self, did: DefId) -> bool; @@ -353,7 +353,7 @@ impl<'tcx> CrateStore<'tcx> for DummyCrateStore { fn trait_of_item(&self, tcx: &TyCtxt<'tcx>, def_id: DefId) -> Option { unimplemented!() } fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId) - -> ty::ImplOrTraitItem<'tcx> { unimplemented!() } + -> Option> { unimplemented!() } // flags fn is_const_fn(&self, did: DefId) -> bool { unimplemented!() } diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs index a87ce1206b408..91ffef2aa599d 100644 --- a/src/librustc/middle/expr_use_visitor.rs +++ b/src/librustc/middle/expr_use_visitor.rs @@ -449,23 +449,20 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { } } - hir::ExprInlineAsm(ref ia) => { - for &(_, ref input) in &ia.inputs { - self.consume_expr(&input); - } - - for output in &ia.outputs { - if output.is_indirect { - self.consume_expr(&output.expr); + hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => { + for (o, output) in ia.outputs.iter().zip(outputs) { + if o.is_indirect { + self.consume_expr(output); } else { - self.mutate_expr(expr, &output.expr, - if output.is_rw { + self.mutate_expr(expr, output, + if o.is_rw { MutateMode::WriteAndRead } else { MutateMode::JustWrite }); } } + self.consume_exprs(inputs); } hir::ExprBreak(..) 
| diff --git a/src/librustc/middle/liveness.rs b/src/librustc/middle/liveness.rs index 0bfb830efc1e7..d78e0ca79aa82 100644 --- a/src/librustc/middle/liveness.rs +++ b/src/librustc/middle/liveness.rs @@ -1170,25 +1170,21 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.propagate_through_expr(&e, succ) } - hir::ExprInlineAsm(ref ia) => { - - let succ = ia.outputs.iter().rev().fold(succ, - |succ, out| { - // see comment on lvalues - // in propagate_through_lvalue_components() - if out.is_indirect { - self.propagate_through_expr(&out.expr, succ) - } else { - let acc = if out.is_rw { ACC_WRITE|ACC_READ } else { ACC_WRITE }; - let succ = self.write_lvalue(&out.expr, succ, acc); - self.propagate_through_lvalue_components(&out.expr, succ) - } + hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => { + let succ = ia.outputs.iter().zip(outputs).rev().fold(succ, |succ, (o, output)| { + // see comment on lvalues + // in propagate_through_lvalue_components() + if o.is_indirect { + self.propagate_through_expr(output, succ) + } else { + let acc = if o.is_rw { ACC_WRITE|ACC_READ } else { ACC_WRITE }; + let succ = self.write_lvalue(output, succ, acc); + self.propagate_through_lvalue_components(output, succ) } - ); + }); + // Inputs are executed first. Propagate last because of rev order - ia.inputs.iter().rev().fold(succ, |succ, &(_, ref expr)| { - self.propagate_through_expr(&expr, succ) - }) + self.propagate_through_exprs(inputs, succ) } hir::ExprLit(..) => { @@ -1425,17 +1421,17 @@ fn check_expr(this: &mut Liveness, expr: &Expr) { intravisit::walk_expr(this, expr); } - hir::ExprInlineAsm(ref ia) => { - for &(_, ref input) in &ia.inputs { - this.visit_expr(&input); + hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => { + for input in inputs { + this.visit_expr(input); } // Output operands must be lvalues - for out in &ia.outputs { - if !out.is_indirect { - this.check_lvalue(&out.expr); + for (o, output) in ia.outputs.iter().zip(outputs) { + if !o.is_indirect { + this.check_lvalue(output); } - this.visit_expr(&out.expr); + this.visit_expr(output); } intravisit::walk_expr(this, expr); diff --git a/src/librustc/middle/ty/mod.rs b/src/librustc/middle/ty/mod.rs index 081196835936c..6344553e88bbf 100644 --- a/src/librustc/middle/ty/mod.rs +++ b/src/librustc/middle/ty/mod.rs @@ -2182,7 +2182,8 @@ impl<'tcx> TyCtxt<'tcx> { pub fn impl_or_trait_item(&self, id: DefId) -> ImplOrTraitItem<'tcx> { lookup_locally_or_in_crate_store( "impl_or_trait_items", id, &self.impl_or_trait_items, - || self.sess.cstore.impl_or_trait_item(self, id)) + || self.sess.cstore.impl_or_trait_item(self, id) + .expect("missing ImplOrTraitItem in metadata")) } pub fn trait_item_def_ids(&self, id: DefId) -> Rc> { @@ -2502,10 +2503,12 @@ impl<'tcx> TyCtxt<'tcx> { /// ID of the impl that the method belongs to. Otherwise, return `None`. 
pub fn impl_of_method(&self, def_id: DefId) -> Option { if def_id.krate != LOCAL_CRATE { - return match self.sess.cstore.impl_or_trait_item(self, def_id).container() { - TraitContainer(_) => None, - ImplContainer(def_id) => Some(def_id), - }; + return self.sess.cstore.impl_or_trait_item(self, def_id).and_then(|item| { + match item.container() { + TraitContainer(_) => None, + ImplContainer(def_id) => Some(def_id), + } + }); } match self.impl_or_trait_items.borrow().get(&def_id).cloned() { Some(trait_item) => { diff --git a/src/librustc/middle/ty/sty.rs b/src/librustc/middle/ty/sty.rs index bbc5948f2cac7..2d4d4e51ba73f 100644 --- a/src/librustc/middle/ty/sty.rs +++ b/src/librustc/middle/ty/sty.rs @@ -948,7 +948,7 @@ impl<'tcx> TyS<'tcx> { } } - fn is_slice(&self) -> bool { + pub fn is_slice(&self) -> bool { match self.sty { TyRawPtr(mt) | TyRef(_, mt) => match mt.ty.sty { TySlice(_) | TyStr => true, diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs index 11bb381ec33bb..06d68af883899 100644 --- a/src/librustc/mir/repr.rs +++ b/src/librustc/mir/repr.rs @@ -14,6 +14,7 @@ use rustc_const_eval::{ConstUsize, ConstInt}; use middle::def_id::DefId; use middle::subst::Substs; use middle::ty::{self, AdtDef, ClosureSubsts, FnOutput, Region, Ty}; +use util::ppaux; use rustc_back::slice; use rustc_front::hir::InlineAsm; use std::ascii; @@ -177,6 +178,10 @@ pub struct TempDecl<'tcx> { #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] pub struct ArgDecl<'tcx> { pub ty: Ty<'tcx>, + + /// If true, this argument is a tuple after monomorphization, + /// and has to be collected from multiple actual arguments. + pub spread: bool } /////////////////////////////////////////////////////////////////////////// @@ -675,7 +680,11 @@ pub enum Rvalue<'tcx> { from_end: usize, }, - InlineAsm(InlineAsm), + InlineAsm { + asm: InlineAsm, + outputs: Vec>, + inputs: Vec> + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] @@ -760,7 +769,9 @@ impl<'tcx> Debug for Rvalue<'tcx> { BinaryOp(ref op, ref a, ref b) => write!(fmt, "{:?}({:?}, {:?})", op, a, b), UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a), Box(ref t) => write!(fmt, "Box({:?})", t), - InlineAsm(ref asm) => write!(fmt, "InlineAsm({:?})", asm), + InlineAsm { ref asm, ref outputs, ref inputs } => { + write!(fmt, "asm!({:?} : {:?} : {:?})", asm, outputs, inputs) + } Slice { ref input, from_start, from_end } => write!(fmt, "{:?}[{:?}..-{:?}]", input, from_start, from_end), @@ -775,8 +786,8 @@ impl<'tcx> Debug for Rvalue<'tcx> { Aggregate(ref kind, ref lvs) => { use self::AggregateKind::*; - fn fmt_tuple(fmt: &mut Formatter, name: &str, lvs: &[Operand]) -> fmt::Result { - let mut tuple_fmt = fmt.debug_tuple(name); + fn fmt_tuple(fmt: &mut Formatter, lvs: &[Operand]) -> fmt::Result { + let mut tuple_fmt = fmt.debug_tuple(""); for lv in lvs { tuple_fmt.field(lv); } @@ -790,19 +801,24 @@ impl<'tcx> Debug for Rvalue<'tcx> { match lvs.len() { 0 => write!(fmt, "()"), 1 => write!(fmt, "({:?},)", lvs[0]), - _ => fmt_tuple(fmt, "", lvs), + _ => fmt_tuple(fmt, lvs), } } - Adt(adt_def, variant, _) => { + Adt(adt_def, variant, substs) => { let variant_def = &adt_def.variants[variant]; - let name = ty::tls::with(|tcx| tcx.item_path_str(variant_def.did)); + + try!(ppaux::parameterized(fmt, substs, variant_def.did, + ppaux::Ns::Value, &[], + |tcx| { + tcx.lookup_item_type(variant_def.did).generics + })); match variant_def.kind() { - ty::VariantKind::Unit => write!(fmt, "{}", name), - ty::VariantKind::Tuple => fmt_tuple(fmt, 
&name, lvs), + ty::VariantKind::Unit => Ok(()), + ty::VariantKind::Tuple => fmt_tuple(fmt, lvs), ty::VariantKind::Struct => { - let mut struct_fmt = fmt.debug_struct(&name); + let mut struct_fmt = fmt.debug_struct(""); for (field, lv) in variant_def.fields.iter().zip(lvs) { struct_fmt.field(&field.name.as_str(), lv); } @@ -882,8 +898,10 @@ impl<'tcx> Debug for Literal<'tcx> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { use self::Literal::*; match *self { - Item { def_id, .. } => - write!(fmt, "{}", item_path_str(def_id)), + Item { def_id, substs } => { + ppaux::parameterized(fmt, substs, def_id, ppaux::Ns::Value, &[], + |tcx| tcx.lookup_item_type(def_id).generics) + } Value { ref value } => { try!(write!(fmt, "const ")); fmt_const_val(fmt, value) diff --git a/src/librustc/mir/tcx.rs b/src/librustc/mir/tcx.rs index b6b2694a7cbe1..c8a2bc440ce1d 100644 --- a/src/librustc/mir/tcx.rs +++ b/src/librustc/mir/tcx.rs @@ -16,6 +16,7 @@ use mir::repr::*; use middle::subst::{Subst, Substs}; use middle::ty::{self, AdtDef, Ty, TyCtxt}; +use middle::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use rustc_front::hir; #[derive(Copy, Clone, Debug)] @@ -77,6 +78,29 @@ impl<'tcx> LvalueTy<'tcx> { } } +impl<'tcx> TypeFoldable<'tcx> for LvalueTy<'tcx> { + fn super_fold_with>(&self, folder: &mut F) -> Self { + match *self { + LvalueTy::Ty { ty } => LvalueTy::Ty { ty: ty.fold_with(folder) }, + LvalueTy::Downcast { adt_def, substs, variant_index } => { + let substs = substs.fold_with(folder); + LvalueTy::Downcast { + adt_def: adt_def, + substs: folder.tcx().mk_substs(substs), + variant_index: variant_index + } + } + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match *self { + LvalueTy::Ty { ty } => ty.visit_with(visitor), + LvalueTy::Downcast { substs, .. } => substs.visit_with(visitor) + } + } +} + impl<'tcx> Mir<'tcx> { pub fn operand_ty(&self, tcx: &TyCtxt<'tcx>, @@ -196,7 +220,7 @@ impl<'tcx> Mir<'tcx> { } } Rvalue::Slice { .. } => None, - Rvalue::InlineAsm(..) => None + Rvalue::InlineAsm { .. } => None } } } diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index 5e3c6e028a325..36d45f0a51ec2 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -261,7 +261,14 @@ macro_rules! make_mir_visitor { }); } - Rvalue::InlineAsm(_) => { + Rvalue::InlineAsm { ref $($mutability)* outputs, + ref $($mutability)* inputs, .. } => { + for output in & $($mutability)* outputs[..] { + self.visit_lvalue(output, LvalueContext::Store); + } + for input in & $($mutability)* inputs[..] { + self.visit_operand(input); + } } } } diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 9f097215a8ac5..17f70b2d8dc66 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -663,6 +663,8 @@ options! 
{DebuggingOptions, DebuggingSetter, basic_debugging_options, "print the result of the translation item collection pass"), mir_opt_level: Option = (None, parse_opt_uint, "set the MIR optimization level (0-3)"), + orbit: bool = (false, parse_bool, + "get MIR where it belongs - everywhere; most importantly, in orbit"), } pub fn default_lib_output() -> CrateType { diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs index 8fd784cbde7a7..e1690f1a8592a 100644 --- a/src/librustc/util/ppaux.rs +++ b/src/librustc/util/ppaux.rs @@ -60,17 +60,37 @@ fn fn_sig(f: &mut fmt::Formatter, } } -fn parameterized(f: &mut fmt::Formatter, - substs: &subst::Substs, - did: DefId, - projections: &[ty::ProjectionPredicate], - get_generics: GG) - -> fmt::Result +/// Namespace of the path given to parameterized to print. +#[derive(Copy, Clone, PartialEq)] +pub enum Ns { + Type, + Value +} + +pub fn parameterized(f: &mut fmt::Formatter, + substs: &subst::Substs, + did: DefId, + ns: Ns, + projections: &[ty::ProjectionPredicate], + get_generics: GG) + -> fmt::Result where GG: for<'tcx> FnOnce(&TyCtxt<'tcx>) -> ty::Generics<'tcx> { - let (fn_trait_kind, verbose) = try!(ty::tls::with(|tcx| { + if let (Ns::Value, Some(self_ty)) = (ns, substs.self_ty()) { + try!(write!(f, "<{} as ", self_ty)); + } + + let (fn_trait_kind, verbose, last_name) = try!(ty::tls::with(|tcx| { + let (did, last_name) = if ns == Ns::Value { + // Try to get the impl/trait parent, if this is an + // associated value item (method or constant). + tcx.trait_of_item(did).or_else(|| tcx.impl_of_method(did)) + .map_or((did, None), |parent| (parent, Some(tcx.item_name(did)))) + } else { + (did, None) + }; try!(write!(f, "{}", tcx.item_path_str(did))); - Ok((tcx.lang_items.fn_trait_kind(did), tcx.sess.verbose())) + Ok((tcx.lang_items.fn_trait_kind(did), tcx.sess.verbose(), last_name)) })); let mut empty = true; @@ -185,7 +205,28 @@ fn parameterized(f: &mut fmt::Formatter, projection.ty)); } - start_or_continue(f, "", ">") + try!(start_or_continue(f, "", ">")); + + // For values, also print their name and type parameters. + if ns == Ns::Value { + if substs.self_ty().is_some() { + try!(write!(f, ">")); + } + + if let Some(name) = last_name { + try!(write!(f, "::{}", name)); + } + let tps = substs.types.get_slice(subst::FnSpace); + if !tps.is_empty() { + try!(write!(f, "::<{}", tps[0])); + for ty in &tps[1..] { + try!(write!(f, ", {}", ty)); + } + try!(write!(f, ">")); + } + } + + Ok(()) } fn in_binder<'tcx, T, U>(f: &mut fmt::Formatter, @@ -265,6 +306,7 @@ impl<'tcx> fmt::Display for TraitAndProjections<'tcx> { let TraitAndProjections(ref trait_ref, ref projection_bounds) = *self; parameterized(f, trait_ref.substs, trait_ref.def_id, + Ns::Type, projection_bounds, |tcx| tcx.lookup_trait_def(trait_ref.def_id).generics.clone()) } @@ -769,7 +811,7 @@ impl fmt::Display for ty::Binder> impl<'tcx> fmt::Display for ty::TraitRef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - parameterized(f, self.substs, self.def_id, &[], + parameterized(f, self.substs, self.def_id, Ns::Type, &[], |tcx| tcx.lookup_trait_def(self.def_id).generics.clone()) } } @@ -821,19 +863,9 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { try!(write!(f, "extern {} ", bare_fn.abi)); } - try!(write!(f, "{}", bare_fn.sig.0)); - try!(ty::tls::with(|tcx| { - write!(f, " {{{}", tcx.item_path_str(def_id)) - })); - - let tps = substs.types.get_slice(subst::FnSpace); - if tps.len() >= 1 { - try!(write!(f, "::<{}", tps[0])); - for &ty in &tps[1..] 
{ - try!(write!(f, ", {}", ty)); - } - try!(write!(f, ">")); - } + try!(write!(f, "{} {{", bare_fn.sig.0)); + try!(parameterized(f, substs, def_id, Ns::Value, &[], + |tcx| tcx.lookup_item_type(def_id).generics)); write!(f, "}}") } TyFnPtr(ref bare_fn) => { @@ -856,7 +888,7 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { !tcx.tcache.borrow().contains_key(&def.did) { write!(f, "{}<..>", tcx.item_path_str(def.did)) } else { - parameterized(f, substs, def.did, &[], + parameterized(f, substs, def.did, Ns::Type, &[], |tcx| tcx.lookup_item_type(def.did).generics) } }) diff --git a/src/librustc_back/abi.rs b/src/librustc_back/abi.rs deleted file mode 100644 index c3a3a8d582aff..0000000000000 --- a/src/librustc_back/abi.rs +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -pub const BOX_FIELD_DROP_GLUE: usize = 1; -pub const BOX_FIELD_BODY: usize = 4; - -/// The first half of a fat pointer. -/// - For a closure, this is the code address. -/// - For an object or trait instance, this is the address of the box. -/// - For a slice, this is the base address. -pub const FAT_PTR_ADDR: usize = 0; - -/// The second half of a fat pointer. -/// - For a closure, this is the address of the environment. -/// - For an object or trait instance, this is the address of the vtable. -/// - For a slice, this is the length. -pub const FAT_PTR_EXTRA: usize = 1; diff --git a/src/librustc_back/lib.rs b/src/librustc_back/lib.rs index 2b677d665d4f3..3ffc031d621f4 100644 --- a/src/librustc_back/lib.rs +++ b/src/librustc_back/lib.rs @@ -48,7 +48,6 @@ extern crate rustc_llvm; extern crate rustc_front; #[macro_use] extern crate log; -pub mod abi; pub mod tempdir; pub mod rpath; pub mod sha2; diff --git a/src/librustc_back/svh.rs b/src/librustc_back/svh.rs index b01b80b813399..3507a119e5455 100644 --- a/src/librustc_back/svh.rs +++ b/src/librustc_back/svh.rs @@ -279,7 +279,7 @@ mod svh_visitor { ExprBreak(id) => SawExprBreak(id.map(|id| id.node.name.as_str())), ExprAgain(id) => SawExprAgain(id.map(|id| id.node.name.as_str())), ExprRet(..) => SawExprRet, - ExprInlineAsm(ref asm) => SawExprInlineAsm(asm), + ExprInlineAsm(ref a,_,_) => SawExprInlineAsm(a), ExprStruct(..) => SawExprStruct, ExprRepeat(..) 
=> SawExprRepeat, } diff --git a/src/librustc_front/fold.rs b/src/librustc_front/fold.rs index 6ae59122f71c4..e65f2fc37debf 100644 --- a/src/librustc_front/fold.rs +++ b/src/librustc_front/fold.rs @@ -1107,34 +1107,11 @@ pub fn noop_fold_expr(Expr { id, node, span, attrs }: Expr, folder: & respan(folder.new_span(label.span), folder.fold_ident(label.node)) })), ExprRet(e) => ExprRet(e.map(|x| folder.fold_expr(x))), - ExprInlineAsm(InlineAsm { - inputs, - outputs, - asm, - asm_str_style, - clobbers, - volatile, - alignstack, - dialect, - expn_id, - }) => ExprInlineAsm(InlineAsm { - inputs: inputs.move_map(|(c, input)| (c, folder.fold_expr(input))), - outputs: outputs.move_map(|out| { - InlineAsmOutput { - constraint: out.constraint, - expr: folder.fold_expr(out.expr), - is_rw: out.is_rw, - is_indirect: out.is_indirect, - } - }), - asm: asm, - asm_str_style: asm_str_style, - clobbers: clobbers, - volatile: volatile, - alignstack: alignstack, - dialect: dialect, - expn_id: expn_id, - }), + ExprInlineAsm(asm, outputs, inputs) => { + ExprInlineAsm(asm, + outputs.move_map(|x| folder.fold_expr(x)), + inputs.move_map(|x| folder.fold_expr(x))) + } ExprStruct(path, fields, maybe_expr) => { ExprStruct(folder.fold_path(path), fields.move_map(|x| folder.fold_field(x)), diff --git a/src/librustc_front/hir.rs b/src/librustc_front/hir.rs index 0b1418fc87845..ee530677b60cd 100644 --- a/src/librustc_front/hir.rs +++ b/src/librustc_front/hir.rs @@ -39,7 +39,7 @@ use syntax::codemap::{self, Span, Spanned, DUMMY_SP, ExpnId}; use syntax::abi::Abi; use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, TokenTree, AsmDialect}; use syntax::ast::{Attribute, Lit, StrStyle, FloatTy, IntTy, UintTy, MetaItem}; -use syntax::attr::ThinAttributes; +use syntax::attr::{ThinAttributes, ThinAttributesExt}; use syntax::parse::token::InternedString; use syntax::ptr::P; @@ -635,6 +635,16 @@ pub enum Stmt_ { StmtSemi(P, NodeId), } +impl Stmt_ { + pub fn attrs(&self) -> &[Attribute] { + match *self { + StmtDecl(ref d, _) => d.node.attrs(), + StmtExpr(ref e, _) | + StmtSemi(ref e, _) => e.attrs.as_attr_slice(), + } + } +} + // FIXME (pending discussion of #1697, #2178...): local should really be // a refinement on pat. /// Local represents a `let` statement, e.g., `let : = ;` @@ -659,6 +669,15 @@ pub enum Decl_ { DeclItem(ItemId), } +impl Decl_ { + pub fn attrs(&self) -> &[Attribute] { + match *self { + DeclLocal(ref l) => l.attrs.as_attr_slice(), + DeclItem(_) => &[] + } + } +} + /// represents one arm of a 'match' #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] pub struct Arm { @@ -793,8 +812,8 @@ pub enum Expr_ { /// A `return`, with an optional value to be returned ExprRet(Option>), - /// Output of the `asm!()` macro - ExprInlineAsm(InlineAsm), + /// Inline assembly (from `asm!`), with its outputs and inputs. + ExprInlineAsm(InlineAsm, Vec>, Vec>), /// A struct literal expression. 
/// @@ -978,7 +997,6 @@ pub enum Ty_ { #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] pub struct InlineAsmOutput { pub constraint: InternedString, - pub expr: P, pub is_rw: bool, pub is_indirect: bool, } @@ -988,7 +1006,7 @@ pub struct InlineAsm { pub asm: InternedString, pub asm_str_style: StrStyle, pub outputs: HirVec, - pub inputs: HirVec<(InternedString, P)>, + pub inputs: HirVec, pub clobbers: HirVec, pub volatile: bool, pub alignstack: bool, diff --git a/src/librustc_front/intravisit.rs b/src/librustc_front/intravisit.rs index d71e392f521e7..e031dfc5b161d 100644 --- a/src/librustc_front/intravisit.rs +++ b/src/librustc_front/intravisit.rs @@ -798,12 +798,12 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { ExprRet(ref optional_expression) => { walk_list!(visitor, visit_expr, optional_expression); } - ExprInlineAsm(ref ia) => { - for &(_, ref input) in &ia.inputs { - visitor.visit_expr(&input) + ExprInlineAsm(_, ref outputs, ref inputs) => { + for output in outputs { + visitor.visit_expr(output) } - for output in &ia.outputs { - visitor.visit_expr(&output.expr) + for input in inputs { + visitor.visit_expr(input) } } } diff --git a/src/librustc_front/lowering.rs b/src/librustc_front/lowering.rs index 825ab3fbd4c82..8aac6356f9d3b 100644 --- a/src/librustc_front/lowering.rs +++ b/src/librustc_front/lowering.rs @@ -1320,14 +1320,11 @@ pub fn lower_expr(lctx: &LoweringContext, e: &Expr) -> P { dialect, expn_id, }) => hir::ExprInlineAsm(hir::InlineAsm { - inputs: inputs.iter() - .map(|&(ref c, ref input)| (c.clone(), lower_expr(lctx, input))) - .collect(), + inputs: inputs.iter().map(|&(ref c, _)| c.clone()).collect(), outputs: outputs.iter() .map(|out| { hir::InlineAsmOutput { constraint: out.constraint.clone(), - expr: lower_expr(lctx, &out.expr), is_rw: out.is_rw, is_indirect: out.is_indirect, } @@ -1340,7 +1337,8 @@ pub fn lower_expr(lctx: &LoweringContext, e: &Expr) -> P { alignstack: alignstack, dialect: dialect, expn_id: expn_id, - }), + }, outputs.iter().map(|out| lower_expr(lctx, &out.expr)).collect(), + inputs.iter().map(|&(_, ref input)| lower_expr(lctx, input)).collect()), ExprKind::Struct(ref path, ref fields, ref maybe_expr) => { hir::ExprStruct(lower_path(lctx, path), fields.iter().map(|x| lower_field(lctx, x)).collect(), diff --git a/src/librustc_front/print/pprust.rs b/src/librustc_front/print/pprust.rs index 1100f084454c7..1ebbbb73e8c99 100644 --- a/src/librustc_front/print/pprust.rs +++ b/src/librustc_front/print/pprust.rs @@ -1486,12 +1486,13 @@ impl<'a> State<'a> { _ => (), } } - hir::ExprInlineAsm(ref a) => { + hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => { try!(word(&mut self.s, "asm!")); try!(self.popen()); try!(self.print_string(&a.asm, a.asm_str_style)); try!(self.word_space(":")); + let mut out_idx = 0; try!(self.commasep(Inconsistent, &a.outputs, |s, out| { match out.constraint.slice_shift_char() { Some(('=', operand)) if out.is_rw => { @@ -1500,18 +1501,21 @@ impl<'a> State<'a> { _ => try!(s.print_string(&out.constraint, ast::StrStyle::Cooked)), } try!(s.popen()); - try!(s.print_expr(&out.expr)); + try!(s.print_expr(&outputs[out_idx])); try!(s.pclose()); + out_idx += 1; Ok(()) })); try!(space(&mut self.s)); try!(self.word_space(":")); - try!(self.commasep(Inconsistent, &a.inputs, |s, &(ref co, ref o)| { + let mut in_idx = 0; + try!(self.commasep(Inconsistent, &a.inputs, |s, co| { try!(s.print_string(&co, ast::StrStyle::Cooked)); try!(s.popen()); - try!(s.print_expr(&o)); + 
try!(s.print_expr(&inputs[in_idx])); try!(s.pclose()); + in_idx += 1; Ok(()) })); try!(space(&mut self.s)); diff --git a/src/librustc_llvm/lib.rs b/src/librustc_llvm/lib.rs index bc54b1ebab79a..a1dca796d9a42 100644 --- a/src/librustc_llvm/lib.rs +++ b/src/librustc_llvm/lib.rs @@ -33,8 +33,6 @@ extern crate libc; #[macro_use] #[no_link] extern crate rustc_bitflags; -pub use self::OtherAttribute::*; -pub use self::SpecialAttribute::*; pub use self::AttributeSet::*; pub use self::IntPredicate::*; pub use self::RealPredicate::*; @@ -133,6 +131,7 @@ pub enum DLLStorageClassTypes { } bitflags! { + #[derive(Default, Debug)] flags Attribute : u64 { const ZExt = 1 << 0, const SExt = 1 << 1, @@ -150,138 +149,95 @@ bitflags! { const OptimizeForSize = 1 << 13, const StackProtect = 1 << 14, const StackProtectReq = 1 << 15, - const Alignment = 1 << 16, const NoCapture = 1 << 21, const NoRedZone = 1 << 22, const NoImplicitFloat = 1 << 23, const Naked = 1 << 24, const InlineHint = 1 << 25, - const Stack = 7 << 26, const ReturnsTwice = 1 << 29, const UWTable = 1 << 30, const NonLazyBind = 1 << 31, + + // Some of these are missing from the LLVM C API, the rest are + // present, but commented out, and preceded by the following warning: + // FIXME: These attributes are currently not included in the C API as + // a temporary measure until the API/ABI impact to the C API is understood + // and the path forward agreed upon. + const SanitizeAddress = 1 << 32, + const MinSize = 1 << 33, + const NoDuplicate = 1 << 34, + const StackProtectStrong = 1 << 35, + const SanitizeThread = 1 << 36, + const SanitizeMemory = 1 << 37, + const NoBuiltin = 1 << 38, + const Returned = 1 << 39, + const Cold = 1 << 40, + const Builtin = 1 << 41, const OptimizeNone = 1 << 42, + const InAlloca = 1 << 43, + const NonNull = 1 << 44, + const JumpTable = 1 << 45, + const Convergent = 1 << 46, + const SafeStack = 1 << 47, + const NoRecurse = 1 << 48, + const InaccessibleMemOnly = 1 << 49, + const InaccessibleMemOrArgMemOnly = 1 << 50, } } - -#[repr(u64)] -#[derive(Copy, Clone)] -pub enum OtherAttribute { - // The following are not really exposed in - // the LLVM C api so instead to add these - // we call a wrapper function in RustWrapper - // that uses the C++ api. 
- SanitizeAddressAttribute = 1 << 32, - MinSizeAttribute = 1 << 33, - NoDuplicateAttribute = 1 << 34, - StackProtectStrongAttribute = 1 << 35, - SanitizeThreadAttribute = 1 << 36, - SanitizeMemoryAttribute = 1 << 37, - NoBuiltinAttribute = 1 << 38, - ReturnedAttribute = 1 << 39, - ColdAttribute = 1 << 40, - BuiltinAttribute = 1 << 41, - OptimizeNoneAttribute = 1 << 42, - InAllocaAttribute = 1 << 43, - NonNullAttribute = 1 << 44, -} - -#[derive(Copy, Clone)] -pub enum SpecialAttribute { - DereferenceableAttribute(u64) -} - -#[repr(C)] -#[derive(Copy, Clone)] -pub enum AttributeSet { - ReturnIndex = 0, - FunctionIndex = !0 -} - -pub trait AttrHelper { - fn apply_llfn(&self, idx: c_uint, llfn: ValueRef); - fn apply_callsite(&self, idx: c_uint, callsite: ValueRef); +#[derive(Copy, Clone, Default, Debug)] +pub struct Attributes { + regular: Attribute, + dereferenceable_bytes: u64 } -impl AttrHelper for Attribute { - fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) { - unsafe { - LLVMAddFunctionAttribute(llfn, idx, self.bits() as uint64_t); - } +impl Attributes { + pub fn set(&mut self, attr: Attribute) -> &mut Self { + self.regular = self.regular | attr; + self } - fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) { - unsafe { - LLVMAddCallSiteAttribute(callsite, idx, self.bits() as uint64_t); - } + pub fn unset(&mut self, attr: Attribute) -> &mut Self { + self.regular = self.regular - attr; + self } -} -impl AttrHelper for OtherAttribute { - fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) { - unsafe { - LLVMAddFunctionAttribute(llfn, idx, *self as uint64_t); - } + pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self { + self.dereferenceable_bytes = bytes; + self } - fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) { - unsafe { - LLVMAddCallSiteAttribute(callsite, idx, *self as uint64_t); - } + pub fn unset_dereferenceable(&mut self) -> &mut Self { + self.dereferenceable_bytes = 0; + self } -} -impl AttrHelper for SpecialAttribute { - fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) { - match *self { - DereferenceableAttribute(bytes) => unsafe { - LLVMAddDereferenceableAttr(llfn, idx, bytes as uint64_t); + pub fn apply_llfn(&self, idx: usize, llfn: ValueRef) { + unsafe { + LLVMAddFunctionAttribute(llfn, idx as c_uint, self.regular.bits()); + if self.dereferenceable_bytes != 0 { + LLVMAddDereferenceableAttr(llfn, idx as c_uint, + self.dereferenceable_bytes); } } } - fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) { - match *self { - DereferenceableAttribute(bytes) => unsafe { - LLVMAddDereferenceableCallSiteAttr(callsite, idx, bytes as uint64_t); + pub fn apply_callsite(&self, idx: usize, callsite: ValueRef) { + unsafe { + LLVMAddCallSiteAttribute(callsite, idx as c_uint, self.regular.bits()); + if self.dereferenceable_bytes != 0 { + LLVMAddDereferenceableCallSiteAttr(callsite, idx as c_uint, + self.dereferenceable_bytes); } } } } -pub struct AttrBuilder { - attrs: Vec<(usize, Box)> -} - -impl AttrBuilder { - pub fn new() -> AttrBuilder { - AttrBuilder { - attrs: Vec::new() - } - } - - pub fn arg(&mut self, idx: usize, a: T) -> &mut AttrBuilder { - self.attrs.push((idx, box a as Box)); - self - } - - pub fn ret(&mut self, a: T) -> &mut AttrBuilder { - self.attrs.push((ReturnIndex as usize, box a as Box)); - self - } - - pub fn apply_llfn(&self, llfn: ValueRef) { - for &(idx, ref attr) in &self.attrs { - attr.apply_llfn(idx as c_uint, llfn); - } - } - - pub fn apply_callsite(&self, callsite: ValueRef) { - for &(idx, ref attr) in &self.attrs { - 
attr.apply_callsite(idx as c_uint, callsite); - } - } +#[repr(C)] +#[derive(Copy, Clone)] +pub enum AttributeSet { + ReturnIndex = 0, + FunctionIndex = !0 } // enum for the LLVM IntPredicate type diff --git a/src/librustc_metadata/astencode.rs b/src/librustc_metadata/astencode.rs index d43a9f4dcda58..5c5574c3a8300 100644 --- a/src/librustc_metadata/astencode.rs +++ b/src/librustc_metadata/astencode.rs @@ -125,61 +125,51 @@ pub fn decode_inlined_item<'tcx>(cdata: &cstore::crate_metadata, tcx: &TyCtxt<'tcx>, parent_path: Vec, parent_def_path: ast_map::DefPath, - par_doc: rbml::Doc, + ast_doc: rbml::Doc, orig_did: DefId) - -> Result<&'tcx InlinedItem, (Vec, - ast_map::DefPath)> { - match par_doc.opt_child(c::tag_ast) { - None => Err((parent_path, parent_def_path)), - Some(ast_doc) => { - let mut path_as_str = None; - debug!("> Decoding inlined fn: {:?}::?", - { - // Do an Option dance to use the path after it is moved below. - let s = ast_map::path_to_string(parent_path.iter().cloned()); - path_as_str = Some(s); - path_as_str.as_ref().map(|x| &x[..]) - }); - let mut ast_dsr = reader::Decoder::new(ast_doc); - let from_id_range = Decodable::decode(&mut ast_dsr).unwrap(); - let to_id_range = reserve_id_range(&tcx.sess, from_id_range); - let dcx = &DecodeContext { - cdata: cdata, - tcx: tcx, - from_id_range: from_id_range, - to_id_range: to_id_range, - last_filemap_index: Cell::new(0) - }; - let raw_ii = decode_ast(ast_doc); - let ii = ast_map::map_decoded_item(&dcx.tcx.map, - parent_path, - parent_def_path, - raw_ii, - dcx); - let name = match *ii { - InlinedItem::Item(ref i) => i.name, - InlinedItem::Foreign(ref i) => i.name, - InlinedItem::TraitItem(_, ref ti) => ti.name, - InlinedItem::ImplItem(_, ref ii) => ii.name - }; - debug!("Fn named: {}", name); - debug!("< Decoded inlined fn: {}::{}", - path_as_str.unwrap(), - name); - region::resolve_inlined_item(&tcx.sess, &tcx.region_maps, ii); - decode_side_tables(dcx, ast_doc); - copy_item_types(dcx, ii, orig_did); - match *ii { - InlinedItem::Item(ref i) => { - debug!(">>> DECODED ITEM >>>\n{}\n<<< DECODED ITEM <<<", - ::rustc_front::print::pprust::item_to_string(&i)); - } - _ => { } - } - - Ok(ii) - } - } + -> &'tcx InlinedItem { + let mut path_as_str = None; + debug!("> Decoding inlined fn: {:?}::?", + { + // Do an Option dance to use the path after it is moved below. 
+ let s = ast_map::path_to_string(parent_path.iter().cloned()); + path_as_str = Some(s); + path_as_str.as_ref().map(|x| &x[..]) + }); + let mut ast_dsr = reader::Decoder::new(ast_doc); + let from_id_range = Decodable::decode(&mut ast_dsr).unwrap(); + let to_id_range = reserve_id_range(&tcx.sess, from_id_range); + let dcx = &DecodeContext { + cdata: cdata, + tcx: tcx, + from_id_range: from_id_range, + to_id_range: to_id_range, + last_filemap_index: Cell::new(0) + }; + let ii = ast_map::map_decoded_item(&dcx.tcx.map, + parent_path, + parent_def_path, + decode_ast(ast_doc), + dcx); + let name = match *ii { + InlinedItem::Item(ref i) => i.name, + InlinedItem::Foreign(ref i) => i.name, + InlinedItem::TraitItem(_, ref ti) => ti.name, + InlinedItem::ImplItem(_, ref ii) => ii.name + }; + debug!("Fn named: {}", name); + debug!("< Decoded inlined fn: {}::{}", + path_as_str.unwrap(), + name); + region::resolve_inlined_item(&tcx.sess, &tcx.region_maps, ii); + decode_side_tables(dcx, ast_doc); + copy_item_types(dcx, ii, orig_did); + if let InlinedItem::Item(ref i) = *ii { + debug!(">>> DECODED ITEM >>>\n{}\n<<< DECODED ITEM <<<", + ::rustc_front::print::pprust::item_to_string(&i)); + } + + ii } // ______________________________________________________________________ diff --git a/src/librustc_metadata/csearch.rs b/src/librustc_metadata/csearch.rs index 2cd119cfc48be..9ac7216165caf 100644 --- a/src/librustc_metadata/csearch.rs +++ b/src/librustc_metadata/csearch.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use astencode; use cstore; use decoder; use encoder; @@ -237,7 +236,7 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { } fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId) - -> ty::ImplOrTraitItem<'tcx> + -> Option> { let cdata = self.get_crate_data(def.krate); decoder::get_impl_or_trait_item( @@ -439,8 +438,7 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { -> FoundAst<'tcx> { let cdata = self.get_crate_data(def.krate); - let decode_inlined_item = Box::new(astencode::decode_inlined_item); - decoder::maybe_get_item_ast(&cdata, tcx, def.index, decode_inlined_item) + decoder::maybe_get_item_ast(&cdata, tcx, def.index) } fn maybe_get_item_mir(&self, tcx: &TyCtxt<'tcx>, def: DefId) @@ -509,21 +507,18 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { mir_map: &MirMap<'tcx>, krate: &hir::Crate) -> Vec { - let encode_inlined_item: encoder::EncodeInlinedItem = - Box::new(|ecx, rbml_w, ii| astencode::encode_inlined_item(ecx, rbml_w, ii)); - - let encode_params = encoder::EncodeParams { + let ecx = encoder::EncodeContext { diag: tcx.sess.diagnostic(), tcx: tcx, reexports: reexports, item_symbols: item_symbols, link_meta: link_meta, cstore: self, - encode_inlined_item: encode_inlined_item, reachable: reachable, mir_map: mir_map, + type_abbrevs: RefCell::new(FnvHashMap()), }; - encoder::encode_metadata(encode_params, krate) + encoder::encode_metadata(ecx, krate) } diff --git a/src/librustc_metadata/decoder.rs b/src/librustc_metadata/decoder.rs index 38a2a7794bcbd..1cb5f2b1c3fbc 100644 --- a/src/librustc_metadata/decoder.rs +++ b/src/librustc_metadata/decoder.rs @@ -14,6 +14,7 @@ use self::Family::*; +use astencode::decode_inlined_item; use cstore::{self, crate_metadata}; use common::*; use encoder::def_to_u64; @@ -797,64 +798,36 @@ pub fn get_item_name(intr: &IdentInterner, cdata: Cmd, id: DefIndex) -> ast::Nam item_name(intr, cdata.lookup_item(id)) } -pub type DecodeInlinedItem<'a> = - Box FnMut(Cmd, - &TyCtxt<'tcx>, - 
Vec, // parent_path - hir_map::DefPath, // parent_def_path - rbml::Doc, - DefId) - -> Result<&'tcx InlinedItem, (Vec, - hir_map::DefPath)> + 'a>; - -pub fn maybe_get_item_ast<'tcx>(cdata: Cmd, - tcx: &TyCtxt<'tcx>, - id: DefIndex, - mut decode_inlined_item: DecodeInlinedItem) +pub fn maybe_get_item_ast<'tcx>(cdata: Cmd, tcx: &TyCtxt<'tcx>, id: DefIndex) -> FoundAst<'tcx> { debug!("Looking up item: {:?}", id); let item_doc = cdata.lookup_item(id); let item_did = item_def_id(item_doc, cdata); - let parent_path = { - let mut path = item_path(item_doc); - path.pop(); - path - }; - let parent_def_path = { - let mut def_path = def_path(cdata, id); - def_path.pop(); - def_path - }; - match decode_inlined_item(cdata, - tcx, - parent_path, - parent_def_path, - item_doc, - item_did) { - Ok(ii) => FoundAst::Found(ii), - Err((mut parent_path, mut parent_def_path)) => { - match item_parent_item(cdata, item_doc) { - Some(parent_did) => { - // Remove the last element from the paths, since we are now - // trying to inline the parent. - parent_path.pop(); - parent_def_path.pop(); - - let parent_item = cdata.lookup_item(parent_did.index); - match decode_inlined_item(cdata, - tcx, - parent_path, - parent_def_path, - parent_item, - parent_did) { - Ok(ii) => FoundAst::FoundParent(parent_did, ii), - Err(_) => FoundAst::NotFound - } - } - None => FoundAst::NotFound + let mut parent_path = item_path(item_doc); + parent_path.pop(); + let mut parent_def_path = def_path(cdata, id); + parent_def_path.pop(); + if let Some(ast_doc) = reader::maybe_get_doc(item_doc, tag_ast as usize) { + let ii = decode_inlined_item(cdata, tcx, parent_path, + parent_def_path, + ast_doc, item_did); + return FoundAst::Found(ii); + } else if let Some(parent_did) = item_parent_item(cdata, item_doc) { + // Remove the last element from the paths, since we are now + // trying to inline the parent. 
+ parent_path.pop(); + parent_def_path.pop(); + let parent_doc = cdata.lookup_item(parent_did.index); + if let Some(ast_doc) = reader::maybe_get_doc(parent_doc, tag_ast as usize) { + let ii = decode_inlined_item(cdata, tcx, parent_path, + parent_def_path, + ast_doc, parent_did); + if let &InlinedItem::Item(ref i) = ii { + return FoundAst::FoundParent(parent_did, i); } } } + FoundAst::NotFound } pub fn is_item_mir_available<'tcx>(cdata: Cmd, id: DefIndex) -> bool { @@ -982,12 +955,16 @@ pub fn get_impl_or_trait_item<'tcx>(intr: Rc, cdata: Cmd, id: DefIndex, tcx: &TyCtxt<'tcx>) - -> ty::ImplOrTraitItem<'tcx> { + -> Option> { let item_doc = cdata.lookup_item(id); let def_id = item_def_id(item_doc, cdata); - let container_id = item_require_parent_item(cdata, item_doc); + let container_id = if let Some(id) = item_parent_item(cdata, item_doc) { + id + } else { + return None; + }; let container_doc = cdata.lookup_item(container_id.index); let container = match item_family(container_doc) { Trait => TraitContainer(container_id), @@ -998,7 +975,7 @@ pub fn get_impl_or_trait_item<'tcx>(intr: Rc, let vis = item_visibility(item_doc); let defaultness = item_defaultness(item_doc); - match item_sort(item_doc) { + Some(match item_sort(item_doc) { sort @ Some('C') | sort @ Some('c') => { let ty = doc_type(item_doc, tcx, cdata); ty::ConstTraitItem(Rc::new(ty::AssociatedConst { @@ -1044,8 +1021,8 @@ pub fn get_impl_or_trait_item<'tcx>(intr: Rc, container: container, })) } - _ => panic!("unknown impl/trait item sort"), - } + _ => return None + }) } pub fn get_trait_item_def_ids(cdata: Cmd, id: DefIndex) @@ -1085,7 +1062,7 @@ pub fn get_provided_trait_methods<'tcx>(intr: Rc, cdata, did.index, tcx); - if let ty::MethodTraitItem(ref method) = trait_item { + if let Some(ty::MethodTraitItem(ref method)) = trait_item { Some((*method).clone()) } else { None @@ -1114,7 +1091,7 @@ pub fn get_associated_consts<'tcx>(intr: Rc, cdata, did.index, tcx); - if let ty::ConstTraitItem(ref ac) = trait_item { + if let Some(ty::ConstTraitItem(ref ac)) = trait_item { Some((*ac).clone()) } else { None diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs index 41baa0b159148..e677ea962f9cf 100644 --- a/src/librustc_metadata/encoder.rs +++ b/src/librustc_metadata/encoder.rs @@ -13,6 +13,7 @@ #![allow(unused_must_use)] // everything is just a MemWriter, can't fail #![allow(non_camel_case_types)] +use astencode::encode_inlined_item; use common::*; use cstore; use decoder; @@ -55,21 +56,6 @@ use rustc_front::hir::{self, PatKind}; use rustc_front::intravisit::Visitor; use rustc_front::intravisit; -pub type EncodeInlinedItem<'a> = - Box; - -pub struct EncodeParams<'a, 'tcx: 'a> { - pub diag: &'a Handler, - pub tcx: &'a TyCtxt<'tcx>, - pub reexports: &'a def::ExportMap, - pub item_symbols: &'a RefCell>, - pub link_meta: &'a LinkMeta, - pub cstore: &'a cstore::CStore, - pub encode_inlined_item: EncodeInlinedItem<'a>, - pub reachable: &'a NodeSet, - pub mir_map: &'a MirMap<'tcx>, -} - pub struct EncodeContext<'a, 'tcx: 'a> { pub diag: &'a Handler, pub tcx: &'a TyCtxt<'tcx>, @@ -77,7 +63,6 @@ pub struct EncodeContext<'a, 'tcx: 'a> { pub item_symbols: &'a RefCell>, pub link_meta: &'a LinkMeta, pub cstore: &'a cstore::CStore, - pub encode_inlined_item: RefCell>, pub type_abbrevs: tyencode::abbrev_map<'tcx>, pub reachable: &'a NodeSet, pub mir_map: &'a MirMap<'tcx>, @@ -688,6 +673,7 @@ fn encode_info_for_associated_const<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, rbml_w, 
InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id), ii)); + encode_mir(ecx, rbml_w, ii.id); } rbml_w.end_tag(); @@ -733,6 +719,7 @@ fn encode_info_for_method<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, rbml_w, InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id), impl_item)); + encode_mir(ecx, rbml_w, impl_item.id); } encode_constness(rbml_w, sig.constness); encode_defaultness(rbml_w, impl_item.defaultness); @@ -820,23 +807,6 @@ fn encode_repr_attrs(rbml_w: &mut Encoder, rbml_w.end_tag(); } -fn encode_inlined_item(ecx: &EncodeContext, - rbml_w: &mut Encoder, - ii: InlinedItemRef) { - let mut eii = ecx.encode_inlined_item.borrow_mut(); - let eii: &mut EncodeInlinedItem = &mut *eii; - eii(ecx, rbml_w, ii); - - let node_id = match ii { - InlinedItemRef::Item(item) => item.id, - InlinedItemRef::TraitItem(_, trait_item) => trait_item.id, - InlinedItemRef::ImplItem(_, impl_item) => impl_item.id, - InlinedItemRef::Foreign(foreign_item) => foreign_item.id - }; - - encode_mir(ecx, rbml_w, node_id); -} - fn encode_mir(ecx: &EncodeContext, rbml_w: &mut Encoder, node_id: NodeId) { if let Some(mir) = ecx.mir_map.map.get(&node_id) { rbml_w.start_tag(tag_mir as usize); @@ -958,6 +928,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, encode_path(rbml_w, path); encode_attributes(rbml_w, &item.attrs); encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); + encode_mir(ecx, rbml_w, item.id); encode_visibility(rbml_w, vis); encode_stability(rbml_w, stab); encode_deprecation(rbml_w, depr); @@ -976,6 +947,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, let needs_inline = tps_len > 0 || attr::requests_inline(&item.attrs); if needs_inline || constness == hir::Constness::Const { encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); + encode_mir(ecx, rbml_w, item.id); } if tps_len == 0 { encode_symbol(ecx, rbml_w, item.id); @@ -1044,6 +1016,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, encode_variant_id(rbml_w, ecx.tcx.map.local_def_id(v.node.data.id())); } encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); + encode_mir(ecx, rbml_w, item.id); encode_path(rbml_w, path); // Encode inherent implementations for this enumeration. @@ -1092,6 +1065,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, encode_struct_fields(rbml_w, variant); encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); + encode_mir(ecx, rbml_w, item.id); // Encode inherent implementations for this structure. 
encode_inherent_implementations(ecx, rbml_w, def_id); @@ -1374,6 +1348,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, encode_inlined_item(ecx, rbml_w, InlinedItemRef::TraitItem(def_id, trait_item)); + encode_mir(ecx, rbml_w, trait_item.id); } hir::MethodTraitItem(ref sig, ref body) => { // If this is a static method, we've already @@ -1389,6 +1364,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, encode_item_sort(rbml_w, 'p'); encode_inlined_item(ecx, rbml_w, InlinedItemRef::TraitItem(def_id, trait_item)); + encode_mir(ecx, rbml_w, trait_item.id); } else { encode_item_sort(rbml_w, 'r'); } @@ -1426,13 +1402,15 @@ fn encode_info_for_foreign_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, encode_name(rbml_w, nitem.name); if abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { encode_inlined_item(ecx, rbml_w, InlinedItemRef::Foreign(nitem)); + encode_mir(ecx, rbml_w, nitem.id); + } else { + encode_symbol(ecx, rbml_w, nitem.id); } encode_attributes(rbml_w, &nitem.attrs); let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id)); let depr = stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id)); encode_stability(rbml_w, stab); encode_deprecation(rbml_w, depr); - encode_symbol(ecx, rbml_w, nitem.id); encode_method_argument_names(rbml_w, &fndecl); } hir::ForeignItemStatic(_, mutbl) => { @@ -1928,32 +1906,7 @@ fn encode_dylib_dependency_formats(rbml_w: &mut Encoder, ecx: &EncodeContext) { #[allow(non_upper_case_globals)] pub const metadata_encoding_version : &'static [u8] = &[b'r', b'u', b's', b't', 0, 0, 0, 2 ]; -pub fn encode_metadata(parms: EncodeParams, krate: &hir::Crate) -> Vec { - let EncodeParams { - item_symbols, - diag, - tcx, - reexports, - cstore, - encode_inlined_item, - link_meta, - reachable, - mir_map, - .. 
- } = parms; - let ecx = EncodeContext { - diag: diag, - tcx: tcx, - reexports: reexports, - item_symbols: item_symbols, - link_meta: link_meta, - cstore: cstore, - encode_inlined_item: RefCell::new(encode_inlined_item), - type_abbrevs: RefCell::new(FnvHashMap()), - reachable: reachable, - mir_map: mir_map, - }; - +pub fn encode_metadata(ecx: EncodeContext, krate: &hir::Crate) -> Vec { let mut wr = Cursor::new(Vec::new()); { diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index 2338d7df01a85..4c0e9b98d9a6e 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ b/src/librustc_mir/build/expr/as_rvalue.rs @@ -39,8 +39,20 @@ impl<'a,'tcx> Builder<'a,'tcx> { ExprKind::Scope { extent, value } => { this.in_scope(extent, block, |this| this.as_rvalue(block, value)) } - ExprKind::InlineAsm { asm } => { - block.and(Rvalue::InlineAsm(asm.clone())) + ExprKind::InlineAsm { asm, outputs, inputs } => { + let outputs = outputs.into_iter().map(|output| { + unpack!(block = this.as_lvalue(block, output)) + }).collect(); + + let inputs = inputs.into_iter().map(|input| { + unpack!(block = this.as_operand(block, input)) + }).collect(); + + block.and(Rvalue::InlineAsm { + asm: asm.clone(), + outputs: outputs, + inputs: inputs + }) } ExprKind::Repeat { value, count } => { let value_operand = unpack!(block = this.as_operand(block, value)); @@ -73,8 +85,13 @@ impl<'a,'tcx> Builder<'a,'tcx> { }) } ExprKind::Cast { source } => { - let source = unpack!(block = this.as_operand(block, source)); - block.and(Rvalue::Cast(CastKind::Misc, source, expr.ty)) + let source = this.hir.mirror(source); + if source.ty == expr.ty { + this.expr_as_rvalue(block, source) + } else { + let source = unpack!(block = this.as_operand(block, source)); + block.and(Rvalue::Cast(CastKind::Misc, source, expr.ty)) + } } ExprKind::ReifyFnPointer { source } => { let source = unpack!(block = this.as_operand(block, source)); diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index 9ecbf748d7c83..673ff9e86c440 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ b/src/librustc_mir/build/matches/mod.rs @@ -238,6 +238,13 @@ pub struct MatchPair<'pat, 'tcx:'pat> { // ... must match this pattern. pattern: &'pat Pattern<'tcx>, + + // HACK(eddyb) This is used to toggle whether a Slice pattern + // has had its length checked. This is only necessary because + // the "rest" part of the pattern right now has type &[T] and + // as such, it requires an Rvalue::Slice to be generated. + // See RFC 495 / issue #23121 for the eventual (proper) solution. + slice_len_checked: bool } #[derive(Clone, Debug, PartialEq)] diff --git a/src/librustc_mir/build/matches/simplify.rs b/src/librustc_mir/build/matches/simplify.rs index 2c8e1c1ccf673..a3337badf884b 100644 --- a/src/librustc_mir/build/matches/simplify.rs +++ b/src/librustc_mir/build/matches/simplify.rs @@ -95,7 +95,18 @@ impl<'a,'tcx> Builder<'a,'tcx> { Err(match_pair) } - PatternKind::Array { ref prefix, ref slice, ref suffix } => { + PatternKind::Range { .. } | + PatternKind::Variant { .. } => { + // cannot simplify, test is required + Err(match_pair) + } + + PatternKind::Slice { .. 
} if !match_pair.slice_len_checked => { + Err(match_pair) + } + + PatternKind::Array { ref prefix, ref slice, ref suffix } | + PatternKind::Slice { ref prefix, ref slice, ref suffix } => { unpack!(block = self.prefix_suffix_slice(&mut candidate.match_pairs, block, match_pair.lvalue.clone(), @@ -105,13 +116,6 @@ impl<'a,'tcx> Builder<'a,'tcx> { Ok(block) } - PatternKind::Slice { .. } | - PatternKind::Range { .. } | - PatternKind::Variant { .. } => { - // cannot simplify, test is required - Err(match_pair) - } - PatternKind::Leaf { ref subpatterns } => { // tuple struct, match subpats (if any) candidate.match_pairs diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index d42c8ff7bd79f..0efa24f311943 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -75,7 +75,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { } } - PatternKind::Slice { ref prefix, ref slice, ref suffix } => { + PatternKind::Slice { ref prefix, ref slice, ref suffix } + if !match_pair.slice_len_checked => { let len = prefix.len() + suffix.len(); let op = if slice.is_some() { BinOp::Ge @@ -89,6 +90,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { } PatternKind::Array { .. } | + PatternKind::Slice { .. } | PatternKind::Wild | PatternKind::Binding { .. } | PatternKind::Leaf { .. } | @@ -174,14 +176,78 @@ impl<'a,'tcx> Builder<'a,'tcx> { targets } - TestKind::Eq { ref value, ty } => { - let expect = self.literal_operand(test.span, ty.clone(), Literal::Value { - value: value.clone() - }); - let val = Operand::Consume(lvalue.clone()); + TestKind::Eq { ref value, mut ty } => { + let mut val = Operand::Consume(lvalue.clone()); + + // If we're using b"..." as a pattern, we need to insert an + // unsizing coercion, as the byte string has the type &[u8; N]. + let expect = if let ConstVal::ByteStr(ref bytes) = *value { + let tcx = self.hir.tcx(); + + // Unsize the lvalue to &[u8], too, if necessary. + if let ty::TyRef(region, mt) = ty.sty { + if let ty::TyArray(_, _) = mt.ty.sty { + ty = tcx.mk_imm_ref(region, tcx.mk_slice(tcx.types.u8)); + let val_slice = self.temp(ty); + self.cfg.push_assign(block, test.span, &val_slice, + Rvalue::Cast(CastKind::Unsize, val, ty)); + val = Operand::Consume(val_slice); + } + } + + assert!(ty.is_slice()); + + let array_ty = tcx.mk_array(tcx.types.u8, bytes.len()); + let array_ref = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), array_ty); + let array = self.literal_operand(test.span, array_ref, Literal::Value { + value: value.clone() + }); + + let slice = self.temp(ty); + self.cfg.push_assign(block, test.span, &slice, + Rvalue::Cast(CastKind::Unsize, array, ty)); + Operand::Consume(slice) + } else { + self.literal_operand(test.span, ty, Literal::Value { + value: value.clone() + }) + }; + + // Use PartialEq::eq for &str and &[u8] slices, instead of BinOp::Eq. 
let fail = self.cfg.start_new_block(); - let block = self.compare(block, fail, test.span, BinOp::Eq, expect, val.clone()); - vec![block, fail] + if let ty::TyRef(_, mt) = ty.sty { + assert!(ty.is_slice()); + let eq_def_id = self.hir.tcx().lang_items.eq_trait().unwrap(); + let ty = mt.ty; + let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, vec![ty]); + + let bool_ty = self.hir.bool_ty(); + let eq_result = self.temp(bool_ty); + let eq_block = self.cfg.start_new_block(); + let cleanup = self.diverge_cleanup(); + self.cfg.terminate(block, Terminator::Call { + func: Operand::Constant(Constant { + span: test.span, + ty: mty, + literal: method + }), + args: vec![val, expect], + destination: Some((eq_result.clone(), eq_block)), + cleanup: cleanup, + }); + + // check the result + let block = self.cfg.start_new_block(); + self.cfg.terminate(eq_block, Terminator::If { + cond: Operand::Consume(eq_result), + targets: (block, fail), + }); + + vec![block, fail] + } else { + let block = self.compare(block, fail, test.span, BinOp::Eq, expect, val); + vec![block, fail] + } } TestKind::Range { ref lo, ref hi, ty } => { @@ -349,9 +415,26 @@ impl<'a,'tcx> Builder<'a,'tcx> { } } - TestKind::Eq { .. } | - TestKind::Range { .. } | + // If we are performing a length check, then this + // informs slice patterns, but nothing else. TestKind::Len { .. } => { + let pattern_test = self.test(&match_pair); + match *match_pair.pattern.kind { + PatternKind::Slice { .. } if pattern_test.kind == test.kind => { + let mut new_candidate = candidate.clone(); + + // Set up the MatchKind to simplify this like an array. + new_candidate.match_pairs[match_pair_index] + .slice_len_checked = true; + resulting_candidates[0].push(new_candidate); + true + } + _ => false + } + } + + TestKind::Eq { .. } | + TestKind::Range { .. } => { // These are all binary tests. // // FIXME(#29623) we can be more clever here @@ -405,7 +488,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { .map(|subpattern| { // e.g., `(x as Variant).0` let lvalue = downcast_lvalue.clone().field(subpattern.field, - subpattern.field_ty()); + subpattern.pattern.ty); // e.g., `(x as Variant).0 @ P1` MatchPair::new(lvalue, &subpattern.pattern) }); diff --git a/src/librustc_mir/build/matches/util.rs b/src/librustc_mir/build/matches/util.rs index c295ed168badb..b46c3ffb76a1b 100644 --- a/src/librustc_mir/build/matches/util.rs +++ b/src/librustc_mir/build/matches/util.rs @@ -22,7 +22,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { subpatterns.iter() .map(|fieldpat| { let lvalue = lvalue.clone().field(fieldpat.field, - fieldpat.field_ty()); + fieldpat.pattern.ty); MatchPair::new(lvalue, &fieldpat.pattern) }) .collect() @@ -118,6 +118,7 @@ impl<'pat, 'tcx> MatchPair<'pat, 'tcx> { MatchPair { lvalue: lvalue, pattern: pattern, + slice_len_checked: false, } } } diff --git a/src/librustc_mir/build/mod.rs b/src/librustc_mir/build/mod.rs index 5d9f827984e0e..b40775f939f75 100644 --- a/src/librustc_mir/build/mod.rs +++ b/src/librustc_mir/build/mod.rs @@ -141,15 +141,18 @@ impl<'a,'tcx> Builder<'a,'tcx> { .chain(explicits) .enumerate() .map(|(index, (ty, pattern))| { + let lvalue = Lvalue::Arg(index as u32); if let Some(pattern) = pattern { - let lvalue = Lvalue::Arg(index as u32); let pattern = this.hir.irrefutable_pat(pattern); unpack!(block = this.lvalue_into_pattern(block, argument_extent, pattern, &lvalue)); } - ArgDecl { ty: ty } + // Make sure we drop (parts of) the argument even when not matched on. 
+ this.schedule_drop(pattern.as_ref().map_or(ast_block.span, |pat| pat.span), + argument_extent, &lvalue, ty); + ArgDecl { ty: ty, spread: false } }) .collect(); diff --git a/src/librustc_mir/hair/cx/expr.rs b/src/librustc_mir/hair/cx/expr.rs index 1f8a5da9c1b9d..4d87d926e4055 100644 --- a/src/librustc_mir/hair/cx/expr.rs +++ b/src/librustc_mir/hair/cx/expr.rs @@ -337,8 +337,12 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { convert_path_expr(cx, self) } - hir::ExprInlineAsm(ref asm) => { - ExprKind::InlineAsm { asm: asm } + hir::ExprInlineAsm(ref asm, ref outputs, ref inputs) => { + ExprKind::InlineAsm { + asm: asm, + outputs: outputs.to_ref(), + inputs: inputs.to_ref() + } } // Now comes the rote stuff: @@ -668,11 +672,16 @@ fn convert_path_expr<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr) }, Def::Const(def_id) | Def::AssociatedConst(def_id) => { - if let Some(v) = cx.try_const_eval_literal(expr) { - return ExprKind::Literal { literal: v }; - } else { - def_id + let substs = Some(cx.tcx.node_id_item_substs(expr.id).substs); + if let Some((e, _)) = const_eval::lookup_const_by_id(cx.tcx, def_id, substs) { + // FIXME ConstVal can't be yet used with adjustments, as they would be lost. + if !cx.tcx.tables.borrow().adjustments.contains_key(&e.id) { + if let Some(v) = cx.try_const_eval_literal(e) { + return ExprKind::Literal { literal: v }; + } + } } + def_id } Def::Static(node_id, _) => return ExprKind::StaticRef { diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index fd4cf7c04734a..b97bfaf5aef68 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -19,7 +19,9 @@ use hair::*; use rustc::mir::repr::*; use rustc::middle::const_eval::{self, ConstVal}; +use rustc::middle::def_id::DefId; use rustc::middle::infer::InferCtxt; +use rustc::middle::subst::{Subst, Substs}; use rustc::middle::ty::{self, Ty, TyCtxt}; use syntax::codemap::Span; use syntax::parse::token; @@ -84,9 +86,44 @@ impl<'a,'tcx:'a> Cx<'a, 'tcx> { pub fn try_const_eval_literal(&mut self, e: &hir::Expr) -> Option> { let hint = const_eval::EvalHint::ExprTypeChecked; - const_eval::eval_const_expr_partial(self.tcx, e, hint, None) - .ok() - .map(|v| Literal::Value { value: v }) + const_eval::eval_const_expr_partial(self.tcx, e, hint, None).ok().and_then(|v| { + match v { + // All of these contain local IDs, unsuitable for storing in MIR. + ConstVal::Struct(_) | ConstVal::Tuple(_) | + ConstVal::Array(..) | ConstVal::Repeat(..) | + ConstVal::Function(_) => None, + + _ => Some(Literal::Value { value: v }) + } + }) + } + + pub fn trait_method(&mut self, + trait_def_id: DefId, + method_name: &str, + self_ty: Ty<'tcx>, + params: Vec>) + -> (Ty<'tcx>, Literal<'tcx>) { + let method_name = token::intern(method_name); + let substs = Substs::new_trait(params, vec![], self_ty); + for trait_item in self.tcx.trait_items(trait_def_id).iter() { + match *trait_item { + ty::ImplOrTraitItem::MethodTraitItem(ref method) => { + if method.name == method_name { + let method_ty = self.tcx.lookup_item_type(method.def_id); + let method_ty = method_ty.ty.subst(self.tcx, &substs); + return (method_ty, Literal::Item { + def_id: method.def_id, + substs: self.tcx.mk_substs(substs), + }); + } + } + ty::ImplOrTraitItem::ConstTraitItem(..) | + ty::ImplOrTraitItem::TypeTraitItem(..) 
=> {} + } + } + + self.tcx.sess.bug(&format!("found no method `{}` in `{:?}`", method_name, trait_def_id)); } pub fn num_variants(&mut self, adt_def: ty::AdtDef<'tcx>) -> usize { diff --git a/src/librustc_mir/hair/cx/pattern.rs b/src/librustc_mir/hair/cx/pattern.rs index d1e3f08aff894..a987377837417 100644 --- a/src/librustc_mir/hair/cx/pattern.rs +++ b/src/librustc_mir/hair/cx/pattern.rs @@ -63,6 +63,8 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> { } fn to_pattern(&mut self, pat: &hir::Pat) -> Pattern<'tcx> { + let mut ty = self.cx.tcx.node_id_to_type(pat.id); + let kind = match pat.node { PatKind::Wild => PatternKind::Wild, @@ -84,9 +86,9 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> { { let def = self.cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def(); match def { - Def::Const(def_id) | Def::AssociatedConst(def_id) => - match const_eval::lookup_const_by_id(self.cx.tcx, def_id, - Some(pat.id), None) { + Def::Const(def_id) | Def::AssociatedConst(def_id) => { + let substs = Some(self.cx.tcx.node_id_item_substs(pat.id).substs); + match const_eval::lookup_const_by_id(self.cx.tcx, def_id, substs) { Some((const_expr, _const_ty)) => { let pat = const_eval::const_expr_to_pat(self.cx.tcx, const_expr, pat.span); @@ -97,7 +99,8 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> { pat.span, &format!("cannot eval constant: {:?}", def_id)) } - }, + } + } _ => self.cx.tcx.sess.span_bug( pat.span, @@ -169,6 +172,17 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> { hir::BindByRef(hir::MutImmutable) => (Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Shared)), }; + + // A ref x pattern is the same node used for x, and as such it has + // x's type, which is &T, where we want T (the type being matched). + if let hir::BindByRef(_) = bm { + if let ty::TyRef(_, mt) = ty.sty { + ty = mt.ty; + } else { + unreachable!("`ref {}` has wrong type {}", ident.node, ty); + } + } + PatternKind::Binding { mutability: mutability, mode: mode, @@ -234,8 +248,6 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> { } }; - let ty = self.cx.tcx.node_id_to_type(pat.id); - Pattern { span: pat.span, ty: ty, @@ -314,20 +326,3 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> { } } } - -impl<'tcx> FieldPattern<'tcx> { - pub fn field_ty(&self) -> Ty<'tcx> { - debug!("field_ty({:?},ty={:?})", self, self.pattern.ty); - let r = match *self.pattern.kind { - PatternKind::Binding { mode: BindingMode::ByRef(..), ..} => { - match self.pattern.ty.sty { - ty::TyRef(_, mt) => mt.ty, - _ => unreachable!() - } - } - _ => self.pattern.ty - }; - debug!("field_ty -> {:?}", r); - r - } -} diff --git a/src/librustc_mir/hair/mod.rs b/src/librustc_mir/hair/mod.rs index 6a22dce7af9d2..78c438df6f87a 100644 --- a/src/librustc_mir/hair/mod.rs +++ b/src/librustc_mir/hair/mod.rs @@ -230,6 +230,8 @@ pub enum ExprKind<'tcx> { }, InlineAsm { asm: &'tcx hir::InlineAsm, + outputs: Vec>, + inputs: Vec> }, } diff --git a/src/librustc_mir/mir_map.rs b/src/librustc_mir/mir_map.rs index 13521de78af28..4717c54ca6491 100644 --- a/src/librustc_mir/mir_map.rs +++ b/src/librustc_mir/mir_map.rs @@ -33,6 +33,7 @@ use rustc::util::common::ErrorReported; use rustc::util::nodemap::NodeMap; use rustc_front::hir; use rustc_front::intravisit::{self, Visitor}; +use syntax::abi::Abi; use syntax::ast; use syntax::attr::AttrMetaMethods; use syntax::codemap::Span; @@ -181,13 +182,20 @@ fn build_mir<'a,'tcx:'a>(cx: Cx<'a,'tcx>, let parameter_scope = cx.tcx().region_maps.lookup_code_extent( CodeExtentData::ParameterScope { fn_id: fn_id, body_id: 
body.id }); - Ok(build::construct(cx, - span, - implicit_arg_tys, - arguments, - parameter_scope, - fn_sig.output, - body)) + let mut mir = build::construct(cx, span, implicit_arg_tys, arguments, + parameter_scope, fn_sig.output, body); + + match cx.tcx().node_id_to_type(fn_id).sty { + ty::TyFnDef(_, _, f) if f.abi == Abi::RustCall => { + // RustCall pseudo-ABI untuples the last argument. + if let Some(arg_decl) = mir.arg_decls.last_mut() { + arg_decl.spread = true; + } + } + _ => {} + } + + Ok(mir) } fn closure_self_ty<'a, 'tcx>(tcx: &TyCtxt<'tcx>, diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index 1920bd552ec3e..9ac3749589e90 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -82,7 +82,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { Rvalue::BinaryOp(_, _, _) | Rvalue::UnaryOp(_, _) | Rvalue::Slice { input: _, from_start: _, from_end: _ } | - Rvalue::InlineAsm(_) => {}, + Rvalue::InlineAsm {..} => {}, Rvalue::Repeat(_, ref mut value) => value.ty = self.tcx.erase_regions(&value.ty), Rvalue::Ref(ref mut region, _, _) => *region = ty::ReStatic, diff --git a/src/librustc_passes/consts.rs b/src/librustc_passes/consts.rs index 6be7f6c200247..c964179d4076a 100644 --- a/src/librustc_passes/consts.rs +++ b/src/librustc_passes/consts.rs @@ -610,9 +610,8 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, } Some(Def::Const(did)) | Some(Def::AssociatedConst(did)) => { - if let Some((expr, _ty)) = const_eval::lookup_const_by_id(v.tcx, did, - Some(e.id), - None) { + let substs = Some(v.tcx.node_id_item_substs(e.id).substs); + if let Some((expr, _)) = const_eval::lookup_const_by_id(v.tcx, did, substs) { let inner = v.global_expr(Mode::Const, expr); v.add_qualif(inner); } @@ -756,7 +755,7 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, // Expressions with side-effects. hir::ExprAssign(..) | hir::ExprAssignOp(..) | - hir::ExprInlineAsm(_) => { + hir::ExprInlineAsm(..) 
=> { v.add_qualif(ConstQualif::NOT_CONST); if v.mode != Mode::Var { span_err!(v.tcx.sess, e.span, E0019, diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index d7e79e46720b3..c8ce09b4d7975 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -61,7 +61,6 @@ pub use rustc::lint; pub use rustc::util; pub mod back { - pub use rustc_back::abi; pub use rustc_back::rpath; pub use rustc_back::svh; diff --git a/src/librustc_trans/trans/_match.rs b/src/librustc_trans/trans/_match.rs index f5fbec0b1879e..8aea2f1ec4f4a 100644 --- a/src/librustc_trans/trans/_match.rs +++ b/src/librustc_trans/trans/_match.rs @@ -200,12 +200,13 @@ use middle::lang_items::StrEqFnLangItem; use middle::mem_categorization as mc; use middle::mem_categorization::Categorization; use middle::pat_util::*; +use middle::subst::Substs; use trans::adt; use trans::base::*; use trans::build::{AddCase, And, Br, CondBr, GEPi, InBoundsGEP, Load, PointerCast}; use trans::build::{Not, Store, Sub, add_comment}; use trans::build; -use trans::callee; +use trans::callee::{Callee, ArgVals}; use trans::cleanup::{self, CleanupMethods, DropHintMethods}; use trans::common::*; use trans::consts; @@ -216,6 +217,7 @@ use trans::monomorphize; use trans::tvec; use trans::type_of; use trans::Disr; +use trans::value::Value; use middle::ty::{self, Ty, TyCtxt}; use middle::traits::ProjectionMode; use session::config::NoDebugInfo; @@ -448,6 +450,12 @@ impl<'tcx> Datum<'tcx, Lvalue> { } } +impl fmt::Debug for MatchInput { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&Value(self.val), f) + } +} + impl MatchInput { fn from_val(val: ValueRef) -> MatchInput { MatchInput { @@ -466,11 +474,8 @@ fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, col: usize, val: MatchInput) -> Vec> { - debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={})", - bcx.to_str(), - m, - col, - bcx.val_to_string(val.val)); + debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={:?})", + bcx.to_str(), m, col, val); let _indenter = indenter(); m.iter().map(|br| { @@ -506,11 +511,8 @@ fn enter_match<'a, 'b, 'p, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, -> Vec> where F: FnMut(&[&'p hir::Pat]) -> Option>, { - debug!("enter_match(bcx={}, m={:?}, col={}, val={})", - bcx.to_str(), - m, - col, - bcx.val_to_string(val.val)); + debug!("enter_match(bcx={}, m={:?}, col={}, val={:?})", + bcx.to_str(), m, col, val); let _indenter = indenter(); m.iter().filter_map(|br| { @@ -549,11 +551,8 @@ fn enter_default<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, col: usize, val: MatchInput) -> Vec> { - debug!("enter_default(bcx={}, m={:?}, col={}, val={})", - bcx.to_str(), - m, - col, - bcx.val_to_string(val.val)); + debug!("enter_default(bcx={}, m={:?}, col={}, val={:?})", + bcx.to_str(), m, col, val); let _indenter = indenter(); // Collect all of the matches that can match against anything. 
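
A minimal, self-contained sketch (not the patch's own code) of the debug-formatting pattern these _match.rs hunks switch to: rather than eagerly building strings with `bcx.val_to_string(..)`, a small `Value` newtype implements `fmt::Debug`, so `debug!("... {:?}", Value(v))` defers pretty-printing to the formatter. The `ValueRef` alias and `render` helper below are placeholders for illustration only, not real compiler APIs.

use std::fmt;

// Stand-in for llvm::ValueRef (a raw pointer in the real compiler); the
// `render` helper is hypothetical and exists only for this sketch.
type ValueRef = usize;

fn render(v: ValueRef) -> String {
    format!("%v{}", v)
}

// Newtype whose Debug impl pretty-prints the wrapped value, so callers can
// write `debug!("test_val={:?}", Value(v))` instead of building a string
// up front with a val_to_string-style method.
struct Value(ValueRef);

impl fmt::Debug for Value {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(&render(self.0))
    }
}

fn main() {
    let v: ValueRef = 7;
    // Formatting work happens only when the argument is actually printed.
    println!("test_val={:?}", Value(v));
}
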
@@ -606,12 +605,8 @@ fn enter_opt<'a, 'p, 'blk, 'tcx>( variant_size: usize, val: MatchInput) -> Vec> { - debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={})", - bcx.to_str(), - m, - *opt, - col, - bcx.val_to_string(val.val)); + debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={:?})", + bcx.to_str(), m, *opt, col, val); let _indenter = indenter(); let ctor = match opt { @@ -887,7 +882,7 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>, rhs_t: Ty<'tcx>, debug_loc: DebugLoc) -> Result<'blk, 'tcx> { - fn compare_str<'blk, 'tcx>(cx: Block<'blk, 'tcx>, + fn compare_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, lhs_data: ValueRef, lhs_len: ValueRef, rhs_data: ValueRef, @@ -895,11 +890,13 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>, rhs_t: Ty<'tcx>, debug_loc: DebugLoc) -> Result<'blk, 'tcx> { - let did = langcall(cx, + let did = langcall(bcx, None, &format!("comparison of `{}`", rhs_t), StrEqFnLangItem); - callee::trans_lang_call(cx, did, &[lhs_data, lhs_len, rhs_data, rhs_len], None, debug_loc) + let args = [lhs_data, lhs_len, rhs_data, rhs_len]; + Callee::def(bcx.ccx(), did, bcx.tcx().mk_substs(Substs::empty())) + .call(bcx, debug_loc, ArgVals(&args), None) } let _icx = push_ctxt("compare_values"); @@ -1032,7 +1029,7 @@ fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty, opt_datum); } - debug!("binding {} to {}", binding_info.id, bcx.val_to_string(llval)); + debug!("binding {} to {:?}", binding_info.id, Value(llval)); bcx.fcx.lllocals.borrow_mut().insert(binding_info.id, datum); debuginfo::create_match_binding_metadata(bcx, name, binding_info); } @@ -1047,11 +1044,8 @@ fn compile_guard<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, chk: &FailureHandler, has_genuine_default: bool) -> Block<'blk, 'tcx> { - debug!("compile_guard(bcx={}, guard_expr={:?}, m={:?}, vals=[{}])", - bcx.to_str(), - guard_expr, - m, - vals.iter().map(|v| bcx.val_to_string(v.val)).collect::>().join(", ")); + debug!("compile_guard(bcx={}, guard_expr={:?}, m={:?}, vals={:?})", + bcx.to_str(), guard_expr, m, vals); let _indenter = indenter(); let mut bcx = insert_lllocals(bcx, &data.bindings_map, None); @@ -1093,10 +1087,8 @@ fn compile_submatch<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vals: &[MatchInput], chk: &FailureHandler, has_genuine_default: bool) { - debug!("compile_submatch(bcx={}, m={:?}, vals=[{}])", - bcx.to_str(), - m, - vals.iter().map(|v| bcx.val_to_string(v.val)).collect::>().join(", ")); + debug!("compile_submatch(bcx={}, m={:?}, vals=[{:?}])", + bcx.to_str(), m, vals); let _indenter = indenter(); let _icx = push_ctxt("match::compile_submatch"); let mut bcx = bcx; @@ -1256,7 +1248,7 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, debug!("options={:?}", opts); let mut kind = NoBranch; let mut test_val = val.val; - debug!("test_val={}", bcx.val_to_string(test_val)); + debug!("test_val={:?}", Value(test_val)); if !opts.is_empty() { match opts[0] { ConstantValue(..) | ConstantRange(..) 
=> { @@ -1761,8 +1753,8 @@ fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>, let lvalue = Lvalue::new_with_hint(caller_name, bcx, p_id, HintKind::DontZeroJustUse); let datum = Datum::new(llval, var_ty, lvalue); - debug!("mk_binding_alloca cleanup_scope={:?} llval={} var_ty={:?}", - cleanup_scope, bcx.ccx().tn().val_to_string(llval), var_ty); + debug!("mk_binding_alloca cleanup_scope={:?} llval={:?} var_ty={:?}", + cleanup_scope, Value(llval), var_ty); // Subtle: be sure that we *populate* the memory *before* // we schedule the cleanup. @@ -1794,10 +1786,8 @@ pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, val: MatchInput, cleanup_scope: cleanup::ScopeId) -> Block<'blk, 'tcx> { - debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={})", - bcx.to_str(), - pat, - bcx.val_to_string(val.val)); + debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={:?})", + bcx.to_str(), pat, val); if bcx.sess().asm_comments() { add_comment(bcx, &format!("bind_irrefutable_pat(pat={:?})", @@ -1923,7 +1913,7 @@ pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // a regular one if !type_is_sized(tcx, fty) { let scratch = alloc_ty(bcx, fty, "__struct_field_fat_ptr"); - debug!("Creating fat pointer {}", bcx.val_to_string(scratch)); + debug!("Creating fat pointer {:?}", Value(scratch)); Store(bcx, fldptr, expr::get_dataptr(bcx, scratch)); Store(bcx, val.meta, expr::get_meta(bcx, scratch)); fldptr = scratch; diff --git a/src/librustc_trans/trans/abi.rs b/src/librustc_trans/trans/abi.rs new file mode 100644 index 0000000000000..39ecae4175080 --- /dev/null +++ b/src/librustc_trans/trans/abi.rs @@ -0,0 +1,525 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use llvm::{self, ValueRef}; +use trans::base; +use trans::builder::Builder; +use trans::common::{type_is_fat_ptr, BlockAndBuilder}; +use trans::context::CrateContext; +use trans::cabi_x86; +use trans::cabi_x86_64; +use trans::cabi_x86_win64; +use trans::cabi_arm; +use trans::cabi_aarch64; +use trans::cabi_powerpc; +use trans::cabi_powerpc64; +use trans::cabi_mips; +use trans::cabi_asmjs; +use trans::machine::{llalign_of_min, llsize_of, llsize_of_real}; +use trans::type_::Type; +use trans::type_of; + +use rustc_front::hir; +use middle::ty::{self, Ty}; + +use libc::c_uint; + +pub use syntax::abi::Abi; + +/// The first half of a fat pointer. +/// - For a closure, this is the code address. +/// - For an object or trait instance, this is the address of the box. +/// - For a slice, this is the base address. +pub const FAT_PTR_ADDR: usize = 0; + +/// The second half of a fat pointer. +/// - For a closure, this is the address of the environment. +/// - For an object or trait instance, this is the address of the vtable. +/// - For a slice, this is the length. 
+pub const FAT_PTR_EXTRA: usize = 1; + +#[derive(Clone, Copy, PartialEq, Debug)] +enum ArgKind { + /// Pass the argument directly using the normal converted + /// LLVM type or by coercing to another specified type + Direct, + /// Pass the argument indirectly via a hidden pointer + Indirect, + /// Ignore the argument (useful for empty struct) + Ignore, +} + +/// Information about how a specific C type +/// should be passed to or returned from a function +/// +/// This is borrowed from clang's ABIInfo.h +#[derive(Clone, Copy, Debug)] +pub struct ArgType { + kind: ArgKind, + /// Original LLVM type + pub original_ty: Type, + /// Sizing LLVM type (pointers are opaque). + /// Unlike original_ty, this is guaranteed to be complete. + /// + /// For example, while we're computing the function pointer type in + /// `struct Foo(fn(Foo));`, `original_ty` is still LLVM's `%Foo = {}`. + /// The field type will likely end up being `void(%Foo)*`, but we cannot + /// use `%Foo` to compute properties (e.g. size and alignment) of `Foo`, + /// until `%Foo` is completed by having all of its field types inserted, + /// so `ty` holds the "sizing type" of `Foo`, which replaces all pointers + /// with opaque ones, resulting in `{i8*}` for `Foo`. + /// ABI-specific logic can then look at the size, alignment and fields of + /// `{i8*}` in order to determine how the argument will be passed. + /// Only later will `original_ty` aka `%Foo` be used in the LLVM function + /// pointer type, without ever having introspected it. + pub ty: Type, + /// Coerced LLVM Type + pub cast: Option, + /// Dummy argument, which is emitted before the real argument + pub pad: Option, + /// LLVM attributes of argument + pub attrs: llvm::Attributes +} + +impl ArgType { + fn new(original_ty: Type, ty: Type) -> ArgType { + ArgType { + kind: ArgKind::Direct, + original_ty: original_ty, + ty: ty, + cast: None, + pad: None, + attrs: llvm::Attributes::default() + } + } + + pub fn make_indirect(&mut self, ccx: &CrateContext) { + assert_eq!(self.kind, ArgKind::Direct); + + // Wipe old attributes, likely not valid through indirection. + self.attrs = llvm::Attributes::default(); + + let llarg_sz = llsize_of_real(ccx, self.ty); + + // For non-immediate arguments the callee gets its own copy of + // the value on the stack, so there are no aliases. It's also + // program-invisible so can't possibly capture + self.attrs.set(llvm::Attribute::NoAlias) + .set(llvm::Attribute::NoCapture) + .set_dereferenceable(llarg_sz); + + self.kind = ArgKind::Indirect; + } + + pub fn ignore(&mut self) { + assert_eq!(self.kind, ArgKind::Direct); + self.kind = ArgKind::Ignore; + } + + pub fn is_indirect(&self) -> bool { + self.kind == ArgKind::Indirect + } + + pub fn is_ignore(&self) -> bool { + self.kind == ArgKind::Ignore + } + + /// Get the LLVM type for an lvalue of the original Rust type of + /// this argument/return, i.e. the result of `type_of::type_of`. + pub fn memory_ty(&self, ccx: &CrateContext) -> Type { + if self.original_ty == Type::i1(ccx) { + Type::i8(ccx) + } else { + self.original_ty + } + } + + /// Store a direct/indirect value described by this ArgType into a + /// lvalue for the original Rust type of this argument/return. + /// Can be used for both storing formal arguments into Rust variables + /// or results of call/invoke instructions into their destinations. 
+ pub fn store(&self, b: &Builder, mut val: ValueRef, dst: ValueRef) { + if self.is_ignore() { + return; + } + if self.is_indirect() { + let llsz = llsize_of(b.ccx, self.ty); + let llalign = llalign_of_min(b.ccx, self.ty); + base::call_memcpy(b, dst, val, llsz, llalign as u32); + } else if let Some(ty) = self.cast { + let cast_dst = b.pointercast(dst, ty.ptr_to()); + let store = b.store(val, cast_dst); + let llalign = llalign_of_min(b.ccx, self.ty); + unsafe { + llvm::LLVMSetAlignment(store, llalign); + } + } else { + if self.original_ty == Type::i1(b.ccx) { + val = b.zext(val, Type::i8(b.ccx)); + } + b.store(val, dst); + } + } + + pub fn store_fn_arg(&self, bcx: &BlockAndBuilder, idx: &mut usize, dst: ValueRef) { + if self.pad.is_some() { + *idx += 1; + } + if self.is_ignore() { + return; + } + let val = llvm::get_param(bcx.fcx().llfn, *idx as c_uint); + *idx += 1; + self.store(bcx, val, dst); + } +} + +/// Metadata describing how the arguments to a native function +/// should be passed in order to respect the native ABI. +/// +/// I will do my best to describe this structure, but these +/// comments are reverse-engineered and may be inaccurate. -NDM +pub struct FnType { + /// The LLVM types of each argument. + pub args: Vec, + + /// LLVM return type. + pub ret: ArgType, + + pub variadic: bool, + + pub cconv: llvm::CallConv +} + +impl FnType { + pub fn new<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + abi: Abi, + sig: &ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>]) -> FnType { + let mut fn_ty = FnType::unadjusted(ccx, abi, sig, extra_args); + fn_ty.adjust_for_abi(ccx, abi, sig); + fn_ty + } + + pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + abi: Abi, + sig: &ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>]) -> FnType { + use self::Abi::*; + let cconv = match ccx.sess().target.target.adjust_abi(abi) { + RustIntrinsic | PlatformIntrinsic | + Rust | RustCall => llvm::CCallConv, + + // It's the ABI's job to select this, not us. + System => ccx.sess().bug("system abi should be selected elsewhere"), + + Stdcall => llvm::X86StdcallCallConv, + Fastcall => llvm::X86FastcallCallConv, + Vectorcall => llvm::X86_VectorCall, + C => llvm::CCallConv, + Win64 => llvm::X86_64_Win64, + + // These API constants ought to be more specific... + Cdecl => llvm::CCallConv, + Aapcs => llvm::CCallConv, + }; + + let mut inputs = &sig.inputs[..]; + let extra_args = if abi == RustCall { + assert!(!sig.variadic && extra_args.is_empty()); + + match inputs[inputs.len() - 1].sty { + ty::TyTuple(ref tupled_arguments) => { + inputs = &inputs[..inputs.len() - 1]; + &tupled_arguments[..] + } + _ => { + unreachable!("argument to function with \"rust-call\" ABI \ + is not a tuple"); + } + } + } else { + assert!(sig.variadic || extra_args.is_empty()); + extra_args + }; + + let arg_of = |ty: Ty<'tcx>| { + if ty.is_bool() { + let llty = Type::i1(ccx); + let mut arg = ArgType::new(llty, llty); + arg.attrs.set(llvm::Attribute::ZExt); + arg + } else { + let mut arg = ArgType::new(type_of::type_of(ccx, ty), + type_of::sizing_type_of(ccx, ty)); + if llsize_of_real(ccx, arg.ty) == 0 { + arg.ignore(); + } + arg + } + }; + + let ret_ty = match sig.output { + ty::FnConverging(ret_ty) => ret_ty, + ty::FnDiverging => ccx.tcx().mk_nil() + }; + let mut ret = arg_of(ret_ty); + + if !type_is_fat_ptr(ccx.tcx(), ret_ty) { + // The `noalias` attribute on the return value is useful to a + // function ptr caller. 
+ if let ty::TyBox(_) = ret_ty.sty { + // `Box` pointer return values never alias because ownership + // is transferred + ret.attrs.set(llvm::Attribute::NoAlias); + } + + // We can also mark the return value as `dereferenceable` in certain cases + match ret_ty.sty { + // These are not really pointers but pairs, (pointer, len) + ty::TyRef(_, ty::TypeAndMut { ty, .. }) | + ty::TyBox(ty) => { + let llty = type_of::sizing_type_of(ccx, ty); + let llsz = llsize_of_real(ccx, llty); + ret.attrs.set_dereferenceable(llsz); + } + _ => {} + } + } + + let mut args = Vec::with_capacity(inputs.len() + extra_args.len()); + + // Handle safe Rust thin and fat pointers. + let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty { + // `Box` pointer parameters never alias because ownership is transferred + ty::TyBox(inner) => { + arg.attrs.set(llvm::Attribute::NoAlias); + Some(inner) + } + + ty::TyRef(b, mt) => { + use middle::ty::{BrAnon, ReLateBound}; + + // `&mut` pointer parameters never alias other parameters, or mutable global data + // + // `&T` where `T` contains no `UnsafeCell` is immutable, and can be marked as + // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely + // on memory dependencies rather than pointer equality + let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe(); + + if mt.mutbl != hir::MutMutable && !interior_unsafe { + arg.attrs.set(llvm::Attribute::NoAlias); + } + + if mt.mutbl == hir::MutImmutable && !interior_unsafe { + arg.attrs.set(llvm::Attribute::ReadOnly); + } + + // When a reference in an argument has no named lifetime, it's + // impossible for that reference to escape this function + // (returned or stored beyond the call by a closure). + if let ReLateBound(_, BrAnon(_)) = *b { + arg.attrs.set(llvm::Attribute::NoCapture); + } + + Some(mt.ty) + } + _ => None + }; + + for ty in inputs.iter().chain(extra_args.iter()) { + let mut arg = arg_of(ty); + + if type_is_fat_ptr(ccx.tcx(), ty) { + let original_tys = arg.original_ty.field_types(); + let sizing_tys = arg.ty.field_types(); + assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2)); + + let mut data = ArgType::new(original_tys[0], sizing_tys[0]); + let mut info = ArgType::new(original_tys[1], sizing_tys[1]); + + if let Some(inner) = rust_ptr_attrs(ty, &mut data) { + data.attrs.set(llvm::Attribute::NonNull); + if ccx.tcx().struct_tail(inner).is_trait() { + info.attrs.set(llvm::Attribute::NonNull); + } + } + args.push(data); + args.push(info); + } else { + if let Some(inner) = rust_ptr_attrs(ty, &mut arg) { + let llty = type_of::sizing_type_of(ccx, inner); + let llsz = llsize_of_real(ccx, llty); + arg.attrs.set_dereferenceable(llsz); + } + args.push(arg); + } + } + + FnType { + args: args, + ret: ret, + variadic: sig.variadic, + cconv: cconv + } + } + + pub fn adjust_for_abi<'a, 'tcx>(&mut self, + ccx: &CrateContext<'a, 'tcx>, + abi: Abi, + sig: &ty::FnSig<'tcx>) { + if abi == Abi::Rust || abi == Abi::RustCall || + abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { + let fixup = |arg: &mut ArgType| { + let mut llty = arg.ty; + + // Replace newtypes with their inner-most type. + while llty.kind() == llvm::TypeKind::Struct { + let inner = llty.field_types(); + if inner.len() != 1 { + break; + } + llty = inner[0]; + } + + if !llty.is_aggregate() { + // Scalars and vectors, always immediate. + if llty != arg.ty { + // Needs a cast as we've unpacked a newtype. 
+ arg.cast = Some(llty); + } + return; + } + + let size = llsize_of_real(ccx, llty); + if size > llsize_of_real(ccx, ccx.int_type()) { + arg.make_indirect(ccx); + } else if size > 0 { + // We want to pass small aggregates as immediates, but using + // a LLVM aggregate type for this leads to bad optimizations, + // so we pick an appropriately sized integer type instead. + arg.cast = Some(Type::ix(ccx, size * 8)); + } + }; + // Fat pointers are returned by-value. + if !self.ret.is_ignore() { + if !type_is_fat_ptr(ccx.tcx(), sig.output.unwrap()) { + fixup(&mut self.ret); + } + } + for arg in &mut self.args { + if arg.is_ignore() { continue; } + fixup(arg); + } + if self.ret.is_indirect() { + self.ret.attrs.set(llvm::Attribute::StructRet); + } + return; + } + + match &ccx.sess().target.target.arch[..] { + "x86" => cabi_x86::compute_abi_info(ccx, self), + "x86_64" => if ccx.sess().target.target.options.is_like_windows { + cabi_x86_win64::compute_abi_info(ccx, self); + } else { + cabi_x86_64::compute_abi_info(ccx, self); + }, + "aarch64" => cabi_aarch64::compute_abi_info(ccx, self), + "arm" => { + let flavor = if ccx.sess().target.target.target_os == "ios" { + cabi_arm::Flavor::Ios + } else { + cabi_arm::Flavor::General + }; + cabi_arm::compute_abi_info(ccx, self, flavor); + }, + "mips" => cabi_mips::compute_abi_info(ccx, self), + "powerpc" => cabi_powerpc::compute_abi_info(ccx, self), + "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self), + "asmjs" => cabi_asmjs::compute_abi_info(ccx, self), + a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a)) + } + + if self.ret.is_indirect() { + self.ret.attrs.set(llvm::Attribute::StructRet); + } + } + + pub fn llvm_type(&self, ccx: &CrateContext) -> Type { + let mut llargument_tys = Vec::new(); + + let llreturn_ty = if self.ret.is_ignore() { + Type::void(ccx) + } else if self.ret.is_indirect() { + llargument_tys.push(self.ret.original_ty.ptr_to()); + Type::void(ccx) + } else { + self.ret.cast.unwrap_or(self.ret.original_ty) + }; + + for arg in &self.args { + if arg.is_ignore() { + continue; + } + // add padding + if let Some(ty) = arg.pad { + llargument_tys.push(ty); + } + + let llarg_ty = if arg.is_indirect() { + arg.original_ty.ptr_to() + } else { + arg.cast.unwrap_or(arg.original_ty) + }; + + llargument_tys.push(llarg_ty); + } + + if self.variadic { + Type::variadic_func(&llargument_tys, &llreturn_ty) + } else { + Type::func(&llargument_tys, &llreturn_ty) + } + } + + pub fn apply_attrs_llfn(&self, llfn: ValueRef) { + let mut i = if self.ret.is_indirect() { 1 } else { 0 }; + if !self.ret.is_ignore() { + self.ret.attrs.apply_llfn(i, llfn); + } + i += 1; + for arg in &self.args { + if !arg.is_ignore() { + if arg.pad.is_some() { i += 1; } + arg.attrs.apply_llfn(i, llfn); + i += 1; + } + } + } + + pub fn apply_attrs_callsite(&self, callsite: ValueRef) { + let mut i = if self.ret.is_indirect() { 1 } else { 0 }; + if !self.ret.is_ignore() { + self.ret.attrs.apply_callsite(i, callsite); + } + i += 1; + for arg in &self.args { + if !arg.is_ignore() { + if arg.pad.is_some() { i += 1; } + arg.attrs.apply_callsite(i, callsite); + i += 1; + } + } + + if self.cconv != llvm::CCallConv { + llvm::SetInstructionCallConv(callsite, self.cconv); + } + } +} diff --git a/src/librustc_trans/trans/adt.rs b/src/librustc_trans/trans/adt.rs index 320bb1eab3c63..45a1236647e40 100644 --- a/src/librustc_trans/trans/adt.rs +++ b/src/librustc_trans/trans/adt.rs @@ -48,13 +48,13 @@ use std; use std::rc::Rc; use llvm::{ValueRef, True, IntEQ, 
IntNE}; -use back::abi::FAT_PTR_ADDR; use middle::subst; use middle::ty::{self, Ty, TyCtxt}; use syntax::ast; use syntax::attr; use syntax::attr::IntType; use trans::_match; +use trans::abi::FAT_PTR_ADDR; use trans::base::InitAlloca; use trans::build::*; use trans::cleanup; @@ -67,6 +67,7 @@ use trans::machine; use trans::monomorphize; use trans::type_::Type; use trans::type_of; +use trans::value::Value; type Hint = attr::ReprAttr; @@ -88,11 +89,6 @@ impl TypeContext { fn may_need_drop_flag(t: Type, needs_drop_flag: bool) -> TypeContext { TypeContext { prefix: t, needs_drop_flag: needs_drop_flag } } - pub fn to_string(self) -> String { - let TypeContext { prefix, needs_drop_flag } = self; - format!("TypeContext {{ prefix: {}, needs_drop_flag: {} }}", - prefix.to_string(), needs_drop_flag) - } } /// Representations. @@ -1069,6 +1065,15 @@ pub fn num_args(r: &Repr, discr: Disr) -> usize { /// Access a field, at a point when the value's case is known. pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { + trans_field_ptr_builder(&bcx.build(), r, val, discr, ix) +} + +/// Access a field, at a point when the value's case is known. +pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, + r: &Repr<'tcx>, + val: MaybeSizedValue, + discr: Disr, ix: usize) + -> ValueRef { // Note: if this ever needs to generate conditionals (e.g., if we // decide to do some kind of cdr-coding-like non-unique repr // someday), it will need to return a possibly-new bcx as well. @@ -1091,13 +1096,15 @@ pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0); // The contents of memory at this pointer can't matter, but use // the value that's "reasonable" in case of pointer comparison. - PointerCast(bcx, val.value, ty.ptr_to()) + if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } + bcx.pointercast(val.value, ty.ptr_to()) } RawNullablePointer { nndiscr, nnty, .. } => { assert_eq!(ix, 0); assert_eq!(discr, nndiscr); let ty = type_of::type_of(bcx.ccx(), nnty); - PointerCast(bcx, val.value, ty.ptr_to()) + if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } + bcx.pointercast(val.value, ty.ptr_to()) } StructWrappedNullablePointer { ref nonnull, nndiscr, .. 
} => { assert_eq!(discr, nndiscr); @@ -1106,43 +1113,48 @@ pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, } } -pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, val: MaybeSizedValue, - ix: usize, needs_cast: bool) -> ValueRef { +fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, + st: &Struct<'tcx>, val: MaybeSizedValue, + ix: usize, needs_cast: bool) -> ValueRef { let ccx = bcx.ccx(); + let fty = st.fields[ix]; + let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); + if bcx.is_unreachable() { + return C_undef(ll_fty.ptr_to()); + } + let ptr_val = if needs_cast { let fields = st.fields.iter().map(|&ty| { type_of::in_memory_type_of(ccx, ty) }).collect::>(); let real_ty = Type::struct_(ccx, &fields[..], st.packed); - PointerCast(bcx, val.value, real_ty.ptr_to()) + bcx.pointercast(val.value, real_ty.ptr_to()) } else { val.value }; - let fty = st.fields[ix]; // Simple case - we can just GEP the field // * First field - Always aligned properly // * Packed struct - There is no alignment padding // * Field is sized - pointer is properly aligned already if ix == 0 || st.packed || type_is_sized(bcx.tcx(), fty) { - return StructGEP(bcx, ptr_val, ix); + return bcx.struct_gep(ptr_val, ix); } // If the type of the last field is [T] or str, then we don't need to do // any adjusments match fty.sty { ty::TySlice(..) | ty::TyStr => { - return StructGEP(bcx, ptr_val, ix); + return bcx.struct_gep(ptr_val, ix); } _ => () } // There's no metadata available, log the case and just do the GEP. if !val.has_meta() { - debug!("Unsized field `{}`, of `{}` has no metadata for adjustment", - ix, - bcx.val_to_string(ptr_val)); - return StructGEP(bcx, ptr_val, ix); + debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", + ix, Value(ptr_val)); + return bcx.struct_gep(ptr_val, ix); } let dbloc = DebugLoc::None; @@ -1183,23 +1195,21 @@ pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, v // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = Sub(bcx, align, C_uint(bcx.ccx(), 1u64), dbloc); - let offset = And(bcx, - Add(bcx, unaligned_offset, align_sub_1, dbloc), - Neg(bcx, align, dbloc), - dbloc); + dbloc.apply(bcx.fcx()); + let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx(), 1u64)); + let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), + bcx.neg(align)); - debug!("struct_field_ptr: DST field offset: {}", - bcx.val_to_string(offset)); + debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); // Cast and adjust pointer - let byte_ptr = PointerCast(bcx, ptr_val, Type::i8p(bcx.ccx())); - let byte_ptr = GEP(bcx, byte_ptr, &[offset]); + let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx())); + let byte_ptr = bcx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); - debug!("struct_field_ptr: Field type is {}", ll_fty.to_string()); - PointerCast(bcx, byte_ptr, ll_fty.ptr_to()) + debug!("struct_field_ptr: Field type is {:?}", ll_fty); + bcx.pointercast(byte_ptr, ll_fty.ptr_to()) } pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, @@ -1283,14 +1293,15 @@ pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum( bcx, tcx.dtor_type(), "drop_flag", InitAlloca::Uninit("drop flag itself has no dtor"), - cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| { + cleanup::CustomScope(custom_cleanup_scope), 
|bcx, _| { debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}", tcx.dtor_type()); bcx } )); bcx = fold_variants(bcx, r, val, |variant_cx, st, value| { - let ptr = struct_field_ptr(variant_cx, st, MaybeSizedValue::sized(value), + let ptr = struct_field_ptr(&variant_cx.build(), st, + MaybeSizedValue::sized(value), (st.fields.len() - 1), false); datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr")) .store_to(variant_cx, scratch.val) @@ -1442,7 +1453,7 @@ fn padding(ccx: &CrateContext, size: u64) -> ValueRef { fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } /// Get the discriminant of a constant value. -pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) -> Disr { +pub fn const_get_discrim(r: &Repr, val: ValueRef) -> Disr { match *r { CEnum(ity, _, _) => { match ity { @@ -1452,13 +1463,13 @@ pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) -> Disr { } General(ity, _, _) => { match ity { - attr::SignedInt(..) => Disr(const_to_int(const_get_elt(ccx, val, &[0])) as u64), - attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(ccx, val, &[0]))) + attr::SignedInt(..) => Disr(const_to_int(const_get_elt(val, &[0])) as u64), + attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(val, &[0]))) } } Univariant(..) => Disr(0), RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { - ccx.sess().bug("const discrim access of non c-like enum") + unreachable!("const discrim access of non c-like enum") } } } @@ -1472,25 +1483,25 @@ pub fn const_get_field(ccx: &CrateContext, r: &Repr, val: ValueRef, _discr: Disr, ix: usize) -> ValueRef { match *r { CEnum(..) => ccx.sess().bug("element access in C-like enum const"), - Univariant(..) => const_struct_field(ccx, val, ix), - General(..) => const_struct_field(ccx, val, ix + 1), + Univariant(..) => const_struct_field(val, ix), + General(..) => const_struct_field(val, ix + 1), RawNullablePointer { .. } => { assert_eq!(ix, 0); val }, - StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix) + StructWrappedNullablePointer{ .. } => const_struct_field(val, ix) } } /// Extract field of struct-like const, skipping our alignment padding. -fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: usize) -> ValueRef { +fn const_struct_field(val: ValueRef, ix: usize) -> ValueRef { // Get the ix-th non-undef element of the struct. let mut real_ix = 0; // actual position in the struct let mut ix = ix; // logical index relative to real_ix let mut field; loop { loop { - field = const_get_elt(ccx, val, &[real_ix]); + field = const_get_elt(val, &[real_ix]); if !is_undef(field) { break; } diff --git a/src/librustc_trans/trans/asm.rs b/src/librustc_trans/trans/asm.rs index 98e9a1c98ad84..4d0c55131f765 100644 --- a/src/librustc_trans/trans/asm.rs +++ b/src/librustc_trans/trans/asm.rs @@ -10,13 +10,11 @@ //! # Translation of inline assembly. 
-use llvm; +use llvm::{self, ValueRef}; +use trans::base; use trans::build::*; -use trans::callee; use trans::common::*; -use trans::cleanup; -use trans::cleanup::CleanupMethods; -use trans::expr; +use trans::datum::{Datum, Lvalue}; use trans::type_of; use trans::type_::Type; @@ -26,64 +24,35 @@ use syntax::ast::AsmDialect; use libc::{c_uint, c_char}; // Take an inline assembly expression and splat it out via LLVM -pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm) - -> Block<'blk, 'tcx> { - let fcx = bcx.fcx; - let mut bcx = bcx; - let mut constraints = Vec::new(); - let mut output_types = Vec::new(); - - let temp_scope = fcx.push_custom_cleanup_scope(); - - let mut ext_inputs = Vec::new(); - let mut ext_constraints = Vec::new(); +pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + ia: &ast::InlineAsm, + outputs: Vec>, + mut inputs: Vec) { + let mut ext_constraints = vec![]; + let mut output_types = vec![]; // Prepare the output operands - let mut outputs = Vec::new(); - let mut inputs = Vec::new(); - for (i, out) in ia.outputs.iter().enumerate() { - constraints.push(out.constraint.clone()); - - let out_datum = unpack_datum!(bcx, expr::trans(bcx, &out.expr)); + let mut indirect_outputs = vec![]; + for (i, (out, out_datum)) in ia.outputs.iter().zip(&outputs).enumerate() { + let val = if out.is_rw || out.is_indirect { + Some(base::load_ty(bcx, out_datum.val, out_datum.ty)) + } else { + None + }; + if out.is_rw { + inputs.push(val.unwrap()); + ext_constraints.push(i.to_string()); + } if out.is_indirect { - bcx = callee::trans_arg_datum(bcx, - expr_ty(bcx, &out.expr), - out_datum, - cleanup::CustomScope(temp_scope), - &mut inputs); - if out.is_rw { - ext_inputs.push(*inputs.last().unwrap()); - ext_constraints.push(i.to_string()); - } + indirect_outputs.push(val.unwrap()); } else { output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty)); - outputs.push(out_datum.val); - if out.is_rw { - bcx = callee::trans_arg_datum(bcx, - expr_ty(bcx, &out.expr), - out_datum, - cleanup::CustomScope(temp_scope), - &mut ext_inputs); - ext_constraints.push(i.to_string()); - } } } - - // Now the input operands - for &(ref c, ref input) in &ia.inputs { - constraints.push((*c).clone()); - - let in_datum = unpack_datum!(bcx, expr::trans(bcx, &input)); - bcx = callee::trans_arg_datum(bcx, - expr_ty(bcx, &input), - in_datum, - cleanup::CustomScope(temp_scope), - &mut inputs); + if !indirect_outputs.is_empty() { + indirect_outputs.extend_from_slice(&inputs); + inputs = indirect_outputs; } - inputs.extend_from_slice(&ext_inputs[..]); - - // no failure occurred preparing operands, no need to cleanup - fcx.pop_custom_cleanup_scope(temp_scope); let clobbers = ia.clobbers.iter() .map(|s| format!("~{{{}}}", &s)); @@ -95,19 +64,18 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm) _ => Vec::new() }; - let all_constraints= constraints.iter() - .map(|s| s.to_string()) - .chain(ext_constraints) - .chain(clobbers) - .chain(arch_clobbers.iter() - .map(|s| s.to_string())) - .collect::>() - .join(","); + let all_constraints = + ia.outputs.iter().map(|out| out.constraint.to_string()) + .chain(ia.inputs.iter().map(|s| s.to_string())) + .chain(ext_constraints) + .chain(clobbers) + .chain(arch_clobbers.iter().map(|s| s.to_string())) + .collect::>().join(","); debug!("Asm Constraints: {}", &all_constraints[..]); // Depending on how many outputs we have, the return type is different - let num_outputs = outputs.len(); + let num_outputs = output_types.len(); let 
output_type = match num_outputs { 0 => Type::void(bcx.ccx()), 1 => output_types[0], @@ -131,13 +99,10 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm) dialect); // Again, based on how many outputs we have - if num_outputs == 1 { - Store(bcx, r, outputs[0]); - } else { - for (i, o) in outputs.iter().enumerate() { - let v = ExtractValue(bcx, r, i); - Store(bcx, v, *o); - } + let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); + for (i, (_, datum)) in outputs.enumerate() { + let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) }; + Store(bcx, v, datum.val); } // Store expn_id in a metadata node so we can map LLVM errors @@ -152,7 +117,4 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm) llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1)); } - - return bcx; - } diff --git a/src/librustc_trans/trans/attributes.rs b/src/librustc_trans/trans/attributes.rs index d93d32f8e0d06..99dc3ade823fa 100644 --- a/src/librustc_trans/trans/attributes.rs +++ b/src/librustc_trans/trans/attributes.rs @@ -10,20 +10,11 @@ //! Set and unset common attributes on LLVM values. use libc::{c_uint, c_ulonglong}; -use llvm::{self, ValueRef, AttrHelper}; -use middle::ty; -use middle::infer; -use middle::traits::ProjectionMode; +use llvm::{self, ValueRef}; use session::config::NoDebugInfo; -use syntax::abi::Abi; pub use syntax::attr::InlineAttr; use syntax::ast; -use rustc_front::hir; -use trans::base; -use trans::common; use trans::context::CrateContext; -use trans::machine; -use trans::type_of; /// Mark LLVM function to use provided inline heuristic. #[inline] @@ -112,199 +103,13 @@ pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRe for attr in attrs { if attr.check_name("cold") { - unsafe { - llvm::LLVMAddFunctionAttribute(llfn, - llvm::FunctionIndex as c_uint, - llvm::ColdAttribute as u64) - } + llvm::Attributes::default().set(llvm::Attribute::Cold) + .apply_llfn(llvm::FunctionIndex as usize, llfn) } else if attr.check_name("allocator") { - llvm::Attribute::NoAlias.apply_llfn(llvm::ReturnIndex as c_uint, llfn); + llvm::Attributes::default().set(llvm::Attribute::NoAlias) + .apply_llfn(llvm::ReturnIndex as usize, llfn) } else if attr.check_name("unwind") { unwind(llfn, true); } } } - -/// Composite function which converts function type into LLVM attributes for the function. -pub fn from_fn_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_type: ty::Ty<'tcx>) - -> llvm::AttrBuilder { - use middle::ty::{BrAnon, ReLateBound}; - - let function_type; - let (fn_sig, abi, env_ty) = match fn_type.sty { - ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => (&f.sig, f.abi, None), - ty::TyClosure(closure_did, ref substs) => { - let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), - &ccx.tcx().tables, - ProjectionMode::Any); - function_type = infcx.closure_type(closure_did, substs); - let self_type = base::self_type_for_closure(ccx, closure_did, fn_type); - (&function_type.sig, Abi::RustCall, Some(self_type)) - } - _ => ccx.sess().bug("expected closure or function.") - }; - - let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig); - let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig); - - let mut attrs = llvm::AttrBuilder::new(); - let ret_ty = fn_sig.output; - - // These have an odd calling convention, so we need to manually - // unpack the input ty's - let input_tys = match fn_type.sty { - ty::TyClosure(..) 
=> { - assert!(abi == Abi::RustCall); - - match fn_sig.inputs[0].sty { - ty::TyTuple(ref inputs) => { - let mut full_inputs = vec![env_ty.expect("Missing closure environment")]; - full_inputs.extend_from_slice(inputs); - full_inputs - } - _ => ccx.sess().bug("expected tuple'd inputs") - } - }, - ty::TyFnDef(..) | ty::TyFnPtr(_) if abi == Abi::RustCall => { - let mut inputs = vec![fn_sig.inputs[0]]; - - match fn_sig.inputs[1].sty { - ty::TyTuple(ref t_in) => { - inputs.extend_from_slice(&t_in[..]); - inputs - } - _ => ccx.sess().bug("expected tuple'd inputs") - } - } - _ => fn_sig.inputs.clone() - }; - - // Index 0 is the return value of the llvm func, so we start at 1 - let mut idx = 1; - if let ty::FnConverging(ret_ty) = ret_ty { - // A function pointer is called without the declaration - // available, so we have to apply any attributes with ABI - // implications directly to the call instruction. Right now, - // the only attribute we need to worry about is `sret`. - if type_of::return_uses_outptr(ccx, ret_ty) { - let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, ret_ty)); - - // The outptr can be noalias and nocapture because it's entirely - // invisible to the program. We also know it's nonnull as well - // as how many bytes we can dereference - attrs.arg(1, llvm::Attribute::StructRet) - .arg(1, llvm::Attribute::NoAlias) - .arg(1, llvm::Attribute::NoCapture) - .arg(1, llvm::DereferenceableAttribute(llret_sz)); - - // Add one more since there's an outptr - idx += 1; - } else { - // The `noalias` attribute on the return value is useful to a - // function ptr caller. - match ret_ty.sty { - // `Box` pointer return values never alias because ownership - // is transferred - ty::TyBox(it) if common::type_is_sized(ccx.tcx(), it) => { - attrs.ret(llvm::Attribute::NoAlias); - } - _ => {} - } - - // We can also mark the return value as `dereferenceable` in certain cases - match ret_ty.sty { - // These are not really pointers but pairs, (pointer, len) - ty::TyRef(_, ty::TypeAndMut { ty: inner, .. }) - | ty::TyBox(inner) if common::type_is_sized(ccx.tcx(), inner) => { - let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner)); - attrs.ret(llvm::DereferenceableAttribute(llret_sz)); - } - _ => {} - } - - if let ty::TyBool = ret_ty.sty { - attrs.ret(llvm::Attribute::ZExt); - } - } - } - - for &t in input_tys.iter() { - match t.sty { - _ if type_of::arg_is_indirect(ccx, t) => { - let llarg_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, t)); - - // For non-immediate arguments the callee gets its own copy of - // the value on the stack, so there are no aliases. 
It's also - // program-invisible so can't possibly capture - attrs.arg(idx, llvm::Attribute::NoAlias) - .arg(idx, llvm::Attribute::NoCapture) - .arg(idx, llvm::DereferenceableAttribute(llarg_sz)); - } - - ty::TyBool => { - attrs.arg(idx, llvm::Attribute::ZExt); - } - - // `Box` pointer parameters never alias because ownership is transferred - ty::TyBox(inner) => { - attrs.arg(idx, llvm::Attribute::NoAlias); - - if common::type_is_sized(ccx.tcx(), inner) { - let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner)); - attrs.arg(idx, llvm::DereferenceableAttribute(llsz)); - } else { - attrs.arg(idx, llvm::NonNullAttribute); - if inner.is_trait() { - attrs.arg(idx + 1, llvm::NonNullAttribute); - } - } - } - - ty::TyRef(b, mt) => { - // `&mut` pointer parameters never alias other parameters, or mutable global data - // - // `&T` where `T` contains no `UnsafeCell` is immutable, and can be marked as - // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely - // on memory dependencies rather than pointer equality - let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe(); - - if mt.mutbl != hir::MutMutable && !interior_unsafe { - attrs.arg(idx, llvm::Attribute::NoAlias); - } - - if mt.mutbl == hir::MutImmutable && !interior_unsafe { - attrs.arg(idx, llvm::Attribute::ReadOnly); - } - - // & pointer parameters are also never null and for sized types we also know - // exactly how many bytes we can dereference - if common::type_is_sized(ccx.tcx(), mt.ty) { - let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty)); - attrs.arg(idx, llvm::DereferenceableAttribute(llsz)); - } else { - attrs.arg(idx, llvm::NonNullAttribute); - if mt.ty.is_trait() { - attrs.arg(idx + 1, llvm::NonNullAttribute); - } - } - - // When a reference in an argument has no named lifetime, it's - // impossible for that reference to escape this function - // (returned or stored beyond the call by a closure). - if let ReLateBound(_, BrAnon(_)) = *b { - attrs.arg(idx, llvm::Attribute::NoCapture); - } - } - - _ => () - } - - if common::type_is_fat_ptr(ccx.tcx(), t) { - idx += 2; - } else { - idx += 1; - } - } - - attrs -} diff --git a/src/librustc_trans/trans/base.rs b/src/librustc_trans/trans/base.rs index 5088dabfbe78e..93ca306f0b0a2 100644 --- a/src/librustc_trans/trans/base.rs +++ b/src/librustc_trans/trans/base.rs @@ -10,8 +10,8 @@ //! Translate the completed AST to the LLVM IR. //! //! Some functions here, such as trans_block and trans_expr, return a value -- -//! the result of the translation to LLVM -- while others, such as trans_fn, -//! trans_impl, and trans_item, are called only for the side effect of adding a +//! the result of the translation to LLVM -- while others, such as trans_fn +//! and trans_item, are called only for the side effect of adding a //! particular definition to the LLVM IR output we're producing. //! //! 
Hopefully useful general knowledge about trans: @@ -30,7 +30,7 @@ use super::CrateTranslation; use super::ModuleTranslation; use back::link::mangle_exported_name; -use back::{link, abi}; +use back::link; use lint; use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param}; use llvm; @@ -52,12 +52,13 @@ use rustc::mir::mir_map::MirMap; use session::config::{self, NoDebugInfo, FullDebugInfo}; use session::Session; use trans::_match; +use trans::abi::{self, Abi, FnType}; use trans::adt; use trans::assert_dep_graph; use trans::attributes; use trans::build::*; use trans::builder::{Builder, noname}; -use trans::callee; +use trans::callee::{Callee, CallArgs, ArgExprs, ArgVals}; use trans::cleanup::{self, CleanupMethods, DropHint}; use trans::closure; use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral}; @@ -65,7 +66,7 @@ use trans::collector::{self, TransItem, TransItemState, TransItemCollectionMode} use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef}; use trans::common::{CrateContext, DropFlagHintsMap, Field, FunctionContext}; use trans::common::{Result, NodeIdAndSpan, VariantInfo}; -use trans::common::{node_id_type, return_type_is_void, fulfill_obligation}; +use trans::common::{node_id_type, fulfill_obligation}; use trans::common::{type_is_immediate, type_is_zero_size, val_ty}; use trans::common; use trans::consts; @@ -75,14 +76,14 @@ use trans::datum; use trans::debuginfo::{self, DebugLoc, ToDebugLoc}; use trans::declare; use trans::expr; -use trans::foreign; use trans::glue; +use trans::inline; use trans::intrinsic; use trans::machine; -use trans::machine::{llsize_of, llsize_of_real}; +use trans::machine::{llalign_of_min, llsize_of, llsize_of_real}; use trans::meth; use trans::mir; -use trans::monomorphize; +use trans::monomorphize::{self, Instance}; use trans::tvec; use trans::type_::Type; use trans::type_of; @@ -100,7 +101,6 @@ use std::cell::{Cell, RefCell}; use std::collections::{HashMap, HashSet}; use std::str; use std::{i8, i16, i32, i64}; -use syntax::abi::Abi; use syntax::codemap::{Span, DUMMY_SP}; use syntax::parse::token::InternedString; use syntax::attr::AttrMetaMethods; @@ -192,72 +192,10 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { } } -fn get_extern_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fn_ty: Ty<'tcx>, - name: &str, - attrs: &[ast::Attribute]) - -> ValueRef { - if let Some(n) = ccx.externs().borrow().get(name) { - return *n; - } - - let f = declare::declare_rust_fn(ccx, name, fn_ty); - attributes::from_fn_attrs(ccx, &attrs, f); - - ccx.externs().borrow_mut().insert(name.to_string(), f); - f -} - -pub fn self_type_for_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - closure_id: DefId, - fn_ty: Ty<'tcx>) - -> Ty<'tcx> { - let closure_kind = ccx.tcx().closure_kind(closure_id); - match closure_kind { - ty::ClosureKind::Fn => { - ccx.tcx().mk_imm_ref(ccx.tcx().mk_region(ty::ReStatic), fn_ty) - } - ty::ClosureKind::FnMut => { - ccx.tcx().mk_mut_ref(ccx.tcx().mk_region(ty::ReStatic), fn_ty) - } - ty::ClosureKind::FnOnce => fn_ty, - } -} - pub fn kind_for_closure(ccx: &CrateContext, closure_id: DefId) -> ty::ClosureKind { *ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap() } -pub fn get_extern_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - did: DefId, - t: Ty<'tcx>) - -> ValueRef { - let name = ccx.sess().cstore.item_symbol(did); - let ty = type_of(ccx, t); - if let Some(n) = ccx.externs().borrow_mut().get(&name) { - return *n; - } - // FIXME(nagisa): perhaps the map of externs could be 
offloaded to llvm somehow? - // FIXME(nagisa): investigate whether it can be changed into define_global - let c = declare::declare_global(ccx, &name[..], ty); - // Thread-local statics in some other crate need to *always* be linked - // against in a thread-local fashion, so we need to be sure to apply the - // thread-local attribute locally if it was present remotely. If we - // don't do this then linker errors can be generated where the linker - // complains that one object files has a thread local version of the - // symbol and another one doesn't. - for attr in ccx.tcx().get_attrs(did).iter() { - if attr.check_name("thread_local") { - llvm::set_thread_local(c, true); - } - } - if ccx.use_dll_storage_attrs() { - llvm::SetDLLStorageClass(c, llvm::DLLImportStorageClass); - } - ccx.externs().borrow_mut().insert(name.to_string(), c); - return c; -} - fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId { match bcx.tcx().lang_items.require(it) { Ok(id) => id, @@ -280,11 +218,9 @@ pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let _icx = push_ctxt("malloc_raw_exchange"); // Allocate space: - let r = callee::trans_lang_call(bcx, - require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem), - &[size, align], - None, - debug_loc); + let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem); + let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().mk_substs(Substs::empty())) + .call(bcx, debug_loc, ArgVals(&[size, align]), None); Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr)) } @@ -897,38 +833,9 @@ pub fn fail_if_zero_or_overflows<'blk, 'tcx>(cx: Block<'blk, 'tcx>, } } -pub fn get_extern_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId) - -> datum::Datum<'tcx, datum::Rvalue> { - let name = ccx.sess().cstore.item_symbol(def_id); - let attrs = ccx.sess().cstore.item_attrs(def_id); - let ty = ccx.tcx().lookup_item_type(def_id).ty; - match ty.sty { - ty::TyFnDef(_, _, fty) => { - let abi = fty.abi; - let fty = infer::normalize_associated_type(ccx.tcx(), fty); - let ty = ccx.tcx().mk_fn_ptr(fty); - let llfn = match ccx.sess().target.target.adjust_abi(abi) { - Abi::RustIntrinsic | Abi::PlatformIntrinsic => { - ccx.sess().bug("unexpected intrinsic in get_extern_fn") - } - Abi::Rust | Abi::RustCall => { - get_extern_rust_fn(ccx, ty, &name, &attrs) - } - _ => { - foreign::register_foreign_item_fn(ccx, abi, ty, &name, &attrs) - } - }; - datum::immediate_rvalue(llfn, ty) - } - _ => unreachable!("get_extern_fn: expected fn item type, found {}", ty) - } -} - pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, llfn: ValueRef, llargs: &[ValueRef], - fn_ty: Ty<'tcx>, debug_loc: DebugLoc) -> (ValueRef, Block<'blk, 'tcx>) { let _icx = push_ctxt("invoke_"); @@ -936,8 +843,6 @@ pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, return (C_null(Type::i8(bcx.ccx())), bcx); } - let attributes = attributes::from_fn_type(bcx.ccx(), fn_ty); - match bcx.opt_node_id { None => { debug!("invoke at ???"); @@ -948,9 +853,9 @@ pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } if need_invoke(bcx) { - debug!("invoking {} at {:?}", bcx.val_to_string(llfn), bcx.llbb); + debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb); for &llarg in llargs { - debug!("arg: {}", bcx.val_to_string(llarg)); + debug!("arg: {:?}", Value(llarg)); } let normal_bcx = bcx.fcx.new_temp_block("normal-return"); let landing_pad = bcx.fcx.get_landing_pad(); @@ -960,16 +865,15 @@ pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, &llargs[..], normal_bcx.llbb, landing_pad, - 
Some(attributes), debug_loc); return (llresult, normal_bcx); } else { - debug!("calling {} at {:?}", bcx.val_to_string(llfn), bcx.llbb); + debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb); for &llarg in llargs { - debug!("arg: {}", bcx.val_to_string(llarg)); + debug!("arg: {:?}", Value(llarg)); } - let llresult = Call(bcx, llfn, &llargs[..], Some(attributes), debug_loc); + let llresult = Call(bcx, llfn, &llargs[..], debug_loc); return (llresult, bcx); } } @@ -1007,19 +911,16 @@ pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<' /// differs from the type used for SSA values. Also handles various special cases where the type /// gives us better information about what we are loading. pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { - if cx.unreachable.get() || type_is_zero_size(cx.ccx(), t) { + if cx.unreachable.get() { return C_undef(type_of::type_of(cx.ccx(), t)); } + load_ty_builder(&B(cx), ptr, t) +} - let ptr = to_arg_ty_ptr(cx, ptr, t); - let align = type_of::align_of(cx.ccx(), t); - - if type_is_immediate(cx.ccx(), t) && type_of::type_of(cx.ccx(), t).is_aggregate() { - let load = Load(cx, ptr); - unsafe { - llvm::LLVMSetAlignment(load, align); - } - return load; +pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { + let ccx = b.ccx; + if type_is_zero_size(ccx, t) { + return C_undef(type_of::type_of(ccx, t)); } unsafe { @@ -1027,28 +928,26 @@ pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True { let val = llvm::LLVMGetInitializer(global); if !val.is_null() { - return to_arg_ty(cx, val, t); + if t.is_bool() { + return llvm::LLVMConstTrunc(val, Type::i1(ccx).to_ref()); + } + return val; } } } - let val = if t.is_bool() { - LoadRangeAssert(cx, ptr, 0, 2, llvm::False) + if t.is_bool() { + b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False), Type::i1(ccx)) } else if t.is_char() { // a char is a Unicode codepoint, and so takes values from 0 // to 0x10FFFF inclusive only. - LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False) - } else if (t.is_region_ptr() || t.is_unique()) && !common::type_is_fat_ptr(cx.tcx(), t) { - LoadNonNull(cx, ptr) + b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False) + } else if (t.is_region_ptr() || t.is_unique()) && + !common::type_is_fat_ptr(ccx.tcx(), t) { + b.load_nonnull(ptr) } else { - Load(cx, ptr) - }; - - unsafe { - llvm::LLVMSetAlignment(val, align); + b.load(ptr) } - - to_arg_ty(cx, val, t) } /// Helper for storing values in memory. 
Does the necessary conversion if the in-memory type @@ -1058,10 +957,7 @@ pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t return; } - debug!("store_ty: {} : {:?} <- {}", - cx.val_to_string(dst), - t, - cx.val_to_string(v)); + debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); if common::type_is_fat_ptr(cx.tcx(), t) { Store(cx, @@ -1071,10 +967,7 @@ pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t ExtractValue(cx, v, abi::FAT_PTR_EXTRA), expr::get_meta(cx, dst)); } else { - let store = Store(cx, from_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t)); - unsafe { - llvm::LLVMSetAlignment(store, type_of::align_of(cx.ccx(), t)); - } + Store(cx, from_immediate(cx, v), dst); } } @@ -1097,15 +990,15 @@ pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, Load(cx, expr::get_meta(cx, src))) } -pub fn from_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { - if ty.is_bool() { +pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef { + if val_ty(val) == Type::i1(bcx.ccx()) { ZExt(bcx, val, Type::i8(bcx.ccx())) } else { val } } -pub fn to_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { +pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { if ty.is_bool() { Trunc(bcx, val, Type::i1(bcx.ccx())) } else { @@ -1113,17 +1006,6 @@ pub fn to_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { } } -pub fn to_arg_ty_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef, ty: Ty<'tcx>) -> ValueRef { - if type_is_immediate(bcx.ccx(), ty) && type_of::type_of(bcx.ccx(), ty).is_aggregate() { - // We want to pass small aggregates as immediate values, but using an aggregate LLVM type - // for this leads to bad optimizations, so its arg type is an appropriately sized integer - // and we have to convert it - BitCast(bcx, ptr, type_of::arg_type_of(bcx.ccx(), ty).ptr_to()) - } else { - ptr - } -} - pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local) -> Block<'blk, 'tcx> { debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id); let _indenter = indenter(); @@ -1200,7 +1082,6 @@ pub fn call_lifetime_start(cx: Block, ptr: ValueRef) { Call(cx, lifetime_start, &[C_u64(ccx, size), ptr], - None, DebugLoc::None); }) } @@ -1211,7 +1092,6 @@ pub fn call_lifetime_end(cx: Block, ptr: ValueRef) { Call(cx, lifetime_end, &[C_u64(ccx, size), ptr], - None, DebugLoc::None); }) } @@ -1222,36 +1102,34 @@ pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) { Resume(bcx, lpval); } else { let exc_ptr = ExtractValue(bcx, lpval, 0); - let llunwresume = bcx.fcx.eh_unwind_resume(); - Call(bcx, llunwresume, &[exc_ptr], None, DebugLoc::None); - Unreachable(bcx); + bcx.fcx.eh_unwind_resume() + .call(bcx, DebugLoc::None, ArgVals(&[exc_ptr]), None); } } - -pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) { +pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, + dst: ValueRef, + src: ValueRef, + n_bytes: ValueRef, + align: u32) { let _icx = push_ctxt("call_memcpy"); - let ccx = cx.ccx(); + let ccx = b.ccx; let ptr_width = &ccx.sess().target.target.target_pointer_width[..]; let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); let memcpy = ccx.get_intrinsic(&key); - let src_ptr = PointerCast(cx, src, Type::i8p(ccx)); - let dst_ptr = PointerCast(cx, dst, Type::i8p(ccx)); - let size = IntCast(cx, n_bytes, ccx.int_type()); + let src_ptr = b.pointercast(src, Type::i8p(ccx)); + let dst_ptr = b.pointercast(dst, Type::i8p(ccx)); + let size = b.intcast(n_bytes, 
ccx.int_type()); let align = C_i32(ccx, align as i32); let volatile = C_bool(ccx, false); - Call(cx, - memcpy, - &[dst_ptr, src_ptr, size, align, volatile], - None, - DebugLoc::None); + b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) { let _icx = push_ctxt("memcpy_ty"); let ccx = bcx.ccx(); - if type_is_zero_size(ccx, t) { + if type_is_zero_size(ccx, t) || bcx.unreachable.get() { return; } @@ -1259,7 +1137,7 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRe let llty = type_of::type_of(ccx, t); let llsz = llsize_of(ccx, llty); let llalign = type_of::align_of(ccx, t); - call_memcpy(bcx, dst, src, llsz, llalign as u32); + call_memcpy(&B(bcx), dst, src, llsz, llalign as u32); } else if common::type_is_fat_ptr(bcx.tcx(), t) { let (data, extra) = load_fat_ptr(bcx, src, t); store_fat_ptr(bcx, data, extra, dst, t); @@ -1313,7 +1191,7 @@ pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key); let volatile = C_bool(ccx, volatile); - b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None, None); + b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); } @@ -1388,7 +1266,7 @@ pub fn alloca_dropped<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ty: Ty<'tcx>, name: &st // Block, which we do not have for `alloca_insert_pt`). core_lifetime_emit(cx.ccx(), p, Lifetime::Start, |ccx, size, lifetime_start| { let ptr = b.pointercast(p, Type::i8p(ccx)); - b.call(lifetime_start, &[C_u64(ccx, size), ptr], None, None); + b.call(lifetime_start, &[C_u64(ccx, size), ptr], None); }); memfill(&b, p, ty, adt::DTOR_DONE); p @@ -1412,41 +1290,6 @@ pub fn set_value_name(val: ValueRef, name: &str) { } } -// Creates the alloca slot which holds the pointer to the slot for the final return value -pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, - output_type: Ty<'tcx>) - -> ValueRef { - let lloutputtype = type_of::type_of(fcx.ccx, output_type); - - // We create an alloca to hold a pointer of type `output_type` - // which will hold the pointer to the right alloca which has the - // final ret value - if fcx.needs_ret_allocas { - // Let's create the stack slot - let slot = AllocaFcx(fcx, lloutputtype.ptr_to(), "llretslotptr"); - - // and if we're using an out pointer, then store that in our newly made slot - if type_of::return_uses_outptr(fcx.ccx, output_type) { - let outptr = get_param(fcx.llfn, 0); - - let b = fcx.ccx.builder(); - b.position_before(fcx.alloca_insert_pt.get().unwrap()); - b.store(outptr, slot); - } - - slot - - // But if there are no nested returns, we skip the indirection and have a single - // retslot - } else { - if type_of::return_uses_outptr(fcx.ccx, output_type) { - get_param(fcx.llfn, 0) - } else { - AllocaFcx(fcx, lloutputtype, "sret_slot") - } - } -} - struct FindNestedReturn { found: bool, } @@ -1553,517 +1396,477 @@ fn has_nested_returns(tcx: &TyCtxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool return false; } -// NB: must keep 4 fns in sync: -// -// - type_of_fn -// - create_datums_for_fn_args. -// - new_fn_ctxt -// - trans_args -// -// Be warned! You must call `init_function` before doing anything with the -// returned function context. 
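// --- Illustrative aside (not part of this patch) ----------------------------
// `has_nested_returns` above looks for `return` expressions that occur inside
// a larger expression rather than in tail position; those are what force the
// extra `llretslotptr` indirection (`needs_ret_allocas`) used further down.
// A minimal sketch of the shape it detects, with a made-up function name:
fn nested_return_example(x: Option<i32>) -> i32 {
    // The `return 0` sits inside the initializer of `y`, i.e. it is "nested".
    let y = 1 + match x {
        Some(v) => v,
        None => return 0,
    };
    y * 2
}
// -----------------------------------------------------------------------------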
-pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, - llfndecl: ValueRef, - id: ast::NodeId, - has_env: bool, - output_type: ty::FnOutput<'tcx>, - param_substs: &'tcx Substs<'tcx>, - sp: Option, - block_arena: &'a TypedArena>) - -> FunctionContext<'a, 'tcx> { - common::validate_substs(param_substs); - - debug!("new_fn_ctxt(path={}, id={}, param_substs={:?})", - if id == !0 { - "".to_string() - } else { - ccx.tcx().map.path_to_string(id).to_string() - }, - id, - param_substs); - - let uses_outptr = match output_type { - ty::FnConverging(output_type) => { - let substd_output_type = monomorphize::apply_param_substs(ccx.tcx(), - param_substs, - &output_type); - type_of::return_uses_outptr(ccx, substd_output_type) - } - ty::FnDiverging => false, - }; - let debug_context = debuginfo::create_function_debug_context(ccx, id, param_substs, llfndecl); - let (blk_id, cfg) = build_cfg(ccx.tcx(), id); - let nested_returns = if let Some(ref cfg) = cfg { - has_nested_returns(ccx.tcx(), cfg, blk_id) - } else { - false - }; +impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { + /// Create a function context for the given function. + /// Beware that you must call `fcx.init` or `fcx.bind_args` + /// before doing anything with the returned function context. + pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>, + llfndecl: ValueRef, + fn_ty: FnType, + def_id: Option, + param_substs: &'tcx Substs<'tcx>, + block_arena: &'blk TypedArena>) + -> FunctionContext<'blk, 'tcx> { + common::validate_substs(param_substs); + + let inlined_did = def_id.and_then(|def_id| inline::get_local_instance(ccx, def_id)); + let inlined_id = inlined_did.and_then(|id| ccx.tcx().map.as_local_node_id(id)); + let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id)); + + debug!("FunctionContext::new(path={}, def_id={:?}, param_substs={:?})", + inlined_id.map_or(String::new(), |id| { + ccx.tcx().map.path_to_string(id).to_string() + }), + def_id, + param_substs); + + let debug_context = debuginfo::create_function_debug_context(ccx, + inlined_id.unwrap_or(ast::DUMMY_NODE_ID), param_substs, llfndecl); + + let cfg = inlined_id.map(|id| build_cfg(ccx.tcx(), id)); + let nested_returns = if let Some((blk_id, Some(ref cfg))) = cfg { + has_nested_returns(ccx.tcx(), cfg, blk_id) + } else { + false + }; - let mir = ccx.mir_map().map.get(&id); - - let mut fcx = FunctionContext { - mir: mir, - llfn: llfndecl, - llenv: None, - llretslotptr: Cell::new(None), - param_env: ccx.tcx().empty_parameter_environment(), - alloca_insert_pt: Cell::new(None), - llreturn: Cell::new(None), - needs_ret_allocas: nested_returns, - landingpad_alloca: Cell::new(None), - caller_expects_out_pointer: uses_outptr, - lllocals: RefCell::new(NodeMap()), - llupvars: RefCell::new(NodeMap()), - lldropflag_hints: RefCell::new(DropFlagHintsMap::new()), - id: id, - param_substs: param_substs, - span: sp, - block_arena: block_arena, - lpad_arena: TypedArena::new(), - ccx: ccx, - debug_context: debug_context, - scopes: RefCell::new(Vec::new()), - cfg: cfg, - }; + let check_attrs = |attrs: &[ast::Attribute]| { + let default_to_mir = ccx.sess().opts.debugging_opts.orbit; + let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" }; + default_to_mir ^ attrs.iter().any(|item| item.check_name(invert)) + }; - if has_env { - fcx.llenv = Some(get_param(fcx.llfn, fcx.env_arg_pos() as c_uint)) - } - - fcx -} - -/// Performs setup on a newly created function, creating the entry scope block -/// and allocating space for the return pointer. 
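// --- Illustrative aside (not part of this patch) ----------------------------
// The `check_attrs` closure above decides, per item, whether MIR trans is
// used for the `use_mir` choice that follows: `-Z orbit` flips the default,
// and #[rustc_mir] / #[rustc_no_mir] invert whatever the default is. A
// standalone model of that rule, using made-up names and plain strings in
// place of real attributes:
fn would_use_mir(default_to_mir: bool, attrs: &[&str]) -> bool {
    let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" };
    default_to_mir ^ attrs.iter().any(|a| *a == invert)
}

#[test]
fn mir_selection_model() {
    assert!(would_use_mir(true, &[]));                 // -Z orbit: MIR by default
    assert!(!would_use_mir(true, &["rustc_no_mir"]));  // ...unless opted out
    assert!(!would_use_mir(false, &[]));               // old trans by default
    assert!(would_use_mir(false, &["rustc_mir"]));     // ...unless opted in
}
// -----------------------------------------------------------------------------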
-pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>, - skip_retptr: bool, - output: ty::FnOutput<'tcx>) - -> Block<'a, 'tcx> { - let entry_bcx = fcx.new_temp_block("entry-block"); - - // Use a dummy instruction as the insertion point for all allocas. - // This is later removed in FunctionContext::cleanup. - fcx.alloca_insert_pt.set(Some(unsafe { - Load(entry_bcx, C_null(Type::i8p(fcx.ccx))); - llvm::LLVMGetFirstInstruction(entry_bcx.llbb) - })); - - if let ty::FnConverging(output_type) = output { - // This shouldn't need to recompute the return type, - // as new_fn_ctxt did it already. - let substd_output_type = fcx.monomorphize(&output_type); - if !return_type_is_void(fcx.ccx, substd_output_type) { - // If the function returns nil/bot, there is no real return - // value, so do not set `llretslotptr`. - if !skip_retptr || fcx.caller_expects_out_pointer { - // Otherwise, we normally allocate the llretslotptr, unless we - // have been instructed to skip it for immediate return - // values. - fcx.llretslotptr.set(Some(make_return_slot_pointer(fcx, substd_output_type))); - } + let use_mir = if let Some(id) = local_id { + check_attrs(ccx.tcx().map.attrs(id)) + } else if let Some(def_id) = def_id { + check_attrs(&ccx.sess().cstore.item_attrs(def_id)) + } else { + check_attrs(&[]) + }; + + let mir = if use_mir { + def_id.and_then(|id| ccx.get_mir(id)) + } else { + None + }; + + FunctionContext { + needs_ret_allocas: nested_returns && mir.is_none(), + mir: mir, + llfn: llfndecl, + llretslotptr: Cell::new(None), + param_env: ccx.tcx().empty_parameter_environment(), + alloca_insert_pt: Cell::new(None), + llreturn: Cell::new(None), + landingpad_alloca: Cell::new(None), + lllocals: RefCell::new(NodeMap()), + llupvars: RefCell::new(NodeMap()), + lldropflag_hints: RefCell::new(DropFlagHintsMap::new()), + fn_ty: fn_ty, + param_substs: param_substs, + span: inlined_id.and_then(|id| ccx.tcx().map.opt_span(id)), + block_arena: block_arena, + lpad_arena: TypedArena::new(), + ccx: ccx, + debug_context: debug_context, + scopes: RefCell::new(Vec::new()), + cfg: cfg.and_then(|(_, cfg)| cfg) + } + } + + /// Performs setup on a newly created function, creating the entry + /// scope block and allocating space for the return pointer. + pub fn init(&'blk self, skip_retptr: bool, fn_did: Option) + -> Block<'blk, 'tcx> { + let entry_bcx = self.new_temp_block("entry-block"); + + // Use a dummy instruction as the insertion point for all allocas. + // This is later removed in FunctionContext::cleanup. + self.alloca_insert_pt.set(Some(unsafe { + Load(entry_bcx, C_null(Type::i8p(self.ccx))); + llvm::LLVMGetFirstInstruction(entry_bcx.llbb) + })); + + if !self.fn_ty.ret.is_ignore() && !skip_retptr { + // We normally allocate the llretslotptr, unless we + // have been instructed to skip it for immediate return + // values, or there is nothing to return at all. 
+ + // We create an alloca to hold a pointer of type `ret.original_ty` + // which will hold the pointer to the right alloca which has the + // final ret value + let llty = self.fn_ty.ret.memory_ty(self.ccx); + let slot = if self.needs_ret_allocas { + // Let's create the stack slot + let slot = AllocaFcx(self, llty.ptr_to(), "llretslotptr"); + + // and if we're using an out pointer, then store that in our newly made slot + if self.fn_ty.ret.is_indirect() { + let outptr = get_param(self.llfn, 0); + + let b = self.ccx.builder(); + b.position_before(self.alloca_insert_pt.get().unwrap()); + b.store(outptr, slot); + } + + slot + } else { + // But if there are no nested returns, we skip the indirection + // and have a single retslot + if self.fn_ty.ret.is_indirect() { + get_param(self.llfn, 0) + } else { + AllocaFcx(self, llty, "sret_slot") + } + }; + + self.llretslotptr.set(Some(slot)); } - } - // Create the drop-flag hints for every unfragmented path in the function. - let tcx = fcx.ccx.tcx(); - let fn_did = tcx.map.local_def_id(fcx.id); - let tables = tcx.tables.borrow(); - let mut hints = fcx.lldropflag_hints.borrow_mut(); - let fragment_infos = tcx.fragment_infos.borrow(); + // Create the drop-flag hints for every unfragmented path in the function. + let tcx = self.ccx.tcx(); + let tables = tcx.tables.borrow(); + let mut hints = self.lldropflag_hints.borrow_mut(); + let fragment_infos = tcx.fragment_infos.borrow(); - // Intern table for drop-flag hint datums. - let mut seen = HashMap::new(); + // Intern table for drop-flag hint datums. + let mut seen = HashMap::new(); - if let Some(fragment_infos) = fragment_infos.get(&fn_did) { - for &info in fragment_infos { + let fragment_infos = fn_did.and_then(|did| fragment_infos.get(&did)); + if let Some(fragment_infos) = fragment_infos { + for &info in fragment_infos { - let make_datum = |id| { - let init_val = C_u8(fcx.ccx, adt::DTOR_NEEDED_HINT); - let llname = &format!("dropflag_hint_{}", id); - debug!("adding hint {}", llname); - let ty = tcx.types.u8; - let ptr = alloc_ty(entry_bcx, ty, llname); - Store(entry_bcx, init_val, ptr); - let flag = datum::Lvalue::new_dropflag_hint("base::init_function"); - datum::Datum::new(ptr, ty, flag) - }; + let make_datum = |id| { + let init_val = C_u8(self.ccx, adt::DTOR_NEEDED_HINT); + let llname = &format!("dropflag_hint_{}", id); + debug!("adding hint {}", llname); + let ty = tcx.types.u8; + let ptr = alloc_ty(entry_bcx, ty, llname); + Store(entry_bcx, init_val, ptr); + let flag = datum::Lvalue::new_dropflag_hint("FunctionContext::init"); + datum::Datum::new(ptr, ty, flag) + }; - let (var, datum) = match info { - ty::FragmentInfo::Moved { var, .. } | - ty::FragmentInfo::Assigned { var, .. } => { - let opt_datum = seen.get(&var).cloned().unwrap_or_else(|| { - let ty = tables.node_types[&var]; - if fcx.type_needs_drop(ty) { - let datum = make_datum(var); - seen.insert(var, Some(datum.clone())); - Some(datum) + let (var, datum) = match info { + ty::FragmentInfo::Moved { var, .. } | + ty::FragmentInfo::Assigned { var, .. 
} => { + let opt_datum = seen.get(&var).cloned().unwrap_or_else(|| { + let ty = tables.node_types[&var]; + if self.type_needs_drop(ty) { + let datum = make_datum(var); + seen.insert(var, Some(datum.clone())); + Some(datum) + } else { + // No drop call needed, so we don't need a dropflag hint + None + } + }); + if let Some(datum) = opt_datum { + (var, datum) } else { - // No drop call needed, so we don't need a dropflag hint - None + continue } - }); - if let Some(datum) = opt_datum { - (var, datum) - } else { - continue } - } - }; - match info { - ty::FragmentInfo::Moved { move_expr: expr_id, .. } => { - debug!("FragmentInfo::Moved insert drop hint for {}", expr_id); - hints.insert(expr_id, DropHint::new(var, datum)); - } - ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => { - debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id); - hints.insert(expr_id, DropHint::new(var, datum)); + }; + match info { + ty::FragmentInfo::Moved { move_expr: expr_id, .. } => { + debug!("FragmentInfo::Moved insert drop hint for {}", expr_id); + hints.insert(expr_id, DropHint::new(var, datum)); + } + ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => { + debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id); + hints.insert(expr_id, DropHint::new(var, datum)); + } } } } - } - - entry_bcx -} -// NB: must keep 4 fns in sync: -// -// - type_of_fn -// - create_datums_for_fn_args. -// - new_fn_ctxt -// - trans_args - -pub fn arg_kind<'a, 'tcx>(cx: &FunctionContext<'a, 'tcx>, t: Ty<'tcx>) -> datum::Rvalue { - use trans::datum::{ByRef, ByValue}; - - datum::Rvalue { - mode: if arg_is_indirect(cx.ccx, t) { ByRef } else { ByValue } - } -} - -// create_datums_for_fn_args: creates lvalue datums for each of the -// incoming function arguments. -pub fn create_datums_for_fn_args<'a, 'tcx>(mut bcx: Block<'a, 'tcx>, - args: &[hir::Arg], - arg_tys: &[Ty<'tcx>], - has_tupled_arg: bool, - arg_scope: cleanup::CustomScopeIndex) - -> Block<'a, 'tcx> { - let _icx = push_ctxt("create_datums_for_fn_args"); - let fcx = bcx.fcx; - let arg_scope_id = cleanup::CustomScope(arg_scope); - - debug!("create_datums_for_fn_args"); - - // Return an array wrapping the ValueRefs that we get from `get_param` for - // each argument into datums. - // - // For certain mode/type combinations, the raw llarg values are passed - // by value. However, within the fn body itself, we want to always - // have all locals and arguments be by-ref so that we can cancel the - // cleanup and for better interaction with LLVM's debug info. So, if - // the argument would be passed by value, we store it into an alloca. - // This alloca should be optimized away by LLVM's mem-to-reg pass in - // the event it's not truly needed. - let mut idx = fcx.arg_offset() as c_uint; - let uninit_reason = InitAlloca::Uninit("fn_arg populate dominates dtor"); - for (i, &arg_ty) in arg_tys.iter().enumerate() { - let arg_datum = if !has_tupled_arg || i < arg_tys.len() - 1 { - if type_of::arg_is_indirect(bcx.ccx(), arg_ty) && - bcx.sess().opts.debuginfo != FullDebugInfo { - // Don't copy an indirect argument to an alloca, the caller - // already put it in a temporary alloca and gave it up, unless - // we emit extra-debug-info, which requires local allocas :(. 
- let llarg = get_param(fcx.llfn, idx); - idx += 1; - bcx.fcx.schedule_lifetime_end(arg_scope_id, llarg); - bcx.fcx.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None); - - datum::Datum::new(llarg, - arg_ty, - datum::Lvalue::new("create_datum_for_fn_args")) - } else if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { - let data = get_param(fcx.llfn, idx); - let extra = get_param(fcx.llfn, idx + 1); - idx += 2; - unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "", uninit_reason, - arg_scope_id, (data, extra), - |(data, extra), bcx, dst| { - debug!("populate call for create_datum_for_fn_args \ - early fat arg, on arg[{}] ty={:?}", i, arg_ty); - - Store(bcx, data, expr::get_dataptr(bcx, dst)); - Store(bcx, extra, expr::get_meta(bcx, dst)); - bcx - })) - } else { - let llarg = get_param(fcx.llfn, idx); + entry_bcx + } + + /// Creates lvalue datums for each of the incoming function arguments, + /// matches all argument patterns against them to produce bindings, + /// and returns the entry block (see FunctionContext::init). + fn bind_args(&'blk self, + args: &[hir::Arg], + abi: Abi, + id: ast::NodeId, + closure_env: closure::ClosureEnv, + arg_scope: cleanup::CustomScopeIndex) + -> Block<'blk, 'tcx> { + let _icx = push_ctxt("FunctionContext::bind_args"); + let fn_did = self.ccx.tcx().map.local_def_id(id); + let mut bcx = self.init(false, Some(fn_did)); + let arg_scope_id = cleanup::CustomScope(arg_scope); + + let mut idx = 0; + let mut llarg_idx = self.fn_ty.ret.is_indirect() as usize; + + let has_tupled_arg = match closure_env { + closure::ClosureEnv::NotClosure => abi == Abi::RustCall, + closure::ClosureEnv::Closure(..) => { + closure_env.load(bcx, arg_scope_id); + let env_arg = &self.fn_ty.args[idx]; idx += 1; - let tmp = datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty)); - unpack_datum!(bcx, - datum::lvalue_scratch_datum(bcx, - arg_ty, - "", - uninit_reason, - arg_scope_id, - tmp, - |tmp, bcx, dst| { - - debug!("populate call for create_datum_for_fn_args \ - early thin arg, on arg[{}] ty={:?}", i, arg_ty); - - tmp.store_to(bcx, dst) - })) - } - } else { - // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for. 
- match arg_ty.sty { - ty::TyTuple(ref tupled_arg_tys) => { - unpack_datum!(bcx, - datum::lvalue_scratch_datum(bcx, - arg_ty, - "tupled_args", - uninit_reason, - arg_scope_id, - (), - |(), - mut bcx, - llval| { - debug!("populate call for create_datum_for_fn_args \ - tupled_args, on arg[{}] ty={:?}", i, arg_ty); - for (j, &tupled_arg_ty) in - tupled_arg_tys.iter().enumerate() { - let lldest = StructGEP(bcx, llval, j); - if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) { - let data = get_param(bcx.fcx.llfn, idx); - let extra = get_param(bcx.fcx.llfn, idx + 1); - Store(bcx, data, expr::get_dataptr(bcx, lldest)); - Store(bcx, extra, expr::get_meta(bcx, lldest)); - idx += 2; - } else { - let datum = datum::Datum::new( - get_param(bcx.fcx.llfn, idx), - tupled_arg_ty, - arg_kind(bcx.fcx, tupled_arg_ty)); - idx += 1; - bcx = datum.store_to(bcx, lldest); - }; - } - bcx - })) + if env_arg.pad.is_some() { + llarg_idx += 1; } - _ => { - bcx.tcx() - .sess - .bug("last argument of a function with `rust-call` ABI isn't a tuple?!") + if !env_arg.is_ignore() { + llarg_idx += 1; } + false } }; - - let pat = &*args[i].pat; - bcx = if let Some(name) = simple_name(pat) { - // Generate nicer LLVM for the common case of fn a pattern - // like `x: T` - set_value_name(arg_datum.val, &bcx.name(name)); - bcx.fcx.lllocals.borrow_mut().insert(pat.id, arg_datum); - bcx + let tupled_arg_id = if has_tupled_arg { + args[args.len() - 1].id } else { - // General path. Copy out the values that are used in the - // pattern. - _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id) + ast::DUMMY_NODE_ID }; - debuginfo::create_argument_metadata(bcx, &args[i]); - } - bcx -} + // Return an array wrapping the ValueRefs that we get from `get_param` for + // each argument into datums. + // + // For certain mode/type combinations, the raw llarg values are passed + // by value. However, within the fn body itself, we want to always + // have all locals and arguments be by-ref so that we can cancel the + // cleanup and for better interaction with LLVM's debug info. So, if + // the argument would be passed by value, we store it into an alloca. + // This alloca should be optimized away by LLVM's mem-to-reg pass in + // the event it's not truly needed. + let uninit_reason = InitAlloca::Uninit("fn_arg populate dominates dtor"); + for hir_arg in args { + let arg_ty = node_id_type(bcx, hir_arg.id); + let arg_datum = if hir_arg.id != tupled_arg_id { + let arg = &self.fn_ty.args[idx]; + idx += 1; + if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo { + // Don't copy an indirect argument to an alloca, the caller + // already put it in a temporary alloca and gave it up, unless + // we emit extra-debug-info, which requires local allocas :(. 
+ let llarg = get_param(self.llfn, llarg_idx as c_uint); + llarg_idx += 1; + self.schedule_lifetime_end(arg_scope_id, llarg); + self.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None); + + datum::Datum::new(llarg, + arg_ty, + datum::Lvalue::new("FunctionContext::bind_args")) + } else { + unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "", + uninit_reason, + arg_scope_id, |bcx, dst| { + debug!("FunctionContext::bind_args: {:?}: {:?}", hir_arg, arg_ty); + let b = &bcx.build(); + if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { + let meta = &self.fn_ty.args[idx]; + idx += 1; + arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst)); + meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst)); + } else { + arg.store_fn_arg(b, &mut llarg_idx, dst); + } + bcx + })) + } + } else { + // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for. + let tupled_arg_tys = match arg_ty.sty { + ty::TyTuple(ref tys) => tys, + _ => unreachable!("last argument of `rust-call` fn isn't a tuple?!") + }; -// Ties up the llstaticallocas -> llloadenv -> lltop edges, -// and builds the return block. -pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>, - last_bcx: Block<'blk, 'tcx>, - retty: ty::FnOutput<'tcx>, - ret_debug_loc: DebugLoc) { - let _icx = push_ctxt("finish_fn"); + unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, + arg_ty, + "tupled_args", + uninit_reason, + arg_scope_id, + |bcx, llval| { + debug!("FunctionContext::bind_args: tupled {:?}: {:?}", hir_arg, arg_ty); + for (j, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { + let dst = StructGEP(bcx, llval, j); + let arg = &self.fn_ty.args[idx]; + let b = &bcx.build(); + if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) { + let meta = &self.fn_ty.args[idx]; + idx += 1; + arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst)); + meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst)); + } else { + arg.store_fn_arg(b, &mut llarg_idx, dst); + } + } + bcx + })) + }; - let ret_cx = match fcx.llreturn.get() { - Some(llreturn) => { - if !last_bcx.terminated.get() { - Br(last_bcx, llreturn, DebugLoc::None); - } - raw_block(fcx, llreturn) + let pat = &hir_arg.pat; + bcx = if let Some(name) = simple_name(pat) { + // Generate nicer LLVM for the common case of fn a pattern + // like `x: T` + set_value_name(arg_datum.val, &bcx.name(name)); + self.lllocals.borrow_mut().insert(pat.id, arg_datum); + bcx + } else { + // General path. Copy out the values that are used in the + // pattern. + _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id) + }; + debuginfo::create_argument_metadata(bcx, hir_arg); } - None => last_bcx, - }; - // This shouldn't need to recompute the return type, - // as new_fn_ctxt did it already. - let substd_retty = fcx.monomorphize(&retty); - build_return_block(fcx, ret_cx, substd_retty, ret_debug_loc); + bcx + } - debuginfo::clear_source_location(fcx); - fcx.cleanup(); -} + /// Ties up the llstaticallocas -> llloadenv -> lltop edges, + /// and builds the return block. + pub fn finish(&'blk self, last_bcx: Block<'blk, 'tcx>, + ret_debug_loc: DebugLoc) { + let _icx = push_ctxt("FunctionContext::finish"); -// Builds the return block for a function. 
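// --- Illustrative aside (not part of this patch) ----------------------------
// Several helpers in this file (from_immediate/to_immediate earlier, the i1
// handling in build_return_block below) encode one convention: bool lives in
// memory as an i8 holding 0 or 1, but is an i1 as an SSA immediate, so stores
// zero-extend and loads truncate after a 0..2 range assertion. A trivial model
// of that convention, not compiler code:
fn bool_to_memory(b: bool) -> u8 { b as u8 }      // ZExt, cf. from_immediate
fn bool_from_memory(byte: u8) -> bool {
    debug_assert!(byte < 2);                      // cf. LoadRangeAssert(ptr, 0, 2)
    byte != 0                                     // Trunc to i1, cf. to_immediate
}
// -----------------------------------------------------------------------------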
-pub fn build_return_block<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>, - ret_cx: Block<'blk, 'tcx>, - retty: ty::FnOutput<'tcx>, - ret_debug_location: DebugLoc) { - if fcx.llretslotptr.get().is_none() || - (!fcx.needs_ret_allocas && fcx.caller_expects_out_pointer) { - return RetVoid(ret_cx, ret_debug_location); + let ret_cx = match self.llreturn.get() { + Some(llreturn) => { + if !last_bcx.terminated.get() { + Br(last_bcx, llreturn, DebugLoc::None); + } + raw_block(self, llreturn) + } + None => last_bcx, + }; + + self.build_return_block(ret_cx, ret_debug_loc); + + debuginfo::clear_source_location(self); + self.cleanup(); } - let retslot = if fcx.needs_ret_allocas { - Load(ret_cx, fcx.llretslotptr.get().unwrap()) - } else { - fcx.llretslotptr.get().unwrap() - }; - let retptr = Value(retslot); - match retptr.get_dominating_store(ret_cx) { - // If there's only a single store to the ret slot, we can directly return - // the value that was stored and omit the store and the alloca - Some(s) => { - let retval = s.get_operand(0).unwrap().get(); - s.erase_from_parent(); - - if retptr.has_no_uses() { - retptr.erase_from_parent(); - } + // Builds the return block for a function. + pub fn build_return_block(&self, ret_cx: Block<'blk, 'tcx>, + ret_debug_location: DebugLoc) { + if self.llretslotptr.get().is_none() || + ret_cx.unreachable.get() || + (!self.needs_ret_allocas && self.fn_ty.ret.is_indirect()) { + return RetVoid(ret_cx, ret_debug_location); + } - let retval = if retty == ty::FnConverging(fcx.ccx.tcx().types.bool) { - Trunc(ret_cx, retval, Type::i1(fcx.ccx)) - } else { - retval - }; + let retslot = if self.needs_ret_allocas { + Load(ret_cx, self.llretslotptr.get().unwrap()) + } else { + self.llretslotptr.get().unwrap() + }; + let retptr = Value(retslot); + let llty = self.fn_ty.ret.original_ty; + match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) { + // If there's only a single store to the ret slot, we can directly return + // the value that was stored and omit the store and the alloca. + // However, we only want to do this when there is no cast needed. + (Some(s), None) => { + let mut retval = s.get_operand(0).unwrap().get(); + s.erase_from_parent(); + + if retptr.has_no_uses() { + retptr.erase_from_parent(); + } - if fcx.caller_expects_out_pointer { - if let ty::FnConverging(retty) = retty { - store_ty(ret_cx, retval, get_param(fcx.llfn, 0), retty); + if self.fn_ty.ret.is_indirect() { + Store(ret_cx, retval, get_param(self.llfn, 0)); + RetVoid(ret_cx, ret_debug_location) + } else { + if llty == Type::i1(self.ccx) { + retval = Trunc(ret_cx, retval, llty); + } + Ret(ret_cx, retval, ret_debug_location) } + } + (_, cast_ty) if self.fn_ty.ret.is_indirect() => { + // Otherwise, copy the return value to the ret slot. 
+ assert_eq!(cast_ty, None); + let llsz = llsize_of(self.ccx, self.fn_ty.ret.ty); + let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty); + call_memcpy(&B(ret_cx), get_param(self.llfn, 0), + retslot, llsz, llalign as u32); RetVoid(ret_cx, ret_debug_location) - } else { - Ret(ret_cx, retval, ret_debug_location) } - } - // Otherwise, copy the return value to the ret slot - None => match retty { - ty::FnConverging(retty) => { - if fcx.caller_expects_out_pointer { - memcpy_ty(ret_cx, get_param(fcx.llfn, 0), retslot, retty); - RetVoid(ret_cx, ret_debug_location) - } else { - Ret(ret_cx, load_ty(ret_cx, retslot, retty), ret_debug_location) + (_, Some(cast_ty)) => { + let load = Load(ret_cx, PointerCast(ret_cx, retslot, cast_ty.ptr_to())); + let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty); + unsafe { + llvm::LLVMSetAlignment(load, llalign); } + Ret(ret_cx, load, ret_debug_location) } - ty::FnDiverging => { - if fcx.caller_expects_out_pointer { - RetVoid(ret_cx, ret_debug_location) + (_, None) => { + let retval = if llty == Type::i1(self.ccx) { + let val = LoadRangeAssert(ret_cx, retslot, 0, 2, llvm::False); + Trunc(ret_cx, val, llty) } else { - Ret(ret_cx, C_undef(Type::nil(fcx.ccx)), ret_debug_location) - } + Load(ret_cx, retslot) + }; + Ret(ret_cx, retval, ret_debug_location) } - }, + } } } /// Builds an LLVM function out of a source function. /// /// If the function closes over its environment a closure will be returned. -pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, - llfndecl: ValueRef, - param_substs: &'tcx Substs<'tcx>, - fn_ast_id: ast::NodeId, - attributes: &[ast::Attribute], - output_type: ty::FnOutput<'tcx>, - abi: Abi, - closure_env: closure::ClosureEnv<'b>) { +pub fn trans_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + decl: &hir::FnDecl, + body: &hir::Block, + llfndecl: ValueRef, + param_substs: &'tcx Substs<'tcx>, + def_id: DefId, + inlined_id: ast::NodeId, + fn_ty: FnType, + abi: Abi, + closure_env: closure::ClosureEnv) { ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1); - record_translation_item_as_generated(ccx, fn_ast_id, param_substs); + if collector::collecting_debug_information(ccx) { + ccx.record_translation_item_as_generated(TransItem::Fn(Instance { + def: def_id, + params: ¶m_substs.types + })) + } let _icx = push_ctxt("trans_closure"); attributes::emit_uwtable(llfndecl, true); debug!("trans_closure(..., param_substs={:?})", param_substs); - let has_env = match closure_env { - closure::ClosureEnv::Closure(..) 
=> true, - closure::ClosureEnv::NotClosure => false, - }; - let (arena, fcx): (TypedArena<_>, FunctionContext); arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, - llfndecl, - fn_ast_id, - has_env, - output_type, - param_substs, - Some(body.span), - &arena); - let mut bcx = init_function(&fcx, false, output_type); - - if attributes.iter().any(|item| item.check_name("rustc_mir")) { - mir::trans_mir(bcx.build()); - fcx.cleanup(); - return; + fcx = FunctionContext::new(ccx, llfndecl, fn_ty, Some(def_id), param_substs, &arena); + + if fcx.mir.is_some() { + return mir::trans_mir(&fcx); } // cleanup scope for the incoming arguments - let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(ccx, - fn_ast_id, - body.span, - true); + let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node( + ccx, inlined_id, body.span, true); let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc); - let block_ty = node_id_type(bcx, body.id); - // Set up arguments to the function. - let monomorphized_arg_types = decl.inputs - .iter() - .map(|arg| node_id_type(bcx, arg.id)) - .collect::>(); - for monomorphized_arg_type in &monomorphized_arg_types { - debug!("trans_closure: monomorphized_arg_type: {:?}", - monomorphized_arg_type); - } - debug!("trans_closure: function lltype: {}", - bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn)); - - let has_tupled_arg = match closure_env { - closure::ClosureEnv::NotClosure => abi == Abi::RustCall, - _ => false, - }; - - bcx = create_datums_for_fn_args(bcx, - &decl.inputs, - &monomorphized_arg_types, - has_tupled_arg, - arg_scope); - - bcx = closure_env.load(bcx, cleanup::CustomScope(arg_scope)); + debug!("trans_closure: function: {:?}", Value(fcx.llfn)); + let bcx = fcx.bind_args(&decl.inputs, abi, inlined_id, closure_env, arg_scope); // Up until here, IR instructions for this function have explicitly not been annotated with // source code location, so we don't step into call setup code. From here on, source location // emitting should be enabled. debuginfo::start_emitting_source_locations(&fcx); - let dest = match fcx.llretslotptr.get() { - Some(_) => expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(block_ty), "iret_slot")), - None => { - assert!(type_is_zero_size(bcx.ccx(), block_ty)); - expr::Ignore - } + let dest = if fcx.fn_ty.ret.is_ignore() { + expr::Ignore + } else { + expr::SaveIn(fcx.get_ret_slot(bcx, "iret_slot")) }; // This call to trans_block is the place where we bridge between // translation calls that don't have a return value (trans_crate, // trans_mod, trans_item, et cetera) and those that do // (trans_block, trans_expr, et cetera). - bcx = controlflow::trans_block(bcx, body, dest); + let mut bcx = controlflow::trans_block(bcx, body, dest); match dest { expr::SaveIn(slot) if fcx.needs_ret_allocas => { @@ -2096,29 +1899,7 @@ pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let ret_debug_loc = DebugLoc::At(fn_cleanup_debug_loc.id, fn_cleanup_debug_loc.span); // Insert the mandatory first few basic blocks before lltop. 
- finish_fn(&fcx, bcx, output_type, ret_debug_loc); - - fn record_translation_item_as_generated<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - node_id: ast::NodeId, - param_substs: &'tcx Substs<'tcx>) { - if !collector::collecting_debug_information(ccx) { - return; - } - - let def_id = match ccx.tcx().node_id_to_type(node_id).sty { - ty::TyClosure(def_id, _) => def_id, - _ => ccx.external_srcs() - .borrow() - .get(&node_id) - .map(|did| *did) - .unwrap_or_else(|| ccx.tcx().map.local_def_id(node_id)), - }; - - ccx.record_translation_item_as_generated(TransItem::Fn{ - def_id: def_id, - substs: ccx.tcx().mk_substs(ccx.tcx().erase_regions(param_substs)), - }); - } + fcx.finish(bcx, ret_debug_loc); } /// Creates an LLVM function corresponding to a source language function. @@ -2127,44 +1908,37 @@ pub fn trans_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, body: &hir::Block, llfndecl: ValueRef, param_substs: &'tcx Substs<'tcx>, - id: ast::NodeId, - attrs: &[ast::Attribute]) { + id: ast::NodeId) { let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string()); debug!("trans_fn(param_substs={:?})", param_substs); let _icx = push_ctxt("trans_fn"); let fn_ty = ccx.tcx().node_id_to_type(id); let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &fn_ty); - let sig = fn_ty.fn_sig(); - let sig = ccx.tcx().erase_late_bound_regions(&sig); + let sig = ccx.tcx().erase_late_bound_regions(fn_ty.fn_sig()); let sig = infer::normalize_associated_type(ccx.tcx(), &sig); - let output_type = sig.output; let abi = fn_ty.fn_abi(); + let fn_ty = FnType::new(ccx, abi, &sig, &[]); + let def_id = if let Some(&def_id) = ccx.external_srcs().borrow().get(&id) { + def_id + } else { + ccx.tcx().map.local_def_id(id) + }; trans_closure(ccx, decl, body, llfndecl, param_substs, + def_id, id, - attrs, - output_type, + fn_ty, abi, closure::ClosureEnv::NotClosure); } -pub fn trans_enum_variant<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ctor_id: ast::NodeId, - disr: Disr, - param_substs: &'tcx Substs<'tcx>, - llfndecl: ValueRef) { - let _icx = push_ctxt("trans_enum_variant"); - - trans_enum_variant_or_tuple_like_struct(ccx, ctor_id, disr, param_substs, llfndecl); -} - pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, ctor_ty: Ty<'tcx>, disr: Disr, - args: callee::CallArgs, + args: CallArgs, dest: expr::Dest, debug_loc: DebugLoc) -> Result<'blk, 'tcx> { @@ -2192,7 +1966,7 @@ pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, if !type_is_zero_size(ccx, result_ty) { match args { - callee::ArgExprs(exprs) => { + ArgExprs(exprs) => { let fields = exprs.iter().map(|x| &**x).enumerate().collect::>(); bcx = expr::trans_adt(bcx, result_ty, @@ -2208,7 +1982,7 @@ pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, // Just eval all the expressions (if any). Since expressions in Rust can have arbitrary // contents, there could be side-effects we need from them. 
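// --- Illustrative aside (not part of this patch) ----------------------------
// `trans_ctor_shim` below (the renamed trans_enum_variant_or_tuple_like_struct)
// builds the function bodies behind tuple-struct and enum-variant constructors
// when they are used as plain function values, e.g.:
struct Wrapper(u32, bool);
enum Letter { A(u8) }

fn ctor_as_fn_value() -> (Wrapper, Letter) {
    let f: fn(u32, bool) -> Wrapper = Wrapper;  // constructor reified as a fn pointer
    let g: fn(u8) -> Letter = Letter::A;
    (f(1, true), g(2))
}
// -----------------------------------------------------------------------------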
match args { - callee::ArgExprs(exprs) => { + ArgExprs(exprs) => { for expr in exprs { bcx = expr::trans_into(bcx, expr, expr::Ignore); } @@ -2233,72 +2007,51 @@ pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, Result::new(bcx, llresult) } -pub fn trans_tuple_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ctor_id: ast::NodeId, - param_substs: &'tcx Substs<'tcx>, - llfndecl: ValueRef) { - let _icx = push_ctxt("trans_tuple_struct"); - - trans_enum_variant_or_tuple_like_struct(ccx, ctor_id, Disr(0), param_substs, llfndecl); -} - -fn trans_enum_variant_or_tuple_like_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ctor_id: ast::NodeId, - disr: Disr, - param_substs: &'tcx Substs<'tcx>, - llfndecl: ValueRef) { +pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ctor_id: ast::NodeId, + disr: Disr, + param_substs: &'tcx Substs<'tcx>, + llfndecl: ValueRef) { let ctor_ty = ccx.tcx().node_id_to_type(ctor_id); let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty); let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig()); let sig = infer::normalize_associated_type(ccx.tcx(), &sig); - let arg_tys = sig.inputs; - let result_ty = sig.output; + let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); let (arena, fcx): (TypedArena<_>, FunctionContext); arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, - llfndecl, - ctor_id, - false, - result_ty, - param_substs, - None, - &arena); - let bcx = init_function(&fcx, false, result_ty); + fcx = FunctionContext::new(ccx, llfndecl, fn_ty, + Some(ccx.tcx().map.local_def_id(ctor_id)), + param_substs, &arena); + let bcx = fcx.init(false, None); assert!(!fcx.needs_ret_allocas); - if !type_is_zero_size(fcx.ccx, result_ty.unwrap()) { - let dest = fcx.get_ret_slot(bcx, result_ty, "eret_slot"); + if !fcx.fn_ty.ret.is_ignore() { + let dest = fcx.get_ret_slot(bcx, "eret_slot"); let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value - let repr = adt::represent_type(ccx, result_ty.unwrap()); - let mut llarg_idx = fcx.arg_offset() as c_uint; - for (i, arg_ty) in arg_tys.into_iter().enumerate() { + let repr = adt::represent_type(ccx, sig.output.unwrap()); + let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; + let mut arg_idx = 0; + for (i, arg_ty) in sig.inputs.into_iter().enumerate() { let lldestptr = adt::trans_field_ptr(bcx, &repr, dest_val, Disr::from(disr), i); + let arg = &fcx.fn_ty.args[arg_idx]; + arg_idx += 1; + let b = &bcx.build(); if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { - Store(bcx, - get_param(fcx.llfn, llarg_idx), - expr::get_dataptr(bcx, lldestptr)); - Store(bcx, - get_param(fcx.llfn, llarg_idx + 1), - expr::get_meta(bcx, lldestptr)); - llarg_idx += 2; + let meta = &fcx.fn_ty.args[arg_idx]; + arg_idx += 1; + arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, lldestptr)); + meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, lldestptr)); } else { - let arg = get_param(fcx.llfn, llarg_idx); - llarg_idx += 1; - - if arg_is_indirect(ccx, arg_ty) { - memcpy_ty(bcx, lldestptr, arg, arg_ty); - } else { - store_ty(bcx, arg, lldestptr, arg_ty); - } + arg.store_fn_arg(b, &mut llarg_idx, lldestptr); } } adt::trans_set_discr(bcx, &repr, dest, disr); } - finish_fn(&fcx, bcx, result_ty, DebugLoc::None); + fcx.finish(bcx, DebugLoc::None); } fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &hir::EnumDef, sp: Span, id: ast::NodeId) { @@ -2493,10 +2246,11 @@ fn set_global_section(ccx: &CrateContext, llval: ValueRef, i: &hir::Item) { pub fn 
trans_item(ccx: &CrateContext, item: &hir::Item) { let _icx = push_ctxt("trans_item"); + let tcx = ccx.tcx(); let from_external = ccx.external_srcs().borrow().contains_key(&item.id); match item.node { - hir::ItemFn(ref decl, _, _, abi, ref generics, ref body) => { + hir::ItemFn(ref decl, _, _, _, ref generics, ref body) => { if !generics.is_type_parameterized() { let trans_everywhere = attr::requests_inline(&item.attrs); // Ignore `trans_everywhere` for cross-crate inlined items @@ -2504,26 +2258,10 @@ pub fn trans_item(ccx: &CrateContext, item: &hir::Item) { // compilation unit that references the item, so it will still get // translated everywhere it's needed. for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) { - let llfn = get_item_val(ccx, item.id); - let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); - if abi != Abi::Rust { - foreign::trans_rust_fn_with_foreign_abi(ccx, - &decl, - &body, - &item.attrs, - llfn, - empty_substs, - item.id, - None); - } else { - trans_fn(ccx, - &decl, - &body, - llfn, - empty_substs, - item.id, - &item.attrs); - } + let empty_substs = tcx.mk_substs(Substs::trans_empty()); + let def_id = tcx.map.local_def_id(item.id); + let llfn = Callee::def(ccx, def_id, empty_substs).reify(ccx).val; + trans_fn(ccx, &decl, &body, llfn, empty_substs, item.id); set_global_section(ccx, llfn, item); update_linkage(ccx, llfn, @@ -2540,29 +2278,46 @@ pub fn trans_item(ccx: &CrateContext, item: &hir::Item) { // error in trans. This is used to write compile-fail tests // that actually test that compilation succeeds without // reporting an error. - let item_def_id = ccx.tcx().map.local_def_id(item.id); - if ccx.tcx().has_attr(item_def_id, "rustc_error") { - ccx.tcx().sess.span_fatal(item.span, "compilation successful"); + if tcx.has_attr(def_id, "rustc_error") { + tcx.sess.span_fatal(item.span, "compilation successful"); } } } } } hir::ItemImpl(_, _, ref generics, _, _, ref impl_items) => { - meth::trans_impl(ccx, item.name, impl_items, generics, item.id); - } - hir::ItemMod(_) => { - // modules have no equivalent at runtime, they just affect - // the mangled names of things contained within + // Both here and below with generic methods, be sure to recurse and look for + // items that we need to translate. + if !generics.ty_params.is_empty() { + return; + } + + for impl_item in impl_items { + if let hir::ImplItemKind::Method(ref sig, ref body) = impl_item.node { + if sig.generics.ty_params.is_empty() { + let trans_everywhere = attr::requests_inline(&impl_item.attrs); + for (ref ccx, is_origin) in ccx.maybe_iter(trans_everywhere) { + let empty_substs = tcx.mk_substs(Substs::trans_empty()); + let def_id = tcx.map.local_def_id(impl_item.id); + let llfn = Callee::def(ccx, def_id, empty_substs).reify(ccx).val; + trans_fn(ccx, &sig.decl, body, llfn, empty_substs, impl_item.id); + update_linkage(ccx, llfn, Some(impl_item.id), + if is_origin { + OriginalTranslation + } else { + InlinedCopy + }); + } + } + } + } } hir::ItemEnum(ref enum_definition, ref gens) => { if gens.ty_params.is_empty() { // sizes only make sense for non-generic types - enum_variant_size_lint(ccx, enum_definition, item.span, item.id); } } - hir::ItemConst(..) 
=> {} hir::ItemStatic(_, m, ref expr) => { let g = match consts::trans_static(ccx, m, expr, item.id, &item.attrs) { Ok(g) => g, @@ -2571,62 +2326,17 @@ pub fn trans_item(ccx: &CrateContext, item: &hir::Item) { set_global_section(ccx, g, item); update_linkage(ccx, g, Some(item.id), OriginalTranslation); } - hir::ItemForeignMod(ref foreign_mod) => { - foreign::trans_foreign_mod(ccx, foreign_mod); - } - hir::ItemTrait(..) => {} - _ => { - // fall through - } - } -} - -// only use this for foreign function ABIs and glue, use `register_fn` for Rust functions -pub fn register_fn_llvmty(ccx: &CrateContext, - sp: Span, - sym: String, - node_id: ast::NodeId, - cc: llvm::CallConv, - llfty: Type) - -> ValueRef { - debug!("register_fn_llvmty id={} sym={}", node_id, sym); - - let llfn = declare::define_fn(ccx, &sym[..], cc, llfty, - ty::FnConverging(ccx.tcx().mk_nil())).unwrap_or_else(||{ - ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym)); - }); - finish_register_fn(ccx, sym, node_id); - llfn -} - -fn finish_register_fn(ccx: &CrateContext, sym: String, node_id: ast::NodeId) { - ccx.item_symbols().borrow_mut().insert(node_id, sym); -} - -fn register_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - sp: Span, - sym: String, - node_id: ast::NodeId, - node_type: Ty<'tcx>) - -> ValueRef { - if let ty::TyFnDef(_, _, ref f) = node_type.sty { - if f.abi != Abi::Rust && f.abi != Abi::RustCall { - ccx.sess().span_bug(sp, - &format!("only the `{}` or `{}` calling conventions are valid \ - for this function; `{}` was specified", - Abi::Rust.name(), - Abi::RustCall.name(), - f.abi.name())); + hir::ItemForeignMod(ref m) => { + if m.abi == Abi::RustIntrinsic || m.abi == Abi::PlatformIntrinsic { + return; + } + for fi in &m.items { + let lname = imported_name(fi.name, &fi.attrs).to_string(); + ccx.item_symbols().borrow_mut().insert(fi.id, lname); + } } - } else { - ccx.sess().span_bug(sp, "expected bare rust function") + _ => {} } - - let llfn = declare::define_rust_fn(ccx, &sym[..], node_type).unwrap_or_else(|| { - ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym)); - }); - finish_register_fn(ccx, sym, node_id); - llfn } pub fn is_entry_fn(sess: &Session, node_id: ast::NodeId) -> bool { @@ -2654,14 +2364,15 @@ pub fn create_entry_wrapper(ccx: &CrateContext, sp: Span, main_llfn: ValueRef) { use_start_lang_item: bool) { let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()], &ccx.int_type()); - let llfn = declare::define_cfn(ccx, "main", llfty, ccx.tcx().mk_nil()).unwrap_or_else(|| { + if declare::get_defined_value(ccx, "main").is_some() { // FIXME: We should be smart and show a better diagnostic here. ccx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") .help("did you use #[no_mangle] on `fn main`? 
Use #[start] instead") .emit(); ccx.sess().abort_if_errors(); panic!(); - }); + } + let llfn = declare::declare_cfn(ccx, "main", llfty); let llbb = unsafe { llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, "top\0".as_ptr() as *const _) @@ -2675,17 +2386,10 @@ pub fn create_entry_wrapper(ccx: &CrateContext, sp: Span, main_llfn: ValueRef) { let (start_fn, args) = if use_start_lang_item { let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) { Ok(id) => id, - Err(s) => { - ccx.sess().fatal(&s[..]); - } - }; - let start_fn = if let Some(start_node_id) = ccx.tcx() - .map - .as_local_node_id(start_def_id) { - get_item_val(ccx, start_node_id) - } else { - get_extern_fn(ccx, start_def_id).val + Err(s) => ccx.sess().fatal(&s) }; + let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); + let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx).val; let args = { let opaque_rust_main = llvm::LLVMBuildPointerCast(bld, @@ -2715,11 +2419,11 @@ pub fn create_entry_wrapper(ccx: &CrateContext, sp: Span, main_llfn: ValueRef) { } } -fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - id: ast::NodeId, - ty: Ty<'tcx>, - attrs: &[ast::Attribute]) - -> String { +pub fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + id: ast::NodeId, + ty: Ty<'tcx>, + attrs: &[ast::Attribute]) + -> String { match ccx.external_srcs().borrow().get(&id) { Some(&did) => { let sym = ccx.sess().cstore.item_symbol(did); @@ -2750,180 +2454,18 @@ fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } } -fn contains_null(s: &str) -> bool { - s.bytes().any(|b| b == 0) -} - -pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef { - debug!("get_item_val(id=`{}`)", id); - - if let Some(v) = ccx.item_vals().borrow().get(&id).cloned() { - return v; - } - - let item = ccx.tcx().map.get(id); - debug!("get_item_val: id={} item={:?}", id, item); - let val = match item { - hir_map::NodeItem(i) => { - let ty = ccx.tcx().node_id_to_type(i.id); - let sym = || exported_name(ccx, id, ty, &i.attrs); - - let v = match i.node { - hir::ItemStatic(..) => { - // If this static came from an external crate, then - // we need to get the symbol from metadata instead of - // using the current crate's name/version - // information in the hash of the symbol - let sym = sym(); - debug!("making {}", sym); - - // Create the global before evaluating the initializer; - // this is necessary to allow recursive statics. 
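A small user-level example (illustrative only, hypothetical names) of the recursive statics this ordering exists for: the initializer takes the address of the very global being defined, so the symbol must be declared before the initializer is translated.

struct Node {
    next: &'static Node,
}

// `TIED` refers to itself, so its LLVM global has to exist before the
// initializer expression `Node { next: &TIED }` can be translated.
static TIED: Node = Node { next: &TIED };

fn main() {
    // Follow the self-reference once to show the value is usable.
    let n: &'static Node = TIED.next;
    let _ = n;
}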
- let llty = type_of(ccx, ty); - let g = declare::define_global(ccx, &sym[..], llty).unwrap_or_else(|| { - ccx.sess() - .span_fatal(i.span, &format!("symbol `{}` is already defined", sym)) - }); - - ccx.item_symbols().borrow_mut().insert(i.id, sym); - g - } - - hir::ItemFn(_, _, _, abi, _, _) => { - let sym = sym(); - let llfn = if abi == Abi::Rust { - register_fn(ccx, i.span, sym, i.id, ty) - } else { - foreign::register_rust_fn_with_foreign_abi(ccx, i.span, sym, i.id) - }; - attributes::from_fn_attrs(ccx, &i.attrs, llfn); - llfn - } - - _ => ccx.sess().bug("get_item_val: weird result in table"), - }; - - v - } - - hir_map::NodeTraitItem(trait_item) => { - debug!("get_item_val(): processing a NodeTraitItem"); - match trait_item.node { - hir::MethodTraitItem(_, Some(_)) => { - register_method(ccx, id, &trait_item.attrs, trait_item.span) - } - _ => { - ccx.sess().span_bug(trait_item.span, - "unexpected variant: trait item other than a provided \ - method in get_item_val()"); - } - } +pub fn imported_name(name: ast::Name, attrs: &[ast::Attribute]) -> InternedString { + match attr::first_attr_value_str_by_name(attrs, "link_name") { + Some(ln) => ln.clone(), + None => match weak_lang_items::link_name(attrs) { + Some(name) => name, + None => name.as_str(), } - - hir_map::NodeImplItem(impl_item) => { - match impl_item.node { - hir::ImplItemKind::Method(..) => { - register_method(ccx, id, &impl_item.attrs, impl_item.span) - } - _ => { - ccx.sess().span_bug(impl_item.span, - "unexpected variant: non-method impl item in \ - get_item_val()"); - } - } - } - - hir_map::NodeForeignItem(ni) => { - match ni.node { - hir::ForeignItemFn(..) => { - let abi = ccx.tcx().map.get_foreign_abi(id); - let ty = ccx.tcx().node_id_to_type(ni.id); - let name = foreign::link_name(&ni); - foreign::register_foreign_item_fn(ccx, abi, ty, &name, &ni.attrs) - } - hir::ForeignItemStatic(..) => { - foreign::register_static(ccx, &ni) - } - } - } - - hir_map::NodeVariant(ref v) => { - let llfn; - let fields = if v.node.data.is_struct() { - ccx.sess().bug("struct variant kind unexpected in get_item_val") - } else { - v.node.data.fields() - }; - assert!(!fields.is_empty()); - let ty = ccx.tcx().node_id_to_type(id); - let parent = ccx.tcx().map.get_parent(id); - let enm = ccx.tcx().map.expect_item(parent); - let sym = exported_name(ccx, id, ty, &enm.attrs); - - llfn = match enm.node { - hir::ItemEnum(_, _) => { - register_fn(ccx, (*v).span, sym, id, ty) - } - _ => ccx.sess().bug("NodeVariant, shouldn't happen"), - }; - attributes::inline(llfn, attributes::InlineAttr::Hint); - llfn - } - - hir_map::NodeStructCtor(struct_def) => { - // Only register the constructor if this is a tuple-like struct. - let ctor_id = if struct_def.is_struct() { - ccx.sess().bug("attempt to register a constructor of a non-tuple-like struct") - } else { - struct_def.id() - }; - let parent = ccx.tcx().map.get_parent(id); - let struct_item = ccx.tcx().map.expect_item(parent); - let ty = ccx.tcx().node_id_to_type(ctor_id); - let sym = exported_name(ccx, id, ty, &struct_item.attrs); - let llfn = register_fn(ccx, struct_item.span, sym, ctor_id, ty); - attributes::inline(llfn, attributes::InlineAttr::Hint); - llfn - } - - ref variant => { - ccx.sess().bug(&format!("get_item_val(): unexpected variant: {:?}", variant)) - } - }; - - // All LLVM globals and functions are initially created as external-linkage - // declarations. If `trans_item`/`trans_fn` later turns the declaration - // into a definition, it adjusts the linkage then (using `update_linkage`). 
- // - // The exception is foreign items, which have their linkage set inside the - // call to `foreign::register_*` above. We don't touch the linkage after - // that (`foreign::trans_foreign_mod` doesn't adjust the linkage like the - // other item translation functions do). - - ccx.item_vals().borrow_mut().insert(id, val); - val + } } -fn register_method(ccx: &CrateContext, - id: ast::NodeId, - attrs: &[ast::Attribute], - span: Span) - -> ValueRef { - let mty = ccx.tcx().node_id_to_type(id); - - let sym = exported_name(ccx, id, mty, &attrs); - - if let ty::TyFnDef(_, _, ref f) = mty.sty { - let llfn = if f.abi == Abi::Rust || f.abi == Abi::RustCall { - register_fn(ccx, span, sym, id, mty) - } else { - foreign::register_rust_fn_with_foreign_abi(ccx, span, sym, id) - }; - attributes::from_fn_attrs(ccx, &attrs, llfn); - return llfn; - } else { - ccx.sess().span_bug(span, "expected bare rust function"); - } +fn contains_null(s: &str) -> bool { + s.bytes().any(|b| b == 0) } pub fn write_metadata<'a, 'tcx>(cx: &SharedCrateContext<'a, 'tcx>, diff --git a/src/librustc_trans/trans/build.rs b/src/librustc_trans/trans/build.rs index 2b4fcf436cbc0..22536f2dc434d 100644 --- a/src/librustc_trans/trans/build.rs +++ b/src/librustc_trans/trans/build.rs @@ -12,7 +12,7 @@ #![allow(non_snake_case)] use llvm; -use llvm::{CallConv, AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect, AttrBuilder}; +use llvm::{AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{Opcode, IntPredicate, RealPredicate}; use llvm::{ValueRef, BasicBlockRef}; use trans::common::*; @@ -20,6 +20,7 @@ use syntax::codemap::Span; use trans::builder::Builder; use trans::type_::Type; +use trans::value::Value; use trans::debuginfo::DebugLoc; use libc::{c_uint, c_char}; @@ -138,7 +139,6 @@ pub fn Invoke(cx: Block, args: &[ValueRef], then: BasicBlockRef, catch: BasicBlockRef, - attributes: Option, debug_loc: DebugLoc) -> ValueRef { if cx.unreachable.get() { @@ -146,12 +146,14 @@ pub fn Invoke(cx: Block, } check_not_terminated(cx); terminate(cx, "Invoke"); - debug!("Invoke({} with arguments ({}))", - cx.val_to_string(fn_), - args.iter().map(|a| cx.val_to_string(*a)).collect::>().join(", ")); + debug!("Invoke({:?} with arguments ({}))", + Value(fn_), + args.iter().map(|a| { + format!("{:?}", Value(*a)) + }).collect::>().join(", ")); debug_loc.apply(cx.fcx); let bundle = cx.lpad().and_then(|b| b.bundle()); - B(cx).invoke(fn_, args, then, catch, bundle, attributes) + B(cx).invoke(fn_, args, then, catch, bundle) } pub fn Unreachable(cx: Block) { @@ -908,7 +910,6 @@ pub fn InlineAsmCall(cx: Block, asm: *const c_char, cons: *const c_char, pub fn Call(cx: Block, fn_: ValueRef, args: &[ValueRef], - attributes: Option, debug_loc: DebugLoc) -> ValueRef { if cx.unreachable.get() { @@ -916,22 +917,7 @@ pub fn Call(cx: Block, } debug_loc.apply(cx.fcx); let bundle = cx.lpad.get().and_then(|b| b.bundle()); - B(cx).call(fn_, args, bundle, attributes) -} - -pub fn CallWithConv(cx: Block, - fn_: ValueRef, - args: &[ValueRef], - conv: CallConv, - attributes: Option, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _UndefReturn(cx, fn_); - } - debug_loc.apply(cx.fcx); - let bundle = cx.lpad.get().and_then(|b| b.bundle()); - B(cx).call_with_conv(fn_, args, conv, bundle, attributes) + B(cx).call(fn_, args, bundle) } pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) { diff --git a/src/librustc_trans/trans/builder.rs b/src/librustc_trans/trans/builder.rs index 
3b4a67cb08979..7f8e8393e8c4e 100644 --- a/src/librustc_trans/trans/builder.rs +++ b/src/librustc_trans/trans/builder.rs @@ -11,13 +11,14 @@ #![allow(dead_code)] // FFI wrappers use llvm; -use llvm::{CallConv, AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect, AttrBuilder}; +use llvm::{AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef}; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef}; use trans::base; use trans::common::*; use trans::machine::llalign_of_pref; use trans::type_::Type; +use trans::value::Value; use util::nodemap::FnvHashMap; use libc::{c_uint, c_char}; @@ -164,33 +165,28 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { args: &[ValueRef], then: BasicBlockRef, catch: BasicBlockRef, - bundle: Option<&OperandBundleDef>, - attributes: Option) + bundle: Option<&OperandBundleDef>) -> ValueRef { self.count_insn("invoke"); - debug!("Invoke {} with args ({})", - self.ccx.tn().val_to_string(llfn), + debug!("Invoke {:?} with args ({})", + Value(llfn), args.iter() - .map(|&v| self.ccx.tn().val_to_string(v)) + .map(|&v| format!("{:?}", Value(v))) .collect::>() .join(", ")); let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _); unsafe { - let v = llvm::LLVMRustBuildInvoke(self.llbuilder, - llfn, - args.as_ptr(), - args.len() as c_uint, - then, - catch, - bundle, - noname()); - if let Some(a) = attributes { - a.apply_callsite(v); - } - v + llvm::LLVMRustBuildInvoke(self.llbuilder, + llfn, + args.as_ptr(), + args.len() as c_uint, + then, + catch, + bundle, + noname()) } } @@ -497,9 +493,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } pub fn store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef { - debug!("Store {} -> {}", - self.ccx.tn().val_to_string(val), - self.ccx.tn().val_to_string(ptr)); + debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); assert!(!self.llbuilder.is_null()); self.count_insn("store"); unsafe { @@ -508,9 +502,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef { - debug!("Store {} -> {}", - self.ccx.tn().val_to_string(val), - self.ccx.tn().val_to_string(ptr)); + debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); assert!(!self.llbuilder.is_null()); self.count_insn("store.volatile"); unsafe { @@ -521,9 +513,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { - debug!("Store {} -> {}", - self.ccx.tn().val_to_string(val), - self.ccx.tn().val_to_string(ptr)); + debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); self.count_insn("store.atomic"); unsafe { let ty = Type::from_ref(llvm::LLVMTypeOf(ptr)); @@ -780,7 +770,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { comment_text.as_ptr(), noname(), False, False) }; - self.call(asm, &[], None, None); + self.call(asm, &[], None); } } @@ -796,28 +786,27 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { else { llvm::False }; let argtys = inputs.iter().map(|v| { - debug!("Asm Input Type: {}", self.ccx.tn().val_to_string(*v)); + debug!("Asm Input Type: {:?}", Value(*v)); val_ty(*v) }).collect::>(); - debug!("Asm Output Type: {}", self.ccx.tn().type_to_string(output)); + debug!("Asm Output Type: {:?}", output); let fty = Type::func(&argtys[..], &output); unsafe { let v = llvm::LLVMInlineAsm( fty.to_ref(), asm, cons, volatile, alignstack, dia as c_uint); - self.call(v, inputs, None, None) + self.call(v, inputs, None) } } pub fn call(&self, llfn: ValueRef, args: &[ValueRef], - bundle: Option<&OperandBundleDef>, - 
attributes: Option) -> ValueRef { + bundle: Option<&OperandBundleDef>) -> ValueRef { self.count_insn("call"); - debug!("Call {} with args ({})", - self.ccx.tn().val_to_string(llfn), + debug!("Call {:?} with args ({})", + Value(llfn), args.iter() - .map(|&v| self.ccx.tn().val_to_string(v)) + .map(|&v| format!("{:?}", Value(v))) .collect::>() .join(", ")); @@ -838,11 +827,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { if expected_ty != actual_ty { self.ccx.sess().bug( &format!( - "Type mismatch in function call of {}. Expected {} for param {}, got {}", - self.ccx.tn().val_to_string(llfn), - self.ccx.tn().type_to_string(expected_ty), - i, - self.ccx.tn().type_to_string(actual_ty))); + "Type mismatch in function call of {:?}. \ + Expected {:?} for param {}, got {:?}", + Value(llfn), + expected_ty, i, actual_ty)); } } @@ -850,26 +838,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _); unsafe { - let v = llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(), - args.len() as c_uint, bundle, - noname()); - if let Some(a) = attributes { - a.apply_callsite(v); - } - v + llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(), + args.len() as c_uint, bundle, noname()) } } - pub fn call_with_conv(&self, llfn: ValueRef, args: &[ValueRef], - conv: CallConv, - bundle: Option<&OperandBundleDef>, - attributes: Option) -> ValueRef { - self.count_insn("callwithconv"); - let v = self.call(llfn, args, bundle, attributes); - llvm::SetInstructionCallConv(v, conv); - v - } - pub fn select(&self, cond: ValueRef, then_val: ValueRef, else_val: ValueRef) -> ValueRef { self.count_insn("select"); unsafe { diff --git a/src/librustc_trans/trans/cabi.rs b/src/librustc_trans/trans/cabi.rs deleted file mode 100644 index 8c10be44ffde3..0000000000000 --- a/src/librustc_trans/trans/cabi.rs +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
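The file removed here defined the old per-target interface, where each backend built a fresh `FnType` from raw LLVM types and returned it; throughout the rest of the patch the targets instead adjust a prepared `trans::abi::FnType` in place via `cast`, `pad`, `attrs` and `make_indirect`. A compilable toy sketch of that new shape (all types and classification rules below are simplified stand-ins, not the real `trans::abi` definitions):

#[derive(Clone, Default)]
struct ArgType {
    indirect: bool,              // pass/return via a hidden pointer
    cast: Option<&'static str>,  // stand-in for an LLVM coercion type
}

#[derive(Default)]
struct FnType {
    args: Vec<ArgType>,
    ret: ArgType,
}

impl ArgType {
    fn make_indirect(&mut self) {
        self.indirect = true;
    }
    fn is_ignore(&self) -> bool {
        false // simplified: nothing is ignored in this toy
    }
}

// New-style hook: mutate only what this (imaginary) target ABI requires,
// instead of rebuilding the whole FnType from scratch.
fn compute_abi_info(fty: &mut FnType) {
    // Toy rule: always return through an out-pointer...
    fty.ret.make_indirect();
    // ...and coerce every non-ignored argument to a 32-bit integer.
    for arg in &mut fty.args {
        if arg.is_ignore() {
            continue;
        }
        arg.cast = Some("i32");
    }
}

fn main() {
    let mut fty = FnType {
        args: vec![ArgType::default(); 2],
        ret: ArgType::default(),
    };
    compute_abi_info(&mut fty);
    assert!(fty.ret.indirect);
    assert!(fty.args.iter().all(|a| a.cast == Some("i32")));
}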
- -pub use self::ArgKind::*; - -use llvm::Attribute; -use std::option; -use trans::context::CrateContext; -use trans::cabi_x86; -use trans::cabi_x86_64; -use trans::cabi_x86_win64; -use trans::cabi_arm; -use trans::cabi_aarch64; -use trans::cabi_powerpc; -use trans::cabi_powerpc64; -use trans::cabi_mips; -use trans::cabi_asmjs; -use trans::type_::Type; - -#[derive(Clone, Copy, PartialEq)] -pub enum ArgKind { - /// Pass the argument directly using the normal converted - /// LLVM type or by coercing to another specified type - Direct, - /// Pass the argument indirectly via a hidden pointer - Indirect, - /// Ignore the argument (useful for empty struct) - Ignore, -} - -/// Information about how a specific C type -/// should be passed to or returned from a function -/// -/// This is borrowed from clang's ABIInfo.h -#[derive(Clone, Copy)] -pub struct ArgType { - pub kind: ArgKind, - /// Original LLVM type - pub ty: Type, - /// Coerced LLVM Type - pub cast: option::Option, - /// Dummy argument, which is emitted before the real argument - pub pad: option::Option, - /// LLVM attribute of argument - pub attr: option::Option -} - -impl ArgType { - pub fn direct(ty: Type, cast: option::Option, - pad: option::Option, - attr: option::Option) -> ArgType { - ArgType { - kind: Direct, - ty: ty, - cast: cast, - pad: pad, - attr: attr - } - } - - pub fn indirect(ty: Type, attr: option::Option) -> ArgType { - ArgType { - kind: Indirect, - ty: ty, - cast: option::Option::None, - pad: option::Option::None, - attr: attr - } - } - - pub fn ignore(ty: Type) -> ArgType { - ArgType { - kind: Ignore, - ty: ty, - cast: None, - pad: None, - attr: None, - } - } - - pub fn is_indirect(&self) -> bool { - return self.kind == Indirect; - } - - pub fn is_ignore(&self) -> bool { - return self.kind == Ignore; - } -} - -/// Metadata describing how the arguments to a native function -/// should be passed in order to respect the native ABI. -/// -/// I will do my best to describe this structure, but these -/// comments are reverse-engineered and may be inaccurate. -NDM -pub struct FnType { - /// The LLVM types of each argument. - pub arg_tys: Vec , - - /// LLVM return type. - pub ret_ty: ArgType, -} - -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - match &ccx.sess().target.target.arch[..] 
{ - "x86" => cabi_x86::compute_abi_info(ccx, atys, rty, ret_def), - "x86_64" => if ccx.sess().target.target.options.is_like_windows { - cabi_x86_win64::compute_abi_info(ccx, atys, rty, ret_def) - } else { - cabi_x86_64::compute_abi_info(ccx, atys, rty, ret_def) - }, - "aarch64" => cabi_aarch64::compute_abi_info(ccx, atys, rty, ret_def), - "arm" => { - let flavor = if ccx.sess().target.target.target_os == "ios" { - cabi_arm::Flavor::Ios - } else { - cabi_arm::Flavor::General - }; - cabi_arm::compute_abi_info(ccx, atys, rty, ret_def, flavor) - }, - "mips" => cabi_mips::compute_abi_info(ccx, atys, rty, ret_def), - "powerpc" => cabi_powerpc::compute_abi_info(ccx, atys, rty, ret_def), - "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, atys, rty, ret_def), - "asmjs" => cabi_asmjs::compute_abi_info(ccx, atys, rty, ret_def), - a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a) - ), - } -} diff --git a/src/librustc_trans/trans/cabi_aarch64.rs b/src/librustc_trans/trans/cabi_aarch64.rs index f2434ceee2b85..6eea8d472ba55 100644 --- a/src/librustc_trans/trans/cabi_aarch64.rs +++ b/src/librustc_trans/trans/cabi_aarch64.rs @@ -10,8 +10,8 @@ #![allow(non_upper_case_globals)] -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute}; -use trans::cabi::{FnType, ArgType}; +use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; +use trans::abi::{FnType, ArgType}; use trans::context::CrateContext; use trans::type_::Type; @@ -161,16 +161,15 @@ fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> { }) } -fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + if is_reg_ty(ret.ty) { + return; } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) { - let llty = Type::array(&base_ty, members); - return ArgType::direct(ty, Some(llty), None, None); + if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) { + ret.cast = Some(Type::array(&base_ty, members)); + return; } - let size = ty_size(ty); + let size = ty_size(ret.ty); if size <= 16 { let llty = if size <= 1 { Type::i8(ccx) @@ -183,21 +182,21 @@ fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { } else { Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) }; - return ArgType::direct(ty, Some(llty), None, None); + ret.cast = Some(llty); + return; } - ArgType::indirect(ty, Some(Attribute::StructRet)) + ret.make_indirect(ccx); } -fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { + if is_reg_ty(arg.ty) { + return; } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) { - let llty = Type::array(&base_ty, members); - return ArgType::direct(ty, Some(llty), None, None); + if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) { + arg.cast = Some(Type::array(&base_ty, members)); + return; } - let size = ty_size(ty); + let size = ty_size(arg.ty); if size <= 16 { let llty = if size == 0 { Type::array(&Type::i64(ccx), 0) @@ -212,9 +211,10 @@ fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType { } else { Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) }; - return 
ArgType::direct(ty, Some(llty), None, None); + arg.cast = Some(llty); + return; } - ArgType::indirect(ty, None) + arg.make_indirect(ccx); } fn is_reg_ty(ty: Type) -> bool { @@ -228,24 +228,13 @@ fn is_reg_ty(ty: Type) -> bool { } } -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let mut arg_tys = Vec::new(); - for &aty in atys { - let ty = classify_arg_ty(ccx, aty); - arg_tys.push(ty); +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); } - let ret_ty = if ret_def { - classify_ret_ty(ccx, rty) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg); + } } diff --git a/src/librustc_trans/trans/cabi_arm.rs b/src/librustc_trans/trans/cabi_arm.rs index c5116e738048d..14ca77836c28f 100644 --- a/src/librustc_trans/trans/cabi_arm.rs +++ b/src/librustc_trans/trans/cabi_arm.rs @@ -10,8 +10,8 @@ #![allow(non_upper_case_globals)] -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute}; -use trans::cabi::{FnType, ArgType}; +use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; +use trans::abi::{FnType, ArgType}; use trans::context::CrateContext; use trans::type_::Type; @@ -129,12 +129,11 @@ fn ty_size(ty: Type, align_fn: TyAlignFn) -> usize { } } -fn classify_ret_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType, align_fn: TyAlignFn) { + if is_reg_ty(ret.ty) { + return; } - let size = ty_size(ty, align_fn); + let size = ty_size(ret.ty, align_fn); if size <= 4 { let llty = if size <= 1 { Type::i8(ccx) @@ -143,24 +142,24 @@ fn classify_ret_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType } else { Type::i32(ccx) }; - return ArgType::direct(ty, Some(llty), None, None); + ret.cast = Some(llty); + return; } - ArgType::indirect(ty, Some(Attribute::StructRet)) + ret.make_indirect(ccx); } -fn classify_arg_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, align_fn: TyAlignFn) { + if is_reg_ty(arg.ty) { + return; } - let align = align_fn(ty); - let size = ty_size(ty, align_fn); + let align = align_fn(arg.ty); + let size = ty_size(arg.ty, align_fn); let llty = if align <= 4 { Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64) } else { Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64) }; - ArgType::direct(ty, Some(llty), None, None) + arg.cast = Some(llty); } fn is_reg_ty(ty: Type) -> bool { @@ -174,30 +173,18 @@ fn is_reg_ty(ty: Type) -> bool { } } -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool, - flavor: Flavor) -> FnType { +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) { let align_fn = match flavor { Flavor::General => general_ty_align as TyAlignFn, Flavor::Ios => ios_ty_align as TyAlignFn, }; - let mut arg_tys = Vec::new(); - for &aty in atys { - let ty = classify_arg_ty(ccx, aty, align_fn); - arg_tys.push(ty); + if !fty.ret.is_ignore() { + 
classify_ret_ty(ccx, &mut fty.ret, align_fn); } - let ret_ty = if ret_def { - classify_ret_ty(ccx, rty, align_fn) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg, align_fn); + } } diff --git a/src/librustc_trans/trans/cabi_asmjs.rs b/src/librustc_trans/trans/cabi_asmjs.rs index 3a4a6b9960e9c..c901442f48548 100644 --- a/src/librustc_trans/trans/cabi_asmjs.rs +++ b/src/librustc_trans/trans/cabi_asmjs.rs @@ -11,62 +11,45 @@ #![allow(non_upper_case_globals)] use llvm::{Struct, Array, Attribute}; -use trans::cabi::{FnType, ArgType}; +use trans::abi::{FnType, ArgType}; use trans::context::CrateContext; -use trans::type_::Type; // Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128 // See the https://github.com/kripken/emscripten-fastcomp-clang repository. // The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions. -fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { - match ty.kind() { +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + match ret.ty.kind() { Struct => { - let field_types = ty.field_types(); + let field_types = ret.ty.field_types(); if field_types.len() == 1 { - ArgType::direct(ty, Some(field_types[0]), None, None) + ret.cast = Some(field_types[0]); } else { - ArgType::indirect(ty, Some(Attribute::StructRet)) + ret.make_indirect(ccx); } - }, + } Array => { - ArgType::indirect(ty, Some(Attribute::StructRet)) - }, - _ => { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) + ret.make_indirect(ccx); } + _ => {} } } -fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if ty.is_aggregate() { - ArgType::indirect(ty, Some(Attribute::ByVal)) - } else { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { + if arg.ty.is_aggregate() { + arg.make_indirect(ccx); + arg.attrs.set(Attribute::ByVal); } } -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let mut arg_tys = Vec::new(); - for &aty in atys { - let ty = classify_arg_ty(ccx, aty); - arg_tys.push(ty); +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); } - let ret_ty = if ret_def { - classify_ret_ty(ccx, rty) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg); + } } diff --git a/src/librustc_trans/trans/cabi_mips.rs b/src/librustc_trans/trans/cabi_mips.rs index bcffb238f5950..0d5e81be92142 100644 --- a/src/librustc_trans/trans/cabi_mips.rs +++ b/src/librustc_trans/trans/cabi_mips.rs @@ -13,8 +13,8 @@ use libc::c_uint; use std::cmp; use llvm; -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute}; -use trans::cabi::{ArgType, FnType}; +use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; +use trans::abi::{ArgType, FnType}; use trans::context::CrateContext; use trans::type_::Type; @@ -86,34 +86,18 @@ fn ty_size(ty: Type) -> usize { } } -fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { 
Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) - } else { - ArgType::indirect(ty, Some(Attribute::StructRet)) - } -} - -fn classify_arg_ty(ccx: &CrateContext, ty: Type, offset: &mut usize) -> ArgType { +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) { let orig_offset = *offset; - let size = ty_size(ty) * 8; - let mut align = ty_align(ty); + let size = ty_size(arg.ty) * 8; + let mut align = ty_align(arg.ty); align = cmp::min(cmp::max(align, 4), 8); *offset = align_up_to(*offset, align); *offset += align_up_to(size, align * 8) / 8; - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) - } else { - ArgType::direct( - ty, - Some(struct_ty(ccx, ty)), - padding_ty(ccx, align, orig_offset), - None - ) + if !is_reg_ty(arg.ty) { + arg.cast = Some(struct_ty(ccx, arg.ty)); + arg.pad = padding_ty(ccx, align, orig_offset); } } @@ -161,27 +145,14 @@ fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { Type::struct_(ccx, &coerce_to_int(ccx, size), false) } -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let ret_ty = if ret_def { - classify_ret_ty(ccx, rty) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - let sret = ret_ty.is_indirect(); - let mut arg_tys = Vec::new(); - let mut offset = if sret { 4 } else { 0 }; - - for aty in atys { - let ty = classify_arg_ty(ccx, *aty, &mut offset); - arg_tys.push(ty); - }; +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() && !is_reg_ty(fty.ret.ty) { + fty.ret.make_indirect(ccx); + } - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; + let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg, &mut offset); + } } diff --git a/src/librustc_trans/trans/cabi_powerpc.rs b/src/librustc_trans/trans/cabi_powerpc.rs index 1bcc8fd6bbb90..9aa89a04d0a9a 100644 --- a/src/librustc_trans/trans/cabi_powerpc.rs +++ b/src/librustc_trans/trans/cabi_powerpc.rs @@ -10,8 +10,8 @@ use libc::c_uint; use llvm; -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Attribute}; -use trans::cabi::{FnType, ArgType}; +use llvm::{Integer, Pointer, Float, Double, Struct, Array}; +use trans::abi::{FnType, ArgType}; use trans::context::CrateContext; use trans::type_::Type; @@ -82,34 +82,18 @@ fn ty_size(ty: Type) -> usize { } } -fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) - } else { - ArgType::indirect(ty, Some(Attribute::StructRet)) - } -} - -fn classify_arg_ty(ccx: &CrateContext, ty: Type, offset: &mut usize) -> ArgType { +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) { let orig_offset = *offset; - let size = ty_size(ty) * 8; - let mut align = ty_align(ty); + let size = ty_size(arg.ty) * 8; + let mut align = ty_align(arg.ty); align = cmp::min(cmp::max(align, 4), 8); *offset = align_up_to(*offset, align); *offset += align_up_to(size, align * 8) / 8; - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) - } else { - ArgType::direct( - ty, - Some(struct_ty(ccx, ty)), - padding_ty(ccx, align, orig_offset), - None - ) + if !is_reg_ty(arg.ty) { + arg.cast = 
Some(struct_ty(ccx, arg.ty)); + arg.pad = padding_ty(ccx, align, orig_offset); } } @@ -156,27 +140,14 @@ fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { Type::struct_(ccx, &coerce_to_int(ccx, size), false) } -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let ret_ty = if ret_def { - classify_ret_ty(ccx, rty) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - let sret = ret_ty.is_indirect(); - let mut arg_tys = Vec::new(); - let mut offset = if sret { 4 } else { 0 }; - - for aty in atys { - let ty = classify_arg_ty(ccx, *aty, &mut offset); - arg_tys.push(ty); - }; +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() && !is_reg_ty(fty.ret.ty) { + fty.ret.make_indirect(ccx); + } - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; + let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg, &mut offset); + } } diff --git a/src/librustc_trans/trans/cabi_powerpc64.rs b/src/librustc_trans/trans/cabi_powerpc64.rs index f76bb4f9eebc6..340de235732f9 100644 --- a/src/librustc_trans/trans/cabi_powerpc64.rs +++ b/src/librustc_trans/trans/cabi_powerpc64.rs @@ -15,8 +15,8 @@ // Alignment of 128 bit types is not currently handled, this will // need to be fixed when PowerPC vector support is added. -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Attribute}; -use trans::cabi::{FnType, ArgType}; +use llvm::{Integer, Pointer, Float, Double, Struct, Array}; +use trans::abi::{FnType, ArgType}; use trans::context::CrateContext; use trans::type_::Type; @@ -151,22 +151,21 @@ fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> { }) } -fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + if is_reg_ty(ret.ty) { + return; } // The PowerPC64 big endian ABI doesn't return aggregates in registers if ccx.sess().target.target.target_endian == "big" { - return ArgType::indirect(ty, Some(Attribute::StructRet)) + ret.make_indirect(ccx); } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) { - let llty = Type::array(&base_ty, members); - return ArgType::direct(ty, Some(llty), None, None); + if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) { + ret.cast = Some(Type::array(&base_ty, members)); + return; } - let size = ty_size(ty); + let size = ty_size(ret.ty); if size <= 16 { let llty = if size <= 1 { Type::i8(ccx) @@ -179,28 +178,24 @@ fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { } else { Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) }; - return ArgType::direct(ty, Some(llty), None, None); + ret.cast = Some(llty); + return; } - ArgType::indirect(ty, Some(Attribute::StructRet)) + ret.make_indirect(ccx); } -fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { + if is_reg_ty(arg.ty) { + return; } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) { - let llty = Type::array(&base_ty, members); - return ArgType::direct(ty, Some(llty), None, None); + + if let Some((base_ty, members)) = 
is_homogenous_aggregate_ty(arg.ty) { + arg.cast = Some(Type::array(&base_ty, members)); + return; } - ArgType::direct( - ty, - Some(struct_ty(ccx, ty)), - None, - None - ) + arg.cast = Some(struct_ty(ccx, arg.ty)); } fn is_reg_ty(ty: Type) -> bool { @@ -236,24 +231,13 @@ fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { Type::struct_(ccx, &coerce_to_long(ccx, size), false) } -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let ret_ty = if ret_def { - classify_ret_ty(ccx, rty) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - let mut arg_tys = Vec::new(); - for &aty in atys { - let ty = classify_arg_ty(ccx, aty); - arg_tys.push(ty); - }; +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); + } - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg); + } } diff --git a/src/librustc_trans/trans/cabi_x86.rs b/src/librustc_trans/trans/cabi_x86.rs index 50a3095dea169..9e6c4d9af42ee 100644 --- a/src/librustc_trans/trans/cabi_x86.rs +++ b/src/librustc_trans/trans/cabi_x86.rs @@ -8,24 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use self::Strategy::*; use llvm::*; -use trans::cabi::{ArgType, FnType}; +use trans::abi::FnType; use trans::type_::Type; use super::common::*; use super::machine::*; -enum Strategy { RetValue(Type), RetPointer } -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let mut arg_tys = Vec::new(); - - let ret_ty; - if !ret_def { - ret_ty = ArgType::direct(Type::void(ccx), None, None, None); - } else if rty.kind() == Struct { +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() && fty.ret.ty.kind() == Struct { // Returning a structure. Most often, this will use // a hidden first argument. On some platforms, though, // small structs are returned as integers. 
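The strategy described in the comments above boils down to a size check; a standalone toy version (hypothetical helper name, not the patch's code) of how an OSX/Windows-like x86 target picks an integer return slot for small aggregates, matching the hunk that follows:

// Aggregates of exactly 1, 2, 4 or 8 bytes come back in an integer register;
// anything else is returned indirectly through a hidden sret pointer.
fn small_struct_return_cast(size_in_bytes: u64) -> Option<&'static str> {
    match size_in_bytes {
        1 => Some("i8"),
        2 => Some("i16"),
        4 => Some("i32"),
        8 => Some("i64"),
        _ => None, // caller falls back to make_indirect / sret
    }
}

fn main() {
    assert_eq!(small_struct_return_cast(4), Some("i32"));
    assert_eq!(small_struct_return_cast(12), None);
}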
@@ -33,53 +23,25 @@ pub fn compute_abi_info(ccx: &CrateContext, // Some links: // http://www.angelcode.com/dev/callconv/callconv.html // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp - let t = &ccx.sess().target.target; - let strategy = if t.options.is_like_osx || t.options.is_like_windows { - match llsize_of_alloc(ccx, rty) { - 1 => RetValue(Type::i8(ccx)), - 2 => RetValue(Type::i16(ccx)), - 4 => RetValue(Type::i32(ccx)), - 8 => RetValue(Type::i64(ccx)), - _ => RetPointer + if t.options.is_like_osx || t.options.is_like_windows { + match llsize_of_alloc(ccx, fty.ret.ty) { + 1 => fty.ret.cast = Some(Type::i8(ccx)), + 2 => fty.ret.cast = Some(Type::i16(ccx)), + 4 => fty.ret.cast = Some(Type::i32(ccx)), + 8 => fty.ret.cast = Some(Type::i64(ccx)), + _ => fty.ret.make_indirect(ccx) } } else { - RetPointer - }; - - match strategy { - RetValue(t) => { - ret_ty = ArgType::direct(rty, Some(t), None, None); - } - RetPointer => { - ret_ty = ArgType::indirect(rty, Some(Attribute::StructRet)); - } + fty.ret.make_indirect(ccx); } - } else { - let attr = if rty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ret_ty = ArgType::direct(rty, None, None, attr); } - for &t in atys { - let ty = match t.kind() { - Struct => { - let size = llsize_of_alloc(ccx, t); - if size == 0 { - ArgType::ignore(t) - } else { - ArgType::indirect(t, Some(Attribute::ByVal)) - } - } - _ => { - let attr = if t == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(t, None, None, attr) - } - }; - arg_tys.push(ty); + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + if arg.ty.kind() == Struct { + arg.make_indirect(ccx); + arg.attrs.set(Attribute::ByVal); + } } - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; } diff --git a/src/librustc_trans/trans/cabi_x86_64.rs b/src/librustc_trans/trans/cabi_x86_64.rs index 00d8fdad32de1..47d373f90431e 100644 --- a/src/librustc_trans/trans/cabi_x86_64.rs +++ b/src/librustc_trans/trans/cabi_x86_64.rs @@ -16,7 +16,7 @@ use self::RegClass::*; use llvm::{Integer, Pointer, Float, Double}; use llvm::{Struct, Array, Attribute, Vector}; -use trans::cabi::{ArgType, FnType}; +use trans::abi::{ArgType, FnType}; use trans::context::CrateContext; use trans::type_::Type; @@ -383,38 +383,31 @@ fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type { } } -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { fn x86_64_ty(ccx: &CrateContext, - ty: Type, + arg: &mut ArgType, is_mem_cls: F, - ind_attr: Attribute) - -> ArgType where - F: FnOnce(&[RegClass]) -> bool, + ind_attr: Option) + where F: FnOnce(&[RegClass]) -> bool { - if !ty.is_reg_ty() { - let cls = classify_ty(ty); + if !arg.ty.is_reg_ty() { + let cls = classify_ty(arg.ty); if is_mem_cls(&cls) { - ArgType::indirect(ty, Some(ind_attr)) + arg.make_indirect(ccx); + if let Some(attr) = ind_attr { + arg.attrs.set(attr); + } } else { - ArgType::direct(ty, - Some(llreg_ty(ccx, &cls)), - None, - None) + arg.cast = Some(llreg_ty(ccx, &cls)); } - } else { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) } } let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9 let mut sse_regs = 8; // XMM0-7 - let ret_ty = if ret_def { - x86_64_ty(ccx, rty, |cls| { + if !fty.ret.is_ignore() { + x86_64_ty(ccx, &mut fty.ret, |cls| { if cls.is_ret_bysret() { // `sret` parameter thus one less register available int_regs -= 1; @@ -422,14 
+415,12 @@ pub fn compute_abi_info(ccx: &CrateContext, } else { false } - }, Attribute::StructRet) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; + }, None); + } - let mut arg_tys = Vec::new(); - for t in atys { - let ty = x86_64_ty(ccx, *t, |cls| { + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + x86_64_ty(ccx, arg, |cls| { let needed_int = cls.iter().filter(|&&c| c == Int).count() as isize; let needed_sse = cls.iter().filter(|c| c.is_sse()).count() as isize; let in_mem = cls.is_pass_byval() || @@ -444,21 +435,15 @@ pub fn compute_abi_info(ccx: &CrateContext, sse_regs -= needed_sse; } in_mem - }, Attribute::ByVal); - arg_tys.push(ty); + }, Some(Attribute::ByVal)); // An integer, pointer, double or float parameter // thus the above closure passed to `x86_64_ty` won't // get called. - if t.kind() == Integer || t.kind() == Pointer { - int_regs -= 1; - } else if t.kind() == Double || t.kind() == Float { - sse_regs -= 1; + match arg.ty.kind() { + Integer | Pointer => int_regs -= 1, + Double | Float => sse_regs -= 1, + _ => {} } } - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; } diff --git a/src/librustc_trans/trans/cabi_x86_win64.rs b/src/librustc_trans/trans/cabi_x86_win64.rs index 120c8dc0384ce..43f72c454cf02 100644 --- a/src/librustc_trans/trans/cabi_x86_win64.rs +++ b/src/librustc_trans/trans/cabi_x86_win64.rs @@ -11,54 +11,29 @@ use llvm::*; use super::common::*; use super::machine::*; -use trans::cabi::{ArgType, FnType}; +use trans::abi::{ArgType, FnType}; use trans::type_::Type; // Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let mut arg_tys = Vec::new(); +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + let fixup = |a: &mut ArgType| { + if a.ty.kind() == Struct { + match llsize_of_alloc(ccx, a.ty) { + 1 => a.cast = Some(Type::i8(ccx)), + 2 => a.cast = Some(Type::i16(ccx)), + 4 => a.cast = Some(Type::i32(ccx)), + 8 => a.cast = Some(Type::i64(ccx)), + _ => a.make_indirect(ccx) + } + } + }; - let ret_ty; - if !ret_def { - ret_ty = ArgType::direct(Type::void(ccx), None, None, None); - } else if rty.kind() == Struct { - ret_ty = match llsize_of_alloc(ccx, rty) { - 1 => ArgType::direct(rty, Some(Type::i8(ccx)), None, None), - 2 => ArgType::direct(rty, Some(Type::i16(ccx)), None, None), - 4 => ArgType::direct(rty, Some(Type::i32(ccx)), None, None), - 8 => ArgType::direct(rty, Some(Type::i64(ccx)), None, None), - _ => ArgType::indirect(rty, Some(Attribute::StructRet)) - }; - } else { - let attr = if rty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ret_ty = ArgType::direct(rty, None, None, attr); + if !fty.ret.is_ignore() { + fixup(&mut fty.ret); } - - for &t in atys { - let ty = match t.kind() { - Struct => { - match llsize_of_alloc(ccx, t) { - 1 => ArgType::direct(t, Some(Type::i8(ccx)), None, None), - 2 => ArgType::direct(t, Some(Type::i16(ccx)), None, None), - 4 => ArgType::direct(t, Some(Type::i32(ccx)), None, None), - 8 => ArgType::direct(t, Some(Type::i64(ccx)), None, None), - _ => ArgType::indirect(t, None) - } - } - _ => { - let attr = if t == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(t, None, None, attr) - } - }; - arg_tys.push(ty); + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + fixup(arg); } - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; } diff --git a/src/librustc_trans/trans/callee.rs 
b/src/librustc_trans/trans/callee.rs index 05e5ac808d030..17d08cd6c2f29 100644 --- a/src/librustc_trans/trans/callee.rs +++ b/src/librustc_trans/trans/callee.rs @@ -25,15 +25,19 @@ use middle::def_id::DefId; use middle::infer; use middle::subst; use middle::subst::{Substs}; +use middle::traits; use rustc::front::map as hir_map; +use trans::abi::{Abi, FnType}; use trans::adt; +use trans::attributes; use trans::base; use trans::base::*; use trans::build::*; use trans::cleanup; use trans::cleanup::CleanupMethods; -use trans::common::{self, Block, Result, NodeIdAndSpan, ExprId, CrateContext, - ExprOrMethodCall, FunctionContext, MethodCallKey}; +use trans::closure; +use trans::common::{self, Block, Result, CrateContext, FunctionContext}; +use trans::common::{C_uint, C_undef}; use trans::consts; use trans::datum::*; use trans::debuginfo::DebugLoc; @@ -41,37 +45,40 @@ use trans::declare; use trans::expr; use trans::glue; use trans::inline; -use trans::foreign; use trans::intrinsic; +use trans::machine::{llalign_of_min, llsize_of_store}; use trans::meth; -use trans::monomorphize; +use trans::monomorphize::{self, Instance}; use trans::type_::Type; use trans::type_of; +use trans::value::Value; use trans::Disr; use middle::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc_front::hir; -use syntax::abi::Abi; -use syntax::ast; use syntax::codemap::DUMMY_SP; use syntax::errors; use syntax::ptr::P; -pub enum CalleeData<'tcx> { +use std::cmp; + +#[derive(Debug)] +pub enum CalleeData { /// Constructor for enum variant/tuple-like-struct. NamedTupleConstructor(Disr), /// Function pointer. Fn(ValueRef), - Intrinsic(ast::NodeId, &'tcx subst::Substs<'tcx>), + Intrinsic, /// Trait object found in the vtable at that index. Virtual(usize) } +#[derive(Debug)] pub struct Callee<'tcx> { - pub data: CalleeData<'tcx>, + pub data: CalleeData, pub ty: Ty<'tcx> } @@ -96,26 +103,19 @@ impl<'tcx> Callee<'tcx> { pub fn method<'blk>(bcx: Block<'blk, 'tcx>, method: ty::MethodCallee<'tcx>) -> Callee<'tcx> { let substs = bcx.tcx().mk_substs(bcx.fcx.monomorphize(&method.substs)); - let ty = bcx.fcx.monomorphize(&method.ty); - Callee::def(bcx.ccx(), method.def_id, substs, ty) + Callee::def(bcx.ccx(), method.def_id, substs) } /// Function or method definition. pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId, - substs: &'tcx subst::Substs<'tcx>, - ty: Ty<'tcx>) + substs: &'tcx subst::Substs<'tcx>) -> Callee<'tcx> { let tcx = ccx.tcx(); if substs.self_ty().is_some() { // Only trait methods can have a Self parameter. 
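For orientation, the user-level call shapes the resolution below has to cover (illustrative example only, made-up trait and names): a generic call monomorphizes to one concrete impl method, closures and fn items satisfy `Fn`-trait bounds through shims, and a trait-object call dispatches through its vtable, which is the `Virtual` case.

trait Greet {
    fn hello(&self) -> &'static str;
}

struct En;

impl Greet for En {
    fn hello(&self) -> &'static str {
        "hello"
    }
}

fn static_dispatch<T: Greet>(t: &T) -> &'static str {
    t.hello() // resolves to En::hello when T = En
}

fn through_fn_trait<F: Fn() -> &'static str>(f: F) -> &'static str {
    f()
}

fn dynamic_dispatch(t: &Greet) -> &'static str {
    t.hello() // looked up in the vtable at run time
}

fn hola() -> &'static str {
    "hola"
}

fn main() {
    assert_eq!(static_dispatch(&En), "hello");
    assert_eq!(dynamic_dispatch(&En), "hello");
    assert_eq!(through_fn_trait(|| "hey"), "hey"); // closure
    assert_eq!(through_fn_trait(hola), "hola");    // fn item / fn pointer
}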
- let method_item = tcx.impl_or_trait_item(def_id); - let trait_id = method_item.container().id(); - let trait_ref = ty::Binder(substs.to_trait_ref(tcx, trait_id)); - let vtbl = common::fulfill_obligation(ccx, DUMMY_SP, trait_ref); - return meth::callee_for_trait_impl(ccx, def_id, substs, - trait_id, ty, vtbl); + return Callee::trait_method(ccx, def_id, substs); } let maybe_node_id = inline::get_local_instance(ccx, def_id) @@ -123,34 +123,112 @@ impl<'tcx> Callee<'tcx> { let maybe_ast_node = maybe_node_id.and_then(|node_id| { tcx.map.find(node_id) }); - match maybe_ast_node { + + let data = match maybe_ast_node { Some(hir_map::NodeStructCtor(_)) => { - return Callee { - data: NamedTupleConstructor(Disr(0)), - ty: ty - }; + NamedTupleConstructor(Disr(0)) } Some(hir_map::NodeVariant(_)) => { let vinfo = common::inlined_variant_def(ccx, maybe_node_id.unwrap()); - assert_eq!(vinfo.kind(), ty::VariantKind::Tuple); + NamedTupleConstructor(Disr::from(vinfo.disr_val)) + } + Some(hir_map::NodeForeignItem(fi)) if { + let abi = tcx.map.get_foreign_abi(fi.id); + abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic + } => Intrinsic, - return Callee { - data: NamedTupleConstructor(Disr::from(vinfo.disr_val)), - ty: ty + _ => return Callee::ptr(get_fn(ccx, def_id, substs)) + }; + + Callee { + data: data, + ty: def_ty(tcx, def_id, substs) + } + } + + /// Trait method, which has to be resolved to an impl method. + pub fn trait_method<'a>(ccx: &CrateContext<'a, 'tcx>, + def_id: DefId, + substs: &'tcx subst::Substs<'tcx>) + -> Callee<'tcx> { + let tcx = ccx.tcx(); + + let method_item = tcx.impl_or_trait_item(def_id); + let trait_id = method_item.container().id(); + let trait_ref = ty::Binder(substs.to_trait_ref(tcx, trait_id)); + match common::fulfill_obligation(ccx, DUMMY_SP, trait_ref) { + traits::VtableImpl(vtable_impl) => { + let impl_did = vtable_impl.impl_def_id; + let mname = tcx.item_name(def_id); + // create a concatenated set of substitutions which includes + // those from the impl and those from the method: + let impl_substs = vtable_impl.substs.with_method_from(&substs); + let substs = tcx.mk_substs(impl_substs); + let mth = meth::get_impl_method(tcx, impl_did, substs, mname); + + // Translate the function, bypassing Callee::def. + // That is because default methods have the same ID as the + // trait method used to look up the impl method that ended + // up here, so calling Callee::def would infinitely recurse. 
+ Callee::ptr(get_fn(ccx, mth.method.def_id, mth.substs)) + } + traits::VtableClosure(vtable_closure) => { + // The substitutions should have no type parameters remaining + // after passing through fulfill_obligation + let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap(); + let llfn = closure::trans_closure_method(ccx, + vtable_closure.closure_def_id, + vtable_closure.substs, + trait_closure_kind); + + let method_ty = def_ty(tcx, def_id, substs); + let fn_ptr_ty = match method_ty.sty { + ty::TyFnDef(_, _, fty) => tcx.mk_ty(ty::TyFnPtr(fty)), + _ => unreachable!("expected fn item type, found {}", + method_ty) }; + Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty)) } - Some(hir_map::NodeForeignItem(fi)) => { - let abi = tcx.map.get_foreign_abi(fi.id); - if abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { - return Callee { - data: Intrinsic(fi.id, substs), - ty: ty - }; + traits::VtableFnPointer(fn_ty) => { + let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap(); + let llfn = trans_fn_pointer_shim(ccx, trait_closure_kind, fn_ty); + + let method_ty = def_ty(tcx, def_id, substs); + let fn_ptr_ty = match method_ty.sty { + ty::TyFnDef(_, _, fty) => tcx.mk_ty(ty::TyFnPtr(fty)), + _ => unreachable!("expected fn item type, found {}", + method_ty) + }; + Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty)) + } + traits::VtableObject(ref data) => { + Callee { + data: Virtual(traits::get_vtable_index_of_object_method( + tcx, data, def_id)), + ty: def_ty(tcx, def_id, substs) } } - _ => {} + vtable => { + unreachable!("resolved vtable bad vtable {:?} in trans", vtable); + } + } + } + + /// Get the abi::FnType for a direct call. Mainly deals with the fact + /// that a Virtual call doesn't take the vtable, like its shim does. + /// The extra argument types are for variadic (extern "C") functions. + pub fn direct_fn_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>, + extra_args: &[Ty<'tcx>]) -> FnType { + let abi = self.ty.fn_abi(); + let sig = ccx.tcx().erase_late_bound_regions(self.ty.fn_sig()); + let sig = infer::normalize_associated_type(ccx.tcx(), &sig); + let mut fn_ty = FnType::unadjusted(ccx, abi, &sig, extra_args); + if let Virtual(_) = self.data { + // Don't pass the vtable, it's not an argument of the virtual fn. + fn_ty.args[1].ignore(); } - Callee::ptr(trans_fn_ref_with_substs(ccx, def_id, Some(ty), substs)) + fn_ty.adjust_for_abi(ccx, abi, &sig); + fn_ty } /// This behemoth of a function translates function calls. Unfortunately, in @@ -175,51 +253,36 @@ impl<'tcx> Callee<'tcx> { /// Turn the callee into a function pointer. pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> Datum<'tcx, Rvalue> { + let fn_ptr_ty = match self.ty.sty { + ty::TyFnDef(_, _, f) => ccx.tcx().mk_ty(ty::TyFnPtr(f)), + _ => self.ty + }; match self.data { Fn(llfn) => { - let fn_ptr_ty = match self.ty.sty { - ty::TyFnDef(_, _, f) => ccx.tcx().mk_ty(ty::TyFnPtr(f)), - _ => self.ty - }; immediate_rvalue(llfn, fn_ptr_ty) } - Virtual(idx) => meth::trans_object_shim(ccx, self.ty, idx), + Virtual(idx) => { + let llfn = meth::trans_object_shim(ccx, self.ty, idx); + immediate_rvalue(llfn, fn_ptr_ty) + } NamedTupleConstructor(_) => match self.ty.sty { ty::TyFnDef(def_id, substs, _) => { - return trans_fn_ref_with_substs(ccx, def_id, Some(self.ty), substs); + return get_fn(ccx, def_id, substs); } _ => unreachable!("expected fn item type, found {}", self.ty) }, - Intrinsic(..) 
=> unreachable!("intrinsic {} getting reified", self.ty) + Intrinsic => unreachable!("intrinsic {} getting reified", self.ty) } } } -/// Translates a reference (with id `ref_id`) to the fn/method with id `def_id` into a function -/// pointer. This may require monomorphization or inlining. -pub fn trans_fn_ref<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - node: ExprOrMethodCall, - param_substs: &'tcx subst::Substs<'tcx>) - -> Datum<'tcx, Rvalue> { - let _icx = push_ctxt("trans_fn_ref"); - - let substs = common::node_id_substs(ccx, node, param_substs); - debug!("trans_fn_ref(def_id={:?}, node={:?}, substs={:?})", - def_id, - node, - substs); - let ref_ty = match node { - ExprId(0) => return trans_fn_ref_with_substs(ccx, def_id, None, substs), - ExprId(id) => ccx.tcx().node_id_to_type(id), - MethodCallKey(method_call) => { - ccx.tcx().tables.borrow().method_map[&method_call].ty - } - }; - let ref_ty = monomorphize::apply_param_substs(ccx.tcx(), - param_substs, - &ref_ty); - trans_fn_ref_with_substs(ccx, def_id, Some(ref_ty), substs) +/// Given a DefId and some Substs, produces the monomorphic item type. +fn def_ty<'tcx>(tcx: &TyCtxt<'tcx>, + def_id: DefId, + substs: &'tcx subst::Substs<'tcx>) + -> Ty<'tcx> { + let ty = tcx.lookup_item_type(def_id).ty; + monomorphize::apply_param_substs(tcx, substs, &ty) } /// Translates an adapter that implements the `Fn` trait for a fn @@ -251,6 +314,21 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( ty::ClosureKind::Fn | ty::ClosureKind::FnMut => true, ty::ClosureKind::FnOnce => false, }; + + let llfnpointer = match bare_fn_ty.sty { + ty::TyFnDef(def_id, substs, _) => { + // Function definitions have to be turned into a pointer. + let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val; + if !is_by_ref { + // A by-value fn item is ignored, so the shim has + // the same signature as the original function. 
+ return llfn; + } + Some(llfn) + } + _ => None + }; + let bare_fn_ty_maybe_ref = if is_by_ref { tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), bare_fn_ty) } else { @@ -285,58 +363,48 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( let sig = tcx.erase_late_bound_regions(sig); let sig = infer::normalize_associated_type(ccx.tcx(), &sig); let tuple_input_ty = tcx.mk_tup(sig.inputs.to_vec()); + let sig = ty::FnSig { + inputs: vec![bare_fn_ty_maybe_ref, + tuple_input_ty], + output: sig.output, + variadic: false + }; + let fn_ty = FnType::new(ccx, Abi::RustCall, &sig, &[]); let tuple_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy { unsafety: hir::Unsafety::Normal, abi: Abi::RustCall, - sig: ty::Binder(ty::FnSig { - inputs: vec![bare_fn_ty_maybe_ref, - tuple_input_ty], - output: sig.output, - variadic: false - }) + sig: ty::Binder(sig) }); debug!("tuple_fn_ty: {:?}", tuple_fn_ty); // let function_name = link::mangle_internal_name_by_type_and_seq(ccx, bare_fn_ty, "fn_pointer_shim"); - let llfn = declare::declare_internal_rust_fn(ccx, &function_name[..], tuple_fn_ty); + let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty); // let empty_substs = tcx.mk_substs(Substs::trans_empty()); let (block_arena, fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, - llfn, - ast::DUMMY_NODE_ID, - false, - sig.output, - empty_substs, - None, - &block_arena); - let mut bcx = init_function(&fcx, false, sig.output); + fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &block_arena); + let mut bcx = fcx.init(false, None); let llargs = get_params(fcx.llfn); - let self_idx = fcx.arg_offset(); - let llfnpointer = match bare_fn_ty.sty { - ty::TyFnDef(def_id, substs, _) => { - // Function definitions have to be turned into a pointer. - Callee::def(ccx, def_id, substs, bare_fn_ty).reify(ccx).val - } - + let self_idx = fcx.fn_ty.ret.is_indirect() as usize; + let llfnpointer = llfnpointer.unwrap_or_else(|| { // the first argument (`self`) will be ptr to the fn pointer - _ => if is_by_ref { + if is_by_ref { Load(bcx, llargs[self_idx]) } else { llargs[self_idx] } - }; + }); assert!(!fcx.needs_ret_allocas); let dest = fcx.llretslotptr.get().map(|_| - expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot")) + expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")) ); let callee = Callee { @@ -345,7 +413,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( }; bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx; - finish_fn(&fcx, bcx, sig.output, DebugLoc::None); + fcx.finish(bcx, DebugLoc::None); ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn); @@ -359,25 +427,14 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( /// /// - `ccx`: the crate context /// - `def_id`: def id of the fn or method item being referenced -/// - `node`: node id of the reference to the fn/method, if applicable. -/// This parameter may be zero; but, if so, the resulting value may not -/// have the right type, so it must be cast before being used. -/// - `ref_ty`: monotype of the reference to the fn/method, if applicable. -/// This parameter may be None; but, if so, the resulting value may not -/// have the right type, so it must be cast before being used. 
/// - `substs`: values for each of the fn/method's parameters -pub fn trans_fn_ref_with_substs<'a, 'tcx>( - ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - ref_ty: Option>, - substs: &'tcx subst::Substs<'tcx>) - -> Datum<'tcx, Rvalue> -{ - let _icx = push_ctxt("trans_fn_ref_with_substs"); +fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + def_id: DefId, + substs: &'tcx subst::Substs<'tcx>) + -> Datum<'tcx, Rvalue> { let tcx = ccx.tcx(); - debug!("trans_fn_ref_with_substs(def_id={:?}, ref_ty={:?}, substs={:?})", - def_id, ref_ty, substs); + debug!("get_fn(def_id={:?}, substs={:?})", def_id, substs); assert!(!substs.types.needs_infer()); assert!(!substs.types.has_escaping_regions()); @@ -407,7 +464,7 @@ pub fn trans_fn_ref_with_substs<'a, 'tcx>( let must_monomorphise = !substs.types.is_empty() || is_named_tuple_constructor(tcx, def_id); - debug!("trans_fn_ref_with_substs({:?}) must_monomorphise: {}", + debug!("get_fn({:?}) must_monomorphise: {}", def_id, must_monomorphise); // Create a monomorphic version of generic functions @@ -416,9 +473,7 @@ pub fn trans_fn_ref_with_substs<'a, 'tcx>( assert_eq!(def_id.krate, LOCAL_CRATE); let substs = tcx.mk_substs(substs.clone().erase_regions()); - let (mut val, fn_ty, must_cast) = - monomorphize::monomorphic_fn(ccx, def_id, substs); - let fn_ty = ref_ty.unwrap_or(fn_ty); + let (val, fn_ty) = monomorphize::monomorphic_fn(ccx, def_id, substs); let fn_ptr_ty = match fn_ty.sty { ty::TyFnDef(_, _, fty) => { // Create a fn pointer with the substituted signature. @@ -426,36 +481,72 @@ pub fn trans_fn_ref_with_substs<'a, 'tcx>( } _ => unreachable!("expected fn item type, found {}", fn_ty) }; - if must_cast && ref_ty.is_some() { - let llptrty = type_of::type_of(ccx, fn_ptr_ty); - if llptrty != common::val_ty(val) { - val = consts::ptrcast(val, llptrty); - } - } + assert_eq!(type_of::type_of(ccx, fn_ptr_ty), common::val_ty(val)); return immediate_rvalue(val, fn_ptr_ty); } // Find the actual function pointer. - let local_node = ccx.tcx().map.as_local_node_id(def_id); - let mut datum = if let Some(node_id) = local_node { - // Type scheme of the function item (may have type params) - let fn_type_scheme = tcx.lookup_item_type(def_id); - let fn_type = match fn_type_scheme.ty.sty { - ty::TyFnDef(_, _, fty) => { - // Create a fn pointer with the normalized signature. - tcx.mk_fn_ptr(infer::normalize_associated_type(tcx, fty)) + let ty = ccx.tcx().lookup_item_type(def_id).ty; + let fn_ptr_ty = match ty.sty { + ty::TyFnDef(_, _, fty) => { + // Create a fn pointer with the normalized signature. + tcx.mk_fn_ptr(infer::normalize_associated_type(tcx, fty)) + } + _ => unreachable!("expected fn item type, found {}", ty) + }; + + let instance = Instance::mono(ccx.tcx(), def_id); + if let Some(&llfn) = ccx.instances().borrow().get(&instance) { + return immediate_rvalue(llfn, fn_ptr_ty); + } + + let attrs; + let local_id = ccx.tcx().map.as_local_node_id(def_id); + let maybe_node = local_id.and_then(|id| tcx.map.find(id)); + let (sym, attrs, local_item) = match maybe_node { + Some(hir_map::NodeItem(&hir::Item { + ref attrs, id, span, node: hir::ItemFn(..), .. + })) | + Some(hir_map::NodeTraitItem(&hir::TraitItem { + ref attrs, id, span, node: hir::MethodTraitItem(_, Some(_)), .. + })) | + Some(hir_map::NodeImplItem(&hir::ImplItem { + ref attrs, id, span, node: hir::ImplItemKind::Method(..), .. 
+ })) => { + let sym = exported_name(ccx, id, ty, attrs); + + if declare::get_defined_value(ccx, &sym).is_some() { + ccx.sess().span_fatal(span, + &format!("symbol `{}` is already defined", sym)); } - _ => unreachable!("expected fn item type, found {}", - fn_type_scheme.ty) - }; - // Internal reference. - immediate_rvalue(get_item_val(ccx, node_id), fn_type) - } else { - // External reference. - get_extern_fn(ccx, def_id) + (sym, &attrs[..], Some(id)) + } + + Some(hir_map::NodeForeignItem(&hir::ForeignItem { + ref attrs, name, node: hir::ForeignItemFn(..), .. + })) => { + (imported_name(name, attrs).to_string(), &attrs[..], None) + } + + None => { + attrs = ccx.sess().cstore.item_attrs(def_id); + (ccx.sess().cstore.item_symbol(def_id), &attrs[..], None) + } + + ref variant => { + ccx.sess().bug(&format!("get_fn: unexpected variant: {:?}", variant)) + } }; + let llfn = declare::declare_fn(ccx, &sym, ty); + attributes::from_fn_attrs(ccx, attrs, llfn); + if let Some(id) = local_item { + // FIXME(eddyb) Doubt all extern fn should allow unwinding. + attributes::unwind(llfn, true); + ccx.item_symbols().borrow_mut().insert(id, sym); + } + // This is subtle and surprising, but sometimes we have to bitcast // the resulting fn pointer. The reason has to do with external // functions. If you have two crates that both bind the same C @@ -479,30 +570,23 @@ pub fn trans_fn_ref_with_substs<'a, 'tcx>( // This can occur on either a crate-local or crate-external // reference. It also occurs when testing libcore and in some // other weird situations. Annoying. - let llptrty = type_of::type_of(ccx, datum.ty); - if common::val_ty(datum.val) != llptrty { - debug!("trans_fn_ref_with_substs(): casting pointer!"); - datum.val = consts::ptrcast(datum.val, llptrty); + let llptrty = type_of::type_of(ccx, fn_ptr_ty); + let llfn = if common::val_ty(llfn) != llptrty { + debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); + consts::ptrcast(llfn, llptrty) } else { - debug!("trans_fn_ref_with_substs(): not casting pointer!"); - } + debug!("get_fn: not casting pointer!"); + llfn + }; + + ccx.instances().borrow_mut().insert(instance, llfn); - datum + immediate_rvalue(llfn, fn_ptr_ty) } // ______________________________________________________________________ // Translating calls -pub fn trans_lang_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - did: DefId, - args: &[ValueRef], - dest: Option, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - let datum = trans_fn_ref(bcx.ccx(), did, ExprId(0), bcx.fcx.param_substs); - Callee::ptr(datum).call(bcx, debug_loc, ArgVals(args), dest) -} - fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, debug_loc: DebugLoc, callee: Callee<'tcx>, @@ -519,33 +603,29 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let fcx = bcx.fcx; let ccx = fcx.ccx; - let (abi, ret_ty) = match callee.ty.sty { - ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => { - let sig = bcx.tcx().erase_late_bound_regions(&f.sig); - let sig = infer::normalize_associated_type(bcx.tcx(), &sig); - (f.abi, sig.output) + let abi = callee.ty.fn_abi(); + let sig = callee.ty.fn_sig(); + let output = bcx.tcx().erase_late_bound_regions(&sig.output()); + let output = infer::normalize_associated_type(bcx.tcx(), &output); + + let extra_args = match args { + ArgExprs(args) if abi != Abi::RustCall => { + args[sig.0.inputs.len()..].iter().map(|expr| { + common::expr_ty_adjusted(bcx, expr) + }).collect() } - _ => panic!("expected fn item or ptr in Callee::call") + _ => vec![] }; + let fn_ty = 
callee.direct_fn_type(ccx, &extra_args); - match callee.data { - Intrinsic(node, substs) => { + let mut callee = match callee.data { + Intrinsic => { assert!(abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic); assert!(dest.is_some()); - let call_info = match debug_loc { - DebugLoc::At(id, span) => NodeIdAndSpan { id: id, span: span }, - DebugLoc::None => { - bcx.sess().bug("No call info for intrinsic call?") - } - }; - - let arg_cleanup_scope = fcx.push_custom_cleanup_scope(); - return intrinsic::trans_intrinsic_call(bcx, node, callee.ty, - arg_cleanup_scope, args, - dest.unwrap(), - substs, - call_info); + return intrinsic::trans_intrinsic_call(bcx, callee.ty, &fn_ty, + args, dest.unwrap(), + debug_loc); } NamedTupleConstructor(disr) => { assert!(dest.is_some()); @@ -557,34 +637,25 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, dest.unwrap(), debug_loc); } - _ => {} - } - - // Intrinsics should not become actual functions. - // We trans them in place in `trans_intrinsic_call` - assert!(abi != Abi::RustIntrinsic && abi != Abi::PlatformIntrinsic); - - let is_rust_fn = abi == Abi::Rust || abi == Abi::RustCall; + f => f + }; // Generate a location to store the result. If the user does // not care about the result, just make a stack slot. let opt_llretslot = dest.and_then(|dest| match dest { expr::SaveIn(dst) => Some(dst), expr::Ignore => { - let ret_ty = match ret_ty { - ty::FnConverging(ret_ty) => ret_ty, - ty::FnDiverging => ccx.tcx().mk_nil() + let needs_drop = || match output { + ty::FnConverging(ret_ty) => bcx.fcx.type_needs_drop(ret_ty), + ty::FnDiverging => false }; - if !is_rust_fn || - type_of::return_uses_outptr(ccx, ret_ty) || - bcx.fcx.type_needs_drop(ret_ty) { + if fn_ty.ret.is_indirect() || fn_ty.ret.cast.is_some() || needs_drop() { // Push the out-pointer if we use an out-pointer for this // return type, otherwise push "undef". - if common::type_is_zero_size(ccx, ret_ty) { - let llty = type_of::type_of(ccx, ret_ty); - Some(common::C_undef(llty.ptr_to())) + if fn_ty.ret.is_ignore() { + Some(C_undef(fn_ty.ret.original_ty.ptr_to())) } else { - let llresult = alloc_ty(bcx, ret_ty, "__llret"); + let llresult = alloca(bcx, fn_ty.ret.original_ty, "__llret"); call_lifetime_start(bcx, llresult); Some(llresult) } @@ -594,128 +665,98 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } }); - let mut llresult = unsafe { - llvm::LLVMGetUndef(Type::nil(ccx).ptr_to().to_ref()) - }; + // If there no destination, return must be direct, with no cast. + if opt_llretslot.is_none() { + assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none()); + } - let arg_cleanup_scope = fcx.push_custom_cleanup_scope(); + let mut llargs = Vec::new(); - // The code below invokes the function, using either the Rust - // conventions (if it is a rust fn) or the native conventions - // (otherwise). The important part is that, when all is said - // and done, either the return value of the function will have been - // written in opt_llretslot (if it is Some) or `llresult` will be - // set appropriately (otherwise). - if is_rust_fn { - let mut llargs = Vec::new(); - - if let (ty::FnConverging(ret_ty), Some(mut llretslot)) = (ret_ty, opt_llretslot) { - if type_of::return_uses_outptr(ccx, ret_ty) { - let llformal_ret_ty = type_of::type_of(ccx, ret_ty).ptr_to(); - let llret_ty = common::val_ty(llretslot); - if llformal_ret_ty != llret_ty { - // this could happen due to e.g. 
subtyping - debug!("casting actual return type ({}) to match formal ({})", - bcx.llty_str(llret_ty), bcx.llty_str(llformal_ret_ty)); - llretslot = PointerCast(bcx, llretslot, llformal_ret_ty); - } - llargs.push(llretslot); - } + if fn_ty.ret.is_indirect() { + let mut llretslot = opt_llretslot.unwrap(); + if let Some(ty) = fn_ty.ret.cast { + llretslot = PointerCast(bcx, llretslot, ty.ptr_to()); } + llargs.push(llretslot); + } - let arg_start = llargs.len(); - - // Push the arguments. - bcx = trans_args(bcx, - args, - callee.ty, - &mut llargs, - cleanup::CustomScope(arg_cleanup_scope), - abi); + let arg_cleanup_scope = fcx.push_custom_cleanup_scope(); + bcx = trans_args(bcx, abi, &fn_ty, &mut callee, args, &mut llargs, + cleanup::CustomScope(arg_cleanup_scope)); + fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); + let llfn = match callee { + Fn(f) => f, + _ => unreachable!("expected fn pointer callee, found {:?}", callee) + }; - let datum = match callee.data { - Fn(f) => immediate_rvalue(f, callee.ty), - Virtual(idx) => { - // The data and vtable pointers were split by trans_arg_datum. - let vtable = llargs.remove(arg_start + 1); - meth::get_virtual_method(bcx, vtable, idx, callee.ty) - } - _ => unreachable!() - }; + let (llret, mut bcx) = base::invoke(bcx, llfn, &llargs, debug_loc); + if !bcx.unreachable.get() { + fn_ty.apply_attrs_callsite(llret); + } - // Invoke the actual rust fn and update bcx/llresult. - let (llret, b) = base::invoke(bcx, - datum.val, - &llargs[..], - datum.ty, - debug_loc); - bcx = b; - llresult = llret; - - // If the Rust convention for this type is return via - // the return value, copy it into llretslot. - match (opt_llretslot, ret_ty) { - (Some(llretslot), ty::FnConverging(ret_ty)) => { - if !type_of::return_uses_outptr(bcx.ccx(), ret_ty) && - !common::type_is_zero_size(bcx.ccx(), ret_ty) - { - store_ty(bcx, llret, llretslot, ret_ty) - } + // If the function we just called does not use an outpointer, + // store the result into the rust outpointer. Cast the outpointer + // type to match because some ABIs will use a different type than + // the Rust type. e.g., a {u32,u32} struct could be returned as + // u64. + if !fn_ty.ret.is_ignore() && !fn_ty.ret.is_indirect() { + if let Some(llforeign_ret_ty) = fn_ty.ret.cast { + let llrust_ret_ty = fn_ty.ret.original_ty; + let llretslot = opt_llretslot.unwrap(); + + // The actual return type is a struct, but the ABI + // adaptation code has cast it into some scalar type. The + // code that follows is the only reliable way I have + // found to do a transform like i64 -> {i32,i32}. + // Basically we dump the data onto the stack then memcpy it. + // + // Other approaches I tried: + // - Casting rust ret pointer to the foreign type and using Store + // is (a) unsafe if size of foreign type > size of rust type and + // (b) runs afoul of strict aliasing rules, yielding invalid + // assembly under -O (specifically, the store gets removed). + // - Truncating foreign type to correct integral type and then + // bitcasting to the struct type yields invalid cast errors. 
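The comment above describes the one transform that reliably turns a scalar ABI return (say i64) back into the Rust aggregate ({i32,i32}): spill the scalar to a stack slot, then memcpy out of it. A standalone sketch of the same shape in plain Rust (types and names are illustrative, not from the patch):

    use std::mem;
    use std::ptr;

    // Standalone illustration: recover a two-field aggregate from a scalar that
    // an ABI returned in one register, by spilling to a scratch slot and copying
    // bytes -- the same spill-then-memcpy idea as the shim code above.
    #[repr(C)]
    #[derive(Debug)]
    struct Pair { a: u32, b: u32 }

    fn scalar_to_pair(scalar: u64) -> Pair {
        let scratch = scalar;                 // the "__cast" stack slot
        let mut out = Pair { a: 0, b: 0 };    // the real return slot
        unsafe {
            ptr::copy_nonoverlapping(
                &scratch as *const u64 as *const u8,
                &mut out as *mut Pair as *mut u8,
                mem::size_of::<Pair>(),       // both are 8 bytes here
            );
        }
        out
    }

    fn main() {
        // Which field holds which half depends on target endianness; the point
        // is only that the bytes move through memory, not through a pointer cast.
        println!("{:?}", scalar_to_pair(0x0000_0002_0000_0001));
    }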
+ let llscratch = base::alloca(bcx, llforeign_ret_ty, "__cast"); + base::call_lifetime_start(bcx, llscratch); + Store(bcx, llret, llscratch); + let llscratch_i8 = PointerCast(bcx, llscratch, Type::i8(ccx).ptr_to()); + let llretptr_i8 = PointerCast(bcx, llretslot, Type::i8(ccx).ptr_to()); + let llrust_size = llsize_of_store(ccx, llrust_ret_ty); + let llforeign_align = llalign_of_min(ccx, llforeign_ret_ty); + let llrust_align = llalign_of_min(ccx, llrust_ret_ty); + let llalign = cmp::min(llforeign_align, llrust_align); + debug!("llrust_size={}", llrust_size); + + if !bcx.unreachable.get() { + base::call_memcpy(&B(bcx), llretptr_i8, llscratch_i8, + C_uint(ccx, llrust_size), llalign as u32); } - (_, _) => {} + base::call_lifetime_end(bcx, llscratch); + } else if let Some(llretslot) = opt_llretslot { + base::store_ty(bcx, llret, llretslot, output.unwrap()); } - } else { - // Lang items are the only case where dest is None, and - // they are always Rust fns. - assert!(dest.is_some()); - - let mut llargs = Vec::new(); - let (llfn, arg_tys) = match (callee.data, &args) { - (Fn(f), &ArgExprs(a)) => { - (f, a.iter().map(|x| common::expr_ty_adjusted(bcx, &x)).collect()) - } - _ => panic!("expected fn ptr and arg exprs.") - }; - bcx = trans_args(bcx, - args, - callee.ty, - &mut llargs, - cleanup::CustomScope(arg_cleanup_scope), - abi); - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - - bcx = foreign::trans_native_call(bcx, - callee.ty, - llfn, - opt_llretslot.unwrap(), - &llargs[..], - arg_tys, - debug_loc); } fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_cleanup_scope); // If the caller doesn't care about the result of this fn call, // drop the temporary slot we made. - match (dest, opt_llretslot, ret_ty) { + match (dest, opt_llretslot, output) { (Some(expr::Ignore), Some(llretslot), ty::FnConverging(ret_ty)) => { // drop the value if it is not being saved. - bcx = glue::drop_ty(bcx, - llretslot, - ret_ty, - debug_loc); + bcx = glue::drop_ty(bcx, llretslot, ret_ty, debug_loc); call_lifetime_end(bcx, llretslot); } _ => {} } - if ret_ty == ty::FnDiverging { + if output == ty::FnDiverging { Unreachable(bcx); } - Result::new(bcx, llresult) + Result::new(bcx, llret) } pub enum CallArgs<'a, 'tcx> { @@ -741,20 +782,19 @@ pub enum CallArgs<'a, 'tcx> { fn trans_args_under_call_abi<'blk, 'tcx>( mut bcx: Block<'blk, 'tcx>, arg_exprs: &[P], - fn_ty: Ty<'tcx>, + callee: &mut CalleeData, + fn_ty: &FnType, llargs: &mut Vec, arg_cleanup_scope: cleanup::ScopeId) -> Block<'blk, 'tcx> { - let sig = bcx.tcx().erase_late_bound_regions(&fn_ty.fn_sig()); - let sig = infer::normalize_associated_type(bcx.tcx(), &sig); - let args = sig.inputs; + let mut arg_idx = 0; // Translate the `self` argument first. let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0])); bcx = trans_arg_datum(bcx, - args[0], arg_datum, + callee, fn_ty, &mut arg_idx, arg_cleanup_scope, llargs); @@ -781,8 +821,8 @@ fn trans_args_under_call_abi<'blk, 'tcx>( adt::trans_field_ptr(bcx, repr_ptr, srcval, Disr(0), i) }).to_expr_datum(); bcx = trans_arg_datum(bcx, - field_type, arg_datum, + callee, fn_ty, &mut arg_idx, arg_cleanup_scope, llargs); } @@ -796,64 +836,20 @@ fn trans_args_under_call_abi<'blk, 'tcx>( bcx } -fn trans_overloaded_call_args<'blk, 'tcx>( - mut bcx: Block<'blk, 'tcx>, - arg_exprs: Vec<&hir::Expr>, - fn_ty: Ty<'tcx>, - llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId) - -> Block<'blk, 'tcx> { - // Translate the `self` argument first. 
- let sig = bcx.tcx().erase_late_bound_regions(&fn_ty.fn_sig()); - let sig = infer::normalize_associated_type(bcx.tcx(), &sig); - let arg_tys = sig.inputs; - - let arg_datum = unpack_datum!(bcx, expr::trans(bcx, arg_exprs[0])); - bcx = trans_arg_datum(bcx, - arg_tys[0], - arg_datum, - arg_cleanup_scope, - llargs); - - // Now untuple the rest of the arguments. - let tuple_type = arg_tys[1]; - match tuple_type.sty { - ty::TyTuple(ref field_types) => { - for (i, &field_type) in field_types.iter().enumerate() { - let arg_datum = - unpack_datum!(bcx, expr::trans(bcx, arg_exprs[i + 1])); - bcx = trans_arg_datum(bcx, - field_type, - arg_datum, - arg_cleanup_scope, - llargs); - } - } - _ => { - bcx.sess().span_bug(arg_exprs[0].span, - "argument to `.call()` wasn't a tuple?!") - } - }; - - bcx -} - -pub fn trans_args<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>, +pub fn trans_args<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + abi: Abi, + fn_ty: &FnType, + callee: &mut CalleeData, args: CallArgs<'a, 'tcx>, - fn_ty: Ty<'tcx>, llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId, - abi: Abi) + arg_cleanup_scope: cleanup::ScopeId) -> Block<'blk, 'tcx> { debug!("trans_args(abi={})", abi); let _icx = push_ctxt("trans_args"); - let sig = cx.tcx().erase_late_bound_regions(&fn_ty.fn_sig()); - let sig = infer::normalize_associated_type(cx.tcx(), &sig); - let arg_tys = sig.inputs; - let variadic = sig.variadic; - let mut bcx = cx; + let mut bcx = bcx; + let mut arg_idx = 0; // First we figure out the caller's view of the types of the arguments. // This will be needed if this is a generic call, because the callee has @@ -863,79 +859,90 @@ pub fn trans_args<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>, if abi == Abi::RustCall { // This is only used for direct calls to the `call`, // `call_mut` or `call_once` functions. 
- return trans_args_under_call_abi(cx, - arg_exprs, - fn_ty, + return trans_args_under_call_abi(bcx, + arg_exprs, callee, fn_ty, llargs, arg_cleanup_scope) } - let num_formal_args = arg_tys.len(); - for (i, arg_expr) in arg_exprs.iter().enumerate() { - let arg_ty = if i >= num_formal_args { - assert!(variadic); - common::expr_ty_adjusted(cx, &arg_expr) - } else { - arg_tys[i] - }; - + for arg_expr in arg_exprs { let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_expr)); - bcx = trans_arg_datum(bcx, arg_ty, arg_datum, + bcx = trans_arg_datum(bcx, + arg_datum, + callee, fn_ty, &mut arg_idx, arg_cleanup_scope, llargs); } } ArgOverloadedCall(arg_exprs) => { - return trans_overloaded_call_args(cx, - arg_exprs, - fn_ty, - llargs, - arg_cleanup_scope) + for expr in arg_exprs { + let arg_datum = + unpack_datum!(bcx, expr::trans(bcx, expr)); + bcx = trans_arg_datum(bcx, + arg_datum, + callee, fn_ty, &mut arg_idx, + arg_cleanup_scope, + llargs); + } } ArgOverloadedOp(lhs, rhs) => { - assert!(!variadic); - - bcx = trans_arg_datum(bcx, arg_tys[0], lhs, + bcx = trans_arg_datum(bcx, lhs, + callee, fn_ty, &mut arg_idx, arg_cleanup_scope, llargs); if let Some(rhs) = rhs { - assert_eq!(arg_tys.len(), 2); - bcx = trans_arg_datum(bcx, arg_tys[1], rhs, + bcx = trans_arg_datum(bcx, rhs, + callee, fn_ty, &mut arg_idx, arg_cleanup_scope, llargs); - } else { - assert_eq!(arg_tys.len(), 1); } } ArgVals(vs) => { - llargs.extend_from_slice(vs); + match *callee { + Virtual(idx) => { + llargs.push(vs[0]); + + let fn_ptr = meth::get_virtual_method(bcx, vs[1], idx); + let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); + *callee = Fn(PointerCast(bcx, fn_ptr, llty)); + llargs.extend_from_slice(&vs[2..]); + } + _ => llargs.extend_from_slice(vs) + } } } bcx } -pub fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - formal_arg_ty: Ty<'tcx>, - arg_datum: Datum<'tcx, Expr>, - arg_cleanup_scope: cleanup::ScopeId, - llargs: &mut Vec) - -> Block<'blk, 'tcx> { +fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + arg_datum: Datum<'tcx, Expr>, + callee: &mut CalleeData, + fn_ty: &FnType, + next_idx: &mut usize, + arg_cleanup_scope: cleanup::ScopeId, + llargs: &mut Vec) + -> Block<'blk, 'tcx> { let _icx = push_ctxt("trans_arg_datum"); let mut bcx = bcx; - let ccx = bcx.ccx(); - debug!("trans_arg_datum({:?})", - formal_arg_ty); + debug!("trans_arg_datum({:?})", arg_datum); - let arg_datum_ty = arg_datum.ty; + let arg = &fn_ty.args[*next_idx]; + *next_idx += 1; + + // Fill padding with undef value, where applicable. + if let Some(ty) = arg.pad { + llargs.push(C_undef(ty)); + } - debug!(" arg datum: {}", arg_datum.to_string(bcx.ccx())); + // Determine whether we want a by-ref datum even if not appropriate. + let want_by_ref = arg.is_indirect() || arg.cast.is_some(); - let mut val = if common::type_is_fat_ptr(bcx.tcx(), arg_datum_ty) && - !bcx.fcx.type_needs_drop(arg_datum_ty) { - arg_datum.val + let fat_ptr = common::type_is_fat_ptr(bcx.tcx(), arg_datum.ty); + let (by_ref, val) = if fat_ptr && !bcx.fcx.type_needs_drop(arg_datum.ty) { + (true, arg_datum.val) } else { // Make this an rvalue, since we are going to be // passing ownership. @@ -944,33 +951,70 @@ pub fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Now that arg_datum is owned, get it into the appropriate // mode (ref vs value). 
- let arg_datum = unpack_datum!( - bcx, arg_datum.to_appropriate_datum(bcx)); + let arg_datum = unpack_datum!(bcx, if want_by_ref { + arg_datum.to_ref_datum(bcx) + } else { + arg_datum.to_appropriate_datum(bcx) + }); // Technically, ownership of val passes to the callee. // However, we must cleanup should we panic before the // callee is actually invoked. - arg_datum.add_clean(bcx.fcx, arg_cleanup_scope) + (arg_datum.kind.is_by_ref(), + arg_datum.add_clean(bcx.fcx, arg_cleanup_scope)) }; - if type_of::arg_is_indirect(ccx, formal_arg_ty) && formal_arg_ty != arg_datum_ty { - // this could happen due to e.g. subtyping - let llformal_arg_ty = type_of::type_of_explicit_arg(ccx, formal_arg_ty); - debug!("casting actual type ({}) to match formal ({})", - bcx.val_to_string(val), bcx.llty_str(llformal_arg_ty)); - debug!("Rust types: {:?}; {:?}", arg_datum_ty, - formal_arg_ty); - val = PointerCast(bcx, val, llformal_arg_ty); + if arg.is_ignore() { + return bcx; } - debug!("--- trans_arg_datum passing {}", bcx.val_to_string(val)); + debug!("--- trans_arg_datum passing {:?}", Value(val)); - if common::type_is_fat_ptr(bcx.tcx(), formal_arg_ty) { + if fat_ptr { + // Fat pointers should be passed without any transformations. + assert!(!arg.is_indirect() && arg.cast.is_none()); llargs.push(Load(bcx, expr::get_dataptr(bcx, val))); - llargs.push(Load(bcx, expr::get_meta(bcx, val))); - } else { - llargs.push(val); + + let info_arg = &fn_ty.args[*next_idx]; + *next_idx += 1; + assert!(!info_arg.is_indirect() && info_arg.cast.is_none()); + let info = Load(bcx, expr::get_meta(bcx, val)); + + if let Virtual(idx) = *callee { + // We have to grab the fn pointer from the vtable when + // handling the first argument, ensure that here. + assert_eq!(*next_idx, 2); + assert!(info_arg.is_ignore()); + let fn_ptr = meth::get_virtual_method(bcx, info, idx); + let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); + *callee = Fn(PointerCast(bcx, fn_ptr, llty)); + } else { + assert!(!info_arg.is_ignore()); + llargs.push(info); + } + return bcx; + } + + let mut val = val; + if by_ref && !arg.is_indirect() { + // Have to load the argument, maybe while casting it. + if arg.original_ty == Type::i1(bcx.ccx()) { + // We store bools as i8 so we need to truncate to i1. 
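The range assertion above relies on the fact that Rust stores a bool as one byte whose value is always 0 or 1, so only the low bit survives the truncation to i1. A tiny standalone check of that invariant (ordinary Rust, not backend code):

    // Standalone illustration: `bool` occupies one byte that is always 0 or 1,
    // which is what makes the [0, 2) range assertion and the i1 truncation sound.
    fn main() {
        assert_eq!(std::mem::size_of::<bool>(), 1);
        for &flag in &[false, true] {
            let byte = flag as u8;            // the i8 the backend loads
            assert!(byte < 2);                // the asserted range
            assert_eq!(byte & 1 == 1, flag);  // keeping only the low bit loses nothing
        }
    }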
+ val = LoadRangeAssert(bcx, val, 0, 2, llvm::False); + val = Trunc(bcx, val, arg.original_ty); + } else if let Some(ty) = arg.cast { + val = Load(bcx, PointerCast(bcx, val, ty.ptr_to())); + if !bcx.unreachable.get() { + let llalign = llalign_of_min(bcx.ccx(), arg.ty); + unsafe { + llvm::LLVMSetAlignment(val, llalign); + } + } + } else { + val = Load(bcx, val); + } } + llargs.push(val); bcx } diff --git a/src/librustc_trans/trans/cleanup.rs b/src/librustc_trans/trans/cleanup.rs index 416d951e4b5c5..89a126e79321b 100644 --- a/src/librustc_trans/trans/cleanup.rs +++ b/src/librustc_trans/trans/cleanup.rs @@ -129,7 +129,9 @@ use trans::debuginfo::{DebugLoc, ToDebugLoc}; use trans::glue; use middle::region; use trans::type_::Type; +use trans::value::Value; use middle::ty::{Ty, TyCtxt}; + use std::fmt; use syntax::ast; @@ -401,9 +403,8 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { ptr: val, }; - debug!("schedule_lifetime_end({:?}, val={})", - cleanup_scope, - self.ccx.tn().val_to_string(val)); + debug!("schedule_lifetime_end({:?}, val={:?})", + cleanup_scope, Value(val)); self.schedule_clean(cleanup_scope, drop as CleanupObj); } @@ -426,9 +427,9 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { drop_hint: drop_hint, }; - debug!("schedule_drop_mem({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}", + debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}", cleanup_scope, - self.ccx.tn().val_to_string(val), + Value(val), ty, drop.fill_on_drop, drop.skip_dtor); @@ -454,10 +455,10 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { drop_hint: drop_hint, }; - debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={:?}, + debug!("schedule_drop_and_fill_mem({:?}, val={:?}, ty={:?}, fill_on_drop={}, skip_dtor={}, has_drop_hint={})", cleanup_scope, - self.ccx.tn().val_to_string(val), + Value(val), ty, drop.fill_on_drop, drop.skip_dtor, @@ -488,9 +489,9 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { drop_hint: None, }; - debug!("schedule_drop_adt_contents({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}", + debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}", cleanup_scope, - self.ccx.tn().val_to_string(val), + Value(val), ty, drop.fill_on_drop, drop.skip_dtor); @@ -514,9 +515,9 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { drop_hint: None, }); - debug!("schedule_drop_immediate({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}", + debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}", cleanup_scope, - self.ccx.tn().val_to_string(val), + Value(val), ty, drop.fill_on_drop, drop.skip_dtor); @@ -532,10 +533,8 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { content_ty: Ty<'tcx>) { let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty }; - debug!("schedule_free_value({:?}, val={}, heap={:?})", - cleanup_scope, - self.ccx.tn().val_to_string(val), - heap); + debug!("schedule_free_value({:?}, val={:?}, heap={:?})", + cleanup_scope, Value(val), heap); self.schedule_clean(cleanup_scope, drop as CleanupObj); } diff --git a/src/librustc_trans/trans/closure.rs b/src/librustc_trans/trans/closure.rs index 11c03fe7a7dc7..2036feb31a25b 100644 --- a/src/librustc_trans/trans/closure.rs +++ b/src/librustc_trans/trans/closure.rs @@ -10,10 +10,11 @@ use arena::TypedArena; use back::link::{self, 
mangle_internal_name_by_path_and_seq}; -use llvm::{ValueRef, get_params}; +use llvm::{ValueRef, get_param, get_params}; use middle::def_id::DefId; use middle::infer; use middle::traits::ProjectionMode; +use trans::abi::{Abi, FnType}; use trans::adt; use trans::attributes; use trans::base::*; @@ -21,44 +22,41 @@ use trans::build::*; use trans::callee::{self, ArgVals, Callee}; use trans::cleanup::{CleanupMethods, CustomScope, ScopeId}; use trans::common::*; -use trans::datum::{self, Datum, rvalue_scratch_datum, Rvalue}; +use trans::datum::{ByRef, Datum, lvalue_scratch_datum}; +use trans::datum::{rvalue_scratch_datum, Rvalue}; use trans::debuginfo::{self, DebugLoc}; use trans::declare; use trans::expr; -use trans::monomorphize::{MonoId}; -use trans::type_of::*; +use trans::monomorphize::{Instance}; +use trans::value::Value; use trans::Disr; -use middle::ty; +use middle::ty::{self, Ty, TyCtxt}; use session::config::FullDebugInfo; -use syntax::abi::Abi::RustCall; use syntax::ast; -use syntax::attr::{ThinAttributes, ThinAttributesExt}; use rustc_front::hir; +use libc::c_uint; fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, closure_def_id: DefId, arg_scope_id: ScopeId, - freevars: &[ty::Freevar]) - -> Block<'blk, 'tcx> -{ + id: ast::NodeId) { let _icx = push_ctxt("closure::load_closure_environment"); + let kind = kind_for_closure(bcx.ccx(), closure_def_id); + + let env_arg = &bcx.fcx.fn_ty.args[0]; + let mut env_idx = bcx.fcx.fn_ty.ret.is_indirect() as usize; // Special case for small by-value selfs. - let closure_ty = node_id_type(bcx, bcx.fcx.id); - let self_type = self_type_for_closure(bcx.ccx(), closure_def_id, closure_ty); - let kind = kind_for_closure(bcx.ccx(), closure_def_id); - let llenv = if kind == ty::ClosureKind::FnOnce && - !arg_is_indirect(bcx.ccx(), self_type) { - let datum = rvalue_scratch_datum(bcx, - self_type, - "closure_env"); - store_ty(bcx, bcx.fcx.llenv.unwrap(), datum.val, self_type); - datum.val + let llenv = if kind == ty::ClosureKind::FnOnce && !env_arg.is_indirect() { + let closure_ty = node_id_type(bcx, id); + let llenv = rvalue_scratch_datum(bcx, closure_ty, "closure_env").val; + env_arg.store_fn_arg(&bcx.build(), &mut env_idx, llenv); + llenv } else { - bcx.fcx.llenv.unwrap() + get_param(bcx.fcx.llfn, env_idx as c_uint) }; // Store the pointer to closure data in an alloca for debug info because that's what the @@ -71,100 +69,120 @@ fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, None }; - for (i, freevar) in freevars.iter().enumerate() { - let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), - closure_expr_id: bcx.fcx.id }; - let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap(); - let mut upvar_ptr = StructGEP(bcx, llenv, i); - let captured_by_ref = match upvar_capture { - ty::UpvarCapture::ByValue => false, - ty::UpvarCapture::ByRef(..) => { - upvar_ptr = Load(bcx, upvar_ptr); - true + bcx.tcx().with_freevars(id, |fv| { + for (i, freevar) in fv.iter().enumerate() { + let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), + closure_expr_id: id }; + let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap(); + let mut upvar_ptr = StructGEP(bcx, llenv, i); + let captured_by_ref = match upvar_capture { + ty::UpvarCapture::ByValue => false, + ty::UpvarCapture::ByRef(..) 
=> { + upvar_ptr = Load(bcx, upvar_ptr); + true + } + }; + let node_id = freevar.def.var_id(); + bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr); + + if kind == ty::ClosureKind::FnOnce && !captured_by_ref { + let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id); + bcx.fcx.schedule_drop_mem(arg_scope_id, + upvar_ptr, + node_id_type(bcx, node_id), + hint) } - }; - let node_id = freevar.def.var_id(); - bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr); - - if kind == ty::ClosureKind::FnOnce && !captured_by_ref { - let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id); - bcx.fcx.schedule_drop_mem(arg_scope_id, - upvar_ptr, - node_id_type(bcx, node_id), - hint) - } - if let Some(env_pointer_alloca) = env_pointer_alloca { - debuginfo::create_captured_var_metadata( - bcx, - node_id, - env_pointer_alloca, - i, - captured_by_ref, - freevar.span); + if let Some(env_pointer_alloca) = env_pointer_alloca { + debuginfo::create_captured_var_metadata( + bcx, + node_id, + env_pointer_alloca, + i, + captured_by_ref, + freevar.span); + } } - } - - bcx + }) } -pub enum ClosureEnv<'a> { +pub enum ClosureEnv { NotClosure, - Closure(DefId, &'a [ty::Freevar]), + Closure(DefId, ast::NodeId), } -impl<'a> ClosureEnv<'a> { - pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) - -> Block<'blk, 'tcx> - { - match self { - ClosureEnv::NotClosure => bcx, - ClosureEnv::Closure(def_id, freevars) => { - if freevars.is_empty() { - bcx - } else { - load_closure_environment(bcx, def_id, arg_scope, freevars) - } - } +impl ClosureEnv { + pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) { + if let ClosureEnv::Closure(def_id, id) = self { + load_closure_environment(bcx, def_id, arg_scope, id); } } } +fn get_self_type<'tcx>(tcx: &TyCtxt<'tcx>, + closure_id: DefId, + fn_ty: Ty<'tcx>) + -> Ty<'tcx> { + match tcx.closure_kind(closure_id) { + ty::ClosureKind::Fn => { + tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), fn_ty) + } + ty::ClosureKind::FnMut => { + tcx.mk_mut_ref(tcx.mk_region(ty::ReStatic), fn_ty) + } + ty::ClosureKind::FnOnce => fn_ty, + } +} + /// Returns the LLVM function declaration for a closure, creating it if /// necessary. If the ID does not correspond to a closure ID, returns None. 
-pub fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - closure_id: DefId, - substs: &ty::ClosureSubsts<'tcx>) - -> ValueRef { +fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + closure_id: DefId, + substs: &ty::ClosureSubsts<'tcx>) + -> ValueRef { // Normalize type so differences in regions and typedefs don't cause // duplicate declarations - let substs = ccx.tcx().erase_regions(substs); - let mono_id = MonoId { + let tcx = ccx.tcx(); + let substs = tcx.erase_regions(substs); + let instance = Instance { def: closure_id, params: &substs.func_substs.types }; - if let Some(&llfn) = ccx.closure_vals().borrow().get(&mono_id) { + if let Some(&llfn) = ccx.instances().borrow().get(&instance) { debug!("get_or_create_closure_declaration(): found closure {:?}: {:?}", - mono_id, ccx.tn().val_to_string(llfn)); + instance, Value(llfn)); return llfn; } - let path = ccx.tcx().def_path(closure_id); + let path = tcx.def_path(closure_id); let symbol = mangle_internal_name_by_path_and_seq(path, "closure"); - let function_type = ccx.tcx().mk_closure_from_closure_substs(closure_id, Box::new(substs)); - let llfn = declare::define_internal_rust_fn(ccx, &symbol[..], function_type); + // Compute the rust-call form of the closure call method. + let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Any); + let sig = &infcx.closure_type(closure_id, &substs).sig; + let sig = tcx.erase_late_bound_regions(sig); + let sig = infer::normalize_associated_type(tcx, &sig); + let closure_type = tcx.mk_closure_from_closure_substs(closure_id, Box::new(substs)); + let function_type = tcx.mk_fn_ptr(ty::BareFnTy { + unsafety: hir::Unsafety::Normal, + abi: Abi::RustCall, + sig: ty::Binder(ty::FnSig { + inputs: Some(get_self_type(tcx, closure_id, closure_type)) + .into_iter().chain(sig.inputs).collect(), + output: sig.output, + variadic: false + }) + }); + let llfn = declare::define_internal_fn(ccx, &symbol, function_type); // set an inline hint for all closures attributes::inline(llfn, attributes::InlineAttr::Hint); debug!("get_or_create_declaration_if_closure(): inserting new \ - closure {:?} (type {}): {:?}", - mono_id, - ccx.tn().type_to_string(val_ty(llfn)), - ccx.tn().val_to_string(llfn)); - ccx.closure_vals().borrow_mut().insert(mono_id, llfn); + closure {:?}: {:?}", + instance, Value(llfn)); + ccx.instances().borrow_mut().insert(instance, llfn); llfn } @@ -179,8 +197,7 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, body: &hir::Block, id: ast::NodeId, closure_def_id: DefId, // (*) - closure_substs: &'tcx ty::ClosureSubsts<'tcx>, - closure_expr_attrs: &ThinAttributes) + closure_substs: &ty::ClosureSubsts<'tcx>) -> Option> { // (*) Note that in the case of inlined functions, the `closure_def_id` will be the @@ -210,22 +227,29 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables, ProjectionMode::Any); let function_type = infcx.closure_type(closure_def_id, closure_substs); - let freevars: Vec = - tcx.with_freevars(id, |fv| fv.iter().cloned().collect()); - let sig = tcx.erase_late_bound_regions(&function_type.sig); let sig = infer::normalize_associated_type(ccx.tcx(), &sig); + let closure_type = tcx.mk_closure_from_closure_substs(closure_def_id, + Box::new(closure_substs.clone())); + let sig = ty::FnSig { + inputs: Some(get_self_type(tcx, closure_def_id, closure_type)) + .into_iter().chain(sig.inputs).collect(), + output: sig.output, + variadic: false + }; + let 
fn_ty = FnType::new(ccx, Abi::RustCall, &sig, &[]); + trans_closure(ccx, decl, body, llfn, param_substs, + closure_def_id, id, - closure_expr_attrs.as_attr_slice(), - sig.output, - function_type.abi, - ClosureEnv::Closure(closure_def_id, &freevars)); + fn_ty, + Abi::RustCall, + ClosureEnv::Closure(closure_def_id, id)); // Don't hoist this to the top of the function. It's perfectly legitimate // to have a zero-size closure (in which case dest will be `Ignore`) and @@ -241,21 +265,23 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, let repr = adt::represent_type(ccx, node_id_type(bcx, id)); // Create the closure. - for (i, freevar) in freevars.iter().enumerate() { - let datum = expr::trans_local_var(bcx, freevar.def); - let upvar_slot_dest = adt::trans_field_ptr( - bcx, &repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i); - let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), - closure_expr_id: id }; - match tcx.upvar_capture(upvar_id).unwrap() { - ty::UpvarCapture::ByValue => { - bcx = datum.store_to(bcx, upvar_slot_dest); - } - ty::UpvarCapture::ByRef(..) => { - Store(bcx, datum.to_llref(), upvar_slot_dest); + tcx.with_freevars(id, |fv| { + for (i, freevar) in fv.iter().enumerate() { + let datum = expr::trans_var(bcx, freevar.def); + let upvar_slot_dest = adt::trans_field_ptr( + bcx, &repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i); + let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), + closure_expr_id: id }; + match tcx.upvar_capture(upvar_id).unwrap() { + ty::UpvarCapture::ByValue => { + bcx = datum.store_to(bcx, upvar_slot_dest); + } + ty::UpvarCapture::ByRef(..) => { + Store(bcx, datum.to_llref(), upvar_slot_dest); + } } } - } + }); adt::trans_set_discr(bcx, &repr, dest_addr, Disr(0)); Some(bcx) @@ -278,11 +304,8 @@ pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, let tcx = ccx.tcx(); debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \ - trait_closure_kind={:?}, \ - llfn={})", - llfn_closure_kind, - trait_closure_kind, - ccx.tn().val_to_string(llfn)); + trait_closure_kind={:?}, llfn={:?})", + llfn_closure_kind, trait_closure_kind, Value(llfn)); match (llfn_closure_kind, trait_closure_kind) { (ty::ClosureKind::Fn, ty::ClosureKind::Fn) | @@ -324,10 +347,8 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( llreffn: ValueRef) -> ValueRef { - debug!("trans_fn_once_adapter_shim(closure_def_id={:?}, substs={:?}, llreffn={})", - closure_def_id, - substs, - ccx.tn().val_to_string(llreffn)); + debug!("trans_fn_once_adapter_shim(closure_def_id={:?}, substs={:?}, llreffn={:?})", + closure_def_id, substs, Value(llreffn)); let tcx = ccx.tcx(); let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables, ProjectionMode::Any); @@ -348,56 +369,70 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( debug!("trans_fn_once_adapter_shim: llref_fn_ty={:?}", llref_fn_ty); - let ret_ty = tcx.erase_late_bound_regions(&sig.output()); - let ret_ty = infer::normalize_associated_type(ccx.tcx(), &ret_ty); // Make a version of the closure type with the same arguments, but // with argument #0 being by value. - assert_eq!(abi, RustCall); + assert_eq!(abi, Abi::RustCall); sig.0.inputs[0] = closure_ty; + + let sig = tcx.erase_late_bound_regions(&sig); + let sig = infer::normalize_associated_type(ccx.tcx(), &sig); + let fn_ty = FnType::new(ccx, abi, &sig, &[]); + let llonce_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy { unsafety: unsafety, abi: abi, - sig: sig + sig: ty::Binder(sig) }); // Create the by-value helper. 
let function_name = link::mangle_internal_name_by_type_and_seq(ccx, llonce_fn_ty, "once_shim"); - let lloncefn = declare::define_internal_rust_fn(ccx, &function_name, - llonce_fn_ty); + let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty); let (block_arena, fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, - lloncefn, - ast::DUMMY_NODE_ID, - false, - ret_ty, - substs.func_substs, - None, - &block_arena); - let mut bcx = init_function(&fcx, false, ret_ty); + fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, substs.func_substs, &block_arena); + let mut bcx = fcx.init(false, None); - let mut llargs = get_params(fcx.llfn); // the first argument (`self`) will be the (by value) closure env. let self_scope = fcx.push_custom_cleanup_scope(); let self_scope_id = CustomScope(self_scope); - let rvalue_mode = datum::appropriate_rvalue_mode(ccx, closure_ty); - let self_idx = fcx.arg_offset(); - let llself = llargs[self_idx]; - let env_datum = Datum::new(llself, closure_ty, Rvalue::new(rvalue_mode)); - let env_datum = unpack_datum!(bcx, - env_datum.to_lvalue_datum_in_scope(bcx, "self", - self_scope_id)); - - debug!("trans_fn_once_adapter_shim: env_datum={}", - bcx.val_to_string(env_datum.val)); - llargs[self_idx] = env_datum.val; + + let mut llargs = get_params(fcx.llfn); + let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize; + let env_arg = &fcx.fn_ty.args[0]; + let llenv = if env_arg.is_indirect() { + Datum::new(llargs[self_idx], closure_ty, Rvalue::new(ByRef)) + .add_clean(&fcx, self_scope_id) + } else { + unpack_datum!(bcx, lvalue_scratch_datum(bcx, closure_ty, "self", + InitAlloca::Dropped, + self_scope_id, |bcx, llval| { + let mut llarg_idx = self_idx; + env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, llval); + bcx.fcx.schedule_lifetime_end(self_scope_id, llval); + bcx + })).val + }; + + debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv)); + // Adjust llargs such that llargs[self_idx..] has the call arguments. + // For zero-sized closures that means sneaking in a new argument. 
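The once-shim built here exists because FnOnce::call_once takes the closure environment by value, while the underlying Fn/FnMut implementation expects it by reference; the shim owns the environment and forwards a borrow of it. A standalone sketch of that shape (generic names are illustrative, not from the patch):

    // Standalone illustration: adapt a by-reference callable so it can be driven
    // through a by-value (FnOnce-style) entry point, as the once-shim above does.
    fn call_by_ref<F: Fn(i32) -> i32>(f: &F, x: i32) -> i32 {
        f(x) // the underlying implementation only ever sees a borrow
    }

    // The "shim": owns `f`, re-borrows it, and forwards the call.
    fn call_once_shim<F: Fn(i32) -> i32>(f: F, x: i32) -> i32 {
        call_by_ref(&f, x)
        // `f` is dropped here, matching the consume-on-call contract of FnOnce.
    }

    fn main() {
        let add_one = |x| x + 1;
        assert_eq!(call_once_shim(add_one, 41), 42);
    }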
+ if env_arg.is_ignore() { + if self_idx > 0 { + self_idx -= 1; + llargs[self_idx] = llenv; + } else { + llargs.insert(0, llenv); + } + } else { + llargs[self_idx] = llenv; + } let dest = fcx.llretslotptr.get().map( - |_| expr::SaveIn(fcx.get_ret_slot(bcx, ret_ty, "ret_slot"))); + |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))); let callee = Callee { data: callee::Fn(llreffn), @@ -407,7 +442,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope); - finish_fn(&fcx, bcx, ret_ty, DebugLoc::None); + fcx.finish(bcx, DebugLoc::None); lloncefn } diff --git a/src/librustc_trans/trans/collector.rs b/src/librustc_trans/trans/collector.rs index cea97c1a1e77f..2c996c032ca90 100644 --- a/src/librustc_trans/trans/collector.rs +++ b/src/librustc_trans/trans/collector.rs @@ -194,10 +194,10 @@ use rustc_front::intravisit as hir_visit; use rustc::front::map as hir_map; use rustc::middle::def_id::DefId; use rustc::middle::lang_items::{ExchangeFreeFnLangItem, ExchangeMallocFnLangItem}; -use rustc::middle::{ty, traits}; +use rustc::middle::traits; use rustc::middle::subst::{self, Substs, Subst}; +use rustc::middle::ty::{self, Ty, TypeFoldable}; use rustc::middle::ty::adjustment::CustomCoerceUnsized; -use rustc::middle::ty::fold::TypeFoldable; use rustc::mir::repr as mir; use rustc::mir::visit as mir_visit; use rustc::mir::visit::Visitor as MirVisitor; @@ -213,11 +213,10 @@ use trans::common::{fulfill_obligation, normalize_and_test_predicates, type_is_sized}; use trans::glue; use trans::meth; -use trans::monomorphize; +use trans::monomorphize::{self, Instance}; use util::nodemap::{FnvHashSet, FnvHashMap, DefIdMap}; use std::hash::{Hash, Hasher}; -use std::rc::Rc; #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub enum TransItemCollectionMode { @@ -225,13 +224,10 @@ pub enum TransItemCollectionMode { Lazy } -#[derive(Eq, Clone, Copy, Debug)] +#[derive(PartialEq, Eq, Clone, Copy, Debug)] pub enum TransItem<'tcx> { - DropGlue(ty::Ty<'tcx>), - Fn { - def_id: DefId, - substs: &'tcx Substs<'tcx> - }, + DropGlue(Ty<'tcx>), + Fn(Instance<'tcx>), Static(NodeId) } @@ -242,35 +238,19 @@ impl<'tcx> Hash for TransItem<'tcx> { 0u8.hash(s); t.hash(s); }, - TransItem::Fn { def_id, substs } => { + TransItem::Fn(instance) => { 1u8.hash(s); - def_id.hash(s); - (substs as *const Substs<'tcx> as usize).hash(s); + instance.def.hash(s); + (instance.params as *const _ as usize).hash(s); } TransItem::Static(node_id) => { - 3u8.hash(s); + 2u8.hash(s); node_id.hash(s); } }; } } -impl<'tcx> PartialEq for TransItem<'tcx> { - fn eq(&self, other: &Self) -> bool { - match (*self, *other) { - (TransItem::DropGlue(t1), TransItem::DropGlue(t2)) => t1 == t2, - (TransItem::Fn { def_id: def_id1, substs: substs1 }, - TransItem::Fn { def_id: def_id2, substs: substs2 }) => { - def_id1 == def_id2 && substs1 == substs2 - }, - (TransItem::Static(node_id1), TransItem::Static(node_id2)) => { - node_id1 == node_id2 - }, - _ => false - } - } -} - pub fn collect_crate_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, mode: TransItemCollectionMode) -> FnvHashSet> { @@ -282,14 +262,9 @@ pub fn collect_crate_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, debug!("Building translation item graph, beginning at roots"); let mut visited = FnvHashSet(); let mut recursion_depths = DefIdMap(); - let mut mir_cache = DefIdMap(); for root in roots { - collect_items_rec(ccx, - root, - &mut visited, - &mut recursion_depths, - &mut mir_cache); + collect_items_rec(ccx, root, &mut visited, &mut 
recursion_depths); } visited @@ -319,27 +294,11 @@ fn collect_roots<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, roots } -#[derive(Clone)] -enum CachedMir<'mir, 'tcx: 'mir> { - Ref(&'mir mir::Mir<'tcx>), - Owned(Rc>) -} - -impl<'mir, 'tcx: 'mir> CachedMir<'mir, 'tcx> { - fn get_ref<'a>(&'a self) -> &'a mir::Mir<'tcx> { - match *self { - CachedMir::Ref(r) => r, - CachedMir::Owned(ref rc) => &rc, - } - } -} - // Collect all monomorphized translation items reachable from `starting_point` fn collect_items_rec<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>, starting_point: TransItem<'tcx>, visited: &mut FnvHashSet>, - recursion_depths: &mut DefIdMap, - mir_cache: &mut DefIdMap>) { + recursion_depths: &mut DefIdMap) { if !visited.insert(starting_point.clone()) { // We've been here already, no need to search again. return; @@ -357,29 +316,33 @@ fn collect_items_rec<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>, TransItem::Static(_) => { recursion_depth_reset = None; } - TransItem::Fn { def_id, substs: ref param_substs } => { + TransItem::Fn(instance) => { // Keep track of the monomorphization recursion depth recursion_depth_reset = Some(check_recursion_limit(ccx, - def_id, + instance, recursion_depths)); // Scan the MIR in order to find function calls, closures, and // drop-glue - let mir = load_mir(ccx, def_id, mir_cache); + let mir = errors::expect(ccx.sess().diagnostic(), ccx.get_mir(instance.def), + || format!("Could not find MIR for function: {}", instance)); let mut visitor = MirNeighborCollector { ccx: ccx, - mir: mir.get_ref(), + mir: &mir, output: &mut neighbors, - param_substs: param_substs + param_substs: ccx.tcx().mk_substs(Substs { + types: instance.params.clone(), + regions: subst::ErasedRegions + }) }; - visitor.visit_mir(mir.get_ref()); + visitor.visit_mir(&mir); } } for neighbour in neighbors { - collect_items_rec(ccx, neighbour, visited, recursion_depths, mir_cache); + collect_items_rec(ccx, neighbour, visited, recursion_depths); } if let Some((def_id, depth)) = recursion_depth_reset { @@ -389,42 +352,11 @@ fn collect_items_rec<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>, debug!("END collect_items_rec({})", starting_point.to_string(ccx)); } -fn load_mir<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - mir_cache: &mut DefIdMap>) - -> CachedMir<'a, 'tcx> { - let mir_not_found_error_message = || { - format!("Could not find MIR for function: {}", - ccx.tcx().item_path_str(def_id)) - }; - - if def_id.is_local() { - let node_id = ccx.tcx().map.as_local_node_id(def_id).unwrap(); - let mir_opt = ccx.mir_map().map.get(&node_id); - let mir = errors::expect(ccx.sess().diagnostic(), - mir_opt, - mir_not_found_error_message); - CachedMir::Ref(mir) - } else { - if let Some(mir) = mir_cache.get(&def_id) { - return mir.clone(); - } - - let mir_opt = ccx.sess().cstore.maybe_get_item_mir(ccx.tcx(), def_id); - let mir = errors::expect(ccx.sess().diagnostic(), - mir_opt, - mir_not_found_error_message); - let cached = CachedMir::Owned(Rc::new(mir)); - mir_cache.insert(def_id, cached.clone()); - cached - } -} - fn check_recursion_limit<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, + instance: Instance<'tcx>, recursion_depths: &mut DefIdMap) -> (DefId, usize) { - let recursion_depth = recursion_depths.get(&def_id) + let recursion_depth = recursion_depths.get(&instance.def) .map(|x| *x) .unwrap_or(0); debug!(" => recursion depth={}", recursion_depth); @@ -433,20 +365,18 @@ fn check_recursion_limit<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>, // more than the recursion limit is assumed to 
be causing an // infinite expansion. if recursion_depth > ccx.sess().recursion_limit.get() { - if let Some(node_id) = ccx.tcx().map.as_local_node_id(def_id) { - ccx.sess().span_fatal(ccx.tcx().map.span(node_id), - "reached the recursion limit during monomorphization"); + let error = format!("reached the recursion limit while instantiating `{}`", + instance); + if let Some(node_id) = ccx.tcx().map.as_local_node_id(instance.def) { + ccx.sess().span_fatal(ccx.tcx().map.span(node_id), &error); } else { - let error = format!("reached the recursion limit during \ - monomorphization of '{}'", - ccx.tcx().item_path_str(def_id)); - ccx.sess().fatal(&error[..]); + ccx.sess().fatal(&error); } } - recursion_depths.insert(def_id, recursion_depth + 1); + recursion_depths.insert(instance.def, recursion_depth + 1); - (def_id, recursion_depth) + (instance.def, recursion_depth) } struct MirNeighborCollector<'a, 'tcx: 'a> { @@ -750,7 +680,7 @@ fn do_static_dispatch<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, param_substs: &'tcx Substs<'tcx>) -> Option<(DefId, &'tcx Substs<'tcx>)> { debug!("do_static_dispatch(fn_def_id={}, fn_substs={:?}, param_substs={:?})", - def_id_to_string(ccx, fn_def_id, None), + def_id_to_string(ccx, fn_def_id), fn_substs, param_substs); @@ -798,8 +728,8 @@ fn do_static_trait_method_dispatch<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, trait_id={}, \ callee_substs={:?}, \ param_substs={:?}", - def_id_to_string(ccx, trait_method.def_id, None), - def_id_to_string(ccx, trait_id, None), + def_id_to_string(ccx, trait_method.def_id), + def_id_to_string(ccx, trait_id), callee_substs, param_substs); @@ -933,7 +863,7 @@ fn create_fn_trans_item<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, -> TransItem<'tcx> { debug!("create_fn_trans_item(def_id={}, fn_substs={:?}, param_substs={:?})", - def_id_to_string(ccx, def_id, None), + def_id_to_string(ccx, def_id), fn_substs, param_substs); @@ -945,10 +875,10 @@ fn create_fn_trans_item<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_substs); let concrete_substs = ccx.tcx().erase_regions(&concrete_substs); - let trans_item = TransItem::Fn { - def_id: def_id, - substs: ccx.tcx().mk_substs(concrete_substs), - }; + let trans_item = TransItem::Fn(Instance { + def: def_id, + params: &ccx.tcx().mk_substs(concrete_substs).types, + }); return trans_item; } @@ -1048,8 +978,7 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> { if self.mode == TransItemCollectionMode::Eager { debug!("RootCollector: ADT drop-glue for {}", def_id_to_string(self.ccx, - self.ccx.tcx().map.local_def_id(item.id), - None)); + self.ccx.tcx().map.local_def_id(item.id))); let ty = glue::get_drop_glue_type(self.ccx, ty); self.output.push(TransItem::DropGlue(ty)); @@ -1059,8 +988,7 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> { hir::ItemStatic(..) 
=> { debug!("RootCollector: ItemStatic({})", def_id_to_string(self.ccx, - self.ccx.tcx().map.local_def_id(item.id), - None)); + self.ccx.tcx().map.local_def_id(item.id))); self.output.push(TransItem::Static(item.id)); } hir::ItemFn(_, _, constness, _, ref generics, _) => { @@ -1069,12 +997,10 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> { let def_id = self.ccx.tcx().map.local_def_id(item.id); debug!("RootCollector: ItemFn({})", - def_id_to_string(self.ccx, def_id, None)); + def_id_to_string(self.ccx, def_id)); - self.output.push(TransItem::Fn { - def_id: def_id, - substs: self.trans_empty_substs - }); + let instance = Instance::mono(self.ccx.tcx(), def_id); + self.output.push(TransItem::Fn(instance)); } } } @@ -1108,12 +1034,10 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> { let def_id = self.ccx.tcx().map.local_def_id(ii.id); debug!("RootCollector: MethodImplItem({})", - def_id_to_string(self.ccx, def_id, None)); + def_id_to_string(self.ccx, def_id)); - self.output.push(TransItem::Fn { - def_id: def_id, - substs: self.trans_empty_substs - }); + let instance = Instance::mono(self.ccx.tcx(), def_id); + self.output.push(TransItem::Fn(instance)); } } _ => { /* Nothing to do here */ } @@ -1142,7 +1066,7 @@ fn create_trans_items_for_default_impls<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let impl_def_id = tcx.map.local_def_id(item.id); debug!("create_trans_items_for_default_impls(item={})", - def_id_to_string(ccx, impl_def_id, None)); + def_id_to_string(ccx, impl_def_id)); if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) { let default_impls = tcx.provided_trait_methods(trait_ref.def_id); @@ -1225,7 +1149,7 @@ pub fn push_unique_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty::TyStruct(adt_def, substs) | ty::TyEnum(adt_def, substs) => { push_item_name(cx, adt_def.did, output); - push_type_params(cx, substs, &[], output); + push_type_params(cx, &substs.types, &[], output); }, ty::TyTuple(ref component_types) => { output.push('('); @@ -1275,7 +1199,7 @@ pub fn push_unique_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty::TyTrait(ref trait_data) => { push_item_name(cx, trait_data.principal.skip_binder().def_id, output); push_type_params(cx, - &trait_data.principal.skip_binder().substs, + &trait_data.principal.skip_binder().substs.types, &trait_data.bounds.projection_bounds, output); }, @@ -1285,7 +1209,7 @@ pub fn push_unique_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, output.push_str("unsafe "); } - if abi != ::syntax::abi::Abi::Rust { + if abi != ::trans::abi::Abi::Rust { output.push_str("extern \""); output.push_str(abi.name()); output.push_str("\" "); @@ -1329,7 +1253,7 @@ pub fn push_unique_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, output.push_str("{"); output.push_str(&format!("{}:{}", def_id.krate, def_id.index.as_usize())); output.push_str("}"); - push_type_params(cx, closure_substs.func_substs, &[], output); + push_type_params(cx, &closure_substs.func_substs.types, &[], output); } ty::TyError | ty::TyInfer(_) | @@ -1371,16 +1295,16 @@ fn push_item_name(ccx: &CrateContext, } fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - substs: &Substs<'tcx>, + types: &'tcx subst::VecPerParamSpace>, projections: &[ty::PolyProjectionPredicate<'tcx>], output: &mut String) { - if substs.types.is_empty() && projections.is_empty() { + if types.is_empty() && projections.is_empty() { return; } output.push('<'); - for &type_parameter in &substs.types { + for &type_parameter in types { push_unique_type_name(cx, 
type_parameter, output); output.push_str(", "); } @@ -1400,23 +1324,16 @@ fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, output.push('>'); } -fn push_def_id_as_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - substs: Option<&Substs<'tcx>>, - output: &mut String) { - push_item_name(ccx, def_id, output); - - if let Some(substs) = substs { - push_type_params(ccx, substs, &[], output); - } +fn push_instance_as_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + instance: Instance<'tcx>, + output: &mut String) { + push_item_name(ccx, instance.def, output); + push_type_params(ccx, instance.params, &[], output); } -fn def_id_to_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - substs: Option<&Substs<'tcx>>) - -> String { +fn def_id_to_string(ccx: &CrateContext, def_id: DefId) -> String { let mut output = String::new(); - push_def_id_as_string(ccx, def_id, substs, &mut output); + push_item_name(ccx, def_id, &mut output); output } @@ -1440,23 +1357,23 @@ impl<'tcx> TransItem<'tcx> { push_unique_type_name(ccx, t, &mut s); s } - TransItem::Fn { def_id, ref substs } => { - to_string_internal(ccx, "fn ", def_id, Some(substs)) + TransItem::Fn(instance) => { + to_string_internal(ccx, "fn ", instance) }, TransItem::Static(node_id) => { let def_id = hir_map.local_def_id(node_id); - to_string_internal(ccx, "static ", def_id, None) + let instance = Instance::mono(ccx.tcx(), def_id); + to_string_internal(ccx, "static ", instance) }, }; fn to_string_internal<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, prefix: &str, - def_id: DefId, - substs: Option<&Substs<'tcx>>) + instance: Instance<'tcx>) -> String { let mut result = String::with_capacity(32); result.push_str(prefix); - push_def_id_as_string(ccx, def_id, substs, &mut result); + push_instance_as_string(ccx, instance, &mut result); result } } @@ -1466,10 +1383,10 @@ impl<'tcx> TransItem<'tcx> { TransItem::DropGlue(t) => { format!("DropGlue({})", t as *const _ as usize) } - TransItem::Fn { def_id, substs } => { + TransItem::Fn(instance) => { format!("Fn({:?}, {})", - def_id, - substs as *const _ as usize) + instance.def, + instance.params as *const _ as usize) } TransItem::Static(id) => { format!("Static({:?})", id) diff --git a/src/librustc_trans/trans/common.rs b/src/librustc_trans/trans/common.rs index 0aa69dec253a1..db06b2352fd35 100644 --- a/src/librustc_trans/trans/common.rs +++ b/src/librustc_trans/trans/common.rs @@ -12,8 +12,6 @@ //! Code that is useful in various trans modules. 
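// A simplified, self-contained model (not rustc's actual types) of the switch
// above from `TransItem::Fn { def_id, substs }` to `TransItem::Fn(Instance)`:
// an instance is a definition id plus its concrete, erased type arguments, so
// it can serve directly as the key for the set of translation items. `DefId`
// and the string type parameters here are illustrative stand-ins only.
use std::collections::HashSet;

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct DefId(u32);

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct Instance {
    def: DefId,
    params: Vec<String>, // stands in for the erased, monomorphized Substs
}

impl Instance {
    // Analogue of Instance::mono: an instance with no type parameters.
    fn mono(def: DefId) -> Instance {
        Instance { def: def, params: Vec::new() }
    }
}

fn main() {
    let mut visited: HashSet<Instance> = HashSet::new();
    // The same function instantiated at two different types yields two items.
    visited.insert(Instance { def: DefId(7), params: vec!["u32".into()] });
    visited.insert(Instance { def: DefId(7), params: vec!["f64".into()] });
    visited.insert(Instance::mono(DefId(9)));
    assert_eq!(visited.len(), 3);
}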
-pub use self::ExprOrMethodCall::*; - use session::Session; use llvm; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind}; @@ -23,33 +21,34 @@ use middle::def::Def; use middle::def_id::DefId; use middle::infer; use middle::lang_items::LangItem; -use middle::subst::{self, Substs}; +use middle::subst::Substs; +use trans::abi::{Abi, FnType}; use trans::base; use trans::build; use trans::builder::Builder; -use trans::callee; +use trans::callee::Callee; use trans::cleanup; use trans::consts; use trans::datum; use trans::debuginfo::{self, DebugLoc}; use trans::declare; use trans::machine; +use trans::mir::CachedMir; use trans::monomorphize; use trans::type_::Type; -use trans::type_of; +use trans::value::Value; use middle::ty::{self, Ty, TyCtxt}; use middle::traits::{self, SelectionContext, ProjectionMode}; use middle::ty::fold::{TypeFolder, TypeFoldable}; use rustc_front::hir; -use rustc::mir::repr::Mir; -use util::nodemap::{FnvHashMap, NodeMap}; +use util::nodemap::NodeMap; use arena::TypedArena; use libc::{c_uint, c_char}; use std::ops::Deref; use std::ffi::CString; use std::cell::{Cell, RefCell}; -use std::vec::Vec; + use syntax::ast; use syntax::codemap::{DUMMY_SP, Span}; use syntax::parse::token::InternedString; @@ -75,18 +74,6 @@ pub fn type_is_fat_ptr<'tcx>(cx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool { } } -fn type_is_newtype_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - match ty.sty { - ty::TyStruct(def, substs) => { - let fields = &def.struct_variant().fields; - fields.len() == 1 && { - type_is_immediate(ccx, monomorphize::field_ty(ccx.tcx(), substs, &fields[0])) - } - } - _ => false - } -} - pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { use trans::machine::llsize_of_alloc; use trans::type_of::sizing_type_of; @@ -94,7 +81,6 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - let tcx = ccx.tcx(); let simple = ty.is_scalar() || ty.is_unique() || ty.is_region_ptr() || - type_is_newtype_immediate(ccx, ty) || ty.is_simd(); if simple && !type_is_fat_ptr(tcx, ty) { return true; @@ -120,12 +106,6 @@ pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - llsize_of_alloc(ccx, llty) == 0 } -/// Identifies types which we declare to be equivalent to `void` in C for the purpose of function -/// return types. These are `()`, bot, uninhabited enums and all other zero-sized types. -pub fn return_type_is_void<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_nil() || ty.is_empty(ccx.tcx()) || type_is_zero_size(ccx, ty) -} - /// Generates a unique symbol based off the name given. This is used to create /// unique symbols for things like closures. pub fn gensym_name(name: &str) -> ast::Name { @@ -252,8 +232,6 @@ pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res { } } -pub type ExternMap = FnvHashMap; - pub fn validate_substs(substs: &Substs) { assert!(!substs.types.needs_infer()); } @@ -295,7 +273,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // The MIR for this function. At present, this is optional because // we only have MIR available for things that are local to the // crate. 
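// A standalone sketch of the Ref/Owned shape behind the CachedMir handle that
// was removed from the collector above and is now imported from trans::mir:
// locally available MIR is borrowed from the MIR map, while MIR decoded from
// another crate's metadata is owned behind an Rc so clones of the handle stay
// cheap. `Mir` here is a placeholder struct, not the real rustc type.
use std::rc::Rc;

struct Mir {
    block_count: usize,
}

#[derive(Clone)]
enum CachedMir<'mir> {
    Ref(&'mir Mir),
    Owned(Rc<Mir>),
}

impl<'mir> CachedMir<'mir> {
    fn get_ref(&self) -> &Mir {
        match *self {
            CachedMir::Ref(r) => r,
            CachedMir::Owned(ref rc) => rc,
        }
    }
}

fn main() {
    let local = Mir { block_count: 3 };
    let borrowed = CachedMir::Ref(&local);
    let owned = CachedMir::Owned(Rc::new(Mir { block_count: 5 }));
    assert_eq!(borrowed.get_ref().block_count, 3);
    assert_eq!(owned.clone().get_ref().block_count, 5);
}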
- pub mir: Option<&'a Mir<'tcx>>, + pub mir: Option>, // The ValueRef returned from a call to llvm::LLVMAddFunction; the // address of the first instruction in the sequence of @@ -306,9 +284,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // always an empty parameter-environment NOTE: @jroesch another use of ParamEnv pub param_env: ty::ParameterEnvironment<'a, 'tcx>, - // The environment argument in a closure. - pub llenv: Option, - // A pointer to where to store the return value. If the return type is // immediate, this points to an alloca in the function. Otherwise, it's a // pointer to the hidden first parameter of the function. After function @@ -336,11 +311,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // Note that for cleanuppad-based exceptions this is not used. pub landingpad_alloca: Cell>, - // True if the caller expects this fn to use the out pointer to - // return. Either way, your code should write into the slot llretslotptr - // points to, but if this value is false, that slot will be a local alloca. - pub caller_expects_out_pointer: bool, - // Maps the DefId's for local variables to the allocas created for // them in llallocas. pub lllocals: RefCell>>, @@ -352,9 +322,8 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // paths) for the code being compiled. pub lldropflag_hints: RefCell>, - // The NodeId of the function, or -1 if it doesn't correspond to - // a user-defined function. - pub id: ast::NodeId, + // Describes the return/argument LLVM types and their ABI handling. + pub fn_ty: FnType, // If this function is being monomorphized, this contains the type // substitutions used. @@ -383,20 +352,8 @@ pub struct FunctionContext<'a, 'tcx: 'a> { } impl<'a, 'tcx> FunctionContext<'a, 'tcx> { - pub fn mir(&self) -> &'a Mir<'tcx> { - self.mir.unwrap() - } - - pub fn arg_offset(&self) -> usize { - self.env_arg_pos() + if self.llenv.is_some() { 1 } else { 0 } - } - - pub fn env_arg_pos(&self) -> usize { - if self.caller_expects_out_pointer { - 1 - } else { - 0 - } + pub fn mir(&self) -> CachedMir<'a, 'tcx> { + self.mir.clone().expect("fcx.mir was empty") } pub fn cleanup(&self) { @@ -419,14 +376,9 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { self.llreturn.get().unwrap() } - pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>, - output: ty::FnOutput<'tcx>, - name: &str) -> ValueRef { + pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>, name: &str) -> ValueRef { if self.needs_ret_allocas { - base::alloca(bcx, match output { - ty::FnConverging(output_type) => type_of::type_of(bcx.ccx(), output_type), - ty::FnDiverging => Type::void(bcx.ccx()) - }, name) + base::alloca(bcx, self.fn_ty.ret.memory_ty(self.ccx), name) } else { self.llretslotptr.get().unwrap() } @@ -511,62 +463,60 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { // `rust_eh_personality` function, but rather we wired it up to the // CRT's custom personality function, which forces LLVM to consider // landing pads as "landing pads for SEH". 
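// A minimal sketch of the lazy, Cell-based caching the patch adopts below for
// the personality and unwind-resume handles: since ValueRef is a Copy pointer,
// a Cell<Option<_>> suffices and no RefCell borrow is needed. The u64 handle
// and declare_personality() stand in for LLVM values purely for illustration.
use std::cell::Cell;

struct Ctxt {
    eh_personality: Cell<Option<u64>>,
}

fn declare_personality() -> u64 {
    println!("declaring rust_eh_personality once");
    0xdead_beef
}

fn personality(ccx: &Ctxt) -> u64 {
    if let Some(llfn) = ccx.eh_personality.get() {
        return llfn;
    }
    let llfn = declare_personality();
    ccx.eh_personality.set(Some(llfn));
    llfn
}

fn main() {
    let ccx = Ctxt { eh_personality: Cell::new(None) };
    let a = personality(&ccx); // declares
    let b = personality(&ccx); // served from the cache
    assert_eq!(a, b);
}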
- let target = &self.ccx.sess().target.target; - match self.ccx.tcx().lang_items.eh_personality() { - Some(def_id) if !base::wants_msvc_seh(self.ccx.sess()) => { - callee::trans_fn_ref(self.ccx, def_id, ExprId(0), - self.param_substs).val + let ccx = self.ccx; + let tcx = ccx.tcx(); + let target = &ccx.sess().target.target; + match tcx.lang_items.eh_personality() { + Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => { + Callee::def(ccx, def_id, tcx.mk_substs(Substs::empty())).reify(ccx).val } - _ => { - let mut personality = self.ccx.eh_personality().borrow_mut(); - match *personality { - Some(llpersonality) => llpersonality, - None => { - let name = if !base::wants_msvc_seh(self.ccx.sess()) { - "rust_eh_personality" - } else if target.arch == "x86" { - "_except_handler3" - } else { - "__C_specific_handler" - }; - let fty = Type::variadic_func(&[], &Type::i32(self.ccx)); - let f = declare::declare_cfn(self.ccx, name, fty, - self.ccx.tcx().types.i32); - *personality = Some(f); - f - } - } + _ => if let Some(llpersonality) = ccx.eh_personality().get() { + llpersonality + } else { + let name = if !base::wants_msvc_seh(ccx.sess()) { + "rust_eh_personality" + } else if target.arch == "x86" { + "_except_handler3" + } else { + "__C_specific_handler" + }; + let fty = Type::variadic_func(&[], &Type::i32(ccx)); + let f = declare::declare_cfn(ccx, name, fty); + ccx.eh_personality().set(Some(f)); + f } } } // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined, // otherwise declares it as an external function. - pub fn eh_unwind_resume(&self) -> ValueRef { + pub fn eh_unwind_resume(&self) -> Callee<'tcx> { use trans::attributes; - assert!(self.ccx.sess().target.target.options.custom_unwind_resume); - match self.ccx.tcx().lang_items.eh_unwind_resume() { - Some(def_id) => { - callee::trans_fn_ref(self.ccx, def_id, ExprId(0), - self.param_substs).val - } - None => { - let mut unwresume = self.ccx.eh_unwind_resume().borrow_mut(); - match *unwresume { - Some(llfn) => llfn, - None => { - let fty = Type::func(&[Type::i8p(self.ccx)], &Type::void(self.ccx)); - let llfn = declare::declare_fn(self.ccx, - "rust_eh_unwind_resume", - llvm::CCallConv, - fty, ty::FnDiverging); - attributes::unwind(llfn, true); - *unwresume = Some(llfn); - llfn - } - } - } + let ccx = self.ccx; + let tcx = ccx.tcx(); + assert!(ccx.sess().target.target.options.custom_unwind_resume); + if let Some(def_id) = tcx.lang_items.eh_unwind_resume() { + return Callee::def(ccx, def_id, tcx.mk_substs(Substs::empty())); } + + let ty = tcx.mk_fn_ptr(ty::BareFnTy { + unsafety: hir::Unsafety::Unsafe, + abi: Abi::C, + sig: ty::Binder(ty::FnSig { + inputs: vec![tcx.mk_mut_ptr(tcx.types.u8)], + output: ty::FnDiverging, + variadic: false + }), + }); + + let unwresume = ccx.eh_unwind_resume(); + if let Some(llfn) = unwresume.get() { + return Callee::ptr(datum::immediate_rvalue(llfn, ty)); + } + let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty); + attributes::unwind(llfn, true); + unwresume.set(Some(llfn)); + Callee::ptr(datum::immediate_rvalue(llfn, ty)) } } @@ -630,7 +580,7 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> { self.lpad.get() } - pub fn mir(&self) -> &'blk Mir<'tcx> { + pub fn mir(&self) -> CachedMir<'blk, 'tcx> { self.fcx.mir() } @@ -652,14 +602,6 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> { } } - pub fn val_to_string(&self, val: ValueRef) -> String { - self.ccx().tn().val_to_string(val) - } - - pub fn llty_str(&self, ty: Type) -> String { - self.ccx().tn().type_to_string(ty) - } - pub fn to_str(&self) -> String { 
format!("[block {:p}]", self) } @@ -746,6 +688,10 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { // Methods delegated to bcx + pub fn is_unreachable(&self) -> bool { + self.bcx.unreachable.get() + } + pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> { self.bcx.ccx() } @@ -763,14 +709,10 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { self.bcx.llbb } - pub fn mir(&self) -> &'blk Mir<'tcx> { + pub fn mir(&self) -> CachedMir<'blk, 'tcx> { self.bcx.mir() } - pub fn val_to_string(&self, val: ValueRef) -> String { - self.bcx.val_to_string(val) - } - pub fn monomorphize(&self, value: &T) -> T where T: TypeFoldable<'tcx> { @@ -1028,15 +970,15 @@ pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef { } } -pub fn const_get_elt(cx: &CrateContext, v: ValueRef, us: &[c_uint]) +pub fn const_get_elt(v: ValueRef, us: &[c_uint]) -> ValueRef { unsafe { let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); - debug!("const_get_elt(v={}, us={:?}, r={})", - cx.tn().val_to_string(v), us, cx.tn().val_to_string(r)); + debug!("const_get_elt(v={:?}, us={:?}, r={:?})", + Value(v), us, Value(r)); - return r; + r } } @@ -1215,41 +1157,6 @@ pub fn normalize_and_test_predicates<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, infer::drain_fulfillment_cx(&infcx, &mut fulfill_cx, &()).is_ok() } -// Key used to lookup values supplied for type parameters in an expr. -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum ExprOrMethodCall { - // Type parameters for a path like `None::` - ExprId(ast::NodeId), - - // Type parameters for a method call like `a.foo::()` - MethodCallKey(ty::MethodCall) -} - -pub fn node_id_substs<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - node: ExprOrMethodCall, - param_substs: &subst::Substs<'tcx>) - -> &'tcx subst::Substs<'tcx> { - let tcx = ccx.tcx(); - - let substs = match node { - ExprId(id) => { - tcx.node_id_item_substs(id).substs - } - MethodCallKey(method_call) => { - tcx.tables.borrow().method_map[&method_call].substs.clone() - } - }; - - if substs.types.needs_infer() { - tcx.sess.bug(&format!("type parameters for node {:?} include inference types: {:?}", - node, substs)); - } - - ccx.tcx().mk_substs(monomorphize::apply_param_substs(tcx, - param_substs, - &substs.erase_regions())) -} - pub fn langcall(bcx: Block, span: Option, msg: &str, @@ -1351,14 +1258,3 @@ pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, _ => panic!("shift_mask_val: expected Integer or Vector, found {:?}", kind), } } - -pub fn get_static_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - did: DefId, - ty: Ty<'tcx>) - -> ValueRef { - if let Some(node_id) = ccx.tcx().map.as_local_node_id(did) { - base::get_item_val(ccx, node_id) - } else { - base::get_extern_const(ccx, did, ty) - } -} diff --git a/src/librustc_trans/trans/consts.rs b/src/librustc_trans/trans/consts.rs index 7d37627ad0eea..82cd6aace0a35 100644 --- a/src/librustc_trans/trans/consts.rs +++ b/src/librustc_trans/trans/consts.rs @@ -9,27 +9,28 @@ // except according to those terms. 
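// A self-contained sketch of the `Value` debug-wrapper pattern used above:
// instead of a val_to_string helper that needs the crate context, a newtype
// around the raw handle implements fmt::Debug, so values can go straight into
// `{:?}` in debug! calls. The raw-pointer payload stands in for llvm::ValueRef.
use std::fmt;

#[derive(Copy, Clone)]
struct Value(*const ());

impl fmt::Debug for Value {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The real wrapper asks LLVM to print the value; printing the address
        // is enough to show the pattern.
        write!(f, "Value({:p})", self.0)
    }
}

fn main() {
    let x = 42u32;
    let v = Value(&x as *const u32 as *const ());
    println!("const_get_elt(v={:?})", v);
}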
-use back::abi; use llvm; use llvm::{ConstFCmp, ConstICmp, SetLinkage, SetUnnamedAddr}; use llvm::{InternalLinkage, ValueRef, Bool, True}; use middle::const_qualif::ConstQualif; -use middle::cstore::LOCAL_CRATE; use middle::const_eval::{self, ConstEvalErr}; use middle::def::Def; use middle::def_id::DefId; -use trans::{adt, closure, debuginfo, expr, inline, machine}; -use trans::base::{self, push_ctxt}; +use rustc::front::map as hir_map; +use trans::{abi, adt, closure, debuginfo, expr, machine}; +use trans::base::{self, exported_name, imported_name, push_ctxt}; use trans::callee::Callee; use trans::collector::{self, TransItem}; -use trans::common::{self, type_is_sized, ExprOrMethodCall, node_id_substs, C_nil, const_get_elt}; +use trans::common::{type_is_sized, C_nil, const_get_elt}; use trans::common::{CrateContext, C_integral, C_floating, C_bool, C_str_slice, C_bytes, val_ty}; use trans::common::{C_struct, C_undef, const_to_opt_int, const_to_opt_uint, VariantInfo, C_uint}; -use trans::common::{type_is_fat_ptr, Field, C_vector, C_array, C_null, ExprId, MethodCallKey}; +use trans::common::{type_is_fat_ptr, Field, C_vector, C_array, C_null}; +use trans::datum::{Datum, Lvalue}; use trans::declare; -use trans::monomorphize; +use trans::monomorphize::{self, Instance}; use trans::type_::Type; use trans::type_of; +use trans::value::Value; use trans::Disr; use middle::subst::Substs; use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer}; @@ -45,7 +46,7 @@ use std::ffi::{CStr, CString}; use std::borrow::Cow; use libc::c_uint; use syntax::ast::{self, LitKind}; -use syntax::attr; +use syntax::attr::{self, AttrMetaMethods}; use syntax::parse::token; use syntax::ptr::P; @@ -191,27 +192,31 @@ fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } fn const_fn_call<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - node: ExprOrMethodCall, def_id: DefId, + substs: Substs<'tcx>, arg_vals: &[ValueRef], param_substs: &'tcx Substs<'tcx>, trueconst: TrueConst) -> Result { let fn_like = const_eval::lookup_const_fn_by_id(ccx.tcx(), def_id); let fn_like = fn_like.expect("lookup_const_fn_by_id failed in const_fn_call"); + let body = match fn_like.body().expr { + Some(ref expr) => expr, + None => return Ok(C_nil(ccx)) + }; + let args = &fn_like.decl().inputs; assert_eq!(args.len(), arg_vals.len()); let arg_ids = args.iter().map(|arg| arg.pat.id); let fn_args = arg_ids.zip(arg_vals.iter().cloned()).collect(); - let substs = node_id_substs(ccx, node, param_substs); - match fn_like.body().expr { - Some(ref expr) => { - const_expr(ccx, &expr, substs, Some(&fn_args), trueconst).map(|(res, _)| res) - }, - None => Ok(C_nil(ccx)), - } + let substs = monomorphize::apply_param_substs(ccx.tcx(), + param_substs, + &substs.erase_regions()); + let substs = ccx.tcx().mk_substs(substs); + + const_expr(ccx, body, substs, Some(&fn_args), trueconst).map(|(res, _)| res) } pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, @@ -219,14 +224,11 @@ pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ref_expr: &hir::Expr, param_substs: &'tcx Substs<'tcx>) -> &'tcx hir::Expr { - let def_id = inline::maybe_instantiate_inline(ccx, def_id); - - if def_id.krate != LOCAL_CRATE { - ccx.sess().span_bug(ref_expr.span, - "cross crate constant could not be inlined"); - } - - match const_eval::lookup_const_by_id(ccx.tcx(), def_id, Some(ref_expr.id), Some(param_substs)) { + let substs = ccx.tcx().node_id_item_substs(ref_expr.id).substs; + let substs = monomorphize::apply_param_substs(ccx.tcx(), + param_substs, + 
&substs.erase_regions()); + match const_eval::lookup_const_by_id(ccx.tcx(), def_id, Some(substs)) { Some((ref expr, _ty)) => expr, None => { ccx.sess().span_bug(ref_expr.span, "constant item not found") @@ -351,9 +353,7 @@ pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, Some(AdjustReifyFnPointer) => { match ety.sty { ty::TyFnDef(def_id, substs, _) => { - let datum = Callee::def(cx, def_id, substs, ety).reify(cx); - llconst = datum.val; - ety_adjusted = datum.ty; + llconst = Callee::def(cx, def_id, substs).reify(cx).val; } _ => { unreachable!("{} cannot be reified to a fn ptr", ety) @@ -405,8 +405,8 @@ pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // to use a different vtable. In that case, we want to // load out the original data pointer so we can repackage // it. - (const_get_elt(cx, llconst, &[abi::FAT_PTR_ADDR as u32]), - Some(const_get_elt(cx, llconst, &[abi::FAT_PTR_EXTRA as u32]))) + (const_get_elt(llconst, &[abi::FAT_PTR_ADDR as u32]), + Some(const_get_elt(llconst, &[abi::FAT_PTR_EXTRA as u32]))) } else { (llconst, None) }; @@ -595,17 +595,15 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, /* Neither type is bottom, and we expect them to be unified * already, so the following is safe. */ let (te1, ty) = try!(const_expr(cx, &e1, param_substs, fn_args, trueconst)); - debug!("const_expr_unadjusted: te1={}, ty={:?}", - cx.tn().val_to_string(te1), - ty); + debug!("const_expr_unadjusted: te1={:?}, ty={:?}", + Value(te1), ty); assert!(!ty.is_simd()); let is_float = ty.is_fp(); let signed = ty.is_signed(); let (te2, ty2) = try!(const_expr(cx, &e2, param_substs, fn_args, trueconst)); - debug!("const_expr_unadjusted: te2={}, ty={:?}", - cx.tn().val_to_string(te2), - ty2); + debug!("const_expr_unadjusted: te2={:?}, ty={:?}", + Value(te2), ty2); try!(check_binary_expr_validity(cx, e, ty, te1, te2, trueconst)); @@ -689,8 +687,8 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let (arr, len) = match bt.sty { ty::TyArray(_, u) => (bv, C_uint(cx, u)), ty::TySlice(..) 
| ty::TyStr => { - let e1 = const_get_elt(cx, bv, &[0]); - (load_const(cx, e1, bt), const_get_elt(cx, bv, &[1])) + let e1 = const_get_elt(bv, &[0]); + (load_const(cx, e1, bt), const_get_elt(bv, &[1])) }, ty::TyRef(_, mt) => match mt.ty.sty { ty::TyArray(_, u) => { @@ -725,7 +723,7 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, "const index-expr is out of bounds"); C_undef(val_ty(arr).element_type()) } else { - const_get_elt(cx, arr, &[iv as c_uint]) + const_get_elt(arr, &[iv as c_uint]) } }, hir::ExprCast(ref base, _) => { @@ -741,10 +739,10 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let t_cast_inner = t_cast.builtin_deref(true, ty::NoPreference).expect("cast to non-pointer").ty; let ptr_ty = type_of::in_memory_type_of(cx, t_cast_inner).ptr_to(); - let addr = ptrcast(const_get_elt(cx, v, &[abi::FAT_PTR_ADDR as u32]), + let addr = ptrcast(const_get_elt(v, &[abi::FAT_PTR_ADDR as u32]), ptr_ty); if type_is_fat_ptr(cx.tcx(), t_cast) { - let info = const_get_elt(cx, v, &[abi::FAT_PTR_EXTRA as u32]); + let info = const_get_elt(v, &[abi::FAT_PTR_EXTRA as u32]); return Ok(C_struct(cx, &[addr, info], false)) } else { return Ok(addr); @@ -756,7 +754,7 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ) { (CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => { let repr = adt::represent_type(cx, t_expr); - let discr = adt::const_get_discrim(cx, &repr, v); + let discr = adt::const_get_discrim(&repr, v); let iv = C_integral(cx.int_type(), discr.0, false); let s = adt::is_discr_signed(&repr) as Bool; llvm::LLVMConstIntCast(iv, llty.to_ref(), s) @@ -809,7 +807,7 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } let opt_def = cx.tcx().def_map.borrow().get(&cur.id).map(|d| d.full_def()); if let Some(Def::Static(def_id, _)) = opt_def { - common::get_static_val(cx, def_id, ety) + get_static(cx, def_id).val } else { // If this isn't the address of a static, then keep going through // normal constant evaluation. 
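// A toy model of the fat-pointer constant handling above: a fat pointer is a
// two-field aggregate, and const_get_elt(v, &[FAT_PTR_ADDR]) /
// const_get_elt(v, &[FAT_PTR_EXTRA]) pick out the data pointer and the extra
// word (length or vtable). The index values mirror trans::abi; everything
// else here is an illustrative stand-in, not rustc code.
const FAT_PTR_ADDR: usize = 0;
const FAT_PTR_EXTRA: usize = 1;

#[derive(Copy, Clone, Debug)]
struct FatPtrConst {
    fields: [u64; 2], // [data pointer, length or vtable pointer]
}

fn const_get_elt(v: FatPtrConst, idx: &[usize]) -> u64 {
    v.fields[idx[0]]
}

fn main() {
    let s = b"hello";
    let fat = FatPtrConst { fields: [s.as_ptr() as u64, s.len() as u64] };
    let addr = const_get_elt(fat, &[FAT_PTR_ADDR]);
    let len = const_get_elt(fat, &[FAT_PTR_EXTRA]);
    assert_eq!(addr, s.as_ptr() as u64);
    assert_eq!(len, 5);
}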
@@ -947,8 +945,8 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, Def::Fn(did) | Def::Method(did) => { try!(const_fn_call( cx, - ExprId(callee.id), did, + cx.tcx().node_id_item_substs(callee.id).substs, &arg_vals, param_substs, trueconst, @@ -976,9 +974,9 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, hir::ExprMethodCall(_, _, ref args) => { let arg_vals = try!(map_list(args)); let method_call = ty::MethodCall::expr(e.id); - let method_did = cx.tcx().tables.borrow().method_map[&method_call].def_id; - try!(const_fn_call(cx, MethodCallKey(method_call), - method_did, &arg_vals, param_substs, trueconst)) + let method = cx.tcx().tables.borrow().method_map[&method_call]; + try!(const_fn_call(cx, method.def_id, method.substs.clone(), + &arg_vals, param_substs, trueconst)) }, hir::ExprType(ref e, _) => try!(const_expr(cx, &e, param_substs, fn_args, trueconst)).0, hir::ExprBlock(ref block) => { @@ -1001,8 +999,7 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, body, e.id, def_id, - substs, - &e.attrs); + substs); } _ => cx.sess().span_bug( @@ -1016,6 +1013,125 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }) } +pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId) + -> Datum<'tcx, Lvalue> { + let ty = ccx.tcx().lookup_item_type(def_id).ty; + + let instance = Instance::mono(ccx.tcx(), def_id); + if let Some(&g) = ccx.instances().borrow().get(&instance) { + return Datum::new(g, ty, Lvalue::new("static")); + } + + let g = if let Some(id) = ccx.tcx().map.as_local_node_id(def_id) { + let llty = type_of::type_of(ccx, ty); + match ccx.tcx().map.get(id) { + hir_map::NodeItem(&hir::Item { + ref attrs, span, node: hir::ItemStatic(..), .. + }) => { + // If this static came from an external crate, then + // we need to get the symbol from metadata instead of + // using the current crate's name/version + // information in the hash of the symbol + let sym = exported_name(ccx, id, ty, attrs); + debug!("making {}", sym); + + // Create the global before evaluating the initializer; + // this is necessary to allow recursive statics. + let g = declare::define_global(ccx, &sym, llty).unwrap_or_else(|| { + ccx.sess().span_fatal(span, + &format!("symbol `{}` is already defined", sym)) + }); + + ccx.item_symbols().borrow_mut().insert(id, sym); + g + } + + hir_map::NodeForeignItem(&hir::ForeignItem { + ref attrs, name, span, node: hir::ForeignItemStatic(..), .. + }) => { + let ident = imported_name(name, attrs); + let g = if let Some(name) = + attr::first_attr_value_str_by_name(&attrs, "linkage") { + // If this is a static with a linkage specified, then we need to handle + // it a little specially. The typesystem prevents things like &T and + // extern "C" fn() from being non-null, so we can't just declare a + // static and call it a day. Some linkages (like weak) will make it such + // that the static actually has a null value. + let linkage = match base::llvm_linkage_by_name(&name) { + Some(linkage) => linkage, + None => { + ccx.sess().span_fatal(span, "invalid linkage specified"); + } + }; + let llty2 = match ty.sty { + ty::TyRawPtr(ref mt) => type_of::type_of(ccx, mt.ty), + _ => { + ccx.sess().span_fatal(span, "must have type `*const T` or `*mut T`"); + } + }; + unsafe { + // Declare a symbol `foo` with the desired linkage. + let g1 = declare::declare_global(ccx, &ident, llty2); + llvm::SetLinkage(g1, linkage); + + // Declare an internal global `extern_with_linkage_foo` which + // is initialized with the address of `foo`. 
If `foo` is + // discarded during linking (for example, if `foo` has weak + // linkage and there are no definitions), then + // `extern_with_linkage_foo` will instead be initialized to + // zero. + let mut real_name = "_rust_extern_with_linkage_".to_string(); + real_name.push_str(&ident); + let g2 = declare::define_global(ccx, &real_name, llty).unwrap_or_else(||{ + ccx.sess().span_fatal(span, + &format!("symbol `{}` is already defined", ident)) + }); + llvm::SetLinkage(g2, llvm::InternalLinkage); + llvm::LLVMSetInitializer(g2, g1); + g2 + } + } else { + // Generate an external declaration. + declare::declare_global(ccx, &ident, llty) + }; + + for attr in attrs { + if attr.check_name("thread_local") { + llvm::set_thread_local(g, true); + } + } + + g + } + + item => unreachable!("get_static: expected static, found {:?}", item) + } + } else { + // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? + // FIXME(nagisa): investigate whether it can be changed into define_global + let name = ccx.sess().cstore.item_symbol(def_id); + let g = declare::declare_global(ccx, &name, type_of::type_of(ccx, ty)); + // Thread-local statics in some other crate need to *always* be linked + // against in a thread-local fashion, so we need to be sure to apply the + // thread-local attribute locally if it was present remotely. If we + // don't do this then linker errors can be generated where the linker + // complains that one object files has a thread local version of the + // symbol and another one doesn't. + for attr in ccx.tcx().get_attrs(def_id).iter() { + if attr.check_name("thread_local") { + llvm::set_thread_local(g, true); + } + } + if ccx.use_dll_storage_attrs() { + llvm::SetDLLStorageClass(g, llvm::DLLImportStorageClass); + } + g + }; + + ccx.instances().borrow_mut().insert(instance, g); + Datum::new(g, ty, Lvalue::new("static")) +} + pub fn trans_static(ccx: &CrateContext, m: hir::Mutability, expr: &hir::Expr, @@ -1029,7 +1145,8 @@ pub fn trans_static(ccx: &CrateContext, unsafe { let _icx = push_ctxt("trans_static"); - let g = base::get_item_val(ccx, id); + let def_id = ccx.tcx().map.local_def_id(id); + let datum = get_static(ccx, def_id); let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); let (v, _) = try!(const_expr( @@ -1042,40 +1159,39 @@ pub fn trans_static(ccx: &CrateContext, // boolean SSA values are i1, but they have to be stored in i8 slots, // otherwise some LLVM optimization passes don't work as expected - let mut val_llty = llvm::LLVMTypeOf(v); - let v = if val_llty == Type::i1(ccx).to_ref() { - val_llty = Type::i8(ccx).to_ref(); - llvm::LLVMConstZExt(v, val_llty) + let mut val_llty = val_ty(v); + let v = if val_llty == Type::i1(ccx) { + val_llty = Type::i8(ccx); + llvm::LLVMConstZExt(v, val_llty.to_ref()) } else { v }; - let ty = ccx.tcx().node_id_to_type(id); - let llty = type_of::type_of(ccx, ty); - let g = if val_llty == llty.to_ref() { - g + let llty = type_of::type_of(ccx, datum.ty); + let g = if val_llty == llty { + datum.val } else { // If we created the global with the wrong type, // correct the type. 
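// A simplified model of the memoization in get_static above: translating a
// static first checks the instance-keyed cache, declares the global only on a
// miss, and records it so later references reuse the same handle. DefId, the
// u64 "global handle" and declare_global are illustrative stand-ins.
use std::cell::RefCell;
use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct DefId(u32);

struct Ctxt {
    instances: RefCell<HashMap<DefId, u64>>,
    next_handle: RefCell<u64>,
}

impl Ctxt {
    fn declare_global(&self, def_id: &DefId) -> u64 {
        let mut next = self.next_handle.borrow_mut();
        *next += 1;
        println!("declaring global for {:?} as handle {}", def_id, *next);
        *next
    }

    fn get_static(&self, def_id: DefId) -> u64 {
        if let Some(&g) = self.instances.borrow().get(&def_id) {
            return g;
        }
        let g = self.declare_global(&def_id);
        self.instances.borrow_mut().insert(def_id, g);
        g
    }
}

fn main() {
    let ccx = Ctxt {
        instances: RefCell::new(HashMap::new()),
        next_handle: RefCell::new(0),
    };
    let a = ccx.get_static(DefId(3));
    let b = ccx.get_static(DefId(3));
    assert_eq!(a, b); // declared once, reused afterwards
}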
let empty_string = CString::new("").unwrap(); - let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); + let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(datum.val)); let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); - llvm::LLVMSetValueName(g, empty_string.as_ptr()); + llvm::LLVMSetValueName(datum.val, empty_string.as_ptr()); let new_g = llvm::LLVMGetOrInsertGlobal( - ccx.llmod(), name_string.as_ptr(), val_llty); + ccx.llmod(), name_string.as_ptr(), val_llty.to_ref()); // To avoid breaking any invariants, we leave around the old // global for the moment; we'll replace all references to it // with the new global later. (See base::trans_crate.) - ccx.statics_to_rauw().borrow_mut().push((g, new_g)); + ccx.statics_to_rauw().borrow_mut().push((datum.val, new_g)); new_g }; - llvm::LLVMSetAlignment(g, type_of::align_of(ccx, ty)); + llvm::LLVMSetAlignment(g, type_of::align_of(ccx, datum.ty)); llvm::LLVMSetInitializer(g, v); // As an optimization, all shared statics which do not have interior // mutability are placed into read-only memory. if m != hir::MutMutable { - let tcontents = ty.type_contents(ccx.tcx()); + let tcontents = datum.ty.type_contents(ccx.tcx()); if !tcontents.interior_unsafe() { llvm::LLVMSetGlobalConstant(g, llvm::True); } diff --git a/src/librustc_trans/trans/context.rs b/src/librustc_trans/trans/context.rs index eb5ca7722fd3e..8f5572f5c4e0e 100644 --- a/src/librustc_trans/trans/context.rs +++ b/src/librustc_trans/trans/context.rs @@ -16,14 +16,16 @@ use middle::def::ExportMap; use middle::def_id::DefId; use middle::traits; use rustc::mir::mir_map::MirMap; +use rustc::mir::repr as mir; use trans::adt; use trans::base; use trans::builder::Builder; -use trans::common::{ExternMap,BuilderRef_res}; +use trans::common::BuilderRef_res; use trans::debuginfo; use trans::declare; use trans::glue::DropGlueKind; -use trans::monomorphize::MonoId; +use trans::mir::CachedMir; +use trans::monomorphize::Instance; use trans::collector::{TransItem, TransItemState}; use trans::type_::{Type, TypeNames}; use middle::subst::Substs; @@ -75,6 +77,7 @@ pub struct SharedCrateContext<'a, 'tcx: 'a> { check_overflow: bool, check_drop_flag_for_sanity: bool, mir_map: &'a MirMap<'tcx>, + mir_cache: RefCell>>>, available_drop_glues: RefCell, String>>, use_dll_storage_attrs: bool, @@ -90,8 +93,6 @@ pub struct LocalCrateContext<'tcx> { llmod: ModuleRef, llcx: ContextRef, tn: TypeNames, - externs: RefCell, - item_vals: RefCell>, needs_unwind_cleanup_cache: RefCell, bool>>, fn_pointer_shims: RefCell, ValueRef>>, drop_glues: RefCell, ValueRef>>, @@ -100,8 +101,8 @@ pub struct LocalCrateContext<'tcx> { /// Backwards version of the `external` map (inlined items to where they /// came from) external_srcs: RefCell>, - /// Cache instances of monomorphized functions - monomorphized: RefCell, ValueRef>>, + /// Cache instances of monomorphic and polymorphic items + instances: RefCell, ValueRef>>, monomorphizing: RefCell>, available_monomorphizations: RefCell>, /// Cache generated vtables @@ -148,13 +149,13 @@ pub struct LocalCrateContext<'tcx> { builder: BuilderRef_res, /// Holds the LLVM values for closure IDs. 
- closure_vals: RefCell, ValueRef>>, + closure_vals: RefCell, ValueRef>>, dbg_cx: Option>, - eh_personality: RefCell>, - eh_unwind_resume: RefCell>, - rust_try_fn: RefCell>, + eh_personality: Cell>, + eh_unwind_resume: Cell>, + rust_try_fn: Cell>, intrinsics: RefCell>, @@ -340,6 +341,7 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { symbol_hasher: RefCell::new(symbol_hasher), tcx: tcx, mir_map: mir_map, + mir_cache: RefCell::new(DefIdMap()), stats: Stats { n_glues_created: Cell::new(0), n_null_glues: Cell::new(0), @@ -464,14 +466,12 @@ impl<'tcx> LocalCrateContext<'tcx> { llmod: llmod, llcx: llcx, tn: TypeNames::new(), - externs: RefCell::new(FnvHashMap()), - item_vals: RefCell::new(NodeMap()), needs_unwind_cleanup_cache: RefCell::new(FnvHashMap()), fn_pointer_shims: RefCell::new(FnvHashMap()), drop_glues: RefCell::new(FnvHashMap()), external: RefCell::new(DefIdMap()), external_srcs: RefCell::new(NodeMap()), - monomorphized: RefCell::new(FnvHashMap()), + instances: RefCell::new(FnvHashMap()), monomorphizing: RefCell::new(DefIdMap()), available_monomorphizations: RefCell::new(FnvHashSet()), vtables: RefCell::new(FnvHashMap()), @@ -492,9 +492,9 @@ impl<'tcx> LocalCrateContext<'tcx> { builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)), closure_vals: RefCell::new(FnvHashMap()), dbg_cx: dbg_cx, - eh_personality: RefCell::new(None), - eh_unwind_resume: RefCell::new(None), - rust_try_fn: RefCell::new(None), + eh_personality: Cell::new(None), + eh_unwind_resume: Cell::new(None), + rust_try_fn: Cell::new(None), intrinsics: RefCell::new(FnvHashMap()), n_llvm_insns: Cell::new(0), type_of_depth: Cell::new(0), @@ -616,14 +616,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local.tn } - pub fn externs<'a>(&'a self) -> &'a RefCell { - &self.local.externs - } - - pub fn item_vals<'a>(&'a self) -> &'a RefCell> { - &self.local.item_vals - } - pub fn export_map<'a>(&'a self) -> &'a ExportMap { &self.shared.export_map } @@ -660,8 +652,8 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local.external_srcs } - pub fn monomorphized<'a>(&'a self) -> &'a RefCell, ValueRef>> { - &self.local.monomorphized + pub fn instances<'a>(&'a self) -> &'a RefCell, ValueRef>> { + &self.local.instances } pub fn monomorphizing<'a>(&'a self) -> &'a RefCell> { @@ -746,7 +738,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { self.local.opaque_vec_type } - pub fn closure_vals<'a>(&'a self) -> &'a RefCell, ValueRef>> { + pub fn closure_vals<'a>(&'a self) -> &'a RefCell, ValueRef>> { &self.local.closure_vals } @@ -754,15 +746,15 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local.dbg_cx } - pub fn eh_personality<'a>(&'a self) -> &'a RefCell> { + pub fn eh_personality<'a>(&'a self) -> &'a Cell> { &self.local.eh_personality } - pub fn eh_unwind_resume<'a>(&'a self) -> &'a RefCell> { + pub fn eh_unwind_resume<'a>(&'a self) -> &'a Cell> { &self.local.eh_unwind_resume } - pub fn rust_try_fn<'a>(&'a self) -> &'a RefCell> { + pub fn rust_try_fn<'a>(&'a self) -> &'a Cell> { &self.local.rust_try_fn } @@ -829,8 +821,22 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { self.shared.use_dll_storage_attrs() } - pub fn mir_map(&self) -> &'b MirMap<'tcx> { - self.shared.mir_map + pub fn get_mir(&self, def_id: DefId) -> Option> { + if def_id.is_local() { + let node_id = self.tcx().map.as_local_node_id(def_id).unwrap(); + self.shared.mir_map.map.get(&node_id).map(CachedMir::Ref) + } else { + if let Some(mir) = self.shared.mir_cache.borrow().get(&def_id).cloned() { + return Some(CachedMir::Owned(mir)); + } + + let mir = 
self.sess().cstore.maybe_get_item_mir(self.tcx(), def_id); + let cached = mir.map(Rc::new); + if let Some(ref mir) = cached { + self.shared.mir_cache.borrow_mut().insert(def_id, mir.clone()); + } + cached.map(CachedMir::Owned) + } } pub fn translation_items(&self) -> &RefCell, TransItemState>> { @@ -865,8 +871,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option { macro_rules! ifn { ($name:expr, fn() -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(ccx, $name, Type::func(&[], &$ret), - ccx.tcx().mk_nil()); + let f = declare::declare_cfn(ccx, $name, Type::func(&[], &$ret)); llvm::SetUnnamedAddr(f, false); ccx.intrinsics().borrow_mut().insert($name, f.clone()); return Some(f); @@ -874,9 +879,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option { ); ($name:expr, fn(...) -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(ccx, $name, - Type::variadic_func(&[], &$ret), - ccx.tcx().mk_nil()); + let f = declare::declare_cfn(ccx, $name, Type::variadic_func(&[], &$ret)); llvm::SetUnnamedAddr(f, false); ccx.intrinsics().borrow_mut().insert($name, f.clone()); return Some(f); @@ -884,8 +887,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option { ); ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( if key == $name { - let f = declare::declare_cfn(ccx, $name, Type::func(&[$($arg),*], &$ret), - ccx.tcx().mk_nil()); + let f = declare::declare_cfn(ccx, $name, Type::func(&[$($arg),*], &$ret)); llvm::SetUnnamedAddr(f, false); ccx.intrinsics().borrow_mut().insert($name, f.clone()); return Some(f); @@ -1032,8 +1034,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option { ifn!($name, fn($($arg),*) -> void); } else if key == $name { let f = declare::declare_cfn(ccx, stringify!($cname), - Type::func(&[$($arg),*], &void), - ccx.tcx().mk_nil()); + Type::func(&[$($arg),*], &void)); llvm::SetLinkage(f, llvm::InternalLinkage); let bld = ccx.builder(); @@ -1055,8 +1056,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option { ifn!($name, fn($($arg),*) -> $ret); } else if key == $name { let f = declare::declare_cfn(ccx, stringify!($cname), - Type::func(&[$($arg),*], &$ret), - ccx.tcx().mk_nil()); + Type::func(&[$($arg),*], &$ret)); ccx.intrinsics().borrow_mut().insert($name, f.clone()); return Some(f); } diff --git a/src/librustc_trans/trans/controlflow.rs b/src/librustc_trans/trans/controlflow.rs index e84e1b45cedcd..91454df156655 100644 --- a/src/librustc_trans/trans/controlflow.rs +++ b/src/librustc_trans/trans/controlflow.rs @@ -11,10 +11,11 @@ use llvm::ValueRef; use middle::def::Def; use middle::lang_items::{PanicFnLangItem, PanicBoundsCheckFnLangItem}; +use middle::subst::Substs; use trans::base::*; use trans::basic_block::BasicBlock; use trans::build::*; -use trans::callee; +use trans::callee::{Callee, ArgVals}; use trans::cleanup::CleanupMethods; use trans::cleanup; use trans::common::*; @@ -24,7 +25,6 @@ use trans::debuginfo::{DebugLoc, ToDebugLoc}; use trans::expr; use trans::machine; use trans; -use middle::ty; use rustc_front::hir; use rustc_front::util as ast_util; @@ -152,9 +152,8 @@ pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, els: Option<&hir::Expr>, dest: expr::Dest) -> Block<'blk, 'tcx> { - debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={})", - bcx.to_str(), if_id, cond, thn.id, - dest.to_string(bcx.ccx())); + debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={:?})", + bcx.to_str(), if_id, cond, thn.id, dest); let _icx = push_ctxt("trans_if"); if bcx.unreachable.get() { @@ 
-363,14 +362,12 @@ pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let fcx = bcx.fcx; let mut bcx = bcx; - let dest = match (fcx.llretslotptr.get(), retval_expr) { - (Some(_), Some(retval_expr)) => { - let ret_ty = expr_ty_adjusted(bcx, &retval_expr); - expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(ret_ty), "ret_slot")) - } - _ => expr::Ignore, - }; if let Some(x) = retval_expr { + let dest = if fcx.llretslotptr.get().is_some() { + expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")) + } else { + expr::Ignore + }; bcx = expr::trans_into(bcx, &x, dest); match dest { expr::SaveIn(slot) if fcx.needs_ret_allocas => { @@ -406,13 +403,8 @@ pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let expr_file_line = consts::addr_of(ccx, expr_file_line_const, align, "panic_loc"); let args = vec!(expr_file_line); let did = langcall(bcx, Some(call_info.span), "", PanicFnLangItem); - let bcx = callee::trans_lang_call(bcx, - did, - &args[..], - Some(expr::Ignore), - call_info.debug_loc()).bcx; - Unreachable(bcx); - return bcx; + Callee::def(ccx, did, ccx.tcx().mk_substs(Substs::empty())) + .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx } pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, @@ -439,11 +431,6 @@ pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let file_line = consts::addr_of(ccx, file_line_const, align, "panic_bounds_check_loc"); let args = vec!(file_line, index, len); let did = langcall(bcx, Some(call_info.span), "", PanicBoundsCheckFnLangItem); - let bcx = callee::trans_lang_call(bcx, - did, - &args[..], - Some(expr::Ignore), - call_info.debug_loc()).bcx; - Unreachable(bcx); - return bcx; + Callee::def(ccx, did, ccx.tcx().mk_substs(Substs::empty())) + .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx } diff --git a/src/librustc_trans/trans/datum.rs b/src/librustc_trans/trans/datum.rs index 32f263746d31e..649f6180de20b 100644 --- a/src/librustc_trans/trans/datum.rs +++ b/src/librustc_trans/trans/datum.rs @@ -101,6 +101,7 @@ use trans::cleanup; use trans::cleanup::{CleanupMethods, DropHintDatum, DropHintMethods}; use trans::expr; use trans::tvec; +use trans::value::Value; use middle::ty::Ty; use std::fmt; @@ -111,7 +112,7 @@ use syntax::codemap::DUMMY_SP; /// describes where the value is stored, what Rust type the value has, /// whether it is addressed by reference, and so forth. Please refer /// the section on datums in `README.md` for more details. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy)] pub struct Datum<'tcx, K> { /// The llvm value. This is either a pointer to the Rust value or /// the value itself, depending on `kind` below. @@ -124,6 +125,13 @@ pub struct Datum<'tcx, K> { pub kind: K, } +impl<'tcx, K: fmt::Debug> fmt::Debug for Datum<'tcx, K> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Datum({:?}, {:?}, {:?})", + Value(self.val), self.ty, self.kind) + } +} + pub struct DatumBlock<'blk, 'tcx: 'blk, K> { pub bcx: Block<'blk, 'tcx>, pub datum: Datum<'tcx, K>, @@ -298,24 +306,23 @@ pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, /// caller can prove that either (1.) the code injected by `populate` /// onto `bcx` always dominates the end of `scope`, or (2.) the data /// being allocated has no associated destructor. 
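// A small standalone illustration of the signature change to
// lvalue_scratch_datum below: instead of threading an explicit `arg: A`
// through an `FnOnce(A, ...)` callback, the callback simply captures what it
// needs, which removes the extra type parameter at every call site. The types
// here are placeholders, not the real Block/Datum machinery.

// Old shape: the datum has to be passed back in as `arg`.
fn with_scratch_arg<A, F>(scratch: &mut Vec<u8>, arg: A, populate: F)
    where F: FnOnce(A, &mut Vec<u8>)
{
    populate(arg, scratch);
}

// New shape: the closure captures the datum itself.
fn with_scratch<F>(scratch: &mut Vec<u8>, populate: F)
    where F: FnOnce(&mut Vec<u8>)
{
    populate(scratch);
}

fn main() {
    let payload = vec![1u8, 2, 3];

    let mut slot_a = Vec::new();
    with_scratch_arg(&mut slot_a, payload.clone(), |data, slot| slot.extend(data));

    let mut slot_b = Vec::new();
    with_scratch(&mut slot_b, |slot| slot.extend(payload.clone()));

    assert_eq!(slot_a, slot_b);
}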
-pub fn lvalue_scratch_datum<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - name: &str, - zero: InitAlloca, - scope: cleanup::ScopeId, - arg: A, - populate: F) - -> DatumBlock<'blk, 'tcx, Lvalue> where - F: FnOnce(A, Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, +pub fn lvalue_scratch_datum<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, + ty: Ty<'tcx>, + name: &str, + zero: InitAlloca, + scope: cleanup::ScopeId, + populate: F) + -> DatumBlock<'blk, 'tcx, Lvalue> where + F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, { // Very subtle: potentially initialize the scratch memory at point where it is alloca'ed. // (See discussion at Issue 30530.) let scratch = alloc_ty_init(bcx, ty, zero, name); - debug!("lvalue_scratch_datum scope={:?} scratch={} ty={:?}", - scope, bcx.ccx().tn().val_to_string(scratch), ty); + debug!("lvalue_scratch_datum scope={:?} scratch={:?} ty={:?}", + scope, Value(scratch), ty); // Subtle. Populate the scratch memory *before* scheduling cleanup. - let bcx = populate(arg, bcx, scratch); + let bcx = populate(bcx, scratch); bcx.fcx.schedule_drop_mem(scope, scratch, ty, None); DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum"))) @@ -351,8 +358,8 @@ fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode, scope: cleanup::ScopeId, val: ValueRef, ty: Ty<'tcx>) { - debug!("add_rvalue_clean scope={:?} val={} ty={:?}", - scope, fcx.ccx.tn().val_to_string(val), ty); + debug!("add_rvalue_clean scope={:?} val={:?} ty={:?}", + scope, Value(val), ty); match mode { ByValue => { fcx.schedule_drop_immediate(scope, val, ty); } ByRef => { @@ -509,14 +516,14 @@ impl<'tcx> Datum<'tcx, Rvalue> { ByValue => { lvalue_scratch_datum( - bcx, self.ty, name, InitAlloca::Dropped, scope, self, - |this, bcx, llval| { + bcx, self.ty, name, InitAlloca::Dropped, scope, + |bcx, llval| { debug!("populate call for Datum::to_lvalue_datum_in_scope \ - self.ty={:?}", this.ty); + self.ty={:?}", self.ty); // do not call_lifetime_start here; the // `InitAlloc::Dropped` will start scratch // value's lifetime at open of function body. - let bcx = this.store_to(bcx, llval); + let bcx = self.store_to(bcx, llval); bcx.fcx.schedule_lifetime_end(scope, llval); bcx }) @@ -617,7 +624,7 @@ impl<'tcx> Datum<'tcx, Expr> { name: &str, expr_id: ast::NodeId) -> DatumBlock<'blk, 'tcx, Lvalue> { - debug!("to_lvalue_datum self: {}", self.to_string(bcx.ccx())); + debug!("to_lvalue_datum self: {:?}", self); self.match_kind( |l| DatumBlock::new(bcx, l), @@ -767,14 +774,6 @@ impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> { self.shallow_copy_raw(bcx, dst) } - #[allow(dead_code)] // useful for debugging - pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String { - format!("Datum({}, {:?}, {:?})", - ccx.tn().val_to_string(self.val), - self.ty, - self.kind) - } - /// See the `appropriate_rvalue_mode()` function pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> RvalueMode { diff --git a/src/librustc_trans/trans/debuginfo/create_scope_map.rs b/src/librustc_trans/trans/debuginfo/create_scope_map.rs index 4ba103c0c0d08..41fed12e7bf3c 100644 --- a/src/librustc_trans/trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/trans/debuginfo/create_scope_map.rs @@ -468,16 +468,13 @@ fn walk_expr(cx: &CrateContext, } } - hir::ExprInlineAsm(hir::InlineAsm { ref inputs, - ref outputs, - .. 
}) => { - // inputs, outputs: Vec<(String, P)> - for &(_, ref exp) in inputs { - walk_expr(cx, &exp, scope_stack, scope_map); + hir::ExprInlineAsm(_, ref outputs, ref inputs) => { + for output in outputs { + walk_expr(cx, output, scope_stack, scope_map); } - for out in outputs { - walk_expr(cx, &out.expr, scope_stack, scope_map); + for input in inputs { + walk_expr(cx, input, scope_stack, scope_map); } } } diff --git a/src/librustc_trans/trans/debuginfo/mod.rs b/src/librustc_trans/trans/debuginfo/mod.rs index 15275a46e9b77..40eb29ed25050 100644 --- a/src/librustc_trans/trans/debuginfo/mod.rs +++ b/src/librustc_trans/trans/debuginfo/mod.rs @@ -32,9 +32,10 @@ use middle::subst::{self, Substs}; use rustc_front; use rustc_front::hir; +use trans::abi::Abi; use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext, Block}; use trans; -use trans::{monomorphize, type_of}; +use trans::monomorphize; use middle::infer; use middle::ty::{self, Ty}; use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo}; @@ -49,7 +50,6 @@ use std::rc::Rc; use syntax::codemap::{Span, Pos}; use syntax::{ast, codemap}; -use syntax::abi::Abi; use syntax::attr::IntType; use syntax::parse::token::{self, special_idents}; @@ -456,10 +456,10 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty::FnDiverging => diverging_type_metadata(cx) }); - let inputs = &if abi == Abi::RustCall { - type_of::untuple_arguments(cx, &sig.inputs) + let inputs = if abi == Abi::RustCall { + &sig.inputs[..sig.inputs.len()-1] } else { - sig.inputs + &sig.inputs[..] }; // Arguments types @@ -467,6 +467,14 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP)); } + if abi == Abi::RustCall && !sig.inputs.is_empty() { + if let ty::TyTuple(ref args) = sig.inputs[sig.inputs.len() - 1].sty { + for &argument_type in args { + signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP)); + } + } + } + return create_DIArray(DIB(cx), &signature[..]); } diff --git a/src/librustc_trans/trans/debuginfo/type_names.rs b/src/librustc_trans/trans/debuginfo/type_names.rs index cc9067677b25b..b71b7789affe2 100644 --- a/src/librustc_trans/trans/debuginfo/type_names.rs +++ b/src/librustc_trans/trans/debuginfo/type_names.rs @@ -107,7 +107,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, output.push_str("unsafe "); } - if abi != ::syntax::abi::Abi::Rust { + if abi != ::trans::abi::Abi::Rust { output.push_str("extern \""); output.push_str(abi.name()); output.push_str("\" "); diff --git a/src/librustc_trans/trans/declare.rs b/src/librustc_trans/trans/declare.rs index 0c512200ff3d5..e63f17770bcec 100644 --- a/src/librustc_trans/trans/declare.rs +++ b/src/librustc_trans/trans/declare.rs @@ -22,13 +22,10 @@ use llvm::{self, ValueRef}; use middle::ty; use middle::infer; -use middle::traits::ProjectionMode; -use syntax::abi::Abi; +use trans::abi::{Abi, FnType}; use trans::attributes; -use trans::base; use trans::context::CrateContext; use trans::type_::Type; -use trans::type_of; use std::ffi::CString; use libc::c_uint; @@ -51,13 +48,10 @@ pub fn declare_global(ccx: &CrateContext, name: &str, ty: Type) -> llvm::ValueRe /// Declare a function. /// -/// For rust functions use `declare_rust_fn` instead. -/// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing ValueRef instead. 
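// A toy version of the RustCall handling added above in
// create_function_debug_context: for the "rust-call" ABI the last input is a
// tuple of the real arguments, so the debuginfo signature lists the leading
// inputs and then the elements of that trailing tuple. `Ty` is a stand-in
// enum, not the compiler's type representation.
#[derive(Clone, PartialEq, Debug)]
enum Ty {
    Int,
    Bool,
    Tuple(Vec<Ty>),
}

fn signature_arg_types(inputs: &[Ty], is_rust_call: bool) -> Vec<Ty> {
    let mut out = Vec::new();
    let regular = if is_rust_call && !inputs.is_empty() {
        &inputs[..inputs.len() - 1]
    } else {
        inputs
    };
    out.extend(regular.iter().cloned());
    if is_rust_call && !inputs.is_empty() {
        if let Ty::Tuple(ref args) = inputs[inputs.len() - 1] {
            out.extend(args.iter().cloned());
        }
    }
    out
}

fn main() {
    // e.g. a closure called as f(self, (i32, bool))
    let inputs = vec![Ty::Int, Ty::Tuple(vec![Ty::Int, Ty::Bool])];
    assert_eq!(signature_arg_types(&inputs, true),
               vec![Ty::Int, Ty::Int, Ty::Bool]);
    assert_eq!(signature_arg_types(&inputs, false), inputs);
}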
-pub fn declare_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, - ty: Type, output: ty::FnOutput) -> ValueRef { - debug!("declare_fn(name={:?})", name); +fn declare_raw_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, ty: Type) -> ValueRef { + debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty); let namebuf = CString::new(name).unwrap_or_else(|_|{ ccx.sess().bug(&format!("name {:?} contains an interior null byte", name)) }); @@ -70,10 +64,6 @@ pub fn declare_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, // be merged. llvm::SetUnnamedAddr(llfn, true); - if output == ty::FnDiverging { - llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoReturn); - } - if ccx.tcx().sess.opts.cg.no_redzone .unwrap_or(ccx.tcx().sess.target.target.options.disable_redzone) { llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoRedZone) @@ -86,13 +76,12 @@ pub fn declare_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, /// Declare a C ABI function. /// /// Only use this for foreign function ABIs and glue. For Rust functions use -/// `declare_rust_fn` instead. +/// `declare_fn` instead. /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing ValueRef instead. -pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type, - output: ty::Ty) -> ValueRef { - declare_fn(ccx, name, llvm::CCallConv, fn_type, ty::FnConverging(output)) +pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type) -> ValueRef { + declare_raw_fn(ccx, name, llvm::CCallConv, fn_type) } @@ -100,53 +89,27 @@ pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type, /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing ValueRef instead. 
-pub fn declare_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, - fn_type: ty::Ty<'tcx>) -> ValueRef { - debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, - fn_type); - - let function_type; // placeholder so that the memory ownership works out ok - let (sig, abi, env) = match fn_type.sty { - ty::TyFnDef(_, _, f) | - ty::TyFnPtr(f) => { - (&f.sig, f.abi, None) - } - ty::TyClosure(closure_did, ref substs) => { - let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), - &ccx.tcx().tables, - ProjectionMode::Any); - function_type = infcx.closure_type(closure_did, substs); - let self_type = base::self_type_for_closure(ccx, closure_did, fn_type); - let llenvironment_type = type_of::type_of_explicit_arg(ccx, self_type); - debug!("declare_rust_fn function_type={:?} self_type={:?}", - function_type, self_type); - (&function_type.sig, Abi::RustCall, Some(llenvironment_type)) - } - _ => ccx.sess().bug("expected closure or fn") - }; - - let sig = ccx.tcx().erase_late_bound_regions(sig); +pub fn declare_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, + fn_type: ty::Ty<'tcx>) -> ValueRef { + debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type); + let abi = fn_type.fn_abi(); + let sig = ccx.tcx().erase_late_bound_regions(fn_type.fn_sig()); let sig = infer::normalize_associated_type(ccx.tcx(), &sig); debug!("declare_rust_fn (after region erasure) sig={:?}", sig); - let llfty = type_of::type_of_rust_fn(ccx, env, &sig, abi); - debug!("declare_rust_fn llfty={}", ccx.tn().type_to_string(llfty)); - // it is ok to directly access sig.0.output because we erased all - // late-bound-regions above - let llfn = declare_fn(ccx, name, llvm::CCallConv, llfty, sig.output); - attributes::from_fn_type(ccx, fn_type).apply_llfn(llfn); - llfn -} + let fty = FnType::new(ccx, abi, &sig, &[]); + let llfn = declare_raw_fn(ccx, name, fty.cconv, fty.llvm_type(ccx)); + if sig.output == ty::FnDiverging { + llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoReturn); + } + + if abi != Abi::Rust && abi != Abi::RustCall { + attributes::unwind(llfn, false); + } + + fty.apply_attrs_llfn(llfn); -/// Declare a Rust function with internal linkage. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing ValueRef instead. -pub fn declare_internal_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, - fn_type: ty::Ty<'tcx>) -> ValueRef { - let llfn = declare_rust_fn(ccx, name, fn_type); - llvm::SetLinkage(llfn, llvm::InternalLinkage); llfn } @@ -166,78 +129,27 @@ pub fn define_global(ccx: &CrateContext, name: &str, ty: Type) -> Option Option { - if get_defined_value(ccx, name).is_some() { - None - } else { - Some(declare_fn(ccx, name, callconv, fn_type, output)) - } -} - - -/// Declare a C ABI function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return None if the name already has a definition associated with it. In that -/// case an error should be reported to the user, because it usually happens due -/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). -/// -/// Only use this for foreign function ABIs and glue. For Rust functions use -/// `declare_rust_fn` instead. 
-pub fn define_cfn(ccx: &CrateContext, name: &str, fn_type: Type, - output: ty::Ty) -> Option { - if get_defined_value(ccx, name).is_some() { - None - } else { - Some(declare_cfn(ccx, name, fn_type, output)) - } -} - - -/// Declare a Rust function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return None if the name already has a definition associated with it. In that -/// case an error should be reported to the user, because it usually happens due -/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). -pub fn define_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, - fn_type: ty::Ty<'tcx>) -> Option { - if get_defined_value(ccx, name).is_some() { - None - } else { - Some(declare_rust_fn(ccx, name, fn_type)) - } -} - - /// Declare a Rust function with an intention to define it. /// /// Use this function when you intend to define a function. This function will /// return panic if the name already has a definition associated with it. This /// can happen with #[no_mangle] or #[export_name], for example. -pub fn define_internal_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - name: &str, - fn_type: ty::Ty<'tcx>) -> ValueRef { +pub fn define_internal_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + name: &str, + fn_type: ty::Ty<'tcx>) -> ValueRef { if get_defined_value(ccx, name).is_some() { ccx.sess().fatal(&format!("symbol `{}` already defined", name)) } else { - declare_internal_rust_fn(ccx, name, fn_type) + let llfn = declare_fn(ccx, name, fn_type); + llvm::SetLinkage(llfn, llvm::InternalLinkage); + llfn } } /// Get defined or externally defined (AvailableExternally linkage) value by /// name. -fn get_defined_value(ccx: &CrateContext, name: &str) -> Option { +pub fn get_defined_value(ccx: &CrateContext, name: &str) -> Option { debug!("get_defined_value(name={:?})", name); let namebuf = CString::new(name).unwrap_or_else(|_|{ ccx.sess().bug(&format!("name {:?} contains an interior null byte", name)) diff --git a/src/librustc_trans/trans/expr.rs b/src/librustc_trans/trans/expr.rs index ae03f58bce0cf..abcd703f33ba3 100644 --- a/src/librustc_trans/trans/expr.rs +++ b/src/librustc_trans/trans/expr.rs @@ -44,19 +44,18 @@ //! expression and ensures that the result has a cleanup associated with it, //! creating a temporary stack slot if necessary. //! -//! - `trans_local_var -> Datum`: looks up a local variable or upvar. +//! - `trans_var -> Datum`: looks up a local variable, upvar or static. 
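The three cases the new `trans_var` has to resolve correspond to ordinary Rust source such as the following standalone sketch. The names are hypothetical; the comments only map source constructs onto the `Def::Static`, `Def::Local` and `Def::Upvar` arms handled further down:

    // Def::Static: a path referring to a static item.
    static COUNTER_START: u32 = 10;

    fn main() {
        // Def::Local: an ordinary local binding.
        let step = 2;

        // Inside the closure, `step` is captured and resolves as Def::Upvar.
        let next = |current: u32| current + step;

        println!("{}", next(COUNTER_START));
    }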
#![allow(non_camel_case_types)] pub use self::Dest::*; use self::lazy_binop_ty::*; -use back::abi; use llvm::{self, ValueRef, TypeKind}; use middle::const_qualif::ConstQualif; use middle::def::Def; use middle::subst::Substs; -use trans::{_match, adt, asm, base, closure, consts, controlflow}; +use trans::{_match, abi, adt, asm, base, closure, consts, controlflow}; use trans::base::*; use trans::build::*; use trans::callee::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp}; @@ -69,6 +68,7 @@ use trans::glue; use trans::machine; use trans::tvec; use trans::type_of; +use trans::value::Value; use trans::Disr; use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer}; use middle::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer}; @@ -85,6 +85,7 @@ use rustc_front::hir; use syntax::{ast, codemap}; use syntax::parse::token::InternedString; +use std::fmt; use std::mem; // Destinations @@ -98,11 +99,11 @@ pub enum Dest { Ignore, } -impl Dest { - pub fn to_string(&self, ccx: &CrateContext) -> String { +impl fmt::Debug for Dest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)), - Ignore => "Ignore".to_string() + SaveIn(v) => write!(f, "SaveIn({:?})", Value(v)), + Ignore => f.write_str("Ignore") } } } @@ -377,15 +378,13 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } Some(adj) => { adj } }; - debug!("unadjusted datum for expr {:?}: {} adjustment={:?}", - expr, - datum.to_string(bcx.ccx()), - adjustment); + debug!("unadjusted datum for expr {:?}: {:?} adjustment={:?}", + expr, datum, adjustment); match adjustment { AdjustReifyFnPointer => { match datum.ty.sty { ty::TyFnDef(def_id, substs, _) => { - datum = Callee::def(bcx.ccx(), def_id, substs, datum.ty) + datum = Callee::def(bcx.ccx(), def_id, substs) .reify(bcx.ccx()).to_expr_datum(); } _ => { @@ -452,7 +451,7 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } } - debug!("after adjustments, datum={}", datum.to_string(bcx.ccx())); + debug!("after adjustments, datum={:?}", datum); DatumBlock::new(bcx, datum) } @@ -462,9 +461,7 @@ fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, target: Datum<'tcx, Rvalue>) -> Block<'blk, 'tcx> { let mut bcx = bcx; - debug!("coerce_unsized({} -> {})", - source.to_string(bcx.ccx()), - target.to_string(bcx.ccx())); + debug!("coerce_unsized({:?} -> {:?})", source, target); match (&source.ty.sty, &target.ty.sty) { (&ty::TyBox(a), &ty::TyBox(b)) | @@ -654,7 +651,8 @@ fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, trans(bcx, &e) } hir::ExprPath(..) 
=> { - trans_def(bcx, expr, bcx.def(expr.id)) + let var = trans_var(bcx, bcx.def(expr.id)); + DatumBlock::new(bcx, var.to_expr_datum()) } hir::ExprField(ref base, name) => { trans_rec_field(bcx, &base, name.node) @@ -854,8 +852,8 @@ fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let (base, len) = base_datum.get_vec_base_and_len(bcx); - debug!("trans_index: base {}", bcx.val_to_string(base)); - debug!("trans_index: len {}", bcx.val_to_string(len)); + debug!("trans_index: base {:?}", Value(base)); + debug!("trans_index: len {:?}", Value(len)); let bounds_check = ICmp(bcx, llvm::IntUGE, @@ -866,7 +864,6 @@ fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let expected = Call(bcx, expect, &[bounds_check, C_bool(ccx, false)], - None, index_expr_debug_loc); bcx = with_cond(bcx, expected, |bcx| { controlflow::trans_fail_bounds_check(bcx, @@ -884,27 +881,40 @@ fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, DatumBlock::new(bcx, elt_datum) } -fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ref_expr: &hir::Expr, - def: Def) - -> DatumBlock<'blk, 'tcx, Expr> { - //! Translates a reference to a path. +/// Translates a reference to a variable. +pub fn trans_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def: Def) + -> Datum<'tcx, Lvalue> { - let _icx = push_ctxt("trans_def_lvalue"); match def { - Def::Static(did, _) => { - let const_ty = expr_ty(bcx, ref_expr); - let val = get_static_val(bcx.ccx(), did, const_ty); - let lval = Lvalue::new("expr::trans_def"); - DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr(lval))) - } - Def::Local(..) | Def::Upvar(..) => { - DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum()) + Def::Static(did, _) => consts::get_static(bcx.ccx(), did), + Def::Upvar(_, nid, _, _) => { + // Can't move upvars, so this is never a ZeroMemLastUse. 
+ let local_ty = node_id_type(bcx, nid); + let lval = Lvalue::new_with_hint("expr::trans_var (upvar)", + bcx, nid, HintKind::ZeroAndMaintain); + match bcx.fcx.llupvars.borrow().get(&nid) { + Some(&val) => Datum::new(val, local_ty, lval), + None => { + bcx.sess().bug(&format!( + "trans_var: no llval for upvar {} found", + nid)); + } + } } - _ => { - bcx.sess().span_bug(ref_expr.span, - &format!("{:?} should not reach expr::trans_def", def)) + Def::Local(_, nid) => { + let datum = match bcx.fcx.lllocals.borrow().get(&nid) { + Some(&v) => v, + None => { + bcx.sess().bug(&format!( + "trans_var: no datum for local/arg {} found", + nid)); + } + }; + debug!("take_local(nid={}, v={:?}, ty={})", + nid, Value(datum.val), datum.ty); + datum } + _ => unreachable!("{:?} should not reach expr::trans_var", def) } } @@ -1027,8 +1037,18 @@ fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, trans_assign_op(bcx, expr, op, &dst, &src) } } - hir::ExprInlineAsm(ref a) => { - asm::trans_inline_asm(bcx, a) + hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => { + let outputs = outputs.iter().map(|output| { + let out_datum = unpack_datum!(bcx, trans(bcx, output)); + unpack_datum!(bcx, out_datum.to_lvalue_datum(bcx, "out", expr.id)) + }).collect(); + let inputs = inputs.iter().map(|input| { + let input = unpack_datum!(bcx, trans(bcx, input)); + let input = unpack_datum!(bcx, input.to_rvalue_datum(bcx, "in")); + input.to_llscalarish(bcx) + }).collect(); + asm::trans_inline_asm(bcx, a, outputs, inputs); + bcx } _ => { bcx.tcx().sess.span_bug( @@ -1131,8 +1151,7 @@ fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, body, expr.id, def_id, - substs, - &expr.attrs).unwrap_or(bcx) + substs).unwrap_or(bcx) } hir::ExprCall(ref f, ref args) => { let method = bcx.tcx().tables.borrow().method_map.get(&method_call).cloned(); @@ -1145,7 +1164,7 @@ fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let f = unpack_datum!(bcx, trans(bcx, f)); (match f.ty.sty { ty::TyFnDef(def_id, substs, _) => { - Callee::def(bcx.ccx(), def_id, substs, f.ty) + Callee::def(bcx.ccx(), def_id, substs) } ty::TyFnPtr(_) => { let f = unpack_datum!(bcx, @@ -1249,48 +1268,6 @@ fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } -/// Translates a reference to a local variable or argument. This always results in an lvalue datum. -pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - def: Def) - -> Datum<'tcx, Lvalue> { - let _icx = push_ctxt("trans_local_var"); - - match def { - Def::Upvar(_, nid, _, _) => { - // Can't move upvars, so this is never a ZeroMemLastUse. 
- let local_ty = node_id_type(bcx, nid); - let lval = Lvalue::new_with_hint("expr::trans_local_var (upvar)", - bcx, nid, HintKind::ZeroAndMaintain); - match bcx.fcx.llupvars.borrow().get(&nid) { - Some(&val) => Datum::new(val, local_ty, lval), - None => { - bcx.sess().bug(&format!( - "trans_local_var: no llval for upvar {} found", - nid)); - } - } - } - Def::Local(_, nid) => { - let datum = match bcx.fcx.lllocals.borrow().get(&nid) { - Some(&v) => v, - None => { - bcx.sess().bug(&format!( - "trans_local_var: no datum for local/arg {} found", - nid)); - } - }; - debug!("take_local(nid={}, v={}, ty={})", - nid, bcx.val_to_string(datum.val), datum.ty); - datum - } - _ => { - bcx.sess().unimpl(&format!( - "unsupported def type in trans_local_var: {:?}", - def)); - } - } -} - fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fields: &[hir::Field], base: Option<&hir::Expr>, @@ -1708,15 +1685,14 @@ fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, if use_fmod { let f64t = Type::f64(bcx.ccx()); let fty = Type::func(&[f64t, f64t], &f64t); - let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty, - tcx.types.f64); + let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty); if lhs_t == tcx.types.f32 { let lhs = FPExt(bcx, lhs, f64t); let rhs = FPExt(bcx, rhs, f64t); - let res = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc); + let res = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc); FPTrunc(bcx, res, Type::f32(bcx.ccx())) } else { - Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc) + Call(bcx, llfn, &[lhs, rhs], binop_debug_loc) } } else { FRem(bcx, lhs, rhs, binop_debug_loc) @@ -1829,12 +1805,10 @@ fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let lhs = unpack_datum!(bcx, trans(bcx, lhs)); let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs")); - debug!("trans_binary (expr {}): lhs={}", - expr.id, lhs.to_string(ccx)); + debug!("trans_binary (expr {}): lhs={:?}", expr.id, lhs); let rhs = unpack_datum!(bcx, trans(bcx, rhs)); let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs")); - debug!("trans_binary (expr {}): rhs={}", - expr.id, rhs.to_string(ccx)); + debug!("trans_binary (expr {}): rhs={:?}", expr.id, rhs); if type_is_fat_ptr(ccx.tcx(), lhs.ty) { assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty), @@ -1933,8 +1907,8 @@ fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let t_out = node_id_type(bcx, id); debug!("trans_cast({:?} as {:?})", t_in, t_out); - let mut ll_t_in = type_of::arg_type_of(ccx, t_in); - let ll_t_out = type_of::arg_type_of(ccx, t_out); + let mut ll_t_in = type_of::immediate_type_of(ccx, t_in); + let ll_t_out = type_of::immediate_type_of(ccx, t_out); // Convert the value to be cast into a ValueRef, either by-ref or // by-value as appropriate given its type: let mut datum = unpack_datum!(bcx, trans(bcx, expr)); @@ -2085,10 +2059,8 @@ fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, -> DatumBlock<'blk, 'tcx, Expr> { let ccx = bcx.ccx(); - debug!("deref_once(expr={:?}, datum={}, method_call={:?})", - expr, - datum.to_string(ccx), - method_call); + debug!("deref_once(expr={:?}, datum={:?}, method_call={:?})", + expr, datum, method_call); let mut bcx = bcx; @@ -2175,8 +2147,8 @@ fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } }; - debug!("deref_once(expr={}, method_call={:?}, result={})", - expr.id, method_call, r.datum.to_string(ccx)); + debug!("deref_once(expr={}, method_call={:?}, result={:?})", + expr.id, method_call, r.datum); return r; } @@ -2291,7 +2263,7 @@ impl OverflowOpViaIntrinsic { -> (Block<'blk, 'tcx>, ValueRef) { let llfn 
= self.to_intrinsic(bcx, lhs_t); - let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc); + let val = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc); let result = ExtractValue(bcx, val, 0); // iN operation result let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?" @@ -2300,7 +2272,7 @@ impl OverflowOpViaIntrinsic { let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1"); Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)], - None, binop_debug_loc); + binop_debug_loc); let bcx = base::with_cond(bcx, cond, |bcx| diff --git a/src/librustc_trans/trans/foreign.rs b/src/librustc_trans/trans/foreign.rs deleted file mode 100644 index cace98a230f61..0000000000000 --- a/src/librustc_trans/trans/foreign.rs +++ /dev/null @@ -1,1072 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - -use back::{abi, link}; -use llvm::{ValueRef, CallConv, get_param}; -use llvm; -use middle::weak_lang_items; -use trans::attributes; -use trans::base::{llvm_linkage_by_name, push_ctxt}; -use trans::base; -use trans::build::*; -use trans::cabi; -use trans::common::*; -use trans::debuginfo::DebugLoc; -use trans::declare; -use trans::expr; -use trans::machine; -use trans::monomorphize; -use trans::type_::Type; -use trans::type_of::*; -use trans::type_of; -use middle::infer; -use middle::ty::{self, Ty, TyCtxt}; -use middle::subst::Substs; - -use std::cmp; -use std::iter::once; -use libc::c_uint; -use syntax::abi::Abi; -use syntax::attr; -use syntax::codemap::Span; -use syntax::parse::token::{InternedString, special_idents}; -use syntax::ast; -use syntax::attr::AttrMetaMethods; - -use rustc_front::print::pprust; -use rustc_front::hir; - -/////////////////////////////////////////////////////////////////////////// -// Type definitions - -struct ForeignTypes<'tcx> { - /// Rust signature of the function - fn_sig: ty::FnSig<'tcx>, - - /// Adapter object for handling native ABI rules (trust me, you - /// don't want to know) - fn_ty: cabi::FnType, - - /// LLVM types that will appear on the foreign function - llsig: LlvmSignature, -} - -struct LlvmSignature { - // LLVM versions of the types of this function's arguments. - llarg_tys: Vec , - - // LLVM version of the type that this function returns. Note that - // this *may not be* the declared return type of the foreign - // function, because the foreign function may opt to return via an - // out pointer. 
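Stepping back to the `OverflowOpViaIntrinsic` hunk in expr.rs above, before the removed foreign.rs listing continues: the overflow intrinsics return a pair of the wrapped result and an `i1` "did it overflow" flag, which the translation pulls apart with `ExtractValue` 0 and 1. The standard library exposes the same shape; a minimal sketch using only stable `overflowing_add` (nothing here is taken from the patch itself):

    fn main() {
        // (wrapped result, overflow flag) -- the same pair the intrinsic yields.
        let (wrapped, overflowed) = i32::max_value().overflowing_add(1);
        assert!(overflowed);
        assert_eq!(wrapped, i32::min_value());

        // With overflow checks enabled, the branch guarded by `llvm.expect`
        // above turns the plain `i32::max_value() + 1` into a panic instead.
        println!("wrapped = {}, overflowed = {}", wrapped, overflowed);
    }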
- llret_ty: Type, - - /// True if there is a return value (not bottom, not unit) - ret_def: bool, -} - - -/////////////////////////////////////////////////////////////////////////// -// Calls to external functions - -pub fn llvm_calling_convention(ccx: &CrateContext, - abi: Abi) -> CallConv { - use syntax::abi::Abi::*; - match ccx.sess().target.target.adjust_abi(abi) { - RustIntrinsic => { - // Intrinsics are emitted at the call site - ccx.sess().bug("asked to register intrinsic fn"); - } - PlatformIntrinsic => { - // Intrinsics are emitted at the call site - ccx.sess().bug("asked to register platform intrinsic fn"); - } - - Rust => { - // FIXME(#3678) Implement linking to foreign fns with Rust ABI - ccx.sess().unimpl("foreign functions with Rust ABI"); - } - - RustCall => { - // FIXME(#3678) Implement linking to foreign fns with Rust ABI - ccx.sess().unimpl("foreign functions with RustCall ABI"); - } - - // It's the ABI's job to select this, not us. - System => ccx.sess().bug("system abi should be selected elsewhere"), - - Stdcall => llvm::X86StdcallCallConv, - Fastcall => llvm::X86FastcallCallConv, - Vectorcall => llvm::X86_VectorCall, - C => llvm::CCallConv, - Win64 => llvm::X86_64_Win64, - - // These API constants ought to be more specific... - Cdecl => llvm::CCallConv, - Aapcs => llvm::CCallConv, - } -} - -pub fn register_static(ccx: &CrateContext, - foreign_item: &hir::ForeignItem) -> ValueRef { - let ty = ccx.tcx().node_id_to_type(foreign_item.id); - let llty = type_of::type_of(ccx, ty); - - let ident = link_name(foreign_item); - let c = match attr::first_attr_value_str_by_name(&foreign_item.attrs, - "linkage") { - // If this is a static with a linkage specified, then we need to handle - // it a little specially. The typesystem prevents things like &T and - // extern "C" fn() from being non-null, so we can't just declare a - // static and call it a day. Some linkages (like weak) will make it such - // that the static actually has a null value. - Some(name) => { - let linkage = match llvm_linkage_by_name(&name) { - Some(linkage) => linkage, - None => { - ccx.sess().span_fatal(foreign_item.span, - "invalid linkage specified"); - } - }; - let llty2 = match ty.sty { - ty::TyRawPtr(ref mt) => type_of::type_of(ccx, mt.ty), - _ => { - ccx.sess().span_fatal(foreign_item.span, - "must have type `*T` or `*mut T`"); - } - }; - unsafe { - // Declare a symbol `foo` with the desired linkage. - let g1 = declare::declare_global(ccx, &ident[..], llty2); - llvm::SetLinkage(g1, linkage); - - // Declare an internal global `extern_with_linkage_foo` which - // is initialized with the address of `foo`. If `foo` is - // discarded during linking (for example, if `foo` has weak - // linkage and there are no definitions), then - // `extern_with_linkage_foo` will instead be initialized to - // zero. - let mut real_name = "_rust_extern_with_linkage_".to_string(); - real_name.push_str(&ident); - let g2 = declare::define_global(ccx, &real_name[..], llty).unwrap_or_else(||{ - ccx.sess().span_fatal(foreign_item.span, - &format!("symbol `{}` is already defined", ident)) - }); - llvm::SetLinkage(g2, llvm::InternalLinkage); - llvm::LLVMSetInitializer(g2, g1); - g2 - } - } - None => // Generate an external declaration. - declare::declare_global(ccx, &ident[..], llty), - }; - - // Handle thread-local external statics. 
- for attr in foreign_item.attrs.iter() { - if attr.check_name("thread_local") { - llvm::set_thread_local(c, true); - } - } - - return c; -} - -// only use this for foreign function ABIs and glue, use `get_extern_rust_fn` for Rust functions -pub fn get_extern_fn(ccx: &CrateContext, - externs: &mut ExternMap, - name: &str, - cc: llvm::CallConv, - ty: Type, - output: Ty) - -> ValueRef { - match externs.get(name) { - Some(n) => return *n, - None => {} - } - let f = declare::declare_fn(ccx, name, cc, ty, ty::FnConverging(output)); - externs.insert(name.to_string(), f); - f -} - -/// Registers a foreign function found in a library. Just adds a LLVM global. -pub fn register_foreign_item_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - abi: Abi, fty: Ty<'tcx>, - name: &str, - attrs: &[ast::Attribute])-> ValueRef { - debug!("register_foreign_item_fn(abi={:?}, \ - ty={:?}, \ - name={})", - abi, - fty, - name); - - let cc = llvm_calling_convention(ccx, abi); - - // Register the function as a C extern fn - let tys = foreign_types_for_fn_ty(ccx, fty); - - // Make sure the calling convention is right for variadic functions - // (should've been caught if not in typeck) - if tys.fn_sig.variadic { - assert!(cc == llvm::CCallConv); - } - - // Create the LLVM value for the C extern fn - let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys); - - let llfn = get_extern_fn(ccx, &mut *ccx.externs().borrow_mut(), name, cc, llfn_ty, fty); - attributes::unwind(llfn, false); - add_argument_attributes(&tys, llfn); - attributes::from_fn_attrs(ccx, attrs, llfn); - llfn -} - -/// Prepares a call to a native function. This requires adapting -/// from the Rust argument passing rules to the native rules. -/// -/// # Parameters -/// -/// - `callee_ty`: Rust type for the function we are calling -/// - `llfn`: the function pointer we are calling -/// - `llretptr`: where to store the return value of the function -/// - `llargs_rust`: a list of the argument values, prepared -/// as they would be if calling a Rust function -/// - `passed_arg_tys`: Rust type for the arguments. Normally we -/// can derive these from callee_ty but in the case of variadic -/// functions passed_arg_tys will include the Rust type of all -/// the arguments including the ones not specified in the fn's signature. -pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - callee_ty: Ty<'tcx>, - llfn: ValueRef, - llretptr: ValueRef, - llargs_rust: &[ValueRef], - passed_arg_tys: Vec>, - call_debug_loc: DebugLoc) - -> Block<'blk, 'tcx> -{ - let ccx = bcx.ccx(); - - debug!("trans_native_call(callee_ty={:?}, \ - llfn={}, \ - llretptr={})", - callee_ty, - ccx.tn().val_to_string(llfn), - ccx.tn().val_to_string(llretptr)); - - let (fn_abi, fn_sig) = match callee_ty.sty { - ty::TyFnDef(_, _, ref fn_ty) | - ty::TyFnPtr(ref fn_ty) => (fn_ty.abi, &fn_ty.sig), - _ => ccx.sess().bug("trans_native_call called on non-function type") - }; - let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig); - let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig); - let llsig = foreign_signature(ccx, &fn_sig, &passed_arg_tys[..]); - let fn_type = cabi::compute_abi_info(ccx, - &llsig.llarg_tys, - llsig.llret_ty, - llsig.ret_def); - - let arg_tys: &[cabi::ArgType] = &fn_type.arg_tys; - - let mut llargs_foreign = Vec::new(); - - // If the foreign ABI expects return value by pointer, supply the - // pointer that Rust gave us. 
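`register_foreign_item_fn` and `link_name` above operate on ordinary `extern` blocks. A standalone sketch of the source side, assuming the usual C library symbols (`puts`, `printf`) are available at link time, as they are on common platforms; the Rust-side names are illustrative:

    use std::ffi::CString;
    use std::os::raw::{c_char, c_int};

    extern "C" {
        // `#[link_name]` overrides the symbol that is looked up at link time.
        #[link_name = "puts"]
        fn c_puts(s: *const c_char) -> c_int;

        // Variadic foreign functions must use the C calling convention,
        // which is the invariant asserted above for `fn_sig.variadic`.
        fn printf(fmt: *const c_char, ...) -> c_int;
    }

    fn main() {
        let msg = CString::new("hello from FFI").unwrap();
        unsafe {
            c_puts(msg.as_ptr());
            printf(b"%d + %d = %d\n\0".as_ptr() as *const c_char, 1, 2, 3);
        }
    }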
Sometimes we have to bitcast - // because foreign fns return slightly different (but equivalent) - // views on the same type (e.g., i64 in place of {i32,i32}). - if fn_type.ret_ty.is_indirect() { - match fn_type.ret_ty.cast { - Some(ty) => { - let llcastedretptr = - BitCast(bcx, llretptr, ty.ptr_to()); - llargs_foreign.push(llcastedretptr); - } - None => { - llargs_foreign.push(llretptr); - } - } - } - - let mut offset = 0; - for (i, arg_ty) in arg_tys.iter().enumerate() { - let mut llarg_rust = llargs_rust[i + offset]; - - if arg_ty.is_ignore() { - continue; - } - - // Does Rust pass this argument by pointer? - let rust_indirect = type_of::arg_is_indirect(ccx, passed_arg_tys[i]); - - debug!("argument {}, llarg_rust={}, rust_indirect={}, arg_ty={}", - i, - ccx.tn().val_to_string(llarg_rust), - rust_indirect, - ccx.tn().type_to_string(arg_ty.ty)); - - // Ensure that we always have the Rust value indirectly, - // because it makes bitcasting easier. - if !rust_indirect { - let scratch = base::alloc_ty(bcx, passed_arg_tys[i], "__arg"); - if type_is_fat_ptr(ccx.tcx(), passed_arg_tys[i]) { - Store(bcx, llargs_rust[i + offset], expr::get_dataptr(bcx, scratch)); - Store(bcx, llargs_rust[i + offset + 1], expr::get_meta(bcx, scratch)); - offset += 1; - } else { - base::store_ty(bcx, llarg_rust, scratch, passed_arg_tys[i]); - } - llarg_rust = scratch; - } - - debug!("llarg_rust={} (after indirection)", - ccx.tn().val_to_string(llarg_rust)); - - // Check whether we need to do any casting - match arg_ty.cast { - Some(ty) => llarg_rust = BitCast(bcx, llarg_rust, ty.ptr_to()), - None => () - } - - debug!("llarg_rust={} (after casting)", - ccx.tn().val_to_string(llarg_rust)); - - // Finally, load the value if needed for the foreign ABI - let foreign_indirect = arg_ty.is_indirect(); - let llarg_foreign = if foreign_indirect { - llarg_rust - } else { - if passed_arg_tys[i].is_bool() { - let val = LoadRangeAssert(bcx, llarg_rust, 0, 2, llvm::False); - Trunc(bcx, val, Type::i1(bcx.ccx())) - } else { - Load(bcx, llarg_rust) - } - }; - - debug!("argument {}, llarg_foreign={}", - i, ccx.tn().val_to_string(llarg_foreign)); - - // fill padding with undef value - match arg_ty.pad { - Some(ty) => llargs_foreign.push(C_undef(ty)), - None => () - } - llargs_foreign.push(llarg_foreign); - } - - let cc = llvm_calling_convention(ccx, fn_abi); - - // A function pointer is called without the declaration available, so we have to apply - // any attributes with ABI implications directly to the call instruction. - let mut attrs = llvm::AttrBuilder::new(); - - // Add attributes that are always applicable, independent of the concrete foreign ABI - if fn_type.ret_ty.is_indirect() { - let llret_sz = machine::llsize_of_real(ccx, fn_type.ret_ty.ty); - - // The outptr can be noalias and nocapture because it's entirely - // invisible to the program. 
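The out-pointer adaptation described above concerns signatures like the following sketch: a small `#[repr(C)]` aggregate may come back in registers (possibly as a single integer such as `i64`) or through a hidden out-pointer, depending on the target's C ABI, and the code above bridges that difference to the Rust view of the return value. The type and function names are hypothetical:

    #[repr(C)]
    #[derive(Debug, Clone, Copy)]
    struct Pair {
        a: i32,
        b: i32,
    }

    // A C-ABI function returning a small aggregate. How `Pair` travels back
    // (registers vs. hidden out-pointer) is the ABI's choice, not the caller's.
    extern "C" fn make_pair(a: i32, b: i32) -> Pair {
        Pair { a: a, b: b }
    }

    fn main() {
        let p = make_pair(1, 2);
        println!("{:?}", p);
    }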
We also know it's nonnull as well - // as how many bytes we can dereference - attrs.arg(1, llvm::Attribute::NoAlias) - .arg(1, llvm::Attribute::NoCapture) - .arg(1, llvm::DereferenceableAttribute(llret_sz)); - }; - - // Add attributes that depend on the concrete foreign ABI - let mut arg_idx = if fn_type.ret_ty.is_indirect() { 1 } else { 0 }; - match fn_type.ret_ty.attr { - Some(attr) => { attrs.arg(arg_idx, attr); }, - _ => () - } - - arg_idx += 1; - for arg_ty in &fn_type.arg_tys { - if arg_ty.is_ignore() { - continue; - } - // skip padding - if arg_ty.pad.is_some() { arg_idx += 1; } - - if let Some(attr) = arg_ty.attr { - attrs.arg(arg_idx, attr); - } - - arg_idx += 1; - } - - let llforeign_retval = CallWithConv(bcx, - llfn, - &llargs_foreign[..], - cc, - Some(attrs), - call_debug_loc); - - // If the function we just called does not use an outpointer, - // store the result into the rust outpointer. Cast the outpointer - // type to match because some ABIs will use a different type than - // the Rust type. e.g., a {u32,u32} struct could be returned as - // u64. - if llsig.ret_def && !fn_type.ret_ty.is_indirect() { - let llrust_ret_ty = llsig.llret_ty; - let llforeign_ret_ty = match fn_type.ret_ty.cast { - Some(ty) => ty, - None => fn_type.ret_ty.ty - }; - - debug!("llretptr={}", ccx.tn().val_to_string(llretptr)); - debug!("llforeign_retval={}", ccx.tn().val_to_string(llforeign_retval)); - debug!("llrust_ret_ty={}", ccx.tn().type_to_string(llrust_ret_ty)); - debug!("llforeign_ret_ty={}", ccx.tn().type_to_string(llforeign_ret_ty)); - - if llrust_ret_ty == llforeign_ret_ty { - match fn_sig.output { - ty::FnConverging(result_ty) => { - base::store_ty(bcx, llforeign_retval, llretptr, result_ty) - } - ty::FnDiverging => {} - } - } else { - // The actual return type is a struct, but the ABI - // adaptation code has cast it into some scalar type. The - // code that follows is the only reliable way I have - // found to do a transform like i64 -> {i32,i32}. - // Basically we dump the data onto the stack then memcpy it. - // - // Other approaches I tried: - // - Casting rust ret pointer to the foreign type and using Store - // is (a) unsafe if size of foreign type > size of rust type and - // (b) runs afoul of strict aliasing rules, yielding invalid - // assembly under -O (specifically, the store gets removed). - // - Truncating foreign type to correct integral type and then - // bitcasting to the struct type yields invalid cast errors. - let llscratch = base::alloca(bcx, llforeign_ret_ty, "__cast"); - base::call_lifetime_start(bcx, llscratch); - Store(bcx, llforeign_retval, llscratch); - let llscratch_i8 = BitCast(bcx, llscratch, Type::i8(ccx).ptr_to()); - let llretptr_i8 = BitCast(bcx, llretptr, Type::i8(ccx).ptr_to()); - let llrust_size = machine::llsize_of_store(ccx, llrust_ret_ty); - let llforeign_align = machine::llalign_of_min(ccx, llforeign_ret_ty); - let llrust_align = machine::llalign_of_min(ccx, llrust_ret_ty); - let llalign = cmp::min(llforeign_align, llrust_align); - debug!("llrust_size={}", llrust_size); - base::call_memcpy(bcx, llretptr_i8, llscratch_i8, - C_uint(ccx, llrust_size), llalign as u32); - base::call_lifetime_end(bcx, llscratch); - } - } - - return bcx; -} - -// feature gate SIMD types in FFI, since I (huonw) am not sure the -// ABIs are handled at all correctly. 
-fn gate_simd_ffi(tcx: &TyCtxt, decl: &hir::FnDecl, ty: &ty::BareFnTy) { - if !tcx.sess.features.borrow().simd_ffi { - let check = |ast_ty: &hir::Ty, ty: ty::Ty| { - if ty.is_simd() { - tcx.sess.struct_span_err(ast_ty.span, - &format!("use of SIMD type `{}` in FFI is highly experimental and \ - may result in invalid code", - pprust::ty_to_string(ast_ty))) - .fileline_help(ast_ty.span, - "add #![feature(simd_ffi)] to the crate attributes to enable") - .emit(); - } - }; - let sig = &ty.sig.0; - for (input, ty) in decl.inputs.iter().zip(&sig.inputs) { - check(&input.ty, *ty) - } - if let hir::Return(ref ty) = decl.output { - check(&ty, sig.output.unwrap()) - } - } -} - -pub fn trans_foreign_mod(ccx: &CrateContext, foreign_mod: &hir::ForeignMod) { - let _icx = push_ctxt("foreign::trans_foreign_mod"); - for foreign_item in &foreign_mod.items { - let lname = link_name(foreign_item); - - if let hir::ForeignItemFn(ref decl, _) = foreign_item.node { - match foreign_mod.abi { - Abi::Rust | Abi::RustIntrinsic | Abi::PlatformIntrinsic => {} - abi => { - let ty = ccx.tcx().node_id_to_type(foreign_item.id); - match ty.sty { - ty::TyFnDef(_, _, bft) | - ty::TyFnPtr(bft) => gate_simd_ffi(ccx.tcx(), &decl, bft), - _ => ccx.tcx().sess.span_bug(foreign_item.span, - "foreign fn's sty isn't a bare_fn_ty?") - } - - register_foreign_item_fn(ccx, abi, ty, &lname, &foreign_item.attrs); - // Unlike for other items, we shouldn't call - // `base::update_linkage` here. Foreign items have - // special linkage requirements, which are handled - // inside `foreign::register_*`. - } - } - } - - ccx.item_symbols().borrow_mut().insert(foreign_item.id, - lname.to_string()); - } -} - -/////////////////////////////////////////////////////////////////////////// -// Rust functions with foreign ABIs -// -// These are normal Rust functions defined with foreign ABIs. For -// now, and perhaps forever, we translate these using a "layer of -// indirection". That is, given a Rust declaration like: -// -// extern "C" fn foo(i: u32) -> u32 { ... } -// -// we will generate a function like: -// -// S foo(T i) { -// S r; -// foo0(&r, NULL, i); -// return r; -// } -// -// #[inline_always] -// void foo0(uint32_t *r, void *env, uint32_t i) { ... } -// -// Here the (internal) `foo0` function follows the Rust ABI as normal, -// where the `foo` function follows the C ABI. We rely on LLVM to -// inline the one into the other. Of course we could just generate the -// correct code in the first place, but this is much simpler. 
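At the source level, the `foo`/`foo0` layering described in the comment above is generated for items like this standalone sketch (names illustrative): the exported symbol follows the C ABI while the inner body keeps the normal Rust ABI, and LLVM is expected to inline one into the other:

    // A Rust function exposed with the C ABI. Internally the compiler emits a
    // Rust-ABI body plus a thin C-ABI wrapper, per the scheme described above.
    #[no_mangle]
    pub extern "C" fn add_u32(a: u32, b: u32) -> u32 {
        a.wrapping_add(b)
    }

    fn main() {
        // From Rust it is also callable as an ordinary function.
        assert_eq!(add_u32(2, 3), 5);
        println!("ok");
    }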
- -pub fn decl_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, - name: &str) - -> ValueRef { - let tys = foreign_types_for_fn_ty(ccx, t); - let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys); - let cconv = match t.sty { - ty::TyFnDef(_, _, ref fn_ty) | ty::TyFnPtr(ref fn_ty) => { - llvm_calling_convention(ccx, fn_ty.abi) - } - _ => panic!("expected bare fn in decl_rust_fn_with_foreign_abi") - }; - let llfn = declare::declare_fn(ccx, name, cconv, llfn_ty, - ty::FnConverging(ccx.tcx().mk_nil())); - add_argument_attributes(&tys, llfn); - debug!("decl_rust_fn_with_foreign_abi(llfn_ty={}, llfn={})", - ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn)); - llfn -} - -pub fn register_rust_fn_with_foreign_abi(ccx: &CrateContext, - sp: Span, - sym: String, - node_id: ast::NodeId) - -> ValueRef { - let _icx = push_ctxt("foreign::register_foreign_fn"); - - let t = ccx.tcx().node_id_to_type(node_id); - let cconv = match t.sty { - ty::TyFnDef(_, _, ref fn_ty) | ty::TyFnPtr(ref fn_ty) => { - llvm_calling_convention(ccx, fn_ty.abi) - } - _ => panic!("expected bare fn in register_rust_fn_with_foreign_abi") - }; - let tys = foreign_types_for_fn_ty(ccx, t); - let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys); - let llfn = base::register_fn_llvmty(ccx, sp, sym, node_id, cconv, llfn_ty); - add_argument_attributes(&tys, llfn); - debug!("register_rust_fn_with_foreign_abi(node_id={}, llfn_ty={}, llfn={})", - node_id, ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn)); - llfn -} - -pub fn trans_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, - attrs: &[ast::Attribute], - llwrapfn: ValueRef, - param_substs: &'tcx Substs<'tcx>, - id: ast::NodeId, - hash: Option<&str>) { - let _icx = push_ctxt("foreign::build_foreign_fn"); - - let fnty = ccx.tcx().node_id_to_type(id); - let mty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &fnty); - let tys = foreign_types_for_fn_ty(ccx, mty); - - unsafe { // unsafe because we call LLVM operations - // Build up the Rust function (`foo0` above). - let llrustfn = build_rust_fn(ccx, decl, body, param_substs, attrs, id, hash); - - // Build up the foreign wrapper (`foo` above). - return build_wrap_fn(ccx, llrustfn, llwrapfn, &tys, mty); - } - - fn build_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, - param_substs: &'tcx Substs<'tcx>, - attrs: &[ast::Attribute], - id: ast::NodeId, - hash: Option<&str>) - -> ValueRef - { - let _icx = push_ctxt("foreign::foreign::build_rust_fn"); - let tcx = ccx.tcx(); - let t = tcx.node_id_to_type(id); - let t = monomorphize::apply_param_substs(tcx, param_substs, &t); - - let path = - tcx.map.def_path_from_id(id) - .into_iter() - .map(|e| e.data.as_interned_str()) - .chain(once(special_idents::clownshoe_abi.name.as_str())); - let ps = link::mangle(path, hash); - - // Compute the type that the function would have if it were just a - // normal Rust function. This will be the type of the wrappee fn. 
- match t.sty { - ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f)=> { - assert!(f.abi != Abi::Rust); - assert!(f.abi != Abi::RustIntrinsic); - assert!(f.abi != Abi::PlatformIntrinsic); - } - _ => { - ccx.sess().bug(&format!("build_rust_fn: extern fn {} has ty {:?}, \ - expected a bare fn ty", - ccx.tcx().map.path_to_string(id), - t)); - } - }; - - debug!("build_rust_fn: path={} id={} t={:?}", - ccx.tcx().map.path_to_string(id), - id, t); - - let llfn = declare::define_internal_rust_fn(ccx, &ps, t); - attributes::from_fn_attrs(ccx, attrs, llfn); - base::trans_fn(ccx, decl, body, llfn, param_substs, id, attrs); - llfn - } - - unsafe fn build_wrap_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - llrustfn: ValueRef, - llwrapfn: ValueRef, - tys: &ForeignTypes<'tcx>, - t: Ty<'tcx>) { - let _icx = push_ctxt( - "foreign::trans_rust_fn_with_foreign_abi::build_wrap_fn"); - - debug!("build_wrap_fn(llrustfn={}, llwrapfn={}, t={:?})", - ccx.tn().val_to_string(llrustfn), - ccx.tn().val_to_string(llwrapfn), - t); - - // Avoid all the Rust generation stuff and just generate raw - // LLVM here. - // - // We want to generate code like this: - // - // S foo(T i) { - // S r; - // foo0(&r, NULL, i); - // return r; - // } - - if llvm::LLVMCountBasicBlocks(llwrapfn) != 0 { - ccx.sess().bug("wrapping a function inside non-empty wrapper, most likely cause is \ - multiple functions being wrapped"); - } - - let ptr = "the block\0".as_ptr(); - let the_block = llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llwrapfn, - ptr as *const _); - - let builder = ccx.builder(); - builder.position_at_end(the_block); - - // Array for the arguments we will pass to the rust function. - let mut llrust_args = Vec::new(); - let mut next_foreign_arg_counter: c_uint = 0; - let mut next_foreign_arg = |pad: bool| -> c_uint { - next_foreign_arg_counter += if pad { - 2 - } else { - 1 - }; - next_foreign_arg_counter - 1 - }; - - // If there is an out pointer on the foreign function - let foreign_outptr = { - if tys.fn_ty.ret_ty.is_indirect() { - Some(get_param(llwrapfn, next_foreign_arg(false))) - } else { - None - } - }; - - let rustfn_ty = Type::from_ref(llvm::LLVMTypeOf(llrustfn)).element_type(); - let mut rust_param_tys = rustfn_ty.func_params().into_iter(); - // Push Rust return pointer, using null if it will be unused. - let rust_uses_outptr = match tys.fn_sig.output { - ty::FnConverging(ret_ty) => type_of::return_uses_outptr(ccx, ret_ty), - ty::FnDiverging => false - }; - let return_alloca: Option; - let llrust_ret_ty = if rust_uses_outptr { - rust_param_tys.next().expect("Missing return type!").element_type() - } else { - rustfn_ty.return_type() - }; - if rust_uses_outptr { - // Rust expects to use an outpointer. If the foreign fn - // also uses an outpointer, we can reuse it, but the types - // may vary, so cast first to the Rust type. If the - // foreign fn does NOT use an outpointer, we will have to - // alloca some scratch space on the stack. 
- match foreign_outptr { - Some(llforeign_outptr) => { - debug!("out pointer, foreign={}", - ccx.tn().val_to_string(llforeign_outptr)); - let llrust_retptr = - builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to()); - debug!("out pointer, foreign={} (casted)", - ccx.tn().val_to_string(llrust_retptr)); - llrust_args.push(llrust_retptr); - return_alloca = None; - } - - None => { - let slot = builder.alloca(llrust_ret_ty, "return_alloca"); - debug!("out pointer, \ - allocad={}, \ - llrust_ret_ty={}, \ - return_ty={:?}", - ccx.tn().val_to_string(slot), - ccx.tn().type_to_string(llrust_ret_ty), - tys.fn_sig.output); - llrust_args.push(slot); - return_alloca = Some(slot); - } - } - } else { - // Rust does not expect an outpointer. If the foreign fn - // does use an outpointer, then we will do a store of the - // value that the Rust fn returns. - return_alloca = None; - }; - - // Build up the arguments to the call to the rust function. - // Careful to adapt for cases where the native convention uses - // a pointer and Rust does not or vice versa. - for i in 0..tys.fn_sig.inputs.len() { - let rust_ty = tys.fn_sig.inputs[i]; - let rust_indirect = type_of::arg_is_indirect(ccx, rust_ty); - let llty = rust_param_tys.next().expect("Not enough parameter types!"); - let llrust_ty = if rust_indirect { - llty.element_type() - } else { - llty - }; - let llforeign_arg_ty = tys.fn_ty.arg_tys[i]; - let foreign_indirect = llforeign_arg_ty.is_indirect(); - - if llforeign_arg_ty.is_ignore() { - debug!("skipping ignored arg #{}", i); - llrust_args.push(C_undef(llrust_ty)); - continue; - } - - // skip padding - let foreign_index = next_foreign_arg(llforeign_arg_ty.pad.is_some()); - let mut llforeign_arg = get_param(llwrapfn, foreign_index); - - debug!("llforeign_arg {}{}: {}", "#", - i, ccx.tn().val_to_string(llforeign_arg)); - debug!("rust_indirect = {}, foreign_indirect = {}", - rust_indirect, foreign_indirect); - - // Ensure that the foreign argument is indirect (by - // pointer). It makes adapting types easier, since we can - // always just bitcast pointers. - if !foreign_indirect { - llforeign_arg = if rust_ty.is_bool() { - let lltemp = builder.alloca(Type::bool(ccx), ""); - builder.store(builder.zext(llforeign_arg, Type::bool(ccx)), lltemp); - lltemp - } else { - let lltemp = builder.alloca(val_ty(llforeign_arg), ""); - builder.store(llforeign_arg, lltemp); - lltemp - } - } - - // If the types in the ABI and the Rust types don't match, - // bitcast the llforeign_arg pointer so it matches the types - // Rust expects. 
- if llforeign_arg_ty.cast.is_some() && !type_is_fat_ptr(ccx.tcx(), rust_ty){ - assert!(!foreign_indirect); - llforeign_arg = builder.bitcast(llforeign_arg, llrust_ty.ptr_to()); - } - - let llrust_arg = if rust_indirect || type_is_fat_ptr(ccx.tcx(), rust_ty) { - llforeign_arg - } else { - if rust_ty.is_bool() { - let tmp = builder.load_range_assert(llforeign_arg, 0, 2, llvm::False); - builder.trunc(tmp, Type::i1(ccx)) - } else if type_of::type_of(ccx, rust_ty).is_aggregate() { - // We want to pass small aggregates as immediate values, but using an aggregate - // LLVM type for this leads to bad optimizations, so its arg type is an - // appropriately sized integer and we have to convert it - let tmp = builder.bitcast(llforeign_arg, - type_of::arg_type_of(ccx, rust_ty).ptr_to()); - let load = builder.load(tmp); - llvm::LLVMSetAlignment(load, type_of::align_of(ccx, rust_ty)); - load - } else { - builder.load(llforeign_arg) - } - }; - - debug!("llrust_arg {}{}: {}", "#", - i, ccx.tn().val_to_string(llrust_arg)); - if type_is_fat_ptr(ccx.tcx(), rust_ty) { - let next_llrust_ty = rust_param_tys.next().expect("Not enough parameter types!"); - llrust_args.push(builder.load(builder.bitcast(builder.struct_gep( - llrust_arg, abi::FAT_PTR_ADDR), llrust_ty.ptr_to()))); - llrust_args.push(builder.load(builder.bitcast(builder.struct_gep( - llrust_arg, abi::FAT_PTR_EXTRA), next_llrust_ty.ptr_to()))); - } else { - llrust_args.push(llrust_arg); - } - } - - // Perform the call itself - debug!("calling llrustfn = {}, t = {:?}", - ccx.tn().val_to_string(llrustfn), t); - let attributes = attributes::from_fn_type(ccx, t); - let llrust_ret_val = builder.call(llrustfn, &llrust_args, - None, Some(attributes)); - - // Get the return value where the foreign fn expects it. - let llforeign_ret_ty = match tys.fn_ty.ret_ty.cast { - Some(ty) => ty, - None => tys.fn_ty.ret_ty.ty - }; - match foreign_outptr { - None if !tys.llsig.ret_def => { - // Function returns `()` or `bot`, which in Rust is the LLVM - // type "{}" but in foreign ABIs is "Void". - builder.ret_void(); - } - - None if rust_uses_outptr => { - // Rust uses an outpointer, but the foreign ABI does not. Load. - let llrust_outptr = return_alloca.unwrap(); - let llforeign_outptr_casted = - builder.bitcast(llrust_outptr, llforeign_ret_ty.ptr_to()); - let llforeign_retval = builder.load(llforeign_outptr_casted); - builder.ret(llforeign_retval); - } - - None if llforeign_ret_ty != llrust_ret_ty => { - // Neither ABI uses an outpointer, but the types don't - // quite match. Must cast. Probably we should try and - // examine the types and use a concrete llvm cast, but - // right now we just use a temp memory location and - // bitcast the pointer, which is the same thing the - // old wrappers used to do. - let lltemp = builder.alloca(llforeign_ret_ty, ""); - let lltemp_casted = builder.bitcast(lltemp, llrust_ret_ty.ptr_to()); - builder.store(llrust_ret_val, lltemp_casted); - let llforeign_retval = builder.load(lltemp); - builder.ret(llforeign_retval); - } - - None => { - // Neither ABI uses an outpointer, and the types - // match. Easy peasy. - builder.ret(llrust_ret_val); - } - - Some(llforeign_outptr) if !rust_uses_outptr => { - // Foreign ABI requires an out pointer, but Rust doesn't. - // Store Rust return value. - let llforeign_outptr_casted = - builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to()); - builder.store(llrust_ret_val, llforeign_outptr_casted); - builder.ret_void(); - } - - Some(_) => { - // Both ABIs use outpointers. Easy peasy. 
- builder.ret_void(); - } - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// General ABI Support -// -// This code is kind of a confused mess and needs to be reworked given -// the massive simplifications that have occurred. - -pub fn link_name(i: &hir::ForeignItem) -> InternedString { - match attr::first_attr_value_str_by_name(&i.attrs, "link_name") { - Some(ln) => ln.clone(), - None => match weak_lang_items::link_name(&i.attrs) { - Some(name) => name, - None => i.name.as_str(), - } - } -} - -/// The ForeignSignature is the LLVM types of the arguments/return type of a function. Note that -/// these LLVM types are not quite the same as the LLVM types would be for a native Rust function -/// because foreign functions just plain ignore modes. They also don't pass aggregate values by -/// pointer like we do. -fn foreign_signature<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fn_sig: &ty::FnSig<'tcx>, - arg_tys: &[Ty<'tcx>]) - -> LlvmSignature { - let llarg_tys = arg_tys.iter().map(|&arg| foreign_arg_type_of(ccx, arg)).collect(); - let (llret_ty, ret_def) = match fn_sig.output { - ty::FnConverging(ret_ty) => - (type_of::foreign_arg_type_of(ccx, ret_ty), !return_type_is_void(ccx, ret_ty)), - ty::FnDiverging => - (Type::nil(ccx), false) - }; - LlvmSignature { - llarg_tys: llarg_tys, - llret_ty: llret_ty, - ret_def: ret_def - } -} - -fn foreign_types_for_fn_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> ForeignTypes<'tcx> { - let fn_sig = match ty.sty { - ty::TyFnDef(_, _, ref fn_ty) | ty::TyFnPtr(ref fn_ty) => &fn_ty.sig, - _ => ccx.sess().bug("foreign_types_for_fn_ty called on non-function type") - }; - let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig); - let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig); - let llsig = foreign_signature(ccx, &fn_sig, &fn_sig.inputs); - let fn_ty = cabi::compute_abi_info(ccx, - &llsig.llarg_tys, - llsig.llret_ty, - llsig.ret_def); - debug!("foreign_types_for_fn_ty(\ - ty={:?}, \ - llsig={} -> {}, \ - fn_ty={} -> {}, \ - ret_def={}", - ty, - ccx.tn().types_to_str(&llsig.llarg_tys), - ccx.tn().type_to_string(llsig.llret_ty), - ccx.tn().types_to_str(&fn_ty.arg_tys.iter().map(|t| t.ty).collect::>()), - ccx.tn().type_to_string(fn_ty.ret_ty.ty), - llsig.ret_def); - - ForeignTypes { - fn_sig: fn_sig, - llsig: llsig, - fn_ty: fn_ty - } -} - -fn lltype_for_fn_from_foreign_types(ccx: &CrateContext, tys: &ForeignTypes) -> Type { - let mut llargument_tys = Vec::new(); - - let ret_ty = tys.fn_ty.ret_ty; - let llreturn_ty = if ret_ty.is_indirect() { - llargument_tys.push(ret_ty.ty.ptr_to()); - Type::void(ccx) - } else { - match ret_ty.cast { - Some(ty) => ty, - None => ret_ty.ty - } - }; - - for &arg_ty in &tys.fn_ty.arg_tys { - if arg_ty.is_ignore() { - continue; - } - // add padding - match arg_ty.pad { - Some(ty) => llargument_tys.push(ty), - None => () - } - - let llarg_ty = if arg_ty.is_indirect() { - arg_ty.ty.ptr_to() - } else { - match arg_ty.cast { - Some(ty) => ty, - None => arg_ty.ty - } - }; - - llargument_tys.push(llarg_ty); - } - - if tys.fn_sig.variadic { - Type::variadic_func(&llargument_tys, &llreturn_ty) - } else { - Type::func(&llargument_tys[..], &llreturn_ty) - } -} - -pub fn lltype_for_foreign_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> Type { - lltype_for_fn_from_foreign_types(ccx, &foreign_types_for_fn_ty(ccx, ty)) -} - -fn add_argument_attributes(tys: &ForeignTypes, - llfn: ValueRef) { - let mut i = if tys.fn_ty.ret_ty.is_indirect() { - 1 - } else { - 0 - }; 
- - match tys.fn_ty.ret_ty.attr { - Some(attr) => unsafe { - llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr.bits() as u64); - }, - None => {} - } - - i += 1; - - for &arg_ty in &tys.fn_ty.arg_tys { - if arg_ty.is_ignore() { - continue; - } - // skip padding - if arg_ty.pad.is_some() { i += 1; } - - match arg_ty.attr { - Some(attr) => unsafe { - llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr.bits() as u64); - }, - None => () - } - - i += 1; - } -} diff --git a/src/librustc_trans/trans/glue.rs b/src/librustc_trans/trans/glue.rs index d5f8cff495600..4ca5fb07c9825 100644 --- a/src/librustc_trans/trans/glue.rs +++ b/src/librustc_trans/trans/glue.rs @@ -14,18 +14,19 @@ use std; -use back::link::*; +use back::link; use llvm; use llvm::{ValueRef, get_param}; use middle::lang_items::ExchangeFreeFnLangItem; use middle::subst::{Substs}; use middle::traits; use middle::ty::{self, Ty, TyCtxt}; +use trans::abi::{Abi, FnType}; use trans::adt; use trans::adt::GetDtorType; // for tcx.dtor_type() use trans::base::*; use trans::build::*; -use trans::callee; +use trans::callee::{Callee, ArgVals}; use trans::cleanup; use trans::cleanup::CleanupMethods; use trans::collector::{self, TransItem}; @@ -37,25 +38,23 @@ use trans::machine::*; use trans::monomorphize; use trans::type_of::{type_of, sizing_type_of, align_of}; use trans::type_::Type; +use trans::value::Value; use arena::TypedArena; -use libc::c_uint; -use syntax::ast; use syntax::codemap::DUMMY_SP; -pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>, +pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, size: ValueRef, align: ValueRef, debug_loc: DebugLoc) -> Block<'blk, 'tcx> { let _icx = push_ctxt("trans_exchange_free"); - let ccx = cx.ccx(); - callee::trans_lang_call(cx, - langcall(cx, None, "", ExchangeFreeFnLangItem), - &[PointerCast(cx, v, Type::i8p(ccx)), size, align], - Some(expr::Ignore), - debug_loc).bcx + + let def_id = langcall(bcx, None, "", ExchangeFreeFnLangItem); + let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align]; + Callee::def(bcx.ccx(), def_id, bcx.tcx().mk_substs(Substs::empty())) + .call(bcx, debug_loc, ArgVals(&args), None).bcx } pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, @@ -170,13 +169,13 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let may_need_drop = ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None); bcx = with_cond(bcx, may_need_drop, |cx| { - Call(cx, glue, &[ptr], None, debug_loc); + Call(cx, glue, &[ptr], debug_loc); cx }) } None => { // No drop-hint ==> call standard drop glue - Call(bcx, glue, &[ptr], None, debug_loc); + Call(bcx, glue, &[ptr], debug_loc); } } } @@ -240,38 +239,40 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } let t = g.ty(); - let llty = if type_is_sized(ccx.tcx(), t) { - type_of(ccx, t).ptr_to() - } else { - type_of(ccx, ccx.tcx().mk_box(t)).ptr_to() + let tcx = ccx.tcx(); + let sig = ty::FnSig { + inputs: vec![tcx.mk_mut_ptr(tcx.types.i8)], + output: ty::FnOutput::FnConverging(tcx.mk_nil()), + variadic: false, }; - - let llfnty = Type::glue_fn(ccx, llty); + // Create a FnType for fn(*mut i8) and substitute the real type in + // later - that prevents FnType from splitting fat pointers up. + let mut fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); + fn_ty.args[0].original_ty = type_of(ccx, t).ptr_to(); + let llfnty = fn_ty.llvm_type(ccx); // To avoid infinite recursion, don't `make_drop_glue` until after we've // added the entry to the `drop_glues` cache. 
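For orientation, the drop glue declared here (roughly `fn(*mut i8)` before the real pointer type is substituted in) is what eventually invokes user `Drop` implementations such as in this standalone sketch; the names are illustrative and not taken from the patch:

    struct Guard(&'static str);

    impl Drop for Guard {
        fn drop(&mut self) {
            // This body is what the generated drop glue ultimately calls,
            // via a function taking a pointer to the value being dropped.
            println!("dropping {}", self.0);
        }
    }

    fn main() {
        let _outer = Guard("outer");
        {
            let _inner = Guard("inner");
        } // `inner`'s drop glue runs here.
    }     // `outer`'s drop glue runs here.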
if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) { - let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ccx.tcx().mk_nil()); + let llfn = declare::declare_cfn(ccx, &old_sym, llfnty); ccx.drop_glues().borrow_mut().insert(g, llfn); return llfn; }; - let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop"); - let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ccx.tcx().mk_nil()).unwrap_or_else(||{ - ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm)); - }); + let fn_nm = link::mangle_internal_name_by_type_and_seq(ccx, t, "drop"); + assert!(declare::get_defined_value(ccx, &fn_nm).is_none()); + let llfn = declare::declare_cfn(ccx, &fn_nm, llfnty); ccx.available_drop_glues().borrow_mut().insert(g, fn_nm); + ccx.drop_glues().borrow_mut().insert(g, llfn); let _s = StatRecorder::new(ccx, format!("drop {:?}", t)); - let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); + let empty_substs = tcx.mk_substs(Substs::trans_empty()); let (arena, fcx): (TypedArena<_>, FunctionContext); arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false, - ty::FnConverging(ccx.tcx().mk_nil()), - empty_substs, None, &arena); + fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &arena); - let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil())); + let bcx = fcx.init(false, None); update_linkage(ccx, llfn, None, OriginalTranslation); @@ -284,9 +285,8 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // llfn is expected be declared to take a parameter of the appropriate // type, so we don't need to explicitly cast the function parameter. - let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint); - let bcx = make_drop_glue(bcx, llrawptr0, g); - finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None); + let bcx = make_drop_glue(bcx, get_param(llfn, 0), g); + fcx.finish(bcx, DebugLoc::None); llfn } @@ -314,7 +314,7 @@ fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, And(bcx, not_init, not_done, DebugLoc::None); with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| { let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap")); - Call(cx, llfn, &[], None, DebugLoc::None); + Call(cx, llfn, &[], DebugLoc::None); cx }) }; @@ -365,27 +365,31 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, _ => tcx.sess.bug(&format!("dtor for {:?} is not an impl???", t)) }; let dtor_did = def.destructor().unwrap(); - bcx = callee::Callee::ptr(callee::trans_fn_ref_with_substs( - bcx.ccx(), dtor_did, None, vtbl.substs)) - .call(bcx, DebugLoc::None, callee::ArgVals(args), Some(expr::Ignore)).bcx; + bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs) + .call(bcx, DebugLoc::None, ArgVals(args), None).bcx; bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope) } -pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef) +pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, + t: Ty<'tcx>, info: ValueRef) -> (ValueRef, ValueRef) { - debug!("calculate size of DST: {}; with lost info: {}", - t, bcx.val_to_string(info)); + debug!("calculate size of DST: {}; with lost info: {:?}", + t, Value(info)); if type_is_sized(bcx.tcx(), t) { let sizing_type = sizing_type_of(bcx.ccx(), t); let size = llsize_of_alloc(bcx.ccx(), sizing_type); let align = align_of(bcx.ccx(), t); - debug!("size_and_align_of_dst t={} info={} size: {} align: {}", - t, bcx.val_to_string(info), size, align); + debug!("size_and_align_of_dst t={} 
info={:?} size: {} align: {}", + t, Value(info), size, align); let size = C_uint(bcx.ccx(), size); let align = C_uint(bcx.ccx(), align); return (size, align); } + if bcx.is_unreachable() { + let llty = Type::int(bcx.ccx()); + return (C_undef(llty), C_undef(llty)); + } match t.sty { ty::TyStruct(def, substs) => { let ccx = bcx.ccx(); @@ -394,7 +398,7 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in assert!(!t.is_simd()); let repr = adt::represent_type(ccx, t); let sizing_type = adt::sizing_type_context_of(ccx, &repr, true); - debug!("DST {} sizing_type: {}", t, sizing_type.to_string()); + debug!("DST {} sizing_type: {:?}", t, sizing_type); let sized_size = llsize_of_alloc(ccx, sizing_type.prefix()); let sized_align = llalign_of_min(ccx, sizing_type.prefix()); debug!("DST {} statically sized prefix size: {} align: {}", @@ -408,8 +412,6 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field); let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info); - let dbloc = DebugLoc::None; - // FIXME (#26403, #27023): We should be adding padding // to `sized_size` (to accommodate the `unsized_align` // required of the unsized field that follows) before @@ -418,14 +420,14 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in // here. But this is where the add would go.) // Return the sum of sizes and max of aligns. - let mut size = Add(bcx, sized_size, unsized_size, dbloc); + let mut size = bcx.add(sized_size, unsized_size); // Issue #27023: If there is a drop flag, *now* we add 1 // to the size. (We can do this without adding any // padding because drop flags do not have any alignment // constraints.) if sizing_type.needs_drop_flag() { - size = Add(bcx, size, C_uint(bcx.ccx(), 1_u64), dbloc); + size = bcx.add(size, C_uint(bcx.ccx(), 1_u64)); } // Choose max of two known alignments (combined value must @@ -436,14 +438,9 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in // pick the correct alignment statically. C_uint(ccx, std::cmp::max(sized_align, unsized_align)) } - _ => Select(bcx, - ICmp(bcx, - llvm::IntUGT, - sized_align, - unsized_align, - dbloc), - sized_align, - unsized_align) + _ => bcx.select(bcx.icmp(llvm::IntUGT, sized_align, unsized_align), + sized_align, + unsized_align) }; // Issue #27023: must add any necessary padding to `size` @@ -457,19 +454,18 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in // // `(size + (align-1)) & -align` - let addend = Sub(bcx, align, C_uint(bcx.ccx(), 1_u64), dbloc); - let size = And( - bcx, Add(bcx, size, addend, dbloc), Neg(bcx, align, dbloc), dbloc); + let addend = bcx.sub(align, C_uint(bcx.ccx(), 1_u64)); + let size = bcx.and(bcx.add(size, addend), bcx.neg(align)); (size, align) } ty::TyTrait(..) => { // info points to the vtable and the second entry in the vtable is the // dynamic size of the object. 
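`size_and_align_of_dst` computes at runtime what `std::mem::size_of_val` and `align_of_val` report at the source level: for a slice the size comes from the length metadata times the element size, and for a trait object the size and alignment are read out of the vtable, exactly as the `info` handling above does. A minimal sketch:

    use std::mem::{align_of_val, size_of_val};

    fn main() {
        // Slice: size = element size * length; the length is the fat-pointer metadata.
        let xs: &[u64] = &[1, 2, 3];
        assert_eq!(size_of_val(xs), 3 * 8);

        // Trait object: size and alignment come from the vtable entries.
        let shown: &dyn std::fmt::Debug = &0u32;
        println!("size = {}, align = {}", size_of_val(shown), align_of_val(shown));
    }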
- let info = PointerCast(bcx, info, Type::int(bcx.ccx()).ptr_to()); - let size_ptr = GEPi(bcx, info, &[1]); - let align_ptr = GEPi(bcx, info, &[2]); - (Load(bcx, size_ptr), Load(bcx, align_ptr)) + let info = bcx.pointercast(info, Type::int(bcx.ccx()).ptr_to()); + let size_ptr = bcx.gepi(info, &[1]); + let align_ptr = bcx.gepi(info, &[2]); + (bcx.load(size_ptr), bcx.load(align_ptr)) } ty::TySlice(_) | ty::TyStr => { let unit_ty = t.sequence_element_type(bcx.tcx()); @@ -478,7 +474,7 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty); let unit_align = llalign_of_min(bcx.ccx(), llunit_ty); let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty); - (Mul(bcx, info, C_uint(bcx.ccx(), unit_size), DebugLoc::None), + (bcx.mul(info, C_uint(bcx.ccx(), unit_size)), C_uint(bcx.ccx(), unit_align)) } _ => bcx.sess().bug(&format!("Unexpected unsized type, found {}", t)) @@ -523,7 +519,8 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); let info = expr::get_meta(bcx, v0); let info = Load(bcx, info); - let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info); + let (llsize, llalign) = + size_and_align_of_dst(&bcx.build(), content_ty, info); // `Box` does not allocate. let needs_free = ICmp(bcx, @@ -585,7 +582,6 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK Call(bcx, dtor, &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))], - None, DebugLoc::None); bcx } diff --git a/src/librustc_trans/trans/inline.rs b/src/librustc_trans/trans/inline.rs index 4c647152458f4..ad32870444a22 100644 --- a/src/librustc_trans/trans/inline.rs +++ b/src/librustc_trans/trans/inline.rs @@ -12,7 +12,8 @@ use llvm::{AvailableExternallyLinkage, InternalLinkage, SetLinkage}; use middle::cstore::{CrateStore, FoundAst, InlinedItem}; use middle::def_id::DefId; use middle::subst::Substs; -use trans::base::{push_ctxt, trans_item, get_item_val, trans_fn}; +use trans::base::{push_ctxt, trans_item, trans_fn}; +use trans::callee::Callee; use trans::common::*; use rustc::dep_graph::DepNode; @@ -21,14 +22,15 @@ use rustc_front::hir; fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { debug!("instantiate_inline({:?})", fn_id); let _icx = push_ctxt("instantiate_inline"); - let _task = ccx.tcx().dep_graph.in_task(DepNode::TransInlinedItem(fn_id)); + let tcx = ccx.tcx(); + let _task = tcx.dep_graph.in_task(DepNode::TransInlinedItem(fn_id)); match ccx.external().borrow().get(&fn_id) { Some(&Some(node_id)) => { // Already inline debug!("instantiate_inline({}): already inline as node id {}", - ccx.tcx().item_path_str(fn_id), node_id); - let node_def_id = ccx.tcx().map.local_def_id(node_id); + tcx.item_path_str(fn_id), node_id); + let node_def_id = tcx.map.local_def_id(node_id); return Some(node_def_id); } Some(&None) => { @@ -39,7 +41,7 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { } } - let inlined = ccx.tcx().sess.cstore.maybe_get_item_ast(ccx.tcx(), fn_id); + let inlined = tcx.sess.cstore.maybe_get_item_ast(tcx, fn_id); let inline_id = match inlined { FoundAst::NotFound => { ccx.external().borrow_mut().insert(fn_id, None); @@ -52,38 +54,27 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1); trans_item(ccx, item); - let linkage = match item.node { - hir::ItemFn(_, _, _, _, ref generics, _) => { - if 
generics.is_type_parameterized() { - // Generics have no symbol, so they can't be given any - // linkage. - None + if let hir::ItemFn(_, _, _, _, ref generics, _) = item.node { + // Generics have no symbol, so they can't be given any linkage. + if !generics.is_type_parameterized() { + let linkage = if ccx.sess().opts.cg.codegen_units == 1 { + // We could use AvailableExternallyLinkage here, + // but InternalLinkage allows LLVM to optimize more + // aggressively (at the cost of sometimes + // duplicating code). + InternalLinkage } else { - if ccx.sess().opts.cg.codegen_units == 1 { - // We could use AvailableExternallyLinkage here, - // but InternalLinkage allows LLVM to optimize more - // aggressively (at the cost of sometimes - // duplicating code). - Some(InternalLinkage) - } else { - // With multiple compilation units, duplicated code - // is more of a problem. Also, `codegen_units > 1` - // means the user is okay with losing some - // performance. - Some(AvailableExternallyLinkage) - } - } - } - hir::ItemConst(..) => None, - _ => unreachable!(), - }; - - match linkage { - Some(linkage) => { - let g = get_item_val(ccx, item.id); - SetLinkage(g, linkage); + // With multiple compilation units, duplicated code + // is more of a problem. Also, `codegen_units > 1` + // means the user is okay with losing some + // performance. + AvailableExternallyLinkage + }; + let empty_substs = tcx.mk_substs(Substs::trans_empty()); + let def_id = tcx.map.local_def_id(item.id); + let llfn = Callee::def(ccx, def_id, empty_substs).reify(ccx).val; + SetLinkage(llfn, linkage); } - None => {} } item.id @@ -93,7 +84,7 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { ccx.external_srcs().borrow_mut().insert(item.id, fn_id); item.id } - FoundAst::FoundParent(parent_id, &InlinedItem::Item(ref item)) => { + FoundAst::FoundParent(parent_id, item) => { ccx.external().borrow_mut().insert(parent_id, Some(item.id)); ccx.external_srcs().borrow_mut().insert(item.id, parent_id); @@ -101,7 +92,7 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { match item.node { hir::ItemEnum(ref ast_def, _) => { let ast_vs = &ast_def.variants; - let ty_vs = &ccx.tcx().lookup_adt_def(parent_id).variants; + let ty_vs = &tcx.lookup_adt_def(parent_id).variants; assert_eq!(ast_vs.len(), ty_vs.len()); for (ast_v, ty_v) in ast_vs.iter().zip(ty_vs.iter()) { if ty_v.did == fn_id { my_id = ast_v.node.data.id(); } @@ -120,13 +111,8 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { _ => ccx.sess().bug("instantiate_inline: item has a \ non-enum, non-struct parent") } - trans_item(ccx, &item); my_id } - FoundAst::FoundParent(_, _) => { - ccx.sess().bug("maybe_get_item_ast returned a FoundParent \ - with a non-item parent"); - } FoundAst::Found(&InlinedItem::TraitItem(_, ref trait_item)) => { ccx.external().borrow_mut().insert(fn_id, Some(trait_item.id)); ccx.external_srcs().borrow_mut().insert(trait_item.id, fn_id); @@ -137,10 +123,10 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { // the logic to do that already exists in `middle`. In order to // reuse that code, it needs to be able to look up the traits for // inlined items. 
- let ty_trait_item = ccx.tcx().impl_or_trait_item(fn_id).clone(); - let trait_item_def_id = ccx.tcx().map.local_def_id(trait_item.id); - ccx.tcx().impl_or_trait_items.borrow_mut() - .insert(trait_item_def_id, ty_trait_item); + let ty_trait_item = tcx.impl_or_trait_item(fn_id).clone(); + let trait_item_def_id = tcx.map.local_def_id(trait_item.id); + tcx.impl_or_trait_items.borrow_mut() + .insert(trait_item_def_id, ty_trait_item); // If this is a default method, we can't look up the // impl type. But we aren't going to translate anyways, so @@ -155,18 +141,18 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { // Translate monomorphic impl methods immediately. if let hir::ImplItemKind::Method(ref sig, ref body) = impl_item.node { - let impl_tpt = ccx.tcx().lookup_item_type(impl_did); + let impl_tpt = tcx.lookup_item_type(impl_did); if impl_tpt.generics.types.is_empty() && sig.generics.ty_params.is_empty() { - let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); - let llfn = get_item_val(ccx, impl_item.id); + let empty_substs = tcx.mk_substs(Substs::trans_empty()); + let def_id = tcx.map.local_def_id(impl_item.id); + let llfn = Callee::def(ccx, def_id, empty_substs).reify(ccx).val; trans_fn(ccx, &sig.decl, body, llfn, empty_substs, - impl_item.id, - &impl_item.attrs); + impl_item.id); // See linkage comments on items. if ccx.sess().opts.cg.codegen_units == 1 { SetLinkage(llfn, InternalLinkage); @@ -180,7 +166,7 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { } }; - let inline_def_id = ccx.tcx().map.local_def_id(inline_id); + let inline_def_id = tcx.map.local_def_id(inline_id); Some(inline_def_id) } diff --git a/src/librustc_trans/trans/intrinsic.rs b/src/librustc_trans/trans/intrinsic.rs index ca9833f6f9d69..1140edeaec07a 100644 --- a/src/librustc_trans/trans/intrinsic.rs +++ b/src/librustc_trans/trans/intrinsic.rs @@ -18,6 +18,7 @@ use llvm::{ValueRef, TypeKind}; use middle::infer; use middle::subst; use middle::subst::FnSpace; +use trans::abi::{Abi, FnType}; use trans::adt; use trans::attributes; use trans::base::*; @@ -40,19 +41,18 @@ use trans::Disr; use middle::subst::Substs; use rustc::dep_graph::DepNode; use rustc_front::hir; -use syntax::abi::Abi; use syntax::ast; use syntax::ptr::P; use syntax::parse::token; use rustc::lint; use rustc::session::Session; -use syntax::codemap::Span; +use syntax::codemap::{Span, DUMMY_SP}; use std::cmp::Ordering; -pub fn get_simple_intrinsic(ccx: &CrateContext, item: &hir::ForeignItem) -> Option { - let name = match &*item.name.as_str() { +fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { + let llvm_name = match name { "sqrtf32" => "llvm.sqrt.f32", "sqrtf64" => "llvm.sqrt.f64", "powif32" => "llvm.powi.f32", @@ -94,7 +94,7 @@ pub fn get_simple_intrinsic(ccx: &CrateContext, item: &hir::ForeignItem) -> Opti "assume" => "llvm.assume", _ => return None }; - Some(ccx.get_intrinsic(&name)) + Some(ccx.get_intrinsic(&llvm_name)) } pub fn span_transmute_size_error(a: &Session, b: Span, msg: &str) { @@ -171,13 +171,11 @@ pub fn check_intrinsics(ccx: &CrateContext) { /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// add them to librustc_trans/trans/context.rs pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - node: ast::NodeId, callee_ty: Ty<'tcx>, - cleanup_scope: cleanup::CustomScopeIndex, + fn_ty: &FnType, args: callee::CallArgs<'a, 'tcx>, dest: expr::Dest, - substs: &'tcx subst::Substs<'tcx>, - call_info: NodeIdAndSpan) + 
call_debug_location: DebugLoc) -> Result<'blk, 'tcx> { let fcx = bcx.fcx; let ccx = fcx.ccx; @@ -185,14 +183,23 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let _icx = push_ctxt("trans_intrinsic_call"); - let sig = ccx.tcx().erase_late_bound_regions(callee_ty.fn_sig()); - let sig = infer::normalize_associated_type(ccx.tcx(), &sig); + let (def_id, substs, sig) = match callee_ty.sty { + ty::TyFnDef(def_id, substs, fty) => { + let sig = tcx.erase_late_bound_regions(&fty.sig); + (def_id, substs, infer::normalize_associated_type(tcx, &sig)) + } + _ => unreachable!("expected fn item type, found {}", callee_ty) + }; let arg_tys = sig.inputs; let ret_ty = sig.output; - let foreign_item = tcx.map.expect_foreign_item(node); - let name = foreign_item.name.as_str(); + let name = tcx.item_name(def_id).as_str(); + + let span = match call_debug_location { + DebugLoc::At(_, span) => span, + DebugLoc::None => fcx.span.unwrap_or(DUMMY_SP) + }; - let call_debug_location = DebugLoc::At(call_info.id, call_info.span); + let cleanup_scope = fcx.push_custom_cleanup_scope(); // For `transmute` we can just trans the input expr directly into dest if name == "transmute" { @@ -213,7 +220,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, if out_type_size != 0 { // FIXME #19925 Remove this hack after a release cycle. let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0])); - let llfn = Callee::def(ccx, def_id, substs, in_type).reify(ccx).val; + let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val; let llfnty = val_ty(llfn); let llresult = match dest { expr::SaveIn(d) => d, @@ -267,7 +274,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let val = if datum.kind.is_by_ref() { load_ty(bcx, datum.val, datum.ty) } else { - from_arg_ty(bcx, datum.val, datum.ty) + from_immediate(bcx, datum.val) }; let cast_val = BitCast(bcx, val, llret_ty); @@ -347,43 +354,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } } - // For `try` we need some custom control flow - if &name[..] == "try" { - if let callee::ArgExprs(ref exprs) = args { - let (func, data, local_ptr) = if exprs.len() != 3 { - ccx.sess().bug("expected three exprs as arguments for \ - `try` intrinsic"); - } else { - (&exprs[0], &exprs[1], &exprs[2]) - }; - - // translate arguments - let func = unpack_datum!(bcx, expr::trans(bcx, func)); - let func = unpack_datum!(bcx, func.to_rvalue_datum(bcx, "func")); - let data = unpack_datum!(bcx, expr::trans(bcx, data)); - let data = unpack_datum!(bcx, data.to_rvalue_datum(bcx, "data")); - let local_ptr = unpack_datum!(bcx, expr::trans(bcx, local_ptr)); - let local_ptr = local_ptr.to_rvalue_datum(bcx, "local_ptr"); - let local_ptr = unpack_datum!(bcx, local_ptr); - - let dest = match dest { - expr::SaveIn(d) => d, - expr::Ignore => alloc_ty(bcx, tcx.mk_mut_ptr(tcx.types.i8), - "try_result"), - }; - - // do the invoke - bcx = try_intrinsic(bcx, func.val, data.val, local_ptr.val, dest, - call_debug_location); - - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - return Result::new(bcx, dest); - } else { - ccx.sess().bug("expected two exprs as arguments for \ - `try` intrinsic"); - } - } - // save the actual AST arguments for later (some places need to do // const-evaluation on them) let expr_arguments = match args { @@ -394,18 +364,19 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, // Push the arguments. 
let mut llargs = Vec::new(); bcx = callee::trans_args(bcx, + Abi::RustIntrinsic, + fn_ty, + &mut callee::Intrinsic, args, - callee_ty, &mut llargs, - cleanup::CustomScope(cleanup_scope), - Abi::RustIntrinsic); + cleanup::CustomScope(cleanup_scope)); fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); // These are the only intrinsic functions that diverge. if name == "abort" { let llfn = ccx.get_intrinsic(&("llvm.trap")); - Call(bcx, llfn, &[], None, call_debug_location); + Call(bcx, llfn, &[], call_debug_location); fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); Unreachable(bcx); return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to())); @@ -437,14 +408,19 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } }; - let simple = get_simple_intrinsic(ccx, &foreign_item); - let llval = match (simple, &*name) { + let simple = get_simple_intrinsic(ccx, &name); + let llval = match (simple, &name[..]) { (Some(llfn), _) => { - Call(bcx, llfn, &llargs, None, call_debug_location) + Call(bcx, llfn, &llargs, call_debug_location) + } + (_, "try") => { + bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult, + call_debug_location); + C_nil(ccx) } (_, "breakpoint") => { let llfn = ccx.get_intrinsic(&("llvm.debugtrap")); - Call(bcx, llfn, &[], None, call_debug_location) + Call(bcx, llfn, &[], call_debug_location) } (_, "size_of") => { let tp_ty = *substs.types.get(FnSpace, 0); @@ -454,7 +430,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, (_, "size_of_val") => { let tp_ty = *substs.types.get(FnSpace, 0); if !type_is_sized(tcx, tp_ty) { - let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); + let (llsize, _) = + glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); llsize } else { let lltp_ty = type_of::type_of(ccx, tp_ty); @@ -468,7 +445,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, (_, "min_align_of_val") => { let tp_ty = *substs.types.get(FnSpace, 0); if !type_is_sized(tcx, tp_ty) { - let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); + let (_, llalign) = + glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); llalign } else { C_uint(ccx, type_of::align_of(ccx, tp_ty)) @@ -505,14 +483,14 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } (_, "init_dropped") => { let tp_ty = *substs.types.get(FnSpace, 0); - if !return_type_is_void(ccx, tp_ty) { + if !type_is_zero_size(ccx, tp_ty) { drop_done_fill_mem(bcx, llresult, tp_ty); } C_nil(ccx) } (_, "init") => { let tp_ty = *substs.types.get(FnSpace, 0); - if !return_type_is_void(ccx, tp_ty) { + if !type_is_zero_size(ccx, tp_ty) { // Just zero out the stack slot. 
(See comment on base::memzero for explanation) init_zero_mem(bcx, llresult, tp_ty); } @@ -599,21 +577,24 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } (_, "volatile_load") => { let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); + let mut ptr = llargs[0]; + if let Some(ty) = fn_ty.ret.cast { + ptr = PointerCast(bcx, ptr, ty.ptr_to()); + } let load = VolatileLoad(bcx, ptr); unsafe { llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty)); } - to_arg_ty(bcx, load, tp_ty) + to_immediate(bcx, load, tp_ty) }, (_, "volatile_store") => { let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); let val = if type_is_immediate(bcx.ccx(), tp_ty) { - from_arg_ty(bcx, llargs[1], tp_ty) + from_immediate(bcx, llargs[1]) } else { Load(bcx, llargs[1]) }; + let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to()); let store = VolatileStore(bcx, val, ptr); unsafe { llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty)); @@ -634,13 +615,13 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width), llargs[0], call_debug_location), "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), - &llargs, None, call_debug_location), + &llargs, call_debug_location), "bswap" => { if width == 8 { llargs[0] // byte swap a u8/i8 is just a no-op } else { Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)), - &llargs, None, call_debug_location) + &llargs, call_debug_location) } } "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { @@ -669,7 +650,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, }, None => { span_invalid_monomorphization_error( - tcx.sess, call_info.span, + tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ expected basic integer type, found `{}`", name, sty)); C_null(llret_ty) @@ -680,8 +661,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, (_, "return_address") => { - if !fcx.caller_expects_out_pointer { - span_err!(tcx.sess, call_info.span, E0510, + if !fcx.fn_ty.ret.is_indirect() { + span_err!(tcx.sess, span, E0510, "invalid use of `return_address` intrinsic: function \ does not use out pointer"); C_null(Type::i8p(ccx)) @@ -709,7 +690,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, &llargs, ret_ty, llret_ty, call_debug_location, - call_info) + span) } // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_[_]", and no ordering means SeqCst @@ -742,19 +723,17 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, match split[1] { "cxchg" => { - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); - let cmp = from_arg_ty(bcx, llargs[1], tp_ty); - let src = from_arg_ty(bcx, llargs[2], tp_ty); + let cmp = from_immediate(bcx, llargs[1]); + let src = from_immediate(bcx, llargs[2]); + let ptr = PointerCast(bcx, llargs[0], val_ty(src).ptr_to()); let res = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::False); ExtractValue(bcx, res, 0) } "cxchgweak" => { - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); - let cmp = from_arg_ty(bcx, llargs[1], tp_ty); - let src = from_arg_ty(bcx, llargs[2], tp_ty); + let cmp = from_immediate(bcx, llargs[1]); + let src = from_immediate(bcx, llargs[2]); + let ptr = PointerCast(bcx, 
llargs[0], val_ty(src).ptr_to()); let val = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::True); let result = ExtractValue(bcx, val, 0); let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx())); @@ -765,13 +744,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, "load" => { let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); - to_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty) + let mut ptr = llargs[0]; + if let Some(ty) = fn_ty.ret.cast { + ptr = PointerCast(bcx, ptr, ty.ptr_to()); + } + to_immediate(bcx, AtomicLoad(bcx, ptr, order), tp_ty) } "store" => { - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); - let val = from_arg_ty(bcx, llargs[1], tp_ty); + let val = from_immediate(bcx, llargs[1]); + let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to()); AtomicStore(bcx, val, ptr, order); C_nil(ccx) } @@ -803,9 +784,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, _ => ccx.sess().fatal("unknown atomic operation") }; - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); - let val = from_arg_ty(bcx, llargs[1], tp_ty); + let val = from_immediate(bcx, llargs[1]); + let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to()); AtomicRMW(bcx, atom_op, ptr, val, order) } } @@ -815,8 +795,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, (_, _) => { let intr = match Intrinsic::find(tcx, &name) { Some(intr) => intr, - None => ccx.sess().span_bug(foreign_item.span, - &format!("unknown intrinsic '{}'", name)), + None => unreachable!("unknown intrinsic '{}'", name), }; fn one(x: Vec) -> T { assert_eq!(x.len(), 1); @@ -949,9 +928,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, intrinsics::IntrinsicDef::Named(name) => { let f = declare::declare_cfn(ccx, name, - Type::func(&inputs, &outputs), - tcx.mk_nil()); - Call(bcx, f, &llargs, None, call_debug_location) + Type::func(&inputs, &outputs)); + Call(bcx, f, &llargs, call_debug_location) } }; @@ -973,7 +951,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 { - store_ty(bcx, llval, llresult, ret_ty); + if let Some(ty) = fn_ty.ret.cast { + let ptr = PointerCast(bcx, llresult, ty.ptr_to()); + let store = Store(bcx, llval, ptr); + unsafe { + llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty)); + } + } else { + store_ty(bcx, llval, llresult, ret_ty); + } } // If we made a temporary stack slot, let's clean it up @@ -1024,7 +1010,6 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, Mul(bcx, size, count, DebugLoc::None), align, C_bool(ccx, volatile)], - None, call_debug_location) } @@ -1054,7 +1039,6 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, Mul(bcx, size, count, DebugLoc::None), align, C_bool(ccx, volatile)], - None, call_debug_location) } @@ -1065,7 +1049,7 @@ fn count_zeros_intrinsic(bcx: Block, -> ValueRef { let y = C_bool(bcx.ccx(), false); let llfn = bcx.ccx().get_intrinsic(&name); - Call(bcx, llfn, &[val, y], None, call_debug_location) + Call(bcx, llfn, &[val, y], call_debug_location) } fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, @@ -1078,7 +1062,7 @@ fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let llfn = bcx.ccx().get_intrinsic(&name); // Convert `i1` to a `bool`, and write it to the out parameter - let val 
= Call(bcx, llfn, &[a, b], None, call_debug_location); + let val = Call(bcx, llfn, &[a, b], call_debug_location); let result = ExtractValue(bcx, val, 0); let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx())); Store(bcx, result, StructGEP(bcx, out, 0)); @@ -1094,7 +1078,7 @@ fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dest: ValueRef, dloc: DebugLoc) -> Block<'blk, 'tcx> { if bcx.sess().no_landing_pads() { - Call(bcx, func, &[data], None, dloc); + Call(bcx, func, &[data], dloc); Store(bcx, C_null(Type::i8p(bcx.ccx())), dest); bcx } else if wants_msvc_seh(bcx.sess()) { @@ -1165,9 +1149,9 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // More information can be found in libstd's seh.rs implementation. let slot = Alloca(bcx, Type::i8p(ccx), "slot"); let localescape = ccx.get_intrinsic(&"llvm.localescape"); - Call(bcx, localescape, &[slot], None, dloc); + Call(bcx, localescape, &[slot], dloc); Store(bcx, local_ptr, slot); - Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, None, dloc); + Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc); Ret(normal, C_i32(ccx, 0), dloc); @@ -1184,7 +1168,7 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). - let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc); + let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc); Store(bcx, ret, dest); return bcx } @@ -1208,6 +1192,7 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dloc: DebugLoc) -> Block<'blk, 'tcx> { let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| { let ccx = bcx.ccx(); + let tcx = ccx.tcx(); let dloc = DebugLoc::None; // Translates the shims described above: @@ -1228,10 +1213,11 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // managed by the standard library. attributes::emit_uwtable(bcx.fcx.llfn, true); - let catch_pers = match bcx.tcx().lang_items.eh_personality_catch() { - Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0), - bcx.fcx.param_substs).val, - None => bcx.tcx().sess.bug("eh_personality_catch not defined"), + let catch_pers = match tcx.lang_items.eh_personality_catch() { + Some(did) => { + Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val + } + None => ccx.sess().bug("eh_personality_catch not defined"), }; let then = bcx.fcx.new_temp_block("then"); @@ -1240,7 +1226,7 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let func = llvm::get_param(bcx.fcx.llfn, 0); let data = llvm::get_param(bcx.fcx.llfn, 1); let local_ptr = llvm::get_param(bcx.fcx.llfn, 2); - Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc); + Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc); Ret(then, C_i32(ccx, 0), dloc); // Type indicator for the exception being thrown. @@ -1260,7 +1246,7 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). - let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc); + let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc); Store(bcx, ret, dest); return bcx; } @@ -1269,21 +1255,32 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // This is currently primarily used for the `try` intrinsic functions above. 
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, name: &str, - ty: Ty<'tcx>, + inputs: Vec>, output: ty::FnOutput<'tcx>, trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) -> ValueRef { let ccx = fcx.ccx; - let llfn = declare::define_internal_rust_fn(ccx, name, ty); + let sig = ty::FnSig { + inputs: inputs, + output: output, + variadic: false, + }; + let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); + + let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::BareFnTy { + unsafety: hir::Unsafety::Unsafe, + abi: Abi::Rust, + sig: ty::Binder(sig) + }); + let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty); + let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); let (fcx, block_arena); block_arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false, - output, ccx.tcx().mk_substs(Substs::trans_empty()), - None, &block_arena); - let bcx = init_function(&fcx, true, output); + fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &block_arena); + let bcx = fcx.init(true, None); trans(bcx); fcx.cleanup(); - return llfn + llfn } // Helper function used to get a handle to the `__rust_try` function used to @@ -1294,8 +1291,8 @@ fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) -> ValueRef { let ccx = fcx.ccx; - if let Some(llfn) = *ccx.rust_try_fn().borrow() { - return llfn + if let Some(llfn) = ccx.rust_try_fn().get() { + return llfn; } // Define the type up front for the signature of the rust_try function. @@ -1311,18 +1308,8 @@ fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, }), }); let output = ty::FnOutput::FnConverging(tcx.types.i32); - let try_fn_ty = ty::BareFnTy { - unsafety: hir::Unsafety::Unsafe, - abi: Abi::Rust, - sig: ty::Binder(ty::FnSig { - inputs: vec![fn_ty, i8p, i8p], - output: output, - variadic: false, - }), - }; - let rust_try = gen_fn(fcx, "__rust_try", tcx.mk_fn_ptr(try_fn_ty), output, - trans); - *ccx.rust_try_fn().borrow_mut() = Some(rust_try); + let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans); + ccx.rust_try_fn().set(Some(rust_try)); return rust_try } @@ -1341,9 +1328,10 @@ fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, let tcx = ccx.tcx(); let dloc = DebugLoc::None; - let rust_try_filter = match ccx.tcx().lang_items.msvc_try_filter() { - Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0), - fcx.param_substs).val, + let rust_try_filter = match tcx.lang_items.msvc_try_filter() { + Some(did) => { + Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val + } None => ccx.sess().bug("msvc_try_filter not defined"), }; @@ -1373,11 +1361,10 @@ fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, // For more info, see seh.rs in the standard library. 
let do_trans = |bcx: Block, ehptrs, base_pointer| { let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx)); - let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer], - None, dloc); + let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer], dloc); let arg = Call(bcx, localrecover, - &[rust_try_fn, parentfp, C_i32(ccx, 0)], None, dloc); - let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], None, dloc); + &[rust_try_fn, parentfp, C_i32(ccx, 0)], dloc); + let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], dloc); Ret(bcx, ret, dloc); }; @@ -1389,17 +1376,8 @@ fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, // going on here, all I can say is that there's a few tests cases in // LLVM's test suite which follow this pattern of instructions, so we // just do the same. - let filter_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy { - unsafety: hir::Unsafety::Unsafe, - abi: Abi::Rust, - sig: ty::Binder(ty::FnSig { - inputs: vec![], - output: output, - variadic: false, - }), - }); - gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| { - let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], None, dloc); + gen_fn(fcx, "__rustc_try_filter", vec![], output, &mut |bcx| { + let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], dloc); let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]); let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to())); do_trans(bcx, exn, ebp); @@ -1408,16 +1386,7 @@ fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, // Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer // are passed in as arguments to the filter function, so we just pass // those along. - let filter_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy { - unsafety: hir::Unsafety::Unsafe, - abi: Abi::Rust, - sig: ty::Binder(ty::FnSig { - inputs: vec![i8p, i8p], - output: output, - variadic: false, - }), - }); - gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| { + gen_fn(fcx, "__rustc_try_filter", vec![i8p, i8p], output, &mut |bcx| { let exn = llvm::get_param(bcx.fcx.llfn, 0); let rbp = llvm::get_param(bcx.fcx.llfn, 1); do_trans(bcx, exn, rbp); @@ -1441,7 +1410,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> ret_ty: Ty<'tcx>, llret_ty: Type, call_debug_location: DebugLoc, - call_info: NodeIdAndSpan) -> ValueRef + span: Span) -> ValueRef { // macros for error handling: macro_rules! 
emit_error { @@ -1450,7 +1419,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> }; ($msg: tt, $($fmt: tt)*) => { span_invalid_monomorphization_error( - bcx.sess(), call_info.span, + bcx.sess(), span, &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), name, $($fmt)*)); @@ -1519,7 +1488,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> if name.starts_with("simd_shuffle") { let n: usize = match name["simd_shuffle".len()..].parse() { Ok(n) => n, - Err(_) => tcx.sess.span_bug(call_info.span, + Err(_) => tcx.sess.span_bug(span, "bad `simd_shuffle` instruction only caught in trans?") }; @@ -1537,22 +1506,26 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> let total_len = in_len as u64 * 2; - let vector = match args { - Some(args) => &args[2], - None => bcx.sess().span_bug(call_info.span, - "intrinsic call with unexpected argument shape"), - }; - let vector = match consts::const_expr(bcx.ccx(), vector, substs, None, - consts::TrueConst::Yes, // this should probably help simd error reporting - ) { - Ok((vector, _)) => vector, - Err(err) => bcx.sess().span_fatal(call_info.span, &err.description()), + let (vector, indirect) = match args { + Some(args) => { + match consts::const_expr(bcx.ccx(), &args[2], substs, None, + // this should probably help simd error reporting + consts::TrueConst::Yes) { + Ok((vector, _)) => (vector, false), + Err(err) => bcx.sess().span_fatal(span, &err.description()), + } + } + None => (llargs[2], !type_is_immediate(bcx.ccx(), arg_tys[2])) }; let indices: Option> = (0..n) .map(|i| { let arg_idx = i; - let val = const_get_elt(bcx.ccx(), vector, &[i as libc::c_uint]); + let val = if indirect { + Load(bcx, StructGEP(bcx, vector, i)) + } else { + const_get_elt(vector, &[i as libc::c_uint]) + }; let c = const_to_opt_uint(val); match c { None => { @@ -1689,7 +1662,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> simd_or: TyUint, TyInt => Or; simd_xor: TyUint, TyInt => Xor; } - bcx.sess().span_bug(call_info.span, "unknown SIMD intrinsic"); + bcx.sess().span_bug(span, "unknown SIMD intrinsic"); } // Returns the width of an int TypeVariant, and if it's signed or not diff --git a/src/librustc_trans/trans/llrepr.rs b/src/librustc_trans/trans/llrepr.rs deleted file mode 100644 index 6b785e7edfd6a..0000000000000 --- a/src/librustc_trans/trans/llrepr.rs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use trans::context::CrateContext; -use trans::type_::Type; -use llvm::ValueRef; - -pub trait LlvmRepr { - fn llrepr(&self, ccx: &CrateContext) -> String; -} - -impl LlvmRepr for [T] { - fn llrepr(&self, ccx: &CrateContext) -> String { - let reprs: Vec = self.iter().map(|t| t.llrepr(ccx)).collect(); - format!("[{}]", reprs.join(",")) - } -} - -impl LlvmRepr for Type { - fn llrepr(&self, ccx: &CrateContext) -> String { - ccx.tn().type_to_string(*self) - } -} - -impl LlvmRepr for ValueRef { - fn llrepr(&self, ccx: &CrateContext) -> String { - ccx.tn().val_to_string(*self) - } -} diff --git a/src/librustc_trans/trans/meth.rs b/src/librustc_trans/trans/meth.rs index 7397ccc2505f1..30560ec05c469 100644 --- a/src/librustc_trans/trans/meth.rs +++ b/src/librustc_trans/trans/meth.rs @@ -18,14 +18,13 @@ use middle::infer; use middle::subst::{Subst, Substs}; use middle::subst; use middle::traits::{self, ProjectionMode}; +use trans::abi::FnType; use trans::base::*; use trans::build::*; -use trans::callee::{Callee, Virtual, ArgVals, - trans_fn_pointer_shim, trans_fn_ref_with_substs}; +use trans::callee::{Callee, Virtual, ArgVals, trans_fn_pointer_shim}; use trans::closure; use trans::common::*; use trans::consts; -use trans::datum::*; use trans::debuginfo::DebugLoc; use trans::declare; use trans::expr; @@ -33,158 +32,25 @@ use trans::glue; use trans::machine; use trans::type_::Type; use trans::type_of::*; +use trans::value::Value; use middle::ty::{self, Ty, TyCtxt, TypeFoldable}; -use syntax::ast::{self, Name}; -use syntax::attr; +use syntax::ast::Name; use syntax::codemap::DUMMY_SP; -use rustc_front::hir; - // drop_glue pointer, size, align. const VTABLE_OFFSET: usize = 3; -/// The main "translation" pass for methods. Generates code -/// for non-monomorphized methods only. Other methods will -/// be generated once they are invoked with specific type parameters, -/// see `trans::base::lval_static_fn()` or `trans::base::monomorphic_fn()`. -pub fn trans_impl(ccx: &CrateContext, - name: ast::Name, - impl_items: &[hir::ImplItem], - generics: &hir::Generics, - id: ast::NodeId) { - let _icx = push_ctxt("meth::trans_impl"); - let tcx = ccx.tcx(); - - debug!("trans_impl(name={}, id={})", name, id); - - // Both here and below with generic methods, be sure to recurse and look for - // items that we need to translate. - if !generics.ty_params.is_empty() { - return; - } - - for impl_item in impl_items { - match impl_item.node { - hir::ImplItemKind::Method(ref sig, ref body) => { - if sig.generics.ty_params.is_empty() { - let trans_everywhere = attr::requests_inline(&impl_item.attrs); - for (ref ccx, is_origin) in ccx.maybe_iter(trans_everywhere) { - let llfn = get_item_val(ccx, impl_item.id); - let empty_substs = tcx.mk_substs(Substs::trans_empty()); - trans_fn(ccx, - &sig.decl, - body, - llfn, - empty_substs, - impl_item.id, - &impl_item.attrs); - update_linkage(ccx, - llfn, - Some(impl_item.id), - if is_origin { OriginalTranslation } else { InlinedCopy }); - } - } - } - _ => {} - } - } -} - -/// Compute the appropriate callee, give na method's ID, trait ID, -/// substitutions and a Vtable for that trait. 
-pub fn callee_for_trait_impl<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - method_id: DefId, - substs: &'tcx subst::Substs<'tcx>, - trait_id: DefId, - method_ty: Ty<'tcx>, - vtable: traits::Vtable<'tcx, ()>) - -> Callee<'tcx> { - let _icx = push_ctxt("meth::callee_for_trait_impl"); - match vtable { - traits::VtableImpl(vtable_impl) => { - let impl_did = vtable_impl.impl_def_id; - let mname = ccx.tcx().item_name(method_id); - // create a concatenated set of substitutions which includes - // those from the impl and those from the method: - let impl_substs = vtable_impl.substs.with_method_from(&substs); - let substs = ccx.tcx().mk_substs(impl_substs); - let mth = get_impl_method(ccx.tcx(), impl_did, substs, mname); - - // Translate the function, bypassing Callee::def. - // That is because default methods have the same ID as the - // trait method used to look up the impl method that ended - // up here, so calling Callee::def would infinitely recurse. - Callee::ptr(trans_fn_ref_with_substs(ccx, mth.method.def_id, - Some(method_ty), mth.substs)) - } - traits::VtableClosure(vtable_closure) => { - // The substitutions should have no type parameters remaining - // after passing through fulfill_obligation - let trait_closure_kind = ccx.tcx().lang_items.fn_trait_kind(trait_id).unwrap(); - let llfn = closure::trans_closure_method(ccx, - vtable_closure.closure_def_id, - vtable_closure.substs, - trait_closure_kind); - let fn_ptr_ty = match method_ty.sty { - ty::TyFnDef(_, _, fty) => ccx.tcx().mk_ty(ty::TyFnPtr(fty)), - _ => unreachable!("expected fn item type, found {}", - method_ty) - }; - Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty)) - } - traits::VtableFnPointer(fn_ty) => { - let trait_closure_kind = ccx.tcx().lang_items.fn_trait_kind(trait_id).unwrap(); - let llfn = trans_fn_pointer_shim(ccx, trait_closure_kind, fn_ty); - let fn_ptr_ty = match method_ty.sty { - ty::TyFnDef(_, _, fty) => ccx.tcx().mk_ty(ty::TyFnPtr(fty)), - _ => unreachable!("expected fn item type, found {}", - method_ty) - }; - Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty)) - } - traits::VtableObject(ref data) => { - Callee { - data: Virtual(traits::get_vtable_index_of_object_method( - ccx.tcx(), data, method_id)), - ty: method_ty - } - } - traits::VtableBuiltin(..) | - traits::VtableDefaultImpl(..) | - traits::VtableParam(..) => { - ccx.sess().bug( - &format!("resolved vtable bad vtable {:?} in trans", - vtable)); - } - } -} - -/// Extracts a method from a trait object's vtable, at the -/// specified index, and casts it to the given type. +/// Extracts a method from a trait object's vtable, at the specified index. pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, llvtable: ValueRef, - vtable_index: usize, - method_ty: Ty<'tcx>) - -> Datum<'tcx, Rvalue> { - let _icx = push_ctxt("meth::get_virtual_method"); - let ccx = bcx.ccx(); - + vtable_index: usize) + -> ValueRef { // Load the data pointer from the object. - debug!("get_virtual_method(callee_ty={}, vtable_index={}, llvtable={})", - method_ty, - vtable_index, - bcx.val_to_string(llvtable)); + debug!("get_virtual_method(vtable_index={}, llvtable={:?})", + vtable_index, Value(llvtable)); - let mptr = Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET])); - - // Replace the self type (&Self or Box) with an opaque pointer. 
- if let ty::TyFnDef(_, _, fty) = method_ty.sty { - let opaque_ty = opaque_method_ty(ccx.tcx(), fty); - immediate_rvalue(PointerCast(bcx, mptr, type_of(ccx, opaque_ty)), opaque_ty) - } else { - immediate_rvalue(mptr, method_ty) - } + Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET])) } /// Generate a shim function that allows an object type like `SomeTrait` to @@ -211,7 +77,7 @@ pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, method_ty: Ty<'tcx>, vtable_index: usize) - -> Datum<'tcx, Rvalue> { + -> ValueRef { let _icx = push_ctxt("trans_object_shim"); let tcx = ccx.tcx(); @@ -219,58 +85,40 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, vtable_index, method_ty); - let ret_ty = tcx.erase_late_bound_regions(&method_ty.fn_ret()); - let ret_ty = infer::normalize_associated_type(tcx, &ret_ty); + let sig = tcx.erase_late_bound_regions(&method_ty.fn_sig()); + let sig = infer::normalize_associated_type(tcx, &sig); + let fn_ty = FnType::new(ccx, method_ty.fn_abi(), &sig, &[]); - let shim_fn_ty = match method_ty.sty { - ty::TyFnDef(_, _, fty) => tcx.mk_ty(ty::TyFnPtr(fty)), - _ => unreachable!("expected fn item type, found {}", method_ty) - }; - - // - let function_name = link::mangle_internal_name_by_type_and_seq(ccx, shim_fn_ty, "object_shim"); - let llfn = declare::define_internal_rust_fn(ccx, &function_name, shim_fn_ty); + let function_name = link::mangle_internal_name_by_type_and_seq(ccx, method_ty, "object_shim"); + let llfn = declare::define_internal_fn(ccx, &function_name, method_ty); let empty_substs = tcx.mk_substs(Substs::trans_empty()); let (block_arena, fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, - llfn, - ast::DUMMY_NODE_ID, - false, - ret_ty, - empty_substs, - None, - &block_arena); - let mut bcx = init_function(&fcx, false, ret_ty); - - let llargs = get_params(fcx.llfn); - - let self_idx = fcx.arg_offset(); - let llself = llargs[self_idx]; - let llvtable = llargs[self_idx + 1]; - - debug!("trans_object_shim: llself={}, llvtable={}", - bcx.val_to_string(llself), bcx.val_to_string(llvtable)); - + fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &block_arena); + let mut bcx = fcx.init(false, None); assert!(!fcx.needs_ret_allocas); + let dest = fcx.llretslotptr.get().map( - |_| expr::SaveIn(fcx.get_ret_slot(bcx, ret_ty, "ret_slot"))); + |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))); debug!("trans_object_shim: method_offset_in_vtable={}", vtable_index); + let llargs = get_params(fcx.llfn); + let args = ArgVals(&llargs[fcx.fn_ty.ret.is_indirect() as usize..]); + let callee = Callee { data: Virtual(vtable_index), ty: method_ty }; - bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[self_idx..]), dest).bcx; + bcx = callee.call(bcx, DebugLoc::None, args, dest).bcx; - finish_fn(&fcx, bcx, ret_ty, DebugLoc::None); + fcx.finish(bcx, DebugLoc::None); - immediate_rvalue(llfn, shim_fn_ty) + llfn } /// Creates a returns a dynamic vtable for the given type and vtable origin. 
@@ -311,17 +159,9 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let nullptr = C_null(Type::nil(ccx).ptr_to()); get_vtable_methods(ccx, id, substs) .into_iter() - .map(|opt_mth| { - match opt_mth { - Some(mth) => { - trans_fn_ref_with_substs(ccx, - mth.method.def_id, - None, - &mth.substs).val - } - None => nullptr - } - }) + .map(|opt_mth| opt_mth.map_or(nullptr, |mth| { + Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx).val + })) .collect::>() .into_iter() } @@ -452,23 +292,6 @@ pub fn get_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, .collect() } -/// Replace the self type (&Self or Box) with an opaque pointer. -fn opaque_method_ty<'tcx>(tcx: &TyCtxt<'tcx>, method_ty: &ty::BareFnTy<'tcx>) - -> Ty<'tcx> { - let mut inputs = method_ty.sig.0.inputs.clone(); - inputs[0] = tcx.mk_mut_ptr(tcx.mk_mach_int(ast::IntTy::I8)); - - tcx.mk_fn_ptr(ty::BareFnTy { - unsafety: method_ty.unsafety, - abi: method_ty.abi, - sig: ty::Binder(ty::FnSig { - inputs: inputs, - output: method_ty.sig.0.output, - variadic: method_ty.sig.0.variadic, - }), - }) -} - #[derive(Debug)] pub struct ImplMethod<'tcx> { pub method: Rc>, diff --git a/src/librustc_trans/trans/mir/block.rs b/src/librustc_trans/trans/mir/block.rs index 50283c0959c3f..080547952a5ce 100644 --- a/src/librustc_trans/trans/mir/block.rs +++ b/src/librustc_trans/trans/mir/block.rs @@ -8,33 +8,35 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{BasicBlockRef, ValueRef, OperandBundleDef}; +use llvm::{self, BasicBlockRef, ValueRef, OperandBundleDef}; use rustc::middle::ty; use rustc::mir::repr as mir; -use syntax::abi::Abi; +use trans::abi::{Abi, FnType}; use trans::adt; -use trans::attributes; use trans::base; use trans::build; -use trans::callee::{Callee, Fn, Virtual}; -use trans::common::{self, Block, BlockAndBuilder}; +use trans::callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; +use trans::common::{self, Block, BlockAndBuilder, C_undef}; use trans::debuginfo::DebugLoc; use trans::Disr; -use trans::foreign; +use trans::machine::{llalign_of_min, llbitsize_of_real}; use trans::meth; use trans::type_of; use trans::glue; use trans::type_::Type; use super::{MirContext, drop}; -use super::operand::OperandValue::{FatPtr, Immediate, Ref}; +use super::lvalue::{LvalueRef, load_fat_ptr}; +use super::operand::OperandRef; +use super::operand::OperandValue::{self, FatPtr, Immediate, Ref}; impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { pub fn trans_block(&mut self, bb: mir::BasicBlock) { debug!("trans_block({:?})", bb); let mut bcx = self.bcx(bb); - let data = self.mir.basic_block_data(bb); + let mir = self.mir.clone(); + let data = mir.basic_block_data(bb); // MSVC SEH bits let (cleanup_pad, cleanup_bundle) = if let Some((cp, cb)) = self.make_cleanup_pad(bb) { @@ -104,6 +106,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::Terminator::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { let (otherwise, targets) = targets.split_last().unwrap(); let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval); + let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty)); let switch = bcx.switch(discr, self.llblock(*otherwise), values.len()); for (value, target) in values.iter().zip(targets) { let llval = self.trans_constval(&bcx, value, switch_ty).immediate(); @@ -113,9 +116,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } mir::Terminator::Return => { - let return_ty = bcx.monomorphize(&self.mir.return_ty); 
bcx.with_block(|bcx| { - base::build_return_block(self.fcx, bcx, return_ty, DebugLoc::None); + self.fcx.build_return_block(bcx, DebugLoc::None); }) } @@ -141,11 +143,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { &[llvalue], self.llblock(target), unwind.llbb(), - cleanup_bundle.as_ref(), - None); + cleanup_bundle.as_ref()); self.bcx(target).at_start(|bcx| drop::drop_fill(bcx, lvalue.llval, ty)); } else { - bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref(), None); + bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref()); drop::drop_fill(&bcx, lvalue.llval, ty); funclet_br(bcx, self.llblock(target)); } @@ -154,190 +155,341 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::Terminator::Call { ref func, ref args, ref destination, ref cleanup } => { // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. let callee = self.trans_operand(&bcx, func); - let debugloc = DebugLoc::None; - // The arguments we'll be passing. Plus one to account for outptr, if used. - let mut llargs = Vec::with_capacity(args.len() + 1); - // Types of the arguments. We do not preallocate, because this vector is only - // filled when `is_foreign` is `true` and foreign calls are minority of the cases. - let mut arg_tys = Vec::new(); - let (callee, fty) = match callee.ty.sty { + let (mut callee, abi, sig) = match callee.ty.sty { ty::TyFnDef(def_id, substs, f) => { - (Callee::def(bcx.ccx(), def_id, substs, callee.ty), f) + (Callee::def(bcx.ccx(), def_id, substs), f.abi, &f.sig) } ty::TyFnPtr(f) => { (Callee { data: Fn(callee.immediate()), ty: callee.ty - }, f) + }, f.abi, &f.sig) } _ => unreachable!("{} is not callable", callee.ty) }; - // We do not translate intrinsics here (they shouldn’t be functions) - assert!(fty.abi != Abi::RustIntrinsic && fty.abi != Abi::PlatformIntrinsic); - // Foreign-ABI functions are translated differently - let is_foreign = fty.abi != Abi::Rust && fty.abi != Abi::RustCall; + // Handle intrinsics old trans wants Expr's for, ourselves. + let intrinsic = match (&callee.ty.sty, &callee.data) { + (&ty::TyFnDef(def_id, _, _), &Intrinsic) => { + Some(bcx.tcx().item_name(def_id).as_str()) + } + _ => None + }; + let intrinsic = intrinsic.as_ref().map(|s| &s[..]); + + if intrinsic == Some("move_val_init") { + let &(_, target) = destination.as_ref().unwrap(); + // The first argument is a thin destination pointer. + let llptr = self.trans_operand(&bcx, &args[0]).immediate(); + let val = self.trans_operand(&bcx, &args[1]); + self.store_operand(&bcx, llptr, val); + self.set_operand_dropped(&bcx, &args[1]); + funclet_br(bcx, self.llblock(target)); + return; + } + + if intrinsic == Some("transmute") { + let &(ref dest, target) = destination.as_ref().unwrap(); + let dst = self.trans_lvalue(&bcx, dest); + let mut val = self.trans_operand(&bcx, &args[0]); + if let ty::TyFnDef(def_id, substs, _) = val.ty.sty { + let llouttype = type_of::type_of(bcx.ccx(), dst.ty.to_ty(bcx.tcx())); + let out_type_size = llbitsize_of_real(bcx.ccx(), llouttype); + if out_type_size != 0 { + // FIXME #19925 Remove this hack after a release cycle. 
+ let f = Callee::def(bcx.ccx(), def_id, substs); + let datum = f.reify(bcx.ccx()); + val = OperandRef { + val: OperandValue::Immediate(datum.val), + ty: datum.ty + }; + } + } + + let llty = type_of::type_of(bcx.ccx(), val.ty); + let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); + self.store_operand(&bcx, cast_ptr, val); + self.set_operand_dropped(&bcx, &args[0]); + funclet_br(bcx, self.llblock(target)); + return; + } + + let extra_args = &args[sig.0.inputs.len()..]; + let extra_args = extra_args.iter().map(|op_arg| { + self.mir.operand_ty(bcx.tcx(), op_arg) + }).collect::>(); + let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args); + + // The arguments we'll be passing. Plus one to account for outptr, if used. + let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize; + let mut llargs = Vec::with_capacity(arg_count); // Prepare the return value destination - let (ret_dest_ty, must_copy_dest) = if let Some((ref d, _)) = *destination { + let ret_dest = if let Some((ref d, _)) = *destination { let dest = self.trans_lvalue(&bcx, d); - let ret_ty = dest.ty.to_ty(bcx.tcx()); - if !is_foreign && type_of::return_uses_outptr(bcx.ccx(), ret_ty) { + if fn_ty.ret.is_indirect() { llargs.push(dest.llval); - (Some((dest, ret_ty)), false) + None + } else if fn_ty.ret.is_ignore() { + None } else { - (Some((dest, ret_ty)), !common::type_is_zero_size(bcx.ccx(), ret_ty)) + Some(dest) } } else { - (None, false) + None }; // Split the rust-call tupled arguments off. - let (args, rest) = if fty.abi == Abi::RustCall && !args.is_empty() { + let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() { let (tup, args) = args.split_last().unwrap(); - // we can reorder safely because of MIR - (args, self.trans_operand_untupled(&bcx, tup)) + (args, Some(tup)) } else { - (&args[..], vec![]) + (&args[..], None) }; - let datum = { - let mut arg_ops = args.iter().map(|arg| { - self.trans_operand(&bcx, arg) - }).chain(rest.into_iter()); - - // Get the actual pointer we can call. - // This can involve vtable accesses or reification. - let datum = if let Virtual(idx) = callee.data { - assert!(!is_foreign); - - // Grab the first argument which is a trait object. - let vtable = match arg_ops.next().unwrap().val { - FatPtr(data, vtable) => { - llargs.push(data); - vtable - } - _ => unreachable!("expected FatPtr for Virtual call") + let mut idx = 0; + for arg in first_args { + let val = self.trans_operand(&bcx, arg).val; + self.trans_argument(&bcx, val, &mut llargs, &fn_ty, + &mut idx, &mut callee.data); + } + if let Some(tup) = untuple { + self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty, + &mut idx, &mut callee.data) + } + + let fn_ptr = match callee.data { + NamedTupleConstructor(_) => { + // FIXME translate this like mir::Rvalue::Aggregate. + callee.reify(bcx.ccx()).val + } + Intrinsic => { + use trans::callee::ArgVals; + use trans::expr::{Ignore, SaveIn}; + use trans::intrinsic::trans_intrinsic_call; + + let (dest, llargs) = if fn_ty.ret.is_indirect() { + (SaveIn(llargs[0]), &llargs[1..]) + } else if let Some(dest) = ret_dest { + (SaveIn(dest.llval), &llargs[..]) + } else { + (Ignore, &llargs[..]) }; bcx.with_block(|bcx| { - meth::get_virtual_method(bcx, vtable, idx, callee.ty) - }) - } else { - callee.reify(bcx.ccx()) - }; - - // Process the rest of the args. 
- for operand in arg_ops { - match operand.val { - Ref(llval) | Immediate(llval) => llargs.push(llval), - FatPtr(b, e) => { - llargs.push(b); - llargs.push(e); + let res = trans_intrinsic_call(bcx, callee.ty, &fn_ty, + ArgVals(llargs), dest, + DebugLoc::None); + let bcx = res.bcx.build(); + if let Some((_, target)) = *destination { + for op in args { + self.set_operand_dropped(&bcx, op); + } + funclet_br(bcx, self.llblock(target)); + } else { + // trans_intrinsic_call already used Unreachable. + // bcx.unreachable(); } - } - if is_foreign { - arg_tys.push(operand.ty); - } + }); + return; } - - datum + Fn(f) => f, + Virtual(_) => unreachable!("Virtual fn ptr not extracted") }; - let attrs = attributes::from_fn_type(bcx.ccx(), datum.ty); // Many different ways to call a function handled here - match (is_foreign, cleanup, destination) { - // The two cases below are the only ones to use LLVM’s `invoke`. - (false, &Some(cleanup), &None) => { - let cleanup = self.bcx(cleanup); - let landingpad = self.make_landing_pad(cleanup); - let unreachable_blk = self.unreachable_block(); - bcx.invoke(datum.val, - &llargs[..], - unreachable_blk.llbb, - landingpad.llbb(), - cleanup_bundle.as_ref(), - Some(attrs)); - landingpad.at_start(|bcx| for op in args { - self.set_operand_dropped(bcx, op); - }); - }, - (false, &Some(cleanup), &Some((_, success))) => { - let cleanup = self.bcx(cleanup); - let landingpad = self.make_landing_pad(cleanup); - let invokeret = bcx.invoke(datum.val, - &llargs[..], - self.llblock(success), - landingpad.llbb(), - cleanup_bundle.as_ref(), - Some(attrs)); - if must_copy_dest { - let (ret_dest, ret_ty) = ret_dest_ty - .expect("return destination and type not set"); - // We translate the copy straight into the beginning of the target - // block. - self.bcx(success).at_start(|bcx| bcx.with_block( |bcx| { - base::store_ty(bcx, invokeret, ret_dest.llval, ret_ty); - })); + if let Some(cleanup) = cleanup.map(|bb| self.bcx(bb)) { + // We translate the copy into a temporary block. The temporary block is + // necessary because the current block has already been terminated (by + // `invoke`) and we cannot really translate into the target block + // because: + // * The target block may have more than a single precedesor; + // * Some LLVM insns cannot have a preceeding store insn (phi, + // cleanuppad), and adding/prepending the store now may render + // those other instructions invalid. + // + // NB: This approach still may break some LLVM code. 
For example if the + // target block starts with a `phi` (which may only match on immediate + // precedesors), it cannot know about this temporary block thus + // resulting in an invalid code: + // + // this: + // … + // %0 = … + // %1 = invoke to label %temp … + // temp: + // store ty %1, ty* %dest + // br label %actualtargetblock + // actualtargetblock: ; preds: %temp, … + // phi … [%this, …], [%0, …] ; ERROR: phi requires to match only on + // ; immediate precedesors + + let ret_bcx = if destination.is_some() { + self.fcx.new_block("", None) + } else { + self.unreachable_block() + }; + let landingpad = self.make_landing_pad(cleanup); + + let invokeret = bcx.invoke(fn_ptr, + &llargs, + ret_bcx.llbb, + landingpad.llbb(), + cleanup_bundle.as_ref()); + fn_ty.apply_attrs_callsite(invokeret); + + landingpad.at_start(|bcx| for op in args { + self.set_operand_dropped(bcx, op); + }); + + if let Some((_, target)) = *destination { + let ret_bcx = ret_bcx.build(); + if let Some(ret_dest) = ret_dest { + fn_ty.ret.store(&ret_bcx, invokeret, ret_dest.llval); } - self.bcx(success).at_start(|bcx| for op in args { - self.set_operand_dropped(bcx, op); - }); - landingpad.at_start(|bcx| for op in args { - self.set_operand_dropped(bcx, op); - }); - }, - (false, _, &None) => { - bcx.call(datum.val, - &llargs[..], - cleanup_bundle.as_ref(), - Some(attrs)); - // no need to drop args, because the call never returns - bcx.unreachable(); + for op in args { + self.set_operand_dropped(&ret_bcx, op); + } + ret_bcx.br(self.llblock(target)); } - (false, _, &Some((_, target))) => { - let llret = bcx.call(datum.val, - &llargs[..], - cleanup_bundle.as_ref(), - Some(attrs)); - if must_copy_dest { - let (ret_dest, ret_ty) = ret_dest_ty - .expect("return destination and type not set"); - bcx.with_block(|bcx| { - base::store_ty(bcx, llret, ret_dest.llval, ret_ty); - }); + } else { + let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle.as_ref()); + fn_ty.apply_attrs_callsite(llret); + if let Some((_, target)) = *destination { + if let Some(ret_dest) = ret_dest { + fn_ty.ret.store(&bcx, llret, ret_dest.llval); } for op in args { self.set_operand_dropped(&bcx, op); } funclet_br(bcx, self.llblock(target)); + } else { + // no need to drop args, because the call never returns + bcx.unreachable(); } - // Foreign functions - (true, _, destination) => { - let (dest, _) = ret_dest_ty - .expect("return destination is not set"); - bcx = bcx.map_block(|bcx| { - foreign::trans_native_call(bcx, - datum.ty, - datum.val, - dest.llval, - &llargs[..], - arg_tys, - debugloc) - }); - if let Some((_, target)) = *destination { - for op in args { - self.set_operand_dropped(&bcx, op); - } - funclet_br(bcx, self.llblock(target)); - } - }, } } } } + fn trans_argument(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + val: OperandValue, + llargs: &mut Vec, + fn_ty: &FnType, + next_idx: &mut usize, + callee: &mut CalleeData) { + // Treat the values in a fat pointer separately. + if let FatPtr(ptr, meta) = val { + if *next_idx == 0 { + if let Virtual(idx) = *callee { + let llfn = bcx.with_block(|bcx| { + meth::get_virtual_method(bcx, meta, idx) + }); + let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); + *callee = Fn(bcx.pointercast(llfn, llty)); + } + } + self.trans_argument(bcx, Immediate(ptr), llargs, fn_ty, next_idx, callee); + self.trans_argument(bcx, Immediate(meta), llargs, fn_ty, next_idx, callee); + return; + } + + let arg = &fn_ty.args[*next_idx]; + *next_idx += 1; + + // Fill padding with undef value, where applicable. 
+ if let Some(ty) = arg.pad { + llargs.push(C_undef(ty)); + } + + if arg.is_ignore() { + return; + } + + // Force by-ref if we have to load through a cast pointer. + let (mut llval, by_ref) = match val { + Immediate(llval) if arg.is_indirect() || arg.cast.is_some() => { + let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg"); + bcx.store(llval, llscratch); + (llscratch, true) + } + Immediate(llval) => (llval, false), + Ref(llval) => (llval, true), + FatPtr(_, _) => unreachable!("fat pointers handled above") + }; + + if by_ref && !arg.is_indirect() { + // Have to load the argument, maybe while casting it. + if arg.original_ty == Type::i1(bcx.ccx()) { + // We store bools as i8 so we need to truncate to i1. + llval = bcx.load_range_assert(llval, 0, 2, llvm::False); + llval = bcx.trunc(llval, arg.original_ty); + } else if let Some(ty) = arg.cast { + llval = bcx.load(bcx.pointercast(llval, ty.ptr_to())); + let llalign = llalign_of_min(bcx.ccx(), arg.ty); + unsafe { + llvm::LLVMSetAlignment(llval, llalign); + } + } else { + llval = bcx.load(llval); + } + } + + llargs.push(llval); + } + + fn trans_arguments_untupled(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + operand: &mir::Operand<'tcx>, + llargs: &mut Vec, + fn_ty: &FnType, + next_idx: &mut usize, + callee: &mut CalleeData) { + // FIXME: consider having some optimization to avoid tupling/untupling + // (and storing/loading in the case of immediates) + + // avoid trans_operand for pointless copying + let lv = match *operand { + mir::Operand::Consume(ref lvalue) => self.trans_lvalue(bcx, lvalue), + mir::Operand::Constant(ref constant) => { + // FIXME: consider being less pessimized + if constant.ty.is_nil() { + return; + } + + let ty = bcx.monomorphize(&constant.ty); + let lv = LvalueRef::alloca(bcx, ty, "__untuple_alloca"); + let constant = self.trans_constant(bcx, constant); + self.store_operand(bcx, lv.llval, constant); + lv + } + }; + + let lv_ty = lv.ty.to_ty(bcx.tcx()); + let result_types = match lv_ty.sty { + ty::TyTuple(ref tys) => tys, + _ => bcx.tcx().sess.span_bug( + self.mir.span, + &format!("bad final argument to \"rust-call\" fn {:?}", lv_ty)) + }; + + let base_repr = adt::represent_type(bcx.ccx(), lv_ty); + let base = adt::MaybeSizedValue::sized(lv.llval); + for (n, &ty) in result_types.iter().enumerate() { + let ptr = adt::trans_field_ptr_builder(bcx, &base_repr, base, Disr(0), n); + let val = if common::type_is_fat_ptr(bcx.tcx(), ty) { + let (lldata, llextra) = load_fat_ptr(bcx, ptr); + FatPtr(lldata, llextra) + } else { + // Don't bother loading the value, trans_argument will. + Ref(ptr) + }; + self.trans_argument(bcx, val, llargs, fn_ty, next_idx, callee); + } + } + fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef { let ccx = bcx.ccx(); if let Some(slot) = self.llpersonalityslot { diff --git a/src/librustc_trans/trans/mir/constant.rs b/src/librustc_trans/trans/mir/constant.rs index c20d8b01eb773..d4934718d75ed 100644 --- a/src/librustc_trans/trans/mir/constant.rs +++ b/src/librustc_trans/trans/mir/constant.rs @@ -8,17 +8,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
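Aside (illustration only, not part of the patch): the FatPtr handling above assumes a fat pointer is exactly two machine words, a data pointer plus a length or vtable, which is why trans_argument pushes two separate immediates for one operand. A minimal standalone check of that layout assumption:

    // Fat pointers (&[T], &dyn Trait) are (data, meta) word pairs; thin pointers are one word.
    use std::mem::size_of;

    trait Draw {}

    fn main() {
        assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
        assert_eq!(size_of::<&dyn Draw>(), 2 * size_of::<usize>());
        assert_eq!(size_of::<&u8>(), size_of::<usize>());
    }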
-use back::abi; use llvm::ValueRef; use middle::ty::{Ty, TypeFoldable}; use rustc::middle::const_eval::{self, ConstVal}; use rustc_const_eval::ConstInt::*; use rustc::mir::repr as mir; +use trans::abi; use trans::common::{self, BlockAndBuilder, C_bool, C_bytes, C_floating_f64, C_integral, - C_str_slice, C_nil, C_undef}; + C_str_slice, C_undef}; use trans::consts; +use trans::datum; use trans::expr; -use trans::inline; use trans::type_of; use trans::type_::Type; @@ -38,8 +38,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let val = if common::type_is_immediate(ccx, ty) { OperandValue::Immediate(val) } else if common::type_is_fat_ptr(bcx.tcx(), ty) { - let data = common::const_get_elt(ccx, val, &[abi::FAT_PTR_ADDR as u32]); - let extra = common::const_get_elt(ccx, val, &[abi::FAT_PTR_EXTRA as u32]); + let data = common::const_get_elt(val, &[abi::FAT_PTR_ADDR as u32]); + let extra = common::const_get_elt(val, &[abi::FAT_PTR_EXTRA as u32]); OperandValue::FatPtr(data, extra) } else { OperandValue::Ref(val) @@ -85,16 +85,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { ConstVal::Integral(InferSigned(v)) => C_integral(llty, v as u64, true), ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()), ConstVal::ByteStr(ref v) => consts::addr_of(ccx, C_bytes(ccx, v), 1, "byte_str"), - ConstVal::Struct(id) | ConstVal::Tuple(id) | - ConstVal::Array(id, _) | ConstVal::Repeat(id, _) => { - let expr = bcx.tcx().map.expect_expr(id); - bcx.with_block(|bcx| { - expr::trans(bcx, expr).datum.val - }) - }, + ConstVal::Struct(_) | ConstVal::Tuple(_) | + ConstVal::Array(..) | ConstVal::Repeat(..) | + ConstVal::Function(_) => { + unreachable!("MIR must not use {:?} (which refers to a local ID)", cv) + } ConstVal::Char(c) => C_integral(Type::char(ccx), c as u64, false), ConstVal::Dummy => unreachable!(), - ConstVal::Function(_) => C_nil(ccx) } } @@ -116,16 +113,26 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { }; } - let substs = bcx.tcx().mk_substs(bcx.monomorphize(&substs)); - let def_id = inline::maybe_instantiate_inline(bcx.ccx(), def_id); - let expr = const_eval::lookup_const_by_id(bcx.tcx(), def_id, None, Some(substs)) + let substs = Some(bcx.monomorphize(substs)); + let expr = const_eval::lookup_const_by_id(bcx.tcx(), def_id, substs) .expect("def was const, but lookup_const_by_id failed").0; // FIXME: this is falling back to translating from HIR. This is not easy to fix, // because we would have somehow adapt const_eval to work on MIR rather than HIR. 
let d = bcx.with_block(|bcx| { expr::trans(bcx, expr) }); - OperandRef::from_rvalue_datum(d.datum.to_rvalue_datum(d.bcx, "").datum) + + let datum = d.datum.to_rvalue_datum(d.bcx, "").datum; + + match datum.kind.mode { + datum::RvalueMode::ByValue => { + OperandRef { + ty: datum.ty, + val: OperandValue::Immediate(datum.val) + } + } + datum::RvalueMode::ByRef => self.trans_load(bcx, datum.val, datum.ty) + } } mir::Literal::Value { ref value } => { self.trans_constval(bcx, value, ty) diff --git a/src/librustc_trans/trans/mir/lvalue.rs b/src/librustc_trans/trans/mir/lvalue.rs index 8d97708ca2649..ffc3b1206746e 100644 --- a/src/librustc_trans/trans/mir/lvalue.rs +++ b/src/librustc_trans/trans/mir/lvalue.rs @@ -12,11 +12,13 @@ use llvm::ValueRef; use rustc::middle::ty::{self, Ty, TypeFoldable}; use rustc::mir::repr as mir; use rustc::mir::tcx::LvalueTy; +use trans::abi; use trans::adt; use trans::base; -use trans::common::{self, BlockAndBuilder}; +use trans::builder::Builder; +use trans::common::{self, BlockAndBuilder, C_uint}; +use trans::consts; use trans::machine; -use trans::type_of; use trans::mir::drop; use llvm; use trans::Disr; @@ -49,11 +51,25 @@ impl<'tcx> LvalueRef<'tcx> { { assert!(!ty.has_erasable_regions()); let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name)); - drop::drop_fill(bcx, lltemp, ty); + if bcx.fcx().type_needs_drop(ty) { + drop::drop_fill(bcx, lltemp, ty); + } LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)) } } +pub fn get_meta(b: &Builder, fat_ptr: ValueRef) -> ValueRef { + b.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA) +} + +pub fn get_dataptr(b: &Builder, fat_ptr: ValueRef) -> ValueRef { + b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR) +} + +pub fn load_fat_ptr(b: &Builder, fat_ptr: ValueRef) -> (ValueRef, ValueRef) { + (b.load(get_dataptr(b, fat_ptr)), b.load(get_meta(b, fat_ptr))) +} + impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { pub fn lvalue_len(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, @@ -89,16 +105,12 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::Lvalue::Arg(index) => self.args[index as usize], mir::Lvalue::Static(def_id) => { let const_ty = self.mir.lvalue_ty(tcx, lvalue); - LvalueRef::new_sized( - common::get_static_val(ccx, def_id, const_ty.to_ty(tcx)), - const_ty) + LvalueRef::new_sized(consts::get_static(ccx, def_id).val, const_ty) }, mir::Lvalue::ReturnPointer => { - let fn_return_ty = bcx.monomorphize(&self.mir.return_ty); - let return_ty = fn_return_ty.unwrap(); - let llval = if !common::return_type_is_void(bcx.ccx(), return_ty) { + let llval = if !fcx.fn_ty.ret.is_ignore() { bcx.with_block(|bcx| { - fcx.get_ret_slot(bcx, fn_return_ty, "") + fcx.get_ret_slot(bcx, "") }) } else { // This is a void return; that is, there’s no place to store the value and @@ -106,27 +118,40 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // Ergo, we return an undef ValueRef, so we do not have to special-case every // place using lvalues, and could use it the same way you use a regular // ReturnPointer LValue (i.e. store into it, load from it etc). 
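Aside (illustration only, not part of the patch): the get_dataptr/get_meta helpers introduced above are plain field projections into the { data, meta } pair a fat pointer is stored as, and load_fat_ptr reads both fields back. A simplified stand-in, with hypothetical names, for what those struct GEPs and loads do:

    #[repr(C)]
    struct FatPtrRepr {
        data: *const u8, // abi::FAT_PTR_ADDR
        meta: usize,     // abi::FAT_PTR_EXTRA: length, or vtable pointer for trait objects
    }

    // Rough analogue of load_fat_ptr: load each field of the pair.
    fn load_fat_ptr_repr(p: &FatPtrRepr) -> (*const u8, usize) {
        (p.data, p.meta)
    }

    fn main() {
        let s: &[u8] = b"mir";
        let repr = FatPtrRepr { data: s.as_ptr(), meta: s.len() };
        let (data, meta) = load_fat_ptr_repr(&repr);
        assert_eq!(meta, 3);
        assert!(!data.is_null());
    }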
- let llty = type_of::type_of(bcx.ccx(), return_ty).ptr_to(); + let llty = fcx.fn_ty.ret.original_ty.ptr_to(); unsafe { llvm::LLVMGetUndef(llty.to_ref()) } }; + let fn_return_ty = bcx.monomorphize(&self.mir.return_ty); + let return_ty = fn_return_ty.unwrap(); LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty)) }, mir::Lvalue::Projection(ref projection) => { let tr_base = self.trans_lvalue(bcx, &projection.base); let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem); + let projected_ty = bcx.monomorphize(&projected_ty); + + let project_index = |llindex| { + let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty { + // Slices already point to the array element type. + bcx.inbounds_gep(tr_base.llval, &[llindex]) + } else { + let zero = common::C_uint(bcx.ccx(), 0u64); + bcx.inbounds_gep(tr_base.llval, &[zero, llindex]) + }; + (element, ptr::null_mut()) + }; + let (llprojected, llextra) = match projection.elem { mir::ProjectionElem::Deref => { let base_ty = tr_base.ty.to_ty(tcx); - bcx.with_block(|bcx| { - if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) { - (base::load_ty(bcx, tr_base.llval, base_ty), - ptr::null_mut()) - } else { - base::load_fat_ptr(bcx, tr_base.llval, base_ty) - } - }) + if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) { + (base::load_ty_builder(bcx, tr_base.llval, base_ty), + ptr::null_mut()) + } else { + load_fat_ptr(bcx, tr_base.llval) + } } mir::ProjectionElem::Field(ref field, _) => { let base_ty = tr_base.ty.to_ty(tcx); @@ -142,9 +167,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } else { adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra) }; - let llprojected = bcx.with_block(|bcx| { - adt::trans_field_ptr(bcx, &base_repr, base, Disr(discr), field.index()) - }); + let llprojected = adt::trans_field_ptr_builder(bcx, &base_repr, base, + Disr(discr), field.index()); let llextra = if is_sized { ptr::null_mut() } else { @@ -154,30 +178,21 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } mir::ProjectionElem::Index(ref index) => { let index = self.trans_operand(bcx, index); - let llindex = self.prepare_index(bcx, index.immediate()); - let zero = common::C_uint(bcx.ccx(), 0u64); - (bcx.inbounds_gep(tr_base.llval, &[zero, llindex]), - ptr::null_mut()) + project_index(self.prepare_index(bcx, index.immediate())) } mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = common::C_u32(bcx.ccx(), offset); - let llindex = self.prepare_index(bcx, lloffset); - let zero = common::C_uint(bcx.ccx(), 0u64); - (bcx.inbounds_gep(tr_base.llval, &[zero, llindex]), - ptr::null_mut()) + let lloffset = C_uint(bcx.ccx(), offset); + project_index(self.prepare_index(bcx, lloffset)) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = common::C_u32(bcx.ccx(), offset); + let lloffset = C_uint(bcx.ccx(), offset); let lllen = self.lvalue_len(bcx, tr_base); let llindex = bcx.sub(lllen, lloffset); - let llindex = self.prepare_index(bcx, llindex); - let zero = common::C_uint(bcx.ccx(), 0u64); - (bcx.inbounds_gep(tr_base.llval, &[zero, llindex]), - ptr::null_mut()) + project_index(self.prepare_index(bcx, llindex)) } mir::ProjectionElem::Downcast(..) 
=> { (tr_base.llval, tr_base.llextra) diff --git a/src/librustc_trans/trans/mir/mod.rs b/src/librustc_trans/trans/mir/mod.rs index 4ad2e035945f3..9df2cb712b14f 100644 --- a/src/librustc_trans/trans/mir/mod.rs +++ b/src/librustc_trans/trans/mir/mod.rs @@ -10,21 +10,39 @@ use libc::c_uint; use llvm::{self, ValueRef}; +use middle::ty; use rustc::mir::repr as mir; use rustc::mir::tcx::LvalueTy; use trans::base; -use trans::common::{self, Block, BlockAndBuilder}; -use trans::expr; -use trans::type_of; +use trans::common::{self, Block, BlockAndBuilder, FunctionContext}; -use self::lvalue::LvalueRef; +use std::ops::Deref; +use std::rc::Rc; + +use self::lvalue::{LvalueRef, get_dataptr, get_meta}; use self::operand::OperandRef; +#[derive(Clone)] +pub enum CachedMir<'mir, 'tcx: 'mir> { + Ref(&'mir mir::Mir<'tcx>), + Owned(Rc>) +} + +impl<'mir, 'tcx: 'mir> Deref for CachedMir<'mir, 'tcx> { + type Target = mir::Mir<'tcx>; + fn deref(&self) -> &mir::Mir<'tcx> { + match *self { + CachedMir::Ref(r) => r, + CachedMir::Owned(ref rc) => rc + } + } +} + // FIXME DebugLoc is always None right now /// Master context for translating MIR. pub struct MirContext<'bcx, 'tcx:'bcx> { - mir: &'bcx mir::Mir<'tcx>, + mir: CachedMir<'bcx, 'tcx>, /// Function context fcx: &'bcx common::FunctionContext<'bcx, 'tcx>, @@ -77,16 +95,16 @@ enum TempRef<'tcx> { /////////////////////////////////////////////////////////////////////////// -pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) { - let fcx = bcx.fcx(); +pub fn trans_mir<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>) { + let bcx = fcx.init(false, None).build(); let mir = bcx.mir(); - let mir_blocks = bcx.mir().all_basic_blocks(); + let mir_blocks = mir.all_basic_blocks(); // Analyze the temps to determine which must be lvalues // FIXME let lvalue_temps = bcx.with_block(|bcx| { - analyze::lvalue_temps(bcx, mir) + analyze::lvalue_temps(bcx, &mir) }); // Allocate variable and temp allocas @@ -108,10 +126,10 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) { TempRef::Operand(None) }) .collect(); - let args = arg_value_refs(&bcx, mir); + let args = arg_value_refs(&bcx, &mir); // Allocate a `Block` for every basic block - let block_bcxs: Vec> = + let block_bcxs: Vec> = mir_blocks.iter() .map(|&bb|{ // FIXME(#30941) this doesn't handle msvc-style exceptions @@ -138,6 +156,8 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) { for &bb in &mir_blocks { mircx.trans_block(bb); } + + fcx.cleanup(); } /// Produce, for each argument, a `ValueRef` pointing at the @@ -146,51 +166,75 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) { fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, mir: &mir::Mir<'tcx>) -> Vec> { - // FIXME tupled_args? I think I'd rather that mapping is done in MIR land though let fcx = bcx.fcx(); let tcx = bcx.tcx(); - let mut idx = fcx.arg_offset() as c_uint; - mir.arg_decls - .iter() - .enumerate() - .map(|(arg_index, arg_decl)| { - let arg_ty = bcx.monomorphize(&arg_decl.ty); - let llval = if type_of::arg_is_indirect(bcx.ccx(), arg_ty) { - // Don't copy an indirect argument to an alloca, the caller - // already put it in a temporary alloca and gave it up, unless - // we emit extra-debug-info, which requires local allocas :(. 
- // FIXME: lifetimes, debug info - let llarg = llvm::get_param(fcx.llfn, idx); - idx += 1; - llarg - } else if common::type_is_fat_ptr(tcx, arg_ty) { - // we pass fat pointers as two words, but we want to - // represent them internally as a pointer to two words, - // so make an alloca to store them in. - let lldata = llvm::get_param(fcx.llfn, idx); - let llextra = llvm::get_param(fcx.llfn, idx + 1); - idx += 2; - let (lltemp, dataptr, meta) = bcx.with_block(|bcx| { - let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)); - (lltemp, expr::get_dataptr(bcx, lltemp), expr::get_meta(bcx, lltemp)) - }); - bcx.store(lldata, dataptr); - bcx.store(llextra, meta); - lltemp - } else { - // otherwise, arg is passed by value, so make a - // temporary and store it there - let llarg = llvm::get_param(fcx.llfn, idx); - idx += 1; - bcx.with_block(|bcx| { - let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)); - base::store_ty(bcx, llarg, lltemp, arg_ty); - lltemp - }) - }; - LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty)) - }) - .collect() + let mut idx = 0; + let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; + mir.arg_decls.iter().enumerate().map(|(arg_index, arg_decl)| { + let arg_ty = bcx.monomorphize(&arg_decl.ty); + if arg_decl.spread { + // This argument (e.g. the last argument in the "rust-call" ABI) + // is a tuple that was spread at the ABI level and now we have + // to reconstruct it into a tuple local variable, from multiple + // individual LLVM function arguments. + + let tupled_arg_tys = match arg_ty.sty { + ty::TyTuple(ref tys) => tys, + _ => unreachable!("spread argument isn't a tuple?!") + }; + + let lltemp = bcx.with_block(|bcx| { + base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)) + }); + for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { + let dst = bcx.struct_gep(lltemp, i); + let arg = &fcx.fn_ty.args[idx]; + idx += 1; + if common::type_is_fat_ptr(tcx, tupled_arg_ty) { + // We pass fat pointers as two words, but inside the tuple + // they are the two sub-fields of a single aggregate field. + let meta = &fcx.fn_ty.args[idx]; + idx += 1; + arg.store_fn_arg(bcx, &mut llarg_idx, get_dataptr(bcx, dst)); + meta.store_fn_arg(bcx, &mut llarg_idx, get_meta(bcx, dst)); + } else { + arg.store_fn_arg(bcx, &mut llarg_idx, dst); + } + } + return LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty)); + } + + let arg = &fcx.fn_ty.args[idx]; + idx += 1; + let llval = if arg.is_indirect() { + // Don't copy an indirect argument to an alloca, the caller + // already put it in a temporary alloca and gave it up, unless + // we emit extra-debug-info, which requires local allocas :(. + // FIXME: lifetimes, debug info + let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + llarg_idx += 1; + llarg + } else { + let lltemp = bcx.with_block(|bcx| { + base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)) + }); + if common::type_is_fat_ptr(tcx, arg_ty) { + // we pass fat pointers as two words, but we want to + // represent them internally as a pointer to two words, + // so make an alloca to store them in. 
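Aside (illustration only, not part of the patch): for the "rust-call" ABI the spread branch above receives the trailing tuple as individual flattened LLVM arguments and rebuilds it into one tuple local; a fat-pointer field consumes two of those flattened slots. A source-level sketch of that reassembly, with hypothetical names:

    // The caller spread a (&[u8], bool) tuple into three scalar arguments;
    // the prologue packs them back into tuple shape, as the store_fn_arg calls do per field.
    fn spread_prologue(data: *const u8, meta: usize, flag: bool) -> ((*const u8, usize), bool) {
        ((data, meta), flag)
    }

    fn main() {
        let bytes: &[u8] = b"orbit";
        let (slice_words, flag) = spread_prologue(bytes.as_ptr(), bytes.len(), true);
        assert_eq!(slice_words.1, 5);
        assert!(flag);
    }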
+ let meta = &fcx.fn_ty.args[idx]; + idx += 1; + arg.store_fn_arg(bcx, &mut llarg_idx, get_dataptr(bcx, lltemp)); + meta.store_fn_arg(bcx, &mut llarg_idx, get_meta(bcx, lltemp)); + } else { + // otherwise, arg is passed by value, so make a + // temporary and store it there + arg.store_fn_arg(bcx, &mut llarg_idx, lltemp); + } + lltemp + }; + LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty)) + }).collect() } mod analyze; diff --git a/src/librustc_trans/trans/mir/operand.rs b/src/librustc_trans/trans/mir/operand.rs index 5db5fc82c1fa2..6df4502fbc827 100644 --- a/src/librustc_trans/trans/mir/operand.rs +++ b/src/librustc_trans/trans/mir/operand.rs @@ -9,17 +9,18 @@ // except according to those terms. use llvm::ValueRef; -use rustc::middle::ty::{self, Ty}; +use rustc::middle::ty::Ty; use rustc::mir::repr as mir; -use trans::adt; use trans::base; use trans::common::{self, Block, BlockAndBuilder}; use trans::datum; -use trans::Disr; +use trans::value::Value; use trans::glue; +use std::fmt; + +use super::lvalue::load_fat_ptr; use super::{MirContext, TempRef, drop}; -use super::lvalue::LvalueRef; /// The representation of a Rust value. The enum variant is in fact /// uniquely determined by the value's type, but is kept as a @@ -53,42 +54,32 @@ pub struct OperandRef<'tcx> { pub ty: Ty<'tcx> } -impl<'tcx> OperandRef<'tcx> { - /// Asserts that this operand refers to a scalar and returns - /// a reference to its value. - pub fn immediate(self) -> ValueRef { - match self.val { - OperandValue::Immediate(s) => s, - _ => unreachable!() - } - } - - pub fn repr<'bcx>(self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> String { +impl<'tcx> fmt::Debug for OperandRef<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.val { OperandValue::Ref(r) => { - format!("OperandRef(Ref({}) @ {:?})", - bcx.val_to_string(r), self.ty) + write!(f, "OperandRef(Ref({:?}) @ {:?})", + Value(r), self.ty) } OperandValue::Immediate(i) => { - format!("OperandRef(Immediate({}) @ {:?})", - bcx.val_to_string(i), self.ty) + write!(f, "OperandRef(Immediate({:?}) @ {:?})", + Value(i), self.ty) } OperandValue::FatPtr(a, d) => { - format!("OperandRef(FatPtr({}, {}) @ {:?})", - bcx.val_to_string(a), - bcx.val_to_string(d), - self.ty) + write!(f, "OperandRef(FatPtr({:?}, {:?}) @ {:?})", + Value(a), Value(d), self.ty) } } } +} - pub fn from_rvalue_datum(datum: datum::Datum<'tcx, datum::Rvalue>) -> OperandRef { - OperandRef { - ty: datum.ty, - val: match datum.kind.mode { - datum::RvalueMode::ByRef => OperandValue::Ref(datum.val), - datum::RvalueMode::ByValue => OperandValue::Immediate(datum.val), - } +impl<'tcx> OperandRef<'tcx> { + /// Asserts that this operand refers to a scalar and returns + /// a reference to its value. 
+ pub fn immediate(self) -> ValueRef { + match self.val { + OperandValue::Immediate(s) => s, + _ => unreachable!() } } } @@ -100,18 +91,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { ty: Ty<'tcx>) -> OperandRef<'tcx> { - debug!("trans_load: {} @ {:?}", bcx.val_to_string(llval), ty); + debug!("trans_load: {:?} @ {:?}", Value(llval), ty); let val = match datum::appropriate_rvalue_mode(bcx.ccx(), ty) { datum::ByValue => { - bcx.with_block(|bcx| { - OperandValue::Immediate(base::load_ty(bcx, llval, ty)) - }) + OperandValue::Immediate(base::load_ty_builder(bcx, llval, ty)) } datum::ByRef if common::type_is_fat_ptr(bcx.tcx(), ty) => { - let (lldata, llextra) = bcx.with_block(|bcx| { - base::load_fat_ptr(bcx, llval, ty) - }); + let (lldata, llextra) = load_fat_ptr(bcx, llval); OperandValue::FatPtr(lldata, llextra) } datum::ByRef => OperandValue::Ref(llval) @@ -164,7 +151,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { lldest: ValueRef, operand: OperandRef<'tcx>) { - debug!("store_operand: operand={}", operand.repr(bcx)); + debug!("store_operand: operand={:?}", operand); bcx.with_block(|bcx| self.store_operand_direct(bcx, lldest, operand)) } @@ -187,48 +174,6 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } } - pub fn trans_operand_untupled(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, - operand: &mir::Operand<'tcx>) - -> Vec> - { - // FIXME: consider having some optimization to avoid tupling/untupling - // (and storing/loading in the case of immediates) - - // avoid trans_operand for pointless copying - let lv = match *operand { - mir::Operand::Consume(ref lvalue) => self.trans_lvalue(bcx, lvalue), - mir::Operand::Constant(ref constant) => { - // FIXME: consider being less pessimized - if constant.ty.is_nil() { - return vec![]; - } - - let ty = bcx.monomorphize(&constant.ty); - let lv = LvalueRef::alloca(bcx, ty, "__untuple_alloca"); - let constant = self.trans_constant(bcx, constant); - self.store_operand(bcx, lv.llval, constant); - lv - } - }; - - let lv_ty = lv.ty.to_ty(bcx.tcx()); - let result_types = match lv_ty.sty { - ty::TyTuple(ref tys) => tys, - _ => bcx.tcx().sess.span_bug( - self.mir.span, - &format!("bad final argument to \"rust-call\" fn {:?}", lv_ty)) - }; - - let base_repr = adt::represent_type(bcx.ccx(), lv_ty); - let base = adt::MaybeSizedValue::sized(lv.llval); - result_types.iter().enumerate().map(|(n, &ty)| { - self.trans_load(bcx, bcx.with_block(|bcx| { - adt::trans_field_ptr(bcx, &base_repr, base, Disr(0), n) - }), ty) - }).collect() - } - pub fn set_operand_dropped(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, operand: &mir::Operand<'tcx>) { diff --git a/src/librustc_trans/trans/mir/rvalue.rs b/src/librustc_trans/trans/mir/rvalue.rs index ea80af14f1f2f..17c8aef244bb9 100644 --- a/src/librustc_trans/trans/mir/rvalue.rs +++ b/src/librustc_trans/trans/mir/rvalue.rs @@ -18,20 +18,21 @@ use rustc::mir::repr as mir; use trans::asm; use trans::base; use trans::callee::Callee; -use trans::common::{self, BlockAndBuilder, Result}; +use trans::common::{self, C_uint, BlockAndBuilder, Result}; +use trans::datum::{Datum, Lvalue}; use trans::debuginfo::DebugLoc; use trans::declare; -use trans::expr; use trans::adt; use trans::machine; use trans::type_::Type; use trans::type_of; use trans::tvec; +use trans::value::Value; use trans::Disr; use super::MirContext; use super::operand::{OperandRef, OperandValue}; -use super::lvalue::LvalueRef; +use super::lvalue::{LvalueRef, get_dataptr, get_meta}; impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { pub fn trans_rvalue(&mut self, @@ -40,9 +41,8 @@ 
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { rvalue: &mir::Rvalue<'tcx>) -> BlockAndBuilder<'bcx, 'tcx> { - debug!("trans_rvalue(dest.llval={}, rvalue={:?})", - bcx.val_to_string(dest.llval), - rvalue); + debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})", + Value(dest.llval), rvalue); match *rvalue { mir::Rvalue::Use(ref operand) => { @@ -54,7 +54,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { bcx } - mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, cast_ty) => { + mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => { if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. @@ -67,7 +67,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // this to be eliminated by MIR translation, but // `CoerceUnsized` can be passed by a where-clause, // so the (generic) MIR may not be able to expand it. - let operand = self.trans_operand(&bcx, operand); + let operand = self.trans_operand(&bcx, source); bcx.with_block(|bcx| { match operand.val { OperandValue::FatPtr(..) => unreachable!(), @@ -92,6 +92,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } } }); + self.set_operand_dropped(&bcx, source); bcx } @@ -99,8 +100,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let tr_elem = self.trans_operand(&bcx, elem); let count = ConstVal::Integral(ConstInt::Usize(count.value)); let size = self.trans_constval(&bcx, &count, bcx.tcx().types.usize).immediate(); + let base = get_dataptr(&bcx, dest.llval); let bcx = bcx.map_block(|block| { - let base = expr::get_dataptr(block, dest.llval); tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| { self.store_operand_direct(block, llslot, tr_elem); block @@ -123,15 +124,39 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // Do not generate stores and GEPis for zero-sized fields. if !common::type_is_zero_size(bcx.ccx(), op.ty) { let val = adt::MaybeSizedValue::sized(dest.llval); - let lldest_i = bcx.with_block(|bcx| { - adt::trans_field_ptr(bcx, &repr, val, disr, i) - }); + let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr, + val, disr, i); self.store_operand(&bcx, lldest_i, op); - self.set_operand_dropped(&bcx, operand); } + self.set_operand_dropped(&bcx, operand); } }, _ => { + // FIXME Shouldn't need to manually trigger closure instantiations. + if let mir::AggregateKind::Closure(def_id, substs) = *kind { + use rustc_front::hir; + use syntax::ast::DUMMY_NODE_ID; + use syntax::codemap::DUMMY_SP; + use syntax::ptr::P; + use trans::closure; + + closure::trans_closure_expr(closure::Dest::Ignore(bcx.ccx()), + &hir::FnDecl { + inputs: P::new(), + output: hir::NoReturn(DUMMY_SP), + variadic: false + }, + &hir::Block { + stmts: P::new(), + expr: None, + id: DUMMY_NODE_ID, + rules: hir::DefaultBlock, + span: DUMMY_SP + }, + DUMMY_NODE_ID, def_id, + &bcx.monomorphize(substs)); + } + for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. @@ -141,8 +166,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // not be structs but arrays. 
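Aside (illustration only, not part of the patch): the aggregate path above skips the store and GEP for zero-sized fields but still marks the operand as dropped, which is why set_operand_dropped moved outside the zero-size check. A standalone reminder of why such fields need no storage:

    #![allow(dead_code)]
    use std::marker::PhantomData;
    use std::mem::size_of;

    struct Pair<T> {
        value: u64,
        tag: PhantomData<T>, // zero-sized: no GEP or store is emitted for a field like this
    }

    fn main() {
        assert_eq!(size_of::<Pair<String>>(), size_of::<u64>());
        assert_eq!(size_of::<()>(), 0);
    }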
let dest = bcx.gepi(dest.llval, &[0, i]); self.store_operand(&bcx, dest, op); - self.set_operand_dropped(&bcx, operand); } + self.set_operand_dropped(&bcx, operand); } } } @@ -152,26 +177,42 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::Rvalue::Slice { ref input, from_start, from_end } => { let ccx = bcx.ccx(); let input = self.trans_lvalue(&bcx, input); - let (llbase, lllen) = bcx.with_block(|bcx| { - tvec::get_base_and_len(bcx, - input.llval, - input.ty.to_ty(bcx.tcx())) - }); - let llbase1 = bcx.gepi(llbase, &[from_start]); - let adj = common::C_uint(ccx, from_start + from_end); + let ty = input.ty.to_ty(bcx.tcx()); + let (llbase1, lllen) = match ty.sty { + ty::TyArray(_, n) => { + (bcx.gepi(input.llval, &[0, from_start]), C_uint(ccx, n)) + } + ty::TySlice(_) | ty::TyStr => { + (bcx.gepi(input.llval, &[from_start]), input.llextra) + } + _ => unreachable!("cannot slice {}", ty) + }; + let adj = C_uint(ccx, from_start + from_end); let lllen1 = bcx.sub(lllen, adj); - let (lladdrdest, llmetadest) = bcx.with_block(|bcx| { - (expr::get_dataptr(bcx, dest.llval), expr::get_meta(bcx, dest.llval)) - }); - bcx.store(llbase1, lladdrdest); - bcx.store(lllen1, llmetadest); + bcx.store(llbase1, get_dataptr(&bcx, dest.llval)); + bcx.store(lllen1, get_meta(&bcx, dest.llval)); bcx } - mir::Rvalue::InlineAsm(ref inline_asm) => { - bcx.map_block(|bcx| { - asm::trans_inline_asm(bcx, inline_asm) - }) + mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => { + let outputs = outputs.iter().map(|output| { + let lvalue = self.trans_lvalue(&bcx, output); + Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()), + Lvalue::new("out")) + }).collect(); + + let input_vals = inputs.iter().map(|input| { + self.trans_operand(&bcx, input).immediate() + }).collect(); + + bcx.with_block(|bcx| { + asm::trans_inline_asm(bcx, asm, outputs, input_vals); + }); + + for input in inputs { + self.set_operand_dropped(&bcx, input); + } + bcx } _ => { @@ -191,9 +232,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); match *rvalue { - mir::Rvalue::Cast(ref kind, ref operand, cast_ty) => { - let operand = self.trans_operand(&bcx, operand); - debug!("cast operand is {}", operand.repr(&bcx)); + mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { + let operand = self.trans_operand(&bcx, source); + debug!("cast operand is {:?}", operand); let cast_ty = bcx.monomorphize(&cast_ty); let val = match *kind { @@ -201,7 +242,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { match operand.ty.sty { ty::TyFnDef(def_id, substs, _) => { OperandValue::Immediate( - Callee::def(bcx.ccx(), def_id, substs, operand.ty) + Callee::def(bcx.ccx(), def_id, substs) .reify(bcx.ccx()).val) } _ => { @@ -225,6 +266,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // example, // &'a fmt::Debug+Send => &'a fmt::Debug, // and is a no-op at the LLVM level + self.set_operand_dropped(&bcx, source); operand.val } OperandValue::Immediate(lldata) => { @@ -233,12 +275,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { base::unsize_thin_ptr(bcx, lldata, operand.ty, cast_ty) }); + self.set_operand_dropped(&bcx, source); OperandValue::FatPtr(lldata, llextra) } OperandValue::Ref(_) => { bcx.sess().bug( - &format!("by-ref operand {} in trans_rvalue_operand", - operand.repr(&bcx))); + &format!("by-ref operand {:?} in trans_rvalue_operand", + operand)); } } } @@ -246,17 +289,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty)); let r_t_in = 
CastTy::from_ty(operand.ty).expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); - let ll_t_in = type_of::arg_type_of(bcx.ccx(), operand.ty); - let ll_t_out = type_of::arg_type_of(bcx.ccx(), cast_ty); - let (llval, ll_t_in, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in { + let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty); + let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty); + let llval = operand.immediate(); + let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in { let repr = adt::represent_type(bcx.ccx(), operand.ty); - let llval = operand.immediate(); - let discr = bcx.with_block(|bcx| { - adt::trans_get_discr(bcx, &repr, llval, None, true) - }); - (discr, common::val_ty(discr), adt::is_discr_signed(&repr)) + adt::is_discr_signed(&repr) } else { - (operand.immediate(), ll_t_in, operand.ty.is_signed()) + operand.ty.is_signed() }; let newval = match (r_t_in, r_t_out) { @@ -308,8 +348,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { OperandValue::Immediate(newval) } mir::CastKind::Misc => { // Casts from a fat-ptr. - let ll_cast_ty = type_of::arg_type_of(bcx.ccx(), cast_ty); - let ll_from_ty = type_of::arg_type_of(bcx.ccx(), operand.ty); + let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty); + let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty); if let OperandValue::FatPtr(data_ptr, meta_ptr) = operand.val { if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { let ll_cft = ll_cast_ty.field_types(); @@ -423,7 +463,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let llty = type_of::type_of(bcx.ccx(), content_ty); let llsize = machine::llsize_of(bcx.ccx(), llty); let align = type_of::align_of(bcx.ccx(), content_ty); - let llalign = common::C_uint(bcx.ccx(), align); + let llalign = C_uint(bcx.ccx(), align); let llty_ptr = llty.ptr_to(); let box_ty = bcx.tcx().mk_box(content_ty); let mut llval = None; @@ -448,7 +488,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) | mir::Rvalue::Slice { .. } | - mir::Rvalue::InlineAsm(..) => { + mir::Rvalue::InlineAsm { .. } => { bcx.tcx().sess.bug(&format!("cannot generate operand from rvalue {:?}", rvalue)); } } @@ -511,15 +551,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { if use_fmod { let f64t = Type::f64(bcx.ccx()); let fty = Type::func(&[f64t, f64t], &f64t); - let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty, - tcx.types.f64); + let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty); if input_ty == tcx.types.f32 { let lllhs = bcx.fpext(lhs, f64t); let llrhs = bcx.fpext(rhs, f64t); - let llres = bcx.call(llfn, &[lllhs, llrhs], None, None); + let llres = bcx.call(llfn, &[lllhs, llrhs], None); bcx.fptrunc(llres, Type::f32(bcx.ccx())) } else { - bcx.call(llfn, &[lhs, rhs], None, None) + bcx.call(llfn, &[lhs, rhs], None) } } else { bcx.frem(lhs, rhs) @@ -573,7 +612,7 @@ pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool { mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) | mir::Rvalue::Slice { .. } | - mir::Rvalue::InlineAsm(..) => + mir::Rvalue::InlineAsm { .. 
} => false, } diff --git a/src/librustc_trans/trans/mod.rs b/src/librustc_trans/trans/mod.rs index 1b8bab7a4ee70..c5ab0d4e74421 100644 --- a/src/librustc_trans/trans/mod.rs +++ b/src/librustc_trans/trans/mod.rs @@ -19,6 +19,7 @@ pub use self::disr::Disr; #[macro_use] mod macros; +mod abi; mod adt; mod asm; mod assert_dep_graph; @@ -27,7 +28,6 @@ mod base; mod basic_block; mod build; mod builder; -mod cabi; mod cabi_aarch64; mod cabi_arm; mod cabi_asmjs; @@ -49,11 +49,9 @@ mod debuginfo; mod declare; mod disr; mod expr; -mod foreign; mod glue; mod inline; mod intrinsic; -mod llrepr; mod machine; mod _match; mod meth; diff --git a/src/librustc_trans/trans/monomorphize.rs b/src/librustc_trans/trans/monomorphize.rs index c6119416e47ed..2e75439ffc329 100644 --- a/src/librustc_trans/trans/monomorphize.rs +++ b/src/librustc_trans/trans/monomorphize.rs @@ -17,38 +17,35 @@ use middle::subst; use middle::subst::{Subst, Substs}; use middle::ty::fold::{TypeFolder, TypeFoldable}; use trans::attributes; -use trans::base::{trans_enum_variant, push_ctxt, get_item_val}; +use trans::base::{push_ctxt}; use trans::base::trans_fn; use trans::base; use trans::common::*; use trans::declare; -use trans::foreign; use middle::ty::{self, Ty, TyCtxt}; use trans::Disr; use rustc::front::map as hir_map; +use rustc::util::ppaux; use rustc_front::hir; -use syntax::abi::Abi; -use syntax::ast; use syntax::attr; use syntax::errors; + +use std::fmt; use std::hash::{Hasher, Hash, SipHasher}; pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_id: DefId, psubsts: &'tcx subst::Substs<'tcx>) - -> (ValueRef, Ty<'tcx>, bool) { + -> (ValueRef, Ty<'tcx>) { debug!("monomorphic_fn(fn_id={:?}, real_substs={:?})", fn_id, psubsts); assert!(!psubsts.types.needs_infer() && !psubsts.types.has_param_types()); - // we can only monomorphize things in this crate (or inlined into it) - let fn_node_id = ccx.tcx().map.as_local_node_id(fn_id).unwrap(); - let _icx = push_ctxt("monomorphic_fn"); - let hash_id = MonoId { + let instance = Instance { def: fn_id, params: &psubsts.types }; @@ -59,41 +56,15 @@ pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let mono_ty = apply_param_substs(ccx.tcx(), psubsts, &item_ty); debug!("mono_ty = {:?} (post-substitution)", mono_ty); - match ccx.monomorphized().borrow().get(&hash_id) { + match ccx.instances().borrow().get(&instance) { Some(&val) => { - debug!("leaving monomorphic fn {}", - ccx.tcx().item_path_str(fn_id)); - return (val, mono_ty, false); + debug!("leaving monomorphic fn {:?}", instance); + return (val, mono_ty); } None => () } - debug!("monomorphic_fn(\ - fn_id={:?}, \ - psubsts={:?}, \ - hash_id={:?})", - fn_id, - psubsts, - hash_id); - - - let map_node = errors::expect( - ccx.sess().diagnostic(), - ccx.tcx().map.find(fn_node_id), - || { - format!("while monomorphizing {:?}, couldn't find it in \ - the item map (may have attempted to monomorphize \ - an item defined in a different crate?)", - fn_id) - }); - - if let hir_map::NodeForeignItem(_) = map_node { - let abi = ccx.tcx().map.get_foreign_abi(fn_node_id); - if abi != Abi::RustIntrinsic && abi != Abi::PlatformIntrinsic { - // Foreign externs don't have to be monomorphized. 
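Aside (illustration only, not part of the patch): monomorphic_fn now keys its cache on an Instance { def, params } pair, so a function instantiated twice with the same substitutions is declared and translated once, and the recursion-limit error names the instance being expanded. A much-simplified analogue of that caching, with hypothetical names:

    use std::collections::HashMap;

    type DefId = u32;

    #[derive(Clone, PartialEq, Eq, Hash, Debug)]
    struct Instance {
        def: DefId,
        params: Vec<String>, // stand-in for the monomorphized type substitutions
    }

    struct Cache {
        instances: HashMap<Instance, String>, // stand-in for the cached LLVM declaration
    }

    impl Cache {
        fn monomorphic_fn(&mut self, inst: Instance) -> String {
            if let Some(decl) = self.instances.get(&inst) {
                return decl.clone(); // already translated: reuse the declaration
            }
            let decl = format!("define @{}::<{}>", inst.def, inst.params.join(", "));
            self.instances.insert(inst, decl.clone());
            decl
        }
    }

    fn main() {
        let mut cache = Cache { instances: HashMap::new() };
        let a = cache.monomorphic_fn(Instance { def: 1, params: vec!["u32".into()] });
        let b = cache.monomorphic_fn(Instance { def: 1, params: vec!["u32".into()] });
        assert_eq!(a, b); // the second request hits the cache
    }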
- return (get_item_val(ccx, fn_node_id), mono_ty, true); - } - } + debug!("monomorphic_fn({:?})", instance); ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1); @@ -110,8 +81,13 @@ pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // recursively more than thirty times can probably safely be assumed // to be causing an infinite expansion. if depth > ccx.sess().recursion_limit.get() { - ccx.sess().span_fatal(ccx.tcx().map.span(fn_node_id), - "reached the recursion limit during monomorphization"); + let error = format!("reached the recursion limit while instantiating `{}`", + instance); + if let Some(id) = ccx.tcx().map.as_local_node_id(fn_id) { + ccx.sess().span_fatal(ccx.tcx().map.span(id), &error); + } else { + ccx.sess().fatal(&error); + } } monomorphizing.insert(fn_id, depth + 1); @@ -120,173 +96,112 @@ pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let hash; let s = { let mut state = SipHasher::new(); - hash_id.hash(&mut state); + instance.hash(&mut state); mono_ty.hash(&mut state); hash = format!("h{}", state.finish()); - let path = ccx.tcx().map.def_path_from_id(fn_node_id); + let path = ccx.tcx().map.def_path(fn_id); exported_name(path, &hash[..]) }; debug!("monomorphize_fn mangled to {}", s); + assert!(declare::get_defined_value(ccx, &s).is_none()); - // This shouldn't need to option dance. - let mut hash_id = Some(hash_id); - let mut mk_lldecl = |abi: Abi| { - let lldecl = if abi != Abi::Rust { - foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, &s) - } else { - // FIXME(nagisa): perhaps needs a more fine grained selection? See - // setup_lldecl below. - declare::define_internal_rust_fn(ccx, &s, mono_ty) - }; + // FIXME(nagisa): perhaps needs a more fine grained selection? + let lldecl = declare::define_internal_fn(ccx, &s, mono_ty); + // FIXME(eddyb) Doubt all extern fn should allow unwinding. + attributes::unwind(lldecl, true); - ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl); - lldecl - }; - let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| { - base::update_linkage(ccx, lldecl, None, base::OriginalTranslation); - attributes::from_fn_attrs(ccx, attrs, lldecl); - - let is_first = !ccx.available_monomorphizations().borrow().contains(&s); - if is_first { - ccx.available_monomorphizations().borrow_mut().insert(s.clone()); - } + ccx.instances().borrow_mut().insert(instance, lldecl); - let trans_everywhere = attr::requests_inline(attrs); - if trans_everywhere && !is_first { - llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage); - } - - // If `true`, then `lldecl` should be given a function body. - // Otherwise, it should be left as a declaration of an external - // function, with no definition in the current compilation unit. - trans_everywhere || is_first - }; + // we can only monomorphize things in this crate (or inlined into it) + let fn_node_id = ccx.tcx().map.as_local_node_id(fn_id).unwrap(); + let map_node = errors::expect( + ccx.sess().diagnostic(), + ccx.tcx().map.find(fn_node_id), + || { + format!("while instantiating `{}`, couldn't find it in \ + the item map (may have attempted to monomorphize \ + an item defined in a different crate?)", + instance) + }); + match map_node { + hir_map::NodeItem(&hir::Item { + ref attrs, node: hir::ItemFn(ref decl, _, _, _, _, ref body), .. + }) | + hir_map::NodeTraitItem(&hir::TraitItem { + ref attrs, node: hir::MethodTraitItem( + hir::MethodSig { ref decl, .. }, Some(ref body)), .. 
+ }) | + hir_map::NodeImplItem(&hir::ImplItem { + ref attrs, node: hir::ImplItemKind::Method( + hir::MethodSig { ref decl, .. }, ref body), .. + }) => { + base::update_linkage(ccx, lldecl, None, base::OriginalTranslation); + attributes::from_fn_attrs(ccx, attrs, lldecl); + + let is_first = !ccx.available_monomorphizations().borrow().contains(&s); + if is_first { + ccx.available_monomorphizations().borrow_mut().insert(s.clone()); + } - let lldecl = match map_node { - hir_map::NodeItem(i) => { - match *i { - hir::Item { - node: hir::ItemFn(ref decl, _, _, abi, _, ref body), - .. - } => { - let d = mk_lldecl(abi); - let needs_body = setup_lldecl(d, &i.attrs); - if needs_body { - if abi != Abi::Rust { - foreign::trans_rust_fn_with_foreign_abi( - ccx, &decl, &body, &[], d, psubsts, fn_node_id, - Some(&hash[..])); - } else { - trans_fn(ccx, - &decl, - &body, - d, - psubsts, - fn_node_id, - &i.attrs); - } - } - - d - } - _ => { - ccx.sess().bug("Can't monomorphize this kind of item") - } + let trans_everywhere = attr::requests_inline(attrs); + if trans_everywhere && !is_first { + llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage); } - } - hir_map::NodeVariant(v) => { - let variant = inlined_variant_def(ccx, fn_node_id); - assert_eq!(v.node.name, variant.name); - let d = mk_lldecl(Abi::Rust); - attributes::inline(d, attributes::InlineAttr::Hint); - trans_enum_variant(ccx, fn_node_id, Disr::from(variant.disr_val), psubsts, d); - d - } - hir_map::NodeImplItem(impl_item) => { - match impl_item.node { - hir::ImplItemKind::Method(ref sig, ref body) => { - let d = mk_lldecl(Abi::Rust); - let needs_body = setup_lldecl(d, &impl_item.attrs); - if needs_body { - trans_fn(ccx, - &sig.decl, - body, - d, - psubsts, - impl_item.id, - &impl_item.attrs); - } - d - } - _ => { - ccx.sess().bug(&format!("can't monomorphize a {:?}", - map_node)) - } + + if trans_everywhere || is_first { + trans_fn(ccx, decl, body, lldecl, psubsts, fn_node_id); } } - hir_map::NodeTraitItem(trait_item) => { - match trait_item.node { - hir::MethodTraitItem(ref sig, Some(ref body)) => { - let d = mk_lldecl(Abi::Rust); - let needs_body = setup_lldecl(d, &trait_item.attrs); - if needs_body { - trans_fn(ccx, - &sig.decl, - body, - d, - psubsts, - trait_item.id, - &trait_item.attrs); - } - d - } - _ => { - ccx.sess().bug(&format!("can't monomorphize a {:?}", - map_node)) + + hir_map::NodeVariant(_) | hir_map::NodeStructCtor(_) => { + let disr = match map_node { + hir_map::NodeVariant(_) => { + Disr::from(inlined_variant_def(ccx, fn_node_id).disr_val) } - } - } - hir_map::NodeStructCtor(struct_def) => { - let d = mk_lldecl(Abi::Rust); - attributes::inline(d, attributes::InlineAttr::Hint); - if struct_def.is_struct() { - panic!("ast-mapped struct didn't have a ctor id") - } - base::trans_tuple_struct(ccx, - struct_def.id(), - psubsts, - d); - d + hir_map::NodeStructCtor(_) => Disr(0), + _ => unreachable!() + }; + attributes::inline(lldecl, attributes::InlineAttr::Hint); + base::trans_ctor_shim(ccx, fn_node_id, disr, psubsts, lldecl); } - // Ugh -- but this ensures any new variants won't be forgotten - hir_map::NodeForeignItem(..) | - hir_map::NodeLifetime(..) | - hir_map::NodeTyParam(..) | - hir_map::NodeExpr(..) | - hir_map::NodeStmt(..) | - hir_map::NodeBlock(..) | - hir_map::NodePat(..) | - hir_map::NodeLocal(..) 
=> { - ccx.sess().bug(&format!("can't monomorphize a {:?}", - map_node)) - } + _ => unreachable!("can't monomorphize a {:?}", map_node) }; ccx.monomorphizing().borrow_mut().insert(fn_id, depth); debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id)); - (lldecl, mono_ty, true) + (lldecl, mono_ty) } -#[derive(PartialEq, Eq, Hash, Debug)] -pub struct MonoId<'tcx> { +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct Instance<'tcx> { pub def: DefId, pub params: &'tcx subst::VecPerParamSpace> } +impl<'tcx> fmt::Display for Instance<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let substs = Substs { + types: self.params.clone(), + regions: subst::ErasedRegions + }; + ppaux::parameterized(f, &substs, self.def, ppaux::Ns::Value, &[], + |tcx| tcx.lookup_item_type(self.def).generics) + } +} + +impl<'tcx> Instance<'tcx> { + pub fn mono(tcx: &TyCtxt<'tcx>, def_id: DefId) -> Instance<'tcx> { + Instance { + def: def_id, + params: &tcx.mk_substs(Substs::trans_empty()).types + } + } +} + /// Monomorphizes a type from the AST by first applying the in-scope /// substitutions and then normalizing any associated types. pub fn apply_param_substs<'tcx,T>(tcx: &TyCtxt<'tcx>, diff --git a/src/librustc_trans/trans/tvec.rs b/src/librustc_trans/trans/tvec.rs index d6573e96b02a3..7a0ca86f5a270 100644 --- a/src/librustc_trans/trans/tvec.rs +++ b/src/librustc_trans/trans/tvec.rs @@ -26,6 +26,7 @@ use trans::expr; use trans::machine::llsize_of_alloc; use trans::type_::Type; use trans::type_of; +use trans::value::Value; use middle::ty::{self, Ty}; use rustc_front::hir; @@ -33,20 +34,12 @@ use rustc_front::hir; use syntax::ast; use syntax::parse::token::InternedString; -#[derive(Copy, Clone)] +#[derive(Copy, Clone, Debug)] struct VecTypes<'tcx> { unit_ty: Ty<'tcx>, llunit_ty: Type } -impl<'tcx> VecTypes<'tcx> { - pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String { - format!("VecTypes {{unit_ty={}, llunit_ty={}}}", - self.unit_ty, - ccx.tn().type_to_string(self.llunit_ty)) - } -} - pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &hir::Expr, dest: expr::Dest) @@ -58,8 +51,7 @@ pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // to store the array of the suitable size, so all we have to do is // generate the content. - debug!("trans_fixed_vstore(expr={:?}, dest={})", - expr, dest.to_string(bcx.ccx())); + debug!("trans_fixed_vstore(expr={:?}, dest={:?})", expr, dest); let vt = vec_types_from_expr(bcx, expr); @@ -82,7 +74,6 @@ pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, content_expr: &hir::Expr) -> DatumBlock<'blk, 'tcx, Expr> { let fcx = bcx.fcx; - let ccx = fcx.ccx; let mut bcx = bcx; debug!("trans_slice_vec(slice_expr={:?})", @@ -105,7 +96,7 @@ pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Handle the &[...] 
case: let vt = vec_types_from_expr(bcx, content_expr); let count = elements_required(bcx, content_expr); - debug!(" vt={}, count={}", vt.to_string(ccx), count); + debug!(" vt={:?}, count={}", vt, count); let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count); @@ -144,9 +135,7 @@ pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, str_lit: InternedString, dest: Dest) -> Block<'blk, 'tcx> { - debug!("trans_lit_str(lit_expr={:?}, dest={})", - lit_expr, - dest.to_string(bcx.ccx())); + debug!("trans_lit_str(lit_expr={:?}, dest={:?})", lit_expr, dest); match dest { Ignore => bcx, @@ -172,10 +161,8 @@ fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let fcx = bcx.fcx; let mut bcx = bcx; - debug!("write_content(vt={}, dest={}, vstore_expr={:?})", - vt.to_string(bcx.ccx()), - dest.to_string(bcx.ccx()), - vstore_expr); + debug!("write_content(vt={:?}, dest={:?}, vstore_expr={:?})", + vt, dest, vstore_expr); match content_expr.node { hir::ExprLit(ref lit) => { @@ -187,11 +174,9 @@ fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let bytes = s.len(); let llbytes = C_uint(bcx.ccx(), bytes); let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false); - base::call_memcpy(bcx, - lldest, - llcstr, - llbytes, - 1); + if !bcx.unreachable.get() { + base::call_memcpy(&B(bcx), lldest, llcstr, llbytes, 1); + } return bcx; } } @@ -214,8 +199,8 @@ fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let temp_scope = fcx.push_custom_cleanup_scope(); for (i, element) in elements.iter().enumerate() { let lleltptr = GEPi(bcx, lldest, &[i]); - debug!("writing index {} with lleltptr={}", - i, bcx.val_to_string(lleltptr)); + debug!("writing index {} with lleltptr={:?}", + i, Value(lleltptr)); bcx = expr::trans_into(bcx, &element, SaveIn(lleltptr)); let scope = cleanup::CustomScope(temp_scope); diff --git a/src/librustc_trans/trans/type_.rs b/src/librustc_trans/trans/type_.rs index 17300f356c434..57bd0ba815816 100644 --- a/src/librustc_trans/trans/type_.rs +++ b/src/librustc_trans/trans/type_.rs @@ -11,7 +11,7 @@ #![allow(non_upper_case_globals)] use llvm; -use llvm::{TypeRef, Bool, False, True, TypeKind, ValueRef}; +use llvm::{TypeRef, Bool, False, True, TypeKind}; use llvm::{Float, Double, X86_FP80, PPC_FP128, FP128}; use trans::context::CrateContext; @@ -20,18 +20,27 @@ use util::nodemap::FnvHashMap; use syntax::ast; use std::ffi::CString; +use std::fmt; use std::mem; use std::ptr; use std::cell::RefCell; use libc::c_uint; -#[derive(Clone, Copy, PartialEq, Debug)] +#[derive(Clone, Copy, PartialEq)] #[repr(C)] pub struct Type { rf: TypeRef } +impl fmt::Debug for Type { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&llvm::build_string(|s| unsafe { + llvm::LLVMWriteTypeToString(self.to_ref(), s); + }).expect("non-UTF8 type description from LLVM")) + } +} + macro_rules! 
ty { ($e:expr) => ( Type::from_ref(unsafe { $e })) } @@ -50,12 +59,6 @@ impl Type { self.rf } - pub fn to_string(self: Type) -> String { - llvm::build_string(|s| unsafe { - llvm::LLVMWriteTypeToString(self.to_ref(), s); - }).expect("non-UTF8 type description from LLVM") - } - pub fn to_ref_slice(slice: &[Type]) -> &[TypeRef] { unsafe { mem::transmute(slice) } } @@ -180,10 +183,6 @@ impl Type { Type::struct_(ccx, &[], false) } - pub fn glue_fn(ccx: &CrateContext, t: Type) -> Type { - Type::func(&[t], &Type::void(ccx)) - } - pub fn array(ty: &Type, len: u64) -> Type { ty!(llvm::LLVMRustArrayType(ty.to_ref(), len)) } @@ -203,7 +202,7 @@ impl Type { } pub fn vtable_ptr(ccx: &CrateContext) -> Type { - Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to().ptr_to() + Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to() } pub fn kind(&self) -> TypeKind { @@ -301,7 +300,6 @@ impl Type { } } - /* Memory-managed object interface to type handles. */ pub struct TypeNames { @@ -323,19 +321,4 @@ impl TypeNames { pub fn find_type(&self, s: &str) -> Option { self.named_types.borrow().get(s).map(|x| Type::from_ref(*x)) } - - pub fn type_to_string(&self, ty: Type) -> String { - ty.to_string() - } - - pub fn types_to_str(&self, tys: &[Type]) -> String { - let strs: Vec = tys.iter().map(|t| self.type_to_string(*t)).collect(); - format!("[{}]", strs.join(",")) - } - - pub fn val_to_string(&self, val: ValueRef) -> String { - llvm::build_string(|s| unsafe { - llvm::LLVMWriteValueToString(val, s); - }).expect("nun-UTF8 value description from LLVM") - } } diff --git a/src/librustc_trans/trans/type_of.rs b/src/librustc_trans/trans/type_of.rs index b78bf9bfc3fb2..7d5218d84dafe 100644 --- a/src/librustc_trans/trans/type_of.rs +++ b/src/librustc_trans/trans/type_of.rs @@ -13,15 +13,14 @@ use middle::def_id::DefId; use middle::infer; use middle::subst; +use trans::abi::FnType; use trans::adt; use trans::common::*; -use trans::foreign; use trans::machine; use middle::ty::{self, Ty, TypeFoldable}; use trans::type_::Type; -use syntax::abi::Abi; use syntax::ast; // LLVM doesn't like objects that are too big. Issue #17913 @@ -36,120 +35,6 @@ fn ensure_array_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } } -pub fn arg_is_indirect<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - arg_ty: Ty<'tcx>) -> bool { - !type_is_immediate(ccx, arg_ty) && !type_is_fat_ptr(ccx.tcx(), arg_ty) -} - -pub fn return_uses_outptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> bool { - arg_is_indirect(ccx, ty) -} - -pub fn type_of_explicit_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - arg_ty: Ty<'tcx>) -> Type { - let llty = arg_type_of(ccx, arg_ty); - if arg_is_indirect(ccx, arg_ty) { - llty.ptr_to() - } else { - llty - } -} - -/// Yields the types of the "real" arguments for a function using the `RustCall` -/// ABI by untupling the arguments of the function. 
-pub fn untuple_arguments<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - inputs: &[Ty<'tcx>]) - -> Vec> { - if inputs.is_empty() { - return Vec::new() - } - - let mut result = Vec::new(); - for (i, &arg_prior_to_tuple) in inputs.iter().enumerate() { - if i < inputs.len() - 1 { - result.push(arg_prior_to_tuple); - } - } - - match inputs[inputs.len() - 1].sty { - ty::TyTuple(ref tupled_arguments) => { - debug!("untuple_arguments(): untupling arguments"); - for &tupled_argument in tupled_arguments { - result.push(tupled_argument); - } - } - _ => { - ccx.tcx().sess.bug("argument to function with \"rust-call\" ABI \ - is neither a tuple nor unit") - } - } - - result -} - -pub fn type_of_rust_fn<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - llenvironment_type: Option, - sig: &ty::FnSig<'tcx>, - abi: Abi) - -> Type -{ - debug!("type_of_rust_fn(sig={:?},abi={:?})", - sig, - abi); - - assert!(!sig.variadic); // rust fns are never variadic - - let mut atys: Vec = Vec::new(); - - // First, munge the inputs, if this has the `rust-call` ABI. - let inputs_temp; - let inputs = if abi == Abi::RustCall { - inputs_temp = untuple_arguments(cx, &sig.inputs); - &inputs_temp - } else { - &sig.inputs - }; - - // Arg 0: Output pointer. - // (if the output type is non-immediate) - let lloutputtype = match sig.output { - ty::FnConverging(output) => { - let use_out_pointer = return_uses_outptr(cx, output); - let lloutputtype = arg_type_of(cx, output); - // Use the output as the actual return value if it's immediate. - if use_out_pointer { - atys.push(lloutputtype.ptr_to()); - Type::void(cx) - } else if return_type_is_void(cx, output) { - Type::void(cx) - } else { - lloutputtype - } - } - ty::FnDiverging => Type::void(cx) - }; - - // Arg 1: Environment - match llenvironment_type { - None => {} - Some(llenvironment_type) => atys.push(llenvironment_type), - } - - // ... then explicit args. - for input in inputs { - let arg_ty = type_of_explicit_arg(cx, input); - - if type_is_fat_ptr(cx.tcx(), input) { - atys.extend(arg_ty.field_types()); - } else { - atys.push(arg_ty); - } - } - - Type::func(&atys[..], &lloutputtype) -} - // A "sizing type" is an LLVM type, the size and alignment of which are // guaranteed to be equivalent to what you would get out of `type_of()`. It's // useful because: @@ -171,7 +56,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ let llsizingty = match t.sty { _ if !type_is_sized(cx.tcx(), t) => { - Type::struct_(cx, &[Type::i8p(cx), Type::i8p(cx)], false) + Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, t)], false) } ty::TyBool => Type::bool(cx), @@ -186,7 +71,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ if type_is_sized(cx.tcx(), ty) { Type::i8p(cx) } else { - Type::struct_(cx, &[Type::i8p(cx), Type::i8p(cx)], false) + Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, ty)], false) } } @@ -234,32 +119,27 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ ty::TySlice(_) | ty::TyTrait(..) 
| ty::TyStr => unreachable!() }; - debug!("--> mapped t={:?} to llsizingty={}", - t, - cx.tn().type_to_string(llsizingty)); + debug!("--> mapped t={:?} to llsizingty={:?}", t, llsizingty); cx.llsizingtypes().borrow_mut().insert(t, llsizingty); llsizingty } -pub fn foreign_arg_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - if t.is_bool() { - Type::i1(cx) - } else { - type_of(cx, t) +fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { + let unsized_part = ccx.tcx().struct_tail(ty); + match unsized_part.sty { + ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => { + Type::uint_from_ty(ccx, ast::UintTy::Us) + } + ty::TyTrait(_) => Type::vtable_ptr(ccx), + _ => unreachable!("Unexpected tail in unsized_info_ty: {:?} for ty={:?}", + unsized_part, ty) } } -pub fn arg_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { +pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { if t.is_bool() { Type::i1(cx) - } else if type_is_immediate(cx, t) && type_of(cx, t).is_aggregate() { - // We want to pass small aggregates as immediate values, but using an aggregate LLVM type - // for this leads to bad optimizations, so its arg type is an appropriately sized integer - match machine::llsize_of_alloc(cx, sizing_type_of(cx, t)) { - 0 => type_of(cx, t), - n => Type::ix(cx, n * 8), - } } else { type_of(cx, t) } @@ -314,12 +194,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> if t != t_norm { let llty = in_memory_type_of(cx, t_norm); - debug!("--> normalized {:?} {:?} to {:?} {:?} llty={}", - t, - t, - t_norm, - t_norm, - cx.tn().type_to_string(llty)); + debug!("--> normalized {:?} to {:?} llty={:?}", t, t_norm, llty); cx.lltypes().borrow_mut().insert(t, llty); return llty; } @@ -361,16 +236,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> cx.tn().find_type("str_slice").unwrap() } else { let ptr_ty = in_memory_type_of(cx, ty).ptr_to(); - let unsized_part = cx.tcx().struct_tail(ty); - let info_ty = match unsized_part.sty { - ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => { - Type::uint_from_ty(cx, ast::UintTy::Us) - } - ty::TyTrait(_) => Type::vtable_ptr(cx), - _ => panic!("Unexpected type returned from \ - struct_tail: {:?} for ty={:?}", - unsized_part, ty) - }; + let info_ty = unsized_info_ty(cx, ty); Type::struct_(cx, &[ptr_ty, info_ty], false) } } else { @@ -398,13 +264,9 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ty::TyFnDef(..) => Type::nil(cx), ty::TyFnPtr(f) => { - if f.abi == Abi::Rust || f.abi == Abi::RustCall { - let sig = cx.tcx().erase_late_bound_regions(&f.sig); - let sig = infer::normalize_associated_type(cx.tcx(), &sig); - type_of_rust_fn(cx, None, &sig, f.abi).ptr_to() - } else { - foreign::lltype_for_foreign_fn(cx, t).ptr_to() - } + let sig = cx.tcx().erase_late_bound_regions(&f.sig); + let sig = infer::normalize_associated_type(cx.tcx(), &sig); + FnType::new(cx, f.abi, &sig, &[]).llvm_type(cx).ptr_to() } ty::TyTuple(ref tys) if tys.is_empty() => Type::nil(cx), ty::TyTuple(..) 
=> { @@ -440,9 +302,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ty::TyError => cx.sess().bug("type_of with TyError"), }; - debug!("--> mapped t={:?} to llty={}", - t, - cx.tn().type_to_string(llty)); + debug!("--> mapped t={:?} to llty={:?}", t, llty); cx.lltypes().borrow_mut().insert(t, llty); diff --git a/src/librustc_trans/trans/value.rs b/src/librustc_trans/trans/value.rs index bc71278c15743..a33b7d62d2dce 100644 --- a/src/librustc_trans/trans/value.rs +++ b/src/librustc_trans/trans/value.rs @@ -12,11 +12,22 @@ use llvm; use llvm::{UseRef, ValueRef}; use trans::basic_block::BasicBlock; use trans::common::Block; + +use std::fmt; + use libc::c_uint; -#[derive(Copy, Clone)] +#[derive(Copy, Clone, PartialEq)] pub struct Value(pub ValueRef); +impl fmt::Debug for Value { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&llvm::build_string(|s| unsafe { + llvm::LLVMWriteValueToString(self.0, s); + }).expect("non-UTF8 value description from LLVM")) + } +} + macro_rules! opt_val { ($e:expr) => ( unsafe { match $e { diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 0d5e25efd68c9..903fc458d8185 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -3416,12 +3416,12 @@ fn check_expr_with_expectation_and_lvalue_pref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, fcx.add_wf_bounds(&item_substs.substs, expr); }); } - hir::ExprInlineAsm(ref ia) => { - for &(_, ref input) in &ia.inputs { - check_expr(fcx, &input); + hir::ExprInlineAsm(_, ref outputs, ref inputs) => { + for output in outputs { + check_expr(fcx, output); } - for out in &ia.outputs { - check_expr(fcx, &out.expr); + for input in inputs { + check_expr(fcx, input); } fcx.write_nil(id); } diff --git a/src/librustc_typeck/collect.rs b/src/librustc_typeck/collect.rs index 0f88640b62951..258c7af1316eb 100644 --- a/src/librustc_typeck/collect.rs +++ b/src/librustc_typeck/collect.rs @@ -2155,7 +2155,7 @@ fn compute_type_scheme_of_foreign_fn_decl<'a, 'tcx>( let input_tys = decl.inputs .iter() .map(|a| ty_of_arg(&ccx.icx(ast_generics), &rb, a, None)) - .collect(); + .collect::<Vec<_>>(); let output = match decl.output { hir::Return(ref ty) => @@ -2166,6 +2166,29 @@ fn compute_type_scheme_of_foreign_fn_decl<'a, 'tcx>( ty::FnDiverging }; + // feature gate SIMD types in FFI, since I (huonw) am not sure the + ABIs are handled at all correctly.
+ if abi != abi::Abi::RustIntrinsic && abi != abi::Abi::PlatformIntrinsic + && !ccx.tcx.sess.features.borrow().simd_ffi { + let check = |ast_ty: &hir::Ty, ty: ty::Ty| { + if ty.is_simd() { + ccx.tcx.sess.struct_span_err(ast_ty.span, + &format!("use of SIMD type `{}` in FFI is highly experimental and \ + may result in invalid code", + pprust::ty_to_string(ast_ty))) + .fileline_help(ast_ty.span, + "add #![feature(simd_ffi)] to the crate attributes to enable") + .emit(); + } + }; + for (input, ty) in decl.inputs.iter().zip(&input_tys) { + check(&input.ty, ty) + } + if let hir::Return(ref ty) = decl.output { + check(&ty, output.unwrap()) + } + } + let substs = ccx.tcx.mk_substs(mk_item_substs(ccx, &ty_generics)); let t_fn = ccx.tcx.mk_fn_def(id, substs, ty::BareFnTy { abi: abi, diff --git a/src/librustdoc/clean/inline.rs b/src/librustdoc/clean/inline.rs index ef54be720376a..49091a6c2bcfc 100644 --- a/src/librustdoc/clean/inline.rs +++ b/src/librustdoc/clean/inline.rs @@ -336,7 +336,7 @@ pub fn build_impl(cx: &DocContext, let did = assoc_const.def_id; let type_scheme = tcx.lookup_item_type(did); let default = if assoc_const.has_value { - Some(const_eval::lookup_const_by_id(tcx, did, None, None) + Some(const_eval::lookup_const_by_id(tcx, did, None) .unwrap().0.span.to_src(cx)) } else { None @@ -479,7 +479,7 @@ fn build_const(cx: &DocContext, tcx: &TyCtxt, use rustc::middle::const_eval; use rustc_front::print::pprust; - let (expr, ty) = const_eval::lookup_const_by_id(tcx, did, None, None).unwrap_or_else(|| { + let (expr, ty) = const_eval::lookup_const_by_id(tcx, did, None).unwrap_or_else(|| { panic!("expected lookup_const_by_id to succeed for {:?}", did); }); debug!("converting constant expr {:?} to snippet", expr); diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index cd0e3a030bd31..85e48f85d3d90 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -251,6 +251,7 @@ #![feature(raw)] #![feature(repr_simd)] #![feature(reflect_marker)] +#![feature(rustc_attrs)] #![feature(shared)] #![feature(slice_bytes)] #![feature(slice_concat_ext)] diff --git a/src/libstd/num/f32.rs b/src/libstd/num/f32.rs index 3705302592432..e78d46b22e940 100644 --- a/src/libstd/num/f32.rs +++ b/src/libstd/num/f32.rs @@ -1371,6 +1371,7 @@ mod tests { } #[test] + #[rustc_no_mir] // FIXME #27840 MIR NAN ends up negative. fn test_integer_decode() { assert_eq!(3.14159265359f32.integer_decode(), (13176795, -22, 1)); assert_eq!((-8573.5918555f32).integer_decode(), (8779358, -10, -1)); diff --git a/src/libstd/num/f64.rs b/src/libstd/num/f64.rs index 446e22a20ad7d..cea5a9edd680b 100644 --- a/src/libstd/num/f64.rs +++ b/src/libstd/num/f64.rs @@ -1264,6 +1264,7 @@ mod tests { } #[test] + #[rustc_no_mir] // FIXME #27840 MIR NAN ends up negative. 
fn test_integer_decode() { assert_eq!(3.14159265359f64.integer_decode(), (7074237752028906, -51, 1)); assert_eq!((-8573.5918555f64).integer_decode(), (4713381968463931, -39, -1)); diff --git a/src/libsyntax/feature_gate.rs b/src/libsyntax/feature_gate.rs index fbaf28332c42c..a017e62d54631 100644 --- a/src/libsyntax/feature_gate.rs +++ b/src/libsyntax/feature_gate.rs @@ -350,10 +350,14 @@ pub const KNOWN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGat "the `#[rustc_move_fragments]` attribute \ is just used for rustc unit tests \ and will never be stable")), - ("rustc_mir", Normal, Gated("rustc_attrs", - "the `#[rustc_mir]` attribute \ - is just used for rustc unit tests \ - and will never be stable")), + ("rustc_mir", Whitelisted, Gated("rustc_attrs", + "the `#[rustc_mir]` attribute \ + is just used for rustc unit tests \ + and will never be stable")), + ("rustc_no_mir", Whitelisted, Gated("rustc_attrs", + "the `#[rustc_no_mir]` attribute \ + is just used to make tests pass \ + and will never be stable")), ("allow_internal_unstable", Normal, Gated("allow_internal_unstable", EXPLAIN_ALLOW_INTERNAL_UNSTABLE)), diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs index a61fa84398e9a..20d0493943452 100644 --- a/src/test/codegen/adjustments.rs +++ b/src/test/codegen/adjustments.rs @@ -11,6 +11,7 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] +#![feature(rustc_attrs)] // Hack to get the correct size for the length part in slices // CHECK: @helper([[USIZE:i[0-9]+]]) @@ -20,6 +21,7 @@ fn helper(_: usize) { // CHECK-LABEL: @no_op_slice_adjustment #[no_mangle] +#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] { // We used to generate an extra alloca and memcpy for the block's trailing expression value, so // check that we copy directly to the return value slot diff --git a/src/test/codegen/coercions.rs b/src/test/codegen/coercions.rs index c8c9f5b407c42..74c7192259ac4 100644 --- a/src/test/codegen/coercions.rs +++ b/src/test/codegen/coercions.rs @@ -11,12 +11,14 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] +#![feature(rustc_attrs)] static X: i32 = 5; // CHECK-LABEL: @raw_ptr_to_raw_ptr_noop // CHECK-NOT: alloca #[no_mangle] +#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn raw_ptr_to_raw_ptr_noop() -> *const i32{ &X as *const i32 } @@ -24,6 +26,7 @@ pub fn raw_ptr_to_raw_ptr_noop() -> *const i32{ // CHECK-LABEL: @reference_to_raw_ptr_noop // CHECK-NOT: alloca #[no_mangle] +#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn reference_to_raw_ptr_noop() -> *const i32 { &X } diff --git a/src/test/codegen/consts.rs b/src/test/codegen/consts.rs index 6b4e626df924b..ea4c932d43549 100644 --- a/src/test/codegen/consts.rs +++ b/src/test/codegen/consts.rs @@ -11,6 +11,7 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] +#![feature(rustc_attrs)] // Below, these constants are defined as enum variants that by itself would // have a lower alignment than the enum type. Ensure that we mark them @@ -39,18 +40,21 @@ pub static STATIC: E = E::A(0); // CHECK-LABEL: @static_enum_const #[no_mangle] +#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn static_enum_const() -> E { STATIC } // CHECK-LABEL: @inline_enum_const #[no_mangle] +#[rustc_no_mir] // FIXME #27840 MIR has different codegen. 
pub fn inline_enum_const() -> E { E::A(0) } // CHECK-LABEL: @low_align_const #[no_mangle] +#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn low_align_const() -> E { // Check that low_align_const and high_align_const use the same constant // CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{[0-9]+}}, i8* {{.*}} [[LOW_HIGH:@const[0-9]+]] @@ -59,6 +63,7 @@ pub fn low_align_const() -> E { // CHECK-LABEL: @high_align_const #[no_mangle] +#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn high_align_const() -> E { // Check that low_align_const and high_align_const use the same constant // CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{[0-9]}}, i8* {{.*}} [[LOW_HIGH]] diff --git a/src/test/codegen/drop.rs b/src/test/codegen/drop.rs index 2ac8de6d80294..83dd6a3b00258 100644 --- a/src/test/codegen/drop.rs +++ b/src/test/codegen/drop.rs @@ -11,6 +11,7 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] +#![feature(rustc_attrs)] struct SomeUniqueName; @@ -24,6 +25,7 @@ pub fn possibly_unwinding() { // CHECK-LABEL: @droppy #[no_mangle] +#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn droppy() { // Check that there are exactly 6 drop calls. The cleanups for the unwinding should be reused, so // that's one new drop call per call to possibly_unwinding(), and finally 3 drop calls for the diff --git a/src/test/codegen/extern-functions.rs b/src/test/codegen/extern-functions.rs index ff9d54e67e478..7ee31070b2635 100644 --- a/src/test/codegen/extern-functions.rs +++ b/src/test/codegen/extern-functions.rs @@ -22,3 +22,8 @@ extern { #[unwind] fn unwinding_extern_fn(); } + +pub unsafe fn force_declare() { + extern_fn(); + unwinding_extern_fn(); +} diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs index 08eec0045f78a..36c83412e4f0f 100644 --- a/src/test/codegen/refs.rs +++ b/src/test/codegen/refs.rs @@ -11,6 +11,7 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] +#![feature(rustc_attrs)] // Hack to get the correct size for the length part in slices // CHECK: @helper([[USIZE:i[0-9]+]]) @@ -20,6 +21,7 @@ fn helper(_: usize) { // CHECK-LABEL: @ref_dst #[no_mangle] +#[rustc_no_mir] // FIXME #27840 MIR has different codegen. pub fn ref_dst(s: &[u8]) { // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy // directly to the alloca for "x" diff --git a/src/test/codegen/stores.rs b/src/test/codegen/stores.rs index 5d2d47e1bf3f6..f849a6c9b18b8 100644 --- a/src/test/codegen/stores.rs +++ b/src/test/codegen/stores.rs @@ -11,6 +11,7 @@ // compile-flags: -C no-prepopulate-passes #![crate_type = "lib"] +#![feature(rustc_attrs)] pub struct Bytes { a: u8, @@ -23,25 +24,20 @@ pub struct Bytes { // The array is stored as i32, but its alignment is lower, go with 1 byte to avoid target // dependent alignment #[no_mangle] -pub fn small_array_alignment(x: &mut [i8; 4]) { -// CHECK: [[VAR:%[0-9]+]] = load {{(\[4 x i8\]\*, )?}}[4 x i8]** %x -// CHECK: [[VAR2:%[0-9]+]] = bitcast [4 x i8]* [[VAR]] to i32* -// CHECK: store i32 %{{.*}}, i32* [[VAR2]], align 1 - *x = [0; 4]; +#[rustc_no_mir] // FIXME #27840 MIR has different codegen. 
+pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { +// CHECK: [[VAR:%[0-9]+]] = bitcast [4 x i8]* %y to i32* +// CHECK: store i32 %{{.*}}, i32* [[VAR]], align 1 + *x = y; } // CHECK-LABEL: small_struct_alignment // The struct is stored as i32, but its alignment is lower, go with 1 byte to avoid target // dependent alignment #[no_mangle] -pub fn small_struct_alignment(x: &mut Bytes) { -// CHECK: [[VAR:%[0-9]+]] = load {{(%Bytes\*, )?}}%Bytes** %x -// CHECK: [[VAR2:%[0-9]+]] = bitcast %Bytes* [[VAR]] to i32* -// CHECK: store i32 %{{.*}}, i32* [[VAR2]], align 1 - *x = Bytes { - a: 0, - b: 0, - c: 0, - d: 0, - }; +#[rustc_no_mir] // FIXME #27840 MIR has different codegen. +pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) { +// CHECK: [[VAR:%[0-9]+]] = bitcast %Bytes* %y to i32* +// CHECK: store i32 %{{.*}}, i32* [[VAR]], align 1 + *x = y; } diff --git a/src/test/compile-fail/const-err.rs b/src/test/compile-fail/const-err.rs index 4d156a49192fd..882e4cb2d47e2 100644 --- a/src/test/compile-fail/const-err.rs +++ b/src/test/compile-fail/const-err.rs @@ -8,8 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#[allow(exceeding_bitshifts)] -#[deny(const_err)] +#![feature(rustc_attrs)] +#![allow(exceeding_bitshifts)] +#![deny(const_err)] fn black_box<T>(_: T) { unimplemented!() } @@ -18,6 +19,7 @@ fn black_box<T>(_: T) { const BLA: u8 = 200u8 + 200u8; //~^ ERROR attempted to add with overflow +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let a = -std::i8::MIN; //~^ WARN attempted to negate with overflow diff --git a/src/test/compile-fail/const-eval-overflow.rs b/src/test/compile-fail/const-eval-overflow.rs index 3dfcb5bb29a24..96013551ef492 100644 --- a/src/test/compile-fail/const-eval-overflow.rs +++ b/src/test/compile-fail/const-eval-overflow.rs @@ -8,6 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![feature(rustc_attrs)] #![allow(unused_imports)] // Note: the relevant lint pass here runs before some of the constant @@ -103,6 +104,7 @@ const VALS_U64: (u64, u64, u64, u64) = //~^ ERROR attempted to multiply with overflow ); +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { foo(VALS_I8); foo(VALS_I16); diff --git a/src/test/compile-fail/dupe-symbols-8.rs b/src/test/compile-fail/dupe-symbols-8.rs deleted file mode 100644 index 3c0e545e19335..0000000000000 --- a/src/test/compile-fail/dupe-symbols-8.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -// -// error-pattern:already defined - - -#![allow(warnings)] - -fn main() { - { - extern fn fail() {} - } - { - extern fn fail() {} - } -} diff --git a/src/test/compile-fail/fn-item-type.rs b/src/test/compile-fail/fn-item-type.rs index 2fbd1ddb1e6b9..949000dfed6ac 100644 --- a/src/test/compile-fail/fn-item-type.rs +++ b/src/test/compile-fail/fn-item-type.rs @@ -42,6 +42,6 @@ fn main() { // Make sure we distinguish between trait methods correctly.
eq(<u8 as Foo>::foo, <u16 as Foo>::foo); //~^ ERROR mismatched types - //~| expected `fn() {Foo::foo}` - //~| found `fn() {Foo::foo}` + //~| expected `fn() {<u8 as Foo>::foo}` + //~| found `fn() {<u16 as Foo>::foo}` } diff --git a/src/test/compile-fail/infinite-instantiation.rs b/src/test/compile-fail/infinite-instantiation.rs index 28806b6e2ab8c..d293a1816d89e 100644 --- a/src/test/compile-fail/infinite-instantiation.rs +++ b/src/test/compile-fail/infinite-instantiation.rs @@ -31,7 +31,7 @@ impl<T:ToOpt + Clone> ToOpt for Option<T> { } fn function<T:ToOpt + Clone>(counter: usize, t: T) { -//~^ ERROR reached the recursion limit during monomorphization +//~^ ERROR reached the recursion limit while instantiating `function::<Option< if counter > 0 { function(counter - 1, t.to_option()); // FIXME(#4287) Error message should be here. It should be diff --git a/src/test/compile-fail/intrinsic-return-address.rs b/src/test/compile-fail/intrinsic-return-address.rs index b83f0f73436cf..906056896be1e 100644 --- a/src/test/compile-fail/intrinsic-return-address.rs +++ b/src/test/compile-fail/intrinsic-return-address.rs @@ -15,15 +15,10 @@ extern "rust-intrinsic" { fn return_address() -> *const u8; } -unsafe fn f() { - let _ = return_address(); - //~^ ERROR invalid use of `return_address` intrinsic: function does not use out pointer -} +unsafe fn f() { let _ = return_address(); } +//~^ ERROR invalid use of `return_address` intrinsic: function does not use out pointer -unsafe fn g() -> isize { - let _ = return_address(); - //~^ ERROR invalid use of `return_address` intrinsic: function does not use out pointer - 0 -} +unsafe fn g() -> isize { let _ = return_address(); 0 } +//~^ ERROR invalid use of `return_address` intrinsic: function does not use out pointer fn main() {} diff --git a/src/test/compile-fail/issue-22638.rs b/src/test/compile-fail/issue-22638.rs index e8c17ca0b362b..0c8c2311dcaa5 100644 --- a/src/test/compile-fail/issue-22638.rs +++ b/src/test/compile-fail/issue-22638.rs @@ -17,7 +17,7 @@ struct A (B); impl A { pub fn matches<F: Fn()>(&self, f: &F) { - //~^ ERROR reached the recursion limit during monomorphization + //~^ ERROR reached the recursion limit while instantiating `A::matches::<[closure let &A(ref term) = self; term.matches(f); } diff --git a/src/test/compile-fail/issue-8727.rs b/src/test/compile-fail/issue-8727.rs index 72da6dcaa6c45..525ec85576284 100644 --- a/src/test/compile-fail/issue-8727.rs +++ b/src/test/compile-fail/issue-8727.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// error-pattern:reached the recursion limit during monomorphization +// error-pattern:reached the recursion limit while instantiating `generic::(x: T, y: T) -> T; } +#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls. fn main() { let x = i32x4(0, 0, 0, 0); let y = u32x4(0, 0, 0, 0); diff --git a/src/test/compile-fail/simd-intrinsic-generic-cast.rs b/src/test/compile-fail/simd-intrinsic-generic-cast.rs index 4999b790b130a..cb3bed7209e2d 100644 --- a/src/test/compile-fail/simd-intrinsic-generic-cast.rs +++ b/src/test/compile-fail/simd-intrinsic-generic-cast.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(repr_simd, platform_intrinsics)] +#![feature(repr_simd, platform_intrinsics, rustc_attrs)] #[repr(simd)] #[derive(Copy, Clone)] @@ -35,6 +35,7 @@ extern "platform-intrinsic" { fn simd_cast<T, U>(x: T) -> U; } +#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls.
fn main() { let x = i32x4(0, 0, 0, 0); diff --git a/src/test/compile-fail/simd-intrinsic-generic-comparison.rs b/src/test/compile-fail/simd-intrinsic-generic-comparison.rs index 617b03a87117b..0e7b2bd490470 100644 --- a/src/test/compile-fail/simd-intrinsic-generic-comparison.rs +++ b/src/test/compile-fail/simd-intrinsic-generic-comparison.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(repr_simd, platform_intrinsics)] +#![feature(repr_simd, platform_intrinsics, rustc_attrs)] #[repr(simd)] #[derive(Copy, Clone)] @@ -29,6 +29,7 @@ extern "platform-intrinsic" { fn simd_ge(x: T, y: T) -> U; } +#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls. fn main() { let x = i32x4(0, 0, 0, 0); diff --git a/src/test/compile-fail/simd-intrinsic-generic-elements.rs b/src/test/compile-fail/simd-intrinsic-generic-elements.rs index b0198c411d567..1f4cc72ffe717 100644 --- a/src/test/compile-fail/simd-intrinsic-generic-elements.rs +++ b/src/test/compile-fail/simd-intrinsic-generic-elements.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(repr_simd, platform_intrinsics)] +#![feature(repr_simd, platform_intrinsics, rustc_attrs)] #[repr(simd)] #[derive(Copy, Clone)] @@ -56,6 +56,7 @@ extern "platform-intrinsic" { fn simd_shuffle8(x: T, y: T, idx: [u32; 8]) -> U; } +#[rustc_no_mir] // FIXME #27840 MIR doesn't provide precise spans for calls. fn main() { let x = i32x4(0, 0, 0, 0); diff --git a/src/test/pretty/issue-4264.pp b/src/test/pretty/issue-4264.pp index 0347631aeb336..fedb68a26afc9 100644 --- a/src/test/pretty/issue-4264.pp +++ b/src/test/pretty/issue-4264.pp @@ -41,37 +41,37 @@ ((::std::fmt::format as fn(core::fmt::Arguments<'_>) -> collections::string::String {collections::fmt::format})(((::std::fmt::Arguments::new_v1 as - fn(&[&str], &[core::fmt::ArgumentV1<'_>]) -> core::fmt::Arguments<'_> {core::fmt::Arguments<'a>::new_v1})(({ - static __STATIC_FMTSTR: - &'static [&'static str] - = - (&([("test" + fn(&[&str], &[core::fmt::ArgumentV1<'_>]) -> core::fmt::Arguments<'_> {core::fmt::Arguments<'a><'_>::new_v1})(({ + static __STATIC_FMTSTR: + &'static [&'static str] + = + (&([("test" + as + &'static str)] + as + [&'static str; 1]) as - &'static str)] - as - [&'static str; 1]) - as - &'static [&'static str; 1]); - (__STATIC_FMTSTR - as - &'static [&'static str]) - } - as - &[&str]), - (&(match (() + &'static [&'static str; 1]); + (__STATIC_FMTSTR as - ()) - { - () - => - ([] - as - [core::fmt::ArgumentV1<'_>; 0]), - } - as - [core::fmt::ArgumentV1<'_>; 0]) - as - &[core::fmt::ArgumentV1<'_>; 0])) + &'static [&'static str]) + } + as + &[&str]), + (&(match (() + as + ()) + { + () + => + ([] + as + [core::fmt::ArgumentV1<'_>; 0]), + } + as + [core::fmt::ArgumentV1<'_>; 0]) + as + &[core::fmt::ArgumentV1<'_>; 0])) as core::fmt::Arguments<'_>)) as collections::string::String); diff --git a/src/test/run-fail/divide-by-zero.rs b/src/test/run-fail/divide-by-zero.rs index de69b7b9fa670..d3817b25d6100 100644 --- a/src/test/run-fail/divide-by-zero.rs +++ b/src/test/run-fail/divide-by-zero.rs @@ -8,7 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:attempted to divide by zero + +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. 
fn main() { let y = 0; let _z = 1 / y; diff --git a/src/test/run-fail/mod-zero.rs b/src/test/run-fail/mod-zero.rs index 76d4de7ecb03c..7a151c8c572f6 100644 --- a/src/test/run-fail/mod-zero.rs +++ b/src/test/run-fail/mod-zero.rs @@ -8,7 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:attempted remainder with a divisor of zero + +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let y = 0; let _z = 1 % y; diff --git a/src/test/run-fail/overflowing-add.rs b/src/test/run-fail/overflowing-add.rs index 6c6a41fa6f2e3..c989cc594536b 100644 --- a/src/test/run-fail/overflowing-add.rs +++ b/src/test/run-fail/overflowing-add.rs @@ -8,10 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'arithmetic operation overflowed' // compile-flags: -C debug-assertions +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let _x = 200u8 + 200u8 + 200u8; } diff --git a/src/test/run-fail/overflowing-lsh-1.rs b/src/test/run-fail/overflowing-lsh-1.rs index 62935bacce871..a27210112982a 100644 --- a/src/test/run-fail/overflowing-lsh-1.rs +++ b/src/test/run-fail/overflowing-lsh-1.rs @@ -8,11 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'shift operation overflowed' // compile-flags: -C debug-assertions #![warn(exceeding_bitshifts)] +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let _x = 1_i32 << 32; } diff --git a/src/test/run-fail/overflowing-lsh-2.rs b/src/test/run-fail/overflowing-lsh-2.rs index f6e6cb105c51b..fe0bcc5b98545 100644 --- a/src/test/run-fail/overflowing-lsh-2.rs +++ b/src/test/run-fail/overflowing-lsh-2.rs @@ -8,11 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'shift operation overflowed' // compile-flags: -C debug-assertions #![warn(exceeding_bitshifts)] +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let _x = 1 << -1; } diff --git a/src/test/run-fail/overflowing-lsh-3.rs b/src/test/run-fail/overflowing-lsh-3.rs index a70f31954c6ed..aac220d32d9ce 100644 --- a/src/test/run-fail/overflowing-lsh-3.rs +++ b/src/test/run-fail/overflowing-lsh-3.rs @@ -8,11 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'shift operation overflowed' // compile-flags: -C debug-assertions #![warn(exceeding_bitshifts)] +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let _x = 1_u64 << 64; } diff --git a/src/test/run-fail/overflowing-lsh-4.rs b/src/test/run-fail/overflowing-lsh-4.rs index 571feaeb94345..7e8b266da49be 100644 --- a/src/test/run-fail/overflowing-lsh-4.rs +++ b/src/test/run-fail/overflowing-lsh-4.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'shift operation overflowed' // compile-flags: -C debug-assertions @@ -16,6 +18,8 @@ #![warn(exceeding_bitshifts)] +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { // this signals overflow when checking is on let x = 1_i8 << 17; diff --git a/src/test/run-fail/overflowing-mul.rs b/src/test/run-fail/overflowing-mul.rs index a413a6f0abfa2..8cba700bbf9a3 100644 --- a/src/test/run-fail/overflowing-mul.rs +++ b/src/test/run-fail/overflowing-mul.rs @@ -8,9 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'arithmetic operation overflowed' // compile-flags: -C debug-assertions +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let x = 200u8 * 4; } diff --git a/src/test/run-fail/overflowing-neg.rs b/src/test/run-fail/overflowing-neg.rs index 7891d1ce9bed4..2d9d746bef324 100644 --- a/src/test/run-fail/overflowing-neg.rs +++ b/src/test/run-fail/overflowing-neg.rs @@ -8,9 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'attempted to negate with overflow' // compile-flags: -C debug-assertions +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let _x = -std::i8::MIN; } diff --git a/src/test/run-fail/overflowing-rsh-1.rs b/src/test/run-fail/overflowing-rsh-1.rs index b58eaf7f836c2..63c808dc80a4e 100644 --- a/src/test/run-fail/overflowing-rsh-1.rs +++ b/src/test/run-fail/overflowing-rsh-1.rs @@ -8,11 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'shift operation overflowed' // compile-flags: -C debug-assertions #![warn(exceeding_bitshifts)] +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let _x = -1_i32 >> 32; } diff --git a/src/test/run-fail/overflowing-rsh-2.rs b/src/test/run-fail/overflowing-rsh-2.rs index 40b468a6ad419..8b89e57c85bb5 100644 --- a/src/test/run-fail/overflowing-rsh-2.rs +++ b/src/test/run-fail/overflowing-rsh-2.rs @@ -8,11 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'shift operation overflowed' // compile-flags: -C debug-assertions #![warn(exceeding_bitshifts)] +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let _x = -1_i32 >> -1; } diff --git a/src/test/run-fail/overflowing-rsh-3.rs b/src/test/run-fail/overflowing-rsh-3.rs index afe6a908cb5f1..8874587064c35 100644 --- a/src/test/run-fail/overflowing-rsh-3.rs +++ b/src/test/run-fail/overflowing-rsh-3.rs @@ -8,11 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'shift operation overflowed' // compile-flags: -C debug-assertions #![warn(exceeding_bitshifts)] +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let _x = -1_i64 >> 64; } diff --git a/src/test/run-fail/overflowing-rsh-4.rs b/src/test/run-fail/overflowing-rsh-4.rs index 585186575f6ed..d74fd8a6b8e41 100644 --- a/src/test/run-fail/overflowing-rsh-4.rs +++ b/src/test/run-fail/overflowing-rsh-4.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'shift operation overflowed' // compile-flags: -C debug-assertions @@ -16,6 +18,8 @@ #![warn(exceeding_bitshifts)] +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { // this signals overflow when checking is on let x = 2_i8 >> 17; diff --git a/src/test/run-fail/overflowing-rsh-5.rs b/src/test/run-fail/overflowing-rsh-5.rs index 34a7ff833bbbd..249b952a5dca2 100644 --- a/src/test/run-fail/overflowing-rsh-5.rs +++ b/src/test/run-fail/overflowing-rsh-5.rs @@ -8,11 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'shift operation overflowed' // compile-flags: -C debug-assertions #![warn(exceeding_bitshifts)] +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let _n = 1i64 >> [64][0]; } diff --git a/src/test/run-fail/overflowing-rsh-6.rs b/src/test/run-fail/overflowing-rsh-6.rs index b6f4348b184de..1227f35444a60 100644 --- a/src/test/run-fail/overflowing-rsh-6.rs +++ b/src/test/run-fail/overflowing-rsh-6.rs @@ -8,12 +8,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
' panicked at 'shift operation overflowed' // compile-flags: -C debug-assertions #![warn(exceeding_bitshifts)] #![feature(const_indexing)] +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let _n = 1i64 >> [64][0]; } diff --git a/src/test/run-fail/overflowing-sub.rs b/src/test/run-fail/overflowing-sub.rs index ece4d37c36eb3..ce243a50e0b66 100644 --- a/src/test/run-fail/overflowing-sub.rs +++ b/src/test/run-fail/overflowing-sub.rs @@ -8,9 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // error-pattern:thread '
<main>' panicked at 'arithmetic operation overflowed' // compile-flags: -C debug-assertions +#![feature(rustc_attrs)] +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn main() { let _x = 42u8 - (42u8 + 1); } diff --git a/src/test/run-make/debug-assertions/debug.rs b/src/test/run-make/debug-assertions/debug.rs index a0ccc75afd05b..fb54161c2c127 100644 --- a/src/test/run-make/debug-assertions/debug.rs +++ b/src/test/run-make/debug-assertions/debug.rs @@ -8,6 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![feature(rustc_attrs)] #![deny(warnings)] use std::env; @@ -36,6 +37,7 @@ fn debug_assert() { } fn overflow() { + #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. fn add(a: u8, b: u8) -> u8 { a + b } add(200u8, 200u8); diff --git a/src/test/run-pass-valgrind/cast-enum-with-dtor.rs b/src/test/run-pass-valgrind/cast-enum-with-dtor.rs index 247e82c2f0924..0de949471c684 100644 --- a/src/test/run-pass-valgrind/cast-enum-with-dtor.rs +++ b/src/test/run-pass-valgrind/cast-enum-with-dtor.rs @@ -8,10 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-pretty : (#23623) problems when ending with // comments + // no-prefer-dynamic #![allow(dead_code)] -#![feature(const_fn)] +#![feature(const_fn, rustc_attrs)] // check dtor calling order when casting enums. @@ -36,6 +38,7 @@ impl Drop for E { } } +#[rustc_no_mir] // FIXME #27840 MIR miscompiles this. fn main() { assert_eq!(FLAG.load(Ordering::SeqCst), 0); { diff --git a/src/test/run-pass/backtrace-debuginfo-aux.rs b/src/test/run-pass/backtrace-debuginfo-aux.rs index 48df600214ad0..b80c938fed5d4 100644 --- a/src/test/run-pass/backtrace-debuginfo-aux.rs +++ b/src/test/run-pass/backtrace-debuginfo-aux.rs @@ -11,6 +11,7 @@ // ignore-test: not a test, used by backtrace-debuginfo.rs to test file!() #[inline(never)] +#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently. pub fn callback<F>(f: F) where F: FnOnce((&'static str, u32)) { f((file!(), line!())) } @@ -20,6 +21,7 @@ pub fn callback<F>(f: F) where F: FnOnce((&'static str, u32)) { // this case. #[cfg_attr(not(target_env = "msvc"), inline(always))] #[cfg_attr(target_env = "msvc", inline(never))] +#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently. pub fn callback_inlined<F>(f: F) where F: FnOnce((&'static str, u32)) { f((file!(), line!())) } diff --git a/src/test/run-pass/backtrace-debuginfo.rs b/src/test/run-pass/backtrace-debuginfo.rs index 8b2b26948824f..fd1c01723395d 100644 --- a/src/test/run-pass/backtrace-debuginfo.rs +++ b/src/test/run-pass/backtrace-debuginfo.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![feature(rustc_attrs)] + // We disable tail merging here because it can't preserve debuginfo and thus // potentially breaks the backtraces. Also, subtle changes can decide whether // tail merging succeeds, so the test might work today but fail tomorrow due to a @@ -72,6 +74,7 @@ fn dump_filelines(filelines: &[Pos]) { } #[inline(never)] +#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently. fn inner(counter: &mut i32, main_pos: Pos, outer_pos: Pos) { check!(counter; main_pos, outer_pos); check!(counter; main_pos, outer_pos); @@ -88,6 +91,7 @@ fn inner(counter: &mut i32, main_pos: Pos, outer_pos: Pos) { // this case.
#[cfg_attr(not(target_env = "msvc"), inline(always))] #[cfg_attr(target_env = "msvc", inline(never))] +#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently. fn inner_inlined(counter: &mut i32, main_pos: Pos, outer_pos: Pos) { check!(counter; main_pos, outer_pos); check!(counter; main_pos, outer_pos); @@ -113,6 +117,7 @@ fn inner_inlined(counter: &mut i32, main_pos: Pos, outer_pos: Pos) { } #[inline(never)] +#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently. fn outer(mut counter: i32, main_pos: Pos) { inner(&mut counter, main_pos, pos!()); inner_inlined(&mut counter, main_pos, pos!()); @@ -157,6 +162,7 @@ fn run_test(me: &str) { } #[inline(never)] +#[rustc_no_mir] // FIXME #31005 MIR missing debuginfo currently. fn main() { let args: Vec = env::args().collect(); if args.len() >= 2 { diff --git a/src/test/run-pass/const-str-ptr.rs b/src/test/run-pass/const-str-ptr.rs index 4c5152ff90ffd..1736ab5bb82c8 100644 --- a/src/test/run-pass/const-str-ptr.rs +++ b/src/test/run-pass/const-str-ptr.rs @@ -8,6 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![feature(rustc_attrs)] + +// ignore-pretty : (#23623) problems when ending with // comments use std::{str, string}; @@ -15,6 +18,7 @@ const A: [u8; 2] = ['h' as u8, 'i' as u8]; const B: &'static [u8; 2] = &A; const C: *const u8 = B as *const u8; +#[rustc_no_mir] // FIXME #27840 MIR can't do rvalue promotion yet. pub fn main() { unsafe { let foo = &A as *const u8; diff --git a/src/test/run-pass/issue-23338-ensure-param-drop-order.rs b/src/test/run-pass/issue-23338-ensure-param-drop-order.rs index 507d482febfd9..73c52a0843cfb 100644 --- a/src/test/run-pass/issue-23338-ensure-param-drop-order.rs +++ b/src/test/run-pass/issue-23338-ensure-param-drop-order.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![feature(rustc_attrs)] + // ignore-pretty : (#23623) problems when ending with // comments // This test is ensuring that parameters are indeed dropped after @@ -64,6 +66,7 @@ fn test<'a>(log: d::Log<'a>) { d::println(&format!("result {}", result)); } +#[rustc_no_mir] // FIXME #29855 MIR doesn't handle all drops correctly. fn foo<'a>(da0: D<'a>, de1: D<'a>) -> D<'a> { d::println("entered foo"); let de2 = de1.incr(); // creates D(de_2, 2) diff --git a/src/test/run-pass/issue-8460.rs b/src/test/run-pass/issue-8460.rs index 8ec9f8aff8ec7..7589bce31f480 100644 --- a/src/test/run-pass/issue-8460.rs +++ b/src/test/run-pass/issue-8460.rs @@ -9,31 +9,44 @@ // except according to those terms. // ignore-emscripten no threads support +// ignore-pretty : (#23623) problems when ending with // comments -#![feature(zero_one)] +#![feature(rustc_attrs, stmt_expr_attributes, zero_one)] use std::num::Zero; use std::thread; +macro_rules! check { + ($($e:expr),*) => { + $(assert!(thread::spawn({ + #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. + move|| { $e; } + }).join().is_err());)* + } +} + +#[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD. 
fn main() { - assert!(thread::spawn(move|| { isize::min_value() / -1; }).join().is_err()); - assert!(thread::spawn(move|| { i8::min_value() / -1; }).join().is_err()); - assert!(thread::spawn(move|| { i16::min_value() / -1; }).join().is_err()); - assert!(thread::spawn(move|| { i32::min_value() / -1; }).join().is_err()); - assert!(thread::spawn(move|| { i64::min_value() / -1; }).join().is_err()); - assert!(thread::spawn(move|| { 1isize / isize::zero(); }).join().is_err()); - assert!(thread::spawn(move|| { 1i8 / i8::zero(); }).join().is_err()); - assert!(thread::spawn(move|| { 1i16 / i16::zero(); }).join().is_err()); - assert!(thread::spawn(move|| { 1i32 / i32::zero(); }).join().is_err()); - assert!(thread::spawn(move|| { 1i64 / i64::zero(); }).join().is_err()); - assert!(thread::spawn(move|| { isize::min_value() % -1; }).join().is_err()); - assert!(thread::spawn(move|| { i8::min_value() % -1; }).join().is_err()); - assert!(thread::spawn(move|| { i16::min_value() % -1; }).join().is_err()); - assert!(thread::spawn(move|| { i32::min_value() % -1; }).join().is_err()); - assert!(thread::spawn(move|| { i64::min_value() % -1; }).join().is_err()); - assert!(thread::spawn(move|| { 1isize % isize::zero(); }).join().is_err()); - assert!(thread::spawn(move|| { 1i8 % i8::zero(); }).join().is_err()); - assert!(thread::spawn(move|| { 1i16 % i16::zero(); }).join().is_err()); - assert!(thread::spawn(move|| { 1i32 % i32::zero(); }).join().is_err()); - assert!(thread::spawn(move|| { 1i64 % i64::zero(); }).join().is_err()); + check![ + isize::min_value() / -1, + i8::min_value() / -1, + i16::min_value() / -1, + i32::min_value() / -1, + i64::min_value() / -1, + 1isize / isize::zero(), + 1i8 / i8::zero(), + 1i16 / i16::zero(), + 1i32 / i32::zero(), + 1i64 / i64::zero(), + isize::min_value() % -1, + i8::min_value() % -1, + i16::min_value() % -1, + i32::min_value() % -1, + i64::min_value() % -1, + 1isize % isize::zero(), + 1i8 % i8::zero(), + 1i16 % i16::zero(), + 1i32 % i32::zero(), + 1i64 % i64::zero() + ]; } diff --git a/src/test/run-pass/mir_raw_fat_ptr.rs b/src/test/run-pass/mir_raw_fat_ptr.rs index 9bbfbb6822463..c0ba7a76dba48 100644 --- a/src/test/run-pass/mir_raw_fat_ptr.rs +++ b/src/test/run-pass/mir_raw_fat_ptr.rs @@ -10,6 +10,8 @@ #![feature(rustc_attrs)] +// ignore-pretty : (#23623) problems when ending with // comments + // check raw fat pointer ops in mir // FIXME: please improve this when we get monomorphization support @@ -119,6 +121,7 @@ impl Foo for T { struct S(u32, T); +#[rustc_no_mir] // FIXME #27840 MIR can't do rvalue promotion yet. fn main() { let array = [0,1,2,3,4]; let array2 = [5,6,7,8,9]; diff --git a/src/test/run-pass/simd-intrinsic-generic-elements.rs b/src/test/run-pass/simd-intrinsic-generic-elements.rs index f0444c2717056..ffb9e6072dfe3 100644 --- a/src/test/run-pass/simd-intrinsic-generic-elements.rs +++ b/src/test/run-pass/simd-intrinsic-generic-elements.rs @@ -8,7 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(repr_simd, platform_intrinsics)] +#![feature(repr_simd, rustc_attrs, platform_intrinsics)] + +// ignore-pretty : (#23623) problems when ending with // comments #[repr(simd)] #[derive(Copy, Clone, Debug, PartialEq)] @@ -50,6 +52,7 @@ macro_rules! all_eq { }} } +#[rustc_no_mir] // FIXME #27840 MIR doesn't handle shuffle constants. 
fn main() { let x2 = i32x2(20, 21); let x3 = i32x3(30, 31, 32); diff --git a/src/test/run-pass/super-fast-paren-parsing.rs b/src/test/run-pass/super-fast-paren-parsing.rs index 69ec0a2222ddb..b764a983a0c09 100644 --- a/src/test/run-pass/super-fast-paren-parsing.rs +++ b/src/test/run-pass/super-fast-paren-parsing.rs @@ -11,6 +11,7 @@ // ignore-pretty // // exec-env:RUST_MIN_STACK=16000000 +// rustc-env:RUST_MIN_STACK=16000000 // // Big stack is needed for pretty printing, a little sad... diff --git a/src/test/run-pass/unique-ffi-symbols.rs b/src/test/run-pass/unique-ffi-symbols.rs new file mode 100644 index 0000000000000..81563f40e94bb --- /dev/null +++ b/src/test/run-pass/unique-ffi-symbols.rs @@ -0,0 +1,25 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// We used to have a __rust_abi shim that resulted in duplicated symbols +// whenever the item path wasn't enough to disambiguate between them. +fn main() { + let a = { + extern fn good() -> i32 { return 0; } + good as extern fn() -> i32 + }; + let b = { + extern fn good() -> i32 { return 5; } + good as extern fn() -> i32 + }; + + assert!(a != b); + assert_eq!((a(), b()), (0, 5)); +} diff --git a/src/test/run-pass/zero-size-type-destructors.rs b/src/test/run-pass/zero-size-type-destructors.rs index fecbeed407c03..a663ae650c087 100644 --- a/src/test/run-pass/zero-size-type-destructors.rs +++ b/src/test/run-pass/zero-size-type-destructors.rs @@ -8,11 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![feature(rustc_attrs, unsafe_no_drop_flag)] -#![feature(unsafe_no_drop_flag)] +// ignore-pretty : (#23623) problems when ending with // comments static mut destructions : isize = 3; +#[rustc_no_mir] // FIXME #29855 MIR doesn't handle all drops correctly. pub fn foo() { #[unsafe_no_drop_flag] struct Foo;