diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs
index de57edc1251a2..8a2f79e6793c0 100644
--- a/src/librustc/dep_graph/graph.rs
+++ b/src/librustc/dep_graph/graph.rs
@@ -292,17 +292,28 @@ impl DepGraph {
             task_deps.map(|lock| lock.into_inner()),
         );

+        let print_status = cfg!(debug_assertions) && hcx.sess().opts.debugging_opts.dep_tasks;
+
         // Determine the color of the new DepNode.
         if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
             let prev_fingerprint = data.previous.fingerprint_by_index(prev_index);

             let color = if let Some(current_fingerprint) = current_fingerprint {
                 if current_fingerprint == prev_fingerprint {
+                    if print_status {
+                        eprintln!("[task::green] {:?}", key);
+                    }
                     DepNodeColor::Green(dep_node_index)
                 } else {
+                    if print_status {
+                        eprintln!("[task::red] {:?}", key);
+                    }
                     DepNodeColor::Red
                 }
             } else {
+                if print_status {
+                    eprintln!("[task::unknown] {:?}", key);
+                }
                 // Mark the node as Red if we can't hash the result
                 DepNodeColor::Red
             };
@@ -312,6 +323,10 @@ impl DepGraph {
                      insertion for {:?}", key);

             data.colors.insert(prev_index, color);
+        } else {
+            if print_status {
+                eprintln!("[task::new] {:?}", key);
+            }
         }

         (result, dep_node_index)
diff --git a/src/librustc/infer/error_reporting/nice_region_error/mod.rs b/src/librustc/infer/error_reporting/nice_region_error/mod.rs
index dad1e3ba80da3..d995fe92337c4 100644
--- a/src/librustc/infer/error_reporting/nice_region_error/mod.rs
+++ b/src/librustc/infer/error_reporting/nice_region_error/mod.rs
@@ -1,9 +1,10 @@
 use crate::infer::InferCtxt;
 use crate::infer::lexical_region_resolve::RegionResolutionError;
 use crate::infer::lexical_region_resolve::RegionResolutionError::*;
-use syntax::source_map::Span;
 use crate::ty::{self, TyCtxt};
 use crate::util::common::ErrorReported;
+use errors::DiagnosticBuilder;
+use syntax::source_map::Span;

 mod different_lifetimes;
 mod find_anon_type;
@@ -59,7 +60,7 @@ impl<'cx, 'gcx, 'tcx> NiceRegionError<'cx, 'gcx, 'tcx> {
         self.infcx.tcx
     }

-    pub fn try_report_from_nll(&self) -> Option<ErrorReported> {
+    pub fn try_report_from_nll(&self) -> Option<DiagnosticBuilder<'cx>> {
         // Due to the improved diagnostics returned by the MIR borrow checker, only a subset of
         // the nice region errors are required when running under the MIR borrow checker.
         self.try_report_named_anon_conflict()
@@ -68,6 +69,7 @@ impl<'cx, 'gcx, 'tcx> NiceRegionError<'cx, 'gcx, 'tcx> {

     pub fn try_report(&self) -> Option<ErrorReported> {
         self.try_report_from_nll()
+            .map(|mut diag| { diag.emit(); ErrorReported })
             .or_else(|| self.try_report_anon_anon_conflict())
             .or_else(|| self.try_report_outlives_closure())
             .or_else(|| self.try_report_static_impl_trait())
diff --git a/src/librustc/infer/error_reporting/nice_region_error/named_anon_conflict.rs b/src/librustc/infer/error_reporting/nice_region_error/named_anon_conflict.rs
index b10af400f2b6c..3821484d38e5f 100644
--- a/src/librustc/infer/error_reporting/nice_region_error/named_anon_conflict.rs
+++ b/src/librustc/infer/error_reporting/nice_region_error/named_anon_conflict.rs
@@ -2,13 +2,12 @@
 //! where one region is named and the other is anonymous.
 use crate::infer::error_reporting::nice_region_error::NiceRegionError;
 use crate::ty;
-use crate::util::common::ErrorReported;
-use errors::Applicability;
+use errors::{Applicability, DiagnosticBuilder};

 impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> {
     /// When given a `ConcreteFailure` for a function with arguments containing a named region and
     /// an anonymous region, emit a descriptive diagnostic error.
- pub(super) fn try_report_named_anon_conflict(&self) -> Option { + pub(super) fn try_report_named_anon_conflict(&self) -> Option> { let (span, sub, sup) = self.get_regions(); debug!( @@ -96,21 +95,23 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { ("parameter type".to_owned(), "type".to_owned()) }; - struct_span_err!( + let mut diag = struct_span_err!( self.tcx().sess, span, E0621, "explicit lifetime required in {}", error_var - ).span_suggestion( - new_ty_span, - &format!("add explicit lifetime `{}` to {}", named, span_label_var), - new_ty.to_string(), - Applicability::Unspecified, - ) - .span_label(span, format!("lifetime `{}` required", named)) - .emit(); - return Some(ErrorReported); + ); + + diag.span_suggestion( + new_ty_span, + &format!("add explicit lifetime `{}` to {}", named, span_label_var), + new_ty.to_string(), + Applicability::Unspecified, + ) + .span_label(span, format!("lifetime `{}` required", named)); + + Some(diag) } // This method returns whether the given Region is Named diff --git a/src/librustc/infer/error_reporting/nice_region_error/placeholder_error.rs b/src/librustc/infer/error_reporting/nice_region_error/placeholder_error.rs index 843fa8b0182e2..3b2fb7d41008e 100644 --- a/src/librustc/infer/error_reporting/nice_region_error/placeholder_error.rs +++ b/src/librustc/infer/error_reporting/nice_region_error/placeholder_error.rs @@ -8,13 +8,12 @@ use crate::traits::{ObligationCause, ObligationCauseCode}; use crate::ty; use crate::ty::error::ExpectedFound; use crate::ty::subst::Substs; -use crate::util::common::ErrorReported; use crate::util::ppaux::RegionHighlightMode; impl NiceRegionError<'me, 'gcx, 'tcx> { /// When given a `ConcreteFailure` for a function with arguments containing a named region and /// an anonymous region, emit a descriptive diagnostic error. - pub(super) fn try_report_placeholder_conflict(&self) -> Option { + pub(super) fn try_report_placeholder_conflict(&self) -> Option> { match &self.error { /////////////////////////////////////////////////////////////////////////// // NB. The ordering of cases in this match is very @@ -178,7 +177,7 @@ impl NiceRegionError<'me, 'gcx, 'tcx> { trait_def_id: DefId, expected_substs: &'tcx Substs<'tcx>, actual_substs: &'tcx Substs<'tcx>, - ) -> ErrorReported { + ) -> DiagnosticBuilder<'me> { debug!( "try_report_placeholders_trait(\ vid={:?}, \ @@ -295,8 +294,7 @@ impl NiceRegionError<'me, 'gcx, 'tcx> { any_self_ty_has_vid, ); - err.emit(); - ErrorReported + err } /// Add notes with details about the expected and actual trait refs, with attention to cases diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 22b1c3f18acb2..65da458efbfff 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -1237,6 +1237,8 @@ options! 
{DebuggingOptions, DebuggingSetter, basic_debugging_options, "show extended diagnostic help"), continue_parse_after_error: bool = (false, parse_bool, [TRACKED], "attempt to recover from parse errors (experimental)"), + dep_tasks: bool = (false, parse_bool, [UNTRACKED], + "print tasks that execute and the color their dep node gets (requires debug build)"), incremental: Option = (None, parse_opt_string, [UNTRACKED], "enable incremental compilation (experimental)"), incremental_queries: bool = (true, parse_bool, [UNTRACKED], diff --git a/src/librustc/session/mod.rs b/src/librustc/session/mod.rs index 58bd4782b2143..833785f04076c 100644 --- a/src/librustc/session/mod.rs +++ b/src/librustc/session/mod.rs @@ -9,7 +9,7 @@ use crate::lint; use crate::lint::builtin::BuiltinLintDiagnostics; use crate::middle::allocator::AllocatorKind; use crate::middle::dependency_format; -use crate::session::config::{OutputType, Lto}; +use crate::session::config::OutputType; use crate::session::search_paths::{PathKind, SearchPath}; use crate::util::nodemap::{FxHashMap, FxHashSet}; use crate::util::common::{duration_to_secs_str, ErrorReported}; @@ -1246,20 +1246,6 @@ pub fn build_session_( // If it is useful to have a Session available already for validating a // commandline argument, you can do so here. fn validate_commandline_args_with_session_available(sess: &Session) { - - if sess.opts.incremental.is_some() { - match sess.lto() { - Lto::Thin | - Lto::Fat => { - sess.err("can't perform LTO when compiling incrementally"); - } - Lto::ThinLocal | - Lto::No => { - // This is fine - } - } - } - // Since we don't know if code in an rlib will be linked to statically or // dynamically downstream, rustc generates `__imp_` symbols that help the // MSVC linker deal with this lack of knowledge (#27438). Unfortunately, diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 233aff9de243f..19b39be8a4ddf 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -124,7 +124,15 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> { let job = match lock.active.entry((*key).clone()) { Entry::Occupied(entry) => { match *entry.get() { - QueryResult::Started(ref job) => job.clone(), + QueryResult::Started(ref job) => { + //For parallel queries, we'll block and wait until the query running + //in another thread has completed. Record how long we wait in the + //self-profiler + #[cfg(parallel_compiler)] + tcx.sess.profiler(|p| p.query_blocked_start(Q::NAME, Q::CATEGORY)); + + job.clone() + }, QueryResult::Poisoned => FatalError.raise(), } } @@ -160,7 +168,10 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> { // thread #[cfg(parallel_compiler)] { - if let Err(cycle) = job.r#await(tcx, span) { + let result = job.r#await(tcx, span); + tcx.sess.profiler(|p| p.query_blocked_end(Q::NAME, Q::CATEGORY)); + + if let Err(cycle) = result { return TryGetJob::JobCompleted(Err(cycle)); } } @@ -441,7 +452,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // First we try to load the result from the on-disk cache let result = if Q::cache_on_disk(self.global_tcx(), key.clone()) && self.sess.opts.debugging_opts.incremental_queries { + self.sess.profiler(|p| p.incremental_load_result_start(Q::NAME)); let result = Q::try_load_from_disk(self.global_tcx(), prev_dep_node_index); + self.sess.profiler(|p| p.incremental_load_result_end(Q::NAME)); // We always expect to find a cached result for things that // can be forced from DepNode. 
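The query plumbing changes above bracket two potentially slow paths with paired profiler events: blocking on a query already running in another thread (`query_blocked_start`/`query_blocked_end`, under `cfg(parallel_compiler)`) and loading a cached query result from disk (`incremental_load_result_start`/`incremental_load_result_end`). The `profiling.rs` hunk below adds the matching event variants and keeps them out of the simple summary output. As a rough, self-contained sketch of that start/end pairing — with stand-in types and a made-up `total_load_time_for` helper, not rustc's actual `SelfProfiler` — the pattern looks like this:

```rust
use std::time::{Duration, Instant};

// Stand-ins for rustc's ProfilerEvent variants: each slow step is recorded as
// a Start event before it runs and an End event after it, carrying only the
// query name and a timestamp.
enum Event {
    IncrementalLoadResultStart { query_name: &'static str, time: Instant },
    IncrementalLoadResultEnd { query_name: &'static str, time: Instant },
}

#[derive(Default)]
struct Profiler {
    events: Vec<Event>,
}

impl Profiler {
    fn incremental_load_result_start(&mut self, query_name: &'static str) {
        self.events.push(Event::IncrementalLoadResultStart { query_name, time: Instant::now() });
    }

    fn incremental_load_result_end(&mut self, query_name: &'static str) {
        self.events.push(Event::IncrementalLoadResultEnd { query_name, time: Instant::now() });
    }

    // Hypothetical post-processing: pair each Start with the next End for the
    // same query and sum the elapsed time.
    fn total_load_time_for(&self, query_name: &'static str) -> Duration {
        let mut total = Duration::new(0, 0);
        let mut open: Option<Instant> = None;
        for event in &self.events {
            match event {
                Event::IncrementalLoadResultStart { query_name: q, time } if *q == query_name => {
                    open = Some(*time);
                }
                Event::IncrementalLoadResultEnd { query_name: q, time } if *q == query_name => {
                    if let Some(start) = open.take() {
                        total += *time - start;
                    }
                }
                _ => {}
            }
        }
        total
    }
}

fn main() {
    let mut profiler = Profiler::default();
    profiler.incremental_load_result_start("typeck_tables_of");
    // ... the cached result would be deserialized from disk here ...
    profiler.incremental_load_result_end("typeck_tables_of");
    println!("cache loads took {:?}", profiler.total_load_time_for("typeck_tables_of"));
}
```

Recording only a timestamped event on the hot path (a single `Vec` push) and doing the pairing later is what lets the real methods stay `#[inline]` and cheap even when profiling is enabled.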
diff --git a/src/librustc/util/profiling.rs b/src/librustc/util/profiling.rs index f8fa01b639500..c90bd12a3100f 100644 --- a/src/librustc/util/profiling.rs +++ b/src/librustc/util/profiling.rs @@ -25,6 +25,10 @@ pub enum ProfilerEvent { GenericActivityEnd { category: ProfileCategory, time: Instant }, QueryCacheHit { query_name: &'static str, category: ProfileCategory }, QueryCount { query_name: &'static str, category: ProfileCategory, count: usize }, + IncrementalLoadResultStart { query_name: &'static str, time: Instant }, + IncrementalLoadResultEnd { query_name: &'static str, time: Instant }, + QueryBlockedStart { query_name: &'static str, category: ProfileCategory, time: Instant }, + QueryBlockedEnd { query_name: &'static str, category: ProfileCategory, time: Instant }, } impl ProfilerEvent { @@ -32,9 +36,17 @@ impl ProfilerEvent { use self::ProfilerEvent::*; match self { - QueryStart { .. } | GenericActivityStart { .. } => true, - QueryEnd { .. } | GenericActivityEnd { .. } | - QueryCacheHit { .. } | QueryCount { .. } => false, + QueryStart { .. } | + GenericActivityStart { .. } | + IncrementalLoadResultStart { .. } | + QueryBlockedStart { .. } => true, + + QueryEnd { .. } | + GenericActivityEnd { .. } | + QueryCacheHit { .. } | + QueryCount { .. } | + IncrementalLoadResultEnd { .. } | + QueryBlockedEnd { .. } => false, } } } @@ -57,12 +69,7 @@ impl CategoryResultData { } fn total_time(&self) -> u64 { - let mut total = 0; - for (_, time) in &self.query_times { - total += time; - } - - total + self.query_times.iter().map(|(_, time)| time).sum() } fn total_cache_data(&self) -> (u64, u64) { @@ -121,13 +128,7 @@ impl CalculatedResults { } fn total_time(&self) -> u64 { - let mut total = 0; - - for (_, data) in &self.categories { - total += data.total_time(); - } - - total + self.categories.iter().map(|(_, data)| data.total_time()).sum() } fn with_options(mut self, opts: &Options) -> CalculatedResults { @@ -225,6 +226,40 @@ impl SelfProfiler { }) } + #[inline] + pub fn incremental_load_result_start(&mut self, query_name: &'static str) { + self.record(ProfilerEvent::IncrementalLoadResultStart { + query_name, + time: Instant::now(), + }) + } + + #[inline] + pub fn incremental_load_result_end(&mut self, query_name: &'static str) { + self.record(ProfilerEvent::IncrementalLoadResultEnd { + query_name, + time: Instant::now(), + }) + } + + #[inline] + pub fn query_blocked_start(&mut self, query_name: &'static str, category: ProfileCategory) { + self.record(ProfilerEvent::QueryBlockedStart { + query_name, + category, + time: Instant::now(), + }) + } + + #[inline] + pub fn query_blocked_end(&mut self, query_name: &'static str, category: ProfileCategory) { + self.record(ProfilerEvent::QueryBlockedEnd { + query_name, + category, + time: Instant::now(), + }) + } + #[inline] fn record(&mut self, event: ProfilerEvent) { let thread_id = std::thread::current().id(); @@ -317,6 +352,10 @@ impl SelfProfiler { result_data.query_cache_stats.entry(query_name).or_insert((0, 0)); *totals += *count as u64; }, + //we don't summarize incremental load result events in the simple output mode + IncrementalLoadResultStart { .. } | IncrementalLoadResultEnd { .. } => { }, + //we don't summarize parallel query blocking in the simple output mode + QueryBlockedStart { .. } | QueryBlockedEnd { .. 
} => { }, } } @@ -361,9 +400,9 @@ impl SelfProfiler { .unwrap(); let mut categories: Vec<_> = results.categories.iter().collect(); - categories.sort_by(|(_, data1), (_, data2)| data2.total_time().cmp(&data1.total_time())); + categories.sort_by_cached_key(|(_, d)| d.total_time()); - for (category, data) in categories { + for (category, data) in categories.iter().rev() { let (category_hits, category_total) = data.total_cache_data(); let category_hit_percent = calculate_percent(category_hits, category_total); diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index ac55244d8d931..fc3ab7f734a79 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -1,6 +1,6 @@ use back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION}; use rustc_codegen_ssa::back::symbol_export; -use rustc_codegen_ssa::back::write::{ModuleConfig, CodegenContext, pre_lto_bitcode_filename}; +use rustc_codegen_ssa::back::write::{ModuleConfig, CodegenContext, FatLTOInput}; use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinShared, ThinModule}; use rustc_codegen_ssa::traits::*; use back::write::{self, DiagnosticHandlers, with_llvm_pmb, save_temp_bitcode, to_llvm_opt_settings}; @@ -21,7 +21,6 @@ use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use libc; use std::ffi::{CStr, CString}; -use std::fs; use std::ptr; use std::slice; use std::sync::Arc; @@ -133,7 +132,8 @@ fn prepare_lto(cgcx: &CodegenContext, /// Performs fat LTO by merging all modules into a single one and returning it /// for further optimization. pub(crate) fn run_fat(cgcx: &CodegenContext, - modules: Vec>, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, timeline: &mut Timeline) -> Result, FatalError> { @@ -142,7 +142,15 @@ pub(crate) fn run_fat(cgcx: &CodegenContext, let symbol_white_list = symbol_white_list.iter() .map(|c| c.as_ptr()) .collect::>(); - fat_lto(cgcx, &diag_handler, modules, upstream_modules, &symbol_white_list, timeline) + fat_lto( + cgcx, + &diag_handler, + modules, + cached_modules, + upstream_modules, + &symbol_white_list, + timeline, + ) } /// Performs thin LTO by performing necessary global analysis and returning two @@ -173,33 +181,17 @@ pub(crate) fn run_thin(cgcx: &CodegenContext, } pub(crate) fn prepare_thin( - cgcx: &CodegenContext, module: ModuleCodegen ) -> (String, ThinBuffer) { let name = module.name.clone(); let buffer = ThinBuffer::new(module.module_llvm.llmod()); - - // We emit the module after having serialized it into a ThinBuffer - // because only then it will contain the ThinLTO module summary. - if let Some(ref incr_comp_session_dir) = cgcx.incr_comp_session_dir { - if cgcx.config(module.kind).emit_pre_thin_lto_bc { - let path = incr_comp_session_dir - .join(pre_lto_bitcode_filename(&name)); - - fs::write(&path, buffer.data()).unwrap_or_else(|e| { - panic!("Error writing pre-lto-bitcode file `{}`: {}", - path.display(), - e); - }); - } - } - (name, buffer) } fn fat_lto(cgcx: &CodegenContext, diag_handler: &Handler, - mut modules: Vec>, + mut modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, mut serialized_modules: Vec<(SerializedModule, CString)>, symbol_white_list: &[*const libc::c_char], timeline: &mut Timeline) @@ -216,8 +208,14 @@ fn fat_lto(cgcx: &CodegenContext, // file copy operations in the backend work correctly. 
The only other kind // of module here should be an allocator one, and if your crate is smaller // than the allocator module then the size doesn't really matter anyway. - let (_, costliest_module) = modules.iter() + let costliest_module = modules.iter() .enumerate() + .filter_map(|(i, module)| { + match module { + FatLTOInput::InMemory(m) => Some((i, m)), + FatLTOInput::Serialized { .. } => None, + } + }) .filter(|&(_, module)| module.kind == ModuleKind::Regular) .map(|(i, module)| { let cost = unsafe { @@ -225,9 +223,38 @@ fn fat_lto(cgcx: &CodegenContext, }; (cost, i) }) - .max() - .expect("must be codegen'ing at least one module"); - let module = modules.remove(costliest_module); + .max(); + + // If we found a costliest module, we're good to go. Otherwise all our + // inputs were serialized which could happen in the case, for example, that + // all our inputs were incrementally reread from the cache and we're just + // re-executing the LTO passes. If that's the case deserialize the first + // module and create a linker with it. + let module: ModuleCodegen = match costliest_module { + Some((_cost, i)) => { + match modules.remove(i) { + FatLTOInput::InMemory(m) => m, + FatLTOInput::Serialized { .. } => unreachable!(), + } + } + None => { + let pos = modules.iter().position(|m| { + match m { + FatLTOInput::InMemory(_) => false, + FatLTOInput::Serialized { .. } => true, + } + }).expect("must have at least one serialized module"); + let (name, buffer) = match modules.remove(pos) { + FatLTOInput::Serialized { name, buffer } => (name, buffer), + FatLTOInput::InMemory(_) => unreachable!(), + }; + ModuleCodegen { + module_llvm: ModuleLlvm::parse(cgcx, &name, &buffer, diag_handler)?, + name, + kind: ModuleKind::Regular, + } + } + }; let mut serialized_bitcode = Vec::new(); { let (llcx, llmod) = { @@ -247,10 +274,20 @@ fn fat_lto(cgcx: &CodegenContext, // way we know of to do that is to serialize them to a string and them parse // them later. Not great but hey, that's why it's "fat" LTO, right? serialized_modules.extend(modules.into_iter().map(|module| { - let buffer = ModuleBuffer::new(module.module_llvm.llmod()); - let llmod_id = CString::new(&module.name[..]).unwrap(); - - (SerializedModule::Local(buffer), llmod_id) + match module { + FatLTOInput::InMemory(module) => { + let buffer = ModuleBuffer::new(module.module_llvm.llmod()); + let llmod_id = CString::new(&module.name[..]).unwrap(); + (SerializedModule::Local(buffer), llmod_id) + } + FatLTOInput::Serialized { name, buffer } => { + let llmod_id = CString::new(name).unwrap(); + (SerializedModule::Local(buffer), llmod_id) + } + } + })); + serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| { + (buffer, CString::new(wp.cgu_name.clone()).unwrap()) })); // For all serialized bitcode files we parse them and link them in as we did @@ -579,6 +616,16 @@ impl ModuleBuffer { llvm::LLVMRustModuleBufferCreate(m) }) } + + pub fn parse<'a>( + &self, + name: &str, + cx: &'a llvm::Context, + handler: &Handler, + ) -> Result<&'a llvm::Module, FatalError> { + let name = CString::new(name).unwrap(); + parse_module(cx, &name, self.data(), handler) + } } impl ModuleBufferMethods for ModuleBuffer { @@ -658,15 +705,12 @@ pub unsafe fn optimize_thin_module( // crates but for locally codegened modules we may be able to reuse // that LLVM Context and Module. 
let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); - let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO( + let llmod_raw = parse_module( llcx, - thin_module.data().as_ptr(), - thin_module.data().len(), - thin_module.shared.module_names[thin_module.idx].as_ptr(), - ).ok_or_else(|| { - let msg = "failed to parse bitcode for thin LTO module"; - write::llvm_err(&diag_handler, msg) - })? as *const _; + &thin_module.shared.module_names[thin_module.idx], + thin_module.data(), + &diag_handler, + )? as *const _; let module = ModuleCodegen { module_llvm: ModuleLlvm { llmod_raw, @@ -823,3 +867,22 @@ fn module_name_to_str(c_str: &CStr) -> &str { c_str.to_str().unwrap_or_else(|e| bug!("Encountered non-utf8 LLVM module name `{}`: {}", c_str.to_string_lossy(), e)) } + +fn parse_module<'a>( + cx: &'a llvm::Context, + name: &CStr, + data: &[u8], + diag_handler: &Handler, +) -> Result<&'a llvm::Module, FatalError> { + unsafe { + llvm::LLVMRustParseBitcodeForLTO( + cx, + data.as_ptr(), + data.len(), + name.as_ptr(), + ).ok_or_else(|| { + let msg = "failed to parse bitcode for LTO module"; + write::llvm_err(&diag_handler, msg) + }) + } +} diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index ad8db25ee95a0..b605badc153f0 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -54,7 +54,7 @@ extern crate tempfile; extern crate memmap; use rustc_codegen_ssa::traits::*; -use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig}; +use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig, FatLTOInput}; use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinModule}; use rustc_codegen_ssa::CompiledModule; use errors::{FatalError, Handler}; @@ -165,10 +165,11 @@ impl WriteBackendMethods for LlvmCodegenBackend { } fn run_fat_lto( cgcx: &CodegenContext, - modules: Vec>, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, timeline: &mut Timeline ) -> Result, FatalError> { - back::lto::run_fat(cgcx, modules, timeline) + back::lto::run_fat(cgcx, modules, cached_modules, timeline) } fn run_thin_lto( cgcx: &CodegenContext, @@ -204,10 +205,14 @@ impl WriteBackendMethods for LlvmCodegenBackend { back::write::codegen(cgcx, diag_handler, module, config, timeline) } fn prepare_thin( - cgcx: &CodegenContext, module: ModuleCodegen ) -> (String, Self::ThinBuffer) { - back::lto::prepare_thin(cgcx, module) + back::lto::prepare_thin(module) + } + fn serialize_module( + module: ModuleCodegen + ) -> (String, Self::ModuleBuffer) { + (module.name, back::lto::ModuleBuffer::new(module.module_llvm.llmod())) } fn run_lto_pass_manager( cgcx: &CodegenContext, @@ -375,6 +380,31 @@ impl ModuleLlvm { } } + fn parse( + cgcx: &CodegenContext, + name: &str, + buffer: &back::lto::ModuleBuffer, + handler: &Handler, + ) -> Result { + unsafe { + let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); + let llmod_raw = buffer.parse(name, llcx, handler)?; + let tm = match (cgcx.tm_factory.0)() { + Ok(m) => m, + Err(e) => { + handler.struct_err(&e).emit(); + return Err(FatalError) + } + }; + + Ok(ModuleLlvm { + llmod_raw, + llcx, + tm, + }) + } + } + fn llmod(&self) -> &llvm::Module { unsafe { &*self.llmod_raw diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 3232f4e8f51ac..52292f67b331d 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -1804,7 +1804,7 @@ extern "C" { CallbackPayload: *mut c_void, ); pub fn 
LLVMRustFreeThinLTOData(Data: &'static mut ThinLTOData); - pub fn LLVMRustParseBitcodeForThinLTO( + pub fn LLVMRustParseBitcodeForLTO( Context: &Context, Data: *const u8, len: usize, diff --git a/src/librustc_codegen_ssa/back/write.rs b/src/librustc_codegen_ssa/back/write.rs index 2922d326c3b35..20842553c2666 100644 --- a/src/librustc_codegen_ssa/back/write.rs +++ b/src/librustc_codegen_ssa/back/write.rs @@ -41,7 +41,7 @@ use std::sync::mpsc::{channel, Sender, Receiver}; use std::time::Instant; use std::thread; -const PRE_THIN_LTO_BC_EXT: &str = "pre-thin-lto.bc"; +const PRE_LTO_BC_EXT: &str = "pre-lto.bc"; /// Module-specific configuration for `optimize_and_codegen`. pub struct ModuleConfig { @@ -58,7 +58,7 @@ pub struct ModuleConfig { pub pgo_use: String, // Flags indicating which outputs to produce. - pub emit_pre_thin_lto_bc: bool, + pub emit_pre_lto_bc: bool, pub emit_no_opt_bc: bool, pub emit_bc: bool, pub emit_bc_compressed: bool, @@ -96,7 +96,7 @@ impl ModuleConfig { pgo_use: String::new(), emit_no_opt_bc: false, - emit_pre_thin_lto_bc: false, + emit_pre_lto_bc: false, emit_bc: false, emit_bc_compressed: false, emit_lto_bc: false, @@ -258,7 +258,7 @@ impl CodegenContext { fn generate_lto_work( cgcx: &CodegenContext, - needs_fat_lto: Vec>, + needs_fat_lto: Vec>, needs_thin_lto: Vec<(String, B::ThinBuffer)>, import_only_modules: Vec<(SerializedModule, WorkProduct)> ) -> Vec<(WorkItem, u64)> { @@ -270,9 +270,13 @@ fn generate_lto_work( let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() { assert!(needs_thin_lto.is_empty()); - assert!(import_only_modules.is_empty()); - let lto_module = B::run_fat_lto(cgcx, needs_fat_lto, &mut timeline) - .unwrap_or_else(|e| e.raise()); + let lto_module = B::run_fat_lto( + cgcx, + needs_fat_lto, + import_only_modules, + &mut timeline, + ) + .unwrap_or_else(|e| e.raise()); (vec![lto_module], vec![]) } else { assert!(needs_fat_lto.is_empty()); @@ -302,14 +306,14 @@ fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { sess.opts.output_types.contains_key(&OutputType::Exe) } -fn need_pre_thin_lto_bitcode_for_incr_comp(sess: &Session) -> bool { +fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool { if sess.opts.incremental.is_none() { return false } match sess.lto() { - Lto::Fat | Lto::No => false, + Lto::Fat | Lto::Thin | Lto::ThinLocal => true, } @@ -375,7 +379,7 @@ pub fn start_async_codegen( // Save all versions of the bytecode if we're saving our temporaries. 
if sess.opts.cg.save_temps { modules_config.emit_no_opt_bc = true; - modules_config.emit_pre_thin_lto_bc = true; + modules_config.emit_pre_lto_bc = true; modules_config.emit_bc = true; modules_config.emit_lto_bc = true; metadata_config.emit_bc = true; @@ -390,8 +394,8 @@ pub fn start_async_codegen( allocator_config.emit_bc_compressed = true; } - modules_config.emit_pre_thin_lto_bc = - need_pre_thin_lto_bitcode_for_incr_comp(sess); + modules_config.emit_pre_lto_bc = + need_pre_lto_bitcode_for_incr_comp(sess); modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as || tcx.sess.target.target.options.no_integrated_as; @@ -686,10 +690,18 @@ impl WorkItem { enum WorkItemResult { Compiled(CompiledModule), - NeedsFatLTO(ModuleCodegen), + NeedsFatLTO(FatLTOInput), NeedsThinLTO(String, B::ThinBuffer), } +pub enum FatLTOInput { + Serialized { + name: String, + buffer: B::ModuleBuffer, + }, + InMemory(ModuleCodegen), +} + fn execute_work_item( cgcx: &CodegenContext, work_item: WorkItem, @@ -771,6 +783,15 @@ fn execute_optimize_work_item( } }; + // If we're doing some form of incremental LTO then we need to be sure to + // save our module to disk first. + let bitcode = if cgcx.config(module.kind).emit_pre_lto_bc { + let filename = pre_lto_bitcode_filename(&module.name); + cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename)) + } else { + None + }; + Ok(match lto_type { ComputedLtoType::No => { let module = unsafe { @@ -779,10 +800,30 @@ fn execute_optimize_work_item( WorkItemResult::Compiled(module) } ComputedLtoType::Thin => { - let (name, thin_buffer) = B::prepare_thin(cgcx, module); + let (name, thin_buffer) = B::prepare_thin(module); + if let Some(path) = bitcode { + fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| { + panic!("Error writing pre-lto-bitcode file `{}`: {}", + path.display(), + e); + }); + } WorkItemResult::NeedsThinLTO(name, thin_buffer) } - ComputedLtoType::Fat => WorkItemResult::NeedsFatLTO(module), + ComputedLtoType::Fat => { + match bitcode { + Some(path) => { + let (name, buffer) = B::serialize_module(module); + fs::write(&path, buffer.data()).unwrap_or_else(|e| { + panic!("Error writing pre-lto-bitcode file `{}`: {}", + path.display(), + e); + }); + WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer }) + } + None => WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module)), + } + } }) } @@ -866,7 +907,7 @@ fn execute_lto_work_item( pub enum Message { Token(io::Result), NeedsFatLTO { - result: ModuleCodegen, + result: FatLTOInput, worker_id: usize, }, NeedsThinLTO { @@ -1877,7 +1918,7 @@ pub fn submit_pre_lto_module_to_llvm( } pub fn pre_lto_bitcode_filename(module_name: &str) -> String { - format!("{}.{}", module_name, PRE_THIN_LTO_BC_EXT) + format!("{}.{}", module_name, PRE_LTO_BC_EXT) } fn msvc_imps_needed(tcx: TyCtxt) -> bool { diff --git a/src/librustc_codegen_ssa/traits/write.rs b/src/librustc_codegen_ssa/traits/write.rs index e8ef815b32acb..d8fb7c608c8af 100644 --- a/src/librustc_codegen_ssa/traits/write.rs +++ b/src/librustc_codegen_ssa/traits/write.rs @@ -1,5 +1,5 @@ use crate::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; -use crate::back::write::{CodegenContext, ModuleConfig}; +use crate::back::write::{CodegenContext, ModuleConfig, FatLTOInput}; use crate::{CompiledModule, ModuleCodegen}; use rustc::dep_graph::WorkProduct; @@ -18,7 +18,8 @@ pub trait WriteBackendMethods: 'static + Sized + Clone { /// for further optimization. 
fn run_fat_lto( cgcx: &CodegenContext, - modules: Vec>, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, timeline: &mut Timeline, ) -> Result, FatalError>; /// Performs thin LTO by performing necessary global analysis and returning two @@ -51,9 +52,11 @@ pub trait WriteBackendMethods: 'static + Sized + Clone { timeline: &mut Timeline, ) -> Result; fn prepare_thin( - cgcx: &CodegenContext, module: ModuleCodegen ) -> (String, Self::ThinBuffer); + fn serialize_module( + module: ModuleCodegen + ) -> (String, Self::ModuleBuffer); fn run_lto_pass_manager( cgcx: &CodegenContext, llmod: &ModuleCodegen, diff --git a/src/librustc_lint/Cargo.toml b/src/librustc_lint/Cargo.toml index 82f7118df2d0b..fd2b635faefb4 100644 --- a/src/librustc_lint/Cargo.toml +++ b/src/librustc_lint/Cargo.toml @@ -8,7 +8,6 @@ edition = "2018" name = "rustc_lint" path = "lib.rs" crate-type = ["dylib"] -test = false [dependencies] log = "0.4" diff --git a/src/librustc_lint/nonstandard_style.rs b/src/librustc_lint/nonstandard_style.rs index 2dbafc7ede2a2..c2dd9a3d1b84f 100644 --- a/src/librustc_lint/nonstandard_style.rs +++ b/src/librustc_lint/nonstandard_style.rs @@ -38,66 +38,87 @@ declare_lint! { "types, variants, traits and type parameters should have camel case names" } -#[derive(Copy, Clone)] -pub struct NonCamelCaseTypes; +fn char_has_case(c: char) -> bool { + c.is_lowercase() || c.is_uppercase() +} -impl NonCamelCaseTypes { - fn check_case(&self, cx: &EarlyContext<'_>, sort: &str, ident: &Ident) { - fn char_has_case(c: char) -> bool { - c.is_lowercase() || c.is_uppercase() - } +fn is_camel_case(name: &str) -> bool { + let name = name.trim_matches('_'); + if name.is_empty() { + return true; + } - fn is_camel_case(name: &str) -> bool { - let name = name.trim_matches('_'); - if name.is_empty() { - return true; + // start with a non-lowercase letter rather than non-uppercase + // ones (some scripts don't have a concept of upper/lowercase) + !name.chars().next().unwrap().is_lowercase() + && !name.contains("__") + && !name.chars().collect::>().windows(2).any(|pair| { + // contains a capitalisable character followed by, or preceded by, an underscore + char_has_case(pair[0]) && pair[1] == '_' || char_has_case(pair[1]) && pair[0] == '_' + }) +} + +fn to_camel_case(s: &str) -> String { + s.trim_matches('_') + .split('_') + .filter(|component| !component.is_empty()) + .map(|component| { + let mut camel_cased_component = String::new(); + + let mut new_word = true; + let mut prev_is_lower_case = true; + + for c in component.chars() { + // Preserve the case if an uppercase letter follows a lowercase letter, so that + // `camelCase` is converted to `CamelCase`. 
+ if prev_is_lower_case && c.is_uppercase() { + new_word = true; + } + + if new_word { + camel_cased_component.push_str(&c.to_uppercase().to_string()); + } else { + camel_cased_component.push_str(&c.to_lowercase().to_string()); + } + + prev_is_lower_case = c.is_lowercase(); + new_word = false; } - // start with a non-lowercase letter rather than non-uppercase - // ones (some scripts don't have a concept of upper/lowercase) - !name.is_empty() && !name.chars().next().unwrap().is_lowercase() && - !name.contains("__") && !name.chars().collect::>().windows(2).any(|pair| { - // contains a capitalisable character followed by, or preceded by, an underscore - char_has_case(pair[0]) && pair[1] == '_' || - char_has_case(pair[1]) && pair[0] == '_' - }) - } + camel_cased_component + }) + .fold( + (String::new(), None), + |(acc, prev): (String, Option), next| { + // separate two components with an underscore if their boundary cannot + // be distinguished using a uppercase/lowercase case distinction + let join = if let Some(prev) = prev { + let l = prev.chars().last().unwrap(); + let f = next.chars().next().unwrap(); + !char_has_case(l) && !char_has_case(f) + } else { + false + }; + (acc + if join { "_" } else { "" } + &next, Some(next)) + }, + ) + .0 +} - fn to_camel_case(s: &str) -> String { - s.trim_matches('_') - .split('_') - .map(|word| { - word.chars().enumerate().map(|(i, c)| if i == 0 { - c.to_uppercase().collect::() - } else { - c.to_lowercase().collect() - }) - .collect::() - }) - .filter(|x| !x.is_empty()) - .fold((String::new(), None), |(acc, prev): (String, Option), next| { - // separate two components with an underscore if their boundary cannot - // be distinguished using a uppercase/lowercase case distinction - let join = if let Some(prev) = prev { - let l = prev.chars().last().unwrap(); - let f = next.chars().next().unwrap(); - !char_has_case(l) && !char_has_case(f) - } else { false }; - (acc + if join { "_" } else { "" } + &next, Some(next)) - }).0 - } +#[derive(Copy, Clone)] +pub struct NonCamelCaseTypes; +impl NonCamelCaseTypes { + fn check_case(&self, cx: &EarlyContext<'_>, sort: &str, ident: &Ident) { let name = &ident.name.as_str(); if !is_camel_case(name) { - let c = to_camel_case(name); - - let msg = format!("{} `{}` should have a camel case name", sort, name); + let msg = format!("{} `{}` should have an upper camel case name", sort, name); cx.struct_span_lint(NON_CAMEL_CASE_TYPES, ident.span, &msg) .span_suggestion( ident.span, - "convert the identifier to camel case", - c, + "convert the identifier to upper camel case", + to_camel_case(name), Applicability::MaybeIncorrect, ) .emit(); @@ -119,11 +140,7 @@ impl EarlyLintPass for NonCamelCaseTypes { fn check_item(&mut self, cx: &EarlyContext<'_>, it: &ast::Item) { let has_repr_c = it.attrs .iter() - .any(|attr| { - attr::find_repr_attrs(&cx.sess.parse_sess, attr) - .iter() - .any(|r| r == &attr::ReprC) - }); + .any(|attr| attr::find_repr_attrs(&cx.sess.parse_sess, attr).contains(&attr::ReprC)); if has_repr_c { return; @@ -439,3 +456,28 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonUpperCaseGlobals { } } } + +#[cfg(test)] +mod tests { + use super::{is_camel_case, to_camel_case}; + + #[test] + fn camel_case() { + assert!(!is_camel_case("userData")); + assert_eq!(to_camel_case("userData"), "UserData"); + + assert!(is_camel_case("X86_64")); + + assert!(!is_camel_case("X86__64")); + assert_eq!(to_camel_case("X86__64"), "X86_64"); + + assert!(!is_camel_case("Abc_123")); + assert_eq!(to_camel_case("Abc_123"), "Abc123"); + + 
assert!(!is_camel_case("A1_b2_c3")); + assert_eq!(to_camel_case("A1_b2_c3"), "A1B2C3"); + + assert!(!is_camel_case("ONE_TWO_THREE")); + assert_eq!(to_camel_case("ONE_TWO_THREE"), "OneTwoThree"); + } +} diff --git a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs index f741af0b228b5..081c458bfc17a 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs @@ -244,7 +244,8 @@ impl<'tcx> RegionInferenceContext<'tcx> { if let (Some(f), Some(o)) = (self.to_error_region(fr), self.to_error_region(outlived_fr)) { let tables = infcx.tcx.typeck_tables_of(mir_def_id); let nice = NiceRegionError::new_from_span(infcx, span, o, f, Some(tables)); - if let Some(_error_reported) = nice.try_report_from_nll() { + if let Some(diag) = nice.try_report_from_nll() { + diag.buffer(errors_buffer); return; } } diff --git a/src/librustc_mir/borrow_check/nll/region_infer/mod.rs b/src/librustc_mir/borrow_check/nll/region_infer/mod.rs index 6de05777fe888..cbeb5dc206ee6 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/mod.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/mod.rs @@ -15,7 +15,7 @@ use rustc::mir::{ ConstraintCategory, Local, Location, Mir, }; use rustc::ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable}; -use rustc::util::common; +use rustc::util::common::{self, ErrorReported}; use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::graph::scc::Sccs; @@ -763,20 +763,26 @@ impl<'tcx> RegionInferenceContext<'tcx> { debug!("try_promote_type_test: ur={:?}", ur); - let non_local_ub = self.universal_region_relations.non_local_upper_bound(ur); + let non_local_ub = self.universal_region_relations.non_local_upper_bounds(&ur); debug!("try_promote_type_test: non_local_ub={:?}", non_local_ub); - assert!(self.universal_regions.is_universal_region(non_local_ub)); - assert!(!self.universal_regions.is_local_free_region(non_local_ub)); - - let requirement = ClosureOutlivesRequirement { - subject, - outlived_free_region: non_local_ub, - blame_span: locations.span(mir), - category: ConstraintCategory::Boring, - }; - debug!("try_promote_type_test: pushing {:#?}", requirement); - propagated_outlives_requirements.push(requirement); + // This is slightly too conservative. To show T: '1, given `'2: '1` + // and `'3: '1` we only need to prove that T: '2 *or* T: '3, but to + // avoid potential non-determinism we approximate this by requiring + // T: '1 and T: '2. + for &upper_bound in non_local_ub { + debug_assert!(self.universal_regions.is_universal_region(upper_bound)); + debug_assert!(!self.universal_regions.is_local_free_region(upper_bound)); + + let requirement = ClosureOutlivesRequirement { + subject, + outlived_free_region: upper_bound, + blame_span: locations.span(mir), + category: ConstraintCategory::Boring, + }; + debug!("try_promote_type_test: pushing {:#?}", requirement); + propagated_outlives_requirements.push(requirement); + } } true } @@ -1157,63 +1163,109 @@ impl<'tcx> RegionInferenceContext<'tcx> { .is_none() ); + // Only check all of the relations for the main representative of each + // SCC, otherwise just check that we outlive said representative. This + // reduces the number of redundant relations propagated out of + // closures. 
+ // Note that the representative will be a universal region if there is + // one in this SCC, so we will always check the representative here. + let representative = self.scc_representatives[longer_fr_scc]; + if representative != longer_fr { + self.check_universal_region_relation( + longer_fr, + representative, + infcx, + mir, + mir_def_id, + propagated_outlives_requirements, + errors_buffer, + ); + return; + } + // Find every region `o` such that `fr: o` // (because `fr` includes `end(o)`). for shorter_fr in self.scc_values.universal_regions_outlived_by(longer_fr_scc) { - // If it is known that `fr: o`, carry on. - if self.universal_region_relations - .outlives(longer_fr, shorter_fr) - { - continue; + if let Some(ErrorReported) = self.check_universal_region_relation( + longer_fr, + shorter_fr, + infcx, + mir, + mir_def_id, + propagated_outlives_requirements, + errors_buffer, + ) { + // continuing to iterate just reports more errors than necessary + return; } + } + } - debug!( - "check_universal_region: fr={:?} does not outlive shorter_fr={:?}", - longer_fr, shorter_fr, - ); + fn check_universal_region_relation( + &self, + longer_fr: RegionVid, + shorter_fr: RegionVid, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + mir: &Mir<'tcx>, + mir_def_id: DefId, + propagated_outlives_requirements: &mut Option<&mut Vec>>, + errors_buffer: &mut Vec, + ) -> Option { + // If it is known that `fr: o`, carry on. + if self.universal_region_relations + .outlives(longer_fr, shorter_fr) + { + return None; + } - let blame_span_category = self.find_outlives_blame_span(mir, longer_fr, shorter_fr); - - if let Some(propagated_outlives_requirements) = propagated_outlives_requirements { - // Shrink `fr` until we find a non-local region (if we do). - // We'll call that `fr-` -- it's ever so slightly smaller than `fr`. - if let Some(fr_minus) = self.universal_region_relations - .non_local_lower_bound(longer_fr) - { - debug!("check_universal_region: fr_minus={:?}", fr_minus); - - // Grow `shorter_fr` until we find a non-local - // region. (We always will.) We'll call that - // `shorter_fr+` -- it's ever so slightly larger than - // `fr`. - let shorter_fr_plus = self.universal_region_relations - .non_local_upper_bound(shorter_fr); - debug!( - "check_universal_region: shorter_fr_plus={:?}", - shorter_fr_plus - ); + debug!( + "check_universal_region_relation: fr={:?} does not outlive shorter_fr={:?}", + longer_fr, shorter_fr, + ); + if let Some(propagated_outlives_requirements) = propagated_outlives_requirements { + // Shrink `longer_fr` until we find a non-local region (if we do). + // We'll call it `fr-` -- it's ever so slightly smaller than + // `longer_fr`. + + if let Some(fr_minus) = self + .universal_region_relations + .non_local_lower_bound(longer_fr) + { + debug!("check_universal_region: fr_minus={:?}", fr_minus); + + let blame_span_category = self.find_outlives_blame_span(mir, longer_fr, shorter_fr); + + // Grow `shorter_fr` until we find some non-local regions. (We + // always will.) We'll call them `shorter_fr+` -- they're ever + // so slightly larger than `shorter_fr`. 
+ let shorter_fr_plus = self.universal_region_relations + .non_local_upper_bounds(&shorter_fr); + debug!( + "check_universal_region: shorter_fr_plus={:?}", + shorter_fr_plus + ); + for &&fr in &shorter_fr_plus { // Push the constraint `fr-: shorter_fr+` propagated_outlives_requirements.push(ClosureOutlivesRequirement { subject: ClosureOutlivesSubject::Region(fr_minus), - outlived_free_region: shorter_fr_plus, + outlived_free_region: fr, blame_span: blame_span_category.1, category: blame_span_category.0, }); - continue; } + return None; } - - // If we are not in a context where we can propagate - // errors, or we could not shrink `fr` to something - // smaller, then just report an error. - // - // Note: in this case, we use the unapproximated regions - // to report the error. This gives better error messages - // in some cases. - self.report_error(mir, infcx, mir_def_id, longer_fr, shorter_fr, errors_buffer); - return; // continuing to iterate just reports more errors than necessary } + + // If we are not in a context where we can't propagate errors, or we + // could not shrink `fr` to something smaller, then just report an + // error. + // + // Note: in this case, we use the unapproximated regions to report the + // error. This gives better error messages in some cases. + self.report_error(mir, infcx, mir_def_id, longer_fr, shorter_fr, errors_buffer); + Some(ErrorReported) } fn check_bound_universal_region<'gcx>( diff --git a/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs b/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs index 7ddfd55dbbb94..3b663ef6dad61 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs @@ -105,44 +105,89 @@ impl UniversalRegionRelations<'tcx> { /// Finds an "upper bound" for `fr` that is not local. In other /// words, returns the smallest (*) known region `fr1` that (a) - /// outlives `fr` and (b) is not local. This cannot fail, because - /// we will always find `'static` at worst. + /// outlives `fr` and (b) is not local. /// - /// (*) If there are multiple competing choices, we pick the "postdominating" - /// one. See `TransitiveRelation::postdom_upper_bound` for details. - crate fn non_local_upper_bound(&self, fr: RegionVid) -> RegionVid { + /// (*) If there are multiple competing choices, we return all of them. + crate fn non_local_upper_bounds(&'a self, fr: &'a RegionVid) -> Vec<&'a RegionVid> { debug!("non_local_upper_bound(fr={:?})", fr); - self.non_local_bound(&self.inverse_outlives, fr) + let res = self.non_local_bounds(&self.inverse_outlives, fr); + assert!(!res.is_empty(), "can't find an upper bound!?"); + res + } + + /// Returns the "postdominating" bound of the set of + /// `non_local_upper_bounds` for the given region. + crate fn non_local_upper_bound(&self, fr: RegionVid) -> RegionVid { + let upper_bounds = self.non_local_upper_bounds(&fr); + + // In case we find more than one, reduce to one for + // convenience. This is to prevent us from generating more + // complex constraints, but it will cause spurious errors. + let post_dom = self + .inverse_outlives + .mutual_immediate_postdominator(upper_bounds); + + debug!("non_local_bound: post_dom={:?}", post_dom); + + post_dom + .and_then(|&post_dom| { + // If the mutual immediate postdom is not local, then + // there is no non-local result we can return. 
+ if !self.universal_regions.is_local_free_region(post_dom) { + Some(post_dom) + } else { + None + } + }) .unwrap_or(self.universal_regions.fr_static) } + /// Finds a "lower bound" for `fr` that is not local. In other /// words, returns the largest (*) known region `fr1` that (a) is - /// outlived by `fr` and (b) is not local. This cannot fail, - /// because we will always find `'static` at worst. + /// outlived by `fr` and (b) is not local. /// /// (*) If there are multiple competing choices, we pick the "postdominating" /// one. See `TransitiveRelation::postdom_upper_bound` for details. crate fn non_local_lower_bound(&self, fr: RegionVid) -> Option { debug!("non_local_lower_bound(fr={:?})", fr); - self.non_local_bound(&self.outlives, fr) + let lower_bounds = self.non_local_bounds(&self.outlives, &fr); + + // In case we find more than one, reduce to one for + // convenience. This is to prevent us from generating more + // complex constraints, but it will cause spurious errors. + let post_dom = self + .outlives + .mutual_immediate_postdominator(lower_bounds); + + debug!("non_local_bound: post_dom={:?}", post_dom); + + post_dom + .and_then(|&post_dom| { + // If the mutual immediate postdom is not local, then + // there is no non-local result we can return. + if !self.universal_regions.is_local_free_region(post_dom) { + Some(post_dom) + } else { + None + } + }) } - /// Helper for `non_local_upper_bound` and - /// `non_local_lower_bound`. Repeatedly invokes `postdom_parent` - /// until we find something that is not local. Returns `None` if we - /// never do so. - fn non_local_bound( + /// Helper for `non_local_upper_bounds` and `non_local_lower_bounds`. + /// Repeatedly invokes `postdom_parent` until we find something that is not + /// local. Returns `None` if we never do so. + fn non_local_bounds<'a>( &self, - relation: &TransitiveRelation, - fr0: RegionVid, - ) -> Option { + relation: &'a TransitiveRelation, + fr0: &'a RegionVid, + ) -> Vec<&'a RegionVid> { // This method assumes that `fr0` is one of the universally // quantified region variables. - assert!(self.universal_regions.is_universal_region(fr0)); + assert!(self.universal_regions.is_universal_region(*fr0)); let mut external_parents = vec![]; - let mut queue = vec![&fr0]; + let mut queue = vec![fr0]; // Keep expanding `fr` into its parents until we reach // non-local regions. @@ -157,24 +202,7 @@ impl UniversalRegionRelations<'tcx> { debug!("non_local_bound: external_parents={:?}", external_parents); - // In case we find more than one, reduce to one for - // convenience. This is to prevent us from generating more - // complex constraints, but it will cause spurious errors. - let post_dom = relation - .mutual_immediate_postdominator(external_parents) - .cloned(); - - debug!("non_local_bound: post_dom={:?}", post_dom); - - post_dom.and_then(|post_dom| { - // If the mutual immediate postdom is not local, then - // there is no non-local result we can return. - if !self.universal_regions.is_local_free_region(post_dom) { - Some(post_dom) - } else { - None - } - }) + external_parents } /// Returns `true` if fr1 is known to outlive fr2. 
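The `free_region_relations` hunk above splits the old lookup in two: the `non_local_bounds` walk now returns *every* non-local region it reaches by climbing the (inverse-)outlives relation, and only the single-result callers (`non_local_upper_bound`, `non_local_lower_bound`) still reduce that set to a mutual immediate postdominator. A toy sketch of the walk, using a plain map instead of `TransitiveRelation` and `u32` in place of `RegionVid` (all names here are made up for illustration):

```rust
use std::collections::{HashMap, HashSet};

type Region = u32;

// Climb from `start` to its parents in the relation, collecting every
// non-local region reached; local parents are expanded further.
// (Cycle handling is omitted for brevity.)
fn non_local_bounds(
    parents: &HashMap<Region, Vec<Region>>,
    local: &HashSet<Region>,
    start: Region,
) -> Vec<Region> {
    let mut external = Vec::new();
    let mut queue = vec![start];
    while let Some(region) = queue.pop() {
        for &parent in parents.get(&region).into_iter().flatten() {
            if local.contains(&parent) {
                queue.push(parent);
            } else {
                external.push(parent);
            }
        }
    }
    external
}

fn main() {
    // '0 is local to the closure; '1 and '2 are free regions that outlive it.
    let parents: HashMap<Region, Vec<Region>> = [(0, vec![1, 2])].into_iter().collect();
    let local: HashSet<Region> = [0].into_iter().collect();
    // The old code would pick a single post-dominating bound here; the new
    // code hands back both so the caller can propagate one requirement each.
    assert_eq!(non_local_bounds(&parents, &local, 0), vec![1, 2]);
}
```

As the comment added in `try_promote_type_test` explains, emitting a requirement for every bound is slightly more conservative than proving just one of them, but it avoids depending on which postdominator the relation happens to choose.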
diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index cb71ff27ceb53..28d2a397f3ed6 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ b/src/librustc_mir/build/matches/mod.rs @@ -7,7 +7,7 @@ use crate::build::scope::{CachedBlock, DropKind}; use crate::build::ForGuard::{self, OutsideGuard, RefWithinGuard, ValWithinGuard}; use crate::build::{BlockAnd, BlockAndExtension, Builder}; use crate::build::{GuardFrame, GuardFrameLocal, LocalsForNode}; -use crate::hair::*; +use crate::hair::{self, *}; use rustc::mir::*; use rustc::ty::{self, CanonicalUserTypeAnnotation, Ty}; use rustc::ty::layout::VariantIdx; @@ -283,9 +283,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }, .. }, - user_ty: pat_ascription_ty, - variance: _, - user_ty_span, + ascription: hair::pattern::Ascription { + user_ty: pat_ascription_ty, + variance: _, + user_ty_span, + }, } => { let place = self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard); @@ -560,9 +562,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } PatternKind::AscribeUserType { ref subpattern, - ref user_ty, - user_ty_span, - variance: _, + ascription: hair::pattern::Ascription { + ref user_ty, + user_ty_span, + variance: _, + }, } => { // This corresponds to something like // diff --git a/src/librustc_mir/build/matches/simplify.rs b/src/librustc_mir/build/matches/simplify.rs index 6be9ccb27036e..b8e38e40b6347 100644 --- a/src/librustc_mir/build/matches/simplify.rs +++ b/src/librustc_mir/build/matches/simplify.rs @@ -14,7 +14,7 @@ use crate::build::Builder; use crate::build::matches::{Ascription, Binding, MatchPair, Candidate}; -use crate::hair::*; +use crate::hair::{self, *}; use rustc::ty; use rustc::ty::layout::{Integer, IntegerExt, Size}; use syntax::attr::{SignedInt, UnsignedInt}; @@ -58,9 +58,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { match *match_pair.pattern.kind { PatternKind::AscribeUserType { ref subpattern, - variance, - ref user_ty, - user_ty_span + ascription: hair::pattern::Ascription { + variance, + ref user_ty, + user_ty_span, + }, } => { // Apply the type ascription to the value at `match_pair.place`, which is the // value being matched, taking the variance field into account. 
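The `matches` changes above (and the `hair` changes that follow) group the three type-ascription fields — `user_ty`, `variance`, `user_ty_span` — into a single `Ascription` struct, so every `PatternKind::AscribeUserType` match destructures one nested struct instead of three loose fields, and `lower_range_expr` can later hand the whole ascription around as one value. A toy illustration of that shape, with stand-in types rather than the real `hair::pattern` definitions:

```rust
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
enum Variance { Covariant, Contravariant }

#[derive(Debug, Clone, Copy)]
struct Span(u32);

// Stand-in for hair::pattern::Ascription: the grouped ascription fields.
struct Ascription {
    user_ty: String, // stands in for PatternTypeProjection<'tcx>
    variance: Variance,
    user_ty_span: Span,
}

enum PatternKind {
    Wild,
    AscribeUserType { ascription: Ascription, subpattern: Box<PatternKind> },
}

fn describe(kind: &PatternKind) -> String {
    match kind {
        PatternKind::Wild => "_".to_string(),
        // Destructure the nested struct in place, just like the updated arms
        // in build/matches and hair/pattern do.
        PatternKind::AscribeUserType {
            ascription: Ascription { user_ty, variance, user_ty_span },
            subpattern,
        } => format!(
            "{}: {} (variance: {:?}, span: {:?})",
            describe(subpattern), user_ty, variance, user_ty_span
        ),
    }
}

fn main() {
    let pat = PatternKind::AscribeUserType {
        ascription: Ascription {
            user_ty: "&'static str".to_string(),
            // constants use Contravariant, per the `variance` docs in hair/pattern
            variance: Variance::Contravariant,
            user_ty_span: Span(0),
        },
        subpattern: Box::new(PatternKind::Wild),
    };
    println!("{}", describe(&pat));
}
```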
diff --git a/src/librustc_mir/hair/cx/block.rs b/src/librustc_mir/hair/cx/block.rs index 38c3e68a441f4..4a64ddb73fc2e 100644 --- a/src/librustc_mir/hair/cx/block.rs +++ b/src/librustc_mir/hair/cx/block.rs @@ -1,4 +1,4 @@ -use crate::hair::*; +use crate::hair::{self, *}; use crate::hair::cx::Cx; use crate::hair::cx::to_ref::ToRef; use rustc::middle::region; @@ -83,10 +83,12 @@ fn mirror_stmts<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, ty: pattern.ty, span: pattern.span, kind: Box::new(PatternKind::AscribeUserType { - user_ty: PatternTypeProjection::from_user_type(user_ty), - user_ty_span: ty.span, + ascription: hair::pattern::Ascription { + user_ty: PatternTypeProjection::from_user_type(user_ty), + user_ty_span: ty.span, + variance: ty::Variance::Covariant, + }, subpattern: pattern, - variance: ty::Variance::Covariant, }) }; } diff --git a/src/librustc_mir/hair/pattern/_match.rs b/src/librustc_mir/hair/pattern/_match.rs index c52a57e17c60b..1c7e1aa4d71e0 100644 --- a/src/librustc_mir/hair/pattern/_match.rs +++ b/src/librustc_mir/hair/pattern/_match.rs @@ -871,18 +871,24 @@ impl<'tcx> IntRange<'tcx> { } fn from_pat(tcx: TyCtxt<'_, 'tcx, 'tcx>, - pat: &Pattern<'tcx>) + mut pat: &Pattern<'tcx>) -> Option> { - Self::from_ctor(tcx, &match pat.kind { - box PatternKind::Constant { value } => ConstantValue(value), - box PatternKind::Range(PatternRange { lo, hi, ty, end }) => ConstantRange( - lo.to_bits(tcx, ty::ParamEnv::empty().and(ty)).unwrap(), - hi.to_bits(tcx, ty::ParamEnv::empty().and(ty)).unwrap(), - ty, - end, - ), - _ => return None, - }) + let range = loop { + match pat.kind { + box PatternKind::Constant { value } => break ConstantValue(value), + box PatternKind::Range(PatternRange { lo, hi, ty, end }) => break ConstantRange( + lo.to_bits(tcx, ty::ParamEnv::empty().and(ty)).unwrap(), + hi.to_bits(tcx, ty::ParamEnv::empty().and(ty)).unwrap(), + ty, + end, + ), + box PatternKind::AscribeUserType { ref subpattern, .. } => { + pat = subpattern; + }, + _ => return None, + } + }; + Self::from_ctor(tcx, &range) } // The return value of `signed_bias` should be XORed with an endpoint to encode/decode it. diff --git a/src/librustc_mir/hair/pattern/mod.rs b/src/librustc_mir/hair/pattern/mod.rs index fc3af3295cc11..2012086ea6649 100644 --- a/src/librustc_mir/hair/pattern/mod.rs +++ b/src/librustc_mir/hair/pattern/mod.rs @@ -58,7 +58,7 @@ pub struct Pattern<'tcx> { } -#[derive(Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq)] pub struct PatternTypeProjection<'tcx> { pub user_ty: CanonicalUserType<'tcx>, } @@ -87,33 +87,38 @@ impl<'tcx> PatternTypeProjection<'tcx> { } } +#[derive(Copy, Clone, Debug, PartialEq)] +pub struct Ascription<'tcx> { + pub user_ty: PatternTypeProjection<'tcx>, + /// Variance to use when relating the type `user_ty` to the **type of the value being + /// matched**. Typically, this is `Variance::Covariant`, since the value being matched must + /// have a type that is some subtype of the ascribed type. + /// + /// Note that this variance does not apply for any bindings within subpatterns. The type + /// assigned to those bindings must be exactly equal to the `user_ty` given here. + /// + /// The only place where this field is not `Covariant` is when matching constants, where + /// we currently use `Contravariant` -- this is because the constant type just needs to + /// be "comparable" to the type of the input value. So, for example: + /// + /// ```text + /// match x { "foo" => .. } + /// ``` + /// + /// requires that `&'static str <: T_x`, where `T_x` is the type of `x`. 
Really, we should + /// probably be checking for a `PartialEq` impl instead, but this preserves the behavior + /// of the old type-check for now. See #57280 for details. + pub variance: ty::Variance, + pub user_ty_span: Span, +} + #[derive(Clone, Debug)] pub enum PatternKind<'tcx> { Wild, AscribeUserType { - user_ty: PatternTypeProjection<'tcx>, + ascription: Ascription<'tcx>, subpattern: Pattern<'tcx>, - /// Variance to use when relating the type `user_ty` to the **type of the value being - /// matched**. Typically, this is `Variance::Covariant`, since the value being matched must - /// have a type that is some subtype of the ascribed type. - /// - /// Note that this variance does not apply for any bindings within subpatterns. The type - /// assigned to those bindings must be exactly equal to the `user_ty` given here. - /// - /// The only place where this field is not `Covariant` is when matching constants, where - /// we currently use `Contravariant` -- this is because the constant type just needs to - /// be "comparable" to the type of the input value. So, for example: - /// - /// ```text - /// match x { "foo" => .. } - /// ``` - /// - /// requires that `&'static str <: T_x`, where `T_x` is the type of `x`. Really, we should - /// probably be checking for a `PartialEq` impl instead, but this preserves the behavior - /// of the old type-check for now. See #57280 for details. - variance: ty::Variance, - user_ty_span: Span, }, /// `x`, `ref x`, `x @ P`, etc. @@ -169,18 +174,7 @@ pub enum PatternKind<'tcx> { }, } -impl<'tcx> PatternKind<'tcx> { - /// If this is a `PatternKind::AscribeUserType` then return the subpattern kind, otherwise - /// return this pattern kind. - fn with_user_type_ascription_subpattern(self) -> Self { - match self { - PatternKind::AscribeUserType { subpattern: Pattern { box kind, .. }, .. } => kind, - kind => kind, - } - } -} - -#[derive(Clone, Copy, Debug, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq)] pub struct PatternRange<'tcx> { pub lo: ty::Const<'tcx>, pub hi: ty::Const<'tcx>, @@ -407,6 +401,19 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ) } + fn lower_range_expr( + &mut self, + expr: &'tcx hir::Expr, + ) -> (PatternKind<'tcx>, Option>) { + match self.lower_lit(expr) { + PatternKind::AscribeUserType { + ascription: lo_ascription, + subpattern: Pattern { kind: box kind, .. }, + } => (kind, Some(lo_ascription)), + kind => (kind, None), + } + } + fn lower_pattern_unadjusted(&mut self, pat: &'tcx hir::Pat) -> Pattern<'tcx> { let mut ty = self.tables.node_type(pat.hir_id); @@ -416,14 +423,10 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { PatKind::Lit(ref value) => self.lower_lit(value), PatKind::Range(ref lo_expr, ref hi_expr, end) => { - match ( - // Look for `PatternKind::Constant` patterns inside of any - // `PatternKind::AscribeUserType` patterns. Type ascriptions can be safely - // ignored for the purposes of lowering a range correctly - these are checked - // elsewhere for well-formedness. 
- self.lower_lit(lo_expr).with_user_type_ascription_subpattern(), - self.lower_lit(hi_expr).with_user_type_ascription_subpattern(), - ) { + let (lo, lo_ascription) = self.lower_range_expr(lo_expr); + let (hi, hi_ascription) = self.lower_range_expr(hi_expr); + + let mut kind = match (lo, hi) { (PatternKind::Constant { value: lo }, PatternKind::Constant { value: hi }) => { use std::cmp::Ordering; let cmp = compare_const_vals( @@ -472,17 +475,33 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { PatternKind::Wild } } - } + }, ref pats => { self.tcx.sess.delay_span_bug( pat.span, - &format!("found bad range pattern `{:?}` outside of error recovery", - pats), + &format!( + "found bad range pattern `{:?}` outside of error recovery", + pats, + ), ); PatternKind::Wild + }, + }; + + // If we are handling a range with associated constants (e.g. + // `Foo::<'a>::A..=Foo::B`), we need to put the ascriptions for the associated + // constants somewhere. Have them on the range pattern. + for ascription in &[lo_ascription, hi_ascription] { + if let Some(ascription) = ascription { + kind = PatternKind::AscribeUserType { + ascription: *ascription, + subpattern: Pattern { span: pat.span, ty, kind: Box::new(kind), }, + }; } } + + kind } PatKind::Path(ref qpath) => { @@ -758,9 +777,11 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ty, kind: Box::new(kind), }, - user_ty: PatternTypeProjection::from_user_type(user_ty), - user_ty_span: span, - variance: ty::Variance::Covariant, + ascription: Ascription { + user_ty: PatternTypeProjection::from_user_type(user_ty), + user_ty_span: span, + variance: ty::Variance::Covariant, + }, }; } @@ -810,11 +831,13 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { kind: Box::new( PatternKind::AscribeUserType { subpattern: pattern, - /// Note that use `Contravariant` here. See the - /// `variance` field documentation for details. - variance: ty::Variance::Contravariant, - user_ty, - user_ty_span: span, + ascription: Ascription { + /// Note that use `Contravariant` here. See the + /// `variance` field documentation for details. + variance: ty::Variance::Contravariant, + user_ty, + user_ty_span: span, + }, } ), ty: value.ty, @@ -1107,14 +1130,18 @@ impl<'tcx> PatternFoldable<'tcx> for PatternKind<'tcx> { PatternKind::Wild => PatternKind::Wild, PatternKind::AscribeUserType { ref subpattern, - variance, - ref user_ty, - user_ty_span, + ascription: Ascription { + variance, + ref user_ty, + user_ty_span, + }, } => PatternKind::AscribeUserType { subpattern: subpattern.fold_with(folder), - user_ty: user_ty.fold_with(folder), - variance, - user_ty_span, + ascription: Ascription { + user_ty: user_ty.fold_with(folder), + variance, + user_ty_span, + }, }, PatternKind::Binding { mutability, diff --git a/src/rustllvm/PassWrapper.cpp b/src/rustllvm/PassWrapper.cpp index 18d277be21a16..25595e14982ae 100644 --- a/src/rustllvm/PassWrapper.cpp +++ b/src/rustllvm/PassWrapper.cpp @@ -1092,10 +1092,10 @@ LLVMRustThinLTOBufferLen(const LLVMRustThinLTOBuffer *Buffer) { // processing. We'll call this once per module optimized through ThinLTO, and // it'll be called concurrently on many threads. 
extern "C" LLVMModuleRef -LLVMRustParseBitcodeForThinLTO(LLVMContextRef Context, - const char *data, - size_t len, - const char *identifier) { +LLVMRustParseBitcodeForLTO(LLVMContextRef Context, + const char *data, + size_t len, + const char *identifier) { StringRef Data(data, len); MemoryBufferRef Buffer(Data, identifier); unwrap(Context)->enableDebugTypeODRUniquing(); diff --git a/src/test/incremental/lto.rs b/src/test/incremental/lto.rs new file mode 100644 index 0000000000000..2a3e3c2467cdc --- /dev/null +++ b/src/test/incremental/lto.rs @@ -0,0 +1,40 @@ +// no-prefer-dynamic +// revisions:rpass1 rpass2 +// compile-flags: -C lto + +mod x { + pub struct X { + x: u32, y: u32, + } + + #[cfg(rpass1)] + fn make() -> X { + X { x: 22, y: 0 } + } + + #[cfg(rpass2)] + fn make() -> X { + X { x: 11, y: 11 } + } + + pub fn new() -> X { + make() + } + + pub fn sum(x: &X) -> u32 { + x.x + x.y + } +} + +mod y { + use x; + + pub fn assert_sum() -> bool { + let x = x::new(); + x::sum(&x) == 22 + } +} + +pub fn main() { + y::assert_sum(); +} diff --git a/src/test/ui/lint/lint-group-nonstandard-style.rs b/src/test/ui/lint/lint-group-nonstandard-style.rs index 0386daaa59b07..bd7f327bc0f63 100644 --- a/src/test/ui/lint/lint-group-nonstandard-style.rs +++ b/src/test/ui/lint/lint-group-nonstandard-style.rs @@ -19,7 +19,7 @@ mod test { fn CamelCase() {} //~ WARN should have a snake - struct snake_case; //~ WARN should have a camel + struct snake_case; //~ WARN should have an upper camel } } diff --git a/src/test/ui/lint/lint-group-nonstandard-style.stderr b/src/test/ui/lint/lint-group-nonstandard-style.stderr index f3c7d70054b77..ab36cda57ec80 100644 --- a/src/test/ui/lint/lint-group-nonstandard-style.stderr +++ b/src/test/ui/lint/lint-group-nonstandard-style.stderr @@ -1,8 +1,8 @@ -warning: type `snake_case` should have a camel case name +warning: type `snake_case` should have an upper camel case name --> $DIR/lint-group-nonstandard-style.rs:22:16 | -LL | struct snake_case; //~ WARN should have a camel - | ^^^^^^^^^^ help: convert the identifier to camel case: `SnakeCase` +LL | struct snake_case; //~ WARN should have an upper camel + | ^^^^^^^^^^ help: convert the identifier to upper camel case: `SnakeCase` | note: lint level defined here --> $DIR/lint-group-nonstandard-style.rs:18:17 diff --git a/src/test/ui/lint/lint-non-camel-case-types.rs b/src/test/ui/lint/lint-non-camel-case-types.rs index bca1992605b77..d3b119a944109 100644 --- a/src/test/ui/lint/lint-non-camel-case-types.rs +++ b/src/test/ui/lint/lint-non-camel-case-types.rs @@ -2,43 +2,35 @@ #![allow(dead_code)] struct ONE_TWO_THREE; -//~^ ERROR type `ONE_TWO_THREE` should have a camel case name +//~^ ERROR type `ONE_TWO_THREE` should have an upper camel case name -struct foo { //~ ERROR type `foo` should have a camel case name +struct foo { //~ ERROR type `foo` should have an upper camel case name bar: isize, } -enum foo2 { //~ ERROR type `foo2` should have a camel case name +enum foo2 { //~ ERROR type `foo2` should have an upper camel case name Bar } -struct foo3 { //~ ERROR type `foo3` should have a camel case name +struct foo3 { //~ ERROR type `foo3` should have an upper camel case name bar: isize } -type foo4 = isize; //~ ERROR type `foo4` should have a camel case name +type foo4 = isize; //~ ERROR type `foo4` should have an upper camel case name enum Foo5 { - bar //~ ERROR variant `bar` should have a camel case name + bar //~ ERROR variant `bar` should have an upper camel case name } -trait foo6 { //~ ERROR trait `foo6` should have a camel 
case name +trait foo6 { //~ ERROR trait `foo6` should have an upper camel case name fn dummy(&self) { } } -fn f<ty>(_: ty) {} //~ ERROR type parameter `ty` should have a camel case name +fn f<ty>(_: ty) {} //~ ERROR type parameter `ty` should have an upper camel case name #[repr(C)] struct foo7 { bar: isize, } -struct X86_64; - -struct X86__64; //~ ERROR type `X86__64` should have a camel case name - -struct Abc_123; //~ ERROR type `Abc_123` should have a camel case name - -struct A1_b2_c3; //~ ERROR type `A1_b2_c3` should have a camel case name - fn main() { } diff --git a/src/test/ui/lint/lint-non-camel-case-types.stderr b/src/test/ui/lint/lint-non-camel-case-types.stderr index 74f9a5993b859..7afacf64d8778 100644 --- a/src/test/ui/lint/lint-non-camel-case-types.stderr +++ b/src/test/ui/lint/lint-non-camel-case-types.stderr @@ -1,8 +1,8 @@ -error: type `ONE_TWO_THREE` should have a camel case name +error: type `ONE_TWO_THREE` should have an upper camel case name --> $DIR/lint-non-camel-case-types.rs:4:8 | LL | struct ONE_TWO_THREE; - | ^^^^^^^^^^^^^ help: convert the identifier to camel case: `OneTwoThree` + | ^^^^^^^^^^^^^ help: convert the identifier to upper camel case: `OneTwoThree` | note: lint level defined here --> $DIR/lint-non-camel-case-types.rs:1:11 @@ -10,65 +10,47 @@ note: lint level defined here LL | #![forbid(non_camel_case_types)] | ^^^^^^^^^^^^^^^^^^^^ -error: type `foo` should have a camel case name +error: type `foo` should have an upper camel case name --> $DIR/lint-non-camel-case-types.rs:7:8 | -LL | struct foo { //~ ERROR type `foo` should have a camel case name - | ^^^ help: convert the identifier to camel case: `Foo` +LL | struct foo { //~ ERROR type `foo` should have an upper camel case name + | ^^^ help: convert the identifier to upper camel case: `Foo` -error: type `foo2` should have a camel case name +error: type `foo2` should have an upper camel case name --> $DIR/lint-non-camel-case-types.rs:11:6 | -LL | enum foo2 { //~ ERROR type `foo2` should have a camel case name - | ^^^^ help: convert the identifier to camel case: `Foo2` +LL | enum foo2 { //~ ERROR type `foo2` should have an upper camel case name + | ^^^^ help: convert the identifier to upper camel case: `Foo2` -error: type `foo3` should have a camel case name +error: type `foo3` should have an upper camel case name --> $DIR/lint-non-camel-case-types.rs:15:8 | -LL | struct foo3 { //~ ERROR type `foo3` should have a camel case name - | ^^^^ help: convert the identifier to camel case: `Foo3` +LL | struct foo3 { //~ ERROR type `foo3` should have an upper camel case name + | ^^^^ help: convert the identifier to upper camel case: `Foo3` -error: type `foo4` should have a camel case name +error: type `foo4` should have an upper camel case name --> $DIR/lint-non-camel-case-types.rs:19:6 | -LL | type foo4 = isize; //~ ERROR type `foo4` should have a camel case name - | ^^^^ help: convert the identifier to camel case: `Foo4` +LL | type foo4 = isize; //~ ERROR type `foo4` should have an upper camel case name + | ^^^^ help: convert the identifier to upper camel case: `Foo4` -error: variant `bar` should have a camel case name +error: variant `bar` should have an upper camel case name --> $DIR/lint-non-camel-case-types.rs:22:5 | -LL | bar //~ ERROR variant `bar` should have a camel case name - | ^^^ help: convert the identifier to camel case: `Bar` +LL | bar //~ ERROR variant `bar` should have an upper camel case name + | ^^^ help: convert the identifier to upper camel case: `Bar` -error: trait `foo6` should have a camel case
name +error: trait `foo6` should have an upper camel case name --> $DIR/lint-non-camel-case-types.rs:25:7 | -LL | trait foo6 { //~ ERROR trait `foo6` should have a camel case name - | ^^^^ help: convert the identifier to camel case: `Foo6` +LL | trait foo6 { //~ ERROR trait `foo6` should have an upper camel case name + | ^^^^ help: convert the identifier to upper camel case: `Foo6` -error: type parameter `ty` should have a camel case name +error: type parameter `ty` should have an upper camel case name --> $DIR/lint-non-camel-case-types.rs:29:6 | -LL | fn f<ty>(_: ty) {} //~ ERROR type parameter `ty` should have a camel case name - | ^^ help: convert the identifier to camel case: `Ty` +LL | fn f<ty>(_: ty) {} //~ ERROR type parameter `ty` should have an upper camel case name - | ^^ help: convert the identifier to upper camel case: `Ty` -error: type `X86__64` should have a camel case name - --> $DIR/lint-non-camel-case-types.rs:38:8 - | -LL | struct X86__64; //~ ERROR type `X86__64` should have a camel case name - | ^^^^^^^ help: convert the identifier to camel case: `X86_64` - -error: type `Abc_123` should have a camel case name - --> $DIR/lint-non-camel-case-types.rs:40:8 - | -LL | struct Abc_123; //~ ERROR type `Abc_123` should have a camel case name - | ^^^^^^^ help: convert the identifier to camel case: `Abc123` - -error: type `A1_b2_c3` should have a camel case name - --> $DIR/lint-non-camel-case-types.rs:42:8 - | -LL | struct A1_b2_c3; //~ ERROR type `A1_b2_c3` should have a camel case name - | ^^^^^^^^ help: convert the identifier to camel case: `A1B2C3` - -error: aborting due to 11 previous errors +error: aborting due to 8 previous errors diff --git a/src/test/ui/nll/closure-requirements/issue-58127-mutliple-requirements.rs b/src/test/ui/nll/closure-requirements/issue-58127-mutliple-requirements.rs new file mode 100644 index 0000000000000..71d5d4053ee25 --- /dev/null +++ b/src/test/ui/nll/closure-requirements/issue-58127-mutliple-requirements.rs @@ -0,0 +1,40 @@ +// revisions: migrate nll +//[migrate]compile-flags: -Z borrowck=migrate +#![cfg_attr(nll, feature(nll))] + +// compile-pass + +// Test that we propagate region relations from closures precisely when there is +// more than one non-local lower bound. + +// In this case the closure has signature +// |x: &'4 mut (&'5 (&'1 str, &'2 str), &'3 str)| -> .. +// We end up with a `'3: '5` constraint that we can propagate as +// `'3: '1`, `'3: '2`, but previously we approximated it as `'3: 'static`. + +// As an optimization, we primarily propagate bounds for the "representative" +// of each SCC. As such we have these two similar cases where hopefully one +// of them will test the case we want (case2, when this test was added). +mod case1 { + fn f(s: &str) { + g(s, |x| h(x)); + } + + fn g<T, F>(_: T, _: F) + where F: Fn(&mut (&(T, T), T)) {} + + fn h<T>(_: &mut (&(T, T), T)) {} } + +mod case2 { + fn f(s: &str) { + g(s, |x| h(x)); + } + + fn g<T, F>(_: T, _: F) + where F: Fn(&mut (T, &(T, T))) {} + + fn h<T>(_: &mut (T, &(T, T))) {} +} + +fn main() {} diff --git a/src/test/ui/nll/issue-58299.rs b/src/test/ui/nll/issue-58299.rs new file mode 100644 index 0000000000000..9267cac5dd3d7 --- /dev/null +++ b/src/test/ui/nll/issue-58299.rs @@ -0,0 +1,30 @@ +#![allow(dead_code)] +#![feature(nll)] + +struct A<'a>(&'a ()); + +trait Y { + const X: i32; +} + +impl Y for A<'static> { + const X: i32 = 10; +} + +fn foo<'a>(x: i32) { + match x { + // This uses <A<'a> as Y>::X, but `A<'a>` does not implement `Y`.
+ A::<'a>::X..=A::<'static>::X => (), //~ ERROR lifetime may not live long enough + _ => (), + } +} + +fn bar<'a>(x: i32) { + match x { + // This uses <A<'a> as Y>::X, but `A<'a>` does not implement `Y`. + A::<'static>::X..=A::<'a>::X => (), //~ ERROR lifetime may not live long enough + _ => (), + } +} + +fn main() {} diff --git a/src/test/ui/nll/issue-58299.stderr b/src/test/ui/nll/issue-58299.stderr new file mode 100644 index 0000000000000..b87d0de51a37e --- /dev/null +++ b/src/test/ui/nll/issue-58299.stderr @@ -0,0 +1,20 @@ +error: lifetime may not live long enough + --> $DIR/issue-58299.rs:17:9 + | +LL | fn foo<'a>(x: i32) { + | -- lifetime `'a` defined here +... +LL | A::<'a>::X..=A::<'static>::X => (), //~ ERROR lifetime may not live long enough + | ^^^^^^^^^^ requires that `'a` must outlive `'static` + +error: lifetime may not live long enough + --> $DIR/issue-58299.rs:25:27 + | +LL | fn bar<'a>(x: i32) { + | -- lifetime `'a` defined here +... +LL | A::<'static>::X..=A::<'a>::X => (), //~ ERROR lifetime may not live long enough + | ^^^^^^^^^^ requires that `'a` must outlive `'static` + +error: aborting due to 2 previous errors + diff --git a/src/test/ui/utf8_idents.rs b/src/test/ui/utf8_idents.rs index e601c6e455544..f59d5502aae30 100644 --- a/src/test/ui/utf8_idents.rs +++ b/src/test/ui/utf8_idents.rs @@ -1,9 +1,7 @@ -// - fn foo< 'β, //~ ERROR non-ascii idents are not fully supported γ //~ ERROR non-ascii idents are not fully supported - //~^ WARN type parameter `γ` should have a camel case name + //~^ WARN type parameter `γ` should have an upper camel case name >() {} struct X { diff --git a/src/test/ui/utf8_idents.stderr b/src/test/ui/utf8_idents.stderr index 268dd99d06031..52fb607af5b25 100644 --- a/src/test/ui/utf8_idents.stderr +++ b/src/test/ui/utf8_idents.stderr @@ -1,5 +1,5 @@ error[E0658]: non-ascii idents are not fully supported. (see issue #55467) - --> $DIR/utf8_idents.rs:4:5 + --> $DIR/utf8_idents.rs:2:5 | LL | 'β, //~ ERROR non-ascii idents are not fully supported | ^^ @@ -7,7 +7,7 @@ LL | 'β, //~ ERROR non-ascii idents are not fully supported = help: add #![feature(non_ascii_idents)] to the crate attributes to enable error[E0658]: non-ascii idents are not fully supported. (see issue #55467) - --> $DIR/utf8_idents.rs:5:5 + --> $DIR/utf8_idents.rs:3:5 | LL | γ //~ ERROR non-ascii idents are not fully supported | ^ @@ -15,7 +15,7 @@ LL | γ //~ ERROR non-ascii idents are not fully supported = help: add #![feature(non_ascii_idents)] to the crate attributes to enable error[E0658]: non-ascii idents are not fully supported.
(see issue #55467) - --> $DIR/utf8_idents.rs:14:9 + --> $DIR/utf8_idents.rs:12:9 | LL | let α = 0.00001f64; //~ ERROR non-ascii idents are not fully supported | ^ | = help: add #![feature(non_ascii_idents)] to the crate attributes to enable -warning: type parameter `γ` should have a camel case name - --> $DIR/utf8_idents.rs:5:5 +warning: type parameter `γ` should have an upper camel case name + --> $DIR/utf8_idents.rs:3:5 | LL | γ //~ ERROR non-ascii idents are not fully supported - | ^ help: convert the identifier to camel case: `Γ` + | ^ help: convert the identifier to upper camel case: `Γ` | = note: #[warn(non_camel_case_types)] on by default diff --git a/src/tools/publish_toolstate.py b/src/tools/publish_toolstate.py index ed697fc542535..24d6fd5b19ba9 100755 --- a/src/tools/publish_toolstate.py +++ b/src/tools/publish_toolstate.py @@ -17,7 +17,7 @@ 'miri': '@oli-obk @RalfJung @eddyb', 'clippy-driver': '@Manishearth @llogiq @mcarton @oli-obk', 'rls': '@nrc @Xanewok', - 'rustfmt': '@nrc', + 'rustfmt': '@nrc @topecongiro', 'book': '@carols10cents @steveklabnik', 'nomicon': '@frewsxcv @Gankro', 'reference': '@steveklabnik @Havvy @matthewjasper @alercah',
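
A note on the `hair::pattern` changes above: the fields that previously lived directly on `PatternKind::AscribeUserType` (`user_ty`, `user_ty_span`, `variance`) now travel together in the new `Ascription` struct, and `lower_range_expr` peels one such ascription off each lowered range endpoint so that the endpoints' user types can be re-attached to the finished range pattern instead of being dropped. The standalone sketch below illustrates that flow with simplified, hypothetical stand-in types (`Ascription`, `PatternKind`, `Span`, `peel_ascription`, and a `String` in place of `PatternTypeProjection<'tcx>`); it is not the compiler's real code, only the shape of the change.

```rust
#[derive(Copy, Clone, Debug, PartialEq)]
enum Variance { Covariant, Contravariant }

#[derive(Copy, Clone, Debug, PartialEq)]
struct Span { lo: u32, hi: u32 }

#[derive(Clone, Debug, PartialEq)]
struct Ascription {
    user_ty: String,      // stand-in for PatternTypeProjection<'tcx>
    user_ty_span: Span,
    variance: Variance,
}

#[derive(Clone, Debug)]
enum PatternKind {
    Wild,
    Constant { value: i128 },
    Range { lo: i128, hi: i128 },
    AscribeUserType { ascription: Ascription, subpattern: Box<PatternKind> },
}

// Mirrors the role of `lower_range_expr`: peel a single type ascription off a
// lowered range endpoint, returning the bare kind plus the ascription (if any).
fn peel_ascription(kind: PatternKind) -> (PatternKind, Option<Ascription>) {
    match kind {
        PatternKind::AscribeUserType { ascription, subpattern } => (*subpattern, Some(ascription)),
        other => (other, None),
    }
}

fn main() {
    // Pretend lowering produced an ascribed constant for the `lo` endpoint
    // (e.g. an associated constant such as `Foo::<'a>::A`) and a bare constant for `hi`.
    let lo = PatternKind::AscribeUserType {
        ascription: Ascription {
            user_ty: "Foo<'a>".to_owned(),
            user_ty_span: Span { lo: 0, hi: 12 },
            variance: Variance::Contravariant,
        },
        subpattern: Box::new(PatternKind::Constant { value: 1 }),
    };
    let hi = PatternKind::Constant { value: 5 };

    let (lo_kind, lo_ascription) = peel_ascription(lo);
    let (hi_kind, hi_ascription) = peel_ascription(hi);

    // Build the range from the bare constants...
    let mut kind = match (lo_kind, hi_kind) {
        (PatternKind::Constant { value: lo }, PatternKind::Constant { value: hi }) => {
            PatternKind::Range { lo, hi }
        }
        _ => PatternKind::Wild,
    };

    // ...then re-attach any endpoint ascriptions on top of the range pattern,
    // which is what keeps the user-supplied type (and its lifetimes) visible.
    for ascription in vec![lo_ascription, hi_ascription].into_iter().flatten() {
        kind = PatternKind::AscribeUserType { ascription, subpattern: Box::new(kind) };
    }

    println!("{:#?}", kind);
}
```

Keeping the endpoint ascriptions on the range pattern, rather than discarding them as the removed `with_user_type_ascription_subpattern` helper effectively did, is what lets the NLL borrow checker see the `'a`/`'static` relationships exercised by the new `issue-58299.rs` test.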
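
For the variance discussion in the `Ascription` doc comment (the `&'static str <: T_x` example), here is a small piece of ordinary user code, not compiler internals, sketching the situation that note describes: the constant in the pattern has a `'static` type while the scrutinee's type carries a shorter lifetime, and the match is accepted because the two types only need to be comparable.

```rust
// The literal pattern `"foo"` has type `&'static str`, while the scrutinee `x`
// has type `&'a str` for some shorter lifetime `'a`. The match compiles because
// `&'static str <: &'a str` (shared references are covariant in their lifetime),
// which is the `&'static str <: T_x` requirement quoted in the doc comment.
fn is_foo<'a>(x: &'a str) -> bool {
    match x {
        "foo" => true,
        _ => false,
    }
}

fn main() {
    let owned = String::from("foo");
    assert!(is_foo(&owned));
    assert!(!is_foo("bar"));
}
```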