diff --git a/src/Cargo.lock b/src/Cargo.lock index 6d7fcb71efa..7e969ce9b84 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -293,6 +293,9 @@ dependencies = [ [[package]] name = "core" version = "0.0.0" +dependencies = [ + "rand 0.0.0", +] [[package]] name = "crates-io" @@ -1099,6 +1102,7 @@ dependencies = [ "flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", "fmt_macros 0.0.0", "graphviz 0.0.0", + "jobserver 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_back 0.0.0", @@ -1394,8 +1398,10 @@ dependencies = [ name = "rustc_trans" version = "0.0.0" dependencies = [ + "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", diff --git a/src/libcore/Cargo.toml b/src/libcore/Cargo.toml index 5af63aa970f..178df02ccdd 100644 --- a/src/libcore/Cargo.toml +++ b/src/libcore/Cargo.toml @@ -9,6 +9,9 @@ path = "lib.rs" test = false bench = false +[dev-dependencies] +rand = { path = "../librand" } + [[test]] name = "coretests" path = "../libcore/tests/lib.rs" diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml index 3d59a4eb882..89169548bbb 100644 --- a/src/librustc/Cargo.toml +++ b/src/librustc/Cargo.toml @@ -12,6 +12,7 @@ crate-type = ["dylib"] arena = { path = "../libarena" } fmt_macros = { path = "../libfmt_macros" } graphviz = { path = "../libgraphviz" } +jobserver = "0.1" log = "0.3" owning_ref = "0.3.3" rustc_back = { path = "../librustc_back" } diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 77a43c5319c..b81c56e5ee8 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -61,6 +61,7 @@ #[macro_use] extern crate syntax; extern crate syntax_pos; #[macro_use] #[no_link] extern crate rustc_bitflags; +extern crate jobserver; extern crate serialize as rustc_serialize; // used by deriving diff --git a/src/librustc/session/mod.rs b/src/librustc/session/mod.rs index 827fa72f034..70c07982f83 100644 --- a/src/librustc/session/mod.rs +++ b/src/librustc/session/mod.rs @@ -38,14 +38,16 @@ use rustc_back::{LinkerFlavor, PanicStrategy}; use rustc_back::target::Target; use rustc_data_structures::flock; +use jobserver::Client; -use std::path::{Path, PathBuf}; use std::cell::{self, Cell, RefCell}; use std::collections::HashMap; use std::env; -use std::io::Write; -use std::rc::Rc; use std::fmt; +use std::io::Write; +use std::path::{Path, PathBuf}; +use std::rc::Rc; +use std::sync::{Once, ONCE_INIT}; use std::time::Duration; mod code_stats; @@ -134,6 +136,10 @@ pub struct Session { pub print_fuel_crate: Option, /// Always set to zero and incremented so that we can print fuel expended by a crate. pub print_fuel: Cell, + + /// Loaded up early on in the initialization of this `Session` to avoid + /// false positives about a job server in our environment. 
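The `jobserver_from_env` field declared just below is populated once per process. A minimal, self-contained sketch of that probing, assuming the `jobserver` crate (~0.1) added to Cargo.lock/Cargo.toml above; the `Once`-guarded `static mut` is a simplified stand-in for the boxed global the hunk itself installs, and the pre-2018 `ONCE_INIT` style matches the surrounding code.

extern crate jobserver;

use std::sync::{Once, ONCE_INIT};
use jobserver::Client;

// Simplified stand-in for the global the patch uses inside build_session_.
fn inherited_jobserver() -> Option<Client> {
    static INIT: Once = ONCE_INIT;
    static mut GLOBAL: Option<Client> = None;
    unsafe {
        INIT.call_once(|| {
            // `from_env` is unsafe: on Unix it trusts the file descriptors
            // named in MAKEFLAGS, so it should run before the process opens
            // descriptors of its own to avoid false positives.
            GLOBAL = Client::from_env();
        });
        GLOBAL.clone()
    }
}

fn main() {
    match inherited_jobserver() {
        Some(_) => println!("inherited a jobserver from the environment"),
        None => println!("no jobserver in the environment"),
    }
}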
+ pub jobserver_from_env: Option, } pub struct PerfStats { @@ -697,6 +703,24 @@ pub fn build_session_(sopts: config::Options, print_fuel_crate: print_fuel_crate, print_fuel: print_fuel, out_of_fuel: Cell::new(false), + + // Note that this is unsafe because it may misinterpret file descriptors + // on Unix as jobserver file descriptors. We hopefully execute this near + // the beginning of the process though to ensure we don't get false + // positives, or in other words we try to execute this before we open + // any file descriptors ourselves. + // + // Also note that we stick this in a global because there could be + // multiple `Session` instances in this process, and the jobserver is + // per-process. + jobserver_from_env: unsafe { + static mut GLOBAL_JOBSERVER: *mut Option = 0 as *mut _; + static INIT: Once = ONCE_INIT; + INIT.call_once(|| { + GLOBAL_JOBSERVER = Box::into_raw(Box::new(Client::from_env())); + }); + (*GLOBAL_JOBSERVER).clone() + }, }; sess diff --git a/src/librustc_trans/Cargo.toml b/src/librustc_trans/Cargo.toml index a36c5613711..86590bff4ff 100644 --- a/src/librustc_trans/Cargo.toml +++ b/src/librustc_trans/Cargo.toml @@ -10,7 +10,9 @@ crate-type = ["dylib"] test = false [dependencies] +crossbeam = "0.2" flate2 = "0.2" +jobserver = "0.1.5" log = "0.3" owning_ref = "0.3.3" rustc = { path = "../librustc" } diff --git a/src/librustc_trans/back/link.rs b/src/librustc_trans/back/link.rs index 7cd1ef77298..1f88f90dbbb 100644 --- a/src/librustc_trans/back/link.rs +++ b/src/librustc_trans/back/link.rs @@ -329,34 +329,38 @@ pub fn filename_for_input(sess: &Session, } pub fn each_linked_rlib(sess: &Session, - f: &mut FnMut(CrateNum, &Path)) { + f: &mut FnMut(CrateNum, &Path)) -> Result<(), String> { let crates = sess.cstore.used_crates(LinkagePreference::RequireStatic).into_iter(); let fmts = sess.dependency_formats.borrow(); let fmts = fmts.get(&config::CrateTypeExecutable) .or_else(|| fmts.get(&config::CrateTypeStaticlib)) .or_else(|| fmts.get(&config::CrateTypeCdylib)) .or_else(|| fmts.get(&config::CrateTypeProcMacro)); - let fmts = fmts.unwrap_or_else(|| { - bug!("could not find formats for rlibs"); - }); + let fmts = match fmts { + Some(f) => f, + None => return Err(format!("could not find formats for rlibs")) + }; for (cnum, path) in crates { - match fmts[cnum.as_usize() - 1] { - Linkage::NotLinked | Linkage::IncludedFromDylib => continue, - _ => {} + match fmts.get(cnum.as_usize() - 1) { + Some(&Linkage::NotLinked) | + Some(&Linkage::IncludedFromDylib) => continue, + Some(_) => {} + None => return Err(format!("could not find formats for rlibs")) } let name = sess.cstore.crate_name(cnum).clone(); let path = match path { LibSource::Some(p) => p, LibSource::MetadataOnly => { - sess.fatal(&format!("could not find rlib for: `{}`, found rmeta (metadata) file", - name)); + return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file", + name)) } LibSource::None => { - sess.fatal(&format!("could not find rlib for: `{}`", name)); + return Err(format!("could not find rlib for: `{}`", name)) } }; f(cnum, &path); } + Ok(()) } fn out_filename(sess: &Session, @@ -669,7 +673,7 @@ fn link_staticlib(sess: &Session, objects: &[PathBuf], out_filename: &Path, let mut ab = link_rlib(sess, None, objects, out_filename, tempdir); let mut all_native_libs = vec![]; - each_linked_rlib(sess, &mut |cnum, path| { + let res = each_linked_rlib(sess, &mut |cnum, path| { let name = sess.cstore.crate_name(cnum); let native_libs = sess.cstore.native_libraries(cnum); @@ -694,6 +698,9 @@ fn 
link_staticlib(sess: &Session, objects: &[PathBuf], out_filename: &Path, all_native_libs.extend(sess.cstore.native_libraries(cnum)); }); + if let Err(e) = res { + sess.fatal(&e); + } ab.update_symbols(); ab.build(); diff --git a/src/librustc_trans/back/lto.rs b/src/librustc_trans/back/lto.rs index a5f9a41470d..906815583bf 100644 --- a/src/librustc_trans/back/lto.rs +++ b/src/librustc_trans/back/lto.rs @@ -10,15 +10,16 @@ use back::link; use back::write; -use back::symbol_export::{self, ExportedSymbols}; -use rustc::session::{self, config}; +use back::symbol_export; +use rustc::session::config; +use errors::FatalError; use llvm; use llvm::archive_ro::ArchiveRO; use llvm::{ModuleRef, TargetMachineRef, True, False}; use rustc::util::common::time; use rustc::util::common::path2cstr; use rustc::hir::def_id::LOCAL_CRATE; -use back::write::{ModuleConfig, with_llvm_pmb}; +use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext}; use libc; use flate2::read::ZlibDecoder; @@ -39,30 +40,31 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { } } -pub fn run(sess: &session::Session, +pub fn run(cgcx: &CodegenContext, llmod: ModuleRef, tm: TargetMachineRef, - exported_symbols: &ExportedSymbols, config: &ModuleConfig, - temp_no_opt_bc_filename: &Path) { - if sess.opts.cg.prefer_dynamic { - sess.struct_err("cannot prefer dynamic linking when performing LTO") + temp_no_opt_bc_filename: &Path) -> Result<(), FatalError> { + let handler = cgcx.handler; + if cgcx.opts.cg.prefer_dynamic { + handler.struct_err("cannot prefer dynamic linking when performing LTO") .note("only 'staticlib', 'bin', and 'cdylib' outputs are \ supported with LTO") .emit(); - sess.abort_if_errors(); + return Err(FatalError) } // Make sure we actually can run LTO - for crate_type in sess.crate_types.borrow().iter() { + for crate_type in cgcx.crate_types.iter() { if !crate_type_allows_lto(*crate_type) { - sess.fatal("lto can only be run for executables, cdylibs and \ - static library outputs"); + let e = handler.fatal("lto can only be run for executables, cdylibs and \ + static library outputs"); + return Err(e) } } let export_threshold = - symbol_export::crates_export_threshold(&sess.crate_types.borrow()); + symbol_export::crates_export_threshold(&cgcx.crate_types); let symbol_filter = &|&(ref name, level): &(String, _)| { if symbol_export::is_below_threshold(level, export_threshold) { @@ -74,7 +76,7 @@ pub fn run(sess: &session::Session, } }; - let mut symbol_white_list: Vec = exported_symbols + let mut symbol_white_list: Vec = cgcx.exported_symbols .exported_symbols(LOCAL_CRATE) .iter() .filter_map(symbol_filter) @@ -83,16 +85,11 @@ pub fn run(sess: &session::Session, // For each of our upstream dependencies, find the corresponding rlib and // load the bitcode from the archive. Then merge it into the current LLVM // module that we've got. - link::each_linked_rlib(sess, &mut |cnum, path| { - // `#![no_builtins]` crates don't participate in LTO. 
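The change above turns `each_linked_rlib` from a function that aborts through `bug!`/`sess.fatal` into one that returns a `Result`, so the caller (here `link_staticlib`, and later the codegen workers) decides how to surface the failure. A freestanding sketch of that shape, with a plain `u32` standing in for `CrateNum` and no real `Session`:

use std::path::{Path, PathBuf};

type CrateNum = u32; // stand-in for rustc's CrateNum

fn each_linked_rlib<F>(rlibs: &[(CrateNum, PathBuf)], mut f: F) -> Result<(), String>
    where F: FnMut(CrateNum, &Path),
{
    for &(cnum, ref path) in rlibs {
        if path.as_os_str().is_empty() {
            // Report the problem instead of aborting, so callers running
            // off the main thread can forward it as a message.
            return Err(format!("could not find rlib for crate {}", cnum));
        }
        f(cnum, path.as_path());
    }
    Ok(())
}

fn main() {
    let rlibs = vec![(1, PathBuf::from("liba.rlib")), (2, PathBuf::from("libb.rlib"))];
    let mut collected = Vec::new();
    let res = each_linked_rlib(&rlibs, |cnum, path| {
        collected.push((cnum, path.to_path_buf()));
    });
    if let Err(e) = res {
        // On the main thread this is where `sess.fatal(&e)` fires.
        eprintln!("error: {}", e);
    }
    println!("collected {} rlibs", collected.len());
}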
- if sess.cstore.is_no_builtins(cnum) { - return; - } - + for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() { symbol_white_list.extend( - exported_symbols.exported_symbols(cnum) - .iter() - .filter_map(symbol_filter)); + cgcx.exported_symbols.exported_symbols(cnum) + .iter() + .filter_map(symbol_filter)); let archive = ArchiveRO::open(&path).expect("wanted an rlib"); let bytecodes = archive.iter().filter_map(|child| { @@ -102,7 +99,7 @@ pub fn run(sess: &session::Session, let bc_encoded = data.data(); let bc_decoded = if is_versioned_bytecode_format(bc_encoded) { - time(sess.time_passes(), &format!("decode {}", name), || { + time(cgcx.time_passes, &format!("decode {}", name), || { // Read the version let version = extract_bytecode_format_version(bc_encoded); @@ -117,17 +114,19 @@ pub fn run(sess: &session::Session, let res = ZlibDecoder::new(compressed_data) .read_to_end(&mut inflated); if res.is_err() { - sess.fatal(&format!("failed to decompress bc of `{}`", - name)) + let msg = format!("failed to decompress bc of `{}`", + name); + Err(handler.fatal(&msg)) + } else { + Ok(inflated) } - inflated } else { - sess.fatal(&format!("Unsupported bytecode format version {}", - version)) + Err(handler.fatal(&format!("Unsupported bytecode format version {}", + version))) } - }) + })? } else { - time(sess.time_passes(), &format!("decode {}", name), || { + time(cgcx.time_passes, &format!("decode {}", name), || { // the object must be in the old, pre-versioning format, so // simply inflate everything and let LLVM decide if it can // make sense of it @@ -135,26 +134,29 @@ pub fn run(sess: &session::Session, let res = ZlibDecoder::new(bc_encoded) .read_to_end(&mut inflated); if res.is_err() { - sess.fatal(&format!("failed to decompress bc of `{}`", - name)) + let msg = format!("failed to decompress bc of `{}`", + name); + Err(handler.fatal(&msg)) + } else { + Ok(inflated) } - inflated - }) + })? 
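In the hunk above each decode arm now evaluates to a `Result` inside `time(...)`, and `?` is applied to the whole timed call. A minimal model of that shape; this `time` is a simplified stand-in for `rustc::util::common::time`:

use std::time::Instant;

// Runs the closure, optionally reporting how long it took, and returns
// whatever the closure returned -- including a Result.
fn time<T, F>(do_it: bool, what: &str, f: F) -> T
    where F: FnOnce() -> T,
{
    if !do_it {
        return f();
    }
    let start = Instant::now();
    let rv = f();
    println!("time: {:?}\t{}", start.elapsed(), what);
    rv
}

fn decode(name: &str, encoded: &[u8]) -> Result<Vec<u8>, String> {
    // The closure yields a Result, `time` passes it through, and `?`
    // propagates any error to decode's caller.
    let inflated = time(true, &format!("decode {}", name), || {
        if encoded.is_empty() {
            Err(format!("failed to decompress bc of `{}`", name))
        } else {
            Ok(encoded.to_vec())
        }
    })?;
    Ok(inflated)
}

fn main() {
    println!("{:?}", decode("foo", b"fake bytecode"));
    println!("{:?}", decode("bar", b""));
}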
}; let ptr = bc_decoded.as_ptr(); debug!("linking {}", name); - time(sess.time_passes(), &format!("ll link {}", name), || unsafe { - if !llvm::LLVMRustLinkInExternalBitcode(llmod, - ptr as *const libc::c_char, - bc_decoded.len() as libc::size_t) { - write::llvm_err(sess.diagnostic(), - format!("failed to load bc of `{}`", - name)); + time(cgcx.time_passes, &format!("ll link {}", name), || unsafe { + if llvm::LLVMRustLinkInExternalBitcode(llmod, + ptr as *const libc::c_char, + bc_decoded.len() as libc::size_t) { + Ok(()) + } else { + let msg = format!("failed to load bc of `{}`", name); + Err(write::llvm_err(handler, msg)) } - }); + })?; } - }); + } // Internalize everything but the exported symbols of the current module let arr: Vec<*const libc::c_char> = symbol_white_list.iter() @@ -167,13 +169,13 @@ pub fn run(sess: &session::Session, arr.len() as libc::size_t); } - if sess.no_landing_pads() { + if cgcx.no_landing_pads { unsafe { llvm::LLVMRustMarkAllFunctionsNounwind(llmod); } } - if sess.opts.cg.save_temps { + if cgcx.opts.cg.save_temps { let cstr = path2cstr(temp_no_opt_bc_filename); unsafe { llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); @@ -203,12 +205,13 @@ pub fn run(sess: &session::Session, assert!(!pass.is_null()); llvm::LLVMRustAddPass(pm, pass); - time(sess.time_passes(), "LTO passes", || + time(cgcx.time_passes, "LTO passes", || llvm::LLVMRunPassManager(pm, llmod)); llvm::LLVMDisposePassManager(pm); } debug!("lto done"); + Ok(()) } fn is_versioned_bytecode_format(bc: &[u8]) -> bool { diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 4871d638d12..549cb2567cf 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -9,7 +9,7 @@ // except according to those terms. use back::lto; -use back::link::{get_linker, remove}; +use back::link::{self, get_linker, remove}; use back::symbol_export::ExportedSymbols; use rustc_incremental::{save_trans_partition, in_incr_comp_dir}; use rustc::session::config::{self, OutputFilenames, OutputType, OutputTypes, Passes, SomePasses, @@ -19,21 +19,24 @@ use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef, ContextRef}; use llvm::SMDiagnosticRef; use {CrateTranslation, ModuleLlvm, ModuleSource, ModuleTranslation}; +use rustc::hir::def_id::CrateNum; use rustc::util::common::{time, time_depth, set_time_depth, path2cstr}; use rustc::util::fs::link_or_copy; -use errors::{self, Handler, Level, DiagnosticBuilder}; +use errors::{self, Handler, Level, DiagnosticBuilder, FatalError}; use errors::emitter::Emitter; +use syntax::ext::hygiene::Mark; use syntax_pos::MultiSpan; use context::{is_pie_binary, get_reloc_model}; +use jobserver::{Client, Acquired}; +use crossbeam::{scope, Scope}; use std::cmp; use std::ffi::CString; use std::fs; +use std::io; use std::path::{Path, PathBuf}; use std::str; -use std::sync::{Arc, Mutex}; -use std::sync::mpsc::channel; -use std::thread; +use std::sync::mpsc::{channel, Sender}; use libc::{c_uint, c_void}; pub const RELOC_MODEL_ARGS : [(&'static str, llvm::RelocMode); 7] = [ @@ -54,10 +57,10 @@ ("large", llvm::CodeModel::Large), ]; -pub fn llvm_err(handler: &errors::Handler, msg: String) -> ! 
{ +pub fn llvm_err(handler: &errors::Handler, msg: String) -> FatalError { match llvm::last_error() { - Some(err) => panic!(handler.fatal(&format!("{}: {}", msg, err))), - None => panic!(handler.fatal(&msg)), + Some(err) => handler.fatal(&format!("{}: {}", msg, err)), + None => handler.fatal(&msg), } } @@ -67,73 +70,16 @@ pub fn write_output_file( pm: llvm::PassManagerRef, m: ModuleRef, output: &Path, - file_type: llvm::FileType) { + file_type: llvm::FileType) -> Result<(), FatalError> { unsafe { let output_c = path2cstr(output); let result = llvm::LLVMRustWriteOutputFile( target, pm, m, output_c.as_ptr(), file_type); if result.into_result().is_err() { - llvm_err(handler, format!("could not write output to {}", output.display())); - } - } -} - - -struct Diagnostic { - msg: String, - code: Option, - lvl: Level, -} - -// We use an Arc instead of just returning a list of diagnostics from the -// child thread because we need to make sure that the messages are seen even -// if the child thread panics (for example, when `fatal` is called). -#[derive(Clone)] -struct SharedEmitter { - buffer: Arc>>, -} - -impl SharedEmitter { - fn new() -> SharedEmitter { - SharedEmitter { - buffer: Arc::new(Mutex::new(Vec::new())), - } - } - - fn dump(&mut self, handler: &Handler) { - let mut buffer = self.buffer.lock().unwrap(); - for diag in &*buffer { - match diag.code { - Some(ref code) => { - handler.emit_with_code(&MultiSpan::new(), - &diag.msg, - &code, - diag.lvl); - }, - None => { - handler.emit(&MultiSpan::new(), - &diag.msg, - diag.lvl); - }, - } - } - buffer.clear(); - } -} - -impl Emitter for SharedEmitter { - fn emit(&mut self, db: &DiagnosticBuilder) { - self.buffer.lock().unwrap().push(Diagnostic { - msg: db.message(), - code: db.code.clone(), - lvl: db.level, - }); - for child in &db.children { - self.buffer.lock().unwrap().push(Diagnostic { - msg: child.message(), - code: None, - lvl: child.level, - }); + let msg = format!("could not write output to {}", output.display()); + Err(llvm_err(handler, msg)) + } else { + Ok(()) } } } @@ -231,9 +177,9 @@ pub fn create_target_machine(sess: &Session) -> TargetMachineRef { }; if tm.is_null() { - llvm_err(sess.diagnostic(), - format!("Could not create LLVM TargetMachine for triple: {}", - triple).to_string()); + let msg = format!("Could not create LLVM TargetMachine for triple: {}", + triple); + panic!(llvm_err(sess.diagnostic(), msg)); } else { return tm; }; @@ -333,36 +279,28 @@ fn set_flags(&mut self, sess: &Session, trans: &CrateTranslation) { } /// Additional resources used by optimize_and_codegen (not module specific) -struct CodegenContext<'a> { - // Extra resources used for LTO: (sess, reachable). This will be `None` - // when running in a worker thread. - lto_ctxt: Option<(&'a Session, &'a ExportedSymbols)>, +pub struct CodegenContext<'a> { + // Resouces needed when running LTO + pub time_passes: bool, + pub lto: bool, + pub no_landing_pads: bool, + pub exported_symbols: &'a ExportedSymbols, + pub opts: &'a config::Options, + pub crate_types: Vec, + pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, // Handler to use for diagnostics produced during codegen. - handler: &'a Handler, + pub handler: &'a Handler, // LLVM passes added by plugins. - plugin_passes: Vec, + pub plugin_passes: Vec, // LLVM optimizations for which we want to print remarks. 
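`llvm_err` above stops diverging (`-> !`) and instead returns the `FatalError` value produced by the handler, so worker threads can propagate it as an `Err` while main-thread-only callers such as `create_target_machine` still `panic!` with it. A condensed model of that split; `Handler` and `FatalError` here are simplified stand-ins for the librustc_errors types:

#[derive(Debug)]
struct FatalError; // stand-in for errors::FatalError

struct Handler; // stand-in for errors::Handler

impl Handler {
    // Emits the message and hands back a marker value instead of unwinding.
    fn fatal(&self, msg: &str) -> FatalError {
        eprintln!("error: {}", msg);
        FatalError
    }
}

fn llvm_err(handler: &Handler, msg: String) -> FatalError {
    handler.fatal(&msg)
}

fn write_output_file(handler: &Handler, ok: bool) -> Result<(), FatalError> {
    if ok {
        Ok(())
    } else {
        Err(llvm_err(handler, "could not write output".to_string()))
    }
}

fn main() {
    let handler = Handler;
    // A codegen worker propagates the error to its caller with `Err`/`?` ...
    println!("worker result: {:?}", write_output_file(&handler, false));
    // ... while a main-thread-only path may still abort on the spot, e.g.
    // `panic!(llvm_err(&handler, msg))` as create_target_machine does above.
}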
- remark: Passes, + pub remark: Passes, // Worker thread number - worker: usize, + pub worker: usize, // The incremental compilation session directory, or None if we are not // compiling incrementally - incr_comp_session_dir: Option -} - -impl<'a> CodegenContext<'a> { - fn new_with_session(sess: &'a Session, - exported_symbols: &'a ExportedSymbols) - -> CodegenContext<'a> { - CodegenContext { - lto_ctxt: Some((sess, exported_symbols)), - handler: sess.diagnostic(), - plugin_passes: sess.plugin_llvm_passes.borrow().clone(), - remark: sess.opts.cg.remark.clone(), - worker: 0, - incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()) - } - } + pub incr_comp_session_dir: Option, + // Channel back to the main control thread to send messages to + pub tx: Sender, } struct HandlerFreeVars<'a> { @@ -373,22 +311,7 @@ struct HandlerFreeVars<'a> { unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext<'a>, msg: &'b str, cookie: c_uint) { - use syntax::ext::hygiene::Mark; - - match cgcx.lto_ctxt { - Some((sess, _)) => { - match Mark::from_u32(cookie).expn_info() { - Some(ei) => sess.span_err(ei.call_site, msg), - None => sess.err(msg), - }; - } - - None => { - cgcx.handler.struct_err(msg) - .note("build without -C codegen-units for more exact errors") - .emit(); - } - } + drop(cgcx.tx.send(Message::InlineAsmError(cookie as u32, msg.to_string()))); } unsafe extern "C" fn inline_asm_handler(diag: SMDiagnosticRef, @@ -437,7 +360,9 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, mtrans: ModuleTranslation, mllvm: ModuleLlvm, config: ModuleConfig, - output_names: OutputFilenames) { + output_names: OutputFilenames) + -> Result<(), FatalError> +{ let llmod = mllvm.llmod; let llcx = mllvm.llcx; let tm = config.tm; @@ -525,25 +450,21 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, llvm::LLVMDisposePassManager(fpm); llvm::LLVMDisposePassManager(mpm); - match cgcx.lto_ctxt { - Some((sess, exported_symbols)) if sess.lto() => { - time(sess.time_passes(), "all lto passes", || { - let temp_no_opt_bc_filename = - output_names.temp_path_ext("no-opt.lto.bc", module_name); - lto::run(sess, - llmod, - tm, - exported_symbols, - &config, - &temp_no_opt_bc_filename); - }); - if config.emit_lto_bc { - let out = output_names.temp_path_ext("lto.bc", module_name); - let out = path2cstr(&out); - llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr()); - } - }, - _ => {}, + if cgcx.lto { + time(cgcx.time_passes, "all lto passes", || { + let temp_no_opt_bc_filename = + output_names.temp_path_ext("no-opt.lto.bc", module_name); + lto::run(cgcx, + llmod, + tm, + &config, + &temp_no_opt_bc_filename) + })?; + if config.emit_lto_bc { + let out = output_names.temp_path_ext("lto.bc", module_name); + let out = path2cstr(&out); + llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr()); + } } } @@ -555,16 +476,16 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, // pass manager passed to the closure should be ensured to not // escape the closure itself, and the manager should only be // used once. 
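The `with_codegen` rewrite that follows makes the helper generic over the closure's return type, so a `Result` produced while the pass manager is alive can be bubbled out with `?` instead of being swallowed. A stand-alone sketch of that resource-scoping shape, with a plain integer standing in for the LLVM pass manager:

fn with_resource<F, R>(f: F) -> R
    where F: FnOnce(u32) -> R,
{
    // Acquire the resource (LLVMCreatePassManager in the real helper), hand
    // it to the closure exactly once, and return whatever the closure
    // produced -- including a Result the caller can unwrap with `?`.
    let handle = 42;
    f(handle)
}

fn main() {
    let res: Result<(), String> = with_resource(|handle| {
        if handle == 0 {
            Err("bad pass manager handle".to_string())
        } else {
            Ok(())
        }
    });
    println!("{:?}", res);
}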
- unsafe fn with_codegen(tm: TargetMachineRef, - llmod: ModuleRef, - no_builtins: bool, - f: F) where - F: FnOnce(PassManagerRef), + unsafe fn with_codegen(tm: TargetMachineRef, + llmod: ModuleRef, + no_builtins: bool, + f: F) -> R + where F: FnOnce(PassManagerRef) -> R, { let cpm = llvm::LLVMCreatePassManager(); llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod); llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins); - f(cpm); + f(cpm) } // Change what we write and cleanup based on whether obj files are @@ -584,7 +505,8 @@ unsafe fn with_codegen(tm: TargetMachineRef, llvm::LLVMWriteBitcodeToFile(llmod, bc_out_c.as_ptr()); } - time(config.time_passes, &format!("codegen passes [{}]", cgcx.worker), || { + time(config.time_passes, &format!("codegen passes [{}]", cgcx.worker), + || -> Result<(), FatalError> { if config.emit_ir { let out = output_names.temp_path(OutputType::LlvmAssembly, module_name); let out = path2cstr(&out); @@ -607,8 +529,8 @@ unsafe fn with_codegen(tm: TargetMachineRef, }; with_codegen(tm, llmod, config.no_builtins, |cpm| { write_output_file(cgcx.handler, tm, cpm, llmod, &path, - llvm::FileType::AssemblyFile); - }); + llvm::FileType::AssemblyFile) + })?; if config.emit_obj { llvm::LLVMDisposeModule(llmod); } @@ -617,10 +539,12 @@ unsafe fn with_codegen(tm: TargetMachineRef, if write_obj { with_codegen(tm, llmod, config.no_builtins, |cpm| { write_output_file(cgcx.handler, tm, cpm, llmod, &obj_out, - llvm::FileType::ObjectFile); - }); + llvm::FileType::ObjectFile) + })?; } - }); + + Ok(()) + })?; if copy_bc_to_obj { debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out); @@ -637,6 +561,7 @@ unsafe fn with_codegen(tm: TargetMachineRef, } llvm::LLVMRustDisposeTargetMachine(tm); + Ok(()) } @@ -781,19 +706,16 @@ pub fn run_passes(sess: &Session, dump_incremental_data(&trans); } - // Process the work items, optionally using worker threads. - // NOTE: We are hardcoding a limit of worker threads for now. With - // incremental compilation we can run into situations where we would - // open hundreds of threads otherwise -- which can make things slower - // if things don't fit into memory anymore, or can cause the compiler - // to crash because of too many open file handles. See #39280 for - // some discussion on how to improve this in the future. - let num_workers = cmp::min(work_items.len() - 1, 32); - if num_workers <= 1 { - run_work_singlethreaded(sess, &trans.exported_symbols, work_items); - } else { - run_work_multithreaded(sess, work_items, num_workers); - } + let client = sess.jobserver_from_env.clone().unwrap_or_else(|| { + // Pick a "reasonable maximum" if we don't otherwise have a jobserver in + // our environment, capping out at 32 so we don't take everything down + // by hogging the process run queue. + let num_workers = cmp::min(work_items.len() - 1, 32); + Client::new(num_workers).expect("failed to create jobserver") + }); + scope(|scope| { + execute_work(sess, work_items, client, &trans.exported_symbols, scope); + }); // If in incr. comp. 
mode, preserve the `.o` files for potential re-use for mtrans in trans.modules.iter() { @@ -995,8 +917,9 @@ fn build_work_item(sess: &Session, } } -fn execute_work_item(cgcx: &CodegenContext, - work_item: WorkItem) { +fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) + -> Result<(), FatalError> +{ unsafe { match work_item.mtrans.source { ModuleSource::Translated(mllvm) => { @@ -1005,7 +928,7 @@ fn execute_work_item(cgcx: &CodegenContext, work_item.mtrans, mllvm, work_item.config, - work_item.output_names); + work_item.output_names)?; } ModuleSource::Preexisting(wp) => { let incr_comp_session_dir = cgcx.incr_comp_session_dir @@ -1033,94 +956,283 @@ fn execute_work_item(cgcx: &CodegenContext, } } } + + Ok(()) } -fn run_work_singlethreaded(sess: &Session, - exported_symbols: &ExportedSymbols, - work_items: Vec) { - let cgcx = CodegenContext::new_with_session(sess, exported_symbols); +pub enum Message { + Token(io::Result), + Diagnostic(Diagnostic), + Done { success: bool }, + InlineAsmError(u32, String), + AbortIfErrors, +} - // Since we're running single-threaded, we can pass the session to - // the proc, allowing `optimize_and_codegen` to perform LTO. - for work in work_items.into_iter().rev() { - execute_work_item(&cgcx, work); +pub struct Diagnostic { + msg: String, + code: Option, + lvl: Level, +} + +fn execute_work<'a>(sess: &'a Session, + mut work_items: Vec, + jobserver: Client, + exported_symbols: &'a ExportedSymbols, + scope: &Scope<'a>) { + let (tx, rx) = channel(); + let tx2 = tx.clone(); + + // First up, convert our jobserver into a helper thread so we can use normal + // mpsc channels to manage our messages and such. Once we've got the helper + // thread then request `n-1` tokens because all of our work items are ready + // to go. + // + // Note that the `n-1` is here because we ourselves have a token (our + // process) and we'll use that token to execute at least one unit of work. + // + // After we've requested all these tokens then we'll, when we can, get + // tokens on `rx` above which will get managed in the main loop below. + let helper = jobserver.into_helper_thread(move |token| { + drop(tx2.send(Message::Token(token))); + }).expect("failed to spawn helper thread"); + for _ in 0..work_items.len() - 1 { + helper.request_token(); } -} -fn run_work_multithreaded(sess: &Session, - work_items: Vec, - num_workers: usize) { - assert!(num_workers > 0); + // This is the "main loop" of parallel work happening for parallel codegen. + // It's here that we manage parallelism, schedule work, and work with + // messages coming from clients. + // + // Our channel `rx` created above is a channel of messages coming from our + // various worker threads. This includes the jobserver helper thread above + // as well as the work we'll spawn off here. Each turn of this loop starts + // off by trying to spawn as much work as possible. After we've done that we + // then wait for an event and dispatch accordingly once the event is + // received. We're only done once all our work items have been drained and + // nothing is running, at which point we return back up the stack. + // + // ## Parallelism management + // + // It's worth also touching on the management of parallelism here. We don't + // want to just spawn a thread per work item because while that's optimal + // parallelism it may overload a system with too many threads or violate our + // configuration for the maximum amount of cpu to use for this process. To + // manage this we use the `jobserver` crate. 
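A minimal model of the token flow described above, assuming the `jobserver` crate (~0.1): inherit a client from the environment or create one, turn it into a helper thread that forwards acquired tokens over an mpsc channel, and request one token per queued work item beyond the implicit token the process already owns. The fixed limit of 2 below is only for illustration:

extern crate jobserver;

use std::sync::mpsc::channel;
use jobserver::Client;

fn main() {
    let work_items: usize = 4;

    // Inherit a jobserver from MAKEFLAGS if there is one, otherwise create
    // our own with a small illustrative cap. `from_env` is unsafe because it
    // trusts the inherited file descriptors.
    let client = unsafe { Client::from_env() }
        .unwrap_or_else(|| Client::new(2).expect("failed to create jobserver"));

    // The helper thread acquires tokens and forwards them to us as messages.
    let (tx, rx) = channel();
    let helper = client.into_helper_thread(move |token| {
        drop(tx.send(token));
    }).expect("failed to spawn helper thread");

    // One implicit token is ours already, so request `n - 1` more.
    for _ in 0..work_items - 1 {
        helper.request_token();
    }

    for acquired in rx.iter().take(work_items - 1) {
        let _token = acquired.expect("failed to acquire jobserver token");
        // ... one unit of codegen work would run while `_token` is held;
        // dropping it at the end of the iteration releases the token.
    }
}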
+ // + // Job servers are an artifact of GNU make and are used to manage + // parallelism between processes. A jobserver is a glorified IPC semaphore + // basically. Whenever we want to run some work we acquire the semaphore, + // and whenever we're done with that work we release the semaphore. In this + // manner we can ensure that the maximum number of parallel workers is + // capped at any one point in time. + // + // The jobserver protocol is a little unique, however. We, as a running + // process, already have an ephemeral token assigned to us. We're not going + // to be doing any productive work in this thread though so we're going to + // give this token to a worker thread (there's no actual token to give, this + // is just conceptually). As a result you'll see a few `+1` and `-1` + // instances below, and it's about working with this ephemeral token. + // + // To acquire tokens we have our `helper` thread above which is just in a + // loop acquiring tokens and sending them to us. We then store all tokens + // locally in a `tokens` vector once they're acquired. Currently we don't + // literally send a token to a worker thread to assist with management of + // our "ephemeral token". + // + // As a result, our "spawn as much work as possible" basically means that we + // fill up the `running` counter up to the limit of the `tokens` list. + // Whenever we get a new token this'll mean a new unit of work is spawned, + // and then whenever a unit of work finishes we relinquish a token, if we + // had one, to maybe get re-acquired later. + // + // Note that there's a race which may mean that we acquire more tokens than + // we originally anticipated. For example let's say we have 2 units of work. + // First we request one token from the helper thread and then we + // immediately spawn one unit of work with our ephemeral token after. We may + // then finish the first piece of work before the token is acquired, but we + // can continue to spawn the second piece of work with our ephemeral token. + // Before that work finishes, however, we may acquire a token. In that case + // we actually wastefully acquired the token, so we relinquish it back to + // the jobserver. + let mut tokens = Vec::new(); + let mut running = 0; + while work_items.len() > 0 || running > 0 { - // Run some workers to process the work items. - let work_items_arc = Arc::new(Mutex::new(work_items)); - let mut diag_emitter = SharedEmitter::new(); - let mut futures = Vec::with_capacity(num_workers); + // Spin up what work we can, only doing this while we've got available + // parallelism slots and work left to spawn. + while work_items.len() > 0 && running < tokens.len() + 1 { + let item = work_items.pop().unwrap(); + let index = work_items.len(); + spawn_work(sess, exported_symbols, scope, tx.clone(), item, index); + running += 1; + } - for i in 0..num_workers { - let work_items_arc = work_items_arc.clone(); - let diag_emitter = diag_emitter.clone(); - let plugin_passes = sess.plugin_llvm_passes.borrow().clone(); - let remark = sess.opts.cg.remark.clone(); + // Relinquish accidentally acquired extra tokens + tokens.truncate(running.saturating_sub(1)); - let (tx, rx) = channel(); - let mut tx = Some(tx); - futures.push(rx); + match rx.recv().unwrap() { + // Save the token locally and the next turn of the loop will use + // this to spawn a new unit of work, or it may get dropped + // immediately if we have no more work to spawn. 
+ Message::Token(token) => { + tokens.push(token.expect("failed to acquire jobserver token")); + } - let incr_comp_session_dir = sess.incr_comp_session_dir_opt().map(|r| r.clone()); + // If a thread exits successfully then we drop a token associated + // with that worker and update our `running` count. We may later + // re-acquire a token to continue running more work. We may also not + // actually drop a token here if the worker was running with an + // "ephemeral token" + // + // Note that if the thread failed that means it panicked, so we + // abort immediately. + Message::Done { success: true } => { + drop(tokens.pop()); + running -= 1; + } + Message::Done { success: false } => { + sess.fatal("aborting due to worker thread panic"); + } - let depth = time_depth(); - thread::Builder::new().name(format!("codegen-{}", i)).spawn(move || { - set_time_depth(depth); - - let diag_handler = Handler::with_emitter(true, false, box diag_emitter); - - // Must construct cgcx inside the proc because it has non-Send - // fields. - let cgcx = CodegenContext { - lto_ctxt: None, - handler: &diag_handler, - plugin_passes: plugin_passes, - remark: remark, - worker: i, - incr_comp_session_dir: incr_comp_session_dir - }; - - loop { - // Avoid holding the lock for the entire duration of the match. - let maybe_work = work_items_arc.lock().unwrap().pop(); - match maybe_work { - Some(work) => { - execute_work_item(&cgcx, work); - - // Make sure to fail the worker so the main thread can - // tell that there were errors. - cgcx.handler.abort_if_errors(); + // Our worker wants us to emit an error message, so get ahold of our + // `sess` and print it out + Message::Diagnostic(diag) => { + let handler = sess.diagnostic(); + match diag.code { + Some(ref code) => { + handler.emit_with_code(&MultiSpan::new(), + &diag.msg, + &code, + diag.lvl); } - None => break, + None => { + handler.emit(&MultiSpan::new(), + &diag.msg, + diag.lvl); + } + } + } + Message::InlineAsmError(cookie, msg) => { + match Mark::from_u32(cookie).expn_info() { + Some(ei) => sess.span_err(ei.call_site, &msg), + None => sess.err(&msg), } } - tx.take().unwrap().send(()).unwrap(); - }).unwrap(); + // Sent to us after a worker sends us a batch of error messages, and + // it's the point at which we check for errors. + Message::AbortIfErrors => sess.diagnostic().abort_if_errors(), + } } - let mut panicked = false; - for rx in futures { - match rx.recv() { - Ok(()) => {}, - Err(_) => { - panicked = true; - }, + // Just in case, check this on the way out. + sess.diagnostic().abort_if_errors(); +} + +struct SharedEmitter { + tx: Sender, +} + +impl Emitter for SharedEmitter { + fn emit(&mut self, db: &DiagnosticBuilder) { + drop(self.tx.send(Message::Diagnostic(Diagnostic { + msg: db.message(), + code: db.code.clone(), + lvl: db.level, + }))); + for child in &db.children { + drop(self.tx.send(Message::Diagnostic(Diagnostic { + msg: child.message(), + code: None, + lvl: child.level, + }))); } - // Display any new diagnostics. 
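A simplified, std-only model of the scheduling loop above: `tokens` holds the extra parallelism slots, the `+ 1` is the process's own implicit token, and finished workers report back over the channel. Real tokens come from the jobserver helper thread rather than the `Slot` stand-in used here, and they keep arriving as long as requests are outstanding:

use std::sync::mpsc::channel;
use std::thread;

struct Slot; // stand-in for an acquired jobserver token

enum Message {
    Token(Slot),
    Done,
}

fn main() {
    let (tx, rx) = channel();
    let mut work_items: Vec<u32> = (0..6).collect();
    let mut tokens: Vec<Slot> = Vec::new();
    let mut running = 0;

    // Pretend two extra tokens arrive from the helper thread.
    tx.send(Message::Token(Slot)).unwrap();
    tx.send(Message::Token(Slot)).unwrap();

    while !work_items.is_empty() || running > 0 {
        // Spawn as much work as the available slots allow (tokens + 1).
        while !work_items.is_empty() && running < tokens.len() + 1 {
            let item = work_items.pop().unwrap();
            let tx = tx.clone();
            thread::spawn(move || {
                println!("codegen for module {}", item);
                drop(tx.send(Message::Done));
            });
            running += 1;
        }

        // Give back any tokens we no longer have work for.
        tokens.truncate(running.saturating_sub(1));

        match rx.recv().unwrap() {
            Message::Token(t) => tokens.push(t),
            Message::Done => {
                drop(tokens.pop());
                running -= 1;
            }
        }
    }
}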
- diag_emitter.dump(sess.diagnostic()); - } - if panicked { - sess.fatal("aborting due to worker thread panic"); + drop(self.tx.send(Message::AbortIfErrors)); } } +fn spawn_work<'a>(sess: &'a Session, + exported_symbols: &'a ExportedSymbols, + scope: &Scope<'a>, + tx: Sender, + work: WorkItem, + idx: usize) { + let plugin_passes = sess.plugin_llvm_passes.borrow().clone(); + let remark = sess.opts.cg.remark.clone(); + let incr_comp_session_dir = sess.incr_comp_session_dir_opt().map(|r| r.clone()); + let depth = time_depth(); + let lto = sess.lto(); + let crate_types = sess.crate_types.borrow().clone(); + let mut each_linked_rlib_for_lto = Vec::new(); + drop(link::each_linked_rlib(sess, &mut |cnum, path| { + // `#![no_builtins]` crates don't participate in LTO. + if sess.cstore.is_no_builtins(cnum) { + return + } + each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); + })); + let time_passes = sess.time_passes(); + let no_landing_pads = sess.no_landing_pads(); + let opts = &sess.opts; + + scope.spawn(move || { + set_time_depth(depth); + + // Set up a destructor which will fire off a message that we're done as + // we exit. + struct Bomb { + tx: Sender, + success: bool, + } + impl Drop for Bomb { + fn drop(&mut self) { + drop(self.tx.send(Message::Done { success: self.success })); + } + } + let mut bomb = Bomb { + tx: tx.clone(), + success: false, + }; + + // Set up our non-`Send` `CodegenContext` now that we're in a helper + // thread and have all our info available to us. + let emitter = SharedEmitter { tx: tx.clone() }; + let diag_handler = Handler::with_emitter(true, false, Box::new(emitter)); + + let cgcx = CodegenContext { + crate_types: crate_types, + each_linked_rlib_for_lto: each_linked_rlib_for_lto, + lto: lto, + no_landing_pads: no_landing_pads, + opts: opts, + time_passes: time_passes, + exported_symbols: exported_symbols, + handler: &diag_handler, + plugin_passes: plugin_passes, + remark: remark, + worker: idx, + incr_comp_session_dir: incr_comp_session_dir, + tx: tx.clone(), + }; + + // Execute the work itself, and if it finishes successfully then flag + // ourselves as a success as well. + // + // Note that we ignore the result coming out of `execute_work_item` + // which will tell us if the worker failed with a `FatalError`. If that + // has happened, however, then a diagnostic was sent off to the main + // thread, along with an `AbortIfErrors` message. In that case the main + // thread is already exiting anyway most likely. + // + // In any case, there's no need for us to take further action here, so + // we just ignore the result and then send off our message saying that + // we're done, which if `execute_work_item` failed is unlikely to be + // seen by the main thread, but hey we might as well try anyway. 
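The `Bomb` guard set up in `spawn_work` above reports completion even if the worker panics, because `Drop` still runs while the thread unwinds; the main loop then treats `success: false` as a worker panic. A small self-contained version of that drop-guard pattern (the `Message` enum is trimmed down to the one variant it needs):

use std::sync::mpsc::{channel, Sender};
use std::thread;

enum Message {
    Done { success: bool },
}

struct Bomb {
    tx: Sender<Message>,
    success: bool,
}

impl Drop for Bomb {
    fn drop(&mut self) {
        // Runs on normal exit and on panic-driven unwinding alike.
        drop(self.tx.send(Message::Done { success: self.success }));
    }
}

fn main() {
    let (tx, rx) = channel();

    thread::spawn(move || {
        let mut bomb = Bomb { tx: tx, success: false };
        // ... the real worker runs optimize/codegen for one module here; if
        // it panicked, `bomb` would still be dropped with success = false.
        bomb.success = true;
    });

    match rx.recv().unwrap() {
        Message::Done { success } => println!("worker finished, success: {}", success),
    }
}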
+ drop(execute_work_item(&cgcx, work).is_err()); + bomb.success = true; + }); +} + pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) { let (pname, mut cmd, _) = get_linker(sess); diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 41faf1fa768..859c6574787 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -40,6 +40,7 @@ use syntax_pos::symbol::Symbol; extern crate flate2; +extern crate crossbeam; extern crate libc; extern crate owning_ref; #[macro_use] extern crate rustc; @@ -52,6 +53,7 @@ #[macro_use] #[no_link] extern crate rustc_bitflags; +extern crate jobserver; #[macro_use] extern crate log; #[macro_use] extern crate syntax; diff --git a/src/test/compile-fail-fulldeps/derive-no-std-not-supported.rs b/src/test/compile-fail-fulldeps/derive-no-std-not-supported.rs index 6ae5544d686..1e97cb07f89 100644 --- a/src/test/compile-fail-fulldeps/derive-no-std-not-supported.rs +++ b/src/test/compile-fail-fulldeps/derive-no-std-not-supported.rs @@ -10,7 +10,6 @@ #![no_std] -extern crate rand; extern crate serialize as rustc_serialize; #[derive(RustcEncodable)] //~ ERROR this trait cannot be derived diff --git a/src/test/compile-fail/asm-src-loc-codegen-units.rs b/src/test/compile-fail/asm-src-loc-codegen-units.rs index df1a6d52f57..6c5c5b00776 100644 --- a/src/test/compile-fail/asm-src-loc-codegen-units.rs +++ b/src/test/compile-fail/asm-src-loc-codegen-units.rs @@ -7,17 +7,16 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -// + // WONTFIX(#20184) Needs landing pads (not present in stage1) or the compiler hangs. // ignore-stage1 // compile-flags: -C codegen-units=2 -// error-pattern: build without -C codegen-units for more exact errors // ignore-emscripten #![feature(asm)] fn main() { unsafe { - asm!("nowayisthisavalidinstruction"); + asm!("nowayisthisavalidinstruction"); //~ ERROR instruction } } diff --git a/src/test/compile-fail/auxiliary/issue-36881-aux.rs b/src/test/compile-fail/auxiliary/issue-36881-aux.rs new file mode 100644 index 00000000000..33ac11feb2d --- /dev/null +++ b/src/test/compile-fail/auxiliary/issue-36881-aux.rs @@ -0,0 +1,11 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub trait Foo {} diff --git a/src/test/compile-fail/auxiliary/lint_unused_extern_crate2.rs b/src/test/compile-fail/auxiliary/lint_unused_extern_crate2.rs new file mode 100644 index 00000000000..b61667cfd88 --- /dev/null +++ b/src/test/compile-fail/auxiliary/lint_unused_extern_crate2.rs @@ -0,0 +1,11 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
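`spawn_work` above runs each worker on a crossbeam scoped thread (hence the `extern crate crossbeam` added to librustc_trans here), because the workers borrow per-session data such as the exported symbols for a lifetime that plain `std::thread::spawn` cannot express. A small sketch of that borrowing pattern, assuming crossbeam ~0.2 as pinned in Cargo.toml above:

extern crate crossbeam;

fn main() {
    // Data owned by the "session": the workers below only borrow it.
    let exported_symbols = vec!["main".to_string(), "foo".to_string()];
    let work_items = vec![1, 2, 3];

    crossbeam::scope(|scope| {
        for item in &work_items {
            let syms = &exported_symbols;
            // The scope guarantees these threads join before the borrows end.
            scope.spawn(move || {
                println!("codegen unit {} sees {} exported symbols",
                         item, syms.len());
            });
        }
    });
}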
+ +pub fn foo() {} diff --git a/src/test/compile-fail/auxiliary/lint_unused_extern_crate3.rs b/src/test/compile-fail/auxiliary/lint_unused_extern_crate3.rs new file mode 100644 index 00000000000..b61667cfd88 --- /dev/null +++ b/src/test/compile-fail/auxiliary/lint_unused_extern_crate3.rs @@ -0,0 +1,11 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub fn foo() {} diff --git a/src/test/compile-fail/auxiliary/lint_unused_extern_crate4.rs b/src/test/compile-fail/auxiliary/lint_unused_extern_crate4.rs new file mode 100644 index 00000000000..fc4bca865c9 --- /dev/null +++ b/src/test/compile-fail/auxiliary/lint_unused_extern_crate4.rs @@ -0,0 +1,9 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. diff --git a/src/test/compile-fail/issue-36881.rs b/src/test/compile-fail/issue-36881.rs index d75ac0c7f2e..e05dc066199 100644 --- a/src/test/compile-fail/issue-36881.rs +++ b/src/test/compile-fail/issue-36881.rs @@ -8,9 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rand)] +// aux-build:issue-36881-aux.rs fn main() { - extern crate rand; - use rand::Rng; //~ ERROR unresolved import + extern crate issue_36881_aux; + use issue_36881_aux::Foo; //~ ERROR unresolved import } diff --git a/src/test/compile-fail/lint-unused-extern-crate.rs b/src/test/compile-fail/lint-unused-extern-crate.rs index 010c55afb2b..b12ef6277bb 100644 --- a/src/test/compile-fail/lint-unused-extern-crate.rs +++ b/src/test/compile-fail/lint-unused-extern-crate.rs @@ -9,34 +9,34 @@ // except according to those terms. // aux-build:lint_unused_extern_crate.rs +// aux-build:lint_unused_extern_crate2.rs +// aux-build:lint_unused_extern_crate3.rs +// aux-build:lint_unused_extern_crate4.rs #![deny(unused_extern_crates)] #![allow(unused_variables)] #![allow(deprecated)] -#![feature(alloc)] -#![feature(libc)] -#![feature(rand)] -extern crate libc; //~ ERROR: unused extern crate +extern crate lint_unused_extern_crate4; //~ ERROR: unused extern crate -extern crate alloc as collecs; // no error, it is used +extern crate lint_unused_extern_crate3; // no error, it is used -extern crate rand; // no error, the use marks it as used - // even if imported objects aren't used +extern crate lint_unused_extern_crate2; // no error, the use marks it as used + // even if imported objects aren't used extern crate lint_unused_extern_crate as other; // no error, the use * marks it as used #[allow(unused_imports)] -use rand::isaac::IsaacRng; +use lint_unused_extern_crate2::foo as bar; use other::*; mod foo { - // Test that this is unused even though an earler `extern crate rand` is used. - extern crate rand; //~ ERROR unused extern crate + // Test that this is unused even though an earler `extern crate` is used. 
+ extern crate lint_unused_extern_crate2; //~ ERROR unused extern crate } fn main() { - let x: collecs::vec::Vec = Vec::new(); + lint_unused_extern_crate3::foo(); let y = foo(); } diff --git a/src/test/run-pass/auxiliary/allocator-dummy.rs b/src/test/run-pass/auxiliary/allocator-dummy.rs index a54233535a4..f4a32a93dfb 100644 --- a/src/test/run-pass/auxiliary/allocator-dummy.rs +++ b/src/test/run-pass/auxiliary/allocator-dummy.rs @@ -10,11 +10,13 @@ // no-prefer-dynamic -#![feature(allocator, core_intrinsics)] +#![feature(allocator, core_intrinsics, panic_unwind)] #![allocator] #![crate_type = "rlib"] #![no_std] +extern crate unwind; + pub static mut HITS: usize = 0; type size_t = usize;
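For context on how the inherited jobserver reaches rustc in the first place: a coordinating build process creates the client and configures each child command with it, which is what the new `Client::from_env` call in `Session` then finds. A hedged sketch of that parent side, again assuming the `jobserver` crate (~0.1); the `rustc --version` invocation is only a placeholder child:

extern crate jobserver;

use std::process::Command;
use jobserver::Client;

fn main() {
    // The coordinating process owns the shared token pool ...
    let client = Client::new(8).expect("failed to create jobserver");

    let mut cmd = Command::new("rustc");
    cmd.arg("--version");
    // ... and advertises it to the child through the environment, so the
    // child's `Client::from_env` can pick up the same parallelism budget.
    client.configure(&mut cmd);

    let status = cmd.status().expect("failed to spawn rustc");
    println!("child exited with {}", status);
}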