Fix Windows job management #1511

Merged 1 commit on Sep 19, 2018
187 changes: 32 additions & 155 deletions src/rustup-cli/job.rs
@@ -25,6 +25,9 @@ pub fn setup() -> Option<Setup> {

#[cfg(unix)]
mod imp {
use std::env;
use libc;

pub type Setup = ();

pub unsafe fn setup() -> Option<()> {
@@ -36,19 +39,23 @@
mod imp {
extern crate winapi;

use std::ffi::OsString;
use std::io;
use std::mem;
use std::os::windows::prelude::*;
use winapi::shared::*;
use winapi::um::*;
use std::ptr;

use self::winapi::shared::minwindef::*;
use self::winapi::um::handleapi::*;
use self::winapi::um::jobapi2::*;
use self::winapi::um::processthreadsapi::*;
use self::winapi::um::winnt::*;
use self::winapi::um::winnt::HANDLE;

pub struct Setup {
job: Handle,
}

pub struct Handle {
inner: ntdef::HANDLE,
inner: HANDLE,
}

fn last_err() -> io::Error {
@@ -65,67 +72,53 @@ mod imp {
// use job objects, so we instead just ignore errors and assume that
// we're otherwise part of someone else's job object in this case.

let job = jobapi2::CreateJobObjectW(0 as *mut _, 0 as *const _);
let job = CreateJobObjectW(ptr::null_mut(), ptr::null());
if job.is_null() {
return None;
}
let job = Handle { inner: job };

// Indicate that when all handles to the job object are gone that all
// process in the object should be killed. Note that this includes our
// entire process tree by default because we've added ourselves and and
// entire process tree by default because we've added ourselves and
// our children will reside in the job once we spawn a process.
let mut info: winnt::JOBOBJECT_EXTENDED_LIMIT_INFORMATION;
let mut info: JOBOBJECT_EXTENDED_LIMIT_INFORMATION;
info = mem::zeroed();
info.BasicLimitInformation.LimitFlags = winnt::JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
let r = jobapi2::SetInformationJobObject(
info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
let r = SetInformationJobObject(
job.inner,
winnt::JobObjectExtendedLimitInformation,
&mut info as *mut _ as minwindef::LPVOID,
mem::size_of_val(&info) as minwindef::DWORD,
JobObjectExtendedLimitInformation,
&mut info as *mut _ as LPVOID,
mem::size_of_val(&info) as DWORD,
);
if r == 0 {
return None;
}

// Assign our process to this job object, meaning that our children will
// now live or die based on our existence.
let me = processthreadsapi::GetCurrentProcess();
let r = jobapi2::AssignProcessToJobObject(job.inner, me);
let me = GetCurrentProcess();
let r = AssignProcessToJobObject(job.inner, me);
if r == 0 {
return None;
}

Some(Setup { job: job })
Some(Setup { job })
}

impl Drop for Setup {
fn drop(&mut self) {
// This is a litte subtle. By default if we are terminated then all
// processes in our job object are terminated as well, but we
// intentionally want to whitelist some processes to outlive our job
// object (see below).
//
// To allow for this, we manually kill processes instead of letting
// the job object kill them for us. We do this in a loop to handle
// processes spawning other processes.
//
// Finally once this is all done we know that the only remaining
// ones are ourselves and the whitelisted processes. The destructor
// here then configures our job object to *not* kill everything on
// close, then closes the job object.
// On normal exits (not ctrl-c), we don't want to kill any child
// processes. The destructor here configures our job object to
// *not* kill everything on close, then closes the job object.
unsafe {
while self.kill_remaining() {
info!("killed some, going for more");
}

let mut info: winnt::JOBOBJECT_EXTENDED_LIMIT_INFORMATION;
let mut info: JOBOBJECT_EXTENDED_LIMIT_INFORMATION;
info = mem::zeroed();
let r = jobapi2::SetInformationJobObject(
let r = SetInformationJobObject(
self.job.inner,
winnt::JobObjectExtendedLimitInformation,
&mut info as *mut _ as minwindef::LPVOID,
mem::size_of_val(&info) as minwindef::DWORD,
JobObjectExtendedLimitInformation,
&mut info as *mut _ as LPVOID,
mem::size_of_val(&info) as DWORD,
);
if r == 0 {
info!("failed to configure job object to defaults: {}", last_err());
@@ -134,126 +127,10 @@ mod imp {
}
}

impl Setup {
unsafe fn kill_remaining(&mut self) -> bool {
#[repr(C)]
struct Jobs {
header: winnt::JOBOBJECT_BASIC_PROCESS_ID_LIST,
list: [basetsd::ULONG_PTR; 1024],
}

let mut jobs: Jobs = mem::zeroed();
let r = jobapi2::QueryInformationJobObject(
self.job.inner,
winnt::JobObjectBasicProcessIdList,
&mut jobs as *mut _ as minwindef::LPVOID,
mem::size_of_val(&jobs) as minwindef::DWORD,
0 as *mut _,
);
if r == 0 {
info!("failed to query job object: {}", last_err());
return false;
}

let mut killed = false;
let list = &jobs.list[..jobs.header.NumberOfProcessIdsInList as usize];
assert!(list.len() > 0);

let list = list.iter()
.filter(|&&id| {
// let's not kill ourselves
id as minwindef::DWORD != processthreadsapi::GetCurrentProcessId()
})
.filter_map(|&id| {
// Open the process with the necessary rights, and if this
// fails then we probably raced with the process exiting so we
// ignore the problem.
let flags = winnt::PROCESS_QUERY_INFORMATION | winnt::PROCESS_TERMINATE
| winnt::SYNCHRONIZE;
let p = processthreadsapi::OpenProcess(
flags,
minwindef::FALSE,
id as minwindef::DWORD,
);
if p.is_null() {
None
} else {
Some(Handle { inner: p })
}
})
.filter(|p| {
// Test if this process was actually in the job object or not.
// If it's not then we likely raced with something else
// recycling this PID, so we just skip this step.
let mut res = 0;
let r = jobapi::IsProcessInJob(p.inner, self.job.inner, &mut res);
if r == 0 {
info!("failed to test is process in job: {}", last_err());
return false;
}
res == minwindef::TRUE
});

for p in list {
// Load the file which this process was spawned from. We then
// later use this for identification purposes.
let mut buf = [0; 1024];
let r = psapi::GetProcessImageFileNameW(
p.inner,
buf.as_mut_ptr(),
buf.len() as minwindef::DWORD,
);
if r == 0 {
info!("failed to get image name: {}", last_err());
continue;
}
let s = OsString::from_wide(&buf[..r as usize]);
info!("found remaining: {:?}", s);

// And here's where we find the whole purpose for this
// function! Currently, our only whitelisted process is
// `mspdbsrv.exe`, and more details about that can be found
// here:
//
// https://github.com/rust-lang/rust/issues/33145
//
// The gist of it is that all builds on one machine use the
// same `mspdbsrv.exe` instance. If we were to kill this
// instance then we could erroneously cause other builds to
// fail.
if let Some(s) = s.to_str() {
if s.contains("mspdbsrv") {
info!("\toops, this is mspdbsrv");
continue;
}
}

// Ok, this isn't mspdbsrv, let's kill the process. After we
// kill it we wait on it to ensure that the next time around in
// this function we're not going to see it again.
let r = processthreadsapi::TerminateProcess(p.inner, 1);
if r == 0 {
info!("\tfailed to kill subprocess: {}", last_err());
info!("\tassuming subprocess is dead...");
} else {
info!("\tterminated subprocess");
}
let r = synchapi::WaitForSingleObject(p.inner, winbase::INFINITE);
if r != 0 {
info!("failed to wait for process to die: {}", last_err());
return false;
}
killed = true;
}

return killed;
}
}

impl Drop for Handle {
fn drop(&mut self) {
unsafe {
handleapi::CloseHandle(self.inner);
CloseHandle(self.inner);
}
}
}
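For readers skimming the diff above, here is a condensed, standalone sketch of the job-object pattern the rewritten job.rs uses (assuming the winapi 0.3 crate with the relevant jobapi2/processthreadsapi/handleapi/winnt features enabled; error checks trimmed). It illustrates the technique and is not code taken verbatim from this PR:

```rust
extern crate winapi;

use std::mem;
use std::ptr;

use winapi::shared::minwindef::{DWORD, LPVOID};
use winapi::um::handleapi::CloseHandle;
use winapi::um::jobapi2::{AssignProcessToJobObject, CreateJobObjectW, SetInformationJobObject};
use winapi::um::processthreadsapi::GetCurrentProcess;
use winapi::um::winnt::{
    JobObjectExtendedLimitInformation, HANDLE, JOBOBJECT_EXTENDED_LIMIT_INFORMATION,
    JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE,
};

struct JobGuard(HANDLE);

impl Drop for JobGuard {
    fn drop(&mut self) {
        unsafe {
            // Normal exit: clear the limit flags so closing the handle
            // does not take our children down with us, then close it.
            let mut info: JOBOBJECT_EXTENDED_LIMIT_INFORMATION = mem::zeroed();
            SetInformationJobObject(
                self.0,
                JobObjectExtendedLimitInformation,
                &mut info as *mut _ as LPVOID,
                mem::size_of_val(&info) as DWORD,
            );
            CloseHandle(self.0);
        }
    }
}

unsafe fn setup() -> Option<JobGuard> {
    // Create an anonymous job object.
    let job = CreateJobObjectW(ptr::null_mut(), ptr::null());
    if job.is_null() {
        return None;
    }

    // Ask the kernel to kill every process in the job when the last
    // handle to it is closed (e.g. when rustup dies abruptly).
    let mut info: JOBOBJECT_EXTENDED_LIMIT_INFORMATION = mem::zeroed();
    info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
    SetInformationJobObject(
        job,
        JobObjectExtendedLimitInformation,
        &mut info as *mut _ as LPVOID,
        mem::size_of_val(&info) as DWORD,
    );

    // Put ourselves (and therefore every child we spawn later) in the job.
    AssignProcessToJobObject(job, GetCurrentProcess());

    Some(JobGuard(job))
}

fn main() {
    let _guard = unsafe { setup() };
    // Spawn proxied toolchain commands here; they inherit job membership.
}
```

While the guard is alive, any child rustup spawns belongs to the job: if rustup is killed abruptly, the kernel closes the last handle and terminates the whole tree, whereas a normal exit runs Drop first, clearing the kill-on-close flag so children are left alone.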
65 changes: 34 additions & 31 deletions src/rustup-cli/proxy_mode.rs
@@ -1,51 +1,54 @@
use common::set_globals;
use rustup::Cfg;
use errors::*;
use rustup_utils::utils;
use rustup_utils::utils::{self, ExitCode};
use rustup::command::run_command_for_dir;
use std::env;
use std::ffi::OsString;
use std::path::PathBuf;
use std::process;
use job;

pub fn main() -> Result<()> {
::self_update::cleanup_self_updater()?;

let _setup = job::setup();

let mut args = env::args();

let arg0 = args.next().map(PathBuf::from);
let arg0 = arg0.as_ref()
.and_then(|a| a.file_name())
.and_then(|a| a.to_str());
let ref arg0 = arg0.ok_or(ErrorKind::NoExeName)?;

// Check for a toolchain specifier.
let arg1 = args.next();
let toolchain = arg1.as_ref().and_then(|arg1| {
if arg1.starts_with('+') {
Some(&arg1[1..])
let ExitCode(c) = {
let _setup = job::setup();

let mut args = env::args();

let arg0 = args.next().map(PathBuf::from);
let arg0 = arg0.as_ref()
.and_then(|a| a.file_name())
.and_then(|a| a.to_str());
let ref arg0 = arg0.ok_or(ErrorKind::NoExeName)?;

// Check for a toolchain specifier.
let arg1 = args.next();
let toolchain = arg1.as_ref().and_then(|arg1| {
if arg1.starts_with('+') {
Some(&arg1[1..])
} else {
None
}
});

// Build command args now while we know whether or not to skip arg 1.
let cmd_args: Vec<_> = if toolchain.is_none() {
env::args_os().skip(1).collect()
} else {
None
}
});

// Build command args now while we know whether or not to skip arg 1.
let cmd_args: Vec<_> = if toolchain.is_none() {
env::args_os().skip(1).collect()
} else {
env::args_os().skip(2).collect()
};
env::args_os().skip(2).collect()
};

let cfg = set_globals(false)?;
cfg.check_metadata_version()?;
direct_proxy(&cfg, arg0, toolchain, &cmd_args)?;
let cfg = set_globals(false)?;
cfg.check_metadata_version()?;
direct_proxy(&cfg, arg0, toolchain, &cmd_args)?
};

Ok(())
process::exit(c)
}

fn direct_proxy(cfg: &Cfg, arg0: &str, toolchain: Option<&str>, args: &[OsString]) -> Result<()> {
fn direct_proxy(cfg: &Cfg, arg0: &str, toolchain: Option<&str>, args: &[OsString]) -> Result<ExitCode> {
let cmd = match toolchain {
None => cfg.create_command_for_dir(&utils::current_dir()?, arg0)?,
Some(tc) => cfg.create_command_for_toolchain(tc, false, arg0)?,
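The new block scoping in proxy_mode.rs exists because std::process::exit terminates the process without running destructors. The job guard returned by job::setup() (and anything else with a Drop impl) therefore has to fall out of scope before the child's exit code is forwarded to the OS. A minimal sketch of that pattern, using hypothetical stand-ins for job::setup() and run_command_for_dir():

```rust
use std::process;

struct JobGuard;

impl Drop for JobGuard {
    fn drop(&mut self) {
        // In rustup this is where the job object is reset and closed;
        // process::exit would silently skip this if the guard were still
        // alive when it was called.
        println!("job guard dropped");
    }
}

// Hypothetical stand-in for run_command_for_dir(), which in the PR
// returns the child's status wrapped in ExitCode.
fn run_child() -> i32 {
    0
}

fn main() {
    let code = {
        let _guard = JobGuard;
        run_child()
    }; // _guard is dropped here, before we exit

    // process::exit never unwinds and never runs destructors, so it has
    // to come after the scope above has closed.
    process::exit(code);
}
```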
10 changes: 6 additions & 4 deletions src/rustup-cli/rustup_mode.rs
@@ -5,10 +5,10 @@ use rustup::settings::TelemetryMode;
use errors::*;
use rustup_dist::manifest::Component;
use rustup_dist::dist::{PartialTargetTriple, PartialToolchainDesc, TargetTriple};
use rustup_utils::utils;
use rustup_utils::utils::{self, ExitCode};
use self_update;
use std::path::Path;
use std::process::Command;
use std::process::{self, Command};
use std::iter;
use std::error::Error;
use term2;
@@ -606,12 +606,14 @@ fn run(cfg: &Cfg, m: &ArgMatches) -> Result<()> {
let args: Vec<_> = args.collect();
let cmd = cfg.create_command_for_toolchain(toolchain, m.is_present("install"), args[0])?;

Ok(command::run_command_for_dir(
let ExitCode(c) = command::run_command_for_dir(
cmd,
args[0],
&args[1..],
&cfg,
)?)
)?;

process::exit(c)
}

fn which(cfg: &Cfg, m: &ArgMatches) -> Result<()> {
2 changes: 2 additions & 0 deletions src/rustup-utils/src/utils.rs
@@ -19,6 +19,8 @@ use url::Url;
pub use raw::{find_cmd, has_cmd, if_not_empty, is_directory, is_file, path_exists, prefix_arg,
random_string};

pub struct ExitCode(pub i32);

pub fn ensure_dir_exists(
name: &'static str,
path: &Path,
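The only change to rustup-utils is the new ExitCode newtype: a thin wrapper that lets run_command_for_dir hand the child's exit status back up to proxy_mode.rs and rustup_mode.rs, which destructure it and pass it to process::exit. A small usage sketch (run_command here is a hypothetical stand-in for the real helper):

```rust
pub struct ExitCode(pub i32);

// Hypothetical stand-in for rustup's run_command_for_dir().
fn run_command() -> ExitCode {
    ExitCode(42)
}

fn main() {
    // Callers destructure the wrapper and forward the code to the OS,
    // mirroring `let ExitCode(c) = ...; process::exit(c)` in the diffs above.
    let ExitCode(c) = run_command();
    std::process::exit(c);
}
```

Wrapping the i32 in a newtype keeps "exit the process with this code" distinct from ordinary integers in the helpers' return types.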