From 195176124c685d116055ed20c975b81bd2a5978f Mon Sep 17 00:00:00 2001 From: Alex Huszagh Date: Sun, 12 Jun 2022 18:41:23 -0500 Subject: [PATCH] Initial commit with basic support for remote docker. This supports the volume-based structure, and uses some nice optimizations to ensure that only the desired toolchain and cargo items are copied over. It also uses drops to ensure scoped deletion of resources, to avoid complex logic ensuring their cleanup. It also supports persistent data volumes, through `cross-util`. In order to setup a persistent data volume, use: ```bash cross-util create-crate-volume --target arm-unknown-linux-gnueabihf ``` Make sure you provide your `DOCKER_HOST` or correct engine type to ensure these are being made on the remote host. Then, run your command as before: ```bash CROSS_REMOTE=true cross build --target arm-unknown-linux-gnueabihf ``` Finally, you can clean up the generated volume using: ```bash cross-util remove-crate-volume --target arm-unknown-linux-gnueabihf ``` A few other utilities are present in `cross-util`: - `list-volumes`: list all volumes created by cross. - `remove-volumes`: remove all volumes created by cross. - `prune-volumes`: prune all volumes unassociated with a container. - `list-containers`: list all active containers created by cross. - `remove-containers`: remove all active containers created by cross. The initial implementation was done by Marc Schreiber, https://github.com/schrieveslaach. Fixes #248. Fixes #273. Closes #449. --- CHANGELOG.md | 1 + Cargo.lock | 58 +++ Cargo.toml | 2 + src/bin/cross-util.rs | 393 +++++++++++++++- src/cargo.rs | 7 +- src/cli.rs | 5 + src/docker.rs | 1047 +++++++++++++++++++++++++++++++++++------ src/lib.rs | 63 ++- src/rustc.rs | 4 + 9 files changed, 1400 insertions(+), 180 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d5c18367..6a547fc2c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). 
### Added +- #785 - added support for remote container engines through data volumes. also adds in utility to commands to create and remove persistent data volumes. - #775 - forward Cargo exit code to host - #772 - added `CROSS_CONTAINER_OPTS` environment variable to replace `DOCKER_OPTS`. - #767 - added the `cross-util` and `cross-dev` commands. diff --git a/Cargo.lock b/Cargo.lock index 8041c8d5e..a487bdc76 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -161,8 +161,10 @@ dependencies = [ "serde_ignored", "serde_json", "serde_yaml", + "sha1_smol", "shell-escape", "shell-words", + "tempfile", "toml", "walkdir", "which", @@ -191,6 +193,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "fastrand" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +dependencies = [ + "instant", +] + [[package]] name = "gimli" version = "0.26.1" @@ -243,6 +254,15 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "itoa" version = "1.0.1" @@ -368,6 +388,15 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "redox_syscall" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +dependencies = [ + "bitflags", +] + [[package]] name = "regex" version = "1.5.5" @@ -385,6 +414,15 @@ version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -473,6 +511,12 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "sha1_smol" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" + [[package]] name = "sharded-slab" version = "0.1.4" @@ -511,6 +555,20 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "tempfile" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +dependencies = [ + "cfg-if", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + "winapi", +] + [[package]] name = "termcolor" version = "1.1.3" diff --git a/Cargo.toml b/Cargo.toml index 75a7b0070..f83e021c5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,6 +28,8 @@ serde_json = "1" serde_yaml = { version = "0.8", optional = true } serde_ignored = "0.1.2" shell-words = "1.1.0" +sha1_smol = "1.0.0" +tempfile = "3.3.0" [target.'cfg(not(windows))'.dependencies] nix = { version = "0.24", default-features = false, features = ["user"] } diff --git a/src/bin/cross-util.rs b/src/bin/cross-util.rs index 21169e3e3..40233546b 100644 --- a/src/bin/cross-util.rs +++ b/src/bin/cross-util.rs @@ -3,8 +3,9 @@ use std::path::{Path, PathBuf}; use std::process::Command; +use atty::Stream; use clap::{Parser, Subcommand}; -use cross::CommandExt; +use cross::{CommandExt, VersionMetaExt}; // known image prefixes, with their registry // the docker.io registry can also be implicit @@ -51,6 +52,99 @@ enum Commands { #[clap(long)] engine: Option, }, + /// List cross data volumes in local storage. + ListVolumes { + /// Provide verbose diagnostic output. + #[clap(short, long)] + verbose: bool, + /// Container engine (such as docker or podman). 
+ #[clap(long)] + engine: Option, + }, + /// Remove cross data volumes in local storage. + RemoveVolumes { + /// Provide verbose diagnostic output. + #[clap(short, long)] + verbose: bool, + /// Force removal of volumes. + #[clap(short, long)] + force: bool, + /// Remove volumes. Default is a dry run. + #[clap(short, long)] + execute: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + engine: Option, + }, + /// Prune volumes not used by any container. + PruneVolumes { + /// Provide verbose diagnostic output. + #[clap(short, long)] + verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + engine: Option, + }, + /// Create a persistent data volume for the current crate. + CreateCrateVolume { + /// Triple for the target platform. + #[clap(long)] + target: String, + /// Toolchain name/version to use (such as stable or 1.59.0). + #[clap(long)] + toolchain: Option, + /// If cross is running inside a container. + #[clap(short, long)] + docker_in_docker: bool, + /// Provide verbose diagnostic output. + #[clap(short, long)] + verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + engine: Option, + }, + /// Remove a persistent data volume for the current crate. + RemoveCrateVolume { + /// Triple for the target platform. + #[clap(long)] + target: String, + /// Toolchain name/version to use (such as stable or 1.59.0). + #[clap(long)] + toolchain: Option, + /// If cross is running inside a container. + #[clap(short, long)] + docker_in_docker: bool, + /// Provide verbose diagnostic output. + #[clap(short, long)] + verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + engine: Option, + }, + /// List cross containers in local storage. + ListContainers { + /// Provide verbose diagnostic output. + #[clap(short, long)] + verbose: bool, + /// Container engine (such as docker or podman). 
+ #[clap(long)] + engine: Option, + }, + /// Stop and remove cross containers in local storage. + RemoveContainers { + /// Provide verbose diagnostic output. + #[clap(short, long)] + verbose: bool, + /// Force removal of containers. + #[clap(short, long)] + force: bool, + /// Remove containers. Default is a dry run. + #[clap(short, long)] + execute: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + engine: Option, + }, } #[derive(Debug, PartialOrd, Ord, PartialEq, Eq)] @@ -204,6 +298,239 @@ fn remove_target_images( remove_images(engine, &ids, verbose, force, execute) } +fn get_cross_volumes(engine: &Path, verbose: bool) -> cross::Result> { + let stdout = Command::new(engine) + .args(&["volume", "list"]) + .arg("--format") + .arg("{{.Name}}") + .arg("--filter") + // handles simple regex: ^ for start of line. + .arg("name=^cross-") + .run_and_get_stdout(verbose)?; + + let mut volumes: Vec = stdout.lines().map(|s| s.to_string()).collect(); + volumes.sort(); + + Ok(volumes) +} + +fn list_volumes(engine: &Path, verbose: bool) -> cross::Result<()> { + get_cross_volumes(engine, verbose)? 
+ .iter() + .for_each(|line| println!("{}", line)); + + Ok(()) +} + +fn remove_volumes(engine: &Path, verbose: bool, force: bool, execute: bool) -> cross::Result<()> { + let volumes = get_cross_volumes(engine, verbose)?; + + let mut command = Command::new(engine); + command.args(&["volume", "rm"]); + if force { + command.arg("--force"); + } + command.args(&volumes); + if execute { + command.run(verbose) + } else { + println!("{:?}", command); + Ok(()) + } +} + +fn prune_volumes(engine: &Path, verbose: bool) -> cross::Result<()> { + Command::new(engine) + .args(&["volume", "prune", "--force"]) + .run_and_get_status(verbose)?; + + Ok(()) +} + +fn get_package_info( + target: &str, + channel: Option<&str>, + docker_in_docker: bool, + verbose: bool, +) -> cross::Result<(cross::Target, cross::CargoMetadata, cross::Directories)> { + let target_list = cross::target_list(false)?; + let target = cross::Target::from(target, &target_list); + let metadata = cross::cargo_metadata_with_args(None, None, verbose)? + .ok_or(eyre::eyre!("unable to get project metadata"))?; + let cwd = std::env::current_dir()?; + let host_meta = cross::version_meta()?; + let host = host_meta.host(); + let sysroot = cross::get_sysroot(&host, &target, channel, verbose)?.1; + let dirs = cross::Directories::create(&metadata, &cwd, &sysroot, docker_in_docker, verbose)?; + + Ok((target, metadata, dirs)) +} + +fn create_crate_volume( + engine: &cross::Engine, + target: &str, + docker_in_docker: bool, + channel: Option<&str>, + verbose: bool, +) -> cross::Result<()> { + let (target, metadata, dirs) = get_package_info(target, channel, docker_in_docker, verbose)?; + let container = cross::remote_identifier(&target, &metadata, &dirs)?; + let volume = format!("{container}-keep"); + + if cross::volume_exists(engine, &volume, verbose)? 
{ + eyre::bail!("error: volume {volume} already exists."); + } + + cross::docker_command(engine) + .args(&["volume", "create", &volume]) + .run_and_get_status(verbose)?; + + // stop the container if it's already running + let state = cross::container_state(engine, &container, verbose)?; + if !state.is_stopped() { + eprintln!("warning: container {container} was running."); + cross::container_stop(engine, &container, verbose)?; + } + if state.exists() { + eprintln!("warning: container {container} was exited."); + cross::container_rm(engine, &container, verbose)?; + } + + // create a dummy running container to copy data over + let mount_prefix = Path::new("/cross"); + let mut docker = cross::docker_command(engine); + docker.arg("run"); + docker.args(&["--name", &container]); + docker.args(&["-v", &format!("{}:{}", volume, mount_prefix.display())]); + docker.arg("-d"); + if atty::is(Stream::Stdin) && atty::is(Stream::Stdout) && atty::is(Stream::Stderr) { + docker.arg("-t"); + } + docker.arg("ubuntu:16.04"); + // ensure the process never exits until we stop it + docker.args(&["sh", "-c", "sleep infinity"]); + docker.run_and_get_status(verbose)?; + + cross::copy_volume_xargo( + engine, + &container, + &dirs.xargo, + &target, + mount_prefix, + verbose, + )?; + cross::copy_volume_cargo(engine, &container, &dirs.cargo, mount_prefix, verbose)?; + cross::copy_volume_rust( + engine, + &container, + &dirs.sysroot, + &target, + mount_prefix, + verbose, + )?; + + cross::container_stop(engine, &container, verbose)?; + cross::container_rm(engine, &container, verbose)?; + + Ok(()) +} + +fn remove_crate_volume( + engine: &cross::Engine, + target: &str, + docker_in_docker: bool, + channel: Option<&str>, + verbose: bool, +) -> cross::Result<()> { + let (target, metadata, dirs) = get_package_info(target, channel, docker_in_docker, verbose)?; + let container = cross::remote_identifier(&target, &metadata, &dirs)?; + let volume = format!("{container}-keep"); + + if 
!cross::volume_exists(engine, &volume, verbose)? { + eyre::bail!("error: volume {volume} does not exist."); + } + + cross::volume_rm(engine, &volume, verbose)?; + + Ok(()) +} + +fn get_cross_containers(engine: &Path, verbose: bool) -> cross::Result> { + let stdout = Command::new(engine) + .args(&["ps", "-a"]) + .arg("--format") + .arg("{{.Names}}: {{.State}}") + .arg("--filter") + // handles simple regex: ^ for start of line. + .arg("name=^cross-") + .run_and_get_stdout(verbose)?; + + let mut containers: Vec = stdout.lines().map(|s| s.to_string()).collect(); + containers.sort(); + + Ok(containers) +} + +fn list_containers(engine: &Path, verbose: bool) -> cross::Result<()> { + get_cross_containers(engine, verbose)? + .iter() + .for_each(|line| println!("{}", line)); + + Ok(()) +} + +fn remove_containers( + engine: &Path, + verbose: bool, + force: bool, + execute: bool, +) -> cross::Result<()> { + let containers = get_cross_containers(engine, verbose)?; + let mut running = vec![]; + let mut stopped = vec![]; + for container in containers.iter() { + // cannot fail, formatted as {{.Names}}: {{.State}} + let (name, state) = container.split_once(':').unwrap(); + let name = name.trim(); + let state = cross::ContainerState::new(state.trim())?; + if state.is_stopped() { + stopped.push(name); + } else { + running.push(name); + } + } + + let mut commands = vec![]; + if !running.is_empty() { + let mut stop = Command::new(engine); + stop.arg("stop"); + stop.args(&running); + commands.push(stop); + } + + if !(stopped.is_empty() && running.is_empty()) { + let mut rm = Command::new(engine); + rm.arg("rm"); + if force { + rm.arg("--force"); + } + rm.args(&running); + rm.args(&stopped); + commands.push(rm); + } + if execute { + for mut command in commands { + command.run(verbose)?; + } + } else { + for command in commands { + println!("{:?}", command); + } + } + + Ok(()) +} + pub fn main() -> cross::Result<()> { cross::install_panic_hook()?; let cli = Cli::parse(); @@ -227,6 +554,70 
@@ pub fn main() -> cross::Result<()> { remove_target_images(&engine, targets, *verbose, *force, *local, *execute)?; } } + Commands::ListVolumes { verbose, engine } => { + let engine = get_container_engine(engine.as_deref())?; + list_volumes(&engine, *verbose)?; + } + Commands::RemoveVolumes { + verbose, + force, + execute, + engine, + } => { + let engine = get_container_engine(engine.as_deref())?; + remove_volumes(&engine, *verbose, *force, *execute)?; + } + Commands::PruneVolumes { verbose, engine } => { + let engine = get_container_engine(engine.as_deref())?; + prune_volumes(&engine, *verbose)?; + } + Commands::CreateCrateVolume { + target, + toolchain, + docker_in_docker, + verbose, + engine, + } => { + let engine = get_container_engine(engine.as_deref())?; + let engine = cross::Engine::from_path(engine, true, *verbose)?; + create_crate_volume( + &engine, + target, + *docker_in_docker, + toolchain.as_deref(), + *verbose, + )?; + } + Commands::RemoveCrateVolume { + target, + toolchain, + docker_in_docker, + verbose, + engine, + } => { + let engine = get_container_engine(engine.as_deref())?; + let engine = cross::Engine::from_path(engine, true, *verbose)?; + remove_crate_volume( + &engine, + target, + *docker_in_docker, + toolchain.as_deref(), + *verbose, + )?; + } + Commands::ListContainers { verbose, engine } => { + let engine = get_container_engine(engine.as_deref())?; + list_containers(&engine, *verbose)?; + } + Commands::RemoveContainers { + verbose, + force, + execute, + engine, + } => { + let engine = get_container_engine(engine.as_deref())?; + remove_containers(&engine, *verbose, *force, *execute)?; + } } Ok(()) diff --git a/src/cargo.rs b/src/cargo.rs index 544dabc31..a86c32bf3 100644 --- a/src/cargo.rs +++ b/src/cargo.rs @@ -76,9 +76,10 @@ impl CargoMetadata { #[derive(Debug, Deserialize)] pub struct Package { - id: String, - manifest_path: PathBuf, - source: Option, + pub name: String, + pub id: String, + pub manifest_path: PathBuf, + pub source: 
Option, } impl Package { diff --git a/src/cli.rs b/src/cli.rs index e8b5ac0cb..140c9a0a3 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -16,6 +16,7 @@ pub struct Args { pub target_dir: Option, pub docker_in_docker: bool, pub enable_doctests: bool, + pub is_remote: bool, pub manifest_path: Option, } @@ -139,6 +140,9 @@ pub fn parse(target_list: &TargetList) -> Result { let enable_doctests = env::var("CROSS_UNSTABLE_ENABLE_DOCTESTS") .map(|s| bool_from_envvar(&s)) .unwrap_or_default(); + let is_remote = env::var("CROSS_REMOTE") + .map(|s| bool_from_envvar(&s)) + .unwrap_or_default(); Ok(Args { all, @@ -149,6 +153,7 @@ pub fn parse(target_list: &TargetList) -> Result { target_dir, docker_in_docker, enable_doctests, + is_remote, manifest_path, }) } diff --git a/src/docker.rs b/src/docker.rs index ed8d1c3b4..6bceafc06 100644 --- a/src/docker.rs +++ b/src/docker.rs @@ -4,10 +4,11 @@ use std::process::{Command, ExitStatus}; use std::{env, fs}; use crate::cargo::CargoMetadata; +use crate::errors::*; use crate::extensions::{CommandExt, SafeCommand}; -use crate::file::write_file; +use crate::file::{self, write_file}; use crate::id; -use crate::{errors::*, file}; +use crate::rustc; use crate::{Config, Target}; use atty::Stream; use eyre::bail; @@ -22,13 +23,96 @@ const PODMAN: &str = "podman"; // to fork the process, and which podman allows by default. 
const SECCOMP: &str = include_str!("seccomp.json"); -#[derive(Debug, PartialEq, Eq)] -enum EngineType { +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum EngineType { Docker, Podman, + PodmanRemote, Other, } +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Engine { + pub kind: EngineType, + pub path: PathBuf, + pub is_remote: bool, +} + +impl Engine { + pub fn new(is_remote: bool, verbose: bool) -> Result { + let path = get_container_engine() + .map_err(|_| eyre::eyre!("no container engine found")) + .with_suggestion(|| "is docker or podman installed?")?; + Self::from_path(path, is_remote, verbose) + } + + pub fn from_path(path: PathBuf, is_remote: bool, verbose: bool) -> Result { + let kind = get_engine_type(&path, verbose)?; + Ok(Engine { + path, + kind, + is_remote, + }) + } + + pub fn needs_remote(&self) -> bool { + self.is_remote && self.kind == EngineType::Podman + } +} + +struct DeleteVolume<'a>(&'a Engine, &'a VolumeId, bool); + +impl<'a> Drop for DeleteVolume<'a> { + fn drop(&mut self) { + if let VolumeId::Discard(id) = self.1 { + volume_rm(self.0, id, self.2).ok(); + } + } +} + +struct DeleteContainer<'a>(&'a Engine, &'a str, bool); + +impl<'a> Drop for DeleteContainer<'a> { + fn drop(&mut self) { + container_stop(self.0, self.1, self.2).ok(); + container_rm(self.0, self.1, self.2).ok(); + } +} + +#[derive(Debug, PartialEq, Eq)] +pub enum ContainerState { + Created, + Running, + Paused, + Restarting, + Dead, + Exited, + DoesNotExist, +} + +impl ContainerState { + pub fn new(state: &str) -> Result { + match state { + "created" => Ok(ContainerState::Created), + "running" => Ok(ContainerState::Running), + "paused" => Ok(ContainerState::Paused), + "restarting" => Ok(ContainerState::Restarting), + "dead" => Ok(ContainerState::Dead), + "exited" => Ok(ContainerState::Exited), + "" => Ok(ContainerState::DoesNotExist), + _ => eyre::bail!("unknown container state: got {state}"), + } + } + + pub fn is_stopped(&self) -> bool { + matches!(self, Self::Exited | 
Self::DoesNotExist) + } + + pub fn exists(&self) -> bool { + !matches!(self, Self::DoesNotExist) + } +} + // determine if the container engine is docker. this fixes issues with // any aliases (#530), and doesn't fail if an executable suffix exists. fn get_engine_type(ce: &Path, verbose: bool) -> Result { @@ -37,7 +121,9 @@ fn get_engine_type(ce: &Path, verbose: bool) -> Result { .run_and_get_stdout(verbose)? .to_lowercase(); - if stdout.contains("podman") { + if stdout.contains("podman-remote") { + Ok(EngineType::PodmanRemote) + } else if stdout.contains("podman") { Ok(EngineType::Podman) } else if stdout.contains("docker") && !stdout.contains("emulate") { Ok(EngineType::Docker) @@ -54,15 +140,23 @@ pub fn get_container_engine() -> Result { } } -pub fn docker_command(engine: &Path, subcommand: &str) -> Result { - let mut command = Command::new(engine); +pub fn docker_command(engine: &Engine) -> Command { + let mut command = Command::new(&engine.path); + if engine.needs_remote() { + // if we're using podman and not podman-remote, need `--remote`. + command.arg("--remote"); + } + command +} + +pub fn docker_subcommand(engine: &Engine, subcommand: &str) -> Command { + let mut command = docker_command(engine); command.arg(subcommand); - command.args(&["--userns", "host"]); - Ok(command) + command } /// Register binfmt interpreters -pub fn register(target: &Target, verbose: bool) -> Result<()> { +pub fn register(target: &Target, is_remote: bool, verbose: bool) -> Result<()> { let cmd = if target.is_windows() { // https://www.kernel.org/doc/html/latest/admin-guide/binfmt-misc.html "mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc && \ @@ -72,8 +166,9 @@ pub fn register(target: &Target, verbose: bool) -> Result<()> { binfmt-support qemu-user-static" }; - let engine = get_container_engine()?; - docker_command(&engine, "run")? 
+ let engine = Engine::new(is_remote, verbose)?; + docker_subcommand(&engine, "run") + .args(&["--userns", "host"]) .arg("--privileged") .arg("--rm") .arg("ubuntu:16.04") @@ -98,145 +193,412 @@ fn parse_docker_opts(value: &str) -> Result> { shell_words::split(value).wrap_err_with(|| format!("could not parse docker opts of {}", value)) } +#[derive(Debug)] +pub struct Directories { + pub cargo: PathBuf, + pub xargo: PathBuf, + pub target: PathBuf, + pub nix_store: Option, + pub host_root: PathBuf, + pub mount_root: PathBuf, + pub mount_cwd: PathBuf, + pub sysroot: PathBuf, +} + +impl Directories { + #[allow(unused_variables)] + pub fn create( + metadata: &CargoMetadata, + cwd: &Path, + sysroot: &Path, + docker_in_docker: bool, + verbose: bool, + ) -> Result { + let mount_finder = if docker_in_docker { + MountFinder::new(docker_read_mount_paths()?) + } else { + MountFinder::default() + }; + let home_dir = + home::home_dir().ok_or_else(|| eyre::eyre!("could not find home directory"))?; + let cargo = home::cargo_home()?; + let xargo = env::var_os("XARGO_HOME") + .map(PathBuf::from) + .unwrap_or_else(|| home_dir.join(".xargo")); + let nix_store = env::var_os("NIX_STORE").map(PathBuf::from); + let target = &metadata.target_directory; + + // create the directories we are going to mount before we mount them, + // otherwise `docker` will create them but they will be owned by `root` + fs::create_dir(&cargo).ok(); + fs::create_dir(&xargo).ok(); + fs::create_dir(&target).ok(); + + let cargo = mount_finder.find_mount_path(cargo); + let xargo = mount_finder.find_mount_path(xargo); + let target = mount_finder.find_mount_path(target); + + // root is either workspace_root, or, if we're outside the workspace root, the current directory + let host_root = mount_finder.find_mount_path(if metadata.workspace_root.starts_with(cwd) { + cwd + } else { + &metadata.workspace_root + }); + + // root is either workspace_root, or, if we're outside the workspace root, the current directory + let 
mount_root: PathBuf; + #[cfg(target_os = "windows")] + { + // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. + mount_root = wslpath(&host_root, verbose)?; + } + #[cfg(not(target_os = "windows"))] + { + mount_root = mount_finder.find_mount_path(host_root.clone()); + } + let mount_cwd: PathBuf; + #[cfg(target_os = "windows")] + { + // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. + mount_cwd = wslpath(cwd, verbose)?; + } + #[cfg(not(target_os = "windows"))] + { + mount_cwd = mount_finder.find_mount_path(cwd); + } + let sysroot = mount_finder.find_mount_path(sysroot); + + Ok(Directories { + cargo, + xargo, + target, + nix_store, + host_root, + mount_root, + mount_cwd, + sysroot, + }) + } +} + +#[derive(Debug)] +enum VolumeId { + Keep(String), + Discard(String), +} + +impl VolumeId { + fn create(engine: &Engine, container: &str, verbose: bool) -> Result { + let keep_id = format!("{container}-keep"); + if volume_exists(engine, &keep_id, verbose)? { + Ok(Self::Keep(keep_id)) + } else { + Ok(Self::Discard(container.to_string())) + } + } +} + +impl AsRef for VolumeId { + fn as_ref(&self) -> &str { + match self { + Self::Keep(s) => s, + Self::Discard(s) => s, + } + } +} + +fn cargo_cmd(uses_xargo: bool) -> SafeCommand { + if uses_xargo { + SafeCommand::new("xargo") + } else { + SafeCommand::new("cargo") + } +} + #[allow(unused_variables)] -pub fn mount(cmd: &mut Command, val: &Path, verbose: bool) -> Result { - let host_path = file::canonicalize(&val) - .wrap_err_with(|| format!("when canonicalizing path `{}`", val.display()))?; - let mount_path: PathBuf; +fn canonicalize_mount_path(path: &Path, verbose: bool) -> Result { #[cfg(target_os = "windows")] { // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. 
- mount_path = wslpath(&host_path, verbose)?; + wslpath(&path, verbose) } #[cfg(not(target_os = "windows"))] { - mount_path = host_path.clone(); + Ok(path.to_path_buf()) } - cmd.args(&[ +} + +fn canonicalize_host_path(path: &Path) -> Result { + file::canonicalize(path).wrap_err_with(|| format!("when canonicalizing path `{path:?}`")) +} + +fn remote_mount_path(val: &Path, verbose: bool) -> Result { + let host_path = canonicalize_host_path(val)?; + canonicalize_mount_path(&host_path, verbose) +} + +fn mount(docker: &mut Command, val: &Path, prefix: &str, verbose: bool) -> Result { + let host_path = canonicalize_host_path(val)?; + let mount_path = canonicalize_mount_path(&host_path, verbose)?; + docker.args(&[ "-v", - &format!("{}:{}", host_path.display(), mount_path.display()), + &format!("{}:{prefix}{}", host_path.display(), mount_path.display()), ]); Ok(mount_path) } -#[allow(clippy::too_many_arguments)] // TODO: refactor -pub fn run( - target: &Target, - args: &[String], - metadata: &CargoMetadata, - config: &Config, - uses_xargo: bool, - sysroot: &Path, +fn create_volume_dir( + engine: &Engine, + container: &str, + dir: &Path, verbose: bool, - docker_in_docker: bool, - cwd: &Path, ) -> Result { - let mount_finder = if docker_in_docker { - MountFinder::new(docker_read_mount_paths()?) 
- } else { - MountFinder::default() - }; + // make our parent directory if needed + docker_subcommand(engine, "exec") + .arg(container) + .args(&["sh", "-c", &format!("mkdir -p '{}'", dir.display())]) + .run_and_get_status(verbose) +} - let home_dir = home::home_dir().ok_or_else(|| eyre::eyre!("could not find home directory"))?; - let cargo_dir = home::cargo_home()?; - let xargo_dir = env::var_os("XARGO_HOME") - .map(PathBuf::from) - .unwrap_or_else(|| home_dir.join(".xargo")); - let nix_store_dir = env::var_os("NIX_STORE").map(PathBuf::from); - let target_dir = &metadata.target_directory; - - // create the directories we are going to mount before we mount them, - // otherwise `docker` will create them but they will be owned by `root` - fs::create_dir(&target_dir).ok(); - fs::create_dir(&cargo_dir).ok(); - fs::create_dir(&xargo_dir).ok(); - - // update paths to the host mounts path. - let cargo_dir = mount_finder.find_mount_path(cargo_dir); - let xargo_dir = mount_finder.find_mount_path(xargo_dir); - let target_dir = mount_finder.find_mount_path(target_dir); - // root is either workspace_root, or, if we're outside the workspace root, the current directory - let host_root = mount_finder.find_mount_path(if metadata.workspace_root.starts_with(cwd) { - cwd - } else { - &metadata.workspace_root - }); - let mount_root: PathBuf; - #[cfg(target_os = "windows")] - { - // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. - mount_root = wslpath(&host_root, verbose)?; - } - #[cfg(not(target_os = "windows"))] - { - mount_root = mount_finder.find_mount_path(host_root.clone()); - } - let mount_cwd: PathBuf; - #[cfg(target_os = "windows")] - { - // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. 
- mount_cwd = wslpath(cwd, verbose)?; - } - #[cfg(not(target_os = "windows"))] - { - mount_cwd = mount_finder.find_mount_path(cwd); +// copy files for a docker volume, for remote host support +fn copy_volume_files( + engine: &Engine, + container: &str, + src: &Path, + dst: &Path, + verbose: bool, +) -> Result { + docker_subcommand(engine, "cp") + .arg("-a") + .arg(&src.display().to_string()) + .arg(format!("{container}:{}", dst.display())) + .run_and_get_status(verbose) +} + +pub fn copy_volume_xargo( + engine: &Engine, + container: &str, + xargo_dir: &Path, + target: &Target, + mount_prefix: &Path, + verbose: bool, +) -> Result<()> { + // only need to copy the rustlib files for our current target. + let triple = target.triple(); + let relpath = Path::new("lib").join("rustlib").join(&triple); + let src = xargo_dir.join(&relpath); + let dst = mount_prefix.join("xargo").join(&relpath); + if Path::new(&src).exists() { + create_volume_dir(engine, container, dst.parent().unwrap(), verbose)?; + copy_volume_files(engine, container, &src, &dst, verbose)?; } - let sysroot = mount_finder.find_mount_path(sysroot); - let mut cmd = if uses_xargo { - SafeCommand::new("xargo") - } else { - SafeCommand::new("cargo") - }; + Ok(()) +} - cmd.args(args); +pub fn copy_volume_cargo( + engine: &Engine, + container: &str, + cargo_dir: &Path, + mount_prefix: &Path, + verbose: bool, +) -> Result<()> { + let dst = mount_prefix.join("cargo"); + // can copy a limit subset of files: the rest is present. + create_volume_dir(engine, container, &dst, verbose)?; + for entry in fs::read_dir(cargo_dir)? 
{ + let file = entry?; + let basename = file.file_name().to_string_lossy().into_owned(); + if !basename.starts_with('.') && !matches!(basename.as_ref(), "git" | "registry") { + copy_volume_files(engine, container, &file.path(), &dst, verbose)?; + } + } - let runner = config.runner(target)?; + Ok(()) +} - let engine = get_container_engine() - .map_err(|_| eyre::eyre!("no container engine found")) - .with_suggestion(|| "is docker or podman installed?")?; - let engine_type = get_engine_type(&engine, verbose)?; +// recursively copy a directory into another +fn copy_dir(src: &Path, dst: &Path, depth: u32, skip: Skip) -> Result<()> +where + Skip: Copy + Fn(&fs::DirEntry, u32) -> bool, +{ + for entry in fs::read_dir(src)? { + let file = entry?; + if skip(&file, depth) { + continue; + } - let mut docker = docker_command(&engine, "run")?; + let src_path = file.path(); + let dst_path = dst.join(file.file_name()); + if file.file_type()?.is_file() { + fs::copy(&src_path, &dst_path)?; + } else { + fs::create_dir(&dst_path).ok(); + copy_dir(&src_path, &dst_path, depth + 1, skip)?; + } + } - for ref var in config.env_passthrough(target)? { - validate_env_var(var)?; + Ok(()) +} - // Only specifying the environment variable name in the "-e" - // flag forwards the value from the parent shell - docker.args(&["-e", var]); +pub fn copy_volume_rust( + engine: &Engine, + container: &str, + sysroot: &Path, + target: &Target, + mount_prefix: &Path, + verbose: bool, +) -> Result<()> { + // the rust toolchain is quite large, but most of it isn't needed + // we need the bin, libexec, and etc directories, and part of the lib directory. 
+ let dst = mount_prefix.join("rust"); + create_volume_dir(engine, container, &dst, verbose)?; + for basename in ["bin", "libexec", "etc"] { + let file = sysroot.join(basename); + copy_volume_files(engine, container, &file, &dst, verbose)?; } - let mut mount_volumes = false; - // FIXME(emilgardis 2022-04-07): This is a fallback so that if it's hard for us to do mounting logic, make it simple(r) - // Preferably we would not have to do this. - if cwd.strip_prefix(&metadata.workspace_root).is_err() { - mount_volumes = true; + + // the lib directories are rather large, so we want only a subset. + // now, we use a temp directory for everything else in the libdir + // we can pretty safely assume we don't have symlinks here. + let rustlib = Path::new("lib").join("rustlib"); + let src_rustlib = sysroot.join(&rustlib); + let dst_rustlib = dst.join(&rustlib); + + let tempdir = tempfile::tempdir()?; + let temppath = tempdir.path(); + copy_dir(&sysroot.join("lib"), temppath, 0, |e, d| { + d == 0 && e.file_name() == "rustlib" + })?; + fs::create_dir(&temppath.join("rustlib")).ok(); + copy_dir( + &src_rustlib, + &temppath.join("rustlib"), + 0, + |entry, depth| { + if depth != 0 { + return false; + } + let file_type = match entry.file_type() { + Ok(file_type) => file_type, + Err(_) => return true, + }; + let file_name = entry.file_name(); + !(file_type.is_file() || file_name == "src" || file_name == "etc") + }, + )?; + copy_volume_files(engine, container, temppath, &dst.join("lib"), verbose)?; + // must make the `dst.join("lib")` **after** here, or we copy temp into lib. + create_volume_dir(engine, container, &dst_rustlib, verbose)?; + + // we first copy over the toolchain file, then everything besides it. + // since we don't want to call docker 100x, we copy the intermediate + // files to a temp directory so they're cleaned up afterwards. 
+ let toolchain_path = src_rustlib.join(&target.triple()); + if toolchain_path.exists() { + copy_volume_files(engine, container, &toolchain_path, &dst_rustlib, verbose)?; } - for ref var in config.env_volumes(target)? { - let (var, value) = validate_env_var(var)?; - let value = match value { - Some(v) => Ok(v.to_string()), - None => env::var(var), - }; + // now we need to copy over the host toolchain too, since it has + // some requirements to find std libraries, etc. + let rustc = sysroot.join("bin").join("rustc"); + let libdir = Command::new(rustc) + .args(&["--print", "target-libdir"]) + .run_and_get_stdout(verbose)?; + let host_toolchain_path = Path::new(libdir.trim()).parent().unwrap(); + copy_volume_files( + engine, + container, + host_toolchain_path, + &dst_rustlib, + verbose, + )?; + + Ok(()) +} - if let Ok(val) = value { - let mount_path = mount(&mut docker, val.as_ref(), verbose)?; - docker.args(&["-e", &format!("{}={}", var, mount_path.display())]); - mount_volumes = true; - } - } +pub fn volume_create(engine: &Engine, volume: &str, verbose: bool) -> Result<ExitStatus> { + docker_subcommand(engine, "volume") + .args(&["create", volume]) + .run_and_get_status(verbose) +} - for path in metadata.path_dependencies() { - mount(&mut docker, path, verbose)?; - mount_volumes = true; - } +pub fn volume_rm(engine: &Engine, volume: &str, verbose: bool) -> Result<ExitStatus> { + docker_subcommand(engine, "volume") + .args(&["rm", volume]) + .run_and_get_status(verbose) +} - docker.args(&["-e", "PKG_CONFIG_ALLOW_CROSS=1"]); +pub fn volume_exists(engine: &Engine, volume: &str, verbose: bool) -> Result<bool> { + let output = docker_subcommand(engine, "volume") + .args(&["inspect", volume]) + .run_and_get_output(verbose)?; + Ok(output.status.success()) +} - docker.arg("--rm"); +pub fn container_stop(engine: &Engine, container: &str, verbose: bool) -> Result<ExitStatus> { + docker_subcommand(engine, "stop") + .arg(container) + .run_and_get_status(verbose) +} + +pub fn container_rm(engine: &Engine, container:
&str, verbose: bool) -> Result<ExitStatus> { + docker_subcommand(engine, "rm") + .arg(container) + .run_and_get_status(verbose) +} + +pub fn container_state(engine: &Engine, container: &str, verbose: bool) -> Result<ContainerState> { + let stdout = docker_subcommand(engine, "ps") + .arg("-a") + .args(&["--filter", &format!("name={container}")]) + .args(&["--format", "{{.State}}"]) + .run_and_get_stdout(verbose)?; + ContainerState::new(stdout.trim()) +} +fn path_hash(path: &Path) -> String { + sha1_smol::Sha1::from(path.display().to_string().as_bytes()) + .digest() + .to_string() + .get(..5) + .expect("sha1 is expected to be at least 5 characters long") + .to_string() +} + +pub fn remote_identifier( + target: &Target, + metadata: &CargoMetadata, + dirs: &Directories, +) -> Result<String> { + let host_version_meta = rustc::version_meta()?; + let commit_hash = host_version_meta + .commit_hash + .unwrap_or(host_version_meta.short_version_string); + + let workspace_root = &metadata.workspace_root; + let package = metadata + .packages + .iter() + .find(|p| p.manifest_path.parent().unwrap() == workspace_root) + .unwrap_or_else(|| metadata.packages.get(0).unwrap()); + + let name = &package.name; + let triple = target.triple(); + let project_hash = path_hash(&package.manifest_path); + let toolchain_hash = path_hash(&dirs.sysroot); + Ok(format!( + "cross-{name}-{triple}-{project_hash}-{toolchain_hash}-{commit_hash}" + )) +} + +#[allow(unused_variables)] +fn docker_seccomp( + docker: &mut Command, + engine_type: EngineType, + target: &Target, + verbose: bool, +) -> Result<()> { // docker uses seccomp now on all installations if target.needs_docker_seccomp() { let seccomp = if engine_type == EngineType::Docker && cfg!(target_os = "windows") { @@ -256,7 +618,7 @@ pub fn run( write_file(&path, false)?.write_all(SECCOMP.as_bytes())?; } #[cfg(target_os = "windows")] - if engine_type == EngineType::Podman { + if matches!(engine_type, EngineType::Podman | EngineType::PodmanRemote) { // podman weirdly expects a WSL
path here, and fails otherwise path = wslpath(&path, verbose)?; } @@ -266,22 +628,41 @@ pub fn run( docker.args(&["--security-opt", &format!("seccomp={}", seccomp)]); } + Ok(()) +} + +fn user_id() -> String { + env::var("CROSS_CONTAINER_UID").unwrap_or_else(|_| id::user().to_string()) +} + +fn group_id() -> String { + env::var("CROSS_CONTAINER_GID").unwrap_or_else(|_| id::group().to_string()) +} + +fn docker_user_id(docker: &mut Command, engine_type: EngineType) { // We need to specify the user for Docker, but not for Podman. if engine_type == EngineType::Docker { - docker.args(&[ - "--user", - &format!( - "{}:{}", - env::var("CROSS_CONTAINER_UID").unwrap_or_else(|_| id::user().to_string()), - env::var("CROSS_CONTAINER_GID").unwrap_or_else(|_| id::group().to_string()), - ), - ]); + docker.args(&["--user", &format!("{}:{}", user_id(), group_id(),)]); } +} +fn docker_envvars(docker: &mut Command, config: &Config, target: &Target) -> Result<()> { + for ref var in config.env_passthrough(target)? 
{ + validate_env_var(var)?; + + // Only specifying the environment variable name in the "-e" + // flag forwards the value from the parent shell + docker.args(&["-e", var]); + } + + let runner = config.runner(target)?; + let cross_runner = format!("CROSS_RUNNER={}", runner.unwrap_or_default()); docker + .args(&["-e", "PKG_CONFIG_ALLOW_CROSS=1"]) .args(&["-e", "XARGO_HOME=/xargo"]) .args(&["-e", "CARGO_HOME=/cargo"]) - .args(&["-e", "CARGO_TARGET_DIR=/target"]); + .args(&["-e", "CARGO_TARGET_DIR=/target"]) + .args(&["-e", &cross_runner]); if let Some(username) = id::username().unwrap() { docker.args(&["-e", &format!("USER={username}")]); @@ -305,30 +686,61 @@ pub fn run( docker.args(&parse_docker_opts(&value)?); }; - docker - .args(&[ - "-e", - &format!("CROSS_RUNNER={}", runner.unwrap_or_default()), - ]) - .args(&["-v", &format!("{}:/xargo:Z", xargo_dir.display())]) - .args(&["-v", &format!("{}:/cargo:Z", cargo_dir.display())]) - // Prevent `bin` from being mounted inside the Docker container. - .args(&["-v", "/cargo/bin"]); - if mount_volumes { - docker.args(&[ - "-v", - &format!("{}:{}:Z", host_root.display(), mount_root.display()), - ]); - } else { - docker.args(&["-v", &format!("{}:/project:Z", host_root.display())]); + Ok(()) +} + +#[allow(clippy::too_many_arguments)] // TODO: refactor +fn docker_mount( + docker: &mut Command, + metadata: &CargoMetadata, + config: &Config, + target: &Target, + cwd: &Path, + verbose: bool, + mount_cb: impl Fn(&mut Command, &Path, bool) -> Result, + mut store_cb: impl FnMut((String, PathBuf)), +) -> Result { + let mut mount_volumes = false; + // FIXME(emilgardis 2022-04-07): This is a fallback so that if it's hard for us to do mounting logic, make it simple(r) + // Preferably we would not have to do this. + if cwd.strip_prefix(&metadata.workspace_root).is_err() { + mount_volumes = true; + } + + for ref var in config.env_volumes(target)? 
{ + let (var, value) = validate_env_var(var)?; + let value = match value { + Some(v) => Ok(v.to_string()), + None => env::var(var), + }; + + if let Ok(val) = value { + let mount_path = mount_cb(docker, val.as_ref(), verbose)?; + docker.args(&["-e", &format!("{}={}", var, mount_path.display())]); + store_cb((val, mount_path)); + mount_volumes = true; + } } - docker - .args(&["-v", &format!("{}:/rust:Z,ro", sysroot.display())]) - .args(&["-v", &format!("{}:/target:Z", target_dir.display())]); + for path in metadata.path_dependencies() { + let mount_path = mount_cb(docker, path, verbose)?; + store_cb((path.display().to_string(), mount_path)); + mount_volumes = true; + } + + Ok(mount_volumes) +} + +fn docker_cwd( + docker: &mut Command, + metadata: &CargoMetadata, + dirs: &Directories, + cwd: &Path, + mount_volumes: bool, +) -> Result<()> { if mount_volumes { - docker.args(&["-w".as_ref(), mount_cwd.as_os_str()]); - } else if mount_cwd == metadata.workspace_root { + docker.args(&["-w".as_ref(), dirs.mount_cwd.as_os_str()]); + } else if dirs.mount_cwd == metadata.workspace_root { docker.args(&["-w", "/project"]); } else { // We do this to avoid clashes with path separators. Windows uses `\` as a path separator on Path::join @@ -343,9 +755,302 @@ pub fn run( docker.args(&["-w".as_ref(), mount_wd.as_os_str()]); } + Ok(()) +} + +#[allow(clippy::too_many_arguments)] // TODO: refactor +fn remote_run( + target: &Target, + args: &[String], + metadata: &CargoMetadata, + config: &Config, + uses_xargo: bool, + sysroot: &Path, + verbose: bool, + docker_in_docker: bool, + cwd: &Path, +) -> Result { + let dirs = Directories::create(metadata, cwd, sysroot, docker_in_docker, verbose)?; + + let mut cmd = cargo_cmd(uses_xargo); + cmd.args(args); + + let engine = Engine::new(true, verbose)?; + let mount_prefix = "/cross"; + + // the logic is broken into the following steps + // 1. get our unique identifiers and cleanup from a previous run. + // 2. 
create a data volume to store everything + // 3. start our container with the data volume and all envvars + // 4. copy all mounted volumes over + // 5. create symlinks for all mounted data + // 6. execute our cargo command inside the container + // 7. copy data from target dir back to host + // 8. stop container and delete data volume + // + // we use structs that wrap the resources to ensure they're dropped + // in the correct order even on error, to ensure safe cleanup + + // 1. get our unique identifiers and cleanup from a previous run. + // this can happen if we didn't gracefully exit before + let container = remote_identifier(target, metadata, &dirs)?; + let volume = VolumeId::create(&engine, &container, verbose)?; + let state = container_state(&engine, &container, verbose)?; + if !state.is_stopped() { + eprintln!("warning: container {container} was running."); + container_stop(&engine, &container, verbose)?; + } + if state.exists() { + eprintln!("warning: container {container} was exited."); + container_rm(&engine, &container, verbose)?; + } + if let VolumeId::Discard(ref id) = volume { + if volume_exists(&engine, id, verbose)? { + eprintln!("warning: temporary volume {container} existed."); + volume_rm(&engine, id, verbose)?; + } + } + + // 2. create our volume to copy all our data over to + if let VolumeId::Discard(ref id) = volume { + volume_create(&engine, id, verbose)?; + } + let _volume_deletter = DeleteVolume(&engine, &volume, verbose); + + // 3. 
create our start container command here + let mut docker = docker_subcommand(&engine, "run"); + docker.args(&["--userns", "host"]); + docker.args(&["--name", &container]); + docker.args(&["-v", &format!("{}:{mount_prefix}", volume.as_ref())]); + docker_envvars(&mut docker, config, target)?; + + let mut volumes = vec![]; + let mount_volumes = docker_mount( + &mut docker, + metadata, + config, + target, + cwd, + verbose, + |_, val, verbose| remote_mount_path(val, verbose), + |(src, dst)| volumes.push((src, dst)), + )?; + + docker_seccomp(&mut docker, engine.kind, target, verbose)?; + + // Prevent `bin` from being mounted inside the Docker container. + docker.args(&["-v", &format!("{mount_prefix}/cargo/bin")]); + + // When running inside NixOS or using Nix packaging we need to add the Nix + // Store to the running container so it can load the needed binaries. + if let Some(ref nix_store) = dirs.nix_store { + volumes.push((nix_store.display().to_string(), nix_store.to_path_buf())) + } + + docker.arg("-d"); + if atty::is(Stream::Stdin) && atty::is(Stream::Stdout) && atty::is(Stream::Stderr) { + docker.arg("-t"); + } + + docker + .arg(&image(config, target)?) + // ensure the process never exits until we stop it + .args(&["sh", "-c", "sleep infinity"]) + .run_and_get_status(verbose)?; + let _container_deletter = DeleteContainer(&engine, &container, verbose); + + // 4. 
copy all mounted volumes over + let copy = |src, dst: &PathBuf| copy_volume_files(&engine, &container, src, dst, verbose); + let mount_prefix_path = mount_prefix.as_ref(); + if let VolumeId::Discard(_) = volume { + copy_volume_xargo( + &engine, + &container, + &dirs.xargo, + target, + mount_prefix_path, + verbose, + )?; + copy_volume_cargo(&engine, &container, &dirs.cargo, mount_prefix_path, verbose)?; + copy_volume_rust( + &engine, + &container, + &dirs.sysroot, + target, + mount_prefix_path, + verbose, + )?; + } + let mount_root = if mount_volumes { + // cannot panic: absolute unix path, must have root + let rel_mount_root = dirs.mount_root.strip_prefix("/").unwrap(); + let mount_root = mount_prefix_path.join(rel_mount_root); + if rel_mount_root != PathBuf::new() { + create_volume_dir(&engine, &container, mount_root.parent().unwrap(), verbose)?; + } + mount_root + } else { + mount_prefix_path.join("project") + }; + copy(&dirs.host_root, &mount_root)?; + + let mut copied = vec![ + (&dirs.xargo, mount_prefix_path.join("xargo")), + (&dirs.cargo, mount_prefix_path.join("cargo")), + (&dirs.sysroot, mount_prefix_path.join("rust")), + (&dirs.host_root, mount_root.clone()), + ]; + let mut to_symlink = vec![]; + let target_dir = canonicalize_host_path(&dirs.target)?; + let target_dir = if let Ok(relpath) = target_dir.strip_prefix(&dirs.host_root) { + // target dir is in the project, just symlink it in + let target_dir = mount_root.join(relpath); + to_symlink.push((target_dir.clone(), "/target".to_string())); + target_dir + } else { + // outside project, need to copy the target data over + let target_dir = mount_prefix_path.join("target"); + copy(&dirs.target, &target_dir)?; + copied.push((&dirs.target, target_dir.clone())); + target_dir + }; + for (src, dst) in volumes.iter() { + let src: &Path = src.as_ref(); + if let Some((psrc, pdst)) = copied.iter().find(|(p, _)| src.starts_with(p)) { + // path has already been copied over + let relpath = 
src.strip_prefix(psrc).unwrap(); + to_symlink.push((pdst.join(relpath), dst.display().to_string())); + } else { + let rel_dst = dst.strip_prefix("/").unwrap(); + let mount_dst = mount_prefix_path.join(rel_dst); + if rel_dst != PathBuf::new() { + create_volume_dir(&engine, &container, mount_dst.parent().unwrap(), verbose)?; + } + copy(src, &mount_dst)?; + } + } + + // 5. create symlinks for copied data + let mut symlink = vec!["set -e pipefail".to_string()]; + if verbose { + symlink.push("set -x".to_string()); + } + symlink.push(format!( + "chown -R {uid}:{gid} {mount_prefix}/*", + uid = user_id(), + gid = group_id(), + )); + // need a simple script to add symlinks, but not override existing files. + symlink.push(format!( + "prefix=\"{mount_prefix}\" + +symlink_recurse() {{ + for f in \"${{1}}\"/*; do + dst=${{f#\"$prefix\"}} + if [ -f \"${{dst}}\" ]; then + echo \"invalid: got unexpected file at ${{dst}}\" 1>&2 + exit 1 + elif [ -d \"${{dst}}\" ]; then + symlink_recurse \"${{f}}\" + else + ln -s \"${{f}}\" \"${{dst}}\" + fi + done +}} + +symlink_recurse \"${{prefix}}\" +" + )); + for (src, dst) in to_symlink { + symlink.push(format!("ln -s \"{}\" \"{}\"", src.display(), dst)); + } + docker_subcommand(&engine, "exec") + .arg(&container) + .args(&["sh", "-c", &symlink.join("\n")]) + .run_and_get_status(verbose)?; + + // 6. execute our cargo command inside the container + let mut docker = docker_subcommand(&engine, "exec"); + docker_user_id(&mut docker, engine.kind); + docker_cwd(&mut docker, metadata, &dirs, cwd, mount_volumes)?; + docker.arg(&container); + docker.args(&["sh", "-c", &format!("PATH=$PATH:/rust/bin {:?}", cmd)]); + let status = docker.run_and_get_status(verbose); + + // 7. 
copy data from our target dir back to host + docker_subcommand(&engine, "cp") + .arg("-a") + .arg(&format!("{container}:{}", target_dir.display())) + .arg(&dirs.target.parent().unwrap()) + .run_and_get_status(verbose)?; + + status +} + +#[allow(clippy::too_many_arguments)] // TODO: refactor +fn local_run( + target: &Target, + args: &[String], + metadata: &CargoMetadata, + config: &Config, + uses_xargo: bool, + sysroot: &Path, + verbose: bool, + docker_in_docker: bool, + cwd: &Path, +) -> Result { + let dirs = Directories::create(metadata, cwd, sysroot, docker_in_docker, verbose)?; + + let mut cmd = cargo_cmd(uses_xargo); + cmd.args(args); + + let engine = Engine::new(false, verbose)?; + + let mut docker = docker_subcommand(&engine, "run"); + docker.args(&["--userns", "host"]); + docker_envvars(&mut docker, config, target)?; + + let mount_volumes = docker_mount( + &mut docker, + metadata, + config, + target, + cwd, + verbose, + |docker, val, verbose| mount(docker, val, "", verbose), + |_| {}, + )?; + + docker.arg("--rm"); + + docker_seccomp(&mut docker, engine.kind, target, verbose)?; + docker_user_id(&mut docker, engine.kind); + + docker + .args(&["-v", &format!("{}:/xargo:Z", dirs.xargo.display())]) + .args(&["-v", &format!("{}:/cargo:Z", dirs.cargo.display())]) + // Prevent `bin` from being mounted inside the Docker container. + .args(&["-v", "/cargo/bin"]); + if mount_volumes { + docker.args(&[ + "-v", + &format!( + "{}:{}:Z", + dirs.host_root.display(), + dirs.mount_root.display() + ), + ]); + } else { + docker.args(&["-v", &format!("{}:/project:Z", dirs.host_root.display())]); + } + docker + .args(&["-v", &format!("{}:/rust:Z,ro", dirs.sysroot.display())]) + .args(&["-v", &format!("{}:/target:Z", dirs.target.display())]); + docker_cwd(&mut docker, metadata, &dirs, cwd, mount_volumes)?; + // When running inside NixOS or using Nix packaging we need to add the Nix // Store to the running container so it can load the needed binaries. 
- if let Some(nix_store) = nix_store_dir { + if let Some(ref nix_store) = dirs.nix_store { docker.args(&[ "-v", &format!("{}:{}:Z", nix_store.display(), nix_store.display()), @@ -365,6 +1070,46 @@ pub fn run( .run_and_get_status(verbose) } +#[allow(clippy::too_many_arguments)] // TODO: refactor +pub fn run( + target: &Target, + args: &[String], + metadata: &CargoMetadata, + config: &Config, + uses_xargo: bool, + sysroot: &Path, + verbose: bool, + docker_in_docker: bool, + is_remote: bool, + cwd: &Path, +) -> Result { + if is_remote { + remote_run( + target, + args, + metadata, + config, + uses_xargo, + sysroot, + verbose, + docker_in_docker, + cwd, + ) + } else { + local_run( + target, + args, + metadata, + config, + uses_xargo, + sysroot, + verbose, + docker_in_docker, + cwd, + ) + } +} + pub fn image(config: &Config, target: &Target) -> Result { if let Some(image) = config.image(target)? { return Ok(image); diff --git a/src/lib.rs b/src/lib.rs index f4642c6ff..e2b4ece03 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -39,14 +39,15 @@ use config::Config; use rustc_version::Channel; use serde::Deserialize; -use self::cargo::{CargoMetadata, Subcommand}; +use self::cargo::Subcommand; use self::cross_toml::CrossToml; use self::errors::Context; -use self::rustc::{TargetList, VersionMetaExt}; -pub use self::docker::get_container_engine; +pub use self::cargo::{cargo_metadata_with_args, CargoMetadata}; +pub use self::docker::*; pub use self::errors::{install_panic_hook, Result}; -pub use self::extensions::{CommandExt, OutputExt}; +pub use self::extensions::*; +pub use self::rustc::{target_list, version_meta, TargetList, VersionMetaExt}; #[allow(non_camel_case_types)] #[derive(Debug, Clone, PartialEq, Eq)] @@ -245,7 +246,7 @@ impl std::fmt::Display for Target { } impl Target { - fn from(triple: &str, target_list: &TargetList) -> Target { + pub fn from(triple: &str, target_list: &TargetList) -> Target { if target_list.contains(triple) { Target::new_built_in(triple) } else { @@ 
-276,6 +277,32 @@ impl From<&str> for Target { } } +pub fn get_sysroot( + host: &Host, + target: &Target, + channel: Option<&str>, + verbose: bool, +) -> Result<(String, PathBuf)> { + let mut sysroot = rustc::sysroot(host, target, verbose)?; + let default_toolchain = sysroot + .file_name() + .and_then(|file_name| file_name.to_str()) + .ok_or_else(|| eyre::eyre!("couldn't get toolchain name"))?; + let toolchain = if let Some(channel) = channel { + [channel] + .iter() + .cloned() + .chain(default_toolchain.splitn(2, '-').skip(1)) + .collect::<Vec<_>>() + .join("-") + } else { + default_toolchain.to_string() + }; + sysroot.set_file_name(&toolchain); + + Ok((toolchain, sysroot)) +} + pub fn run() -> Result<ExitStatus> { let target_list = rustc::target_list(false)?; let args = cli::parse(&target_list)?; @@ -292,10 +319,9 @@ pub fn run() -> Result<ExitStatus> { .iter() .any(|a| a == "--verbose" || a == "-v" || a == "-vv"); - let host_version_meta = - rustc_version::version_meta().wrap_err("couldn't fetch the `rustc` version")?; + let host_version_meta = rustc::version_meta()?; let cwd = std::env::current_dir()?; - if let Some(metadata) = cargo::cargo_metadata_with_args(None, Some(&args), verbose)? { + if let Some(metadata) = cargo_metadata_with_args(None, Some(&args), verbose)?
{ let host = host_version_meta.host(); let toml = toml(&metadata)?; let config = Config::new(toml); @@ -314,22 +340,8 @@ pub fn run() -> Result<ExitStatus> { }; if image_exists && host.is_supported(Some(&target)) { - let mut sysroot = rustc::sysroot(&host, &target, verbose)?; - let default_toolchain = sysroot - .file_name() - .and_then(|file_name| file_name.to_str()) - .ok_or_else(|| eyre::eyre!("couldn't get toolchain name"))?; - let toolchain = if let Some(channel) = args.channel { - [channel] - .iter() - .map(|c| c.as_str()) - .chain(default_toolchain.splitn(2, '-').skip(1)) - .collect::<Vec<_>>() - .join("-") - } else { - default_toolchain.to_string() - }; - sysroot.set_file_name(&toolchain); + let (toolchain, sysroot) = + get_sysroot(&host, &target, args.channel.as_deref(), verbose)?; let mut is_nightly = toolchain.contains("nightly"); let installed_toolchains = rustup::installed_toolchains(verbose)?; @@ -417,7 +429,7 @@ pub fn run() -> Result<ExitStatus> { && target.needs_interpreter() && !interpreter::is_registered(&target)? { - docker::register(&target, verbose)? + docker::register(&target, args.is_remote, verbose)? } return docker::run( @@ -429,6 +441,7 @@ pub fn run() -> Result<ExitStatus> { &sysroot, verbose, args.docker_in_docker, + args.is_remote, &cwd, ); } diff --git a/src/rustc.rs b/src/rustc.rs index 49ef6e45e..8db4b779e 100644 --- a/src/rustc.rs +++ b/src/rustc.rs @@ -56,3 +56,7 @@ pub fn sysroot(host: &Host, target: &Target, verbose: bool) -> Result<PathBuf> { Ok(PathBuf::from(stdout)) } + +pub fn version_meta() -> Result<rustc_version::VersionMeta> { + rustc_version::version_meta().wrap_err("couldn't fetch the `rustc` version") +}